class LoggerDepthLoss():
def __init__(self, type='train', empty_value=0.0):
super(LoggerDepthLoss, self).__init__()
self.type = type
self.empty_value = empty_value
def tick(self, logs, out_rgb, target_rgb, out_depth, target_depth=None):
if (target_depth is None):
return logs
mask = (target_depth != self.empty_value)
rgb_surface_loss = torch.nn.functional.mse_loss(out_rgb[mask], target_rgb[mask])
rgb_void_loss = torch.nn.functional.mse_loss(out_rgb[(~ mask)], target_rgb[(~ mask)])
(depth_loss, depth_empty, depth_space, l1) = comp_depth(out_depth, target_depth, self.empty_value)
return {**logs, f'{self.type}/rgb_surface_loss': rgb_surface_loss, f'{self.type}/rgb_void_loss': rgb_void_loss, f'{self.type}/depth_loss': depth_loss, f'{self.type}/depth_surface_loss': depth_space, f'{self.type}/depth_void_loss': depth_empty, f'{self.type}/depth_l1_loss': l1} |
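A minimal usage sketch for the logger above. comp_depth is defined elsewhere in the original module; the stand-in below is an assumption that only mimics its (total, void, surface, l1) return shape so the sketch is self-contained.
import torch

def comp_depth(out_depth, target_depth, empty_value):
    # Stand-in for the project's real comp_depth (assumed signature/return order).
    mask = (target_depth != empty_value)
    surface = torch.nn.functional.mse_loss(out_depth[mask], target_depth[mask])
    void = torch.nn.functional.mse_loss(out_depth[~mask], target_depth[~mask])
    l1 = torch.nn.functional.l1_loss(out_depth[mask], target_depth[mask])
    return (surface + void, void, surface, l1)

logger = LoggerDepthLoss(type='train', empty_value=0.0)
out_rgb, target_rgb = torch.rand(2, 64, 64), torch.rand(2, 64, 64)
out_depth, target_depth = torch.rand(2, 64, 64), torch.rand(2, 64, 64)
target_depth[:, :8] = 0.0  # mark a band of pixels as "void" (no ground-truth depth)
logs = logger.tick({}, out_rgb, target_rgb, out_depth, target_depth)
# logs now holds train/rgb_surface_loss, train/rgb_void_loss, the depth terms, ...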
@lru_cache(maxsize=1)
def _float_max_string_len() -> int:
PA_POS_FLOAT64_MAX_STR_BYTES = pc.binary_length(pc.cast(pa.scalar(np.finfo(np.float64).max, type=pa.float64()), pa.string())).as_py()
PA_NEG_FLOAT64_MAX_STR_BYTES = pc.binary_length(pc.cast(pa.scalar(np.finfo(np.float64).min, type=pa.float64()), pa.string())).as_py()
return max(PA_POS_FLOAT64_MAX_STR_BYTES, PA_NEG_FLOAT64_MAX_STR_BYTES) |
class UnboundCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(UnboundCollector, self).get_default_config_help()
config_help.update({'bin': 'Path to unbound-control binary', 'histogram': 'Include histogram in collection'})
return config_help
def get_default_config(self):
config = super(UnboundCollector, self).get_default_config()
config.update({'path': 'unbound', 'bin': self.find_binary('/usr/sbin/unbound-control'), 'histogram': True})
return config
def get_massaged_histogram(self, raw_histogram):
histogram = defaultdict(int)
for intv in sorted(raw_histogram.keys()):
if (intv <= 0.001024):
histogram['1ms'] += raw_histogram[intv]
elif (intv < 1.0):
intv_name = ''.join([str(int(((intv / 0.001024) / 2))), 'ms+'])
histogram[intv_name] = raw_histogram[intv]
elif (intv == 1.0):
histogram['512ms+'] = raw_histogram[intv]
elif (1.0 < intv <= 64.0):
intv_name = ''.join([str(int((intv / 2))), 's+'])
histogram[intv_name] = raw_histogram[intv]
else:
histogram['64s+'] += raw_histogram[intv]
return histogram
def collect(self):
stats_output = self.run_command([' stats'])
if (stats_output is None):
return
stats_output = stats_output[0]
raw_histogram = {}
include_hist = str_to_bool(self.config['histogram'])
for line in stats_output.splitlines():
(stat_name, stat_value) = line.split('=')
if (not stat_name.startswith('histogram')):
self.publish(stat_name, stat_value)
elif include_hist:
hist_intv = float(stat_name.split('.', 4)[4])
raw_histogram[hist_intv] = float(stat_value)
if include_hist:
histogram = self.get_massaged_histogram(raw_histogram)
for (intv, value) in histogram.items():
self.publish(('histogram.' + intv), value) |
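A quick, hedged illustration of how get_massaged_histogram buckets unbound's histogram intervals (keys are upper bounds in seconds). The method never reads self, so it can be exercised without a running diamond instance:
raw = {0.000512: 3, 0.004096: 2, 1.0: 1, 128.0: 4}
buckets = UnboundCollector.get_massaged_histogram(None, raw)
# buckets == {'1ms': 3, '2ms+': 2, '512ms+': 1, '64s+': 4}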
class Tiny200_boxes(datasets.ImageFolder):
def __init__(self, root, transform_rcrop, transform_ccrop, init_box=(0.0, 0.0, 1.0, 1.0), **kwargs):
super().__init__(root=root, **kwargs)
self.transform_rcrop = transform_rcrop
self.transform_ccrop = transform_ccrop
self.boxes = torch.tensor(init_box).repeat(self.__len__(), 1)
self.use_box = True
def __getitem__(self, index):
(path, target) = self.samples[index]
img = self.loader(path)
if self.use_box:
box = self.boxes[index].float().tolist()
img = self.transform_ccrop([img, box])
else:
img = self.transform_rcrop(img)
if (self.target_transform is not None):
target = self.target_transform(target)
return (img, target) |
@pytest.mark.parametrize('iterators', [[[1, 2, 3], [4, 5], [6, 7, 8]], [(i for i in range(1, 7)), (i for i in range(7, 9))]])
def test_flatten(iterators):
source = Stream()
L = source.flatten().sink_to_list()
for iterator in iterators:
source.emit(iterator)
assert (L == [1, 2, 3, 4, 5, 6, 7, 8]) |
class MultiChoiceInstruction(Instruction):
def __init__(self, data_name: str, data_list: List, verbalizer: Dict, instruction: str, keys_order: List[str], data_type: str):
super(MultiChoiceInstruction, self).__init__(data_name, data_list, verbalizer, instruction, keys_order, data_type)
self.NO_ANSWER = NO_ANSWER
def process_answer(self, answer_text):
if (answer_text == ''):
return self.NO_ANSWER
return answer_text
def transform2instruction(self):
examples = []
for sample in self.data_list:
example = {k: v for (k, v) in sample.items()}
if (self.data_name == 'c3'):
example['target'] = self.process_answer(example['answer'][0])
example['choice'] = SEP.join(example['choice'])
elif (self.data_name == 'dureader_yesno'):
example['target'] = self.process_answer(self.verbalizer[example['label'].lower()])
example['choice'] = SEP.join(list(set(self.verbalizer.values())))
elif (self.data_name == 'cail_yesno'):
example['target'] = self.process_answer(self.verbalizer[example['answer'][0].lower()])
example['choice'] = SEP.join(list(set(self.verbalizer.values())))
example['instruction'] = self.instruction.format(*[example[k] for k in self.keys_order])
example['data_type'] = self.data_type
example['start'] = self.get_start(example)
assert (example['instruction'][example['start']:(example['start'] + len(example['target']))] == example['target'])
examples.append(example)
return examples
def get_start(self, example):
if (example['target'] in ['', NO_ANSWER]):
return 0
start = 0
verbalizer = example['choice'].split(SEP)
target = example['target']
for n in range(0, verbalizer.index(target)):
start = ((start + len(verbalizer[n])) + 1)
start += example['instruction'].index(example['choice'])
return start |
class CNN_encoder(nn.Module):
def __init__(self, hidden_states=256):
super(CNN_encoder, self).__init__()
self.encoder = nn.Sequential(nn.Conv2d(3, 32, (15, 23), stride=9), nn.ReLU(True), nn.Conv2d(32, 64, 3, stride=(1, 3)), nn.ReLU(True), nn.Conv2d(64, 96, (7, 3), stride=(1, 3)), nn.ReLU(True))
self.mapping = nn.Linear(((96 * 4) * 6), hidden_states)
self.bn = nn.BatchNorm1d(hidden_states)
self.conv_channel = 96
self.conv_feat_num = 24
def forward(self, x, flag='unsupervised'):
x = self.encoder(x)
x = x.view((- 1), (self.conv_channel * self.conv_feat_num))
if (flag == 'supervised'):
return x
else:
x = self.bn(self.mapping(x))
return x |
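The layer arithmetic above implies a specific input resolution; one size that yields the expected 96x4x6 feature map is 114x500 (height x width). This is a hedged reconstruction from the kernels and strides, not a documented spec:
import torch

enc = CNN_encoder(hidden_states=256).eval()
x = torch.randn(2, 3, 114, 500)
# conv1 (15x23, stride 9): 12x54 -> conv2 (3x3, stride 1x3): 10x18
# -> conv3 (7x3, stride 1x3): 4x6, i.e. 96 * 24 = 2304 features
print(enc(x).shape)                      # torch.Size([2, 256])
print(enc(x, flag='supervised').shape)   # torch.Size([2, 2304])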
class UserPersonalAccessTokenManager(CreateMixin, RESTManager):
_path = '/users/{user_id}/personal_access_tokens'
_obj_cls = UserPersonalAccessToken
_from_parent_attrs = {'user_id': 'id'}
_create_attrs = RequiredOptional(required=('name', 'scopes'), optional=('expires_at',))
_types = {'scopes': ArrayAttribute} |
@pytest.fixture
def cache(repository_cache_dir: Path, repository_one: str, mock_caches: None) -> FileCache[dict[(str, str)]]:
cache: FileCache[dict[(str, str)]] = FileCache(path=(repository_cache_dir / repository_one))
cache.remember('cachy:0.1', (lambda : {'name': 'cachy', 'version': '0.1'}), minutes=None)
cache.remember('cleo:0.2', (lambda : {'name': 'cleo', 'version': '0.2'}), minutes=None)
return cache |
@given(st.sets(text))
def test_map_with_pad(tmpdir_factory, keys):
trie = marisa_trie.Trie(keys)
dirname = f'{str(uuid4())}_'
path = str(tmpdir_factory.mktemp(dirname).join('trie.bin'))
trie.save(path)
data = ((b'pad' + open(path, 'rb').read()) + b'pad')
trie2 = marisa_trie.Trie()
trie2.map(memoryview(data)[3:(- 3)])
for key in keys:
assert (key in trie2) |
def _send_twilio(msg, numbers):
twilio_sid = (SMS_CREDENTIALS['sid'].strip().split(' ')[0] if ('sid' in SMS_CREDENTIALS) else None)
twilio_token = (SMS_CREDENTIALS['token'].strip().split(' ')[0] if ('token' in SMS_CREDENTIALS) else None)
twilio_from = (SMS_CREDENTIALS['from'].strip().split(' ')[0] if ('from' in SMS_CREDENTIALS) else 'QTPyLib')
if ((twilio_sid is None) or (twilio_token is None) or (twilio_from is None)):
return 0
sent = 0
smsClient = twilioClient(twilio_sid, twilio_token)
for number in numbers:
if ('+' not in number):
number = ('+' + str(number))
response = smsClient.messages.create(body=msg, to=number, from_=twilio_from)
if (response.sid != ''):
sent += 1
return (len(numbers) == sent) |
def test_archs_platform_specific(platform, intercepted_build_args, monkeypatch):
monkeypatch.setenv('CIBW_ARCHS', 'unused')
monkeypatch.setenv('CIBW_ARCHS_LINUX', 'ppc64le')
monkeypatch.setenv('CIBW_ARCHS_WINDOWS', 'x86')
monkeypatch.setenv('CIBW_ARCHS_MACOS', 'x86_64')
main()
options = intercepted_build_args.args[0]
if (platform == 'linux'):
assert (options.globals.architectures == {Architecture.ppc64le})
elif (platform == 'windows'):
assert (options.globals.architectures == {Architecture.x86})
elif (platform == 'macos'):
assert (options.globals.architectures == {Architecture.x86_64}) |
@app.route('/api_save', methods=['POST'])
def api_save():
flag = request.form['flag']
req_host = request.form['host']
req_api_name = request.form['api_name']
req_project_name = request.form['project_name']
req_url = request.form['url']
req_method = request.form['method']
req_data = request.form['data']
req_auth = request.form['auth']
req_headers = request.form['headers']
req_assert_data = request.form['assert_data']
if (flag == '1'):
obj = g.db.execute(("select * from api where name='%s' and project_name='%s';" % (req_api_name, req_project_name))).fetchall()
if len(obj):
return jsonify(code=500, msg='api name already exists')
try:
g.db.execute(("insert into api (name, project_name, method, url, data, headers, auth, assert, host) values('%s','%s', '%s', '%s', '%s','%s', '%s', '%s', '%s');" % (req_api_name, req_project_name, req_method, req_url, req_data, req_headers, req_auth, req_assert_data, req_host)))
g.db.commit()
return jsonify(code=200)
except Exception as msg:
return jsonify(code=500, msg=msg)
else:
obj = g.db.execute(("select * from api where name='%s' and project_name='%s';" % (req_api_name, req_project_name))).fetchall()
if (not len(obj)):
return jsonify(code=500, msg='api does not exist, cannot update')
try:
g.db.execute(("update api set method='%s', url='%s', data='%s', headers='%s', auth='%s', assert='%s', host='%s' where name='%s' and project_name='%s';" % (req_method, req_url, req_data, req_headers, req_auth, req_assert_data, req_host, req_api_name, req_project_name)))
g.db.commit()
return jsonify(code=200)
except Exception as msg:
return jsonify(code=500, msg=msg) |
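The handler above interpolates user input straight into SQL. A hedged sketch of the same insert using DB-API placeholders (sqlite3-style `?` parameters, which the g.db.execute(...).fetchall() usage suggests is available), to be used inside the same handler:
# Same insert with bound parameters instead of string formatting.
g.db.execute(
    'insert into api (name, project_name, method, url, data, headers, auth, assert, host) '
    'values (?, ?, ?, ?, ?, ?, ?, ?, ?);',
    (req_api_name, req_project_name, req_method, req_url, req_data,
     req_headers, req_auth, req_assert_data, req_host))
g.db.commit()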
class InitializationSection(BasePathMixin):
tag = ext_x_map
def __init__(self, base_uri, uri, byterange=None):
self.base_uri = base_uri
self.uri = uri
self.byterange = byterange
def __str__(self):
output = []
if self.uri:
output.append(('URI=' + quoted(self.uri)))
if self.byterange:
output.append(('BYTERANGE=' + self.byterange))
return '{tag}:{attributes}'.format(tag=self.tag, attributes=','.join(output))
def __eq__(self, other):
if (not other):
return False
return ((self.uri == other.uri) and (self.byterange == other.byterange) and (self.base_uri == other.base_uri))
def __ne__(self, other):
return (not self.__eq__(other)) |
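A hedged rendering example, assuming ext_x_map is the '#EXT-X-MAP' tag constant and quoted() double-quotes its argument, as in m3u8-style playlist writers:
section = InitializationSection(base_uri='http://example.com/', uri='init.mp4', byterange='560@0')
print(section)  # -> #EXT-X-MAP:URI="init.mp4",BYTERANGE=560@0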
def convert_net_g(ori_net, crt_net):
for (crt_k, crt_v) in crt_net.items():
if ('style_mlp' in crt_k):
ori_k = crt_k.replace('style_mlp', 'style')
elif ('constant_input.weight' in crt_k):
ori_k = crt_k.replace('constant_input.weight', 'input.input')
elif ('style_conv1.modulated_conv' in crt_k):
ori_k = crt_k.replace('style_conv1.modulated_conv', 'conv1.conv')
elif ('style_conv1' in crt_k):
if (crt_v.shape == torch.Size([1])):
ori_k = crt_k.replace('style_conv1', 'conv1.noise')
else:
ori_k = crt_k.replace('style_conv1', 'conv1')
elif ('style_convs' in crt_k):
ori_k = crt_k.replace('style_convs', 'convs').replace('modulated_conv', 'conv')
if (crt_v.shape == torch.Size([1])):
ori_k = ori_k.replace('.weight', '.noise.weight')
elif ('to_rgb1.modulated_conv' in crt_k):
ori_k = crt_k.replace('to_rgb1.modulated_conv', 'to_rgb1.conv')
elif ('to_rgbs' in crt_k):
ori_k = crt_k.replace('modulated_conv', 'conv')
elif ('noises' in crt_k):
ori_k = crt_k.replace('.noise', '.noise_')
else:
ori_k = crt_k
if (crt_net[crt_k].size() != ori_net[ori_k].size()):
raise ValueError(f'''Wrong tensor size:
crt_net: {crt_net[crt_k].size()}
ori_net: {ori_net[ori_k].size()}''')
else:
crt_net[crt_k] = ori_net[ori_k]
return crt_net |
def get_word_list(line, dictionary):
splitted_words = json.loads(line.lower()).split()
words = ['<bos>']
for word in splitted_words:
word = filter_symbols.search(word)[0]
if (len(word) > 1):
if dictionary.word2idx.get(word, False):
words.append(word)
else:
words.append('<unk>')
words.append('<eos>')
return words |
def import_question(element, save=False, user=None):
try:
question = Question.objects.get(uri=element.get('uri'))
except Question.DoesNotExist:
question = Question()
set_common_fields(question, element)
set_foreign_field(question, 'attribute', element)
question.is_collection = (element.get('is_collection') or False)
question.is_optional = (element.get('is_optional') or False)
set_lang_field(question, 'text', element)
set_lang_field(question, 'help', element)
set_lang_field(question, 'default_text', element)
set_lang_field(question, 'verbose_name', element)
set_foreign_field(question, 'default_option', element)
question.default_external_id = (element.get('default_external_id') or '')
if (element.get('widget_type') in get_widget_types()):
question.widget_type = element.get('widget_type')
else:
question.widget_type = 'text'
question.value_type = (element.get('value_type') or '')
question.maximum = element.get('maximum')
question.minimum = element.get('minimum')
question.step = element.get('step')
question.unit = (element.get('unit') or '')
question.width = element.get('width')
validate_instance(question, element, QuestionLockedValidator, QuestionUniqueURIValidator)
check_permissions(question, element, user)
if (save and (not element.get('errors'))):
if question.id:
element['updated'] = True
logger.info('Question %s updated.', element.get('uri'))
else:
element['created'] = True
logger.info('Question created with uri %s.', element.get('uri'))
question.save()
set_reverse_m2m_through_instance(question, 'page', element, 'question', 'page', 'question_pages')
set_reverse_m2m_through_instance(question, 'questionset', element, 'question', 'questionset', 'question_questionsets')
set_m2m_instances(question, 'conditions', element)
set_m2m_instances(question, 'optionsets', element)
question.editors.add(Site.objects.get_current())
return question |
def pytest_configure(config: pytest.Config):
markers = []
if config.option.skip_generation_tests:
markers.append('not skip_generation_tests')
if config.option.skip_resolver_tests:
markers.append('not skip_resolver_tests')
if config.option.skip_gui_tests:
markers.append('not skip_gui_tests')
config.option.markexpr = ' and '.join(markers) |
class CPythonmacOsFramework(CPython, metaclass=ABCMeta):
@classmethod
def can_describe(cls, interpreter):
return (is_mac_os_framework(interpreter) and super().can_describe(interpreter))
def create(self):
super().create()
target = self.desired_mach_o_image_path()
current = self.current_mach_o_image_path()
for src in self._sources:
if (isinstance(src, ExePathRefToDest) and ((src.must == RefMust.COPY) or (not self.symlinks))):
exes = [(self.bin_dir / src.base)]
if (not self.symlinks):
exes.extend(((self.bin_dir / a) for a in src.aliases))
for exe in exes:
fix_mach_o(str(exe), current, target, self.interpreter.max_size)
@classmethod
def _executables(cls, interpreter):
for (_, targets, must, when) in super()._executables(interpreter):
fixed_host_exe = (((((Path(interpreter.prefix) / 'Resources') / 'Python.app') / 'Contents') / 'MacOS') / 'Python')
(yield (fixed_host_exe, targets, must, when))
def current_mach_o_image_path(self):
raise NotImplementedError
def desired_mach_o_image_path(self):
raise NotImplementedError |
class BackBone(nn.Module):
def __init__(self, opt):
super(BackBone, self).__init__()
self._name = 'BackBone'
self._opt = opt
self.model = self._init_create_networks()
def _init_create_networks(self):
if ((self._opt.pretrained_dataset == 'ferplus') or (self._opt.pretrained_dataset == 'sfew')):
if (self._opt.pretrained_dataset == 'ferplus'):
model_name = 'resnet50_ferplus_dag'
model_dir = os.path.join(MODEL_DIR, 'fer+')
else:
model_name = 'resnet50_face_sfew_dag'
model_dir = os.path.join(MODEL_DIR, 'sfew')
feature_extractor = load_model(model_name, model_dir)
meta = feature_extractor.meta
if (not (meta['imageSize'][0] == self._opt.image_size)):
new_imageSize = [self._opt.image_size, self._opt.image_size, 3]
override_meta_imsize = True
else:
new_imageSize = None
override_meta_imsize = False
setattr(self, 'augment_transforms', augment_transforms(meta, new_imageSize=new_imageSize, override_meta_imsize=override_meta_imsize))
setattr(self, 'compose_transforms', compose_transforms(meta, new_imageSize=new_imageSize, override_meta_imsize=override_meta_imsize))
elif (self._opt.pretrained_dataset == 'imagenet'):
import torchvision.models as models
model_name = 'resnet50_imagenet'
feature_extractor = models.resnext50_32x4d(pretrained=True)
im_size = self._opt.image_size
transform_list = transforms.Compose([transforms.Resize(int((im_size * 1.2))), transforms.CenterCrop(im_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
setattr(self, 'compose_transforms', transform_list)
transform_list = transforms.Compose([transforms.Resize(int((im_size * 1.2))), transforms.RandomCrop(im_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
setattr(self, 'augment_transforms', transform_list)
else:
raise ValueError(('Pretrained dataset %s not recognized.' % self._opt.pretrained_dataset))
setattr(feature_extractor, 'name', model_name)
(last_layer_name, last_module) = list(feature_extractor.named_modules())[(- 1)]
try:
(in_channels, out_channels) = (last_module.in_features, last_module.out_features)
last_linear = True
except:
(in_channels, out_channels) = (last_module.in_channels, last_module.out_channels)
last_linear = False
setattr(feature_extractor, '{}'.format(last_layer_name), Identity())
setattr(self, 'output_feature_dim', in_channels)
if ((self._opt.pretrained_dataset != 'imagenet') and (meta['imageSize'][0] != self._opt.image_size)):
(pool_layer_name, pool_layer) = list(feature_extractor.named_modules())[(- 2)]
setattr(feature_extractor, '{}'.format(pool_layer_name), nn.AdaptiveAvgPool2d((1, 1)))
return feature_extractor
def forward(self, x):
return self.model(x) |
@pytest.mark.fast
def test_line_survey(verbose=True, plot=False, warnings=True, *args, **kwargs):
_temp_file = 'radis_test_line_survey.html'
if exists(_temp_file):
os.remove(_temp_file)
s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.01.spec'), binary=True)
s.line_survey(overlay='abscoeff', writefile=_temp_file, barwidth='fwhm_voigt')
assert exists(_temp_file)
with open(_temp_file) as f:
d = f.read()
assert ('Linestrength' in d)
assert ('Wavenumber' in d)
if verbose:
print('test_line_survey: html file was correctly generated')
if (not plot):
os.remove(_temp_file)
return True |
def pytest_configure(config: Config) -> None:
if config.getvalue('lsof'):
checker = LsofFdLeakChecker()
if checker.matching_platform():
config.pluginmanager.register(checker)
config.addinivalue_line('markers', 'pytester_example_path(*path_segments): join the given path segments to `pytester_example_dir` for this test.') |
def convert_folder_with_preds_back_to_BraTS_labeling_convention(input_folder: str, output_folder: str, num_processes: int=12):
maybe_mkdir_p(output_folder)
nii = subfiles(input_folder, suffix='.nii.gz', join=False)
with multiprocessing.get_context('spawn').Pool(num_processes) as p:
p.starmap(load_convert_labels_back_to_BraTS, zip(nii, ([input_folder] * len(nii)), ([output_folder] * len(nii)))) |
class AutoTokenizer(object):
def __init__(self):
raise EnvironmentError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.')
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
if ('t5' in pretrained_model_name_or_path):
return T5Tokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('distilbert' in pretrained_model_name_or_path):
return DistilBertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('albert' in pretrained_model_name_or_path):
return AlbertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('camembert' in pretrained_model_name_or_path):
return CamembertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('xlm-roberta' in pretrained_model_name_or_path):
return XLMRobertaTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('roberta' in pretrained_model_name_or_path):
return RobertaTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('bert-base-japanese' in pretrained_model_name_or_path):
return BertJapaneseTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('bert' in pretrained_model_name_or_path):
return BertTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('openai-gpt' in pretrained_model_name_or_path):
return OpenAIGPTTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('gpt2' in pretrained_model_name_or_path):
return GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('transfo-xl' in pretrained_model_name_or_path):
return TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('xlnet' in pretrained_model_name_or_path):
return XLNetTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('xlm' in pretrained_model_name_or_path):
return XLMTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
elif ('ctrl' in pretrained_model_name_or_path):
return CTRLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
raise ValueError("Unrecognized model identifier in {}. Should contains one of 'bert', 'openai-gpt', 'gpt2', 'transfo-xl', 'xlnet', 'xlm-roberta', 'xlm', 'roberta', 'distilbert,' 'camembert', 'ctrl', 'albert'".format(pretrained_model_name_or_path)) |
class BotConfigureTest(TestCase):
def test_kwargs(self):
bot = bot_factory()
bot.provider.get_file.return_value = (None, None)
bot.configure(branch='bogus-branch', pin='bogus-pin', close_prs='bogus-close')
self.assertEqual(bot.config.branch, 'bogus-branch')
self.assertEqual(bot.config.pin, 'bogus-pin')
self.assertEqual(bot.config.close_prs, 'bogus-close')
def test_file(self):
bot = bot_factory()
bot.provider.get_file.return_value = ('close_prs: bogus-close\nbranch: bogus-branch', None)
bot.configure()
self.assertEqual(bot.config.branch, 'bogus-branch')
self.assertEqual(bot.config.close_prs, 'bogus-close')
def test_numeric_branch(self):
bot = bot_factory()
bot.provider.get_file.return_value = ('branch: 2.0\n', None)
bot.configure()
self.assertEqual(bot.config.branch, '2.0')
def test_write_config(self):
bot = bot_factory()
bot.provider.get_file.return_value = (None, None)
bot.configure(write_config={'branch': 'bogus-branch'})
self.assertEqual(bot.config.branch, 'bogus-branch') |
class ForwardSchedule(object):
def __init__(self, timesteps, beta_start=0.0001, beta_end=0.02, mode='linear'):
self.timesteps = timesteps
self.beta_start = beta_start
self.beta_end = beta_end
self.mode = mode.lower()
self.calc_vars()
def get_scheduler(self):
if (self.mode == 'linear'):
return linear_beta_schedule
elif (self.mode == 'quadratic'):
return quadratic_beta_schedule
elif (self.mode == 'cosine'):
return cosine_beta_schedule
elif (self.mode == 'sigmoid'):
return sigmoid_beta_schedule
else:
raise ValueError('Schedule mode must be in: [linear,quadratic,cosine,sigmoid]')
def calc_vars(self):
scheduler = self.get_scheduler()
self.betas = scheduler(timesteps=self.timesteps, beta_start=self.beta_start, beta_end=self.beta_end)
self.alphas = (1.0 - self.betas)
self.alphas_cumprod = torch.cumprod(self.alphas, axis=0)
self.alphas_cumprod_prev = F.pad(self.alphas_cumprod[:(- 1)], (1, 0), value=1.0)
self.sqrt_recip_alphas = torch.sqrt((1.0 / self.alphas))
self.sqrt_alphas_cumprod = torch.sqrt(self.alphas_cumprod)
self.sqrt_one_minus_alphas_cumprod = torch.sqrt((1.0 - self.alphas_cumprod))
self.posterior_variance = ((self.betas * (1.0 - self.alphas_cumprod_prev)) / (1.0 - self.alphas_cumprod))
def __extract(self, a, t, x_shape):
batch_size = t.shape[0]
out = a.gather((- 1), t.cpu())
return out.reshape(batch_size, *((1,) * (len(x_shape) - 1))).to(t.device)
def extract(self, t, x_shape):
return {'betas': self.__extract(self.betas, t, x_shape), 'sqrt_alphas_cumprod': self.__extract(self.sqrt_alphas_cumprod, t, x_shape), 'sqrt_one_minus_alphas_cumprod': self.__extract(self.sqrt_one_minus_alphas_cumprod, t, x_shape), 'sqrt_recip_alphas': self.__extract(self.sqrt_recip_alphas, t, x_shape), 'posterior_variance': self.__extract(self.posterior_variance, t, x_shape)} |
def test_first_query_delay():
type_ = '_http._tcp.local.'
zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])
_wait_for_start(zeroconf_browser)
old_send = zeroconf_browser.async_send
first_query_time = None
def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
nonlocal first_query_time
if (first_query_time is None):
first_query_time = current_time_millis()
old_send(out, addr=addr, port=port)
with patch.object(zeroconf_browser, 'async_send', send):
def on_service_state_change(zeroconf, service_type, state_change, name):
pass
start_time = current_time_millis()
browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
time.sleep(millis_to_seconds((_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5)))
try:
assert ((current_time_millis() - start_time) > _services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[0])
finally:
browser.cancel()
zeroconf_browser.close() |
def test_outer_loading_bad_item_quantity():
bad_quantity_data = change(outer_sample_data, ['items', 0, 'quantity'], Decimal(0))
raises_exc(AggregateLoadError(f'while loading model {Receipt}', [with_trail(AggregateLoadError(f'while loading iterable {list}', [with_trail(AggregateLoadError(f'while loading model {RecItem!r}', [with_trail(ValidationError('Value must be > 0', 0), ['quantity'])]), [0])]), ['items'])]), (lambda : outer_receipt_loader(bad_quantity_data))) |
def performance(ob, fo, grade_list=[1e-30], member_list=None, save_path=None, show=False, dpi=300, title=''):
sup_fontsize = 10
hfmc_array = hfmc(ob, fo, grade_list)
pod = pod_hfmc(hfmc_array)
sr = sr_hfmc(hfmc_array)
leftw = 0.6
rightw = 2
uphight = 1.2
lowhight = 1.2
axis_size_x = 3.7
axis_size_y = 3.5
width = ((axis_size_x + leftw) + rightw)
hight = ((axis_size_y + uphight) + lowhight)
fig = plt.figure(figsize=(width, hight), dpi=dpi)
ax1 = fig.add_axes([(leftw / width), (lowhight / width), (axis_size_x / width), (axis_size_y / hight)])
x = np.arange(0.0001, 1, 0.0001)
bias_list = [0.2, 0.4, 0.6, 0.8, 1, 1.25, 1.67, 2.5, 5]
ts_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
for i in range(len(bias_list)):
bias = bias_list[i]
y1 = (bias * x)
x2 = x[(y1 < 1)]
y2 = y1[(y1 < 1)]
if (bias < 1):
ax1.plot(x2, y2, '--', color='k', linewidth=0.5)
ax1.text(1.01, bias, ('bias=' + str(bias)), fontsize=(sup_fontsize * 0.8))
elif (bias > 1):
ax1.plot(x2, y2, '--', color='k', linewidth=0.5)
ax1.text(((1.0 / bias) - 0.05), 1.02, ('bias=' + str(bias)), fontsize=(sup_fontsize * 0.8))
else:
ax1.plot(x2, y2, '-', color='k', linewidth=0.5)
for i in range(len(ts_list)):
ts = ts_list[i]
hf = 1
x2 = np.arange(ts, 1, 0.001)
hit = (hf * x2)
hfm = (hit / ts)
m = (hfm - hf)
y2 = (hit / (hit + m))
plt.plot(x2, y2, '--', color='y', linewidth=0.5)
error = np.abs((y2 - x2))
index = np.argmin(error)
sx = (x2[index] + 0.02)
sy = (y2[index] - 0.02)
ax1.text(sx, sy, ('ts=' + str(ts)))
new_sr = sr.reshape(((- 1), len(grade_list)))
new_pod = pod.reshape(((- 1), len(grade_list)))
new_sr_shape = new_sr.shape
label = []
legend_num = new_sr_shape[0]
if (member_list is None):
if (legend_num == 1):
label.append('')
else:
for i in range(legend_num):
label.append(('member ' + str((i + 1))))
else:
label.extend(member_list)
colors = validation.color_tools.get_color_list(legend_num)
marker = ['o', 'v', 's', 'p', 'P', '*', 'h', 'X', 'd', '1', '+', 'x', '.', '^', '<', '>', '2', '3', '4', '8', 'H', 'D', '|', '_']
a_list = []
grade_num = len(grade_list)
if ((legend_num > 1) and (grade_num > 1)):
for line in range(legend_num):
for i in range(len(grade_list)):
ax1.plot(new_sr[(line, i)], new_pod[(line, i)], marker[i], label=(i * line), color=colors[line], markersize=6)
a_list.append((i * line))
(lines, label1) = ax1.get_legend_handles_labels()
legend2 = ax1.legend(lines[0:len(lines):len(grade_list)], label, loc='upper right', bbox_to_anchor=(1.5, 1), ncol=1, fontsize=(sup_fontsize * 0.9))
legend1 = ax1.legend(lines[:len(grade_list)], [('grade:' + str(i)) for i in grade_list], loc='lower right', bbox_to_anchor=(1.5, 0), ncol=1, fontsize=(sup_fontsize * 0.9))
ax1.add_artist(legend1)
ax1.add_artist(legend2)
elif (legend_num > 1):
for line in range(legend_num):
i = 0
ax1.plot(new_sr[(line, i)], new_pod[(line, i)], marker[line], label=(i * line), color=colors[line], markersize=6)
a_list.append((i * line))
(lines, label1) = ax1.get_legend_handles_labels()
legend2 = ax1.legend(lines[0:len(lines):len(grade_list)], label, loc='upper right', bbox_to_anchor=(1.5, 1), ncol=1, fontsize=(sup_fontsize * 0.9))
ax1.add_artist(legend2)
elif (grade_num > 1):
colors = validation.color_tools.get_color_list(grade_num)
for i in range(grade_num):
line = 0
ax1.plot(new_sr[(line, i)], new_pod[(line, i)], marker[i], label=(i * line), color=colors[i], markersize=6)
a_list.append((i * line))
(lines, label1) = ax1.get_legend_handles_labels()
legend1 = ax1.legend(lines[:len(grade_list)], [('grade:' + str(i)) for i in grade_list], loc='upper right', bbox_to_anchor=(1.5, 1), ncol=1, fontsize=(sup_fontsize * 0.9))
ax1.add_artist(legend1)
ax1.set_xlim(0, 1)
ax1.set_ylim(0, 1)
ax1.set_xlabel('Success Ratio', fontsize=(sup_fontsize * 0.9))
ax1.set_ylabel('Probability of Detection', fontsize=(sup_fontsize * 0.9))
title = (title + '\n')
ax1.set_title(title, fontsize=sup_fontsize)
if (save_path is None):
show = True
else:
plt.savefig(save_path, bbox_inches='tight')
print(('figure saved to ' + save_path))
if (show is True):
plt.show()
plt.close() |
def update_alpha(gamma, p, Ap, has_converged, xnp):
denom = xnp.sum((xnp.conj(p) * Ap), axis=(- 2), keepdims=True)
alpha = do_safe_div(gamma, denom, xnp=xnp)
device = xnp.get_device(p)
alpha = xnp.where(has_converged, xnp.array(0.0, dtype=p.dtype, device=device), alpha)
return alpha |
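For context, this is the batched conjugate-gradient step size alpha = gamma / (p^H A p), zeroed for already-converged systems; do_safe_div and the xnp array-backend shim are project internals. A hedged single-system numpy check of the formula:
import numpy as np

# One SPD system: alpha should equal gamma / (p^T A p).
A = np.array([[4.0, 1.0], [1.0, 3.0]])
p = np.array([[1.0], [2.0]])             # shape (n, 1): one search direction
gamma = 5.0
denom = np.sum(np.conj(p) * (A @ p), axis=-2, keepdims=True)
alpha = gamma / denom                    # -> 5 / (p^T A p) = 5 / 20 = 0.25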
class QAReplayMemory(object):
def __init__(self, capacity=100000, priority_fraction=0.0, seed=None):
self.rng = np.random.RandomState(seed)
self.priority_fraction = priority_fraction
self.alpha_capacity = int((capacity * priority_fraction))
self.beta_capacity = (capacity - self.alpha_capacity)
(self.alpha_memory, self.beta_memory) = ([], [])
(self.alpha_rewards, self.beta_rewards) = ([], [])
def push(self, is_prior=False, reward=0.0, *args):
if (self.priority_fraction == 0.0):
is_prior = False
if is_prior:
self.alpha_memory.append(qa_transition(*args))
self.alpha_rewards.append(reward)
if (len(self.alpha_memory) > self.alpha_capacity):
remove_id = self.rng.randint(self.alpha_capacity)
self.alpha_memory = (self.alpha_memory[:remove_id] + self.alpha_memory[(remove_id + 1):])
self.alpha_rewards = (self.alpha_rewards[:remove_id] + self.alpha_rewards[(remove_id + 1):])
else:
self.beta_memory.append(qa_transition(*args))
self.beta_rewards.append(reward)
if (len(self.beta_memory) > self.beta_capacity):
remove_id = self.rng.randint(self.beta_capacity)
self.beta_memory = (self.beta_memory[:remove_id] + self.beta_memory[(remove_id + 1):])
self.beta_rewards = (self.beta_rewards[:remove_id] + self.beta_rewards[(remove_id + 1):])
def sample(self, batch_size):
if (self.priority_fraction == 0.0):
from_beta = min(batch_size, len(self.beta_memory))
res = self.rng.choice(self.beta_memory, from_beta)
else:
from_alpha = min(int((self.priority_fraction * batch_size)), len(self.alpha_memory))
from_beta = min((batch_size - int((self.priority_fraction * batch_size))), len(self.beta_memory))
res = (self.rng.choice(self.alpha_memory, from_alpha) + self.rng.choice(self.beta_memory, from_beta))
self.rng.shuffle(res)
return res
def avg_rewards(self):
if ((len(self.alpha_rewards) == 0) and (len(self.beta_rewards) == 0)):
return 0.0
return np.mean((self.alpha_rewards + self.beta_rewards))
def __len__(self):
return (len(self.alpha_memory) + len(self.beta_memory)) |
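A hedged usage sketch; qa_transition is assumed to be a namedtuple defined alongside the class, and the field names below are illustrative only:
from collections import namedtuple

qa_transition = namedtuple('qa_transition', ['obs', 'action', 'reward'])  # assumed shape
mem = QAReplayMemory(capacity=1000, priority_fraction=0.25, seed=42)
mem.push(True, 1.0, 'obs0', 'a0', 1.0)    # goes to the alpha (priority) buffer
mem.push(False, 0.0, 'obs1', 'a1', 0.0)   # goes to the beta buffer
print(len(mem), mem.avg_rewards())        # 2 0.5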
class KubernetesPodmanExecutor(KubernetesExecutor):
def __init__(self, *args, **kwargs):
super(KubernetesExecutor, self).__init__(*args, **kwargs)
self.namespace = self.executor_config.get('BUILDER_NAMESPACE', 'builder')
self.image = self.executor_config.get('BUILDER_CONTAINER_IMAGE', 'quay.io/projectquay/quay-builder:latest')
def _build_job_containers(self, token, build_uuid):
server_grpc_addr = (((self.manager_hostname.split(':', 1)[0] + ':') + str(SECURE_GRPC_SERVER_PORT)) if (self.registry_hostname == self.manager_hostname) else self.manager_hostname)
cert = self.executor_config.get('CA_CERT', self._ca_cert())
certs = ([cert] if (cert is not None) else [])
for extra_cert in self.executor_config.get('EXTRA_CA_CERTS', []):
try:
with open(os.path.join(OVERRIDE_CONFIG_DIRECTORY, extra_cert), 'r') as f:
certs.append(f.read())
except:
logger.warning('Failed to load extra CA cert for builder %s', extra_cert)
certs = '\n'.join(certs)
container = {'name': 'builder', 'imagePullPolicy': self.executor_config.get('IMAGE_PULL_POLICY', 'Always'), 'image': self.image, 'env': [{'name': 'TOKEN', 'value': token}, {'name': 'BUILD_UUID', 'value': build_uuid}, {'name': 'SERVER', 'value': server_grpc_addr}, {'name': 'REGISTRY_HOSTNAME', 'value': self.registry_hostname}, {'name': 'CONTAINER_RUNTIME', 'value': 'podman'}, {'name': 'BUILDAH_ISOLATION', 'value': 'chroot'}, {'name': 'CA_CERT', 'value': certs}, {'name': 'GIT_SSL_CAINFO', 'value': '/certs/cacert.crt'}, {'name': 'TLS_CERT_PATH', 'value': '/certs/cacert.crt'}, {'name': 'SSL_CERT_FILE', 'value': '/certs/cacert.crt'}, {'name': 'DEBUG', 'value': str(self.executor_config.get('DEBUG', False)).lower()}, {'name': 'HTTP_PROXY', 'value': self.executor_config.get('HTTP_PROXY', '')}, {'name': 'HTTPS_PROXY', 'value': self.executor_config.get('HTTPS_PROXY', '')}, {'name': 'NO_PROXY', 'value': self.executor_config.get('NO_PROXY', '')}, {'name': 'DOCKER_HOST', 'value': 'unix:///tmp/podman-run-1000/podman/podman.sock'}, {'name': 'EXECUTOR', 'value': self.executor_config.get('EXECUTOR', 'kubernetesPodman')}], 'resources': self._build_job_container_resources()}
if self._is_basic_kubernetes_distribution():
container['volumeMounts'] = [{'name': 'secrets-mask', 'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount'}]
return container |
@pytest.mark.functions
def test_truncate_datetime_dataframe_all_parts():
x = datetime(2022, 3, 21, 9, 1, 15, 666)
df = pd.DataFrame({'dt': [x], 'foo': [np.nan]}, copy=False)
result = df.truncate_datetime_dataframe('second')
assert (result.loc[(0, 'dt')] == datetime(2022, 3, 21, 9, 1, 15, 0))
result = df.truncate_datetime_dataframe('minute')
assert (result.loc[(0, 'dt')] == datetime(2022, 3, 21, 9, 1))
result = df.truncate_datetime_dataframe('HOUR')
assert (result.loc[(0, 'dt')] == datetime(2022, 3, 21, 9))
result = df.truncate_datetime_dataframe('Day')
assert (result.loc[(0, 'dt')] == datetime(2022, 3, 21))
result = df.truncate_datetime_dataframe('month')
assert (result.loc[(0, 'dt')] == datetime(2022, 3, 1))
result = df.truncate_datetime_dataframe('yeaR')
assert (result.loc[(0, 'dt')] == datetime(2022, 1, 1)) |
class TestLanguageModeling(unittest.TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
def tearDown(self):
logging.disable(logging.NOTSET)
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'fconv_lm', ['--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]', '--decoder-embed-dim', '280', '--optimizer', 'nag', '--lr', '0.1'])
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True)
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
def test_transformer_lm_with_adaptive_softmax(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_lm_with_adaptive_softmax') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'transformer_lm', ['--add-bos-token', '--criterion', 'adaptive_loss', '--adaptive-softmax-cutoff', '5,10,15'], run_validation=True)
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True)
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
def test_lstm_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True)
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
def test_lstm_lm_residuals(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_lm_residuals') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'lstm_lm', ['--add-bos-token', '--residuals'], run_validation=True)
eval_lm_main(data_dir)
generate_main(data_dir, ['--task', 'language_modeling', '--sample-break-mode', 'eos', '--tokens-per-sample', '500'])
@unittest.skipIf((not has_hf_transformers), 'skip test if transformers is missing')
def test_transformer_xl_bptt_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_xl_bptt_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
task_flags = ['--user-dir', 'examples/truncated_bptt', '--task', 'truncated_bptt_lm', '--batch-size', '2', '--tokens-per-sample', '50']
train_language_model(data_dir=data_dir, arch='transformer_xl', extra_flags=(task_flags + ['--n-layer', '2']), task='truncated_bptt_lm', run_validation=True, extra_valid_flags=task_flags)
eval_lm_main(data_dir, extra_flags=task_flags) |
class A(HTMLElement):
@classmethod
def from_bookmark(cls, view, bookmark):
if bookmark.is_page_internal:
raise ProgrammerError('You cannot use page-internal Bookmarks directly, first add it to a Bookmark to a View')
return cls(view, bookmark.href, description=bookmark.description, ajax=bookmark.ajax, read_check=bookmark.read_check, write_check=bookmark.write_check)
@classmethod
def factory_from_bookmark(cls, bookmark):
if bookmark.is_page_internal:
raise ProgrammerError('You cannot use page-internal Bookmarks directly, first add it to a Bookmark to a View')
return WidgetFactory(cls, bookmark.href, description=bookmark.description, ajax=bookmark.ajax, read_check=bookmark.read_check, write_check=bookmark.write_check)
@arg_checks(href=IsInstance(Url, allow_none=True))
def __init__(self, view, href, description=None, ajax=False, read_check=None, write_check=None, css_id=None):
self.href = href
self.ajax = ajax
super().__init__(view, 'a', children_allowed=True, read_check=read_check, write_check=write_check, css_id=css_id)
if description:
self.add_child(TextNode(self.view, description))
if self.ajax:
self.append_class('reahl-ajaxlink')
self.active = True
@property
def attributes(self):
attributes = super().attributes
if (self.active and (not self.disabled) and (self.href is not None)):
attributes.set_to('href', str(self.href))
return attributes
def get_js(self, context=None):
return [('$(%s).ajaxlink();' % self.contextualise_selector('".reahl-ajaxlink"', context))]
def set_active(self, active):
self.active = active |
@mock.patch('pytube.cli._download')
@mock.patch('pytube.cli.YouTube')
def test_download_audio_none(youtube, download):
youtube_instance = youtube.return_value
youtube_instance.streams.filter.return_value.order_by.return_value.last.return_value = None
with pytest.raises(SystemExit):
cli.download_audio(youtube_instance, 'filetype', 'target')
download.assert_not_called() |
def _math_define_validator(value, values):
if (not isinstance(value, tuple)):
raise ValueError('Input value {} of trigger_select should be a tuple'.format(value))
if (len(value) != 3):
raise ValueError('Number of parameters {} different from 3'.format(len(value)))
output = (sanitize_source(value[0]), value[1], sanitize_source(value[2]))
for i in range(3):
strict_discrete_set(output[i], values=values[i])
return output |
def test_request_pattern_generic_arg():
check_request_pattern(P[Dict].generic_arg(0, str), [LocatedRequest(loc_map=LocMap(TypeHintLoc(Dict))), LocatedRequest(loc_map=LocMap(TypeHintLoc(str), GenericParamLoc(0)))], fail=False)
check_request_pattern(P[Dict].generic_arg(0, str), [LocatedRequest(loc_map=LocMap(TypeHintLoc(Dict))), LocatedRequest(loc_map=LocMap(TypeHintLoc(str), GenericParamLoc(1)))], fail=True) |
@pytest.mark.parametrize('key', FUNCTION_METHODS)
def test_given_function_is_set_then_reading_avaliable(resetted_dmm6500, key):
if (key[(- 2):] == 'ac'):
getattr(resetted_dmm6500, FUNCTION_METHODS[key])(ac=True)
elif (key[(- 2):] == '4W'):
getattr(resetted_dmm6500, FUNCTION_METHODS[key])(wires=4)
else:
getattr(resetted_dmm6500, FUNCTION_METHODS[key])()
value = getattr(resetted_dmm6500, key.split(' ')[0])
assert (len(resetted_dmm6500.check_errors()) == 0)
assert (value is not None) |
@frozen
class BloqExample():
_func: Callable[([], Bloq)] = field(repr=False, hash=False)
name: str
bloq_cls: Type[Bloq]
generalizer: Callable[([Bloq], Optional[Bloq])] = (lambda x: x)
def make(self) -> Bloq:
return self._func()
def __call__(self) -> Bloq:
return self.make() |
@pytest.mark.parametrize('aoi_model', ['sapm', 'ashrae', 'physical', 'martin_ruiz'])
def test_aoi_models_singleton_weather_single_array(sapm_dc_snl_ac_system, location, aoi_model, weather):
mc = ModelChain(sapm_dc_snl_ac_system, location, dc_model='sapm', aoi_model=aoi_model, spectral_model='no_loss')
mc.run_model(weather=[weather])
assert isinstance(mc.results.aoi_modifier, tuple)
assert (len(mc.results.aoi_modifier) == 1)
assert isinstance(mc.results.ac, pd.Series)
assert (not mc.results.ac.empty)
assert ((mc.results.ac.iloc[0] > 150) and (mc.results.ac.iloc[0] < 200))
assert (mc.results.ac.iloc[1] < 1) |
def test_kaiming_init():
conv_module = nn.Conv2d(3, 16, 3)
kaiming_init(conv_module, bias=0.1)
assert conv_module.bias.allclose(torch.full_like(conv_module.bias, 0.1))
kaiming_init(conv_module, distribution='uniform')
with pytest.raises(AssertionError):
kaiming_init(conv_module, distribution='student-t')
conv_module_no_bias = nn.Conv2d(3, 16, 3, bias=False)
kaiming_init(conv_module_no_bias) |
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls |
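A hedged sketch of the override mechanism this enables: dropping an open_mmlab.json into the mmcv home directory shadows entries from the bundled default file. The key and URL below are placeholders:
import json
import os.path as osp

# Hypothetical override: redirect one model-zoo key to a local mirror.
override = {'resnet50_caffe': 'https://mirror.example.com/resnet50.pth'}
with open(osp.join(_get_mmcv_home(), 'open_mmlab.json'), 'w') as f:
    json.dump(override, f)
urls = get_external_models()  # default entries, with 'resnet50_caffe' replaced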
class Properties(PymiereBaseObject):
def __init__(self, pymiere_id=None):
super(Properties, self).__init__(pymiere_id)
def bind(self, eventName, function):
self._check_type(eventName, str, 'arg "eventName" of function "Properties.bind"')
self._check_type(function, any, 'arg "function" of function "Properties.bind"')
self._eval_on_this_object('bind({}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function)))
def unbind(self, eventName):
self._check_type(eventName, str, 'arg "eventName" of function "Properties.unbind"')
self._eval_on_this_object('unbind({})'.format(_format_object_to_es(eventName)))
def setTimeout(self, eventName, function, milliseconds):
self._check_type(eventName, str, 'arg "eventName" of function "Properties.setTimeout"')
self._check_type(function, any, 'arg "function" of function "Properties.setTimeout"')
self._check_type(milliseconds, float, 'arg "milliseconds" of function "Properties.setTimeout"')
self._eval_on_this_object('setTimeout({}, {}, {})'.format(_format_object_to_es(eventName), _format_object_to_es(function), _format_object_to_es(milliseconds)))
def doesPropertyExist(self, propertyKey):
self._check_type(propertyKey, str, 'arg "propertyKey" of function "Properties.doesPropertyExist"')
return self._eval_on_this_object('doesPropertyExist({})'.format(_format_object_to_es(propertyKey)))
def isPropertyReadOnly(self, propertyKey):
self._check_type(propertyKey, str, 'arg "propertyKey" of function "Properties.isPropertyReadOnly"')
return self._eval_on_this_object('isPropertyReadOnly({})'.format(_format_object_to_es(propertyKey)))
def clearProperty(self, propertyKey):
self._check_type(propertyKey, str, 'arg "propertyKey" of function "Properties.clearProperty"')
self._eval_on_this_object('clearProperty({})'.format(_format_object_to_es(propertyKey)))
def setProperty(self, property, value, persistent=True, createIfNotExist=True):
self._check_type(property, str, 'arg "property" of function "Properties.setProperty"')
self._check_type(persistent, bool, 'arg "persistent" of function "Properties.setProperty"')
self._check_type(createIfNotExist, bool, 'arg "createIfNotExist" of function "Properties.setProperty"')
self._eval_on_this_object('setProperty({}, {}, {}, {})'.format(_format_object_to_es(property), _format_object_to_es(value), _format_object_to_es(persistent), _format_object_to_es(createIfNotExist)))
def getProperty(self, propertyKey):
self._check_type(propertyKey, str, 'arg "propertyKey" of function "Properties.getProperty"')
return self._eval_on_this_object('getProperty({})'.format(_format_object_to_es(propertyKey))) |
def test_set_scale(qapp, imgfilename3x3):
item = BeePixmapItem(QtGui.QImage(imgfilename3x3), imgfilename3x3)
item.prepareGeometryChange = MagicMock()
item.setScale(3)
assert (item.scale() == 3)
assert (item.pos().x() == 0)
assert (item.pos().y() == 0)
item.prepareGeometryChange.assert_called_once() |
class JWNumberRestrictOperatorTest(unittest.TestCase):
def test_jw_restrict_operator(self):
n_qubits = 4
target_electrons = 2
penalty_const = 10.0
number_sparse = jordan_wigner_sparse(number_operator(n_qubits))
bias_sparse = jordan_wigner_sparse(sum([FermionOperator(((i, 1), (i, 0)), 1.0) for i in range(n_qubits)], FermionOperator()))
hamiltonian_sparse = ((penalty_const * (number_sparse - (target_electrons * scipy.sparse.identity((2 ** n_qubits)))).dot((number_sparse - (target_electrons * scipy.sparse.identity((2 ** n_qubits)))))) + bias_sparse)
restricted_hamiltonian = jw_number_restrict_operator(hamiltonian_sparse, target_electrons, n_qubits)
(true_eigvals, _) = eigh(hamiltonian_sparse.A)
(test_eigvals, _) = eigh(restricted_hamiltonian.A)
self.assertAlmostEqual(norm((true_eigvals[:6] - test_eigvals[:6])), 0.0)
def test_jw_restrict_operator_hopping_to_1_particle(self):
hop = (FermionOperator('3^ 1') + FermionOperator('1^ 3'))
hop_sparse = jordan_wigner_sparse(hop, n_qubits=4)
hop_restrict = jw_number_restrict_operator(hop_sparse, 1, n_qubits=4)
expected = csc_matrix(([1, 1], ([0, 2], [2, 0])), shape=(4, 4))
self.assertTrue(numpy.allclose(hop_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_1_particle(self):
interaction = FermionOperator('3^ 2^ 4 1')
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(interaction_sparse, 1, n_qubits=6)
expected = csc_matrix(([], ([], [])), shape=(6, 6))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_interaction_to_2_particles(self):
interaction = (FermionOperator('3^ 2^ 4 1') + FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(interaction_sparse, 2, n_qubits=6)
dim = ((6 * 5) // 2)
expected = csc_matrix(([(- 1), (- 1)], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_operator_hopping_to_1_particle_default_nqubits(self):
interaction = (FermionOperator('3^ 2^ 4 1') + FermionOperator('4^ 1^ 3 2'))
interaction_sparse = jordan_wigner_sparse(interaction, n_qubits=6)
interaction_restrict = jw_number_restrict_operator(interaction_sparse, 2)
dim = ((6 * 5) // 2)
expected = csc_matrix(([(- 1), (- 1)], ([7, 9], [9, 7])), shape=(dim, dim))
self.assertTrue(numpy.allclose(interaction_restrict.A, expected.A))
def test_jw_restrict_jellium_ground_state_integration(self):
n_qubits = 4
grid = Grid(dimensions=1, length=n_qubits, scale=1.0)
jellium_hamiltonian = jordan_wigner_sparse(jellium_model(grid, spinless=False))
number_sparse = jordan_wigner_sparse(number_operator((2 * n_qubits)))
restricted_number = jw_number_restrict_operator(number_sparse, 2)
restricted_jellium_hamiltonian = jw_number_restrict_operator(jellium_hamiltonian, 2)
(_, ground_state) = get_ground_state(restricted_jellium_hamiltonian)
number_expectation = expectation(restricted_number, ground_state)
self.assertAlmostEqual(number_expectation, 2) |
class FlowchartViewBox(ViewBox):
def __init__(self, widget, *args, **kwargs):
ViewBox.__init__(self, *args, **kwargs)
self.widget = widget
def getMenu(self, ev):
self._fc_menu = QtWidgets.QMenu()
self._subMenus = self.getContextMenus(ev)
for menu in self._subMenus:
self._fc_menu.addMenu(menu)
return self._fc_menu
def getContextMenus(self, ev):
menu = self.widget.buildMenu(ev.scenePos())
menu.setTitle(translate('Context Menu', 'Add node'))
return [menu, ViewBox.getMenu(self, ev)] |
class ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, indices=None, num_samples=None):
self.indices = (list(range(len(dataset))) if (indices is None) else indices)
self.num_samples = (len(self.indices) if (num_samples is None) else num_samples)
label_to_count = ([0] * len(np.unique(dataset.targets)))
for idx in self.indices:
label = self._get_label(dataset, idx)
label_to_count[label] += 1
beta = 0.9999
effective_num = (1.0 - np.power(beta, label_to_count))
per_cls_weights = ((1.0 - beta) / np.array(effective_num))
weights = [per_cls_weights[self._get_label(dataset, idx)] for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.targets[idx]
def __iter__(self):
return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())
def __len__(self):
return self.num_samples |
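The weights follow the class-balanced "effective number of samples" scheme, (1 - beta) / (1 - beta^n_c) per class. A hedged usage sketch with a toy dataset exposing the .targets attribute the sampler expects:
import torch
from torch.utils.data import DataLoader, TensorDataset

class ToyDataset(TensorDataset):
    def __init__(self, data, targets):
        super().__init__(data, torch.as_tensor(targets))
        self.targets = list(targets)  # attribute the sampler reads

ds = ToyDataset(torch.randn(100, 4), [0] * 90 + [1] * 10)  # 9:1 class imbalance
sampler = ImbalancedDatasetSampler(ds)
loader = DataLoader(ds, batch_size=16, sampler=sampler)
# Minority class 1 is now drawn far more often than its 10% base rate.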
def test_get_transfer_secret_none_for_none_transfer_state(chain_state):
secret = factories.make_secret()
transfer = factories.create(factories.LockedTransferUnsignedStateProperties(secret=secret))
secrethash = transfer.lock.secrethash
payment_state = InitiatorPaymentState(initiator_transfers={secrethash: None}, routes=[])
task = InitiatorTask(token_network_address=factories.UNIT_TOKEN_NETWORK_ADDRESS, manager_state=payment_state)
chain_state.payment_mapping.secrethashes_to_task[secrethash] = task
assert (get_transfer_secret(chain_state=chain_state, secrethash=secrethash) is None) |
class VQModel(pl.LightningModule):
def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key='image', colorize_nlabels=None, monitor=None, remap=None, sane_index_shape=False):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape)
self.quant_conv = torch.nn.Conv2d(ddconfig['z_channels'], embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig['z_channels'], 1)
if (ckpt_path is not None):
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
self.image_key = image_key
if (colorize_nlabels is not None):
assert (type(colorize_nlabels) == int)
self.register_buffer('colorize', torch.randn(3, colorize_nlabels, 1, 1))
if (monitor is not None):
self.monitor = monitor
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location='cpu')['state_dict']
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print('Deleting key {} from state_dict.'.format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f'Restored from {path}')
def encode(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
(quant, emb_loss, info) = self.quantize(h)
return (quant, emb_loss, info)
def decode(self, quant):
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
def decode_code(self, code_b):
quant_b = self.quantize.embed_code(code_b)
dec = self.decode(quant_b)
return dec
def forward(self, input):
(quant, diff, _) = self.encode(input)
dec = self.decode(quant)
return (dec, diff)
def get_input(self, batch, k):
x = batch[k]
if (len(x.shape) == 3):
x = x[(..., None)]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format)
return x.float()
def training_step(self, batch, batch_idx, optimizer_idx):
x = self.get_input(batch, self.image_key)
(xrec, qloss) = self(x)
if (optimizer_idx == 0):
(aeloss, log_dict_ae) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
self.log('train/aeloss', aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if (optimizer_idx == 1):
(discloss, log_dict_disc) = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split='train')
self.log('train/discloss', discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
def validation_step(self, batch, batch_idx):
x = self.get_input(batch, self.image_key)
(xrec, qloss) = self(x)
(aeloss, log_dict_ae) = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split='val')
(discloss, log_dict_disc) = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split='val')
rec_loss = log_dict_ae['val/rec_loss']
PSNR = torch.Tensor([peak_signal_noise_ratio(x.cpu().numpy(), xrec.cpu().numpy())]).to(x.device)
self.log('val/rec_loss', rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log('val/aeloss', aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log('val/PSNR', PSNR.mean(), prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam(((((list(self.encoder.parameters()) + list(self.decoder.parameters())) + list(self.quantize.parameters())) + list(self.quant_conv.parameters())) + list(self.post_quant_conv.parameters())), lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))
return ([opt_ae, opt_disc], [])
def get_last_layer(self):
return self.decoder.conv_out.weight
def log_images(self, batch, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
(xrec, _) = self(x)
if (x.shape[1] > 3):
assert (xrec.shape[1] > 3)
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log['inputs'] = x
log['reconstructions'] = xrec
return log
def to_rgb(self, x):
assert (self.image_key == 'segmentation')
if (not hasattr(self, 'colorize')):
self.register_buffer('colorize', torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = (((2.0 * (x - x.min())) / (x.max() - x.min())) - 1.0)
return x |
class MessageDataModel():
@staticmethod
def _count_tokens(test_string: str) -> int:
enc = tiktoken.get_encoding('cl100k_base')
tokens = len(enc.encode(test_string))
return tokens
@classmethod
def _get_num_tokens_from_messages(cls, buffer: List[BaseMessage]) -> int:
return sum([cls._count_tokens(m.content) for m in buffer])
@classmethod
def truncate_text(cls, raw_text: str, max_token: Optional[int]=250, trunc_ratio: float=0.5) -> str:
tokens = cls._count_tokens(raw_text)
if ((max_token is None) or (tokens <= max_token)):
return raw_text
half_tokens = int((max_token * trunc_ratio))
lines = raw_text.strip().split('\n')
# cap each line at 100 words to bound per-line token cost
lines = [' '.join(line.split(' ')[:100]) for line in lines]
total_lines = len(lines)
# binary-search the longest prefix that stays within half the token budget
left = 0
right = (total_lines // 2)
while (left < right):
mid = ((left + right) >> 1)
text = '\n'.join(lines[0:mid])
token = cls._count_tokens(text)
if (token > half_tokens):
right = mid
else:
left = (mid + 1)
first_half = '\n'.join(lines[0:right])
# binary-search the earliest suffix start that stays within the other half
left = ((total_lines // 2) + 1)
right = (total_lines - 1)
while (left < right):
mid = ((left + right) >> 1)
text = '\n'.join(lines[mid:])
token = cls._count_tokens(text)
if (token > half_tokens):
right = mid
else:
left = (mid + 1)
second_half = '\n'.join(lines[left:])
if ((first_half != '') or (second_half != '')):
return f'''{first_half}
...
[too long to show]
...
{second_half}'''
else:
return f'''...
[too long to show]
...
{raw_text[(- 100):]}'''
@classmethod
def truncate_chat_history(cls, full_inputs: Dict[(str, Any)], max_token: int=2500) -> Dict[(str, Any)]:
_input = full_inputs['input']
agent_scratchpad = full_inputs['agent_scratchpad']
agent_scratchpad = '\n'.join([_.content for _ in agent_scratchpad])
_input_tokens = cls._count_tokens(_input)
_scratchpad_tokens = cls._count_tokens(agent_scratchpad)
left_tokens = ((max_token - _scratchpad_tokens) - _input_tokens)
chat_history = full_inputs['chat_history']
curr_buffer_length = cls._get_num_tokens_from_messages(chat_history)
while ((len(chat_history) != 0) and (curr_buffer_length > left_tokens)):
chat_history.pop(0)
curr_buffer_length = cls._get_num_tokens_from_messages(chat_history)
full_inputs['chat_history'] = chat_history
return full_inputs
@staticmethod
def _extract_value(json_string: str, key: str) -> str:
pattern = re.compile(rf'"?{key}"?\s*:\s*("((?:[^"\\]|\\.)*)"|([^,\s]*))', re.MULTILINE)
match = pattern.search(json_string)
if match:
result = match.group(1).replace('\\"', '"').replace('\\\\', '\\').strip('"').strip("'").strip()
return result
raise ValueError(f'Could not find {key} in {json_string}')
@staticmethod
def _extract_response(chat_history: str, begin_marker: str='[RESPONSE_BEGIN]', end_marker: str='[RESPONSE_END]', ai_msg_marker: str='AI:'):
code_blocks = chat_history.split(ai_msg_marker)
pattern = (re.escape(begin_marker) + '(.*?)' + re.escape(end_marker))
cleaned_output = []
for code_block in code_blocks:
matches = re.findall(pattern, code_block, re.DOTALL)
if matches:
cleaned_output.append(matches[0].strip())
return '\n'.join(cleaned_output)
@classmethod
def extract_action_for_llm(cls, text, max_token: int=500) -> str:
action_format = ACTION_FORMAT
cleaned_output = text.strip()
try:
_action = cls._extract_value(cleaned_output, 'action')
_action_input = cls._extract_value(cleaned_output, 'action_input')
return action_format.format(_action=_action, _action_input=_action_input)
except Exception:
if cleaned_output.startswith('Action:'):
lines = cleaned_output.splitlines()
_action = lines[1].strip()
_action_input = textwrap.dedent('\n'.join(lines[2:])).strip()
return action_format.format(_action=_action, _action_input=_action_input)
else:
_action_input = cleaned_output
return action_format.format(_action='Final Answer', _action_input=_action_input)
@classmethod
def extract_tool_response_for_llm(cls, text, tool_style: str='code', max_token: int=250) -> str:
wrap_format = TOOL_RESPONSE_FORMAT
tool_observation_format = TOOL_FORMAT[tool_style]
cleaned_output = text.strip()
if (tool_style == 'plugin'):
max_token = None
try:
_result = cls.truncate_text(cls._extract_value(cleaned_output, 'result'), max_token)
_intermediate_steps = cls.truncate_text(cls._extract_value(cleaned_output, 'intermediate_steps'), max_token)
_intermediate_steps = _intermediate_steps.replace('\\n', '\n').strip('\n')
_result = _result.replace('\\n', '\n').strip('\n')
_response = tool_observation_format.format(_intermediate_steps=_intermediate_steps, _result=_result)
return wrap_format.format(_response=_response)
except Exception:
if cleaned_output.startswith('Final Answer:'):
lines = cleaned_output.splitlines()
_response = textwrap.dedent('\n'.join(lines[2:])).strip()
_response = cls.truncate_text(_response, max_token)
return wrap_format.format(_response=_response)
_response = cls.truncate_text(cleaned_output, max_token)
return wrap_format.format(_response=_response)
@classmethod
def extract_code_for_python_tool(cls, text: str, max_token: int=2500, trunc_ratio: float=0.2) -> str:
whole_code = MessageDataModel._extract_response(text)
trunc_code = cls.truncate_text(whole_code, max_token=max_token, trunc_ratio=trunc_ratio)
return trunc_code
@classmethod
def extract_code_for_sql_tool(cls, text: str, max_token: int=2500, trunc_ratio: float=0.2) -> str:
whole_code = MessageDataModel._extract_response(text)
trunc_code = cls.truncate_text(whole_code, max_token=max_token, trunc_ratio=trunc_ratio)
return trunc_code |
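# A standalone sketch of the head/tail truncation strategy in
# MessageDataModel.truncate_text: binary-search a prefix and a suffix that each
# land near half the token budget and elide the middle. A whitespace tokenizer
# stands in for tiktoken here; count_tokens and truncate_middle are assumptions.
def count_tokens(text: str) -> int:
    return len(text.split())

def truncate_middle(text: str, max_tokens: int = 10) -> str:
    if count_tokens(text) <= max_tokens:
        return text
    lines = text.strip().split('\n')
    half = max_tokens // 2
    lo, hi = 0, len(lines) // 2
    while lo < hi:  # prefix near half the budget (mirrors the first loop above)
        mid = (lo + hi) >> 1
        if count_tokens('\n'.join(lines[:mid])) > half:
            hi = mid
        else:
            lo = mid + 1
    head = '\n'.join(lines[:hi])
    lo, hi = len(lines) // 2 + 1, len(lines) - 1
    while lo < hi:  # suffix start near half the budget (mirrors the second loop)
        mid = (lo + hi) >> 1
        if count_tokens('\n'.join(lines[mid:])) > half:
            hi = mid
        else:
            lo = mid + 1
    tail = '\n'.join(lines[lo:])
    return f'{head}\n...\n{tail}'

print(truncate_middle('\n'.join(f'line {i} word word' for i in range(20))))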
def model_fixture(request: SubRequest, factory_name: str) -> Any:
factoryboy_request: FactoryboyRequest = request.getfixturevalue('factoryboy_request')
factoryboy_request.evaluate(request)
assert request.fixturename
fixture_name = request.fixturename
prefix = ''.join((fixture_name, SEPARATOR))
factory_class: FactoryType = request.getfixturevalue(factory_name)
# subclass the factory and strip post-generation declarations; they are
# evaluated later as deferred functions, after the instance is cached
Factory: FactoryType = cast(FactoryType, type('Factory', (factory_class,), {}))
Factory._meta.base_declarations = {k: v for (k, v) in Factory._meta.base_declarations.items() if (not isinstance(v, factory.declarations.PostGenerationDeclaration))}
Factory._meta.post_declarations = factory.builder.DeclarationSet()
kwargs = {}
for key in factory_class._meta.pre_declarations:
argname = ''.join((prefix, key))
if (argname in request._fixturedef.argnames):
kwargs[key] = evaluate(request, request.getfixturevalue(argname))
strategy = factory.enums.CREATE_STRATEGY
builder = factory.builder.StepBuilder(Factory._meta, kwargs, strategy)
step = factory.builder.BuildStep(builder=builder, sequence=Factory._meta.next_sequence())
with disable_method(Factory._after_postgeneration):
instance = Factory(**kwargs)
request._fixturedef.cached_result = (instance, 0, None)
request._fixture_defs[fixture_name] = request._fixturedef
deferred: list[DeferredFunction] = []
for attr in factory_class._meta.post_declarations.sorted():
decl = factory_class._meta.post_declarations.declarations[attr]
if isinstance(decl, factory.RelatedFactory):
deferred.append(make_deferred_related(factory_class, fixture_name, attr))
else:
argname = ''.join((prefix, attr))
extra = {}
for (k, v) in factory_class._meta.post_declarations.contexts[attr].items():
if (k == ''):
continue
post_attr = SEPARATOR.join((argname, k))
if (post_attr in request._fixturedef.argnames):
extra[k] = evaluate(request, request.getfixturevalue(post_attr))
else:
extra[k] = v
postgen_value = evaluate(request, request.getfixturevalue(argname))
postgen_context = PostGenerationContext(value_provided=(postgen_value is not factory.declarations.NotProvided), value=postgen_value, extra=extra)
deferred.append(make_deferred_postgen(step, factory_class, fixture_name, instance, attr, decl, postgen_context))
factoryboy_request.defer(deferred)
factoryboy_request.evaluate(request)
return instance |
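# A hedged usage sketch of the factory_boy machinery that model_fixture drives:
# pre-declarations become overridable kwargs, and LazyAttribute runs at build
# time. The User model and UserFactory below are illustrative assumptions.
import factory

class User:
    def __init__(self, name, email):
        self.name = name
        self.email = email

class UserFactory(factory.Factory):
    class Meta:
        model = User
    name = 'alice'
    email = factory.LazyAttribute(lambda o: f'{o.name}@example.com')

user = UserFactory(name='bob')
print(user.email)  # bob@example.com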
def fold_all_batch_norms(sess: tf.compat.v1.Session, input_op_names: Union[(str, List[str])], output_op_names: Union[(str, List[str])]) -> Tuple[(tf.compat.v1.Session, List[Tuple[(tf.Operation, tf.Operation)]])]:
if (not isinstance(input_op_names, (str, list))):
logger.error('start op names must be passed as a string or a List of strings')
if isinstance(input_op_names, str):
input_op_names = [input_op_names]
if isinstance(output_op_names, str):
output_op_names = [output_op_names]
(bn_conv_linear_pairs, bns_to_fold) = find_all_batch_norms_to_fold(sess, input_op_names, output_op_names)
after_fold_sess = _fold_given_auto_selected_batch_norms(sess, bn_conv_linear_pairs)
pairs_to_return = []
for pair in bn_conv_linear_pairs:
pairs_to_return.append((pair[0], pair[1].op))
bn_converted = convert_standalone_batchnorms(after_fold_sess, input_op_names, output_op_names, bns_to_fold)
if bn_converted:
logger.info("%d BatchNorms' weights got converted", len(bn_converted))
after_fold_sess = save_and_load_graph('./temp_bn_fold', after_fold_sess)
return (after_fold_sess, pairs_to_return) |
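# A numeric check of the batch-norm folding identity used above (sketched in
# PyTorch rather than TF for brevity; all names here are illustrative): with
# s = gamma / sqrt(var + eps), folding sets W' = W * s and
# b' = beta + (b - mean) * s, so folded_conv(x) == bn(conv(x)) in eval mode.
import torch

conv = torch.nn.Conv2d(3, 8, 3, bias=True)
bn = torch.nn.BatchNorm2d(8).eval()
bn.running_mean.uniform_(-1.0, 1.0)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 2.0)
bn.bias.data.uniform_(-1.0, 1.0)

scale = bn.weight.data / torch.sqrt(bn.running_var + bn.eps)
folded = torch.nn.Conv2d(3, 8, 3, bias=True)
folded.weight.data = conv.weight.data * scale.view(-1, 1, 1, 1)
folded.bias.data = bn.bias.data + (conv.bias.data - bn.running_mean) * scale

x = torch.randn(1, 3, 16, 16)
assert torch.allclose(bn(conv(x)), folded(x), atol=1e-4)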
class PipelineTestCase(TestCase):
if PY2:
def assertRaisesRegex(self, *args, **kwargs):
return self.assertRaisesRegexp(*args, **kwargs)
def test_construction(self):
p0 = Pipeline()
self.assertEqual(p0.columns, {})
self.assertIs(p0.screen, None)
columns = {'f': SomeFactor()}
p1 = Pipeline(columns=columns)
self.assertEqual(p1.columns, columns)
screen = SomeFilter()
p2 = Pipeline(screen=screen)
self.assertEqual(p2.columns, {})
self.assertEqual(p2.screen, screen)
p3 = Pipeline(columns=columns, screen=screen)
self.assertEqual(p3.columns, columns)
self.assertEqual(p3.screen, screen)
def test_construction_bad_input_types(self):
with self.assertRaises(TypeError):
Pipeline(1)
# a plain dict of columns is valid and must not raise
Pipeline({})
with self.assertRaises(TypeError):
Pipeline({}, 1)
with self.assertRaises(TypeError):
Pipeline({}, SomeFactor())
with self.assertRaises(TypeError):
Pipeline({'open': USEquityPricing.open})
# a Filter screen is valid and must not raise
Pipeline({}, (SomeFactor() > 5))
def test_add(self):
p = Pipeline()
f = SomeFactor()
p.add(f, 'f')
self.assertEqual(p.columns, {'f': f})
p.add((f > 5), 'g')
self.assertEqual(p.columns, {'f': f, 'g': (f > 5)})
with self.assertRaises(TypeError):
p.add(f, 1)
with self.assertRaises(TypeError):
p.add(USEquityPricing.open, 'open')
def test_overwrite(self):
p = Pipeline()
f = SomeFactor()
other_f = SomeOtherFactor()
p.add(f, 'f')
self.assertEqual(p.columns, {'f': f})
with self.assertRaises(KeyError) as e:
p.add(other_f, 'f')
[message] = e.exception.args
self.assertEqual(message, "Column 'f' already exists.")
p.add(other_f, 'f', overwrite=True)
self.assertEqual(p.columns, {'f': other_f})
def test_remove(self):
f = SomeFactor()
p = Pipeline(columns={'f': f})
with self.assertRaises(KeyError) as e:
p.remove('not_a_real_name')
self.assertEqual(f, p.remove('f'))
with self.assertRaises(KeyError) as e:
p.remove('f')
self.assertEqual(e.exception.args, ('f',))
def test_set_screen(self):
(f, g) = (SomeFilter(), SomeOtherFilter())
p = Pipeline()
self.assertEqual(p.screen, None)
p.set_screen(f)
self.assertEqual(p.screen, f)
with self.assertRaises(ValueError):
p.set_screen(f)
p.set_screen(g, overwrite=True)
self.assertEqual(p.screen, g)
with self.assertRaises(TypeError) as e:
p.set_screen(f, g)
message = e.exception.args[0]
self.assertIn("expected a value of type bool or int for argument 'overwrite'", message)
def test_show_graph(self):
f = SomeFactor()
p = Pipeline(columns={'f': f})
def mock_display_graph(g, format='svg', include_asset_exists=False):
return (g, format, include_asset_exists)
self.assertEqual(getargspec(display_graph), getargspec(mock_display_graph), msg="Mock signature doesn't match signature for display_graph.")
patch_display_graph = patch('zipline.pipeline.graph.display_graph', mock_display_graph)
with patch_display_graph:
(graph, format, include_asset_exists) = p.show_graph()
self.assertIs(graph.outputs['f'], f)
self.assertEqual(sorted(graph.outputs.keys()), ['f', graph.screen_name])
self.assertEqual(format, 'svg')
self.assertEqual(include_asset_exists, False)
with patch_display_graph:
(graph, format, include_asset_exists) = p.show_graph(format='png')
self.assertIs(graph.outputs['f'], f)
self.assertEqual(sorted(graph.outputs.keys()), ['f', graph.screen_name])
self.assertEqual(format, 'png')
self.assertEqual(include_asset_exists, False)
with patch_display_graph:
(graph, format, include_asset_exists) = p.show_graph(format='jpeg')
self.assertIs(graph.outputs['f'], f)
self.assertEqual(sorted(graph.outputs.keys()), ['f', graph.screen_name])
self.assertEqual(format, 'jpeg')
self.assertEqual(include_asset_exists, False)
expected = ".*\\.show_graph\\(\\) expected a value in \\('svg', 'png', 'jpeg'\\) for argument 'format', but got 'fizzbuzz' instead."
with self.assertRaisesRegex(ValueError, expected):
p.show_graph(format='fizzbuzz')
def test_infer_domain_no_terms(self):
self.assertEqual(Pipeline().domain(default=GENERIC), GENERIC)
self.assertEqual(Pipeline().domain(default=US_EQUITIES), US_EQUITIES)
def test_infer_domain_screen_only(self):
class D(DataSet):
c = Column(bool)
filter_generic = D.c.latest
filter_US = D.c.specialize(US_EQUITIES).latest
filter_CA = D.c.specialize(CA_EQUITIES).latest
self.assertEqual(Pipeline(screen=filter_generic).domain(default=GB_EQUITIES), GB_EQUITIES)
self.assertEqual(Pipeline(screen=filter_US).domain(default=GB_EQUITIES), US_EQUITIES)
self.assertEqual(Pipeline(screen=filter_CA).domain(default=GB_EQUITIES), CA_EQUITIES)
def test_infer_domain_outputs(self):
class D(DataSet):
c = Column(float)
D_US = D.specialize(US_EQUITIES)
D_CA = D.specialize(CA_EQUITIES)
result = Pipeline({'f': D_US.c.latest}).domain(default=GB_EQUITIES)
expected = US_EQUITIES
self.assertEqual(result, expected)
result = Pipeline({'f': D_CA.c.latest}).domain(default=GB_EQUITIES)
expected = CA_EQUITIES
self.assertEqual(result, expected)
def test_conflict_between_outputs(self):
class D(DataSet):
c = Column(float)
D_US = D.specialize(US_EQUITIES)
D_CA = D.specialize(CA_EQUITIES)
pipe = Pipeline({'f': D_US.c.latest, 'g': D_CA.c.latest})
with self.assertRaises(AmbiguousDomain) as e:
pipe.domain(default=GENERIC)
self.assertEqual(e.exception.domains, [CA_EQUITIES, US_EQUITIES])
def test_conflict_between_output_and_screen(self):
class D(DataSet):
c = Column(float)
b = Column(bool)
D_US = D.specialize(US_EQUITIES)
D_CA = D.specialize(CA_EQUITIES)
pipe = Pipeline({'f': D_US.c.latest}, screen=D_CA.b.latest)
with self.assertRaises(AmbiguousDomain) as e:
pipe.domain(default=GENERIC)
self.assertEqual(e.exception.domains, [CA_EQUITIES, US_EQUITIES]) |
def parse_config():
parser = ArgumentParser()
parser.add_argument('--gpu', type=int, nargs='+', default=(0,), help='specify gpu devices')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--config_path', default='config/2DPASS-semantickitti.yaml')
parser.add_argument('--log_dir', type=str, default='default', help='log location')
parser.add_argument('--monitor', type=str, default='val/mIoU', help='metric to monitor (maximized)')
parser.add_argument('--stop_patience', type=int, default=50, help='early-stopping patience (epochs)')
parser.add_argument('--save_top_k', type=int, default=1, help='save top k checkpoints, use -1 to checkpoint every epoch')
parser.add_argument('--check_val_every_n_epoch', type=int, default=1, help='check_val_every_n_epoch')
parser.add_argument('--SWA', action='store_true', default=False, help='StochasticWeightAveraging')
parser.add_argument('--baseline_only', action='store_true', default=False, help='training without 2D')
parser.add_argument('--test', action='store_true', default=False, help='test mode')
parser.add_argument('--fine_tune', action='store_true', default=False, help='fine tune mode')
parser.add_argument('--pretrain2d', action='store_true', default=False, help='use pre-trained 2d network')
parser.add_argument('--num_vote', type=int, default=1, help='number of voting in the test')
parser.add_argument('--submit_to_server', action='store_true', default=False, help='submit on benchmark')
parser.add_argument('--checkpoint', type=str, default=None, help='load checkpoint')
parser.add_argument('--debug', default=False, action='store_true')
args = parser.parse_args()
config = load_yaml(args.config_path)
config.update(vars(args))
if args.test:
config['dataset_params']['val_data_loader']['batch_size'] = args.num_vote
if (args.num_vote > 1):
config['dataset_params']['val_data_loader']['rotate_aug'] = True
config['dataset_params']['val_data_loader']['transform_aug'] = True
if args.debug:
config['dataset_params']['val_data_loader']['batch_size'] = 2
config['dataset_params']['val_data_loader']['num_workers'] = 0
return EasyDict(config) |
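# A minimal sketch of the YAML-plus-CLI merge performed by parse_config,
# assuming PyYAML and easydict are installed (load_yaml above presumably wraps
# yaml.safe_load); the inline YAML string is illustrative only.
import yaml
from easydict import EasyDict

raw = yaml.safe_load('dataset_params:\n  val_data_loader:\n    batch_size: 4\n')
config = EasyDict(raw)
config.update({'test': True, 'num_vote': 2})
config.dataset_params.val_data_loader.batch_size = config.num_vote
print(config.dataset_params.val_data_loader.batch_size)  # 2, via attribute access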
class ResNet(nn.Module):
def __init__(self, *, d_numerical: int, categories: ty.Optional[ty.List[int]], d_embedding: int, d: int, d_hidden_factor: float, n_layers: int, activation: str, normalization: str, hidden_dropout: float, residual_dropout: float, d_out: int) -> None:
super().__init__()
def make_normalization():
return {'batchnorm': nn.BatchNorm1d, 'layernorm': nn.LayerNorm}[normalization](d)
self.main_activation = lib.get_activation_fn(activation)
self.last_activation = lib.get_nonglu_activation_fn(activation)
self.residual_dropout = residual_dropout
self.hidden_dropout = hidden_dropout
d_in = d_numerical
d_hidden = int((d * d_hidden_factor))
if (categories is not None):
d_in += (len(categories) * d_embedding)
category_offsets = torch.tensor(([0] + categories[:(- 1)])).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'self.category_embeddings.weight.shape={self.category_embeddings.weight.shape!r}')
self.first_layer = nn.Linear(d_in, d)
self.layers = nn.ModuleList([nn.ModuleDict({'norm': make_normalization(), 'linear0': nn.Linear(d, (d_hidden * (2 if activation.endswith('glu') else 1))), 'linear1': nn.Linear(d_hidden, d)}) for _ in range(n_layers)])
self.last_normalization = make_normalization()
self.head = nn.Linear(d, d_out)
def forward(self, x_num: Tensor, x_cat: ty.Optional[Tensor]) -> Tensor:
x = []
if (x_num is not None):
x.append(x_num)
if (x_cat is not None):
x.append(self.category_embeddings((x_cat + self.category_offsets[None])).view(x_cat.size(0), (- 1)))
x = torch.cat(x, dim=(- 1))
x = self.first_layer(x)
for layer in self.layers:
layer = ty.cast(ty.Dict[(str, nn.Module)], layer)
z = x
z = layer['norm'](z)
z = layer['linear0'](z)
z = self.main_activation(z)
if self.hidden_dropout:
z = F.dropout(z, self.hidden_dropout, self.training)
z = layer['linear1'](z)
if self.residual_dropout:
z = F.dropout(z, self.residual_dropout, self.training)
x = (x + z)
x = self.last_normalization(x)
x = self.last_activation(x)
x = self.head(x)
x = x.squeeze((- 1))
return x |
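# A small sketch of the shared-embedding-table trick in ResNet.forward above:
# cumulative offsets shift each categorical column into its own slice of a
# single nn.Embedding. The cardinalities and inputs here are illustrative.
import torch

categories = [3, 5, 2]  # per-feature cardinalities
offsets = torch.tensor([0] + categories[:-1]).cumsum(0)  # tensor([0, 3, 8])
emb = torch.nn.Embedding(sum(categories), 4)
x_cat = torch.tensor([[2, 0, 1], [1, 4, 0]])  # batch of raw category indices
flat = emb(x_cat + offsets[None]).view(x_cat.size(0), -1)
print(flat.shape)  # torch.Size([2, 12]) == (batch, n_features * d_embedding)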
class PyTorchClassifier(object):
def __init__(self, inputdim, nclasses, l2reg=0.0, batch_size=64, seed=1111, cudaEfficient=False):
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.inputdim = inputdim
self.nclasses = nclasses
self.l2reg = l2reg
self.batch_size = batch_size
self.cudaEfficient = cudaEfficient
def prepare_split(self, X, y, validation_data=None, validation_split=None):
assert (validation_split or validation_data)
if (validation_data is not None):
(trainX, trainy) = (X, y)
(devX, devy) = validation_data
else:
permutation = np.random.permutation(len(X))
trainidx = permutation[int((validation_split * len(X))):]
devidx = permutation[0:int((validation_split * len(X)))]
(trainX, trainy) = (X[trainidx], y[trainidx])
(devX, devy) = (X[devidx], y[devidx])
# cudaEfficient keeps the full dataset on CPU and moves batches to GPU lazily
device = (torch.device('cpu') if self.cudaEfficient else torch.device('cuda'))
trainX = torch.from_numpy(trainX).to(device, dtype=torch.float32)
trainy = torch.from_numpy(trainy).to(device, dtype=torch.int64)
devX = torch.from_numpy(devX).to(device, dtype=torch.float32)
devy = torch.from_numpy(devy).to(device, dtype=torch.int64)
return (trainX, trainy, devX, devy)
def fit(self, X, y, validation_data=None, validation_split=None, early_stop=True):
self.nepoch = 0
bestaccuracy = (- 1)
stop_train = False
early_stop_count = 0
(trainX, trainy, devX, devy) = self.prepare_split(X, y, validation_data, validation_split)
while ((not stop_train) and (self.nepoch <= self.max_epoch)):
self.trainepoch(trainX, trainy, epoch_size=self.epoch_size)
accuracy = self.score(devX, devy)
if (accuracy > bestaccuracy):
bestaccuracy = accuracy
bestmodel = copy.deepcopy(self.model)
elif early_stop:
if (early_stop_count >= self.tenacity):
stop_train = True
early_stop_count += 1
self.model = bestmodel
return bestaccuracy
def trainepoch(self, X, y, epoch_size=1):
self.model.train()
for _ in range(self.nepoch, (self.nepoch + epoch_size)):
permutation = np.random.permutation(len(X))
all_costs = []
for i in range(0, len(X), self.batch_size):
idx = torch.from_numpy(permutation[i:(i + self.batch_size)]).long().to(X.device)
Xbatch = X[idx]
ybatch = y[idx]
if self.cudaEfficient:
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
output = self.model(Xbatch)
loss = self.loss_fn(output, ybatch)
all_costs.append(loss.data.item())
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.nepoch += epoch_size
def score(self, devX, devy):
self.model.eval()
correct = 0
if ((not isinstance(devX, torch.cuda.FloatTensor)) or self.cudaEfficient):
devX = torch.FloatTensor(devX).cuda()
devy = torch.LongTensor(devy).cuda()
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:(i + self.batch_size)]
ybatch = devy[i:(i + self.batch_size)]
if self.cudaEfficient:
Xbatch = Xbatch.cuda()
ybatch = ybatch.cuda()
output = self.model(Xbatch)
pred = output.data.max(1)[1]
correct += pred.long().eq(ybatch.data.long()).sum().item()
accuracy = ((1.0 * correct) / len(devX))
return accuracy
def predict(self, devX):
self.model.eval()
if (not isinstance(devX, torch.cuda.FloatTensor)):
devX = torch.FloatTensor(devX).cuda()
yhat = np.array([])
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:(i + self.batch_size)]
output = self.model(Xbatch)
yhat = np.append(yhat, output.data.max(1)[1].cpu().numpy())
yhat = np.vstack(yhat)
return yhat
def predict_proba(self, devX):
self.model.eval()
probas = []
with torch.no_grad():
for i in range(0, len(devX), self.batch_size):
Xbatch = devX[i:(i + self.batch_size)]
# softmax over class logits; collect per-batch numpy arrays
vals = F.softmax(self.model(Xbatch), dim=(- 1)).data.cpu().numpy()
probas.append(vals)
return np.concatenate(probas, axis=0) |
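# A hedged sketch of the batched-softmax pattern behind predict_proba, with a
# toy linear model standing in for self.model (the class expects a subclass to
# set self.model, self.loss_fn, and self.optimizer).
import numpy as np
import torch
import torch.nn.functional as F

model = torch.nn.Linear(4, 3).eval()
devX = torch.randn(10, 4)
probas = []
with torch.no_grad():
    for i in range(0, len(devX), 4):
        probas.append(F.softmax(model(devX[i:i + 4]), dim=-1).cpu().numpy())
probas = np.concatenate(probas, axis=0)
print(probas.shape, probas.sum(axis=1))  # (10, 3); each row sums to 1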
def INSgrow_Gap(sub_ptn, I):
# gap-constrained pattern growth: extend each recorded occurrence of sub_ptn
# (positions in I[i].poset) by one item and count the pattern's support
# across the sequence database sDB
global compnum
global NumbS
global sDB
global IPLUS
compnum = (compnum + 1)
support = 0
global ptn_len
ptn_len = len(sub_ptn)
p = sub_ptn[(ptn_len - 1)].end
IPLUS = copy.deepcopy(I)
for i in range(0, NumbS):
if (len(sDB[i].S) > 0):
ptn_len = len(sub_ptn)
for j in range(0, len(IPLUS[i].poset)):
apos = Pos([])
apos = IPLUS[i].poset[j]
length = len(apos.pos)
if (length == len(sub_ptn)):
maxx = apos.pos[(length - 1)]
a = 0
b = 0
l = (- 1)
# candidate window for the next occurrence, from the last item's gap bounds
a = ((maxx + sub_ptn[(ptn_len - 1)].min) + 1)
b = ((maxx + sub_ptn[(ptn_len - 1)].max) + 1)
lens = len(sDB[i].S)
if (b > (lens - 1)):
b = (lens - 1)
if (a > len(sDB[i].S)):
continue
if (maxx <= b):
l = nextt(sDB[i], p, maxx, a, b)
if ((l != (- 1)) and (l <= b)):
m = j
while (m >= 0):
if (length >= len(IPLUS[i].poset[m].pos)):
if (m == 0):
IPLUS[i].poset[j].pos.append(l)
support = (support + 1)
break
elif (IPLUS[i].poset[m].pos[length] == l):
m = (m + 1)
break
if (m == 0):
IPLUS[i].poset[j].pos.append(l)
support = (support + 1)
break
m = (m - 1)
return support |
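# A standalone sketch of the gap window at the core of INSgrow_Gap: after a
# match at position maxx, the next item p with gap bounds [gmin, gmax] must
# occur in [maxx + gmin + 1, maxx + gmax + 1]; nextt above presumably performs
# this scan over sDB[i].S. next_in_gap here is an illustrative stand-in.
def next_in_gap(seq, p, maxx, gmin, gmax):
    a = maxx + gmin + 1
    b = min(maxx + gmax + 1, len(seq) - 1)
    for pos in range(a, b + 1):
        if seq[pos] == p:
            return pos
    return -1

assert next_in_gap(list('abcab'), 'b', 0, 0, 2) == 1
assert next_in_gap(list('abcab'), 'b', 1, 1, 3) == 4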
def main():
connection = establish_tcp_connection()
h2_connection = h2.connection.H2Connection()
settings_header_value = h2_connection.initiate_upgrade_connection()
send_initial_request(connection, settings_header_value)
extra_data = get_upgrade_response(connection)
# send the HTTP/2 client preface, then feed any bytes that arrived after the
# 101 response into the h2 state machine
connection.sendall(h2_connection.data_to_send())
events = h2_connection.receive_data(extra_data)
main_loop(events) |
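# A hedged sketch of the undefined helpers above, following the plaintext
# h2c-upgrade example in the h2 documentation; the host, port, and helper
# bodies here are assumptions.
import socket

def establish_tcp_connection():
    return socket.create_connection(('localhost', 8080))

def send_initial_request(connection, settings):
    request = (
        b'GET / HTTP/1.1\r\n'
        b'Host: localhost\r\n'
        b'Upgrade: h2c\r\n'
        b'Connection: Upgrade, HTTP2-Settings\r\n'
        b'HTTP2-Settings: ' + settings + b'\r\n\r\n'
    )
    connection.sendall(request)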
@require_sentencepiece
@require_tokenizers
class MBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
tokenizer_class = MBartTokenizer
rust_tokenizer_class = MBartTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def setUp(self):
super().setUp()
tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokenizer.save_pretrained(self.tmpdirname)
def test_full_tokenizer(self):
tokenizer = MBartTokenizer(SAMPLE_VOCAB, keep_accents=True)
tokens = tokenizer.tokenize('This is a test')
self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [(value + tokenizer.fairseq_offset) for value in [285, 46, 10, 170, 382]])
tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(ids, [(value + tokenizer.fairseq_offset) for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]])
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])
def test_save_pretrained(self):
if (not self.test_slow_tokenizer):
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {})
for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'):
tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
self.assertTrue(any((('tokenizer.json' in f) for f in tokenizer_r_files)))
tokenizer_r_files = tuple((f for f in tokenizer_r_files if ('tokenizer.json' not in f)))
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2)
tmpdirname2 = tempfile.mkdtemp()
tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
self.assertTrue(any((('tokenizer.json' in f) for f in tokenizer_r_files)))
tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(tokenizer_rp, key))
shutil.rmtree(tmpdirname2) |
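# A hedged usage sketch of the tokenizer under test (assumes transformers with
# sentencepiece installed and access to the facebook/mbart-large-cc25
# checkpoint).
from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')
ids = tok('This is a test').input_ids
print(tok.convert_ids_to_tokens(ids))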
class IfExp(NodeNG):
_astroid_fields = ('test', 'body', 'orelse')
test: NodeNG
body: NodeNG
orelse: NodeNG
def postinit(self, test: NodeNG, body: NodeNG, orelse: NodeNG) -> None:
self.test = test
self.body = body
self.orelse = orelse
def get_children(self):
(yield self.test)
(yield self.body)
(yield self.orelse)
def op_left_associative(self) -> Literal[False]:
return False
@raise_if_nothing_inferred
def _infer(self, context: (InferenceContext | None)=None, **kwargs: Any) -> Generator[(InferenceResult, None, None)]:
both_branches = False
context = (context or InferenceContext())
lhs_context = copy_context(context)
rhs_context = copy_context(context)
try:
test = next(self.test.infer(context=context.clone()))
except (InferenceError, StopIteration):
both_branches = True
else:
if (not isinstance(test, util.UninferableBase)):
if test.bool_value():
(yield from self.body.infer(context=lhs_context))
else:
(yield from self.orelse.infer(context=rhs_context))
else:
both_branches = True
if both_branches:
(yield from self.body.infer(context=lhs_context))
(yield from self.orelse.infer(context=rhs_context)) |
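# A hedged usage sketch of IfExp._infer: when the test cannot be inferred,
# both branches are yielded (assumes astroid is installed).
import astroid

node = astroid.extract_node("1 if unknown() else 'a'")
print([inferred.value for inferred in node.inferred()])  # [1, 'a']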
def dbref(inp, reqhash=True):
# validate a '#dbref' identifier; return the id with the '#' stripped,
# or None if it is missing a required hash or is not a non-negative integer
if (reqhash and (not (isinstance(inp, str) and inp.startswith('#')))):
return None
if isinstance(inp, str):
inp = inp.lstrip('#')
try:
if (int(inp) < 0):
return None
except Exception:
return None
return inp |
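# Quick behavioral examples for dbref (string inputs come back as strings):
assert dbref('#42') == '42'
assert dbref('42') is None               # reqhash=True demands a leading '#'
assert dbref('42', reqhash=False) == '42'
assert dbref('#-1') is None              # negative ids are rejected
assert dbref('abc', reqhash=False) is None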
class Data(object):
time = 0
host = None
plugin = None
plugininstance = None
type = None
typeinstance = None
def __init__(self, **kw):
for (k, v) in kw.items():
setattr(self, k, v)
@property
def datetime(self):
return datetime.fromtimestamp(self.time)
@property
def source(self):
buf = StringIO()
if self.host:
buf.write(str(self.host))
if self.plugin:
buf.write('/')
buf.write(str(self.plugin))
if self.plugininstance:
buf.write('/')
buf.write(str(self.plugininstance))
if self.type:
buf.write('/')
buf.write(str(self.type))
if self.typeinstance:
buf.write('/')
buf.write(str(self.typeinstance))
return buf.getvalue()
def __str__(self):
return ('[%i] %s' % (self.time, self.source)) |
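# Usage sketch for Data (assumes `from io import StringIO` and
# `from datetime import datetime` are in scope, as the class requires):
d = Data(time=0, host='web1', plugin='cpu', plugininstance='0', type='cpu', typeinstance='idle')
print(d.source)  # web1/cpu/0/cpu/idle
print(str(d))    # [0] web1/cpu/0/cpu/idle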
class RacketLexer(RegexLexer):
name = 'Racket'
url = 'https://racket-lang.org/'
aliases = ['racket', 'rkt']
filenames = ['*.rkt', '*.rktd', '*.rktl']
mimetypes = ['text/x-racket', 'application/x-racket']
version_added = '1.6'
_keywords = ('#%app', '#%datum', '#%declare', '#%expression', '#%module-begin', '#%plain-app', '#%plain-lambda', '#%plain-module-begin', '#%printing-module-begin', '#%provide', '#%require', '#%stratified-body', '#%top', '#%top-interaction', '#%variable-reference', '->', '->*', '->*m', '->d', '->dm', '->i', '->m', '...', ':do-in', '==', '=>', '_', 'absent', 'abstract', 'all-defined-out', 'all-from-out', 'and', 'any', 'augment', 'augment*', 'augment-final', 'augment-final*', 'augride', 'augride*', 'begin', 'begin-for-syntax', 'begin0', 'case', 'case->', 'case->m', 'case-lambda', 'class', 'class*', 'class-field-accessor', 'class-field-mutator', 'class/c', 'class/derived', 'combine-in', 'combine-out', 'command-line', 'compound-unit', 'compound-unit/infer', 'cond', 'cons/dc', 'contract', 'contract-out', 'contract-struct', 'contracted', 'define', 'define-compound-unit', 'define-compound-unit/infer', 'define-contract-struct', 'define-custom-hash-types', 'define-custom-set-types', 'define-for-syntax', 'define-local-member-name', 'define-logger', 'define-match-expander', 'define-member-name', 'define-module-boundary-contract', 'define-namespace-anchor', 'define-opt/c', 'define-sequence-syntax', 'define-serializable-class', 'define-serializable-class*', 'define-signature', 'define-signature-form', 'define-struct', 'define-struct/contract', 'define-struct/derived', 'define-syntax', 'define-syntax-rule', 'define-syntaxes', 'define-unit', 'define-unit-binding', 'define-unit-from-context', 'define-unit/contract', 'define-unit/new-import-export', 'define-unit/s', 'define-values', 'define-values-for-export', 'define-values-for-syntax', 'define-values/invoke-unit', 'define-values/invoke-unit/infer', 'define/augment', 'define/augment-final', 'define/augride', 'define/contract', 'define/final-prop', 'define/match', 'define/overment', 'define/override', 'define/override-final', 'define/private', 'define/public', 'define/public-final', 'define/pubment', 'define/subexpression-pos-prop', 'define/subexpression-pos-prop/name', 'delay', 'delay/idle', 'delay/name', 'delay/strict', 'delay/sync', 'delay/thread', 'do', 'else', 'except', 'except-in', 'except-out', 'export', 'extends', 'failure-cont', 'false', 'false/c', 'field', 'field-bound?', 'file', 'flat-murec-contract', 'flat-rec-contract', 'for', 'for*', 'for*/and', 'for*/async', 'for*/first', 'for*/fold', 'for*/fold/derived', 'for*/hash', 'for*/hasheq', 'for*/hasheqv', 'for*/last', 'for*/list', 'for*/lists', 'for*/mutable-set', 'for*/mutable-seteq', 'for*/mutable-seteqv', 'for*/or', 'for*/product', 'for*/set', 'for*/seteq', 'for*/seteqv', 'for*/stream', 'for*/sum', 'for*/vector', 'for*/weak-set', 'for*/weak-seteq', 'for*/weak-seteqv', 'for-label', 'for-meta', 'for-syntax', 'for-template', 'for/and', 'for/async', 'for/first', 'for/fold', 'for/fold/derived', 'for/hash', 'for/hasheq', 'for/hasheqv', 'for/last', 'for/list', 'for/lists', 'for/mutable-set', 'for/mutable-seteq', 'for/mutable-seteqv', 'for/or', 'for/product', 'for/set', 'for/seteq', 'for/seteqv', 'for/stream', 'for/sum', 'for/vector', 'for/weak-set', 'for/weak-seteq', 'for/weak-seteqv', 'gen:custom-write', 'gen:dict', 'gen:equal+hash', 'gen:set', 'gen:stream', 'generic', 'get-field', 'hash/dc', 'if', 'implies', 'import', 'include', 'include-at/relative-to', 'include-at/relative-to/reader', 'include/reader', 'inherit', 'inherit-field', 'inherit/inner', 'inherit/super', 'init', 'init-depend', 'init-field', 'init-rest', 'inner', 'inspect', 'instantiate', 'interface', 'interface*', 'invariant-assertion', 
'invoke-unit', 'invoke-unit/infer', 'lambda', 'lazy', 'let', 'let*', 'let*-values', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc', 'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes', 'letrec-syntaxes+values', 'letrec-values', 'lib', 'link', 'local', 'local-require', 'log-debug', 'log-error', 'log-fatal', 'log-info', 'log-warning', 'match', 'match*', 'match*/derived', 'match-define', 'match-define-values', 'match-lambda', 'match-lambda*', 'match-lambda**', 'match-let', 'match-let*', 'match-let*-values', 'match-let-values', 'match-letrec', 'match-letrec-values', 'match/derived', 'match/values', 'member-name-key', 'mixin', 'module', 'module*', 'module+', 'nand', 'new', 'nor', 'object-contract', 'object/c', 'only', 'only-in', 'only-meta-in', 'open', 'opt/c', 'or', 'overment', 'overment*', 'override', 'override*', 'override-final', 'override-final*', 'parameterize', 'parameterize*', 'parameterize-break', 'parametric->/c', 'place', 'place*', 'place/context', 'planet', 'prefix', 'prefix-in', 'prefix-out', 'private', 'private*', 'prompt-tag/c', 'protect-out', 'provide', 'provide-signature-elements', 'provide/contract', 'public', 'public*', 'public-final', 'public-final*', 'pubment', 'pubment*', 'quasiquote', 'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax', 'quote-syntax/prune', 'recontract-out', 'recursive-contract', 'relative-in', 'rename', 'rename-in', 'rename-inner', 'rename-out', 'rename-super', 'require', 'send', 'send*', 'send+', 'send-generic', 'send/apply', 'send/keyword-apply', 'set!', 'set!-values', 'set-field!', 'shared', 'stream', 'stream*', 'stream-cons', 'struct', 'struct*', 'struct-copy', 'struct-field-index', 'struct-out', 'struct/c', 'struct/ctc', 'struct/dc', 'submod', 'super', 'super-instantiate', 'super-make-object', 'super-new', 'syntax', 'syntax-case', 'syntax-case*', 'syntax-id-rules', 'syntax-rules', 'syntax/loc', 'tag', 'this', 'this%', 'thunk', 'thunk*', 'time', 'unconstrained-domain->', 'unit', 'unit-from-context', 'unit/c', 'unit/new-import-export', 'unit/s', 'unless', 'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing', 'values/drop', 'when', 'with-continuation-mark', 'with-contract', 'with-contract-continuation-mark', 'with-handlers', 'with-handlers*', 'with-method', 'with-syntax', 'λ')
_builtins = ('*', '*list/c', '+', '-', '/', '<', '</c', '<=', '<=/c', '=', '=/c', '>', '>/c', '>=', '>=/c', 'abort-current-continuation', 'abs', 'absolute-path?', 'acos', 'add-between', 'add1', 'alarm-evt', 'always-evt', 'and/c', 'andmap', 'angle', 'any/c', 'append', 'append*', 'append-map', 'apply', 'argmax', 'argmin', 'arithmetic-shift', 'arity-at-least', 'arity-at-least-value', 'arity-at-least?', 'arity-checking-wrapper', 'arity-includes?', 'arity=?', 'arrow-contract-info', 'arrow-contract-info-accepts-arglist', 'arrow-contract-info-chaperone-procedure', 'arrow-contract-info-check-first-order', 'arrow-contract-info?', 'asin', 'assf', 'assoc', 'assq', 'assv', 'atan', 'bad-number-of-results', 'banner', 'base->-doms/c', 'base->-rngs/c', 'base->?', 'between/c', 'bitwise-and', 'bitwise-bit-field', 'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor', 'blame-add-car-context', 'blame-add-cdr-context', 'blame-add-context', 'blame-add-missing-party', 'blame-add-nth-arg-context', 'blame-add-range-context', 'blame-add-unknown-context', 'blame-context', 'blame-contract', 'blame-fmt->-string', 'blame-missing-party?', 'blame-negative', 'blame-original?', 'blame-positive', 'blame-replace-negative', 'blame-source', 'blame-swap', 'blame-swapped?', 'blame-update', 'blame-value', 'blame?', 'boolean=?', 'boolean?', 'bound-identifier=?', 'box', 'box-cas!', 'box-immutable', 'box-immutable/c', 'box/c', 'box?', 'break-enabled', 'break-parameterization?', 'break-thread', 'build-chaperone-contract-property', 'build-compound-type-name', 'build-contract-property', 'build-flat-contract-property', 'build-list', 'build-path', 'build-path/convention-type', 'build-string', 'build-vector', 'byte-pregexp', 'byte-pregexp?', 'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes', 'bytes->immutable-bytes', 'bytes->list', 'bytes->path', 'bytes->path-element', 'bytes->string/latin-1', 'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append', 'bytes-append*', 'bytes-close-converter', 'bytes-convert', 'bytes-convert-end', 'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-environment-variable-name?', 'bytes-fill!', 'bytes-join', 'bytes-length', 'bytes-no-nuls?', 'bytes-open-converter', 'bytes-ref', 'bytes-set!', 'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref', 'bytes<?', 'bytes=?', 'bytes>?', 'bytes?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr', 'cadr', 'call-in-nested-thread', 'call-with-atomic-output-file', 'call-with-break-parameterization', 'call-with-composable-continuation', 'call-with-continuation-barrier', 'call-with-continuation-prompt', 'call-with-current-continuation', 'call-with-default-reading-parameterization', 'call-with-escape-continuation', 'call-with-exception-handler', 'call-with-file-lock/timeout', 'call-with-immediate-continuation-mark', 'call-with-input-bytes', 'call-with-input-file', 'call-with-input-file*', 'call-with-input-string', 'call-with-output-bytes', 'call-with-output-file', 'call-with-output-file*', 'call-with-output-string', 'call-with-parameterization', 'call-with-semaphore', 'call-with-semaphore/enable-break', 'call-with-values', 'call/cc', 'call/ec', 'car', 'cartesian-product', 'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr', 'ceiling', 'channel-get', 'channel-put', 'channel-put-evt', 'channel-put-evt?', 'channel-try-get', 'channel/c', 'channel?', 'chaperone-box', 
'chaperone-channel', 'chaperone-continuation-mark-key', 'chaperone-contract-property?', 'chaperone-contract?', 'chaperone-evt', 'chaperone-hash', 'chaperone-hash-set', 'chaperone-of?', 'chaperone-procedure', 'chaperone-procedure*', 'chaperone-prompt-tag', 'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector', 'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?', 'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase', 'char-foldcase', 'char-general-category', 'char-graphic?', 'char-in', 'char-in/c', 'char-iso-control?', 'char-lower-case?', 'char-numeric?', 'char-punctuation?', 'char-ready?', 'char-symbolic?', 'char-title-case?', 'char-titlecase', 'char-upcase', 'char-upper-case?', 'char-utf-8-length', 'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?', 'char>?', 'char?', 'check-duplicate-identifier', 'check-duplicates', 'checked-procedure-check-and-extract', 'choice-evt', 'class->interface', 'class-info', 'class-seal', 'class-unseal', 'class?', 'cleanse-path', 'close-input-port', 'close-output-port', 'coerce-chaperone-contract', 'coerce-chaperone-contracts', 'coerce-contract', 'coerce-contract/f', 'coerce-contracts', 'coerce-flat-contract', 'coerce-flat-contracts', 'collect-garbage', 'collection-file-path', 'collection-path', 'combinations', 'compile', 'compile-allow-set!-undefined', 'compile-context-preservation-enabled', 'compile-enforce-module-constants', 'compile-syntax', 'compiled-expression-recompile', 'compiled-expression?', 'compiled-module-expression?', 'complete-path?', 'complex?', 'compose', 'compose1', 'conjoin', 'conjugate', 'cons', 'cons/c', 'cons?', 'const', 'continuation-mark-key/c', 'continuation-mark-key?', 'continuation-mark-set->context', 'continuation-mark-set->list', 'continuation-mark-set->list*', 'continuation-mark-set-first', 'continuation-mark-set?', 'continuation-marks', 'continuation-prompt-available?', 'continuation-prompt-tag?', 'continuation?', 'contract-continuation-mark-key', 'contract-custom-write-property-proc', 'contract-exercise', 'contract-first-order', 'contract-first-order-passes?', 'contract-late-neg-projection', 'contract-name', 'contract-proc', 'contract-projection', 'contract-property?', 'contract-random-generate', 'contract-random-generate-fail', 'contract-random-generate-fail?', 'contract-random-generate-get-current-environment', 'contract-random-generate-stash', 'contract-random-generate/choose', 'contract-stronger?', 'contract-struct-exercise', 'contract-struct-generate', 'contract-struct-late-neg-projection', 'contract-struct-list-contract?', 'contract-val-first-projection', 'contract?', 'convert-stream', 'copy-directory/files', 'copy-file', 'copy-port', 'cos', 'cosh', 'count', 'current-blame-format', 'current-break-parameterization', 'current-code-inspector', 'current-command-line-arguments', 'current-compile', 'current-compiled-file-roots', 'current-continuation-marks', 'current-contract-region', 'current-custodian', 'current-directory', 'current-directory-for-user', 'current-drive', 'current-environment-variables', 'current-error-port', 'current-eval', 'current-evt-pseudo-random-generator', 'current-force-delete-permissions', 'current-future', 'current-gc-milliseconds', 'current-get-interaction-input-port', 'current-inexact-milliseconds', 'current-input-port', 'current-inspector', 'current-library-collection-links', 'current-library-collection-paths', 'current-load', 'current-load-extension', 'current-load-relative-directory', 'current-load/use-compiled', 'current-locale', 
'current-logger', 'current-memory-use', 'current-milliseconds', 'current-module-declare-name', 'current-module-declare-source', 'current-module-name-resolver', 'current-module-path-for-load', 'current-namespace', 'current-output-port', 'current-parameterization', 'current-plumber', 'current-preserved-thread-cell-values', 'current-print', 'current-process-milliseconds', 'current-prompt-read', 'current-pseudo-random-generator', 'current-read-interaction', 'current-reader-guard', 'current-readtable', 'current-seconds', 'current-security-guard', 'current-subprocess-custodian-mode', 'current-thread', 'current-thread-group', 'current-thread-initial-stack-size', 'current-write-relative-directory', 'curry', 'curryr', 'custodian-box-value', 'custodian-box?', 'custodian-limit-memory', 'custodian-managed-list', 'custodian-memory-accounting-available?', 'custodian-require-memory', 'custodian-shutdown-all', 'custodian?', 'custom-print-quotable-accessor', 'custom-print-quotable?', 'custom-write-accessor', 'custom-write-property-proc', 'custom-write?', 'date', 'date*', 'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day', 'date-dst?', 'date-hour', 'date-minute', 'date-month', 'date-second', 'date-time-zone-offset', 'date-week-day', 'date-year', 'date-year-day', 'date?', 'datum->syntax', 'datum-intern-literal', 'default-continuation-prompt-tag', 'degrees->radians', 'delete-directory', 'delete-directory/files', 'delete-file', 'denominator', 'dict->list', 'dict-can-functional-set?', 'dict-can-remove-keys?', 'dict-clear', 'dict-clear!', 'dict-copy', 'dict-count', 'dict-empty?', 'dict-for-each', 'dict-has-key?', 'dict-implements/c', 'dict-implements?', 'dict-iter-contract', 'dict-iterate-first', 'dict-iterate-key', 'dict-iterate-next', 'dict-iterate-value', 'dict-key-contract', 'dict-keys', 'dict-map', 'dict-mutable?', 'dict-ref', 'dict-ref!', 'dict-remove', 'dict-remove!', 'dict-set', 'dict-set!', 'dict-set*', 'dict-set*!', 'dict-update', 'dict-update!', 'dict-value-contract', 'dict-values', 'dict?', 'directory-exists?', 'directory-list', 'disjoin', 'display', 'display-lines', 'display-lines-to-file', 'display-to-file', 'displayln', 'double-flonum?', 'drop', 'drop-common-prefix', 'drop-right', 'dropf', 'dropf-right', 'dump-memory-stats', 'dup-input-port', 'dup-output-port', 'dynamic->*', 'dynamic-get-field', 'dynamic-object/c', 'dynamic-place', 'dynamic-place*', 'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-send', 'dynamic-set-field!', 'dynamic-wind', 'eighth', 'empty', 'empty-sequence', 'empty-stream', 'empty?', 'environment-variables-copy', 'environment-variables-names', 'environment-variables-ref', 'environment-variables-set!', 'environment-variables?', 'eof', 'eof-evt', 'eof-object?', 'ephemeron-value', 'ephemeron?', 'eprintf', 'eq-contract-val', 'eq-contract?', 'eq-hash-code', 'eq?', 'equal-contract-val', 'equal-contract?', 'equal-hash-code', 'equal-secondary-hash-code', 'equal<%>', 'equal?', 'equal?/recur', 'eqv-hash-code', 'eqv?', 'error', 'error-display-handler', 'error-escape-handler', 'error-print-context-length', 'error-print-source-location', 'error-print-width', 'error-value->string-handler', 'eval', 'eval-jit-enabled', 'eval-syntax', 'even?', 'evt/c', 'evt?', 'exact->inexact', 'exact-ceiling', 'exact-floor', 'exact-integer?', 'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact-round', 'exact-truncate', 'exact?', 'executable-yield-handler', 'exit', 'exit-handler', 'exn', 'exn-continuation-marks', 'exn-message', 'exn:break', 'exn:break-continuation', 
'exn:break:hang-up', 'exn:break:hang-up?', 'exn:break:terminate', 'exn:break:terminate?', 'exn:break?', 'exn:fail', 'exn:fail:contract', 'exn:fail:contract:arity', 'exn:fail:contract:arity?', 'exn:fail:contract:blame', 'exn:fail:contract:blame-object', 'exn:fail:contract:blame?', 'exn:fail:contract:continuation', 'exn:fail:contract:continuation?', 'exn:fail:contract:divide-by-zero', 'exn:fail:contract:divide-by-zero?', 'exn:fail:contract:non-fixnum-result', 'exn:fail:contract:non-fixnum-result?', 'exn:fail:contract:variable', 'exn:fail:contract:variable-id', 'exn:fail:contract:variable?', 'exn:fail:contract?', 'exn:fail:filesystem', 'exn:fail:filesystem:errno', 'exn:fail:filesystem:errno-errno', 'exn:fail:filesystem:errno?', 'exn:fail:filesystem:exists', 'exn:fail:filesystem:exists?', 'exn:fail:filesystem:missing-module', 'exn:fail:filesystem:missing-module-path', 'exn:fail:filesystem:missing-module?', 'exn:fail:filesystem:version', 'exn:fail:filesystem:version?', 'exn:fail:filesystem?', 'exn:fail:network', 'exn:fail:network:errno', 'exn:fail:network:errno-errno', 'exn:fail:network:errno?', 'exn:fail:network?', 'exn:fail:object', 'exn:fail:object?', 'exn:fail:out-of-memory', 'exn:fail:out-of-memory?', 'exn:fail:read', 'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?', 'exn:fail:read:non-char', 'exn:fail:read:non-char?', 'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs', 'exn:fail:syntax:missing-module', 'exn:fail:syntax:missing-module-path', 'exn:fail:syntax:missing-module?', 'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?', 'exn:fail:syntax?', 'exn:fail:unsupported', 'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?', 'exn:fail?', 'exn:misc:match?', 'exn:missing-module-accessor', 'exn:missing-module?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp', 'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once', 'expand-syntax-to-top-form', 'expand-to-top-form', 'expand-user-path', 'explode-path', 'expt', 'externalizable<%>', 'failure-result/c', 'false?', 'field-names', 'fifth', 'file->bytes', 'file->bytes-lines', 'file->lines', 'file->list', 'file->string', 'file->value', 'file-exists?', 'file-name-from-path', 'file-or-directory-identity', 'file-or-directory-modify-seconds', 'file-or-directory-permissions', 'file-position', 'file-position*', 'file-size', 'file-stream-buffer-mode', 'file-stream-port?', 'file-truncate', 'filename-extension', 'filesystem-change-evt', 'filesystem-change-evt-cancel', 'filesystem-change-evt?', 'filesystem-root-list', 'filter', 'filter-map', 'filter-not', 'filter-read-input-port', 'find-executable-path', 'find-files', 'find-library-collection-links', 'find-library-collection-paths', 'find-relative-path', 'find-system-path', 'findf', 'first', 'first-or/c', 'fixnum?', 'flat-contract', 'flat-contract-predicate', 'flat-contract-property?', 'flat-contract?', 'flat-named-contract', 'flatten', 'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output', 'fold-files', 'foldl', 'foldr', 'for-each', 'force', 'format', 'fourth', 'fprintf', 'free-identifier=?', 'free-label-identifier=?', 'free-template-identifier=?', 'free-transformer-identifier=?', 'fsemaphore-count', 'fsemaphore-post', 'fsemaphore-try-wait?', 'fsemaphore-wait', 'fsemaphore?', 'future', 'future?', 'futures-enabled?', 'gcd', 'generate-member-key', 'generate-temporaries', 'generic-set?', 'generic?', 'gensym', 'get-output-bytes', 'get-output-string', 'get-preference', 'get/build-late-neg-projection', 'get/build-val-first-projection', 'getenv', 
'global-port-print-handler', 'group-by', 'group-execute-bit', 'group-read-bit', 'group-write-bit', 'guard-evt', 'handle-evt', 'handle-evt?', 'has-blame?', 'has-contract?', 'hash', 'hash->list', 'hash-clear', 'hash-clear!', 'hash-copy', 'hash-copy-clear', 'hash-count', 'hash-empty?', 'hash-eq?', 'hash-equal?', 'hash-eqv?', 'hash-for-each', 'hash-has-key?', 'hash-iterate-first', 'hash-iterate-key', 'hash-iterate-key+value', 'hash-iterate-next', 'hash-iterate-pair', 'hash-iterate-value', 'hash-keys', 'hash-map', 'hash-placeholder?', 'hash-ref', 'hash-ref!', 'hash-remove', 'hash-remove!', 'hash-set', 'hash-set!', 'hash-set*', 'hash-set*!', 'hash-update', 'hash-update!', 'hash-values', 'hash-weak?', 'hash/c', 'hash?', 'hasheq', 'hasheqv', 'identifier-binding', 'identifier-binding-symbol', 'identifier-label-binding', 'identifier-prune-lexical-context', 'identifier-prune-to-source-module', 'identifier-remove-from-definition-context', 'identifier-template-binding', 'identifier-transformer-binding', 'identifier?', 'identity', 'if/c', 'imag-part', 'immutable?', 'impersonate-box', 'impersonate-channel', 'impersonate-continuation-mark-key', 'impersonate-hash', 'impersonate-hash-set', 'impersonate-procedure', 'impersonate-procedure*', 'impersonate-prompt-tag', 'impersonate-struct', 'impersonate-vector', 'impersonator-contract?', 'impersonator-ephemeron', 'impersonator-of?', 'impersonator-prop:application-mark', 'impersonator-prop:blame', 'impersonator-prop:contracted', 'impersonator-property-accessor-procedure?', 'impersonator-property?', 'impersonator?', 'implementation?', 'implementation?/c', 'in-bytes', 'in-bytes-lines', 'in-combinations', 'in-cycle', 'in-dict', 'in-dict-keys', 'in-dict-pairs', 'in-dict-values', 'in-directory', 'in-hash', 'in-hash-keys', 'in-hash-pairs', 'in-hash-values', 'in-immutable-hash', 'in-immutable-hash-keys', 'in-immutable-hash-pairs', 'in-immutable-hash-values', 'in-immutable-set', 'in-indexed', 'in-input-port-bytes', 'in-input-port-chars', 'in-lines', 'in-list', 'in-mlist', 'in-mutable-hash', 'in-mutable-hash-keys', 'in-mutable-hash-pairs', 'in-mutable-hash-values', 'in-mutable-set', 'in-naturals', 'in-parallel', 'in-permutations', 'in-port', 'in-producer', 'in-range', 'in-sequences', 'in-set', 'in-slice', 'in-stream', 'in-string', 'in-syntax', 'in-value', 'in-values*-sequence', 'in-values-sequence', 'in-vector', 'in-weak-hash', 'in-weak-hash-keys', 'in-weak-hash-pairs', 'in-weak-hash-values', 'in-weak-set', 'inexact->exact', 'inexact-real?', 'inexact?', 'infinite?', 'input-port-append', 'input-port?', 'inspector?', 'instanceof/c', 'integer->char', 'integer->integer-bytes', 'integer-bytes->integer', 'integer-in', 'integer-length', 'integer-sqrt', 'integer-sqrt/remainder', 'integer?', 'interface->method-names', 'interface-extension?', 'interface?', 'internal-definition-context-binding-identifiers', 'internal-definition-context-introduce', 'internal-definition-context-seal', 'internal-definition-context?', 'is-a?', 'is-a?/c', 'keyword->string', 'keyword-apply', 'keyword<?', 'keyword?', 'keywords-match', 'kill-thread', 'last', 'last-pair', 'lcm', 'length', 'liberal-define-context?', 'link-exists?', 'list', 'list*', 'list*of', 'list->bytes', 'list->mutable-set', 'list->mutable-seteq', 'list->mutable-seteqv', 'list->set', 'list->seteq', 'list->seteqv', 'list->string', 'list->vector', 'list->weak-set', 'list->weak-seteq', 'list->weak-seteqv', 'list-contract?', 'list-prefix?', 'list-ref', 'list-set', 'list-tail', 'list-update', 'list/c', 'list?', 'listen-port-number?', 'listof', 
'load', 'load-extension', 'load-on-demand-enabled', 'load-relative', 'load-relative-extension', 'load/cd', 'load/use-compiled', 'local-expand', 'local-expand/capture-lifts', 'local-transformer-expand', 'local-transformer-expand/capture-lifts', 'locale-string-encoding', 'log', 'log-all-levels', 'log-level-evt', 'log-level?', 'log-max-level', 'log-message', 'log-receiver?', 'logger-name', 'logger?', 'magnitude', 'make-arity-at-least', 'make-base-empty-namespace', 'make-base-namespace', 'make-bytes', 'make-channel', 'make-chaperone-contract', 'make-continuation-mark-key', 'make-continuation-prompt-tag', 'make-contract', 'make-custodian', 'make-custodian-box', 'make-custom-hash', 'make-custom-hash-types', 'make-custom-set', 'make-custom-set-types', 'make-date', 'make-date*', 'make-derived-parameter', 'make-directory', 'make-directory*', 'make-do-sequence', 'make-empty-namespace', 'make-environment-variables', 'make-ephemeron', 'make-exn', 'make-exn:break', 'make-exn:break:hang-up', 'make-exn:break:terminate', 'make-exn:fail', 'make-exn:fail:contract', 'make-exn:fail:contract:arity', 'make-exn:fail:contract:blame', 'make-exn:fail:contract:continuation', 'make-exn:fail:contract:divide-by-zero', 'make-exn:fail:contract:non-fixnum-result', 'make-exn:fail:contract:variable', 'make-exn:fail:filesystem', 'make-exn:fail:filesystem:errno', 'make-exn:fail:filesystem:exists', 'make-exn:fail:filesystem:missing-module', 'make-exn:fail:filesystem:version', 'make-exn:fail:network', 'make-exn:fail:network:errno', 'make-exn:fail:object', 'make-exn:fail:out-of-memory', 'make-exn:fail:read', 'make-exn:fail:read:eof', 'make-exn:fail:read:non-char', 'make-exn:fail:syntax', 'make-exn:fail:syntax:missing-module', 'make-exn:fail:syntax:unbound', 'make-exn:fail:unsupported', 'make-exn:fail:user', 'make-file-or-directory-link', 'make-flat-contract', 'make-fsemaphore', 'make-generic', 'make-handle-get-preference-locked', 'make-hash', 'make-hash-placeholder', 'make-hasheq', 'make-hasheq-placeholder', 'make-hasheqv', 'make-hasheqv-placeholder', 'make-immutable-custom-hash', 'make-immutable-hash', 'make-immutable-hasheq', 'make-immutable-hasheqv', 'make-impersonator-property', 'make-input-port', 'make-input-port/read-to-peek', 'make-inspector', 'make-keyword-procedure', 'make-known-char-range-list', 'make-limited-input-port', 'make-list', 'make-lock-file-name', 'make-log-receiver', 'make-logger', 'make-mixin-contract', 'make-mutable-custom-set', 'make-none/c', 'make-object', 'make-output-port', 'make-parameter', 'make-parent-directory*', 'make-phantom-bytes', 'make-pipe', 'make-pipe-with-specials', 'make-placeholder', 'make-plumber', 'make-polar', 'make-prefab-struct', 'make-primitive-class', 'make-proj-contract', 'make-pseudo-random-generator', 'make-reader-graph', 'make-readtable', 'make-rectangular', 'make-rename-transformer', 'make-resolved-module-path', 'make-security-guard', 'make-semaphore', 'make-set!-transformer', 'make-shared-bytes', 'make-sibling-inspector', 'make-special-comment', 'make-srcloc', 'make-string', 'make-struct-field-accessor', 'make-struct-field-mutator', 'make-struct-type', 'make-struct-type-property', 'make-syntax-delta-introducer', 'make-syntax-introducer', 'make-temporary-file', 'make-tentative-pretty-print-output-port', 'make-thread-cell', 'make-thread-group', 'make-vector', 'make-weak-box', 'make-weak-custom-hash', 'make-weak-custom-set', 'make-weak-hash', 'make-weak-hasheq', 'make-weak-hasheqv', 'make-will-executor', 'map', 'match-equality-test', 'matches-arity-exactly?', 'max', 'mcar', 
'mcdr', 'mcons', 'member', 'member-name-key-hash-code', 'member-name-key=?', 'member-name-key?', 'memf', 'memq', 'memv', 'merge-input', 'method-in-interface?', 'min', 'mixin-contract', 'module->exports', 'module->imports', 'module->language-info', 'module->namespace', 'module-compiled-cross-phase-persistent?', 'module-compiled-exports', 'module-compiled-imports', 'module-compiled-language-info', 'module-compiled-name', 'module-compiled-submodules', 'module-declared?', 'module-path-index-join', 'module-path-index-resolve', 'module-path-index-split', 'module-path-index-submodule', 'module-path-index?', 'module-path?', 'module-predefined?', 'module-provide-protected?', 'modulo', 'mpair?', 'mutable-set', 'mutable-seteq', 'mutable-seteqv', 'n->th', 'nack-guard-evt', 'namespace-anchor->empty-namespace', 'namespace-anchor->namespace', 'namespace-anchor?', 'namespace-attach-module', 'namespace-attach-module-declaration', 'namespace-base-phase', 'namespace-mapped-symbols', 'namespace-module-identifier', 'namespace-module-registry', 'namespace-require', 'namespace-require/constant', 'namespace-require/copy', 'namespace-require/expansion-time', 'namespace-set-variable-value!', 'namespace-symbol->identifier', 'namespace-syntax-introduce', 'namespace-undefine-variable!', 'namespace-unprotect-module', 'namespace-variable-value', 'namespace?', 'nan?', 'natural-number/c', 'negate', 'negative?', 'never-evt', 'new-/c', 'new-/c', 'newline', 'ninth', 'non-empty-listof', 'non-empty-string?', 'none/c', 'normal-case-path', 'normalize-arity', 'normalize-path', 'normalized-arity?', 'not', 'not/c', 'null', 'null?', 'number->string', 'number?', 'numerator', 'object%', 'object->vector', 'object-info', 'object-interface', 'object-method-arity-includes?', 'object-name', 'object-or-false=?', 'object=?', 'object?', 'odd?', 'one-of/c', 'open-input-bytes', 'open-input-file', 'open-input-output-file', 'open-input-string', 'open-output-bytes', 'open-output-file', 'open-output-nowhere', 'open-output-string', 'or/c', 'order-of-magnitude', 'ormap', 'other-execute-bit', 'other-read-bit', 'other-write-bit', 'output-port?', 'pair?', 'parameter-procedure=?', 'parameter/c', 'parameter?', 'parameterization?', 'parse-command-line', 'partition', 'path->bytes', 'path->complete-path', 'path->directory-path', 'path->string', 'path-add-suffix', 'path-convention-type', 'path-element->bytes', 'path-element->string', 'path-element?', 'path-for-some-system?', 'path-list-string->path-list', 'path-only', 'path-replace-suffix', 'path-string?', 'path<?', 'path?', 'pathlist-closure', 'peek-byte', 'peek-byte-or-special', 'peek-bytes', 'peek-bytes!', 'peek-bytes!-evt', 'peek-bytes-avail!', 'peek-bytes-avail!*', 'peek-bytes-avail!-evt', 'peek-bytes-avail!/enable-break', 'peek-bytes-evt', 'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!', 'peek-string!-evt', 'peek-string-evt', 'peeking-input-port', 'permutations', 'phantom-bytes?', 'pi', 'pi.f', 'pipe-content-length', 'place-break', 'place-channel', 'place-channel-get', 'place-channel-put', 'place-channel-put/get', 'place-channel?', 'place-dead-evt', 'place-enabled?', 'place-kill', 'place-location?', 'place-message-allowed?', 'place-sleep', 'place-wait', 'place?', 'placeholder-get', 'placeholder-set!', 'placeholder?', 'plumber-add-flush!', 'plumber-flush-all', 'plumber-flush-handle-remove!', 'plumber-flush-handle?', 'plumber?', 'poll-guard-evt', 'port->bytes', 'port->bytes-lines', 'port->lines', 'port->list', 'port->string', 'port-closed-evt', 'port-closed?', 'port-commit-peeked', 
'port-count-lines!', 'port-count-lines-enabled', 'port-counts-lines?', 'port-display-handler', 'port-file-identity', 'port-file-unlock', 'port-next-location', 'port-number?', 'port-print-handler', 'port-progress-evt', 'port-provides-progress-evts?', 'port-read-handler', 'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?', 'port-writes-special?', 'port?', 'positive?', 'predicate/c', 'prefab-key->struct-type', 'prefab-key?', 'prefab-struct-key', 'preferences-lock-file-mode', 'pregexp', 'pregexp?', 'pretty-display', 'pretty-format', 'pretty-print', 'pretty-print-.-symbol-without-bars', 'pretty-print-abbreviate-read-macros', 'pretty-print-columns', 'pretty-print-current-style-table', 'pretty-print-depth', 'pretty-print-exact-as-decimal', 'pretty-print-extend-style-table', 'pretty-print-handler', 'pretty-print-newline', 'pretty-print-post-print-hook', 'pretty-print-pre-print-hook', 'pretty-print-print-hook', 'pretty-print-print-line', 'pretty-print-remap-stylable', 'pretty-print-show-inexactness', 'pretty-print-size-hook', 'pretty-print-style-table?', 'pretty-printing', 'pretty-write', 'primitive-closure?', 'primitive-result-arity', 'primitive?', 'print', 'print-as-expression', 'print-boolean-long-form', 'print-box', 'print-graph', 'print-hash-table', 'print-mpair-curly-braces', 'print-pair-curly-braces', 'print-reader-abbreviations', 'print-struct', 'print-syntax-width', 'print-unreadable', 'print-vector-length', 'printable/c', 'printable<%>', 'printf', 'println', 'procedure->method', 'procedure-arity', 'procedure-arity-includes/c', 'procedure-arity-includes?', 'procedure-arity?', 'procedure-closure-contents-eq?', 'procedure-extract-target', 'procedure-keywords', 'procedure-reduce-arity', 'procedure-reduce-keyword-arity', 'procedure-rename', 'procedure-result-arity', 'procedure-specialize', 'procedure-struct-type?', 'procedure?', 'process', 'process*', 'process*/ports', 'process/ports', 'processor-count', 'progress-evt?', 'promise-forced?', 'promise-running?', 'promise/c', 'promise/name?', 'promise?', 'prop:arity-string', 'prop:arrow-contract', 'prop:arrow-contract-get-info', 'prop:arrow-contract?', 'prop:blame', 'prop:chaperone-contract', 'prop:checked-procedure', 'prop:contract', 'prop:contracted', 'prop:custom-print-quotable', 'prop:custom-write', 'prop:dict', 'prop:dict/contract', 'prop:equal+hash', 'prop:evt', 'prop:exn:missing-module', 'prop:exn:srclocs', 'prop:expansion-contexts', 'prop:flat-contract', 'prop:impersonator-of', 'prop:input-port', 'prop:liberal-define-context', 'prop:object-name', 'prop:opt-chaperone-contract', 'prop:opt-chaperone-contract-get-test', 'prop:opt-chaperone-contract?', 'prop:orc-contract', 'prop:orc-contract-get-subcontracts', 'prop:orc-contract?', 'prop:output-port', 'prop:place-location', 'prop:procedure', 'prop:recursive-contract', 'prop:recursive-contract-unroll', 'prop:recursive-contract?', 'prop:rename-transformer', 'prop:sequence', 'prop:set!-transformer', 'prop:stream', 'proper-subset?', 'pseudo-random-generator->vector', 'pseudo-random-generator-vector?', 'pseudo-random-generator?', 'put-preferences', 'putenv', 'quotient', 'quotient/remainder', 'radians->degrees', 'raise', 'raise-argument-error', 'raise-arguments-error', 'raise-arity-error', 'raise-blame-error', 'raise-contract-error', 'raise-mismatch-error', 'raise-not-cons-blame-error', 'raise-range-error', 'raise-result-error', 'raise-syntax-error', 'raise-type-error', 'raise-user-error', 'random', 'random-seed', 'range', 'rational?', 'rationalize', 'read', 'read-accept-bar-quote', 
'read-accept-box', 'read-accept-compiled', 'read-accept-dot', 'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang', 'read-accept-quasiquote', 'read-accept-reader', 'read-byte', 'read-byte-or-special', 'read-bytes', 'read-bytes!', 'read-bytes!-evt', 'read-bytes-avail!', 'read-bytes-avail!*', 'read-bytes-avail!-evt', 'read-bytes-avail!/enable-break', 'read-bytes-evt', 'read-bytes-line', 'read-bytes-line-evt', 'read-case-sensitive', 'read-cdot', 'read-char', 'read-char-or-special', 'read-curly-brace-as-paren', 'read-curly-brace-with-tag', 'read-decimal-as-inexact', 'read-eval-print-loop', 'read-language', 'read-line', 'read-line-evt', 'read-on-demand-source', 'read-square-bracket-as-paren', 'read-square-bracket-with-tag', 'read-string', 'read-string!', 'read-string!-evt', 'read-string-evt', 'read-syntax', 'read-syntax/recursive', 'read/recursive', 'readtable-mapping', 'readtable?', 'real->decimal-string', 'real->double-flonum', 'real->floating-point-bytes', 'real->single-flonum', 'real-in', 'real-part', 'real?', 'reencode-input-port', 'reencode-output-port', 'regexp', 'regexp-match', 'regexp-match*', 'regexp-match-evt', 'regexp-match-exact?', 'regexp-match-peek', 'regexp-match-peek-immediate', 'regexp-match-peek-positions', 'regexp-match-peek-positions*', 'regexp-match-peek-positions-immediate', 'regexp-match-peek-positions-immediate/end', 'regexp-match-peek-positions/end', 'regexp-match-positions', 'regexp-match-positions*', 'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?', 'regexp-max-lookbehind', 'regexp-quote', 'regexp-replace', 'regexp-replace*', 'regexp-replace-quote', 'regexp-replaces', 'regexp-split', 'regexp-try-match', 'regexp?', 'relative-path?', 'relocate-input-port', 'relocate-output-port', 'remainder', 'remf', 'remf*', 'remove', 'remove*', 'remove-duplicates', 'remq', 'remq*', 'remv', 'remv*', 'rename-contract', 'rename-file-or-directory', 'rename-transformer-target', 'rename-transformer?', 'replace-evt', 'reroot-path', 'resolve-path', 'resolved-module-path-name', 'resolved-module-path?', 'rest', 'reverse', 'round', 'second', 'seconds->date', 'security-guard?', 'semaphore-peek-evt', 'semaphore-peek-evt?', 'semaphore-post', 'semaphore-try-wait?', 'semaphore-wait', 'semaphore-wait/enable-break', 'semaphore?', 'sequence->list', 'sequence->stream', 'sequence-add-between', 'sequence-andmap', 'sequence-append', 'sequence-count', 'sequence-filter', 'sequence-fold', 'sequence-for-each', 'sequence-generate', 'sequence-generate*', 'sequence-length', 'sequence-map', 'sequence-ormap', 'sequence-ref', 'sequence-tail', 'sequence/c', 'sequence?', 'set', 'set!-transformer-procedure', 'set!-transformer?', 'set->list', 'set->stream', 'set-add', 'set-add!', 'set-box!', 'set-clear', 'set-clear!', 'set-copy', 'set-copy-clear', 'set-count', 'set-empty?', 'set-eq?', 'set-equal?', 'set-eqv?', 'set-first', 'set-for-each', 'set-implements/c', 'set-implements?', 'set-intersect', 'set-intersect!', 'set-map', 'set-mcar!', 'set-mcdr!', 'set-member?', 'set-mutable?', 'set-phantom-bytes!', 'set-port-next-location!', 'set-remove', 'set-remove!', 'set-rest', 'set-some-basic-contracts!', 'set-subtract', 'set-subtract!', 'set-symmetric-difference', 'set-symmetric-difference!', 'set-union', 'set-union!', 'set-weak?', 'set/c', 'set=?', 'set?', 'seteq', 'seteqv', 'seventh', 'sgn', 'shared-bytes', 'shell-execute', 'shrink-path-wrt', 'shuffle', 'simple-form-path', 'simplify-path', 'sin', 'single-flonum?', 'sinh', 'sixth', 'skip-projection-wrapper?', 'sleep', 'some-system-path->string', 
'sort', 'special-comment-value', 'special-comment?', 'special-filter-input-port', 'split-at', 'split-at-right', 'split-common-prefix', 'split-path', 'splitf-at', 'splitf-at-right', 'sqr', 'sqrt', 'srcloc', 'srcloc->string', 'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source', 'srcloc-span', 'srcloc?', 'stop-after', 'stop-before', 'stream->list', 'stream-add-between', 'stream-andmap', 'stream-append', 'stream-count', 'stream-empty?', 'stream-filter', 'stream-first', 'stream-fold', 'stream-for-each', 'stream-length', 'stream-map', 'stream-ormap', 'stream-ref', 'stream-rest', 'stream-tail', 'stream/c', 'stream?', 'string', 'string->bytes/latin-1', 'string->bytes/locale', 'string->bytes/utf-8', 'string->immutable-string', 'string->keyword', 'string->list', 'string->number', 'string->path', 'string->path-element', 'string->some-system-path', 'string->symbol', 'string->uninterned-symbol', 'string->unreadable-symbol', 'string-append', 'string-append*', 'string-ci<=?', 'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?', 'string-contains?', 'string-copy', 'string-copy!', 'string-downcase', 'string-environment-variable-name?', 'string-fill!', 'string-foldcase', 'string-join', 'string-len/c', 'string-length', 'string-locale-ci<?', 'string-locale-ci=?', 'string-locale-ci>?', 'string-locale-downcase', 'string-locale-upcase', 'string-locale<?', 'string-locale=?', 'string-locale>?', 'string-no-nuls?', 'string-normalize-nfc', 'string-normalize-nfd', 'string-normalize-nfkc', 'string-normalize-nfkd', 'string-normalize-spaces', 'string-port?', 'string-prefix?', 'string-ref', 'string-replace', 'string-set!', 'string-split', 'string-suffix?', 'string-titlecase', 'string-trim', 'string-upcase', 'string-utf-8-length', 'string<=?', 'string<?', 'string=?', 'string>=?', 'string>?', 'string?', 'struct->vector', 'struct-accessor-procedure?', 'struct-constructor-procedure?', 'struct-info', 'struct-mutator-procedure?', 'struct-predicate-procedure?', 'struct-type-info', 'struct-type-make-constructor', 'struct-type-make-predicate', 'struct-type-property-accessor-procedure?', 'struct-type-property/c', 'struct-type-property?', 'struct-type?', 'struct:arity-at-least', 'struct:arrow-contract-info', 'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break', 'struct:exn:break:hang-up', 'struct:exn:break:terminate', 'struct:exn:fail', 'struct:exn:fail:contract', 'struct:exn:fail:contract:arity', 'struct:exn:fail:contract:blame', 'struct:exn:fail:contract:continuation', 'struct:exn:fail:contract:divide-by-zero', 'struct:exn:fail:contract:non-fixnum-result', 'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem', 'struct:exn:fail:filesystem:errno', 'struct:exn:fail:filesystem:exists', 'struct:exn:fail:filesystem:missing-module', 'struct:exn:fail:filesystem:version', 'struct:exn:fail:network', 'struct:exn:fail:network:errno', 'struct:exn:fail:object', 'struct:exn:fail:out-of-memory', 'struct:exn:fail:read', 'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char', 'struct:exn:fail:syntax', 'struct:exn:fail:syntax:missing-module', 'struct:exn:fail:syntax:unbound', 'struct:exn:fail:unsupported', 'struct:exn:fail:user', 'struct:srcloc', 'struct:wrapped-extra-arg-arrow', 'struct?', 'sub1', 'subbytes', 'subclass?', 'subclass?/c', 'subprocess', 'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid', 'subprocess-status', 'subprocess-wait', 'subprocess?', 'subset?', 'substring', 'suggest/c', 'symbol->string', 'symbol-interned?', 'symbol-unreadable?', 'symbol<?', 'symbol=?', 'symbol?', 
'symbols', 'sync', 'sync/enable-break', 'sync/timeout', 'sync/timeout/enable-break', 'syntax->datum', 'syntax->list', 'syntax-arm', 'syntax-column', 'syntax-debug-info', 'syntax-disarm', 'syntax-e', 'syntax-line', 'syntax-local-bind-syntaxes', 'syntax-local-certifier', 'syntax-local-context', 'syntax-local-expand-expression', 'syntax-local-get-shadower', 'syntax-local-identifier-as-binding', 'syntax-local-introduce', 'syntax-local-lift-context', 'syntax-local-lift-expression', 'syntax-local-lift-module', 'syntax-local-lift-module-end-declaration', 'syntax-local-lift-provide', 'syntax-local-lift-require', 'syntax-local-lift-values-expression', 'syntax-local-make-definition-context', 'syntax-local-make-delta-introducer', 'syntax-local-module-defined-identifiers', 'syntax-local-module-exports', 'syntax-local-module-required-identifiers', 'syntax-local-name', 'syntax-local-phase-level', 'syntax-local-submodules', 'syntax-local-transforming-module-provides?', 'syntax-local-value', 'syntax-local-value/immediate', 'syntax-original?', 'syntax-position', 'syntax-property', 'syntax-property-preserved?', 'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm', 'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source', 'syntax-source-module', 'syntax-span', 'syntax-taint', 'syntax-tainted?', 'syntax-track-origin', 'syntax-transforming-module-expression?', 'syntax-transforming-with-lifts?', 'syntax-transforming?', 'syntax/c', 'syntax?', 'system', 'system*', 'system*/exit-code', 'system-big-endian?', 'system-idle-evt', 'system-language+country', 'system-library-subpath', 'system-path-convention-type', 'system-type', 'system/exit-code', 'tail-marks-match?', 'take', 'take-common-prefix', 'take-right', 'takef', 'takef-right', 'tan', 'tanh', 'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt', 'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses', 'tcp-close', 'tcp-connect', 'tcp-connect/enable-break', 'tcp-listen', 'tcp-listener?', 'tcp-port?', 'tentative-pretty-print-port-cancel', 'tentative-pretty-print-port-transfer', 'tenth', 'terminal-port?', 'the-unsupplied-arg', 'third', 'thread', 'thread-cell-ref', 'thread-cell-set!', 'thread-cell-values?', 'thread-cell?', 'thread-dead-evt', 'thread-dead?', 'thread-group?', 'thread-receive', 'thread-receive-evt', 'thread-resume', 'thread-resume-evt', 'thread-rewind-receive', 'thread-running?', 'thread-send', 'thread-suspend', 'thread-suspend-evt', 'thread-try-receive', 'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply', 'touch', 'transplant-input-port', 'transplant-output-port', 'true', 'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?', 'udp-close', 'udp-connect!', 'udp-connected?', 'udp-multicast-interface', 'udp-multicast-join-group!', 'udp-multicast-leave-group!', 'udp-multicast-loopback?', 'udp-multicast-set-interface!', 'udp-multicast-set-loopback!', 'udp-multicast-set-ttl!', 'udp-multicast-ttl', 'udp-open-socket', 'udp-receive!', 'udp-receive!*', 'udp-receive!-evt', 'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send', 'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to', 'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break', 'udp-send/enable-break', 'udp?', 'unbox', 'uncaught-exception-handler', 'unit?', 'unspecified-dom', 'unsupplied-arg?', 'use-collection-link-paths', 'use-compiled-file-paths', 'use-user-specific-search-paths', 'user-execute-bit', 'user-read-bit', 'user-write-bit', 'value-blame', 'value-contract', 'values', 'variable-reference->empty-namespace', 
'variable-reference->module-base-phase', 'variable-reference->module-declaration-inspector', 'variable-reference->module-path-index', 'variable-reference->module-source', 'variable-reference->namespace', 'variable-reference->phase', 'variable-reference->resolved-module-path', 'variable-reference-constant?', 'variable-reference?', 'vector', 'vector->immutable-vector', 'vector->list', 'vector->pseudo-random-generator', 'vector->pseudo-random-generator!', 'vector->values', 'vector-append', 'vector-argmax', 'vector-argmin', 'vector-copy', 'vector-copy!', 'vector-count', 'vector-drop', 'vector-drop-right', 'vector-fill!', 'vector-filter', 'vector-filter-not', 'vector-immutable', 'vector-immutable/c', 'vector-immutableof', 'vector-length', 'vector-map', 'vector-map!', 'vector-member', 'vector-memq', 'vector-memv', 'vector-ref', 'vector-set!', 'vector-set*!', 'vector-set-performance-stats!', 'vector-split-at', 'vector-split-at-right', 'vector-take', 'vector-take-right', 'vector/c', 'vector?', 'vectorof', 'version', 'void', 'void?', 'weak-box-value', 'weak-box?', 'weak-set', 'weak-seteq', 'weak-seteqv', 'will-execute', 'will-executor?', 'will-register', 'will-try-execute', 'with-input-from-bytes', 'with-input-from-file', 'with-input-from-string', 'with-output-to-bytes', 'with-output-to-file', 'with-output-to-string', 'would-be-future', 'wrap-evt', 'wrapped-extra-arg-arrow', 'wrapped-extra-arg-arrow-extra-neg-party-argument', 'wrapped-extra-arg-arrow-real-func', 'wrapped-extra-arg-arrow?', 'writable<%>', 'write', 'write-byte', 'write-bytes', 'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt', 'write-bytes-avail/enable-break', 'write-char', 'write-special', 'write-special-avail*', 'write-special-evt', 'write-string', 'write-to-file', 'writeln', 'xor', 'zero?', '~.a', '~.s', '~.v', '~a', '~e', '~r', '~s', '~v')
_opening_parenthesis = '[([{]'
_closing_parenthesis = '[)\\]}]'
_delimiters = '()[\\]{}",\\\'`;\\s'
_symbol = ('(?:\\|[^|]*\\||\\\\[\\w\\W]|[^|\\\\%s]+)+' % _delimiters)
_exact_decimal_prefix = '(?:#e)?(?:#d)?(?:#e)?'
_exponent = '(?:[defls][-+]?\\d+)'
_inexact_simple_no_hashes = '(?:\\d+(?:/\\d+|\\.\\d*)?|\\.\\d+)'
_inexact_simple = ('(?:%s|(?:\\d+#+(?:\\.#*|/\\d+#*)?|\\.\\d+#+|\\d+(?:\\.\\d*#+|/\\d+#+)))' % _inexact_simple_no_hashes)
_inexact_normal_no_hashes = ('(?:%s%s?)' % (_inexact_simple_no_hashes, _exponent))
_inexact_normal = ('(?:%s%s?)' % (_inexact_simple, _exponent))
_inexact_special = '(?:(?:inf|nan)\\.[0f])'
_inexact_real = ('(?:[-+]?%s|[-+]%s)' % (_inexact_normal, _inexact_special))
_inexact_unsigned = ('(?:%s|%s)' % (_inexact_normal, _inexact_special))
tokens = {'root': [(_closing_parenthesis, Error), ('(?!\\Z)', Text, 'unquoted-datum')], 'datum': [('(?s)#;|#*', Comment), (';[^\\n\\r\\x85\\u2028\\u2029]*', Comment.Single), ('#\\|', Comment.Multiline, 'block-comment'), ('(?u)\\s+', Whitespace), (('(?i)%s[-+]?\\d+(?=[%s])' % (_exact_decimal_prefix, _delimiters)), Number.Integer, '#pop'), (('(?i)%s[-+]?(\\d+(\\.\\d*)?|\\.\\d+)([deflst][-+]?\\d+)?(?=[%s])' % (_exact_decimal_prefix, _delimiters)), Number.Float, '#pop'), (('(?i)%s[-+]?(%s([-+]%s?i)?|[-+]%s?i)(?=[%s])' % (_exact_decimal_prefix, _inexact_normal_no_hashes, _inexact_normal_no_hashes, _inexact_normal_no_hashes, _delimiters)), Number, '#pop'), (('(?i)(#d)?(%s([-+]%s?i)?|[-+]%s?i|%s@%s)(?=[%s])' % (_inexact_real, _inexact_unsigned, _inexact_unsigned, _inexact_real, _inexact_real, _delimiters)), Number.Float, '#pop'), (('(?i)(([-+]?%st[-+]?\\d+)|[-+](inf|nan)\\.t)(?=[%s])' % (_inexact_simple, _delimiters)), Number.Float, '#pop'), (('(?iu)(#[ei])?#b%s' % _symbol), Number.Bin, '#pop'), (('(?iu)(#[ei])?#o%s' % _symbol), Number.Oct, '#pop'), (('(?iu)(#[ei])?#x%s' % _symbol), Number.Hex, '#pop'), (('(?iu)(#d)?#i%s' % _symbol), Number.Float, '#pop'), ('#?"', String.Double, ('#pop', 'string')), ('#<<(.+)\\n(^(?!\\1$).*$\\n)*^\\1$', String.Heredoc, '#pop'), ('#\\\\(u[\\da-fA-F]{1,4}|U[\\da-fA-F]{1,8})', String.Char, '#pop'), ('(?is)#\\\\([0-7]{3}|[a-z]+|.)', String.Char, '#pop'), ('(?s)#[pr]x#?"(\\\\?.)*?"', String.Regex, '#pop'), ('#(true|false|[tTfF])', Name.Constant, '#pop'), (('#:%s' % _symbol), Keyword.Declaration, '#pop'), ('(#lang |#!)(\\S+)', bygroups(Keyword.Namespace, Name.Namespace)), ('#reader', Keyword.Namespace, 'quoted-datum'), (("(?i)\\.(?=[%s])|#c[is]|#['`]|#,@?" % _delimiters), Operator), (("'|#[s&]|#hash(eqv?)?|#\\d*(?=%s)" % _opening_parenthesis), Operator, ('#pop', 'quoted-datum'))], 'datum*': [('`|,@?', Operator), (_symbol, String.Symbol, '#pop'), ('[|\\\\]', Error), default('#pop')], 'list': [(_closing_parenthesis, Punctuation, '#pop')], 'unquoted-datum': [include('datum'), (('quote(?=[%s])' % _delimiters), Keyword, ('#pop', 'quoted-datum')), ('`', Operator, ('#pop', 'quasiquoted-datum')), (('quasiquote(?=[%s])' % _delimiters), Keyword, ('#pop', 'quasiquoted-datum')), (_opening_parenthesis, Punctuation, ('#pop', 'unquoted-list')), (words(_keywords, suffix=('(?=[%s])' % _delimiters)), Keyword, '#pop'), (words(_builtins, suffix=('(?=[%s])' % _delimiters)), Name.Builtin, '#pop'), (_symbol, Name, '#pop'), include('datum*')], 'unquoted-list': [include('list'), ('(?!\\Z)', Text, 'unquoted-datum')], 'quasiquoted-datum': [include('datum'), (',@?', Operator, ('#pop', 'unquoted-datum')), (('unquote(-splicing)?(?=[%s])' % _delimiters), Keyword, ('#pop', 'unquoted-datum')), (_opening_parenthesis, Punctuation, ('#pop', 'quasiquoted-list')), include('datum*')], 'quasiquoted-list': [include('list'), ('(?!\\Z)', Text, 'quasiquoted-datum')], 'quoted-datum': [include('datum'), (_opening_parenthesis, Punctuation, ('#pop', 'quoted-list')), include('datum*')], 'quoted-list': [include('list'), ('(?!\\Z)', Text, 'quoted-datum')], 'block-comment': [('#\\|', Comment.Multiline, '#push'), ('\\|#', Comment.Multiline, '#pop'), ('[^#|]+|.', Comment.Multiline)], 'string': [('"', String.Double, '#pop'), ('(?s)\\\\([0-7]{1,3}|x[\\da-fA-F]{1,2}|u[\\da-fA-F]{1,4}|U[\\da-fA-F]{1,8}|.)', String.Escape), ('[^\\\\"]+', String.Double)]} |
class CodeGenOnnxConfig(OnnxConfigWithPast):
def __init__(self, config: PretrainedConfig, task: str='default', patching_specs: List[PatchingSpec]=None, use_past: bool=False):
super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
if (not getattr(self._config, 'pad_token_id', None)):
self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[(str, Mapping[(int, str)])]:
common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(common_inputs, direction='inputs')
common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
return common_inputs
    @property
    def num_layers(self) -> int:
return self._config.n_layer
    @property
    def num_attention_heads(self) -> int:
return self._config.n_head
def generate_dummy_inputs(self, tokenizer: PreTrainedTokenizer, batch_size: int=(- 1), seq_length: int=(- 1), is_pair: bool=False, framework: Optional[TensorType]=None) -> Mapping[(str, Any)]:
common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']})
if self.use_past:
if (not is_torch_available()):
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
(batch, seqlen) = common_inputs['input_ids'].shape
past_key_values_length = (seqlen + 2)
past_shape = (batch, self.num_attention_heads, past_key_values_length, (self._config.hidden_size // self.num_attention_heads))
ordered_inputs['past_key_values'] = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)]
ordered_inputs['attention_mask'] = common_inputs['attention_mask']
if self.use_past:
mask_dtype = ordered_inputs['attention_mask'].dtype
ordered_inputs['attention_mask'] = torch.cat([ordered_inputs['attention_mask'], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
return 13 |
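# Hedged usage sketch (illustrative, not part of the original row): exercising the
# dynamic-axes mapping above with a stock transformers CodeGenConfig.
from transformers import CodeGenConfig

onnx_config = CodeGenOnnxConfig(CodeGenConfig(), task='default', use_past=True)
# With use_past=True, 'attention_mask' spans 'past_sequence + sequence' and one
# past key/value pair is declared per decoder layer.
print(onnx_config.inputs)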
def _create_session_with_discord_token(sa: ServerApp, sid: (str | None)) -> User:
discord_user = sa.discord.fetch_user()
if (sa.enforce_role is not None):
if (not sa.enforce_role.verify_user(discord_user.id)):
logger().info('User %s is not authorized for connecting to the server', discord_user.name)
raise error.UserNotAuthorizedToUseServerError
user = _create_user_from_discord(discord_user)
if (sid is None):
return user
with sa.session(sid=sid) as session:
session['user-id'] = user.id
session['discord-access-token'] = flask.session['DISCORD_OAUTH2_TOKEN']
return user |
def ground_sort(i_op, qdmr, grounding_out):
assert (qdmr.ops[i_op] == 'sort')
if (len(qdmr.args[i_op]) == 3):
(data_arg, sort_arg, sort_dir_arg) = qdmr.args[i_op]
else:
(data_arg, sort_arg) = qdmr.args[i_op]
sort_dir_arg = None
if (sort_dir_arg is not None):
is_ascending_sort = ('descend' not in sort_dir_arg)
add_grounding_with_check(GroundingIndex(i_op, 2, sort_dir_arg), GroundingKey.make_sortdir_grounding(is_ascending_sort), grounding_out) |
@DATASETS.register_module()
class XMLDataset(CustomDataset):
def __init__(self, min_size=None, **kwargs):
assert (self.CLASSES or kwargs.get('classes', None)), 'CLASSES in `XMLDataset` can not be None.'
super(XMLDataset, self).__init__(**kwargs)
self.cat2label = {cat: i for (i, cat) in enumerate(self.CLASSES)}
self.min_size = min_size
def load_annotations(self, ann_file):
data_infos = []
img_ids = mmcv.list_from_file(ann_file)
for img_id in img_ids:
filename = f'JPEGImages/{img_id}.jpg'
xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
if (size is not None):
width = int(size.find('width').text)
height = int(size.find('height').text)
else:
img_path = osp.join(self.img_prefix, 'JPEGImages', '{}.jpg'.format(img_id))
img = Image.open(img_path)
(width, height) = img.size
data_infos.append(dict(id=img_id, filename=filename, width=width, height=height))
return data_infos
def _filter_imgs(self, min_size=32):
valid_inds = []
for (i, img_info) in enumerate(self.data_infos):
if (min(img_info['width'], img_info['height']) < min_size):
continue
if self.filter_empty_gt:
img_id = img_info['id']
xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
for obj in root.findall('object'):
name = obj.find('name').text
if (name in self.CLASSES):
valid_inds.append(i)
break
else:
valid_inds.append(i)
return valid_inds
def get_ann_info(self, idx):
img_id = self.data_infos[idx]['id']
xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
if (name not in self.CLASSES):
continue
label = self.cat2label[name]
difficult = obj.find('difficult')
difficult = (0 if (difficult is None) else int(difficult.text))
bnd_box = obj.find('bndbox')
bbox = [int(float(bnd_box.find('xmin').text)), int(float(bnd_box.find('ymin').text)), int(float(bnd_box.find('xmax').text)), int(float(bnd_box.find('ymax').text))]
ignore = False
if self.min_size:
assert (not self.test_mode)
w = (bbox[2] - bbox[0])
h = (bbox[3] - bbox[1])
if ((w < self.min_size) or (h < self.min_size)):
ignore = True
if (difficult or ignore):
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if (not bboxes):
bboxes = np.zeros((0, 4))
labels = np.zeros((0,))
else:
bboxes = (np.array(bboxes, ndmin=2) - 1)
labels = np.array(labels)
if (not bboxes_ignore):
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0,))
else:
bboxes_ignore = (np.array(bboxes_ignore, ndmin=2) - 1)
labels_ignore = np.array(labels_ignore)
ann = dict(bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64))
return ann
def get_cat_ids(self, idx):
cat_ids = []
img_id = self.data_infos[idx]['id']
xml_path = osp.join(self.img_prefix, 'Annotations', f'{img_id}.xml')
tree = ET.parse(xml_path)
root = tree.getroot()
for obj in root.findall('object'):
name = obj.find('name').text
if (name not in self.CLASSES):
continue
label = self.cat2label[name]
cat_ids.append(label)
return cat_ids |
class BoW(nn.Module):
def __init__(self, vocab: List[str], word_weights: Dict[(str, float)]={}, unknown_word_weight: float=1, cumulative_term_frequency: bool=True):
super(BoW, self).__init__()
vocab = list(set(vocab))
self.config_keys = ['vocab', 'word_weights', 'unknown_word_weight', 'cumulative_term_frequency']
self.vocab = vocab
self.word_weights = word_weights
self.unknown_word_weight = unknown_word_weight
self.cumulative_term_frequency = cumulative_term_frequency
self.weights = []
num_unknown_words = 0
for word in vocab:
weight = unknown_word_weight
if (word in word_weights):
weight = word_weights[word]
elif (word.lower() in word_weights):
weight = word_weights[word.lower()]
else:
num_unknown_words += 1
self.weights.append(weight)
logging.info('{} out of {} words without a weighting value. Set weight to {}'.format(num_unknown_words, len(vocab), unknown_word_weight))
self.tokenizer = WhitespaceTokenizer(vocab, stop_words=set(), do_lower_case=False)
self.sentence_embedding_dimension = len(vocab)
def forward(self, features: Dict[(str, Tensor)]):
return features
def tokenize(self, text: str) -> List[int]:
return self.tokenizer.tokenize(text)
def get_sentence_embedding_dimension(self):
return self.sentence_embedding_dimension
def get_sentence_features(self, tokens: List[int], pad_seq_length: int):
vector = np.zeros(self.get_sentence_embedding_dimension(), dtype=np.float32)
for token in tokens:
if self.cumulative_term_frequency:
vector[token] += self.weights[token]
else:
vector[token] = self.weights[token]
return {'sentence_embedding': torch.tensor([vector], dtype=torch.float)}
def get_config_dict(self):
return {key: self.__dict__[key] for key in self.config_keys}
def save(self, output_path):
with open(os.path.join(output_path, 'config.json'), 'w') as fOut:
json.dump(self.get_config_dict(), fOut, indent=2)
    @staticmethod
    def load(input_path):
with open(os.path.join(input_path, 'config.json')) as fIn:
config = json.load(fIn)
return BoW(**config) |
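# Hedged usage sketch: a tiny vocabulary; with cumulative term frequency the
# repeated token contributes its weight once per occurrence at its vocab index.
bow = BoW(vocab=['hello', 'world'], word_weights={'hello': 2.0})
ids = bow.tokenize('hello hello world')
feats = bow.get_sentence_features(ids, pad_seq_length=0)
print(feats['sentence_embedding'].shape)  # torch.Size([1, 2]); one slot per vocab word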
class LineEditDelegate(QtWidgets.QStyledItemDelegate):
def createEditor(self, parent, option, index):
editor = QtWidgets.QLineEdit(parent)
editor.setValidator(ExpressionValidator())
return editor
def setEditorData(self, editor, index):
value = index.data(QtCore.Qt.ItemDataRole.DisplayRole)
editor.setText(value)
def setModelData(self, editor, model, index):
value = editor.text()
model.setData(index, value, QtCore.Qt.ItemDataRole.EditRole)
def updateEditorGeometry(self, editor, option, index):
editor.setGeometry(option.rect) |
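# Hedged usage sketch: install the delegate on a view column so in-place edits
# go through ExpressionValidator; the view wiring is illustrative and requires
# a running QApplication.
view = QtWidgets.QTableView()
view.setItemDelegateForColumn(0, LineEditDelegate(view))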
def check_resp(resp, value, frequency, limit_db, prelude, context):
try:
value_resp = num.abs(evaluate1(resp, frequency))
except response.InvalidResponseError as e:
return Delivery(log=[('warning', ('Could not check response: %s' % str(e)), context)])
if (value_resp == 0.0):
return Delivery(log=[('warning', ('%s\n computed response is zero' % prelude), context)])
diff_db = (20.0 * num.log10((value_resp / value)))
if (num.abs(diff_db) > limit_db):
return Delivery(log=[('warning', ('%s\n reported value: %g\n computed value: %g\n at frequency [Hz]: %g\n factor: %g\n difference [dB]: %g\n limit [dB]: %g' % (prelude, value, value_resp, frequency, (value_resp / value), diff_db, limit_db)), context)])
return Delivery() |
class Panorama(Primitive):
def __init__(self, panorama, center=vec3(0.0, 0.0, 0.0), light_intensity=0.0, blur=0.0):
super().__init__(center, SkyBox_Material(panorama, light_intensity, blur), shadow=False)
l = SKYBOX_DISTANCE
self.light_intensity = light_intensity
self.collider_list += [Sphere_Collider(assigned_primitive=self, center=center, radius=SKYBOX_DISTANCE)]
def get_uv(self, hit):
return hit.collider.get_uv(hit) |
def find_memory_type(phys_addr):
if (phys_addr == 0):
return 'N/A'
if is_system_ram(phys_addr):
return 'System RAM'
if is_persistent_mem(phys_addr):
return 'Persistent Memory'
f.seek(0, 0)
for j in f:
m = re.split('-|:', j, 2)
if (int(m[0], 16) <= phys_addr <= int(m[1], 16)):
return m[2]
return 'N/A' |
class Blocks():
def __init__(self, tessellation, edges, buildings, id_name, unique_id):
self.tessellation = tessellation
self.edges = edges
self.buildings = buildings
self.id_name = id_name
self.unique_id = unique_id
if (id_name in buildings.columns):
raise ValueError(f"'{id_name}' column cannot be in the buildings GeoDataFrame.")
cut = gpd.overlay(tessellation, gpd.GeoDataFrame(geometry=edges.buffer(0.001)), how='difference')
cut = cut.explode(ignore_index=True)
weights = libpysal.weights.Queen.from_dataframe(cut, silence_warnings=True)
cut['component'] = weights.component_labels
buildings_c = buildings.copy()
buildings_c.geometry = buildings_c.representative_point()
centroids_temp_id = gpd.sjoin(buildings_c, cut[[cut.geometry.name, 'component']], how='left', predicate='within')
cells_copy = tessellation[[unique_id, tessellation.geometry.name]].merge(centroids_temp_id[[unique_id, 'component']], on=unique_id, how='left')
blocks = cells_copy.dissolve(by='component').explode(ignore_index=True)
blocks[id_name] = range(len(blocks))
blocks = blocks[[id_name, blocks.geometry.name]]
centroids_w_bl_id2 = gpd.sjoin(buildings_c, blocks, how='left', predicate='within')
self.buildings_id = centroids_w_bl_id2[id_name]
cells_m = tessellation[[unique_id]].merge(centroids_w_bl_id2[[unique_id, id_name]], on=unique_id, how='left')
self.tessellation_id = cells_m[id_name]
self.tessellation_id.index = self.tessellation.index
self.blocks = blocks |
class Target(object):
def __init__(self, targetInfo: Dict, browserContext: 'BrowserContext', sessionFactory: Callable[([], Coroutine[(Any, Any, CDPSession)])], ignoreHTTPSErrors: bool, defaultViewport: Optional[Dict], screenshotTaskQueue: List, loop: asyncio.AbstractEventLoop) -> None:
self._targetInfo = targetInfo
self._browserContext = browserContext
self._targetId = targetInfo.get('targetId', '')
self._sessionFactory = sessionFactory
self._ignoreHTTPSErrors = ignoreHTTPSErrors
self._defaultViewport = defaultViewport
self._screenshotTaskQueue = screenshotTaskQueue
self._loop = loop
self._page: Optional[Page] = None
self._initializedPromise = self._loop.create_future()
self._isClosedPromise = self._loop.create_future()
self._isInitialized = ((self._targetInfo['type'] != 'page') or (self._targetInfo['url'] != ''))
if self._isInitialized:
self._initializedCallback(True)
def _initializedCallback(self, bl: bool) -> None:
if self._initializedPromise.done():
self._initializedPromise = self._loop.create_future()
self._initializedPromise.set_result(bl)
def _closedCallback(self) -> None:
self._isClosedPromise.set_result(None)
async def createCDPSession(self) -> CDPSession:
return (await self._sessionFactory())
async def page(self) -> Optional[Page]:
if ((self._targetInfo['type'] in ['page', 'background_page']) and (self._page is None)):
client = (await self._sessionFactory())
new_page = (await Page.create(client, self, self._ignoreHTTPSErrors, self._defaultViewport, self._screenshotTaskQueue))
self._page = new_page
return new_page
return self._page
    @property
    def url(self) -> str:
return self._targetInfo['url']
    @property
    def type(self) -> str:
_type = self._targetInfo['type']
if (_type in ['page', 'background_page', 'service_worker', 'browser']):
return _type
return 'other'
    @property
    def browser(self) -> 'Browser':
return self._browserContext.browser
    @property
    def browserContext(self) -> 'BrowserContext':
return self._browserContext
    @property
    def opener(self) -> Optional['Target']:
openerId = self._targetInfo.get('openerId')
if (openerId is None):
return None
return self.browser._targets.get(openerId)
def _targetInfoChanged(self, targetInfo: Dict) -> None:
self._targetInfo = targetInfo
if ((not self._isInitialized) and ((self._targetInfo['type'] != 'page') or (self._targetInfo['url'] != ''))):
self._isInitialized = True
self._initializedCallback(True)
return |
def extract_and_save_image(dataset, save_dir):
if osp.exists(save_dir):
print('Folder "{}" already exists'.format(save_dir))
return
print('Extracting images to "{}" ...'.format(save_dir))
mkdir_if_missing(save_dir)
for i in range(len(dataset)):
(img, label) = dataset[i]
class_dir = osp.join(save_dir, str(label).zfill(3))
mkdir_if_missing(class_dir)
impath = osp.join(class_dir, (str((i + 1)).zfill(5) + '.jpg'))
img.save(impath) |
class ResNet101FeatureExtractor(nn.Module):
def __init__(self, use_input_norm=True, device=torch.device('cpu')):
super(ResNet101FeatureExtractor, self).__init__()
model = torchvision.models.resnet101(pretrained=True)
self.use_input_norm = use_input_norm
if self.use_input_norm:
mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(device)
std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(device)
self.register_buffer('mean', mean)
self.register_buffer('std', std)
self.features = nn.Sequential(*list(model.children())[:8])
for (k, v) in self.features.named_parameters():
v.requires_grad = False
def forward(self, x):
if self.use_input_norm:
x = ((x - self.mean) / self.std)
output = self.features(x)
return output |
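# Hedged usage sketch: the module keeps ResNet-101 children up to layer4
# (overall stride 32), so a 224x224 input yields a (N, 2048, 7, 7) feature map.
extractor = ResNet101FeatureExtractor(use_input_norm=True)
with torch.no_grad():
    feats = extractor(torch.rand(1, 3, 224, 224))
print(feats.shape)  # torch.Size([1, 2048, 7, 7])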
class CbEnterpriseEdr(Product):
product: str = 'cbc'
profile: str = 'default'
token: Optional[str] = None
org_key: Optional[str] = None
_device_group: Optional[list[str]] = None
_device_policy: Optional[list[str]] = None
_conn: CBCloudAPI
_limit: int = (- 1)
_raw: bool = False
def __init__(self, **kwargs):
self.url = (kwargs['url'] if ('url' in kwargs) else None)
self.token = (kwargs['token'] if ('token' in kwargs) else None)
self.profile = (kwargs['profile'] if ('profile' in kwargs) else 'default')
self.org_key = (kwargs['org_key'] if ('org_key' in kwargs) else None)
self._device_group = (kwargs['device_group'] if ('device_group' in kwargs) else None)
        self._device_policy = (kwargs['device_policy'] if ('device_policy' in kwargs) else None)
self._limit = (int(kwargs['limit']) if ('limit' in kwargs) else self._limit)
self._raw = (kwargs['raw'] if ('raw' in kwargs) else self._raw)
super().__init__(self.product, **kwargs)
def _authenticate(self) -> None:
if (self.token and self.url and self.org_key):
cb_conn = CBCloudAPI(token=self.token, url=self.url, org_key=self.org_key)
elif self.profile:
cb_conn = CBCloudAPI(profile=self.profile)
else:
cb_conn = CBCloudAPI()
self._conn = cb_conn
def build_query(self, filters: dict) -> QueryBuilder:
query_base = QueryBuilder()
for (key, value) in filters.items():
if (key == 'days'):
minutes_back = f'start:-{(value * 1440)}m'
minutes_back = _convert_relative_time(minutes_back)
query_base.and_(minutes_back)
elif (key == 'minutes'):
minutes_back = f'start:-{value}m'
minutes_back = _convert_relative_time(minutes_back)
query_base.and_(minutes_back)
elif (key == 'hostname'):
device_name = f'device_name:{value}'
query_base.and_(device_name)
elif (key == 'username'):
user_name = f'process_username:{value}'
query_base.and_(user_name)
else:
self._echo(f'Query filter {key} is not supported by product {self.product}', logging.WARNING)
if self._device_group:
device_group = []
for name in self._device_group:
device_group.append(f'device_group:"{name}"')
query_base.and_((('(' + ' OR '.join(device_group)) + ')'))
if self._device_policy:
device_policy = []
for name in self._device_policy:
device_policy.append(f'device_policy:"{name}"')
query_base.and_((('(' + ' OR '.join(device_policy)) + ')'))
return query_base
def divide_chunks(self, l: list, n: int) -> Generator:
for i in range(0, len(l), n):
(yield l[i:(i + n)])
def perform_query(self, tag: Tag, base_query: dict, query: str) -> set[Result]:
results = set()
parsed_base_query = self.build_query(base_query)
try:
self.log.debug(f'Query {tag}: {query}')
process = self._conn.select(Process)
full_query = parsed_base_query.where(query)
            self.log.debug(f'Full Query: {full_query}')
for proc in process.where(full_query):
deets = proc.get_details()
hostname = (deets['device_name'] if ('device_name' in deets) else 'None')
user = (deets['process_username'][0] if ('process_username' in deets) else 'None')
proc_name = (deets['process_name'] if ('process_name' in deets) else 'None')
cmdline = (deets['process_cmdline'][0] if ('process_cmdline' in deets) else 'None')
ts = (deets['device_timestamp'] if ('device_timestamp' in deets) else 'None')
proc_guid = (deets['process_guid'] if ('process_guid' in deets) else 'None')
result = Result(hostname, user, proc_name, cmdline, (ts, proc_guid))
results.add(result)
if ((self._limit > 0) and ((len(results) + 1) > self._limit)):
break
except cbc_sdk.errors.ApiError as e:
self._echo(f'CbC SDK Error (see log for details): {e}', logging.ERROR)
self.log.exception(e)
except KeyboardInterrupt:
self._echo('Caught CTRL-C. Returning what we have . . .')
        # Disabled alternative kept from the source (originally an inline string):
        # if self._raw:
        #     return raw_results
        # else:
        #     return results
return results
def process_search(self, tag: Tag, base_query: dict, query: str) -> None:
results = self.perform_query(tag, base_query, query)
self._add_results(list(results), tag)
def nested_process_search(self, tag: Tag, criteria: dict, base_query: dict) -> None:
results: list = []
for (search_field, terms) in criteria.items():
if (search_field == 'query'):
if isinstance(terms, list):
if (len(terms) > 1):
query = (('((' + ') OR ('.join(terms)) + '))')
else:
query = (('(' + terms[0]) + ')')
else:
query = terms
results += self.perform_query(tag, base_query, query)
else:
chunked_terms = list(self.divide_chunks(terms, 100))
for chunk in chunked_terms:
terms = [(f'"{term}"' if (' ' in term) else term) for term in chunk]
if (search_field not in PARAMETER_MAPPING):
self._echo(f'Query filter {search_field} is not supported by product {self.product}', logging.WARNING)
continue
query = (('(' + ' OR '.join((('%s:%s' % (PARAMETER_MAPPING[search_field], term)) for term in terms))) + ')')
results += self.perform_query(tag, base_query, query)
self.log.debug(f'Nested search results: {len(results)}')
self._add_results(list(results), tag)
def get_other_row_headers(self) -> list[str]:
return ['Device Timestamp', 'Process GUID'] |
def _best_effort_input_batch_size(flat_input):
for input_ in flat_input:
shape = input_.shape
if (shape.ndims is None):
continue
if (shape.ndims < 2):
raise ValueError(('Expected input tensor %s to have rank at least 2' % input_))
batch_size = shape[1].value
if (batch_size is not None):
return batch_size
return array_ops.shape(flat_input[0])[1] |
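# Hedged usage sketch (TF1-style graph code, to match the helper): inputs are
# assumed time-major [max_time, batch_size, depth], so dimension 1 is the batch.
import tensorflow as tf
x = tf.zeros([10, 32, 8])                     # static batch known
print(_best_effort_input_batch_size([x]))     # -> 32
y = tf.placeholder(tf.float32, [None, None, 8])
print(_best_effort_input_batch_size([y]))     # falls back to a dynamic shape op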
def main(options, arguments):
global previous_time
previous_time = time.time()
phases_path = options.input
    if (options.output is None):
outfile = phases_path.replace('.txt', '-diffs.json')
else:
outfile = options.output
hashes_dic = read_phashes_manifest(phases_path)
hashes = precompute_vectors(hashes_dic, phases_path)
hashes_diff = {}
blacklist = read_blacklist_dict(phases_path)
    if (options.device is None):
seek_queue_many(list(hashes_dic.keys()), hashes, outfile, blacklist, hashes_diff)
else:
devices = ['/gpu:0', '/gpu:1']
device = devices[int(options.device)]
with tf.device(device):
seek_queue_many_device(list(hashes_dic.keys()), hashes, outfile, blacklist, hashes_diff, devices, device)
os.remove((phases_path + '.new_dict.progress'))
os.remove((phases_path + '.pickle')) |
def extract_T1_features(wf, feature_type='histogram_whole_scan'):
feature_type = feature_type.lower()
basename = (lambda name: splitext(name)[0])
if (wf.mri_name is not None):
prefix = (basename(wf.mri_name) + '_')
else:
prefix = ''
out_csv_name = '{}{}_features.csv'.format(prefix, feature_type)
feat_dir = pjoin(wf.out_dir, cfg.outlier_feature_folder_name)
makedirs(feat_dir, exist_ok=True)
path_to_csv = (lambda sid_: pjoin(feat_dir, sid_, out_csv_name))
if (feature_type in ['histogram_whole_scan']):
extract_method = t1_histogram_whole_scan
else:
raise NotImplementedError('Requested feature type {} not implemented!\n\tAllowed options : {} '.format(feature_type, cfg.t1_mri_features_OLD))
feature_paths = dict()
num_subjects = len(wf.id_list)
for (counter, sid) in enumerate(wf.id_list):
print('{} : {}/{}'.format(sid, (counter + 1), num_subjects))
makedirs(pjoin(feat_dir, sid), exist_ok=True)
feat_file = path_to_csv(sid)
if (not pexists(feat_file)):
features = extract_method(wf.path_getter_inputs(sid))
try:
np.savetxt(feat_file, features, delimiter='\n', header=feature_type)
            except Exception as exc:
                raise IOError('Unable to save extracted features to disk!') from exc
feature_paths[sid] = feat_file
return feature_paths |
class ToggleValidationFixture(FileUploadInputFixture):
make_validation_fail = False
def new_domain_object(self):
fixture = self
class DomainObject():
fields = ExposedNames()
def make_field(self):
field = FileField(allow_multiple=True, label='Attached files')
field.add_validation_constraint(ToggleableConstraint(fixture))
return field
fields.files = make_field
events = ExposedNames()
events.submit = (lambda i: Event(label='Submit'))
return DomainObject()
check_script = 'return $(".reahl-nested-form").find(".reload_flag").length > 0'
def mark_nested_form(self):
self.web_fixture.driver_browser.execute_script('$(".reahl-nested-form").children().addClass("reload_flag")')
has_class = self.web_fixture.driver_browser.execute_script(self.check_script)
assert has_class, 'Something is wrong, could not place flags for checking reloading of form'
def nested_form_was_reloaded(self):
has_class = self.web_fixture.driver_browser.execute_script(self.check_script)
return (not has_class) |
class PlayQueryBlockNBT(Packet):
id = 1
to = 0
def __init__(self, transaction_id: int, x: int, y: int, z: int) -> None:
super().__init__()
self.transaction_id = transaction_id
(self.x, self.y, self.z) = (x, y, z)
    @classmethod
    def decode(cls, buf: Buffer) -> PlayQueryBlockNBT:
return cls(buf.unpack_varint(), *buf.unpack_position()) |
def icon_name(name):
return {'stackoverflow': 'stack-overflow', 'google-oauth': 'google', 'google-oauth2': 'google', 'google-openidconnect': 'google', 'yahoo-oauth': 'yahoo', 'facebook-app': 'facebook', 'email': 'envelope', 'vimeo': 'vimeo-square', 'linkedin-oauth2': 'linkedin', 'vk-oauth2': 'vk', 'live': 'windows', 'username': 'user'}.get(name, name) |
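# Hedged usage sketch: provider ids are normalized to Font Awesome icon names,
# and unmapped ids pass through unchanged.
assert icon_name('google-oauth2') == 'google'
assert icon_name('github') == 'github'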
class Net():
def __init__(self, points, features, is_training, setting):
bn_decay = setting.get_bn_decay(tf.train.get_global_step())
l0_xyz = points
l0_points = None
num_class = setting.num_class
(l1_xyz, l1_points) = pointnet_sa_module_msg(l0_xyz, l0_points, 512, [0.1, 0.2, 0.4], [32, 64, 128], [[32, 32, 64], [64, 64, 128], [64, 96, 128]], is_training, bn_decay, scope='layer1')
(l2_xyz, l2_points) = pointnet_sa_module_msg(l1_xyz, l1_points, 128, [0.2, 0.4, 0.8], [64, 64, 128], [[64, 64, 128], [128, 128, 256], [128, 128, 256]], is_training, bn_decay, scope='layer2')
        # The head below follows the standard PointNet++ MSG classifier (assumed;
        # the original referenced undefined `l3_input_shape` / `FC*_inputs_shape`
        # placeholders).
        (l3_xyz, l3_points, _) = pointnet_sa_module(l2_xyz, l2_points, npoint=None, radius=None, nsample=None, mlp=[256, 512, 1024], mlp2=None, group_all=True, is_training=is_training, bn_decay=bn_decay, scope='layer3')
        batch_size = points.get_shape()[0].value
        net = tf.reshape(l3_points, [batch_size, (- 1)])
        net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training, scope='fc1', bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp1')
        net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training, scope='fc2', bn_decay=bn_decay)
        net = tf_util.dropout(net, keep_prob=0.4, is_training=is_training, scope='dp2')
        net = tf_util.fully_connected(net, num_class, activation_fn=None, scope='fc3')
self.logits = tf.expand_dims(net, axis=1) |
# Assumed decorators: an sgkit-style cohort-reduction wrapper around numba's
# guvectorize (the `@`-prefixed names were truncated in the source).
@cohort_reduction
@numba_guvectorize(['(uint8[:], int64[:], int8[:], float64[:])', '(uint64[:], int64[:], int8[:], float64[:])', '(int8[:], int64[:], int8[:], float64[:])', '(int64[:], int64[:], int8[:], float64[:])', '(float32[:], int64[:], int8[:], float32[:])', '(float64[:], int64[:], int8[:], float64[:])'], '(n),(n),(c)->(c)')
def cohort_nanmean(x: ArrayLike, cohort: ArrayLike, _: ArrayLike, out: ArrayLike) -> None:
out[:] = 0
n = len(x)
c = len(_)
count = np.zeros(c)
for i in range(n):
j = cohort[i]
v = x[i]
if ((not np.isnan(v)) and (j >= 0)):
out[j] += v
count[j] += 1
for j in range(c):
out[j] /= count[j] |
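# Hedged usage sketch: with the gufunc signature '(n),(n),(c)->(c)', the third
# argument only fixes the cohort count; NaNs and negative cohort ids are skipped.
import numpy as np
x = np.array([1.0, 2.0, np.nan, 4.0])
cohort = np.array([0, 0, 1, 1])
print(cohort_nanmean(x, cohort, np.zeros(2, dtype=np.int8)))  # [1.5 4.0]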
def update_world(world, time_elapsed):
num_substeps = world.env.get_num_update_substeps()
timestep = (time_elapsed / num_substeps)
num_substeps = (1 if (time_elapsed == 0) else num_substeps)
for i in range(num_substeps):
world.update(timestep)
valid_episode = world.env.check_valid_episode()
if valid_episode:
end_episode = world.env.is_episode_end()
if end_episode:
world.end_episode()
world.reset()
break
else:
world.reset()
break
return |
class Continuous_MountainCarEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array'], 'video.frames_per_second': 30}
def __init__(self):
self.min_action = (- 1.0)
self.max_action = 1.0
self.min_position = (- 1.2)
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.45
self.power = 0.0015
self.low_state = np.array([self.min_position, (- self.max_speed)])
self.high_state = np.array([self.max_position, self.max_speed])
self.viewer = None
self.action_space = spaces.Box(self.min_action, self.max_action, shape=(1,))
self.observation_space = spaces.Box(self.low_state, self.high_state)
self._seed()
self.reset()
def _seed(self, seed=None):
(self.np_random, seed) = seeding.np_random(seed)
return [seed]
def _step(self, action):
position = self.state[0]
velocity = self.state[1]
force = min(max(action[0], (- 1.0)), 1.0)
velocity += ((force * self.power) - (0.0025 * math.cos((3 * position))))
if (velocity > self.max_speed):
velocity = self.max_speed
if (velocity < (- self.max_speed)):
velocity = (- self.max_speed)
position += velocity
if (position > self.max_position):
position = self.max_position
if (position < self.min_position):
position = self.min_position
if ((position == self.min_position) and (velocity < 0)):
velocity = 0
done = bool((position >= self.goal_position))
reward = 0
if done:
reward = 100.0
reward -= (math.pow(action[0], 2) * 0.1)
self.state = np.array([position, velocity])
return (self.state, reward, done, {})
def _reset(self):
self.state = np.array([self.np_random.uniform(low=(- 0.6), high=(- 0.4)), 0])
return np.array(self.state)
def _height(self, xs):
return ((np.sin((3 * xs)) * 0.45) + 0.55)
def _render(self, mode='human', close=False):
if close:
if (self.viewer is not None):
self.viewer.close()
self.viewer = None
return
screen_width = 600
screen_height = 400
world_width = (self.max_position - self.min_position)
scale = (screen_width / world_width)
carwidth = 40
carheight = 20
if (self.viewer is None):
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(screen_width, screen_height)
xs = np.linspace(self.min_position, self.max_position, 100)
ys = self._height(xs)
xys = list(zip(((xs - self.min_position) * scale), (ys * scale)))
self.track = rendering.make_polyline(xys)
self.track.set_linewidth(4)
self.viewer.add_geom(self.track)
clearance = 10
(l, r, t, b) = (((- carwidth) / 2), (carwidth / 2), carheight, 0)
car = rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
car.add_attr(rendering.Transform(translation=(0, clearance)))
self.cartrans = rendering.Transform()
car.add_attr(self.cartrans)
self.viewer.add_geom(car)
frontwheel = rendering.make_circle((carheight / 2.5))
frontwheel.set_color(0.5, 0.5, 0.5)
frontwheel.add_attr(rendering.Transform(translation=((carwidth / 4), clearance)))
frontwheel.add_attr(self.cartrans)
self.viewer.add_geom(frontwheel)
backwheel = rendering.make_circle((carheight / 2.5))
backwheel.add_attr(rendering.Transform(translation=(((- carwidth) / 4), clearance)))
backwheel.add_attr(self.cartrans)
backwheel.set_color(0.5, 0.5, 0.5)
self.viewer.add_geom(backwheel)
flagx = ((self.goal_position - self.min_position) * scale)
flagy1 = (self._height(self.goal_position) * scale)
flagy2 = (flagy1 + 50)
flagpole = rendering.Line((flagx, flagy1), (flagx, flagy2))
self.viewer.add_geom(flagpole)
flag = rendering.FilledPolygon([(flagx, flagy2), (flagx, (flagy2 - 10)), ((flagx + 25), (flagy2 - 5))])
flag.set_color(0.8, 0.8, 0)
self.viewer.add_geom(flag)
pos = self.state[0]
self.cartrans.set_translation(((pos - self.min_position) * scale), (self._height(pos) * scale))
self.cartrans.set_rotation(math.cos((3 * pos)))
return self.viewer.render(return_rgb_array=(mode == 'rgb_array')) |
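# Hedged usage sketch: this is the old-style gym API (underscore methods wired
# up by the gym.Env wrappers of that era); actions are 1-element arrays.
import numpy as np
env = Continuous_MountainCarEnv()
obs = env._reset()
for _ in range(5):
    (obs, reward, done, info) = env._step(np.array([0.5]))
    if done:
        break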
# Assumed decorator: IPython's register_cell_magic (the name was truncated in the source).
@register_cell_magic
def workspacefile(line: str, cell: str) -> None:
workspace = get_workspace()
(fs, path) = fsspec.core.url_to_fs(workspace)
path = posixpath.join(path, line)
base = posixpath.dirname(path)
if (not fs.exists(base)):
fs.mkdirs(base, exist_ok=True)
with fs.open(path, 'wt') as f:
f.write(cell)
print(f'Added {line} to workspace {workspace}') |
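# Hedged usage sketch: once registered as a cell magic in IPython/Jupyter,
#
#   %%workspacefile conf/settings.yaml
#   key: value
#
# writes the cell body to <workspace>/conf/settings.yaml through fsspec.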
class Stem(nn.Module):
def __init__(self, in_channels, stem_channels, out_channels, expand_ratio, conv_cfg=None, norm_cfg=dict(type='BN'), with_cp=False):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.conv1 = ConvModule(in_channels=in_channels, out_channels=stem_channels, kernel_size=3, stride=2, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=dict(type='ReLU'))
mid_channels = int(round((stem_channels * expand_ratio)))
branch_channels = (stem_channels // 2)
if (stem_channels == self.out_channels):
inc_channels = (self.out_channels - branch_channels)
else:
inc_channels = (self.out_channels - stem_channels)
self.branch1 = nn.Sequential(ConvModule(branch_channels, branch_channels, kernel_size=3, stride=2, padding=1, groups=branch_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None), ConvModule(branch_channels, inc_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU')))
self.expand_conv = ConvModule(branch_channels, mid_channels, kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU'))
self.depthwise_conv = ConvModule(mid_channels, mid_channels, kernel_size=3, stride=2, padding=1, groups=mid_channels, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=None)
self.linear_conv = ConvModule(mid_channels, (branch_channels if (stem_channels == self.out_channels) else stem_channels), kernel_size=1, stride=1, padding=0, conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=dict(type='ReLU'))
def forward(self, x):
def _inner_forward(x):
x = self.conv1(x)
(x1, x2) = x.chunk(2, dim=1)
x2 = self.expand_conv(x2)
x2 = self.depthwise_conv(x2)
x2 = self.linear_conv(x2)
out = torch.cat((self.branch1(x1), x2), dim=1)
out = channel_shuffle(out, 2)
return out
if (self.with_cp and x.requires_grad):
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out |
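# Hedged usage sketch: conv1 and the two stride-2 branches each halve the
# spatial size, so the stem downsamples by 4 overall.
stem = Stem(in_channels=3, stem_channels=32, out_channels=32, expand_ratio=1)
out = stem(torch.rand(1, 3, 64, 64))
print(out.shape)  # torch.Size([1, 32, 16, 16])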
class VContainer(SplitContainer):
def __init__(self, area):
SplitContainer.__init__(self, area, QtCore.Qt.Orientation.Vertical)
def type(self):
return 'vertical'
def updateStretch(self):
x = 0
y = 0
sizes = []
for i in range(self.count()):
(wx, wy) = self.widget(i).stretch()
y += wy
x = max(x, wx)
sizes.append(wy)
self.setStretch(x, y)
tot = float(sum(sizes))
if (tot == 0):
scale = 1.0
else:
scale = (self.height() / tot)
self.setSizes([int((s * scale)) for s in sizes]) |
def _set_image_or_guide(self, image_or_guide: torch.Tensor, attr: str, comparison_only: bool=False, **kwargs: Any) -> None:
for op in self._losses():
if (comparison_only and (not isinstance(op, loss.ComparisonLoss))):
continue
setter = getattr(op, f'set_{attr}')
setter(image_or_guide, **kwargs) |
class WeaviateUploader(BaseUploader):
client = None
upload_params = {}
    @classmethod
    def init_client(cls, host, distance, connection_params, upload_params):
url = f" WEAVIATE_DEFAULT_PORT)}"
cls.client = Client(url, **connection_params)
cls.upload_params = upload_params
cls.connection_params = connection_params
    @staticmethod
    def _update_geo_data(data_object):
keys = data_object.keys()
for key in keys:
if isinstance(data_object[key], dict):
if (lat := data_object[key].pop('lat', None)):
data_object[key]['latitude'] = lat
if (lon := data_object[key].pop('lon', None)):
data_object[key]['longitude'] = lon
return data_object
    @classmethod
    def upload_batch(cls, ids: List[int], vectors: List[list], metadata: List[Optional[dict]]):
cls.client.batch.configure(batch_size=100, timeout_retries=3)
with cls.client.batch as batch:
for (id_, vector, data_object) in zip(ids, vectors, metadata):
data_object = cls._update_geo_data((data_object or {}))
batch.add_data_object(data_object=data_object, class_name=WEAVIATE_CLASS_NAME, uuid=uuid.UUID(int=id_).hex, vector=vector)
batch.create_objects() |
def fake_validator(*errors):
errors = list(reversed(errors))
class FakeValidator():
def __init__(self, *args, **kwargs):
pass
def iter_errors(self, instance):
if errors:
return errors.pop()
return []
def check_schema(self, schema):
pass
return FakeValidator |
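# Hedged usage sketch: each iter_errors call pops the next prepared batch of
# errors (in the order passed to fake_validator), then yields [] once exhausted.
FakeV = fake_validator(['first'], ['second'])
v = FakeV()
print(v.iter_errors({}))  # ['first']
print(v.iter_errors({}))  # ['second']
print(v.iter_errors({}))  # []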
@pytest.mark.usefixtures('mock_os_environ')
def test_file_argument_force_overwrite(testdir):
testdir.makeini('\n [pytest]\n env_files =\n myenv.txt\n ')
testdir.maketxtfile(myenv='FOO=BAR\nSPAM=EGGS')
tmp_env_file = testdir.maketxtfile(tmpenv='FOO=BAZ\nBAR=SPAM')
testdir.makepyfile("\n import os\n\n def test_env_foo():\n assert os.environ.get('FOO') == 'BAZ'\n\n def test_env_spam():\n assert os.environ.get('SPAM') == 'EGGS'\n\n def test_env_bar():\n assert os.environ.get('BAR') == 'SPAM'\n ")
result = testdir.runpytest('-v', '--envfile', str(tmp_env_file))
result.stdout.fnmatch_lines(['*::test_env_foo PASSED*', '*::test_env_spam PASSED*', '*::test_env_bar PASSED*'])
assert (result.ret == 0) |
def collate_fn_mmg(batch):
(obj_point_list, obj_label_list, obj_2d_feats) = ([], [], [])
rel_label_list = []
(edge_indices, descriptor) = ([], [])
batch_ids = []
count = 0
for (i, b) in enumerate(batch):
obj_point_list.append(b[0])
obj_2d_feats.append(b[1])
obj_label_list.append(b[3])
rel_label_list.append(b[4])
edge_indices.append((b[5] + count))
descriptor.append(b[6])
count += b[0].shape[0]
batch_ids.append(torch.full((b[0].shape[0], 1), i))
return (torch.cat(obj_point_list, dim=0), torch.cat(obj_2d_feats, dim=0), torch.cat(obj_label_list, dim=0), torch.cat(rel_label_list, dim=0), torch.cat(edge_indices, dim=0), torch.cat(descriptor, dim=0), torch.cat(batch_ids, dim=0)) |
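# Hedged usage sketch: the collate fn concatenates variable-size scenes along
# dim 0 and offsets edge indices by the running object count, so a DataLoader
# can batch scenes of different sizes. `dataset` is illustrative.
from torch.utils.data import DataLoader
loader = DataLoader(dataset, batch_size=4, collate_fn=collate_fn_mmg)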
class MultiSelfAttention(SequenceMapper):
def __init__(self, n_heads: int, project_size: Optional[int], memory_size: Optional[int]=None, shared_project: bool=False, project_bias: bool=False, bilinear_comp: bool=False, init='glorot_uniform', merge: Optional[MergeLayer]=None, scale=True, bias=True):
self.n_heads = n_heads
self.bilinear_comp = bilinear_comp
self.merge = merge
self.project_bias = project_bias
self.project_size = project_size
self.shared_project = shared_project
self.memory_size = memory_size
self.scale = scale
self.bias = bias
self.init = init
def apply(self, is_train, x, mask=None):
batch_size = tf.shape(x)[0]
x_word_dim = tf.shape(x)[1]
x_feature_dim = x.shape.as_list()[(- 1)]
project_size = self.project_size
if (project_size is None):
project_size = (x_feature_dim // self.n_heads)
if ((x_feature_dim % self.n_heads) != 0):
raise ValueError(f'feature dim {x_feature_dim} is not divisible by n_heads={self.n_heads}')
mem_size = self.memory_size
if (mem_size is None):
mem_size = project_size
init = get_keras_initialization(self.init)
query_proj = tf.get_variable('query_proj', (x_feature_dim, self.n_heads, project_size), initializer=init)
if self.shared_project:
key_proj = query_proj
else:
key_proj = tf.get_variable('key_proj', (x_feature_dim, self.n_heads, project_size), initializer=init)
mem_proj = tf.get_variable('mem_proj', (x_feature_dim, self.n_heads, mem_size), initializer=init)
queries = tf.tensordot(x, query_proj, [[2], [0]])
keys = tf.tensordot(x, key_proj, [[2], [0]])
if self.project_bias:
queries += tf.get_variable('query_bias', (1, 1, self.n_heads, project_size), initializer=tf.zeros_initializer())
keys += tf.get_variable('key_bias', (1, 1, self.n_heads, project_size), initializer=tf.zeros_initializer())
dist_matrix = tf.einsum('bwhd,bkhd->bwhk', queries, keys)  # (batch, word, head, key); the mask/bias broadcasts below and the second einsum both expect heads on axis 2
if self.scale:
dist_matrix /= tf.sqrt(float(project_size))
if self.bilinear_comp:
query_bias_proj = tf.get_variable('query_bias_proj', (x_feature_dim, self.n_heads), initializer=init)
key_bias_proj = tf.get_variable('key_bias_proj', (x_feature_dim, self.n_heads), initializer=init)
dist_matrix += tf.expand_dims(tf.tensordot(x, query_bias_proj, [[2], [0]]), 2)
dist_matrix += tf.expand_dims(tf.tensordot(x, key_bias_proj, [[2], [0]]), 1)
joint_mask = compute_attention_mask(mask, mask, x_word_dim, x_word_dim)
if (joint_mask is not None):
dist_matrix += tf.expand_dims((VERY_NEGATIVE_NUMBER * (1 - tf.cast(joint_mask, dist_matrix.dtype))), 2)
dist_matrix += tf.expand_dims(tf.expand_dims((tf.eye(x_word_dim) * VERY_NEGATIVE_NUMBER), 0), 2)
if self.bias:
bias = tf.get_variable('bias', (1, 1, self.n_heads, 1), initializer=tf.zeros_initializer())
dist_matrix += bias
select_probs = tf.nn.softmax(dist_matrix)
memories = tf.tensordot(x, mem_proj, [[2], [0]])
response = tf.einsum('bwhk,bkhd->bwhd', select_probs, memories)
response = tf.reshape(response, (batch_size, x_word_dim, (self.n_heads * mem_size)))
if (self.merge is not None):
with tf.variable_scope('merge'):
response = self.merge.apply(is_train, x, response)
return response |
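To make the two einsum contractions concrete, here is a NumPy replay of the attention core with small shapes; it mirrors only the math, not the TF variables, masking, or bias terms:

import numpy as np

b, w, h, d = 2, 5, 3, 4                       # batch, words, heads, head dim
rng = np.random.default_rng(0)
queries = rng.standard_normal((b, w, h, d))
keys = rng.standard_normal((b, w, h, d))
memories = rng.standard_normal((b, w, h, d))

# per-head scores between every (query word, key word) pair
dist = np.einsum('bwhd,bkhd->bwhk', queries, keys) / np.sqrt(d)
probs = np.exp(dist) / np.exp(dist).sum(-1, keepdims=True)  # softmax over keys
response = np.einsum('bwhk,bkhd->bwhd', probs, memories)
print(response.shape)  # (2, 5, 3, 4), reshaped to (2, 5, 12) in apply()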
def tensor6(name: Optional[str]=None, *, dtype: Optional['DTypeLike']=None, shape: Optional[tuple[ST, ST, ST, ST, ST, ST]]=(None, None, None, None, None, None)) -> 'TensorVariable':
if (dtype is None):
dtype = config.floatX
shape = _validate_static_shape(shape, ndim=6)
type = TensorType(dtype, shape=shape)
return type(name) |
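Usage matches the other tensorN constructors: the call returns a symbolic 6-dimensional variable (assuming the PyTensor-style module this appears to come from):

x = tensor6('x')  # dtype defaults to config.floatX, shape (None,) * 6
assert x.ndim == 6
y = tensor6('y', dtype='int64', shape=(2, None, None, None, None, None))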
def test_fips_metadata_excludes_md5_and_blake2(monkeypatch):
replaced_blake2b = pretend.raiser(ValueError('fipsmode'))
replaced_md5 = pretend.raiser(ValueError('fipsmode'))
monkeypatch.setattr(package_file.hashlib, 'md5', replaced_md5)
monkeypatch.setattr(package_file.hashlib, 'blake2b', replaced_blake2b)
filename = 'tests/fixtures/twine-1.5.0-py2.py3-none-any.whl'
pf = package_file.PackageFile.from_filename(filename, None)
mddict = pf.metadata_dictionary()
assert ('md5_digest' not in mddict)
assert ('blake2_256_digest' not in mddict) |
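pretend.raiser builds a callable that raises the given exception on any call, standing in for hashlib constructors that fail under FIPS mode; a quick illustration (assuming the pretend package is installed):

import pretend

fips_md5 = pretend.raiser(ValueError('fipsmode'))
try:
    fips_md5(b'data')
except ValueError as exc:
    print(exc)  # fipsmode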
def test_all_coarse_grains_for_blackbox():
blackbox = macro.Blackbox(((0, 1),), (0, 1))
assert (list(macro.all_coarse_grains_for_blackbox(blackbox)) == [macro.CoarseGrain(((0, 1),), (((0, 1), (2,)),)), macro.CoarseGrain(((0, 1),), (((0, 2), (1,)),)), macro.CoarseGrain(((0, 1),), (((0,), (1, 2)),))]) |