code stringlengths 281 23.7M |
|---|
# NOTE(review): the line below looks like a truncated decorator -- the
# '@pytest.mark' prefix appears to have been stripped during extraction.
.parametrize('cfg_file', ['kie/sdmgr/sdmgr_novisual_60e_wildreceipt.py', 'kie/sdmgr/sdmgr_unet16_60e_wildreceipt.py'])
def test_sdmgr_pipeline(cfg_file):
 """Smoke-test the SDMGR KIE detector: build it from config, run a
 training forward pass (must yield a loss dict), a per-image no-grad
 inference pass, and finally the result-visualisation helper."""
 model = _get_detector_cfg(cfg_file)
 from mmocr.models import build_detector
 detector = build_detector(model)
 # One 3-channel 128x128 image; the leading 0 is presumably the number of
 # ground-truth regions requested from the demo-input helper -- TODO confirm.
 input_shape = (1, 3, 128, 128)
 mm_inputs = _demo_mm_inputs(0, input_shape)
 imgs = mm_inputs.pop('imgs')
 img_metas = mm_inputs.pop('img_metas')
 relations = mm_inputs.pop('relations')
 texts = mm_inputs.pop('texts')
 gt_bboxes = mm_inputs.pop('gt_bboxes')
 gt_labels = mm_inputs.pop('gt_labels')
 # Training mode: forward() returns a dict of named losses.
 losses = detector.forward(imgs, img_metas, relations=relations, texts=texts, gt_bboxes=gt_bboxes, gt_labels=gt_labels)
 assert isinstance(losses, dict)
 # Inference mode: one image at a time with return_loss=False.
 with torch.no_grad():
  batch_results = []
  for idx in range(len(img_metas)):
   result = detector.forward(imgs[idx:(idx + 1)], None, return_loss=False, relations=[relations[idx]], texts=[texts[idx]], gt_bboxes=[gt_bboxes[idx]])
   batch_results.append(result)
 # Visualisation: dummy node scores plus one quadrilateral (8-coord) box.
 results = {'nodes': torch.randn(1, 3)}
 boxes = [[1, 1, 2, 1, 2, 2, 1, 2]]
 img = np.random.rand(5, 5, 3)
 detector.show_result(img, results, boxes)
class PlotArgs():
    """Bundle of display options for a single plotted signal.

    Either ``name`` or ``states`` must be supplied; when only ``states`` is
    given, the plot takes its name from the first state entry.
    """

    def __init__(self, name=None, states=None, sigma_bounds=None, is_angle=None,
                 rad2deg=None, max_length=None, connect=True, symbol='o',
                 symbol_size=2, px_mode=True, color=None, hidden=False):
        if name is not None:
            self.name = name
        elif states is not None:
            self.name = states[0]
        else:
            raise ValueError('Must provide a plot name or state names.')
        if states is None:
            self.state_names = [self.name]
        elif isinstance(states, (list, tuple, np.ndarray)):
            self.state_names = states
        else:
            # A single scalar state is wrapped into a one-element list.
            self.state_names = [states]
        self.sigma_bounds = sigma_bounds
        # Anything converted rad->deg is necessarily an angle.
        self.is_angle = is_angle or rad2deg
        self.rad2deg = rad2deg
        self.max_length = max_length
        self.connect = connect
        self.symbol = symbol
        self.symbol_size = symbol_size
        self.px_mode = px_mode
        self.color = color
        self.hidden = hidden

    def set_color(self, color):
        """Assign the trace color after construction."""
        self.color = color
def preprocess_degreeLists():
    """Compress the on-disk 'degreeList' into per-(vertex, layer) arrays of
    (degree, frequency) pairs sorted by degree, saved as 'compactDegreeList'.

    Reads and writes through restoreVariableFromDisk/saveVariableOnDisk.
    """
    from collections import Counter
    logging.info('Recovering degreeList from disk...')
    degreeList = restoreVariableFromDisk('degreeList')
    logging.info('Creating compactDegreeList...')
    dList = {}
    for v, layers in degreeList.items():
        dList[v] = {}
        for layer, degreeListLayer in layers.items():
            # Counter replaces the hand-rolled frequency dict; sorted()
            # orders the (degree, freq) pairs by degree (degrees are unique
            # keys, so the sort is equivalent to sorting on x[0]).
            frequency = Counter(degreeListLayer)
            dList[v][layer] = np.array(sorted(frequency.items()), dtype='float')
    logging.info('compactDegreeList created!')
    saveVariableOnDisk(dList, 'compactDegreeList')
class screen2Widget(Container):
    """Yellow 427x480 absolutely-positioned container for 'Screen 2':
    a label, a text input, and a send button (behaviour wired elsewhere)."""

    def __init__(self, **kwargs):
        super(screen2Widget, self).__init__(**kwargs)
        # Apply the fixed layout/appearance properties one by one so the
        # style object sees the same __setitem__ calls as before.
        for prop, value in (('position', 'absolute'),
                            ('overflow', 'auto'),
                            ('background-color', '#ffff80'),
                            ('left', '10px'),
                            ('top', '10px'),
                            ('margin', '0px'),
                            ('width', '427px'),
                            ('display', 'block'),
                            ('height', '480px')):
            self.style[prop] = value
        testlabel = Label('This is Screen 2!')
        mytextbox = TextInput(single_line=True, hint='Write something to be send to screen 1')
        btnsend = Button('Send Text Input to Screen 1')
        self.append(testlabel, 'testlabel')
        self.append(mytextbox, 'mytextbox')
        self.append(btnsend, 'btnsend')
def convert_to_nested_clauses(thought):
    """Rewrite a known chain-of-thought string into its nested-clause form.

    Matching is by substring marker, checked in a fixed order; returns None
    when no marker matches.
    """
    rewrites = (
        ('Shawn started', 'Shawn, who originally had 5 toys, got 4 more from his parents. 5 + 4 = 9.'),
        ('There are originally 3 cars', 'In the parking lot, where there were originally 3 cars, 2 more cars arrived. 3 + 2 = 5.'),
        ('Jason started', 'Jason, who started with 20 lollipops, gave some to Denny, leaving him with 12. 20 - 12 = 8.'),
        ('There were originally 9 computers', 'In the server room, where there were originally 9 computers, 20 more were added over 4 days. 9 + 20 is 29.'),
        ('There are 15 trees originally', 'In the grove, which originally had 15 trees, more trees were planted to make it 21 in total. So there must have been 21 - 15 = 6.'),
        ('Originally, Leah had', 'Leah, who had 32 chocolates, and her sister, who had 42, ate some. 32 + 42 = 74. After eating 35, they had 74 - 35 = 39.'),
        ('Olivia had', 'Olivia, who had 23 dollars, spent 15 on bagels. 23 - 15 is 8.'),
        ('Michael started', 'Michael, who had 58 golf balls, lost some over two days. After Tuesday, he had 35 left. After Wednesday, 33 he had left.'),
    )
    for marker, rewritten in rewrites:
        if marker in thought:
            return rewritten
    return None
def lock(func=None, **kwgs):
    """Method decorator that runs the wrapped call under ``self.lock``.

    Works bare (``@lock``) or parameterized with acquire() keyword
    arguments (``@lock(blocking=False)``). The lock is always released,
    even when the wrapped method raises.

    BUG FIX: the bare ``(func)`` expression in the original was a stripped
    ``@wraps(func)`` decorator; without it the wrapper lost the wrapped
    method's name/docstring. functools.wraps is restored here.
    """
    from functools import wraps
    if func is None:
        # Called with arguments: return a decorator bound to those kwargs.
        return partial(lock, **kwgs)

    @wraps(func)
    def wrapped(self, *args, **kwargs):
        self.lock.acquire(**kwgs)
        try:
            return func(self, *args, **kwargs)
        finally:
            self.lock.release()
    return wrapped
class QuestionHistory():
    """Remembers when each DNS question was last asked and which answers
    were already known, to support duplicate-question suppression."""

    def __init__(self) -> None:
        # question -> (asked-at timestamp, answers known at that time)
        self._history: Dict[(DNSQuestion, Tuple[(float, Set[DNSRecord])])] = {}

    def add_question_at_time(self, question: DNSQuestion, now: _float, known_answers: Set[DNSRecord]) -> None:
        """Record that *question* was asked at *now* with *known_answers*."""
        self._history[question] = (now, known_answers)

    def suppresses(self, question: DNSQuestion, now: _float, known_answers: Set[DNSRecord]) -> bool:
        """Return True when a recent identical question makes this one redundant."""
        record = self._history.get(question)
        if record is None:
            return False
        asked_at, prior_answers = record
        if now - asked_at > _DUPLICATE_QUESTION_INTERVAL:
            # Too old to count as a duplicate.
            return False
        # Suppress only when the earlier asker already knew every answer
        # we know now (no extra prior answers).
        return not (prior_answers - known_answers)

    def async_expire(self, now: _float) -> None:
        """Drop history entries older than the duplicate-question window."""
        expired: List[DNSQuestion] = [
            question
            for question, (asked_at, _) in self._history.items()
            if now - asked_at > _DUPLICATE_QUESTION_INTERVAL
        ]
        for question in expired:
            del self._history[question]

    def clear(self) -> None:
        """Forget all recorded questions."""
        self._history.clear()
def average_named_params(named_params_list, average_weights_dict_list, inplace=True):
    """Compute a weighted average of parameter dicts.

    Each entry of ``named_params_list`` is either a bare param dict or a
    ``(sample_count, param_dict)`` pair; ``average_weights_dict_list[i]``
    is the weight for entry i. When ``inplace`` the first entry's dict is
    overwritten and returned, otherwise a deep copy is averaged into.
    """
    # The original detects the pair-vs-dict layout from the FIRST entry and
    # applies it to all entries; preserve that exactly (type(), not isinstance).
    is_pair = type(named_params_list[0]) in (tuple, list)
    first = named_params_list[0] if inplace else deepcopy(named_params_list[0])
    averaged_params = first[1] if is_pair else first
    for key in averaged_params.keys():
        for idx, entry in enumerate(named_params_list):
            local_named_params = entry[1] if is_pair else entry
            weight = average_weights_dict_list[idx]
            target_dtype = averaged_params[key].dtype
            if idx == 0:
                # First contribution replaces the slot (cast keeps dtype stable).
                averaged_params[key] = (local_named_params[key] * weight).type(target_dtype)
            else:
                # Later contributions are moved to the accumulator's device.
                contribution = local_named_params[key].to(averaged_params[key].device) * weight
                averaged_params[key] += contribution.type(target_dtype)
    return averaged_params
def get_split(metrics, split):
    """Return the 'seen'/'unseen'/'all' portion of a metrics DataFrame.

    Row ids are parsed from the trailing '_<n>' of the index. NOTE: this
    mutates *metrics* by adding a temporary 'id' column (dropped in the
    returned slice), matching the original behaviour.
    """
    if split == 'all':
        return metrics
    metrics['id'] = metrics.index.str.rsplit(pat='_', n=1).str[1].astype(int)
    # Sparse ids (max id beyond row count) shift the seen/unseen boundary.
    if metrics['id'].max() > len(metrics):
        split_id = math.floor(len(metrics) * 1.5) / 2
    else:
        split_id = len(metrics) / 2
    if split == 'seen':
        mask = metrics['id'] < split_id
    elif split == 'unseen':
        mask = metrics['id'] >= split_id
    else:
        raise ValueError
    return metrics[mask].drop(columns='id')
class _ACArray(np.ndarray, abc.Mapping):
 """ndarray subclass that also behaves like a read-only mapping: keys
 contained in the array are resolved through the injected ``skc_slicer``
 callable, while anything else falls through to numpy indexing.

 NOTE(review): the bare ``_inherit(...)`` statements below look like
 stripped ``@_inherit(...)`` decorators (the '@' was lost during
 extraction) -- confirm against the original source.
 """
 def __new__(cls, input_array, skc_slicer):
  # View-cast so the slicer metadata rides on the array instance.
  obj = np.asarray(input_array).view(cls)
  obj._skc_slicer = skc_slicer
  return obj
 _inherit(np.ndarray.__getitem__)
 def __getitem__(self, k):
  # Mapping-style access first; unknown keys are treated as numpy indices.
  try:
   if (k in self):
    return self._skc_slicer(k)
   return super().__getitem__(k)
  except IndexError:
   raise IndexError(k)
 def __setitem__(self, k, v):
  # Container is read-only by design.
  raise AttributeError('_SlicerArray are read-only')
 _inherit(abc.Mapping.items)
 def items(self):
  return ((e, self[e]) for e in self)
 _inherit(abc.Mapping.keys)
 def keys(self):
  return iter(self)
 _inherit(abc.Mapping.values)
 def values(self):
  return (self[e] for e in self)
def test_get_package_with_dist_and_universal_py3_wheel() -> None:
 """MockRepository must expose ipython 7.5.0 metadata (name, version,
 python constraint) and its non-optional requirements sorted by name."""
 repo = MockRepository()
 package = repo.package('ipython', Version.parse('7.5.0'))
 assert (package.name == 'ipython')
 assert (package.version.text == '7.5.0')
 assert (package.python_versions == '>=3.5')
 # Expected dependency list is pre-sorted alphabetically by name.
 expected = [Dependency('appnope', '*'), Dependency('backcall', '*'), Dependency('colorama', '*'), Dependency('decorator', '*'), Dependency('jedi', '>=0.10'), Dependency('pexpect', '*'), Dependency('pickleshare', '*'), Dependency('prompt-toolkit', '>=2.0.0,<2.1.0'), Dependency('pygments', '*'), Dependency('setuptools', '>=18.5'), Dependency('traitlets', '>=4.2'), Dependency('typing', '*'), Dependency('win-unicode-console', '>=0.5')]
 required = [r for r in package.requires if (not r.is_optional())]
 assert (sorted(required, key=(lambda dep: dep.name)) == expected)
class SubmissionFactory(DjangoModelFactory):
 """Factory for Submission objects with sensible defaults.

 NOTE(review): the bare ``_generation`` lines below appear to be stripped
 ``@factory.post_generation`` decorators (the prefix was lost during
 extraction); as written they would raise NameError -- confirm against
 the original source.
 """
 class Meta():
  model = Submission
 conference = factory.SubFactory(ConferenceFactory)
 title = LanguageFactory('sentence')
 abstract = LanguageFactory('text')
 elevator_pitch = LanguageFactory('text')
 notes = factory.Faker('text')
 type = factory.SubFactory(SubmissionTypeFactory)
 speaker = factory.SubFactory(UserFactory)
 duration = factory.SubFactory(DurationFactory)
 topic = factory.SubFactory(TopicFactory)
 audience_level = factory.SubFactory(AudienceLevelFactory)
 speaker_level = factory.fuzzy.FuzzyChoice(Submission.SPEAKER_LEVELS, getter=(lambda c: c[0]))
 previous_talk_video = factory.Faker('url')
 status = 'proposed'
 _generation
 def custom_submission_type(self, create, extracted, **kwargs):
  # Override the generated type with a named one from the conference.
  if (not create):
   return
  if extracted:
   self.type = self.conference.submission_types.get(name=extracted)
 _generation
 def custom_audience_level(self, create, extracted, **kwargs):
  # Override the audience level with a named one from the conference.
  if (not create):
   return
  if extracted:
   self.audience_level = self.conference.audience_levels.get(name=extracted)
 _generation
 def custom_duration(self, create, extracted, **kwargs):
  # Override the duration with a named one from the conference.
  if (not create):
   return
  if extracted:
   self.duration = self.conference.durations.get(name=extracted)
 _generation
 def custom_topic(self, create, extracted, **kwargs):
  # Override the topic with a named one from the conference.
  if (not create):
   return
  if extracted:
   self.topic = self.conference.topics.get(name=extracted)
 _generation
 def languages(self, create, extracted, **kwargs):
  # Attach the requested language codes, or one random configured language.
  if (not create):
   return
  if extracted:
   for language_code in extracted:
    self.languages.add(Language.objects.get(code=language_code))
  else:
   self.languages.add(Language.objects.get(code=random.choice(settings.LANGUAGES)[0]))
 _generation
 def tags(self, create, extracted, **kwargs):
  # Attach the requested tag names, creating tags as needed.
  if (not create):
   return
  if extracted:
   for tag_name in extracted:
    (tag, _) = SubmissionTag.objects.get_or_create(name=tag_name)
    self.tags.add(tag)
def create_config(config_file_env, config_file_exp):
    """Build an experiment config.

    Reads ``root_dir`` from the environment YAML, merges the experiment
    YAML into an EasyDict, creates the experiment's output directory, and
    fills in checkpoint/best-model paths.
    """
    with open(config_file_env, 'r') as stream:
        root_dir = yaml.safe_load(stream)['root_dir']
    with open(config_file_exp, 'r') as stream:
        config = yaml.safe_load(stream)
    cfg = EasyDict()
    for key, value in config.items():
        cfg[key] = value
    # Output directory is named after the experiment file (sans extension).
    exp_name = os.path.basename(config_file_exp).split('.')[0]
    output_dir = os.path.join(root_dir, exp_name)
    mkdir_if_missing(output_dir)
    cfg['output_dir'] = output_dir
    cfg['checkpoint'] = os.path.join(output_dir, 'checkpoint.pth.tar')
    cfg['best_model'] = os.path.join(output_dir, 'best_model.pth.tar')
    return cfg
def test_system():
    """Build a small fixture: a row geometry dict, three normalized ground
    points, and hand-derived ground-to-sky view factors at each point.

    Returns:
        tuple: (syst, pts, vfs_ground_sky) with vfs_ground_sky a length-3
        numpy array, one view factor per entry of pts.
    """
    syst = {
        'height': 1.0,
        'pitch': 2.0,
        'surface_tilt': 30.0,
        'surface_azimuth': 180.0,
        'rotation': -30.0,
    }
    syst['gcr'] = 1.0 / syst['pitch']
    pts = np.linspace(0, 1, num=3)
    sqr3 = np.sqrt(3) / 4
    # Each k-term is a direction cosine x / hypot(d, x); the view factor at
    # a point is 0.5 * ((k3 - k2) + k1 - k0).  The expressions are kept
    # verbatim so results stay bit-identical to the hand derivation.
    # point 0
    k00 = (-2 - sqr3) / np.sqrt(1.25 ** 2 + (2 + sqr3) ** 2)
    k01 = -sqr3 / np.sqrt(1.25 ** 2 + sqr3 ** 2)
    k02 = sqr3 / np.sqrt(0.75 ** 2 + sqr3 ** 2)
    k03 = (2 - sqr3) / np.sqrt(1.25 ** 2 + (2 - sqr3) ** 2)
    vf_0 = 0.5 * ((k03 - k02) + k01 - k00)
    # point 1
    k10 = (-3 - sqr3) / np.sqrt(1.25 ** 2 + (3 + sqr3) ** 2)
    k11 = (-1 - sqr3) / np.sqrt(1.25 ** 2 + (1 + sqr3) ** 2)
    k12 = (-1 + sqr3) / np.sqrt(0.75 ** 2 + (-1 + sqr3) ** 2)
    k13 = (1 - sqr3) / np.sqrt(1.25 ** 2 + (1 - sqr3) ** 2)
    vf_1 = 0.5 * ((k13 - k12) + k11 - k10)
    # point 2
    k20 = -(4 + sqr3) / np.sqrt(1.25 ** 2 + (4 + sqr3) ** 2)
    k21 = (-2 + sqr3) / np.sqrt(0.75 ** 2 + (-2 + sqr3) ** 2)
    k22 = (-2 - sqr3) / np.sqrt(1.25 ** 2 + (2 + sqr3) ** 2)
    k23 = (0 - sqr3) / np.sqrt(1.25 ** 2 + (0 - sqr3) ** 2)
    vf_2 = 0.5 * ((k23 - k22) + k21 - k20)
    vfs_ground_sky = np.array([vf_0, vf_1, vf_2])
    return (syst, pts, vfs_ground_sky)
class TypeInfoMap(Dict[(str, TypeInfo)]):
    """Mapping from names to TypeInfo with a readable multi-line repr."""

    def __str__(self) -> str:
        """Render entries sorted by name, indenting nested TypeInfo text."""
        lines: list[str] = ['TypeInfoMap(']
        indent = '\n' + ' '
        for name, info in sorted(self.items()):
            rendered = indent.join(str(info).split('\n'))
            lines.append(f' {name} : {rendered}')
        lines[-1] += ')'
        return '\n'.join(lines)
class SmilesRnnDistributionLearner():
 """Trains a character-level SMILES RNN language model.

 NOTE(review): ``train`` is annotated to return a
 DistributionMatchingGenerator but contains no return statement, so it
 actually returns None -- confirm against callers before relying on the
 annotation.
 """
 def __init__(self, output_dir: str, n_epochs=10, hidden_size=512, n_layers=3, max_len=100, batch_size=64, rnn_dropout=0.2, lr=0.001, valid_every=100) -> None:
  self.n_epochs = n_epochs
  self.output_dir = output_dir
  self.hidden_size = hidden_size
  self.n_layers = n_layers
  self.max_len = max_len
  self.batch_size = batch_size
  self.rnn_dropout = rnn_dropout
  self.lr = lr
  self.valid_every = valid_every
  # Log every 10 steps; fixed seed for reproducible runs.
  self.print_every = 10
  self.seed = 42
 def train(self, training_set: List[str], validation_set: List[str]) -> DistributionMatchingGenerator:
  """Fit the RNN on SMILES strings (see class NOTE about the return value)."""
  cuda_available = torch.cuda.is_available()
  device_str = ('cuda' if cuda_available else 'cpu')
  device = torch.device(device_str)
  logger.info(f'CUDA enabled: {cuda_available}')
  set_random_seed(self.seed, device)
  # Encode SMILES into fixed-length index sequences, then tensor datasets.
  (train_seqs, _) = load_smiles_from_list(training_set, self.max_len)
  (valid_seqs, _) = load_smiles_from_list(validation_set, self.max_len)
  train_set = get_tensor_dataset(train_seqs)
  test_set = get_tensor_dataset(valid_seqs)
  sd = SmilesCharDictionary()
  n_characters = sd.get_char_num()
  smiles_model = SmilesRnn(input_size=n_characters, hidden_size=self.hidden_size, output_size=n_characters, n_layers=self.n_layers, rnn_dropout=self.rnn_dropout)
  optimizer = torch.optim.Adam(smiles_model.parameters(), lr=self.lr)
  # Padding positions are excluded from the loss.
  criterion = torch.nn.CrossEntropyLoss(ignore_index=sd.pad_idx)
  trainer = SmilesRnnTrainer(model=smiles_model, criteria=[criterion], optimizer=optimizer, device=device, log_dir=self.output_dir)
  trainer.fit(train_set, test_set, batch_size=self.batch_size, print_every=self.print_every, valid_every=self.valid_every, n_epochs=self.n_epochs)
def _run_testcases(plot=True, close_plots=False, verbose=True, *args, **kwargs):
    """Run every slit-function regression test in sequence.

    Returns True when all test functions completed without raising.
    """
    # These cases all take the full (plot, close_plots, verbose) trio, in
    # the original execution order.
    standard_cases = (
        test_against_specair_convolution,
        test_normalisation_mode,
        test_slit_energy_conservation,
        test_linear_dispersion_effect,
        test_cut_slices,
        test_auto_correct_dispersion,
        test_all_slit_shapes,
        test_slit_unit_conversions_spectrum_in_cm,
        test_slit_unit_conversions_spectrum_in_nm,
    )
    for case in standard_cases:
        case(*args, plot=plot, close_plots=close_plots, verbose=verbose, **kwargs)
    # These two take reduced argument sets.
    test_convoluted_quantities_units(*args, **kwargs)
    test_resampling(*args, plot=plot, verbose=verbose, **kwargs)
    return True
def extract_answer_from_response(response, task_config: TaskConfig) -> str:
    """Return the text before the first inter-example separator, if any.

    When the configured separator is empty/None or absent from the
    response, the response is returned unchanged.
    """
    separator = task_config.prompt_config.inter_example_sep
    if separator and (separator in response):
        return response.split(separator)[0]
    return response
def query_param(name, help_str, type=reqparse.text_type, default=None, choices=(), required=False):
    """Decorator factory that records a query-string parameter spec on the
    decorated handler under ``__api_query_params``.

    Note: ``type`` deliberately shadows the builtin to mirror reqparse's API.
    """
    def add_param(func):
        # Create the spec list lazily on first use.
        if not hasattr(func, '__api_query_params'):
            func.__api_query_params = []
        func.__api_query_params.append({'name': name, 'type': type, 'help': help_str, 'default': default, 'choices': choices, 'required': required, 'location': 'args'})
        return func
    return add_param
class NetworkLock(Lock):
    """File lock that flushes and fsyncs its handle before releasing, so
    writes are durable before other processes may acquire the lock."""

    def __init__(self, *args, **kwargs):
        """Forward to Lock, defaulting timeout to pysat's file_timeout."""
        if 'timeout' in kwargs:
            timeout = kwargs.pop('timeout')
        else:
            timeout = pysat.params['file_timeout']
        super(NetworkLock, self).__init__(*args, timeout=timeout, **kwargs)

    def release(self):
        """Flush and sync the locked file, then release the lock."""
        self.fh.flush()
        try:
            os.fsync(self.fh.fileno())
        except OSError:
            # Best effort: some file objects/platforms cannot fsync.
            pass
        super(NetworkLock, self).release()
class IDPMenu(menus.Menu):
 """Reaction-driven menu for composing and broadcasting a custom-room
 ID/password embed to a channel.

 NOTE(review): this block shows extraction damage -- the bare ``('')``
 and ``(regional_indicator('T'))`` expressions before the async handlers
 look like stripped ``@menus.button(...)`` decorators, the line
 ``except aio`` is truncated (original presumably named an aiohttp
 exception), and ``inital_embed`` is defined without ``self`` yet called
 as ``self.inital_embed()`` in send_initial_message. Confirm against the
 original source before relying on this code.
 """
 def __init__(self, send_channel: QuoTextChannel, role: QuoRole):
  super().__init__(timeout=60, delete_message_after=False, clear_reactions_after=True)
  self.embed = None
  self._id = 'Not Set!'
  self._pass = 'Not Set!'
  self.msg = None
  self.send_channel = send_channel
  self.ping_role = role
  # Minutes before the broadcast message self-deletes.
  self.delete_in = 30
  self.id_pass_content = False
  # Only accept replies from the menu invoker, in the invoking channel.
  self.check = (lambda msg: ((msg.channel == self.ctx.channel) and (msg.author == self.ctx.author)))
 def idp_embed(self):
  """Build the room-announcement embed from the current menu state."""
  embed = self.ctx.bot.embed(self.ctx, title='New Custom Room. JOIN NOW!')
  embed.set_thumbnail(url=self.ctx.guild.icon.url)
  embed.add_field(name='Room ID', value=self._id)
  embed.add_field(name='Password', value=self._pass)
  embed.add_field(name='Map', value='Not Set')
  embed.add_field(name='Match Starts at', value='Not Set')
  embed.set_footer(text=f'Shared by: {self.ctx.author} Auto delete in {plural(self.delete_in):minute|minutes}.', icon_url=self.ctx.author.display_avatar.url)
  return embed
 def inital_embed():
  # NOTE(review): missing ``self`` parameter; see class docstring.
  embed = discord.Embed(color=config.COLOR, title='ID-PASS Menu')
  embed.description = ' | Set Title\n | Set Room ID\n | Set Room Password\n | Set Room Map\n | Set Start Time\n | Set thumbnail image\n | ID/Pass as Content\n | Autodelete after\n'
  return embed
 async def refresh(self):
  """Re-render the preview message; stop the menu if editing fails."""
  try:
   content = (self.ping_role.mention if self.ping_role else '')
   if self.id_pass_content:
    content += f'''
 ID: {self._id} | Password: {self._pass}'''
   (await self.msg.edit(content=content, embed=self.embed, allowed_mentions=discord.AllowedMentions(everyone=False, roles=False)))
  except:
   self.stop()
 async def cembed(self, description):
  """Send a small prompt embed and return the sent message."""
  return (await self.ctx.send(embed=discord.Embed(color=discord.Color(config.COLOR), title=f' ID/Pass Formatter', description=description)))
 async def send_initial_message(self, ctx, channel):
  """Post the live preview embed followed by the control-panel embed."""
  self.embed = self.idp_embed()
  self.msg = (await channel.send(embed=self.idp_embed()))
  return (await channel.send(embed=self.inital_embed()))
 (regional_indicator('T'))
 async def set_title(self, payload):
  """Prompt for and apply the embed title ('none' clears it)."""
  msg = (await self.cembed(f'''What do you want the title to be?
 Title cannot exceed 256 characters.'''))
  title = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  if (len(title) > 256):
   return (await self.ctx.error(f'Title cannot exceed 256 characters.', delete_after=3))
  (await inputs.safe_delete(msg))
  if (title.lower() == 'none'):
   self.embed.title = None
  else:
   self.embed.title = title
  (await self.refresh())
 ('')
 async def set_id(self, payload):
  """Prompt for and apply the room ID (embed field 0)."""
  msg = (await self.cembed(f'What is the ID of custom room?'))
  _id = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  (await inputs.safe_delete(msg))
  self.embed.set_field_at(0, name='Room ID', value=_id)
  self._id = _id
  (await self.refresh())
 ('')
 async def set_pass(self, payload):
  """Prompt for and apply the room password (embed field 1)."""
  msg = (await self.cembed(f'What is the password for room?'))
  _pass = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  (await inputs.safe_delete(msg))
  self.embed.set_field_at(1, name='Password', value=_pass)
  self._pass = _pass
  (await self.refresh())
 ('')
 async def set_map(self, payload):
  """Prompt for and apply the map name (embed field 2)."""
  msg = (await self.cembed(f'What is the name of map?'))
  _map = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  (await inputs.safe_delete(msg))
  self.embed.set_field_at(2, name='Maps', value=_map)
  (await self.refresh())
 ('')
 async def set_starttime(self, payload):
  """Prompt for and apply the match start time (embed field 3)."""
  msg = (await self.cembed(f'What is the match start time?'))
  start_time = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  (await inputs.safe_delete(msg))
  self.embed.set_field_at(3, name='Match Starts at', value=start_time)
  (await self.refresh())
 ('')
 async def set_thumbnail(self, payload):
  """Prompt for a thumbnail image URL, validating its content-type."""
  msg = (await self.cembed(f'Enter the Image URL you want to set as thumbnail.'))
  image = (await inputs.string_input(self.ctx, self.check, delete_after=True))
  (await inputs.safe_delete(msg))
  if (image.lower() == 'none'):
   self.embed.set_thumbnail(url=None)
  else:
   try:
    image_formats = ('image/png', 'image/jpeg', 'image/jpg', 'image/gif')
    res = (await self.bot.session.get(image))
    if (res.headers['content-type'] in image_formats):
     check = True
    else:
     check = False
   except aio
   return (await self.ctx.error(f'This is not a valid Image URL', delete_after=3))
  if (not check):
   return (await self.ctx.error(f"The URL didn't contain a valid Image format.", delete_after=3))
  self.embed.set_thumbnail(url=image)
  (await self.refresh())
 ('')
 async def idp_content(self, payload):
  """Toggle whether ID/password are echoed in the message content."""
  self.id_pass_content = (not self.id_pass_content)
  (await self.refresh())
 ('')
 async def delete_time(self, payload):
  """Prompt for the auto-delete delay in minutes and update the footer."""
  msg = (await self.cembed(f'''After how many minutes do you want me to delete the idp message?
 It can be between 1-30'''))
  delete_time = (await inputs.integer_input(self.ctx, self.check, delete_after=True, limits=(None, None)))
  (await inputs.safe_delete(msg))
  self.delete_in = delete_time
  self.embed.set_footer(text=f'Shared by: {self.ctx.author} Auto delete in {plural(self.delete_in):minute|minutes}', icon_url=self.ctx.author.display_avatar.url)
  (await self.refresh())
 ('')
 async def on_cancel(self, payload):
  """Abort the menu without broadcasting."""
  self.stop()
 ('')
 async def on_confirm(self, payload):
  """Broadcast the finished embed to send_channel and schedule deletion."""
  content = (self.ping_role.mention if self.ping_role else '')
  if self.id_pass_content:
   content += f'''
 ID: {self._id} | Password: {self._pass}'''
  msg = (await self.send_channel.send(content=content, embed=self.embed, allowed_mentions=discord.AllowedMentions(everyone=True, roles=True)))
  self.bot.loop.create_task(delete_denied_message(msg, (self.delete_in * 60)))
  self.stop()
class w2v_api(object):
    """Mixin that loads word2vec vectors for the instance vocabulary.

    Expects the host class to provide ``word_vec_path``, ``vocab`` and
    ``word2vec_dim`` attributes; populates ``self.word2vec``.
    """

    def load_word2vec(self, binary=True):
        """Load vectors from ``self.word_vec_path``; OOV words get small
        random vectors. No-op when no path is configured."""
        if self.word_vec_path is None:
            return
        raw_word2vec = gensim.models.KeyedVectors.load_word2vec_format(self.word_vec_path, binary=binary)
        print('load w2v done')
        self.word2vec = []
        oov_cnt = 0
        for v in self.vocab:
            if v not in raw_word2vec:
                oov_cnt += 1
                vec = np.random.randn(self.word2vec_dim) * 0.1
            else:
                # BUG FIX: the original had `(vec - raw_word2vec[v])`, a
                # discarded expression that left `vec` unassigned (NameError
                # on the first in-vocab word, stale vector afterwards).
                vec = raw_word2vec[v]
            self.word2vec.append(vec)
        print(('word2vec cannot cover %f vocab' % (float(oov_cnt) / len(self.vocab))))
class LLMHandler():
    """Base class for LLM backends: holds chat history, prompt list, and
    per-backend settings persisted as JSON under the 'llm-settings' key.

    Subclasses are expected to define ``self.key`` (the settings namespace)
    and override send_message/get_suggestions/load_model.
    """

    def __init__(self, settings, path, llm):
        self.history = []
        # BUG FIX: was `self.propmts` (typo); set_history assigns
        # `self.prompts`, so the attribute is now initialized consistently.
        self.prompts = []
        self.settings = settings
        self.path = path
        self.llm = llm

    def stream_enabled(self):
        """Return the 'streaming' setting, defaulting to False when unset."""
        enabled = self.get_setting('streaming')
        if enabled is None:
            return False
        return enabled

    def install(self):
        """Install this backend's extra pip requirements next to `path`."""
        pip_path = os.path.join(os.path.abspath(os.path.join(self.path, os.pardir)), 'pip')
        for module in self.llm['extra_requirements']:
            install_module(module, pip_path)

    def is_installed(self):
        """True when every extra requirement is importable."""
        for module in self.llm['extra_requirements']:
            if find_module(module) is None:
                return False
        return True

    def load_model(self, model):
        # Subclass hook; the base implementation always succeeds.
        return True

    def send_message(self, window, message):
        # Subclass hook.
        return 'Not yet implemented'

    def get_suggestions(self, window, message):
        # Subclass hook.
        return 'Not yet implemented'

    def set_history(self, prompts, window):
        """Snapshot prompts and the last `window.memory` chat messages."""
        self.prompts = prompts
        self.history = window.chat[(len(window.chat) - window.memory):(len(window.chat) - 1)]

    def get_setting(self, key):
        """Read a per-backend setting, falling back to its declared default."""
        j = json.loads(self.settings.get_string('llm-settings'))
        if (self.key not in j) or (key not in j[self.key]):
            return self.get_default_setting(key)
        return j[self.key][key]

    def set_setting(self, key, value):
        """Write a per-backend setting back into the JSON settings blob."""
        j = json.loads(self.settings.get_string('llm-settings'))
        if self.key not in j:
            j[self.key] = {}
        j[self.key][key] = value
        self.settings.set_string('llm-settings', json.dumps(j))

    def get_default_setting(self, key):
        """Return the declared default for `key`, or None when undeclared."""
        for s in self.llm['extra_settings']:
            if s['key'] == key:
                return s['default']
        return None
class Package(OpcPackage):
 """OPC package specialization that tracks the image parts it contains."""
 def after_unmarshal(self):
  # Hook called by the OPC loader once all parts are unmarshalled.
  self._gather_image_parts()
 def get_or_add_image_part(self, image_descriptor: (str | IO[bytes])) -> ImagePart:
  """Return the image part matching *image_descriptor*, adding it if new."""
  return self.image_parts.get_or_add_image_part(image_descriptor)
 # NOTE(review): image_parts looks like it lost a @lazyproperty/@property
 # decorator during extraction; as written each call returns a NEW empty
 # ImageParts, so _gather_image_parts and get_or_add_image_part cannot
 # share state -- confirm against the original source.
 def image_parts(self) -> ImageParts:
  return ImageParts()
 def _gather_image_parts(self):
  """Collect every internal image part reachable via relationships."""
  for rel in self.iter_rels():
   if rel.is_external:
    continue
   if (rel.reltype != RT.IMAGE):
    continue
   if (rel.target_part in self.image_parts):
    continue
   self.image_parts.append(rel.target_part)
def align_dfmesh_scanpc(df_mesh, df_resolution, scan_pc):
    """Align a distance-field mesh (voxel coordinates at `df_resolution`)
    to the coordinate frame of the scan point cloud `scan_pc`.

    The mesh is translated so the DF volume center lands on the point
    cloud's bounding-box center, scaled from voxel to world units, then
    rotated pi around +Y. Returns the mutated `df_mesh`.
    """
    pts_min = np.amin(scan_pc, axis=0)
    pts_max = np.amax(scan_pc, axis=0)
    # BUG FIX: the bounding-box center is the midpoint of min and max;
    # the original computed (pts_max + pts_max) / 2.0, i.e. just pts_max.
    pc_bbox_center = (pts_min + pts_max) / 2.0
    scale_factor = 1.0 / df_resolution
    # Shift the DF volume center (df_resolution/2 along each axis) onto
    # the point-cloud bbox center.
    half_res = df_resolution / 2.0
    trans_v = pc_bbox_center - np.array([half_res, half_res, half_res])
    df_mesh.apply_translation(trans_v)
    df_mesh.apply_scale(scale_factor)
    # Flip 180 degrees about +Y to match the scan's facing direction.
    rot_m = axangle2aff([0, 1, 0], np.pi)
    df_mesh.apply_transform(rot_m)
    return df_mesh
def custom(path: Union[(PurePath, str)], context: Optional[dict]=None) -> CompletedProcess:
    """Execute the ``pretf_workflow`` function from the Python file at *path*.

    A bare int returned by the workflow is wrapped into a CompletedProcess
    so callers always receive one. Raises whatever ``log.bad`` builds when
    the module has no ``pretf_workflow`` attribute.
    """
    with import_file(path) as module:
        try:
            func = module.pretf_workflow
        except AttributeError:
            raise log.bad(f"workflow: {path} does not have a 'pretf_workflow' function")
        result = call_pretf_function(func=func, context=context)
    if isinstance(result, int):
        # Normalize bare exit codes into a CompletedProcess.
        result = CompletedProcess(args=[str(path)], returncode=result)
    return result
class Effect5317(BaseEffect):
    """Passive hull bonus: boosts damageMultiplier of small projectile
    turrets by the ship's shipBonusMD1 attribute, scaled by the Minmatar
    Destroyer skill."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        def _is_small_projectile(mod):
            return mod.item.requiresSkill('Small Projectile Turret')

        fit.modules.filteredItemBoost(
            _is_small_projectile,
            'damageMultiplier',
            ship.getModifiedItemAttr('shipBonusMD1'),
            skill='Minmatar Destroyer',
            **kwargs,
        )
def replay_citations(dag: ProvDAG, out_fp: str, deduplicate: bool=True, suppress_header: bool=False):
    """Export the DAG's registered citations to *out_fp* as bibtex.

    When no citations exist, a short notice is written instead (without the
    header/footer). Header and footer are omitted when *suppress_header*.
    """
    bib_db = collect_citations(dag, deduplicate=deduplicate)
    boundary = '#' * 79
    extra = ['', '# This bibtex-formatted citation file can be imported into popular citation ', '# managers like Zotero and Mendeley, simplifying management and formatting.']
    header = []
    footer = []
    if not suppress_header:
        header = build_header(boundary=boundary, extra_text=extra) + ['\n']
        footer = build_footer(dag=dag, boundary=boundary)
    with open(out_fp, 'w') as bibfile:
        if bib_db.entries_dict == {}:
            # Nothing registered: emit the notice alone, as before.
            bibfile.write('No citations were registered to the used Actions.')
        else:
            bibfile.write('\n'.join(header))
            bibfile.write(BibTexWriter().write(bib_db))
            bibfile.write('\n'.join(footer))
def test_json_xmlrpc(run_cli):
 """--json output must serialize XMLRPC DateTime and Binary values, and a
 non-serializable value must surface as a RuntimeError."""
 cmd = 'bugzilla query --json --id 1165434'
 # NOTE(review): the time string has no date portion yet is parsed with
 # '%Y%m%dT%H:%M:%S' -- confirm this matches the upstream fixture.
 timestr = 'T19:12:12'
 dateobj = datetime.datetime.strptime(timestr, '%Y%m%dT%H:%M:%S')
 attachfile = tests.utils.tests_path('data/bz-attach-get1.txt')
 attachdata = open(attachfile, 'rb').read()
 bugid = 1165434
 data = {'bugs': [{'id': bugid, 'timetest': xmlrpc.client.DateTime(dateobj), 'binarytest': xmlrpc.client.Binary(attachdata)}]}
 fakebz = tests.mockbackend.make_bz(bug_search_args=None, bug_search_return={'bugs': [{'id': bugid}]}, bug_get_args=None, bug_get_return=data)
 out = run_cli(cmd, fakebz)
 tests.utils.diff_compare(tests.utils.sanitize_json(out), 'data/clioutput/test_json_xmlrpc.txt')
 retdata = json.loads(out)['bugs'][0]
 # Binary round-trips through base64; DateTime becomes ISO-8601 + 'Z'.
 assert (base64.b64decode(retdata['binarytest']) == attachdata)
 assert (retdata['timetest'] == (dateobj.isoformat() + 'Z'))
 # A value json cannot encode must make the CLI raise instead of emitting
 # partial output.
 data['bugs'][0]['foo'] = Exception('foo')
 fakebz = tests.mockbackend.make_bz(bug_search_args=None, bug_search_return={'bugs': [{'id': bugid}]}, bug_get_args=None, bug_get_return=data)
 with pytest.raises(RuntimeError):
  run_cli(cmd, fakebz, expectfail=True)
class Tree():
    """Binary tree node whose async iteration yields values in-order."""

    def __init__(self, left: (Tree | None), value: int, right: (Tree | None)) -> None:
        self.left = left
        self.value = value
        self.right = right

    async def __aiter__(self) -> AsyncIterator[int]:
        """Yield the left subtree, this node's value, then the right subtree."""
        if self.left is not None:
            async for item in self.left:
                yield item
        yield self.value
        if self.right is not None:
            async for item in self.right:
                yield item
class TestLength():
 """Unit tests for the typed-list Length op and its __len__ interface."""
 def test_sanity_check(self):
  # Length() applied to a symbolic list of float matrices must return the
  # number of elements in the runtime list.
  mySymbolicMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))()
  z = Length()(mySymbolicMatricesList)
  f = pytensor.function([mySymbolicMatricesList], z)
  x = rand_ranged_matrix((- 1000), 1000, [100, 101])
  assert (f([x, x, x, x]) == 4)
 def test_interface(self):
  # __len__ on the symbolic list should route through the same op.
  mySymbolicMatricesList = TypedListType(TensorType(pytensor.config.floatX, shape=(None, None)))()
  z = mySymbolicMatricesList.__len__()
  f = pytensor.function([mySymbolicMatricesList], z)
  x = rand_ranged_matrix((- 1000), 1000, [100, 101])
  assert (f([x, x]) == 2)
def test_podman_vfs(tmp_path: Path, monkeypatch, container_engine):
 """Exercise a rootless podman container using the vfs storage driver,
 configured entirely through temporary CONTAINERS_CONF files."""
 if (container_engine.name != 'podman'):
  pytest.skip('only runs with podman')
 vfs_path = (tmp_path / 'podman_vfs')
 vfs_path.mkdir()
 # Minimal capability set plus cgroupfs/file logging so rootless vfs works.
 vfs_containers_conf_data = {'containers': {'default_capabilities': ['CHOWN', 'DAC_OVERRIDE', 'FOWNER', 'FSETID', 'KILL', 'NET_BIND_SERVICE', 'SETFCAP', 'SETGID', 'SETPCAP', 'SETUID', 'SYS_CHROOT']}, 'engine': {'cgroup_manager': 'cgroupfs', 'events_logger': 'file'}}
 storage_root = (vfs_path / '.local/share/containers/vfs-storage')
 run_root = (vfs_path / '.local/share/containers/vfs-runroot')
 storage_root.mkdir(parents=True, exist_ok=True)
 run_root.mkdir(parents=True, exist_ok=True)
 vfs_containers_storage_conf_data = {'storage': {'driver': 'vfs', 'graphroot': os.fspath(storage_root), 'runroot': os.fspath(run_root), 'rootless_storage_path': os.fspath(storage_root), 'options': {'aufs': {'mountopt': 'rw'}, 'overlay': {'mountopt': 'rw', 'force_mask': 'shared'}}}}
 vfs_containers_conf_fpath = (vfs_path / 'temp_vfs_containers.conf')
 vfs_containers_storage_conf_fpath = (vfs_path / 'temp_vfs_containers_storage.conf')
 with open(vfs_containers_conf_fpath, 'wb') as file:
  tomli_w.dump(vfs_containers_conf_data, file)
 with open(vfs_containers_storage_conf_fpath, 'wb') as file:
  tomli_w.dump(vfs_containers_storage_conf_data, file)
 # Point podman at the temp configs for the duration of the test.
 monkeypatch.setenv('CONTAINERS_CONF', str(vfs_containers_conf_fpath))
 monkeypatch.setenv('CONTAINERS_STORAGE_CONF', str(vfs_containers_storage_conf_fpath))
 with OCIContainer(engine=PODMAN, image=DEFAULT_IMAGE) as container:
  # Basic exec and file round-trip through the container.
  assert (container.call(['echo', 'hello'], capture_output=True) == 'hello\n')
  (tmp_path / 'some_file.txt').write_text('1234')
  container.copy_into((tmp_path / 'some_file.txt'), PurePosixPath('some_file.txt'))
  assert (container.call(['cat', 'some_file.txt'], capture_output=True) == '1234')
 # vfs storage is owned by a subuid namespace; clean up via podman unshare.
 subprocess.run(['podman', 'unshare', 'rm', '-rf', vfs_path], check=True)
class Node():
    """MCTS search-tree node wrapping a domain state object.

    The state is expected to expose ``max_children`` and ``smiles``.
    """

    def __init__(self, state, parent=None):
        # visits starts at 1 so UCT-style scores never divide by zero.
        self.visits = 1
        self.reward = 0.0
        self.state = state
        self.children = []
        self.parent = parent

    def add_child(self, child_state):
        """Append a new child node wrapping *child_state*."""
        child = Node(child_state, self)
        self.children.append(child)

    def update(self, reward):
        """Accumulate a rollout reward and bump the visit count."""
        self.reward += reward
        self.visits += 1

    def fully_expanded(self):
        """True once this node has spawned state.max_children children."""
        # Idiom fix: return the comparison directly instead of the
        # if/else True/False ladder.
        return len(self.children) == self.state.max_children

    def __repr__(self):
        return str(self.state.smiles)
class RecvIfcRTL(CalleeIfcRTL):
    """RTL receive interface (en/rdy/msg) that can auto-insert a CL-to-RTL
    adapter when connected to CL-level caller/callee interfaces."""

    def construct(s, Type):
        super().construct(en=True, rdy=True, MsgType=Type, RetType=None)

    def _insert_cl_adapter(s, other, parent):
        # Shared by both connect() branches (was duplicated verbatim):
        # instantiate a RecvCL2SendRTL adapter, register it on the parent
        # under a unique RecvCL2SendRTL_<n> attribute, and wire it between
        # `other` and this interface.
        m = RecvCL2SendRTL(s.MsgType)
        if hasattr(parent, 'RecvCL2SendRTL_count'):
            count = parent.RecvCL2SendRTL_count
            setattr(parent, 'RecvCL2SendRTL_' + str(count), m)
        else:
            parent.RecvCL2SendRTL_count = 0
            parent.RecvCL2SendRTL_0 = m
        connect_pairs(other, m.recv, m.send.msg, s.msg, m.send.en, s.en, m.send.rdy, s.rdy)
        parent.RecvCL2SendRTL_count += 1

    def connect(s, other, parent):
        """Connect to a CL interface by inserting an adapter; return True
        when this method handled the connection."""
        if isinstance(other, CallerIfcCL):
            s._insert_cl_adapter(other, parent)
            return True
        elif isinstance(other, CalleeIfcCL):
            # A callee at the same or an outer level cannot be adapted.
            if (s._dsl.level <= other._dsl.level):
                raise InvalidConnectionError('CL2RTL connection is not supported between RecvIfcRTL and CalleeIfcCL.\n - level {}: {} (class {})\n - level {}: {} (class {})'.format(s._dsl.level, repr(s), type(s), other._dsl.level, repr(other), type(other)))
            s._insert_cl_adapter(other, parent)
            return True
        return False
# NOTE(review): the line below looks like a truncated decorator -- the
# '@pytest.mark' prefix appears to have been stripped during extraction.
.parametrize('has_changelog', [False, True])
def test_on_menu_action_changelog(default_main_window, monkeypatch, has_changelog):
 """The changelog menu action opens a window only when change logs exist."""
 mock_show = MagicMock()
 monkeypatch.setattr(QtWidgets.QWidget, 'show', mock_show)
 if has_changelog:
  # Presence of the attribute (even empty) marks change logs as available.
  default_main_window.all_change_logs = {}
 default_main_window._on_menu_action_changelog()
 if has_changelog:
  assert (default_main_window.changelog_window is not None)
  assert (default_main_window.changelog_window.centralWidget() is default_main_window.changelog_tab)
  assert (default_main_window.changelog_window.windowTitle() == 'Change Log')
  default_main_window.changelog_window.show.assert_called_once_with()
 else:
  # No change logs: no window is created and nothing is shown.
  assert (default_main_window.changelog_window is None)
  mock_show.assert_not_called()
def render_venv_config(cfg):
    """Serialize *cfg* into pyvenv.cfg-style ``key = value`` text.

    Always emits ``home``, ``version`` and
    ``include-system-site-packages``; ``prompt``, ``executable`` and
    ``command`` are emitted only when they are not None.  Every line,
    including the last, is terminated with ``os.linesep``.
    """
    entries = [
        f'home = {cfg.home}',
        f'version = {cfg.version}',
        f'include-system-site-packages = {cfg.system_site_packages}',
    ]
    # Optional keys appear in this fixed order, only when set.
    for key in ('prompt', 'executable', 'command'):
        value = getattr(cfg, key)
        if value is not None:
            entries.append(f'{key} = {value}')
    return ''.join(entry + os.linesep for entry in entries)
class StableDiffusionPipeline(DiffusionPipeline):
    """Text-to-image Stable Diffusion pipeline with an optional ONNX Runtime path.

    Combines a VAE, CLIP text encoder/tokenizer, UNet denoiser and a noise
    scheduler.  When an ``execution_provider`` kwarg is passed to
    ``__call__``, inference runs through pre-exported ONNX sessions
    (``onnx/*.onnx``) instead of the PyTorch modules.
    """

    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[(DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler)], safety_checker, feature_extractor: CLIPFeatureExtractor):
        super().__init__()
        # NOTE(review): `format` here is the builtin function, not a string --
        # looks like a stripped/garbled argument (upstream passes "pt"); confirm.
        scheduler = scheduler.set_format(format)
        self.register_modules(vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor)
    # NOTE(review): bare `_grad()` looks like a stripped `@torch.no_grad()`
    # decorator for __call__; confirm against upstream diffusers.
    _grad()
    def __call__(self, prompt: Union[(str, List[str])], height: Optional[int]=512, width: Optional[int]=512, num_inference_steps: Optional[int]=50, guidance_scale: Optional[float]=7.5, eta: Optional[float]=0.0, generator: Optional[torch.Generator]=None, latents: Optional[torch.FloatTensor]=None, output_type: Optional[str]='pil', **kwargs):
        """Generate images for *prompt*.

        Returns ``{'sample': images}`` where images are PIL images when
        ``output_type == 'pil'`` or a numpy array otherwise.  Raises
        ValueError for a non-str/list prompt, height/width not divisible
        by 8, or a latents tensor of the wrong shape.
        """
        # Deprecated device selection path, kept for backwards compatibility.
        if ('torch_device' in kwargs):
            device = kwargs.pop('torch_device')
            warnings.warn('`torch_device` is deprecated as an input argument to `__call__` and will be removed in v0.3.0. Consider using `pipe.to(torch_device)` instead.')
            if (device is None):
                device = ('cuda' if torch.cuda.is_available() else 'cpu')
            self.to(device)
        # Optional ONNX Runtime path: load one session per exported sub-model.
        onnx = False
        if ('execution_provider' in kwargs):
            onnx = True
            self.scheduler = self.scheduler.set_format('np')
            ep = kwargs.pop('execution_provider')
            import onnxruntime as ort
            so = ort.SessionOptions()
            so.enable_mem_pattern = False
            unet_sess = ort.InferenceSession('onnx/unet.onnx', so, providers=[ep])
            post_quant_conv_sess = ort.InferenceSession('onnx/post_quant_conv.onnx', so, providers=[ep])
            decoder_sess = ort.InferenceSession('onnx/decoder.onnx', so, providers=[ep])
            encoder_sess = ort.InferenceSession('onnx/encoder.onnx', so, providers=[ep])
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')
        # Latents are 8x smaller than the image, so dimensions must divide by 8.
        if (((height % 8) != 0) or ((width % 8) != 0)):
            raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
        # NOTE(review): truncation=False with max_length padding can raise for
        # over-long prompts; upstream uses truncation=True -- confirm intent.
        text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=False, return_tensors='pt')
        if onnx:
            text_embeddings = encoder_sess.run(None, {'text_input': text_input.input_ids.numpy()})[0]
        else:
            text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
        # Classifier-free guidance needs an unconditional (empty-prompt)
        # embedding concatenated before the conditional one.
        do_classifier_free_guidance = (guidance_scale > 1.0)
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[(- 1)]
            uncond_input = self.tokenizer(([''] * batch_size), padding='max_length', max_length=max_length, truncation=False, return_tensors='pt')
            if onnx:
                uncond_embeddings = encoder_sess.run(None, {'text_input': uncond_input.input_ids.numpy()})[0]
                text_embeddings = np.concatenate([uncond_embeddings, text_embeddings])
            else:
                uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
        # Initial noise: sampled unless the caller supplied latents.
        latents_shape = (batch_size, self.unet.in_channels, (height // 8), (width // 8))
        if (latents is None):
            latents = torch.randn(latents_shape, generator=generator, device=self.device)
        else:
            if (latents.shape != latents_shape):
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
            latents = latents.to(self.device)
        if onnx:
            latents = latents.numpy()
        # Some schedulers accept an `offset` kwarg in set_timesteps; probe the
        # signature rather than hard-coding per-scheduler behaviour.
        accepts_offset = ('offset' in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys()))
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs['offset'] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # LMS scales the initial noise by the first sigma.
        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = (latents * self.scheduler.sigmas[0])
        # `eta` is only meaningful for DDIM-style schedulers; probe for it.
        accepts_eta = ('eta' in set(inspect.signature(self.scheduler.step).parameters.keys()))
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta
        # Denoising loop.
        for (i, t) in tqdm(enumerate(self.scheduler.timesteps)):
            # Duplicate latents so one UNet pass covers uncond + cond halves.
            if (onnx and do_classifier_free_guidance):
                latent_model_input = np.concatenate(([latents] * 2))
            else:
                latent_model_input = (torch.cat(([latents] * 2)) if do_classifier_free_guidance else latents)
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                sigma = self.scheduler.sigmas[i]
                latent_model_input = (latent_model_input / (((sigma ** 2) + 1) ** 0.5))
                if onnx:
                    latent_model_input = latent_model_input.astype('float32')
            if onnx:
                inp = {'latent_model_input': latent_model_input, 't': np.array([t], dtype=np.int64), 'encoder_hidden_states': text_embeddings}
                noise_pred = unet_sess.run(None, inp)[0]
            else:
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings)
            # Guidance: push the prediction away from the unconditional one.
            if do_classifier_free_guidance:
                if onnx:
                    (noise_pred_uncond, noise_pred_text) = np.array_split(noise_pred, 2)
                else:
                    (noise_pred_uncond, noise_pred_text) = noise_pred.chunk(2)
                noise_pred = (noise_pred_uncond + (guidance_scale * (noise_pred_text - noise_pred_uncond)))
            # LMS steps by index, the others by timestep value.
            if isinstance(self.scheduler, LMSDiscreteScheduler):
                latents = self.scheduler.step(noise_pred, i, latents, **extra_step_kwargs)['prev_sample']
            else:
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs)['prev_sample']
        # Undo the VAE scaling factor before decoding to pixel space.
        latents = ((1 / 0.18215) * latents)
        if onnx:
            latents = post_quant_conv_sess.run(None, {'latents': latents.astype('float32')})[0]
            image = decoder_sess.run(None, {'latents': latents})[0]
            image = np.clip(((image / 2) + 0.5), 0, 1)
            image = np.transpose(image, (0, 2, 3, 1))
        else:
            image = self.vae.decode(latents)
            image = ((image / 2) + 0.5).clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).numpy()
        # NOTE(review): `safety_cheker_input` (typo included) is computed but
        # never passed to the safety checker -- the check appears to be a
        # no-op here; confirm against upstream before relying on it.
        safety_cheker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors='pt').to(self.device)
        if (output_type == 'pil'):
            image = self.numpy_to_pil(image)
        return {'sample': image}
class ODConv2d(nn.Module):
    """Omni-dimensional dynamic convolution (ODConv).

    Holds ``kernel_num`` candidate conv kernels plus an Attention module
    producing four attention tensors (channel, filter, spatial, kernel)
    that re-weight the convolution dynamically per input sample.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, reduction=0.0625, kernel_num=4):
        super(ODConv2d, self).__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_num = kernel_num
        # Attention is project-defined; presumably returns the four attention
        # tensors unpacked in the forward paths below -- confirmed by usage.
        self.attention = Attention(in_planes, out_planes, kernel_size, groups=groups, reduction=reduction, kernel_num=kernel_num)
        # Candidate kernels: (kernel_num, out, in/groups, k, k).
        self.weight = nn.Parameter(torch.randn(kernel_num, out_planes, (in_planes // groups), kernel_size, kernel_size), requires_grad=True)
        self._initialize_weights()
        # 1x1 conv with a single kernel takes the cheaper pointwise path.
        if ((self.kernel_size == 1) and (self.kernel_num == 1)):
            self._forward_impl = self._forward_impl_pw1x
        else:
            self._forward_impl = self._forward_impl_common
    def _initialize_weights(self):
        # Kaiming init applied per candidate kernel.
        for i in range(self.kernel_num):
            nn.init.kaiming_normal_(self.weight[i], mode='fan_out', nonlinearity='relu')
    def update_temperature(self, temperature):
        """Forward the softmax temperature update to the attention module."""
        self.attention.update_temperature(temperature)
    def _forward_impl_common(self, x):
        """General path: build a per-sample kernel and run one grouped conv."""
        (channel_attention, filter_attention, spatial_attention, kernel_attention) = self.attention(x)
        (batch_size, in_planes, height, width) = x.size()
        x = (x * channel_attention)
        # Fold the batch into the channel dim so each sample can use its own
        # aggregated kernel via conv groups.
        x = x.reshape(1, (- 1), height, width)
        aggregate_weight = ((spatial_attention * kernel_attention) * self.weight.unsqueeze(dim=0))
        # Sum over the kernel_num axis to get one kernel per sample.
        aggregate_weight = torch.sum(aggregate_weight, dim=1).view([(- 1), (self.in_planes // self.groups), self.kernel_size, self.kernel_size])
        output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=(self.groups * batch_size))
        # Unfold the batch dimension back out of the channels.
        output = output.view(batch_size, self.out_planes, output.size((- 2)), output.size((- 1)))
        output = (output * filter_attention)
        return output
    def _forward_impl_pw1x(self, x):
        """Fast path for pointwise (1x1, single-kernel) convolution: spatial
        and kernel attentions are trivial, so only channel/filter apply."""
        (channel_attention, filter_attention, spatial_attention, kernel_attention) = self.attention(x)
        x = (x * channel_attention)
        output = F.conv2d(x, weight=self.weight.squeeze(dim=0), bias=None, stride=self.stride, padding=self.padding, dilation=self.dilation, groups=self.groups)
        output = (output * filter_attention)
        return output
    def forward(self, x):
        return self._forward_impl(x)
# NOTE(review): this `.skipif(` fragment looks like a stripped
# `@pytest.mark` decorator prefix; confirm against the original test module.
.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_three_nn():
    """three_nn must return, for each unknown point, the squared(?) distances
    and indices of its three nearest points in `known` -- checked against
    precomputed reference values on two batches of 3-D points.
    NOTE(review): duplicated rows in `known` explain the repeated indices in
    `expected_idx`; the distance metric (L2 vs squared) is taken on trust from
    the reference tensor -- confirm against the op's documentation."""
    known = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.8373), 3.5605, (- 0.7867)], [(- 1.8373), 3.5605, (- 0.7867)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 1.3399), 1.9991, (- 0.3698)], [(- 1.3399), 1.9991, (- 0.3698)]]]).cuda()
    unknown = torch.tensor([[[(- 1.8373), 3.5605, (- 0.7867)], [0.7615, 2.942, 0.2314], [(- 0.6503), 3.6637, (- 1.0622)], [(- 1.5237), 2.3976, (- 0.8097)], [(- 0.0722), 3.4017, (- 0.288)], [0.5198, 3.0661, (- 0.4605)], [(- 2.0185), 3.5019, (- 0.3236)], [0.5098, 3.102, 0.5799], [(- 1.6137), 3.8443, (- 0.5269)], [0.7341, 2.9626, (- 0.3189)]], [[(- 1.3399), 1.9991, (- 0.3698)], [(- 0.0799), 0.9698, (- 0.8457)], [0.0858, 2.4721, (- 0.1928)], [(- 0.9022), 1.656, (- 1.309)], [0.1156, 1.6901, (- 0.4366)], [(- 0.6477), 2.3576, (- 0.1563)], [(- 0.8482), 1.1466, (- 1.2704)], [(- 0.8753), 2.0845, (- 0.346)], [(- 0.5621), 1.4233, (- 1.2858)], [(- 0.5883), 1.3114, (- 1.2899)]]]).cuda()
    (dist, idx) = three_nn(unknown, known)
    expected_dist = torch.tensor([[[0.0, 0.0, 0.0], [0.0, 2.0463, 2.8588], [0.0, 1.2229, 1.2229], [1.2047, 1.2047, 1.2047], [1.0011, 1.0845, 1.8411], [0.7433, 1.4451, 2.4304], [0.5007, 0.5007, 0.5007], [0.4587, 2.0875, 2.7544], [0.445, 0.445, 0.445], [0.5514, 1.7206, 2.6811]], [[0.0, 0.0, 0.0], [0.0, 1.6464, 1.6952], [0.0, 1.5125, 1.5125], [1.0915, 1.0915, 1.0915], [0.8197, 0.8511, 1.4894], [0.7433, 0.8082, 0.8082], [0.8955, 1.334, 1.334], [0.473, 0.473, 0.473], [0.7949, 1.3325, 1.3325], [0.7566, 1.3727, 1.3727]]]).cuda()
    expected_idx = torch.tensor([[[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [1, 2, 0], [0, 3, 4], [1, 2, 0], [0, 3, 4], [1, 2, 0]], [[0, 3, 4], [1, 2, 0], [2, 0, 3], [0, 3, 4], [2, 1, 0], [2, 0, 3], [1, 0, 3], [0, 3, 4], [1, 0, 3], [1, 0, 3]]]).cuda()
    # Distances compared with loose tolerance; indices must match exactly.
    assert torch.allclose(dist, expected_dist, 0.0001)
    assert torch.all((idx == expected_idx))
class AddressBookPanel(Div):
    """Panel showing a pluralised heading, an add-address form, and one
    box per stored Address."""

    def __init__(self, view):
        super().__init__(view)
        # Pluralise the heading according to how many addresses exist.
        address_count = Session.query(Address).count()
        heading = H(view, 1, text=_.ngettext('Address', 'Addresses', address_count))
        self.add_child(heading)
        self.add_child(AddressForm(view))
        # One AddressBox per stored Address row.
        for stored_address in Session.query(Address).all():
            self.add_child(AddressBox(view, stored_address))
def test_locker_properly_loads_subdir(locker: Locker) -> None:
    """A locked git package with a `subdirectory` source field must round-trip
    through Locker.locked_repository() with source_subdirectory preserved."""
    # Raw poetry.lock content for a single git-sourced package.
    # NOTE(review): the `url = " = "develop"` fragment looks garbled (the git
    # URL and the branch/reference key appear truncated by extraction);
    # confirm against the original fixture before relying on this string.
    content = '[[package]]\nname = "git-package-subdir"\nversion = "1.2.3"\ndescription = ""\noptional = false\npython-versions = "*"\ndevelop = false\nfiles = []\n\n[package.source]\ntype = "git"\nurl = " = "develop"\nresolved_reference = "123456"\nsubdirectory = "subdir"\n\n[metadata]\nlock-version = "2.0"\npython-versions = "*"\ncontent-hash = "115cf985d932e9bf5f540555bbdd75decbb62cac81e399375fc19f6277f8c1d8"\n'
    with open(locker.lock, 'w', encoding='utf-8') as f:
        f.write(content)
    repository = locker.locked_repository()
    assert (len(repository.packages) == 1)
    packages = repository.find_packages(get_dependency('git-package-subdir', '1.2.3'))
    assert (len(packages) == 1)
    package = packages[0]
    assert (package.source_subdirectory == 'subdir')
class ContextX509(cpi.Context):
    """X.509 security context adaptor: validates the context type and checks
    that the user's proxy certificate file exists and is readable."""

    def __init__(self, api, adaptor):
        _cpi_base = super(ContextX509, self)
        _cpi_base.__init__(api, adaptor)
    # NOTE(review): the bare `_CALL` expressions below look like stripped
    # decorators (e.g. a sync-call wrapper); confirm against the original.
    _CALL
    def init_instance(self, adaptor_state, type):
        """Bind this adaptor to an x509 context type.

        Raises BadParameter for any schema not in _ADAPTOR_SCHEMAS
        (compared case-insensitively).  Returns the api object.
        """
        if (not (type.lower() in (schema.lower() for schema in _ADAPTOR_SCHEMAS))):
            raise BadParameter('the x509 context only handles x509 contexts!')
        self.get_api().type = type
        return self.get_api()
    _CALL
    def _initialize(self, session):
        """Check the proxy file: default its name when unset, then verify it
        exists, is a regular file, and can be opened for reading.

        Raises BadParameter when missing and PermissionDenied when unreadable.
        """
        api = self.get_api()
        if (not api.user_proxy):
            # Fall back to the conventional x509up_u<uid> proxy file name.
            api.user_proxy = ('x509up_u%d' % os.getuid())
        if ((not os.path.exists(api.user_proxy)) or (not os.path.isfile(api.user_proxy))):
            raise BadParameter(('X509 proxy does not exist: %s' % api.user_proxy))
        try:
            # Readability probe only: open then immediately close.
            fh = open(api.user_proxy)
        except Exception as e:
            raise PermissionDenied(("X509 proxy '%s' not readable: %s" % (api.user_proxy, str(e)))) from e
        else:
            fh.close()
def test_game_session_collect_pickup_for_self(flask_app, two_player_session, generic_pickup_category, default_generator_params, echoes_resource_database, mocker):
    """Collecting a location whose pickup targets the collecting world itself
    must produce nothing to broadcast, emit no socket event, and record no
    WorldAction row."""
    # Session admin whose current user is the fixture user 1234.
    sa = MagicMock()
    sa.get_current_user.return_value = database.User.get_by_id(1234)
    mock_emit: MagicMock = mocker.patch('flask_socketio.emit')
    mock_session_description: PropertyMock = mocker.patch('randovania.server.database.MultiplayerSession.layout_description', new_callable=PropertyMock)
    mock_get_resource_database: MagicMock = mocker.patch('randovania.server.multiplayer.world_api._get_resource_database', autospec=True)
    mock_get_pickup_target: MagicMock = mocker.patch('randovania.server.multiplayer.world_api._get_pickup_target', autospec=True)
    w1 = database.World.get_by_id(1)
    pickup = PickupEntry('A', 1, generic_pickup_category, generic_pickup_category, progression=((echoes_resource_database.item[0], 1),), generator_params=default_generator_params)
    mock_get_resource_database.return_value = echoes_resource_database
    # Target player index 0 == the world doing the collecting (self-pickup).
    mock_get_pickup_target.return_value = PickupTarget(pickup, 0)
    with flask_app.test_request_context():
        result = world_api.collect_locations(sa, w1, (0,))
    # Nothing to forward to other players, and no event emitted.
    assert (result == set())
    mock_emit.assert_not_called()
    mock_get_pickup_target.assert_called_once_with(mock_session_description.return_value, 0, 0)
    # Self-pickups are not persisted as WorldActions.
    with pytest.raises(peewee.DoesNotExist):
        database.WorldAction.get(provider=w1, location=0)
class HandlerFactory():
    """Builds a fully wired ultest Handler from a Neovim instance."""

    # NOTE(review): `create` takes neither `self` nor `cls` -- presumably a
    # stripped @staticmethod decorator; as written it is only safe to call
    # as HandlerFactory.create(vim), not on an instance. Confirm upstream.
    def create(vim: Nvim) -> 'Handler':
        client = VimClient(vim)
        file_parser = FileParser(client)
        process_manager = ProcessManager(client)
        # Grouping behaviour is controlled by the g:ultest_disable_grouping
        # vim variable, read synchronously at construction time.
        output_parser = OutputParser(client.sync_eval('g:ultest_disable_grouping'))
        runner = PositionRunner(vim=client, process_manager=process_manager, output_parser=output_parser)
        tracker = PositionTracker(file_parser=file_parser, runner=runner, vim=client)
        return Handler(client, tracker=tracker, runner=runner)
def getConfig():
    """Parse command-line options for this script.

    Returns an argparse.Namespace with:
      * output_dir   -- required output directory name.
      * weights_type -- probe-weight selector; defaults to ''.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--output_dir', required=True, type=str, help='Name of the output directory')
    arg_parser.add_argument('--weights_type', default='', type=str, help='Which probe weights to use for intervention')
    return arg_parser.parse_args()
class Composite(ScalarInnerGraphOp):
    """A scalar Op whose computation is an inner graph of scalar Ops,
    fused so the C backend can emit one code block for the whole graph."""

    # Attribute names needed to reconstruct an equivalent instance.
    init_param: tuple[(str, ...)] = ('inputs', 'outputs')

    def __init__(self, inputs, outputs, name='Composite'):
        self.name = name
        # Lazily-built pretty name; see __str__.
        self._name = None
        for i in inputs:
            assert (i not in outputs)
        if ((len(outputs) > 1) or (not any((isinstance(var.owner.op, Composite) for var in outputs)))):
            # General case: take a private copy of the subgraph so later
            # mutation of the originals cannot affect this Op.
            (inputs, outputs) = clone(inputs, outputs)
        else:
            # Single output produced by a nested Composite: flatten it by
            # rebuilding the inner Op's graph onto our own inputs.
            assert (len(outputs) == 1)
            res = pytensor.compile.rebuild_collect_shared(inputs=inputs, outputs=outputs[0].owner.inputs, copy_inputs_over=False)
            res2 = pytensor.compile.rebuild_collect_shared(inputs=outputs[0].owner.op.inputs, outputs=outputs[0].owner.op.outputs, replace=dict(zip(outputs[0].owner.op.inputs, res[1])))
            assert (len(res2[1]) == len(outputs))
            assert (len(res[0]) == len(inputs))
            assert (res[0] != inputs)
            (inputs, outputs) = (res[0], res2[1])
        (self.inputs, self.outputs) = self._cleanup_graph(inputs, outputs)
        self.inputs_type = tuple([input.type for input in self.inputs])
        self.outputs_type = tuple([output.type for output in self.outputs])
        self.nin = len(inputs)
        self.nout = len(outputs)
        super().__init__()
    def __str__(self):
        """Build (once) and return a readable name showing the inner graph."""
        if (self._name is not None):
            return self._name
        # Rename inner variables (i*/o*/t*) purely for display purposes.
        for (i, r) in enumerate(self.fgraph.inputs):
            r.name = f'i{int(i)}'
        for (i, r) in enumerate(self.fgraph.outputs):
            r.name = f'o{int(i)}'
        io = set((self.fgraph.inputs + self.fgraph.outputs))
        for (i, r) in enumerate(self.fgraph.variables):
            if ((r not in io) and (len(self.fgraph.clients[r]) > 1)):
                r.name = f't{int(i)}'
        if ((len(self.fgraph.outputs) > 1) or (len(self.fgraph.apply_nodes) > 10)):
            # Too big to pretty-print in full.
            self._name = 'Composite{...}'
        else:
            outputs_str = ', '.join([pprint(output) for output in self.fgraph.outputs])
            self._name = f'Composite{{{outputs_str}}}'
        return self._name
    def make_new_inplace(self, output_types_preference=None, name=None):
        """Return a copy of this Op with a different destroy/view preference
        (interface used by the inplace-optimization machinery)."""
        d = {k: getattr(self, k) for k in self.init_param}
        out = self.__class__(**d)
        if name:
            out.name = name
        else:
            name = out.name
        super(Composite, out).__init__(output_types_preference, name)
        return out
    # NOTE(review): accessed as `self.fgraph` (no call) throughout this class,
    # so this looks like a stripped @property decorator; confirm upstream.
    def fgraph(self):
        """Cached FunctionGraph over the inner inputs/outputs."""
        if hasattr(self, '_fgraph'):
            return self._fgraph
        fgraph = FunctionGraph(self.inputs, self.outputs)
        self._fgraph = fgraph
        return self._fgraph
    def clone_float32(self):
        # Rebuild the inner graph with float16 upgraded to float32.
        (new_ins, new_outs) = composite_f32.apply(self.fgraph)
        return Composite(new_ins, new_outs)
    def clone(self):
        (new_ins, new_outs) = composite_f32.apply(self.fgraph)
        return Composite(new_ins, new_outs)
    def output_types(self, input_types):
        """Validate the caller's input types and return the fixed output types."""
        if (tuple(input_types) != self.inputs_type):
            raise TypeError(f'Wrong types for Composite. Expected {self.inputs_type}, got {tuple(input_types)}.')
        return self.outputs_type
    def make_node(self, *inputs):
        if (tuple([i.type for i in self.inputs]) == tuple([i.type for i in inputs])):
            return super().make_node(*inputs)
        else:
            # Types differ (e.g. after upcasting): rebuild the inner graph on
            # the new inputs and delegate to a freshly typed Composite.
            assert (len(inputs) == self.nin)
            res = pytensor.compile.rebuild_collect_shared(self.outputs, replace=dict(zip(self.inputs, inputs)), rebuild_strict=False)
            cloned_inputs = [res[2][0][i] for i in inputs]
            node = Composite(cloned_inputs, res[1]).make_node(*inputs)
            return node
    def perform(self, node, inputs, output_storage):
        # Python fallback: evaluate the compiled inner-graph function.
        outputs = self.py_perform_fn(*inputs)
        for (storage, out_val) in zip(output_storage, outputs):
            storage[0] = out_val
    def grad(self, inputs, output_grads):
        raise NotImplementedError('grad is not implemented for Composite')
    # NOTE(review): used as `self.c_code_template % d` in c_code below, so this
    # also looks like a stripped @property decorator; confirm upstream.
    def c_code_template(self):
        """Build (once) the C code for the whole inner graph, with %(...)s
        placeholders for the outer input/output/node names."""
        from pytensor.link.c.interface import CLinkerType
        if hasattr(self, '_c_code'):
            return self._c_code
        # Map every inner variable to the C name it will have in the template.
        subd = dict(chain(((e, f'%(i{int(i)})s') for (i, e) in enumerate(self.fgraph.inputs)), ((e, f'%(o{int(i)})s') for (i, e) in enumerate(self.fgraph.outputs))))
        for var in self.fgraph.variables:
            if (var.owner is None):
                if (var not in self.fgraph.inputs):
                    # Orphans must be inlinable as C literals.
                    if (isinstance(var, Constant) and isinstance(var.type, CLinkerType)):
                        subd[var] = var.type.c_literal(var.data)
                    else:
                        raise ValueError('All orphans in the fgraph to Composite must be Constant, CLinkerType instances.')
            elif (any(((i.dtype == 'float16') for i in var.owner.inputs)) or any(((o.dtype == 'float16') for o in var.owner.outputs))):
                # Flag float16 usage so callers can upgrade via clone_float32.
                self.inner_float16 = True
        _c_code = '{\n'
        self.nodenames = [f'%(nodename)s_subnode{int(j)}' for (j, n) in enumerate(self.fgraph.toposort())]
        i = 0
        for (j, node) in enumerate(self.fgraph.toposort()):
            for output in node.outputs:
                if (output not in subd):
                    # Intermediate value: declare a uniquely named C temp.
                    i += 1
                    name = f'V%(id)s_tmp{int(i)}'
                    subd[output] = name
                    _c_code += f'''{output.type.dtype_specs()[1]} {name};
'''
            s = node.op.c_code(node, self.nodenames[j], [subd[input] for input in node.inputs], [subd[output] for output in node.outputs], dict(fail='%(fail)s', id=f'%(id)s_{int(j)}'))
            _c_code += s
            _c_code += '\n'
        _c_code += '}\n'
        self._c_code = _c_code
        return self._c_code
    def c_code(self, node, nodename, inames, onames, sub):
        # Substitute the actual outer variable names into the template.
        d = dict(chain(zip((f'i{int(i)}' for i in range(len(inames))), inames), zip((f'o{int(i)}' for i in range(len(onames))), onames)), **sub)
        d['nodename'] = nodename
        if ('id' not in sub):
            # Some callers (e.g. Elemwise) do not supply an id; use a dummy.
            d['id'] = '_DUMMY_ID_'
        return (self.c_code_template % d)
    def c_code_cache_version_outer(self) -> tuple[(int, ...)]:
        return (4,)
class ExactGPModel(gpytorch.models.ExactGP):
    """Exact GP regression model: constant mean plus a scaled RBF kernel."""

    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        # Evaluate the prior mean and covariance at the query points.
        prior_mean = self.mean_module(x)
        prior_covar = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(prior_mean, prior_covar)
class SingleSentenceClassificationProcessor(DataProcessor):
    """Processor for single-sentence classification (or regression) datasets.

    Holds a list of labels and InputExample objects, supports incremental
    loading from CSV/TSV files or raw text lists, and converts examples to
    padded InputFeatures (optionally as tf/pt datasets).
    """

    def __init__(self, labels=None, examples=None, mode='classification', verbose=False):
        self.labels = [] if labels is None else labels
        self.examples = [] if examples is None else examples
        self.mode = mode  # 'classification' or 'regression'
        self.verbose = verbose

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        """Integer index returns one example; a slice returns a new processor.

        Fix: the sliced processor now inherits `mode` and `verbose` from the
        parent (they were previously reset to the defaults).
        """
        if isinstance(idx, slice):
            return SingleSentenceClassificationProcessor(
                labels=self.labels, examples=self.examples[idx], mode=self.mode, verbose=self.verbose
            )
        return self.examples[idx]

    # Fix: restored @classmethod (the methods take `cls` and call `cls(**kwargs)`;
    # without the decorator, calling them on the class mis-binds the first arg).
    @classmethod
    def create_from_csv(cls, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs):
        """Build a processor pre-populated from a TSV/CSV file."""
        processor = cls(**kwargs)
        processor.add_examples_from_csv(file_name, split_name=split_name, column_label=column_label, column_text=column_text, column_id=column_id, skip_first_row=skip_first_row, overwrite_labels=True, overwrite_examples=True)
        return processor

    @classmethod
    def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
        """Build a processor from texts (or (text, label) pairs)."""
        processor = cls(**kwargs)
        processor.add_examples(texts_or_text_and_labels, labels=labels)
        return processor

    def add_examples_from_csv(self, file_name, split_name='', column_label=0, column_text=1, column_id=None, skip_first_row=False, overwrite_labels=False, overwrite_examples=False):
        """Read a TSV file and append its rows as examples; returns the examples."""
        lines = self._read_tsv(file_name)
        if skip_first_row:
            lines = lines[1:]
        texts = []
        labels = []
        ids = []
        for i, line in enumerate(lines):
            texts.append(line[column_text])
            labels.append(line[column_label])
            if column_id is not None:
                ids.append(line[column_id])
            else:
                # Synthesize a guid from the split name and row index.
                guid = f'{split_name}-{i}' if split_name else str(i)
                ids.append(guid)
        return self.add_examples(texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples)

    def add_examples(self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False):
        """Append examples (and register their labels); returns self.examples.

        Raises ValueError when `labels` or `ids` lengths do not match the texts.
        """
        if labels is not None and len(texts_or_text_and_labels) != len(labels):
            raise ValueError(f'Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}')
        if ids is not None and len(texts_or_text_and_labels) != len(ids):
            raise ValueError(f'Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}')
        if ids is None:
            ids = [None] * len(texts_or_text_and_labels)
        if labels is None:
            labels = [None] * len(texts_or_text_and_labels)
        examples = []
        added_labels = set()
        for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
            # Each item may be a bare text or a (text, label) pair.
            if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
                text, label = text_or_text_and_label
            else:
                text = text_or_text_and_label
            added_labels.add(label)
            examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
        if overwrite_examples:
            self.examples = examples
        else:
            self.examples.extend(examples)
        if overwrite_labels:
            self.labels = list(added_labels)
        else:
            self.labels = list(set(self.labels).union(added_labels))
        return self.examples

    def get_features(self, tokenizer, max_length=None, pad_on_left=False, pad_token=0, mask_padding_with_zero=True, return_tensors=None):
        """Tokenize all examples into padded InputFeatures.

        Returns a list of InputFeatures, or a tf.data.Dataset / torch
        TensorDataset when return_tensors is 'tf' / 'pt'.  Raises ValueError
        for an unknown mode or return_tensors value.
        """
        if max_length is None:
            max_length = tokenizer.max_len
        label_map = {label: i for i, label in enumerate(self.labels)}
        all_input_ids = []
        for ex_index, example in enumerate(self.examples):
            if (ex_index % 10000) == 0:
                logger.info(f'Tokenizing example {ex_index}')
            input_ids = tokenizer.encode(example.text_a, add_special_tokens=True, max_length=min(max_length, tokenizer.max_len))
            all_input_ids.append(input_ids)
        # Pad every example to the longest sequence in this batch.
        batch_length = max(len(input_ids) for input_ids in all_input_ids)
        features = []
        for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
            if (ex_index % 10000) == 0:
                logger.info(f'Writing example {ex_index}/{len(self.examples)}')
            # Real tokens are 1 (or 0 when mask_padding_with_zero is False).
            attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
            padding_length = batch_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
            else:
                input_ids = input_ids + ([pad_token] * padding_length)
                attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
            if len(input_ids) != batch_length:
                raise ValueError(f'Error with input length {len(input_ids)} vs {batch_length}')
            if len(attention_mask) != batch_length:
                raise ValueError(f'Error with input length {len(attention_mask)} vs {batch_length}')
            if self.mode == 'classification':
                label = label_map[example.label]
            elif self.mode == 'regression':
                label = float(example.label)
            else:
                raise ValueError(self.mode)
            if ex_index < 5 and self.verbose:
                logger.info('*** Example ***')
                logger.info(f'guid: {example.guid}')
                logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
                logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
                logger.info(f'label: {example.label} (id = {label})')
            features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
        if return_tensors is None:
            return features
        elif return_tensors == 'tf':
            if not is_tf_available():
                raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
            import tensorflow as tf

            def gen():
                for ex in features:
                    yield ({'input_ids': ex.input_ids, 'attention_mask': ex.attention_mask}, ex.label)
            dataset = tf.data.Dataset.from_generator(gen, ({'input_ids': tf.int32, 'attention_mask': tf.int32}, tf.int64), ({'input_ids': tf.TensorShape([None]), 'attention_mask': tf.TensorShape([None])}, tf.TensorShape([])))
            return dataset
        elif return_tensors == 'pt':
            if not is_torch_available():
                raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
            import torch
            from torch.utils.data import TensorDataset
            all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
            all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
            if self.mode == 'classification':
                all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
            elif self.mode == 'regression':
                all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
            dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
            return dataset
        else:
            raise ValueError("return_tensors should be one of 'tf' or 'pt'")
class Bottleneck_Res(nn.Module):
    """1-D residual bottleneck block.

    Residual branch: BN -> 3x1 conv -> PReLU -> strided 3x1 conv -> BN.
    Shortcut: identity-style max-pool when channels already match,
    otherwise a strided 1x1 conv projection with batch norm.
    """

    def __init__(self, in_channel, depth, stride):
        super(Bottleneck_Res, self).__init__()
        if in_channel == depth:
            # Channel counts match: only (possibly strided) downsampling needed.
            self.shortcut_layer = nn.MaxPool1d(1, stride)
        else:
            self.shortcut_layer = nn.Sequential(
                nn.Conv1d(in_channel, depth, 1, stride, bias=False),
                nn.BatchNorm1d(depth),
            )
        self.res_layer = nn.Sequential(
            nn.BatchNorm1d(in_channel),
            nn.Conv1d(in_channel, depth, 3, 1, 1, bias=False),
            nn.PReLU(depth),
            nn.Conv1d(depth, depth, 3, stride, 1, bias=False),
            nn.BatchNorm1d(depth),
        )

    def forward(self, x):
        identity = self.shortcut_layer(x)
        residual = self.res_layer(x)
        return residual + identity
# NOTE(review): these bare tuples look like stripped skip-decorator arguments
# (e.g. `@skipIf(...)`) for the class below; confirm against the original.
(is_wine(), 'hangs under wine')
(is_osx(), 'crashy on macOS')
class Tchooser(TestCase):
    """Tests for the GTK file/folder chooser helpers.  Every dialog is
    auto-answered with CANCEL, so each helper must return its empty/None
    default instead of a selection."""

    def test_choose_files(self):
        w = Gtk.Window()
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_files(w, 'title', 'action') == [])
    def test_choose_folders(self):
        w = Gtk.Window()
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_folders(w, 'title', 'action') == [])
    def test_choose_filter(self):
        # The created filter carries the given name and can be passed through.
        cf = create_chooser_filter('filter', ['*.txt'])
        assert isinstance(cf, Gtk.FileFilter)
        assert (cf.get_name() == 'filter')
        w = Gtk.Window()
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_files(w, 'title', 'action', cf) == [])
    def test_choose_target_file(self):
        # Cancelled target choosers return None, with or without a suggestion.
        w = Gtk.Window()
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_target_file(w, 'title', 'action') is None)
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_target_file(w, 'title', 'action', 'example') is None)
    def test_choose_target_folder(self):
        w = Gtk.Window()
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_target_folder(w, 'title', 'action') is None)
        with with_response(Gtk.ResponseType.CANCEL):
            assert (choose_target_folder(w, 'title', 'action', 'example') is None)
    def test_get_current_dir(self):
        path = get_current_dir()
        assert isinstance(path, fsnative)
    def test_set_current_dir(self):
        set_current_dir(fsnative('.'))
        assert (get_current_dir() == os.getcwd())
def main():
    """5-fold training driver for the two-stream (spatial + phase) RNN model.

    Reads configuration from the module-level `args`, merges the train and
    validation dictionaries, then for each fold builds the model, optimizer
    and loaders, trains with early stopping on validation CCC, and finally
    runs 5-fold prediction on the test set.
    """
    root_path = args.root_path
    label_name = args.label_name
    # Pick the pre-extracted CNN feature directory for the chosen backbone.
    if (args.cnn == 'resnet50'):
        feature_root = '/media/newssd/OMG_experiments/Extracted_features/resnet50_ferplus_features_fps=30_pool5_7x7_s1'
    elif (args.cnn == 'vgg'):
        feature_root = '/media/newssd/OMG_experiments/Extracted_features/vgg_fer_features_fps=30_pool5'
    # Derive a default experiment name from the salient hyper-parameters.
    if (len(args.store_name) == 0):
        args.store_name = '_'.join([label_name, 'cnn:{}'.format(args.cnn), 'loss_type:{}'.format(args.loss_type), 'batch_size:{}'.format(args.batch_size), 'cat_before_gru:{}'.format(args.cat_before_gru), 'freeze:{}'.format(args.freeze), 'fusion:{}'.format(args.fusion)])
    if (len(args.save_root) == 0):
        setattr(args, 'save_root', args.store_name)
    else:
        setattr(args, 'save_root', os.path.join(args.save_root, args.store_name))
    print('save experiment to :{}'.format(args.save_root))
    check_rootfolders()
    # Labels joined with '_' (e.g. arousal_valence) mean two regression targets.
    num_class = (1 if (not ('_' in args.label_name)) else 2)
    setattr(args, 'num_class', num_class)
    if (args.loss_type == 'mse'):
        criterion = nn.MSELoss().cuda()
    elif (args.loss_type == 'ccc'):
        criterion = My_loss().cuda()
    else:
        raise ValueError('Unknown loss type')
    L = args.length
    # Merge train + validation annotations; folds are re-split below.
    train_dict = pickle.load(open(args.train_dict, 'rb'))
    val_dict = pickle.load(open(args.val_dict, 'rb'))
    train_dict.update(val_dict)
    train_val_dict = copy.copy(train_dict)
    # Fixed seed so the 5-fold video permutation is reproducible.
    video_names = sorted(list(train_dict.keys()))
    np.random.seed(0)
    video_indexes = np.random.permutation(len(video_names))
    video_names = [video_names[i] for i in video_indexes]
    if args.test:
        run_5_fold_prediction_on_test_set(feature_root)
    for i in range(5):
        model = Two_Stream_RNN(mlp_hidden_units=args.hidden_units, phase_size=48, phase_channels=(2 * L), phase_hidden_size=256, cat_before_gru=args.cat_before_gru, gru_hidden=64, gru_num_layers=2, fusion=args.fusion)
        # Log parameter counts, overall and per stream.
        pytorch_total_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
        print('Total Params: {}'.format(pytorch_total_params))
        phasenet_param = sum((p.numel() for p in model.phase_net.parameters() if p.requires_grad))
        print('Temporal Stream params: {} ({:.2f})'.format(phasenet_param, (phasenet_param / float(pytorch_total_params))))
        mlp_param = sum((p.numel() for p in model.mlp.parameters() if p.requires_grad))
        print('Spatial Stream params: {} ({:.2f})'.format(mlp_param, (mlp_param / float(pytorch_total_params))))
        model.cuda()
        # Only head/fusion parameters are optimized; the backbone streams are
        # added too unless args.freeze is set.
        if args.cat_before_gru:
            params_dict = [{'params': model.rnns.parameters(), 'lr': args.lr}, {'params': model.classifier.parameters(), 'lr': args.lr}, {'params': model.fusion_module.parameters(), 'lr': args.lr}]
        else:
            params_dict = [{'params': model.rnns_spatial.parameters(), 'lr': args.lr}, {'params': model.rnns_temporal.parameters(), 'lr': args.lr}, {'params': model.classifier.parameters(), 'lr': args.lr}, {'params': model.fusion_module.parameters(), 'lr': args.lr}]
        if (not args.freeze):
            params_dict += [{'params': model.mlp.parameters(), 'lr': args.lr}, {'params': model.phase_net.parameters(), 'lr': args.lr}]
        optimizer = torch.optim.SGD(params_dict, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
        torch.cuda.empty_cache()
        cudnn.benchmark = True
        # Fold split: fold i takes the i-th fifth as validation (last fold
        # absorbs the remainder).
        length = (len(video_names) // 5)
        val_video_names = video_names[(i * length):((i + 1) * length)]
        if (i == 4):
            val_video_names = video_names[(i * length):]
        train_video_names = [name for name in video_names if (name not in val_video_names)]
        # NOTE(review): this line overrides the fold split above, so training
        # uses ALL videos (including the fold's validation videos). Looks like
        # a leftover/debug override -- confirm whether it is intentional.
        train_video_names = video_names
        train_dict = {key: train_val_dict[key] for key in train_video_names}
        val_dict = {key: train_val_dict[key] for key in val_video_names}
        train_dataset = Face_Dataset([os.path.join(root_path, 'Train'), os.path.join(root_path, 'Validation')], feature_root, train_dict, label_name, py_level=args.py_level, py_nbands=args.py_nbands, sample_rate=args.sample_rate, num_phase=L, phase_size=48, test_mode=False, return_phase=False)
        val_dataset = Face_Dataset([os.path.join(root_path, 'Train'), os.path.join(root_path, 'Validation')], feature_root, val_dict, label_name, py_level=args.py_level, py_nbands=args.py_nbands, sample_rate=args.sample_rate, num_phase=L, phase_size=48, test_mode=True, return_phase=False)
        # Batches group sequences of equal length to avoid padding.
        train_batch_sampler = SameLengthBatchSampler(train_dataset.indices_list, batch_size=args.batch_size, drop_last=True)
        val_batch_sampler = SameLengthBatchSampler(val_dataset.indices_list, batch_size=args.eval_batch_size, drop_last=True, random=False)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, pin_memory=False)
        val_loader = torch.utils.data.DataLoader(val_dataset, batch_sampler=val_batch_sampler, num_workers=args.workers, pin_memory=False)
        print('train dataset:{}'.format(len(train_dataset)))
        print('val dataset:{}'.format(len(val_dataset)))
        # Per-fold log file (note: opened 'w' and never explicitly closed).
        log = open(os.path.join(args.save_root, args.root_log, 'fold_{}.txt'.format(i)), 'w')
        output = '\n Fold: {}\n'.format(i)
        log.write(output)
        log.flush()
        best_loss = 1000
        best_ccc = (- 100)
        val_accum_epochs = 0
        for epoch in range(args.epochs):
            adjust_learning_rate(optimizer, epoch, args.lr_steps)
            (train_mean, train_std) = train(train_loader, model, criterion, optimizer, epoch, log)
            # Persist the latest training mean/std (overwritten each epoch).
            log_train_mean_std = open(os.path.join(args.save_root, args.root_log, 'mean_std_{}.txt'.format(i)), 'w')
            log_train_mean_std.write('{} {}'.format(train_mean, train_std))
            log_train_mean_std.flush()
            torch.cuda.empty_cache()
            if ((((epoch + 1) % args.eval_freq) == 0) or (epoch == (args.epochs - 1))):
                (loss_val, ccc_current_val) = validate(val_loader, model, criterion, ((epoch + 1) * len(train_loader)), log, train_mean, train_std)
                is_best_loss = (loss_val < best_loss)
                best_loss = min(loss_val, best_loss)
                is_best_ccc = (ccc_current_val > best_ccc)
                best_ccc = max(ccc_current_val, best_ccc)
                save_checkpoint({'epoch': (epoch + 1), 'state_dict': model.state_dict()}, is_best_loss, is_best_ccc, filename='fold_{}'.format(i))
                # Early stopping: count epochs without a CCC improvement.
                if (not is_best_ccc):
                    val_accum_epochs += 1
                else:
                    val_accum_epochs = 0
                if (val_accum_epochs >= args.early_stop):
                    print('validation ccc did not improve over {} epochs, stop'.format(args.early_stop))
                    break
    run_5_fold_prediction_on_test_set(feature_root)
def train(model, train_loader, test_loader, gt, logger):
    """Train an anomaly-detection model and checkpoint the best-AUC weights.

    Hyper-parameters come from the module-level `cfg`; the per-epoch work is
    delegated to the module-level `train_func` / `test_func`.  On completion
    the best state dict (by AUC on the test set) is loaded back into `model`
    and saved to disk.
    """
    if not os.path.exists(cfg.save_dir):
        os.makedirs(cfg.save_dir)
    # Two training criteria: binary cross-entropy plus a KL-divergence term.
    bce_criterion = torch.nn.BCELoss()
    kl_criterion = torch.nn.KLDivLoss(reduction='batchmean')
    optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=60, eta_min=0)
    logger.info('Model:{}\n'.format(model))
    logger.info('Optimizer:{}\n'.format(optimizer))
    # Baseline metric with randomly initialized weights.
    initial_auc, n_far = test_func(test_loader, model, gt, cfg.dataset)
    logger.info('Random initialize {}:{:.4f} FAR:{:.5f}'.format(cfg.metrics, initial_auc, n_far))
    best_weights = copy.deepcopy(model.state_dict())
    best_auc = 0.0
    far_at_best = 0.0
    started = time.time()
    for epoch in range(cfg.max_epoch):
        loss1, loss2 = train_func(train_loader, model, optimizer, bce_criterion, kl_criterion, cfg.lamda)
        scheduler.step()
        auc, far = test_func(test_loader, model, gt, cfg.dataset)
        # Keep the weights from the best epoch so far (ties go to the latest epoch).
        if auc >= best_auc:
            best_auc = auc
            far_at_best = far
            best_weights = copy.deepcopy(model.state_dict())
        logger.info('[Epoch:{}/{}]: loss1:{:.4f} loss2:{:.4f} | AUC:{:.4f} FAR:{:.5f}'.format((epoch + 1), cfg.max_epoch, loss1, loss2, auc, far))
    elapsed = time.time() - started
    model.load_state_dict(best_weights)
    # File name encodes the fractional digits of the best AUC, e.g. 0.8412 -> "..._8412.pkl".
    torch.save(model.state_dict(), cfg.save_dir + cfg.model_name + '_' + str(round(best_auc, 4)).split('.')[1] + '.pkl')
    logger.info('Training completes in {:.0f}m {:.0f}s | best {}:{:.4f} FAR:{:.5f}\n'.format(elapsed // 60, elapsed % 60, cfg.metrics, best_auc, far_at_best))
class Rule():
    """A static placement rule for windows.

    Holds one or more ``Match`` objects plus the actions to apply when a
    window matches: an optional target group, floating/intrusive flags, and
    whether rule processing should stop after a hit.
    """

    def __init__(self, match: (Match | list[Match]), group: (_Group | None)=None, float: bool=False, intrusive: bool=False, break_on_match: bool=True) -> None:
        # Normalise a single Match into a one-element list.
        self.matchlist = [match] if isinstance(match, Match) else match
        self.group = group
        self.float = float
        self.intrusive = intrusive
        self.break_on_match = break_on_match

    def matches(self, w: base.Window) -> bool:
        """Return True if the window matches any of this rule's matchers."""
        for matcher in self.matchlist:
            if w.match(matcher):
                return True
        return False

    def __repr__(self) -> str:
        actions = utils.describe_attributes(self, ['group', 'float', 'intrusive', 'break_on_match'])
        return '<Rule match=%r actions=(%s)>' % (self.matchlist, actions)
class LightMaps(QWidget):
    """Interactive slippy-map widget with a press-and-hold magnifier.

    Composites two SlippyMap layers: ``_normalMap`` at the widget's size and
    ``_largeMap`` rendered at double size / one zoom level deeper, used as the
    source for the circular magnifier overlay.  Night mode inverts colors by
    painting white with a Difference composition mode.
    """

    def __init__(self, parent=None):
        super(LightMaps, self).__init__(parent)
        # Interaction state flags.
        self.pressed = False
        self.snapped = False
        self.zoomed = False
        self.invert = False
        # Base map and the double-resolution map used by the magnifier.
        self._normalMap = SlippyMap(self)
        self._largeMap = SlippyMap(self)
        self.pressPos = QPoint()
        self.dragPos = QPoint()
        # Timer used to detect press-and-hold (see mousePressEvent/timerEvent).
        self.tapTimer = QBasicTimer()
        # Cached pixmaps for the magnifier contents and its ring mask.
        self.zoomPixmap = QPixmap()
        self.maskPixmap = QPixmap()
        self._normalMap.updated.connect(self.updateMap)
        self._largeMap.updated.connect(self.update)

    def setCenter(self, lat, lng):
        """Re-center both map layers on the given latitude/longitude."""
        self._normalMap.latitude = lat
        self._normalMap.longitude = lng
        self._normalMap.invalidate()
        self._largeMap.invalidate()

    def toggleNightMode(self):
        """Flip color inversion and repaint."""
        self.invert = (not self.invert)
        self.update()

    def updateMap(self, r):
        # Repaint only the region the base map reported as changed.
        self.update(r)

    def activateZoom(self):
        """Enter magnifier mode: sync the large map one zoom level deeper."""
        self.zoomed = True
        self.tapTimer.stop()
        self._largeMap.zoom = (self._normalMap.zoom + 1)
        self._largeMap.width = (self._normalMap.width * 2)
        self._largeMap.height = (self._normalMap.height * 2)
        self._largeMap.latitude = self._normalMap.latitude
        self._largeMap.longitude = self._normalMap.longitude
        self._largeMap.invalidate()
        self.update()

    def resizeEvent(self, event):
        # Keep the base map at widget size and the zoom source at double size.
        self._normalMap.width = self.width()
        self._normalMap.height = self.height()
        self._normalMap.invalidate()
        self._largeMap.width = (self._normalMap.width * 2)
        self._largeMap.height = (self._normalMap.height * 2)
        self._largeMap.invalidate()

    def paintEvent(self, event):
        """Paint the base map, then the magnifier overlay, then night-mode inversion."""
        p = QPainter()
        p.begin(self)
        self._normalMap.render(p, event.rect())
        p.setPen(Qt.black)
        p.drawText(self.rect(), (Qt.AlignBottom | Qt.TextWordWrap), 'Map data CCBYSA 2009 OpenStreetMap.org contributors')
        p.end()
        if self.zoomed:
            # Magnifier geometry: diameter capped at MAX_MAGNIFIER and at
            # two thirds of the smaller widget dimension.
            # NOTE(review): radius is a float here (true division); some Qt
            # bindings require ints for QPoint/QSize — confirm against the
            # binding in use.
            dim = min(self.width(), self.height())
            magnifierSize = min(MAX_MAGNIFIER, ((dim * 2) / 3))
            radius = (magnifierSize / 2)
            ring = (radius - 15)
            box = QSize(magnifierSize, magnifierSize)
            # Rebuild the ring mask only when the magnifier size changed.
            if (self.maskPixmap.size() != box):
                self.maskPixmap = QPixmap(box)
                self.maskPixmap.fill(Qt.transparent)
                g = QRadialGradient()
                g.setCenter(radius, radius)
                g.setFocalPoint(radius, radius)
                g.setRadius(radius)
                g.setColorAt(1.0, QColor(255, 255, 255, 0))
                g.setColorAt(0.5, QColor(128, 128, 128, 255))
                mask = QPainter(self.maskPixmap)
                mask.setRenderHint(QPainter.Antialiasing)
                mask.setCompositionMode(QPainter.CompositionMode_Source)
                mask.setBrush(g)
                mask.setPen(Qt.NoPen)
                mask.drawRect(self.maskPixmap.rect())
                # Punch a transparent hole so the magnified map shows through.
                mask.setBrush(QColor(Qt.transparent))
                mask.drawEllipse(g.center(), ring, ring)
                mask.end()
            # Offset the lens above the finger/cursor position.
            center = (self.dragPos - QPoint(0, radius))
            center += QPoint(0, (radius / 2))
            corner = (center - QPoint(radius, radius))
            # Source rectangle in the double-resolution map's coordinates.
            xy = ((center * 2) - QPoint(radius, radius))
            if (self.zoomPixmap.size() != box):
                self.zoomPixmap = QPixmap(box)
                self.zoomPixmap.fill(Qt.lightGray)
            if True:
                # Render the zoomed-in tile content into the lens pixmap.
                p = QPainter(self.zoomPixmap)
                p.translate((- xy))
                self._largeMap.render(p, QRect(xy, box))
                p.end()
            # Clip to a circle, draw the lens content, then the ring mask on top.
            clipPath = QPainterPath()
            clipPath.addEllipse(QPointF(center), ring, ring)
            p = QPainter(self)
            p.setRenderHint(QPainter.Antialiasing)
            p.setClipPath(clipPath)
            p.drawPixmap(corner, self.zoomPixmap)
            p.setClipping(False)
            p.drawPixmap(corner, self.maskPixmap)
            p.setPen(Qt.gray)
            p.drawPath(clipPath)
        if self.invert:
            # Night mode: difference against white inverts every pixel.
            p = QPainter(self)
            p.setCompositionMode(QPainter.CompositionMode_Difference)
            p.fillRect(event.rect(), Qt.white)
            p.end()

    def timerEvent(self, event):
        # Press-and-hold elapsed without release: activate the magnifier.
        if (not self.zoomed):
            self.activateZoom()
        self.update()

    def mousePressEvent(self, event):
        """Start tracking a left-button press and arm the hold timer."""
        if (event.buttons() != Qt.LeftButton):
            return
        self.pressed = self.snapped = True
        self.pressPos = self.dragPos = event.pos()
        self.tapTimer.stop()
        self.tapTimer.start(HOLD_TIME, self)

    def mouseMoveEvent(self, event):
        """Pan the map, or move the magnifier lens when zoomed."""
        if (not event.buttons()):
            return
        if (not self.zoomed):
            if ((not self.pressed) or (not self.snapped)):
                # Free drag: pan the base map by the cursor delta.
                delta = (event.pos() - self.pressPos)
                self.pressPos = event.pos()
                self._normalMap.pan(delta)
                return
            else:
                # Still "snapped" to the press point: a small movement keeps
                # the hold gesture alive; exceeding the threshold cancels it.
                threshold = 10
                delta = (event.pos() - self.pressPos)
                if self.snapped:
                    self.snapped &= (delta.x() < threshold)
                    self.snapped &= (delta.y() < threshold)
                    self.snapped &= (delta.x() > (- threshold))
                    self.snapped &= (delta.y() > (- threshold))
                if (not self.snapped):
                    self.tapTimer.stop()
        else:
            # Zoomed: dragging moves the magnifier, not the map.
            self.dragPos = event.pos()
            self.update()

    def mouseReleaseEvent(self, event):
        # Releasing always leaves magnifier mode.
        self.zoomed = False
        self.update()

    def keyPressEvent(self, event):
        """Arrow keys pan the map (or the lens when zoomed); Z/Select toggles zoom."""
        if (not self.zoomed):
            if (event.key() == Qt.Key_Left):
                self._normalMap.pan(QPoint(20, 0))
            if (event.key() == Qt.Key_Right):
                self._normalMap.pan(QPoint((- 20), 0))
            if (event.key() == Qt.Key_Up):
                self._normalMap.pan(QPoint(0, 20))
            if (event.key() == Qt.Key_Down):
                self._normalMap.pan(QPoint(0, (- 20)))
            if ((event.key() == Qt.Key_Z) or (event.key() == Qt.Key_Select)):
                # Open the magnifier centered on the widget.
                self.dragPos = QPoint((self.width() / 2), (self.height() / 2))
                self.activateZoom()
        else:
            if ((event.key() == Qt.Key_Z) or (event.key() == Qt.Key_Select)):
                self.zoomed = False
                self.update()
            delta = QPoint(0, 0)
            if (event.key() == Qt.Key_Left):
                delta = QPoint((- 15), 0)
            if (event.key() == Qt.Key_Right):
                delta = QPoint(15, 0)
            if (event.key() == Qt.Key_Up):
                delta = QPoint(0, (- 15))
            if (event.key() == Qt.Key_Down):
                delta = QPoint(0, 15)
            if (delta != QPoint(0, 0)):
                self.dragPos += delta
                self.update()
def load_x963_vectors(vector_data):
    """Load ANSI X9.63 KDF test vectors (NIST CAVP response-file format).

    `vector_data` is an iterable of text lines.  Section headers in square
    brackets set the hash name and the bit lengths that apply to every
    following COUNT block; each COUNT block contributes one dict with keys
    ``hash``, ``count``, ``shared_secret_length``, ``sharedinfo_length``,
    ``key_data_length``, ``Z``, optionally ``sharedinfo`` and ``key_data``.
    Lengths are asserted against the hex-string lengths in the file.
    """
    parsed = []
    hashname = None
    current = {}
    for raw in vector_data:
        stripped = raw.strip()
        if stripped.startswith('[SHA'):
            # New hash section: reset the per-section bit lengths.
            hashname = stripped[1:-1]
            shared_secret_len = shared_info_len = key_data_len = 0
        elif stripped.startswith('[shared secret length'):
            shared_secret_len = int(stripped[1:-1].split('=')[1].strip())
        elif stripped.startswith('[SharedInfo length'):
            shared_info_len = int(stripped[1:-1].split('=')[1].strip())
        elif stripped.startswith('[key data length'):
            key_data_len = int(stripped[1:-1].split('=')[1].strip())
        elif stripped.startswith('COUNT'):
            # Start of one vector: record the section-level metadata.
            current['hash'] = hashname
            current['count'] = int(stripped.split('=')[1].strip())
            current['shared_secret_length'] = shared_secret_len
            current['sharedinfo_length'] = shared_info_len
            current['key_data_length'] = key_data_len
        elif stripped.startswith('Z'):
            current['Z'] = stripped.split('=')[1].strip()
            assert current['Z'] is not None
            # Bits -> bytes (rounded up) -> hex characters.
            assert ((shared_secret_len + 7) // 8) * 2 == len(current['Z'])
        elif stripped.startswith('SharedInfo'):
            if shared_info_len != 0:
                current['sharedinfo'] = stripped.split('=')[1].strip()
                assert current['sharedinfo'] is not None
                silen = len(current['sharedinfo'])
                assert ((shared_info_len + 7) // 8) * 2 == silen
        elif stripped.startswith('key_data'):
            # key_data is the last field of a vector: emit and reset.
            current['key_data'] = stripped.split('=')[1].strip()
            assert current['key_data'] is not None
            assert ((key_data_len + 7) // 8) * 2 == len(current['key_data'])
            parsed.append(current)
            current = {}
    return parsed
class MMapIndexedDatasetBuilder(object):
    """Incrementally writes token tensors into a flat binary data file while
    tracking per-item sizes and document boundaries for the companion index
    (written by :meth:`finalize`)."""

    def __init__(self, out_file, dtype=np.int64):
        # Raw data stream; the index metadata is kept in memory until finalize().
        self._data_file = open(out_file, 'wb')
        self._dtype = dtype
        self._sizes = []
        self._doc_idx = [0]

    def add_item(self, tensor):
        """Append one tensor's elements (cast to the target dtype) to the data file."""
        arr = np.array(tensor.numpy(), dtype=self._dtype)
        self._data_file.write(arr.tobytes(order='C'))
        self._sizes.append(arr.size)

    def end_document(self):
        """Mark a document boundary at the current item count."""
        self._doc_idx.append(len(self._sizes))

    def merge_file_(self, another_file):
        """Append another dataset: take its sizes from its index, raw-copy its data."""
        index = MMapIndexedDataset.Index(index_file_path(another_file))
        assert index.dtype == self._dtype
        self._sizes.extend(index.sizes)
        with open(data_file_path(another_file), 'rb') as src:
            shutil.copyfileobj(src, self._data_file)

    def finalize(self, index_file):
        """Close the data file and write the index (sizes + doc boundaries)."""
        self._data_file.close()
        with MMapIndexedDataset.Index.writer(index_file, self._dtype) as writer:
            writer.write(self._sizes, self._doc_idx)
def setup_logging(training_args):
    """Configure logging for (possibly distributed) training.

    Routes everything through a stdout StreamHandler, logs at INFO on the
    main process and WARN on other ranks, and enables explicit formatting
    for the HuggingFace `transformers` loggers on the main process.
    """
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    # Fix: the original concatenated the two halves without a separator,
    # producing e.g. "n_gpu: 1distributed training: True".
    logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f', distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
class _ExtractInfo():
    """Holds information about the extract-method/variable refactoring to be
    performed: the selected region, its logical-line-aligned bounds, and the
    enclosing scope.

    Fix: the zero-argument accessors below are used as attributes elsewhere
    in this class (`self.logical_lines...` in `_init_parts`, `if self.global_:`
    in `scope_indents`, `self.extracted` in `_parsed_extracted`), so they must
    be properties; the `@property` decorators had been dropped.
    """

    def __init__(self, project, resource, start, end, new_name, variable, similar, make_global):
        self.project = project
        self.resource = resource
        self.pymodule = project.get_pymodule(resource)
        self.global_scope = self.pymodule.get_scope()
        self.source = self.pymodule.source_code
        self.lines = self.pymodule.lines
        self.new_name = new_name
        self.variable = variable
        self.similar = similar
        self._init_parts(start, end)
        self.kind = None
        self._init_scope()
        self.make_global = make_global

    def _init_parts(self, start, end):
        # Snap the raw offsets to line ends, then widen to whole logical lines.
        self.region = (self._choose_closest_line_end(start), self._choose_closest_line_end(end, end=True))
        start = self.logical_lines.logical_line_in(self.lines.get_line_number(self.region[0]))[0]
        end = self.logical_lines.logical_line_in(self.lines.get_line_number(self.region[1]))[1]
        self.region_lines = (start, end)
        self.lines_region = (self.lines.get_line_start(self.region_lines[0]), self.lines.get_line_end(self.region_lines[1]))

    @property
    def logical_lines(self):
        return self.pymodule.logical_lines

    def _init_scope(self):
        # The scope the extracted code lives in; if the region starts exactly
        # on a scope's first line, use the parent scope instead.
        start_line = self.region_lines[0]
        scope = self.global_scope.get_inner_scope_for_line(start_line)
        if ((scope.get_kind() != 'Module') and (scope.get_start() == start_line)):
            scope = scope.parent
        self.scope = scope
        self.scope_region = self._get_scope_region(self.scope)

    def _get_scope_region(self, scope):
        return (self.lines.get_line_start(scope.get_start()), (self.lines.get_line_end(scope.get_end()) + 1))

    def _choose_closest_line_end(self, offset, end=False):
        # Snap an offset surrounded only by whitespace to the nearest line boundary.
        lineno = self.lines.get_line_number(offset)
        line_start = self.lines.get_line_start(lineno)
        line_end = self.lines.get_line_end(lineno)
        if (self.source[line_start:offset].strip() == ''):
            if end:
                return (line_start - 1)
            else:
                return line_start
        elif (self.source[offset:line_end].strip() == ''):
            return min(line_end, len(self.source))
        return offset

    @property
    def one_line(self):
        """True if the region is a sub-expression within a single logical line."""
        return ((self.region != self.lines_region) and (self.logical_lines.logical_line_in(self.region_lines[0]) == self.logical_lines.logical_line_in(self.region_lines[1])))

    @property
    def global_(self):
        """True if extracting at module level."""
        return (self.scope.parent is None)

    @property
    def method(self):
        """True if extracting inside a class body (a method)."""
        return ((self.scope.parent is not None) and (self.scope.parent.get_kind() == 'Class'))

    @property
    def indents(self):
        return sourceutils.get_indents(self.pymodule.lines, self.region_lines[0])

    @property
    def scope_indents(self):
        if self.global_:
            return 0
        return sourceutils.get_indents(self.pymodule.lines, self.scope.get_start())

    @property
    def extracted(self):
        """The exact source text of the selected region."""
        return self.source[self.region[0]:self.region[1]]

    # Lazily parsed AST of the extracted text, cached per instance.
    _cached_parsed_extracted = None

    @property
    def _parsed_extracted(self):
        if (self._cached_parsed_extracted is None):
            self._cached_parsed_extracted = _parse_text(self.extracted)
        return self._cached_parsed_extracted

    _returned = None

    @property
    def returned(self):
        """True if the extracted code ends with a return/yield-like statement."""
        if (self._returned is None):
            self._returned = usefunction._returns_last(self._parsed_extracted)
        return self._returned

    _returning_named_expr = None

    @property
    def returning_named_expr(self):
        """True if the extracted code ends with a named (walrus) expression."""
        if (self._returning_named_expr is None):
            self._returning_named_expr = usefunction._namedexpr_last(self._parsed_extracted)
        return self._returning_named_expr

    _returning_generator = None

    @property
    def returning_generator_exp(self):
        """True if the extracted code is a single generator expression."""
        if (self._returning_generator is None):
            self._returning_generator = (isinstance(self._parsed_extracted, ast.Module) and isinstance(self._parsed_extracted.body[0], ast.Expr) and isinstance(self._parsed_extracted.body[0].value, ast.GeneratorExp))
        return self._returning_generator
class KnowValues(unittest.TestCase):
    """Regression tests for symmetry-adapted orbital construction.

    Each test builds a geometry whose point group is implied by the method
    name and checks the total number of symmetry-adapted basis functions
    returned by the module-level `get_so(atoms, basis[, cart])`.
    """

    def test_symm_orb_h2o(self):
        """Water-like C2v geometry; spherical and cartesian counts."""
        atoms = [['O', (1.0, 0.0, 0.0)], [1, (0.0, (- 0.757), 0.587)], [1, (0.0, 0.757, 0.587)]]
        # NOTE(review): both elements deliberately use the carbon cc-pVQZ set.
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'O': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 165)
        self.assertEqual(get_so(atoms, basis, 1)[0], 210)

    def test_symm_orb_d2h(self):
        """Octahedron-like arrangement with D2h symmetry."""
        atoms = [[1, (0.0, 0.0, 0.0)], [1, (1.0, 0.0, 0.0)], [1, (0.0, 1.0, 0.0)], [1, (0.0, 0.0, 1.0)], [1, ((- 1), 0.0, 0.0)], [1, (0.0, (- 1.0), 0.0)], [1, (0.0, 0.0, (- 1.0))]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 385)
        self.assertEqual(get_so(atoms, basis, 1)[0], 490)

    def test_symm_orb_c2v(self):
        """C2v geometry."""
        atoms = [[1, (1.0, 0.0, 2.0)], [2, (0.0, 1.0, 0.0)], [1, ((- 2.0), 0.0, (- 1.0))], [2, (0.0, (- 1.0), 0.0)]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 220)

    def test_symm_orb_c2h(self):
        """Two C2h geometries, spherical and cartesian counts."""
        atoms = [[1, (1.0, 0.0, 2.0)], [2, (0.0, 1.0, 0.0)], [1, ((- 1.0), 0.0, (- 2.0))], [2, (0.0, (- 1.0), 0.0)]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 220)
        self.assertEqual(get_so(atoms, basis, 1)[0], 280)
        atoms = [[1, (1.0, 0.0, 1.0)], [1, (1.0, 0.0, (- 1.0))], [2, (0.0, 0.0, 2.0)], [2, (2.0, 0.0, (- 2.0))], [3, (1.0, 1.0, 0.0)], [3, (1.0, (- 1.0), 0.0)]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C'), 'Li': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 330)
        self.assertEqual(get_so(atoms, basis, 1)[0], 420)

    def test_symm_orb_d2(self):
        """D2 geometry."""
        atoms = [[1, (1.0, 0.0, 1.0)], [1, (1.0, 0.0, (- 1.0))], [2, (0.0, 0.0, 2.0)], [2, (2.0, 0.0, 2.0)], [2, (1.0, 1.0, (- 2.0))], [2, (1.0, (- 1.0), (- 2.0))]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 330)
        self.assertEqual(get_so(atoms, basis, 1)[0], 420)

    def test_symm_orb_ci(self):
        """Inversion-symmetric (Ci) geometry."""
        atoms = [[1, (1.0, 0.0, 0.0)], [2, (0.0, 1.0, 0.0)], [3, (0.0, 0.0, 1.0)], [4, (0.5, 0.5, 0.5)], [1, ((- 1.0), 0.0, 0.0)], [2, (0.0, (- 1.0), 0.0)], [3, (0.0, 0.0, (- 1.0))], [4, ((- 0.5), (- 0.5), (- 0.5))]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C'), 'Li': gto.basis.load('cc_pvqz', 'C'), 'Be': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 440)

    def test_symm_orb_cs(self):
        """Mirror-plane (Cs) geometry."""
        atoms = [[1, (1.0, 0.0, 2.0)], [2, (1.0, 0.0, 0.0)], [3, (2.0, 0.0, (- 1.0))], [4, (0.0, 0.0, 1.0)]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C'), 'Li': gto.basis.load('cc_pvqz', 'C'), 'Be': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 220)

    def test_symm_orb_c1(self):
        """No symmetry (C1)."""
        atoms = [[1, (1.0, 0.0, 0.0)], [2, (0.0, 1.0, 0.0)], [3, (0.0, 0.0, 1.0)], [4, (0.5, 0.5, 0.5)]]
        basis = {'H': gto.basis.load('cc_pvqz', 'C'), 'He': gto.basis.load('cc_pvqz', 'C'), 'Li': gto.basis.load('cc_pvqz', 'C'), 'Be': gto.basis.load('cc_pvqz', 'C')}
        self.assertEqual(get_so(atoms, basis)[0], 220)

    def test_symm_orb_c3v_as_cs(self):
        """A C3v complex detected as Cs; checks per-irrep orbital counts."""
        atoms = [['Fe', (0.0, 0.0, 0.015198)], ['C', (0.0, 0.0, (- 1.938396))], ['C', (0.0, (- 1.394127), (- 1.614155))], ['C', ((- 1.207349), 0.697064, (- 1.614155))], ['C', (1.207349, 0.697064, (- 1.614155))], ['H', ((- 0.922915), (- 1.965174), (- 1.708739))], ['H', (0.922915, (- 1.965174), (- 1.708739))], ['H', ((- 1.240433), 1.781855, (- 1.708739))], ['H', ((- 2.163348), 0.183319, (- 1.708739))], ['H', (2.163348, 0.183319, (- 1.708739))], ['H', (1.240433, 1.781855, (- 1.708739))], ['C', (0.0, 1.558543, 0.88711)], ['C', (1.349738, (- 0.779272), 0.88711)], ['C', ((- 1.349738), (- 0.779272), 0.88711)], ['O', (0.0, 2.572496, 1.441607)], ['O', (2.227847, (- 1.286248), 1.441607)], ['O', ((- 2.227847), (- 1.286248), 1.441607)]]
        basis = {'Fe': gto.basis.load('def2svp', 'C'), 'C': gto.basis.load('def2svp', 'C'), 'H': gto.basis.load('def2svp', 'C'), 'O': gto.basis.load('def2svp', 'C')}
        (n, so) = get_so(atoms, basis)
        self.assertEqual([c.shape[1] for c in so], [134, 104])

    def test_symm_orb_so3(self):
        """Single atom: full SO(3) symmetry; checks column ordering of the SO matrix."""
        atoms = [['Si', (0, 0, 0)]]
        basis = {'Si': gto.basis.load('ccpvtz', 'Si')}
        (n, so) = get_so(atoms, basis)
        (idx, idy) = numpy.where((numpy.hstack(so) != 0))
        self.assertEqual(idy.argsort().tolist(), [0, 1, 2, 3, 4, 6, 9, 12, 15, 7, 10, 13, 16, 5, 8, 11, 14, 17, 22, 18, 23, 19, 24, 20, 25, 21, 26, 27, 28, 29, 30, 31, 32, 33])

    def test_so3_symb2id(self):
        """SO(3) irrep symbol -> id mapping, with an empty cache."""
        ref = symm.basis._SO3_SYMB2ID
        with lib.temporary_env(symm.basis, _SO3_SYMB2ID={}):
            for s in ['p+1', 'd+0', 'f-2', 'g+4', 'f+0']:
                self.assertEqual(ref[s], symm.basis.so3_irrep_symb2id(s))
            self.assertRaises(KeyError, symm.basis.so3_irrep_symb2id, 'k-8')

    def test_so3_id2symb(self):
        """SO(3) irrep id -> symbol mapping, with an empty cache."""
        ref = symm.basis._SO3_ID2SYMB
        with lib.temporary_env(symm.basis, _SO3_ID2SYMB={}):
            for s in [200, 202, 314, 317, 421, 420]:
                self.assertEqual(ref[s], symm.basis.so3_irrep_id2symb(s))
            self.assertRaises(KeyError, symm.basis.so3_irrep_id2symb, 746)
            self.assertRaises(KeyError, symm.basis.so3_irrep_id2symb, 729)
def find_python_executable(python: Optional[str]=None) -> str:
    """Return an absolute path to a Python executable.

    Resolution order: the explicit `python` argument, then the
    FLIT_INSTALL_PYTHON environment variable, then the running interpreter.
    A relative name is looked up on PATH and asked for its own
    `sys.executable`.  Raises PythonNotFoundError if the name cannot be
    resolved or interrogated.
    """
    # Empty string counts as "not given", same as None.
    python = python or os.environ.get('FLIT_INSTALL_PYTHON')
    if not python:
        return sys.executable
    if os.path.isabs(python):
        return python
    resolved = shutil.which(python)
    if resolved is None:
        raise PythonNotFoundError('Unable to resolve Python executable {!r}'.format(python))
    try:
        # Ask the interpreter itself; handles launcher shims and symlinks.
        return subprocess.check_output([resolved, '-c', 'import sys; print(sys.executable)'], universal_newlines=True).strip()
    except Exception as e:
        message = '{} occurred trying to find the absolute filepath of Python executable {!r} ({!r})'.format(e.__class__.__name__, python, resolved)
        raise PythonNotFoundError(message) from e
def get_iterator(args):
    """Build a feature iterator over the audio files in a fairseq-style manifest.

    Reads `<args.data>/<args.split>.tsv` (first line = root directory, following
    lines = tab-separated relative paths) and returns `(iterate, num)` where
    `iterate()` is a generator of wav2vec features and `num` is the file count.
    """
    manifest = osp.join(args.data, args.split) + '.tsv'
    with open(manifest, 'r') as fh:
        rows = fh.read().split('\n')
    root = rows.pop(0).strip()
    # First tab-separated column holds the relative audio path.
    paths = [osp.join(root, row.split('\t')[0]) for row in rows if len(row) > 0]
    total = len(paths)
    reader = Wav2VecFeatureReader(args.checkpoint)

    def iterate():
        for path in paths:
            yield reader.get_feats(path)

    return (iterate, total)
class ErrorHandlerTests(unittest.IsolatedAsyncioTestCase):
    """Tests for the general command error handler (`on_command_error`).

    Fix: the last test method expects a patched `log` object to be injected,
    but the `@patch` decorator had been reduced to a bare string expression,
    so unittest would call the method without the `log_mock` argument.  The
    decorator is restored via `unittest.mock.patch`.
    """

    def setUp(self):
        self.bot = MockBot()
        self.ctx = MockContext(bot=self.bot)
        self.cog = error_handler.ErrorHandler(self.bot)

    async def test_error_handler_already_handled(self):
        """An error flagged as handled must be ignored silently."""
        self.ctx.reset_mock()
        error = errors.CommandError()
        error.handled = 'foo'
        self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
        self.ctx.send.assert_not_awaited()

    async def test_error_handler_command_not_found_error_not_invoked_by_handler(self):
        """CommandNotFound falls through try_silence and then try_get_tag."""
        error = errors.CommandNotFound()
        test_cases = ({'try_silence_return': True, 'called_try_get_tag': False}, {'try_silence_return': False, 'called_try_get_tag': False}, {'try_silence_return': False, 'called_try_get_tag': True})
        self.cog.try_silence = AsyncMock()
        self.cog.try_get_tag = AsyncMock()
        self.cog.try_run_fixed_codeblock = AsyncMock(return_value=False)
        for case in test_cases:
            with self.subTest(try_silence_return=case['try_silence_return'], try_get_tag=case['called_try_get_tag']):
                self.ctx.reset_mock()
                self.cog.try_silence.reset_mock(return_value=True)
                self.cog.try_get_tag.reset_mock()
                self.ctx.invoked_from_error_handler = False
                self.cog.try_silence.return_value = case['try_silence_return']
                self.ctx.channel.id = 1234
                self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
                self.assertTrue(self.ctx.invoked_from_error_handler)
                if case['try_silence_return']:
                    # A successful silence attempt short-circuits tag lookup.
                    self.cog.try_get_tag.assert_not_awaited()
                    self.cog.try_silence.assert_awaited_once()
                else:
                    self.cog.try_silence.assert_awaited_once()
                    self.cog.try_get_tag.assert_awaited_once()
                self.ctx.send.assert_not_awaited()

    async def test_error_handler_command_not_found_error_invoked_by_handler(self):
        """Re-entrant invocations must not recurse into the fallbacks."""
        ctx = MockContext(bot=self.bot, invoked_from_error_handler=True)
        self.cog.try_silence = AsyncMock()
        self.cog.try_get_tag = AsyncMock()
        self.cog.try_run_fixed_codeblock = AsyncMock()
        error = errors.CommandNotFound()
        self.assertIsNone((await self.cog.on_command_error(ctx, error)))
        self.cog.try_silence.assert_not_awaited()
        self.cog.try_get_tag.assert_not_awaited()
        self.cog.try_run_fixed_codeblock.assert_not_awaited()
        self.ctx.send.assert_not_awaited()

    async def test_error_handler_user_input_error(self):
        """UserInputError is delegated to handle_user_input_error."""
        self.ctx.reset_mock()
        self.cog.handle_user_input_error = AsyncMock()
        error = errors.UserInputError()
        self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
        self.cog.handle_user_input_error.assert_awaited_once_with(self.ctx, error)

    async def test_error_handler_check_failure(self):
        """CheckFailure is delegated to handle_check_failure."""
        self.ctx.reset_mock()
        self.cog.handle_check_failure = AsyncMock()
        error = errors.CheckFailure()
        self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
        self.cog.handle_check_failure.assert_awaited_once_with(self.ctx, error)

    async def test_error_handler_command_on_cooldown(self):
        """Cooldown errors are sent straight back to the invoking channel."""
        self.ctx.reset_mock()
        error = errors.CommandOnCooldown(10, 9, type=None)
        self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
        self.ctx.send.assert_awaited_once_with(error)

    async def test_error_handler_command_invoke_error(self):
        """CommandInvokeError is unwrapped and dispatched by original type."""
        self.cog.handle_api_error = AsyncMock()
        self.cog.handle_unexpected_error = AsyncMock()
        test_cases = ({'args': (self.ctx, errors.CommandInvokeError(ResponseCodeError(AsyncMock()))), 'expect_mock_call': self.cog.handle_api_error}, {'args': (self.ctx, errors.CommandInvokeError(TypeError)), 'expect_mock_call': self.cog.handle_unexpected_error}, {'args': (self.ctx, errors.CommandInvokeError(LockedResourceError('abc', 'test'))), 'expect_mock_call': 'send'}, {'args': (self.ctx, errors.CommandInvokeError(InvalidInfractedUserError(self.ctx.author))), 'expect_mock_call': 'send'})
        for case in test_cases:
            with self.subTest(args=case['args'], expect_mock_call=case['expect_mock_call']):
                self.ctx.send.reset_mock()
                self.assertIsNone((await self.cog.on_command_error(*case['args'])))
                if (case['expect_mock_call'] == 'send'):
                    self.ctx.send.assert_awaited_once()
                else:
                    case['expect_mock_call'].assert_awaited_once_with(self.ctx, case['args'][1].original)

    async def test_error_handler_conversion_error(self):
        """ConversionError is unwrapped and dispatched by original type."""
        self.cog.handle_api_error = AsyncMock()
        self.cog.handle_unexpected_error = AsyncMock()
        cases = ({'error': errors.ConversionError(AsyncMock(), ResponseCodeError(AsyncMock())), 'mock_function_to_call': self.cog.handle_api_error}, {'error': errors.ConversionError(AsyncMock(), TypeError), 'mock_function_to_call': self.cog.handle_unexpected_error})
        for case in cases:
            with self.subTest(**case):
                self.assertIsNone((await self.cog.on_command_error(self.ctx, case['error'])))
                case['mock_function_to_call'].assert_awaited_once_with(self.ctx, case['error'].original)

    async def test_error_handler_unexpected_errors(self):
        """Unrecognised error types go to handle_unexpected_error."""
        self.cog.handle_unexpected_error = AsyncMock()
        errs = (errors.ExtensionError(name='foo'),)
        for err in errs:
            with self.subTest(error=err):
                self.cog.handle_unexpected_error.reset_mock()
                self.assertIsNone((await self.cog.on_command_error(self.ctx, err)))
                self.cog.handle_unexpected_error.assert_awaited_once_with(self.ctx, err)

    # Restored decorator: injects the patched module logger as `log_mock`.
    @unittest.mock.patch('bot.exts.backend.error_handler.log')
    async def test_error_handler_other_errors(self, log_mock):
        """Errors that need no user-facing handling are only logged."""
        error = errors.DisabledCommand()
        self.assertIsNone((await self.cog.on_command_error(self.ctx, error)))
        log_mock.debug.assert_called_once()
class MPNN(nn.Module):
    """Message-passing block: `n_layers` rounds of edge-conditioned
    convolution (NNConv) whose node messages are folded into a shared GRU
    state, returning updated node features."""

    def __init__(self, n_node_hidden, n_edge_hidden, n_layers):
        super().__init__()
        self.n_layers = n_layers
        # Maps each edge feature to an (n_node_hidden x n_node_hidden) message matrix.
        edge_net = nn.Sequential(
            nn.Linear(n_edge_hidden, n_edge_hidden),
            nn.ReLU(),
            nn.Linear(n_edge_hidden, n_node_hidden * n_node_hidden),
        )
        self.conv = NNConv(n_node_hidden, n_node_hidden, edge_net, aggregator_type='mean', bias=False)
        self.gru = nn.GRU(n_node_hidden, n_node_hidden)

    def forward(self, g, h_node, h_edge):
        """Run `n_layers` message-passing steps; returns node features."""
        hidden = h_node.unsqueeze(0)
        for _step in range(self.n_layers):
            message = F.relu(self.conv(g, h_node, h_edge))
            h_node, hidden = self.gru(message.unsqueeze(0), hidden)
            h_node = h_node.squeeze(0)
        return h_node
class MIMOSA_Optimizer(BaseOptimizer):
    """MIMOSA molecule optimizer: iteratively mutates a population of SMILES
    with a pretrained GNN proposal model, scores candidates with the oracle,
    and re-selects a diverse population via DPP."""

    def __init__(self, args=None):
        super().__init__(args)
        self.model_name = 'mimosa'

    def _optimize(self, oracle, config):
        """Run the optimization loop until the oracle budget or convergence.

        `config` keys used: population_size, lamb, train_epoch,
        offspring_size, train_data_size.
        """
        self.oracle.assign_evaluator(oracle)
        all_smiles_score_list = []
        model_ckpt = os.path.join(path_here, 'pretrained_model/GNN.ckpt')
        # NOTE(review): torch.load unpickles arbitrary objects — only safe for
        # trusted checkpoints such as this bundled one.
        gnn = torch.load(model_ckpt)
        population_size = config['population_size']
        lamb = config['lamb']
        # Small hand-picked scaffolds used to seed the population.
        start_smiles_lst = ['C1(N)=NC=CC=N1', 'C1(C)=NC=CC=N1', 'C1(C)=CC=CC=C1', 'C1(N)=CC=CC=C1', 'CC', 'C1(C)CCCCC1']
        # Warm start: score 1000 random SMILES, fine-tune the GNN on the top 500.
        shuffle(self.all_smiles)
        warmstart_smiles_lst = self.all_smiles[:1000]
        warmstart_smiles_score = self.oracle(warmstart_smiles_lst)
        warmstart_smiles_score_lst = list(zip(warmstart_smiles_lst, warmstart_smiles_score))
        warmstart_smiles_score_lst.sort(key=(lambda x: x[1]), reverse=True)
        all_smiles_score_list.extend(warmstart_smiles_score_lst)
        all_smiles_score_list.sort(key=(lambda x: x[1]), reverse=True)
        good_smiles_list = all_smiles_score_list[:500]
        train_gnn(good_smiles_list, gnn, epoch=config['train_epoch'])
        # Seed the population with the 50 best warm-start molecules.
        warmstart_smiles_lst = [i[0] for i in warmstart_smiles_score_lst[:50]]
        print('warm start smiles list', warmstart_smiles_lst)
        start_smiles_lst += warmstart_smiles_lst
        current_set = set(start_smiles_lst)
        patience = 0
        while True:
            # Snapshot the top-100 scores to detect stagnation later.
            if (len(self.oracle) > 100):
                self.sort_buffer()
                old_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
            else:
                old_scores = 0
            # Propose offspring by mutating every molecule in the population.
            next_set = set()
            for smiles in current_set:
                smiles_set = optimize_single_molecule_one_iterate(smiles, gnn)
                next_set = next_set.union(smiles_set)
            smiles_lst = list(next_set)
            shuffle(smiles_lst)
            print(f'Generated molecules: {len(smiles_lst)}')
            smiles_lst = smiles_lst[:config['offspring_size']]
            score_lst = self.oracle(smiles_lst)
            if self.finish:
                print('max oracle hit, abort ...... ')
                break
            smiles_score_lst = [(smiles, score) for (smiles, score) in zip(smiles_lst, score_lst)]
            smiles_score_lst.sort(key=(lambda x: x[1]), reverse=True)
            # DPP selection balances score and diversity (weighted by lamb).
            (current_set, _, _) = dpp(smiles_score_lst=smiles_score_lst, num_return=population_size, lamb=lamb)
            # Periodically fine-tune the GNN on the best molecules seen so far.
            all_smiles_score_list.extend(smiles_score_lst)
            all_smiles_score_list.sort(key=(lambda x: x[1]), reverse=True)
            good_smiles_list = all_smiles_score_list[:config['train_data_size']]
            train_gnn(good_smiles_list, gnn, epoch=config['train_epoch'])
            # Early stop when the top-100 scores stop improving.
            if (len(self.oracle) > 5000):
                self.sort_buffer()
                new_scores = [item[1][0] for item in list(self.mol_buffer.items())[:100]]
                if (new_scores == old_scores):
                    patience += 1
                    if (patience >= self.args.patience):
                        self.log_intermediate(finish=True)
                        print('convergence criteria met, abort ...... ')
                        break
                else:
                    patience = 0
class DataQuery():
def __init__(self, **kwargs):
self._dict = kwargs.copy()
self._fields = tuple(self._dict.keys())
self._values = tuple(self._dict.values())
def __getitem__(self, key):
return self._dict[key]
def __eq__(self, other):
sdict = self._asdict()
try:
odict = other._asdict()
except AttributeError:
return False
common_keys = False
for (key, val) in sdict.items():
if (key in odict):
common_keys = True
if ((odict[key] != val) and (val is not None)):
return False
return common_keys
def __hash__(self):
fields = []
values = []
for (field, value) in sorted(self._dict.items()):
if (value != '*'):
fields.append(field)
if isinstance(value, (list, set)):
value = tuple(value)
values.append(value)
return hash(tuple(zip(fields, values)))
def get(self, key, default=None):
return self._dict.get(key, default)
def from_dict(cls, the_dict):
return cls(**the_dict)
def items(self):
return self._dict.items()
def _asdict(self):
return self._dict.copy()
def to_dict(self, trim=True):
if trim:
return self._to_trimmed_dict()
else:
return self._asdict()
def _to_trimmed_dict(self):
return {key: val for (key, val) in self._dict.items() if (val != '*')}
def __repr__(self):
items = ('{}={}'.format(key, repr(val)) for (key, val) in zip(self._fields, self._values))
return (((self.__class__.__name__ + '(') + ', '.join(items)) + ')')
def filter_dataids(self, dataid_container):
keys = list(filter(self._match_dataid, dataid_container))
return keys
def _match_dataid(self, dataid):
if self._shares_required_keys(dataid):
keys_to_check = (set(dataid.keys()) & set(self._fields))
else:
keys_to_check = (set(dataid._id_keys.keys()) & set(self._fields))
if (not keys_to_check):
return False
return all((self._match_query_value(key, dataid.get(key)) for key in keys_to_check))
def _shares_required_keys(self, dataid):
for (key, val) in dataid._id_keys.items():
try:
if val.get('required', False):
if (key in self._fields):
return True
except AttributeError:
continue
return False
def _match_query_value(self, key, id_val):
val = self._dict[key]
if (val == '*'):
return True
if (isinstance(id_val, tuple) and isinstance(val, (tuple, list))):
return (tuple(val) == id_val)
if (not isinstance(val, list)):
val = [val]
return (id_val in val)
def sort_dataids_with_preference(self, all_ids, preference):
try:
res = preference.to_dict()
except AttributeError:
res = dict()
res.update(self.to_dict())
optimistic_query = DataQuery.from_dict(res)
(sorted_ids, distances) = optimistic_query.sort_dataids(all_ids)
if (distances[0] == np.inf):
(sorted_ids, distances) = self.sort_dataids(all_ids)
return (sorted_ids, distances)
def sort_dataids(self, dataids):
distances = []
sorted_dataids = []
big_distance = 100000
keys = set(self._dict.keys())
for dataid in dataids:
keys |= set(dataid.keys())
for dataid in sorted(dataids):
sorted_dataids.append(dataid)
distance = 0
for key in keys:
if (distance == np.inf):
break
val = self._dict.get(key, '*')
if (val == '*'):
distance = self._add_absolute_distance(dataid, key, distance)
else:
try:
dataid_val = dataid[key]
except KeyError:
distance += big_distance
continue
distance = self._add_distance_from_query(dataid_val, val, distance)
distances.append(distance)
(distances, dataids) = zip(*sorted(zip(distances, sorted_dataids)))
return (dataids, distances)
def _add_absolute_distance(dataid, key, distance):
try:
distance += dataid.get(key).value
except AttributeError:
if isinstance(dataid.get(key), numbers.Number):
distance += dataid.get(key)
elif isinstance(dataid.get(key), tuple):
distance += len(dataid.get(key))
return distance
def _add_distance_from_query(dataid_val, requested_val, distance):
try:
distance += dataid_val.distance(requested_val)
except AttributeError:
if (not isinstance(requested_val, list)):
requested_val = [requested_val]
if (dataid_val not in requested_val):
distance = np.inf
elif isinstance(dataid_val, numbers.Number):
distance += dataid_val
return distance
def create_less_modified_query(self):
    """Return a copy of this query with the last modifier removed.

    Useful for falling back to a less-processed variant of a dataset.
    """
    new_dict = self.to_dict()
    # Drop the most recently applied modifier; keep the tuple type.
    new_dict['modifiers'] = tuple(new_dict['modifiers'][:(- 1)])
    return DataQuery.from_dict(new_dict)
def is_modified(self):
    """Return True when this query carries at least one modifier."""
    modifiers = self._dict.get('modifiers')
    return bool(modifiers)
def select_2(train_embs, one_test_emb, downstream_train_examples, one_test_example, tag, given_context, phase2_selection):
    """Build a few-shot SQL-generation prompt for one test dialogue turn.

    Phase 1 greedily accepts as many training examples as fit a ~3800-token
    budget (iterating from most similar downward when *phase2_selection* is
    'similar'); phase 2 re-emits the survivors ordered by similarity so the
    most similar example ends up closest to the test query.  The finished
    prompt is cached under ``cache/<tag>/prompts`` and returned.

    NOTE(review): ``sorted_indices`` is only assigned for 'similar' and
    'random' selections; any other value would raise NameError -- confirm
    the accepted values with the caller.
    """
    cos = nn.CosineSimilarity(dim=1, eps=1e-06)
    if (not os.path.isdir(f'cache/{tag}/prompts')):
        os.makedirs(f'cache/{tag}/prompts', exist_ok=True)
    # Both prompts start with the (converted) table schema header.
    prompt_string = f'''{conversion(table_prompt)}
'''
    prev_prompt_string = f'''{conversion(table_prompt)}
'''
    if (phase2_selection in ['similar']):
        # Rank training examples by cosine similarity to the test embedding
        # (ascending -- the loop below walks it from the end).
        test_e_reshape = one_test_emb.reshape(1, (- 1))
        scores = cos(test_e_reshape, train_embs).numpy()
        sorted_indices = np.argsort(scores)
    elif (phase2_selection in ['random']):
        sorted_indices = np.random.permutation(range(len(downstream_train_examples)))
    selected_indices = []
    num_indices = len(sorted_indices)
    count = 1
    # Phase 1: add examples until the tokenized prompt would exceed the budget.
    for idx in range((num_indices - 1), (- 1), (- 1)):
        prev_prompt_string += get_instance(count, downstream_train_examples[sorted_indices[idx]])
        cur_prompt_string = (prev_prompt_string + f'''Example #{(count + 1)}
''')
        last_slot_values = given_context
        # NOTE(review): this joins a *set* comprehension, so slot order is
        # arbitrary -- looks like a list was intended; confirm upstream.
        cur_prompt_string += f'''[context] {conversion(', '.join({f'{slot}: {value}' for (slot, value) in last_slot_values.items()}))}
'''
        last_sys_utt = one_test_example['dialog']['sys'][(- 1)]
        if (last_sys_utt == 'none'):
            last_sys_utt = ''
        cur_prompt_string += f'''[system] {last_sys_utt}
'''
        cur_prompt_string += f'''Q: [user] {one_test_example['dialog']['usr'][(- 1)]}
'''
        cur_prompt_string += 'SQL: SELECT * FROM'
        length = len(tokenizer_for_length(cur_prompt_string)['input_ids'])
        if (length > 3800):
            break
        selected_indices.append(idx)
        count += 1
    # Re-score the accepted examples so they can be ordered by similarity.
    indices_scores = []
    for idx in selected_indices:
        indices_scores.append([idx, cos(train_embs[sorted_indices[idx]].reshape(1, (- 1)), one_test_emb.reshape(1, (- 1))).item()])
    indices_scores = sorted(indices_scores, key=(lambda x: x[1]), reverse=True)
    new_selected_indices = [x[0] for x in indices_scores]
    if (phase2_selection in ['similar']):
        # Sanity check: similarity ranking must agree with the phase-1 order.
        assert (new_selected_indices == selected_indices), f'new_selected_indices={new_selected_indices}, selected_indices={selected_indices}'
    selected_indices = new_selected_indices
    select_num = len(selected_indices)
    count = 0
    second_phase_selected_indices = []
    # Phase 2: emit survivors least-similar first (most similar nearest the query).
    for idx in range((select_num - 1), (- 1), (- 1)):
        prompt_string += get_instance(count, downstream_train_examples[sorted_indices[selected_indices[idx]]])
        second_phase_selected_indices.append([sorted_indices[selected_indices[idx]].item(), downstream_train_examples[sorted_indices[selected_indices[idx]]]['id']])
        count += 1
    prompt_string += f'''Example #{count}
'''
    last_slot_values = given_context
    prompt_string += f'''[context] {conversion(', '.join({f'{slot}: {value}' for (slot, value) in last_slot_values.items()}))}
'''
    last_sys_utt = one_test_example['dialog']['sys'][(- 1)]
    if (last_sys_utt == 'none'):
        last_sys_utt = ''
    prompt_string += f'''[system] {last_sys_utt}
'''
    prompt_string += f'''Q: [user] {one_test_example['dialog']['usr'][(- 1)]}
'''
    prompt_string += 'SQL: SELECT * FROM'
    assert (len(tokenizer_for_length(prompt_string)['input_ids']) <= 3800)
    print('select_2, prompt example num: ', len(second_phase_selected_indices))
    # Cache the prompt plus the provenance of the selected examples.
    with open(f"cache/{tag}/prompts/{one_test_example['name'].replace('.', '')}_{one_test_example['id']}.json", 'w') as f:
        json.dump([[one_test_example['name'].replace('.', ''), one_test_example['id'], second_phase_selected_indices], prompt_string], f, indent=4)
    return prompt_string
def test_add_with_strings_update():
    """add.run_step with unpack=True splits 'xy' into characters; a second
    run with unpack=False then adds 'z' as one whole element."""
    context = Context({'arbset': {1, 2}, 'add': {'set': PyString('arbset'), 'addMe': 'xy', 'unpack': True}})
    add.run_step(context)
    # Second pass: same target set, now adding a single whole string.
    context['add']['unpack'] = False
    context['add']['addMe'] = 'z'
    add.run_step(context)
    assert (context['arbset'] == {1, 2, 'x', 'y', 'z'})
    # Only the 'arbset' and 'add' keys live in the context.
    assert (len(context) == 2)
def test_inheritance():
    """Shape introspection of a NamedTuple subclass keeps only the parent's
    field 'a' -- plain subclassing of a NamedTuple does not add new fields."""
    class Parent(NamedTuple):
        a: int
    class Child(Parent):
        b: str
    # 'b' must be absent from both input and output shapes.
    assert (get_named_tuple_shape(Child) == Shape(input=InputShape(constructor=Child, kwargs=None, fields=(InputField(type=int, id='a', default=NoDefault(), is_required=True, metadata=MappingProxyType({}), original=ANY),), params=(Param(field_id='a', name='a', kind=ParamKind.POS_OR_KW),), overriden_types=frozenset()), output=OutputShape(fields=(OutputField(type=int, id='a', default=NoDefault(), metadata=MappingProxyType({}), accessor=create_key_accessor(0, access_error=None), original=ANY),), overriden_types=frozenset())))
class Lz4f(Codec):
    """Numcodecs-style codec wrapping imagecodecs' LZ4F (LZ4 frame) format."""
    codec_id = 'imagecodecs_lz4f'
    def __init__(self, level=None, blocksizeid=False, contentchecksum=None, blockchecksum=None):
        # All parameters are forwarded verbatim to imagecodecs.lz4f_encode.
        self.level = level
        self.blocksizeid = blocksizeid
        self.contentchecksum = contentchecksum
        self.blockchecksum = blockchecksum
    def encode(self, buf):
        """Compress *buf* into an LZ4 frame."""
        return imagecodecs.lz4f_encode(buf, level=self.level, blocksizeid=self.blocksizeid, contentchecksum=self.contentchecksum, blockchecksum=self.blockchecksum)
    def decode(self, buf, out=None):
        """Decompress an LZ4 frame, optionally into the flattened *out* buffer."""
        return imagecodecs.lz4f_decode(buf, out=_flat(out))
def query_yes_no(question):
    """Prompt the user with *question* until a yes/no answer is given.

    Accepts 'y'/'ye'/'yes' and 'n'/'no' (case-insensitive); returns True
    for yes-like answers, False for no-like ones.
    """
    answers = {'yes': True, 'y': True, 'ye': True, 'no': False, 'n': False}
    suffix = ' [y/n] '
    while True:
        sys.stdout.write(question + suffix)
        reply = input().lower()
        try:
            return answers[reply]
        except KeyError:
            # Unrecognized input: nag and ask again.
            sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n")
class TestRopLop(RopLopChecker):
    """R-op/L-op coverage for a range of tensor Ops.

    The ``check_*`` helpers inherited from RopLopChecker build the graph,
    push forward / pull back a perturbation and compare against numeric
    differentiation; the tests here only construct representative graphs
    over the vector input ``self.x`` / matrix input ``self.mx``.
    """
    def test_max(self):
        # Reductions along both axes of the matrix input.
        self.check_mat_rop_lop(pt_max(self.mx, axis=0), (self.mat_in_shape[1],))
        self.check_mat_rop_lop(pt_max(self.mx, axis=1), (self.mat_in_shape[0],))
    def test_argmax(self):
        # argmax is not differentiable: expect the non-diff R-op path.
        self.check_nondiff_rop(argmax(self.mx, axis=1))
    def test_subtensor(self):
        self.check_rop_lop(self.x[:4], (4,))
    def test_incsubtensor1(self):
        tv = np.asarray(self.rng.uniform(size=(3,)), pytensor.config.floatX)
        t = pytensor.shared(tv)
        out = pytensor.tensor.subtensor.inc_subtensor(self.x[:3], t)
        self.check_rop_lop(out, self.in_shape)
    def test_incsubtensor2(self):
        tv = np.asarray(self.rng.uniform(size=(10,)), pytensor.config.floatX)
        t = pytensor.shared(tv)
        out = pytensor.tensor.subtensor.inc_subtensor(t[:4], self.x[:4])
        self.check_rop_lop(out, (10,))
    def test_setsubtensor1(self):
        tv = np.asarray(self.rng.uniform(size=(3,)), pytensor.config.floatX)
        t = pytensor.shared(tv)
        out = pytensor.tensor.subtensor.set_subtensor(self.x[:3], t)
        self.check_rop_lop(out, self.in_shape)
    def test_print(self):
        # Print is an identity for differentiation purposes.
        out = pytensor.printing.Print('x', attrs=('shape',))(self.x)
        self.check_rop_lop(out, self.in_shape)
    def test_setsubtensor2(self):
        tv = np.asarray(self.rng.uniform(size=(10,)), pytensor.config.floatX)
        t = pytensor.shared(tv)
        out = pytensor.tensor.subtensor.set_subtensor(t[:4], self.x[:4])
        self.check_rop_lop(out, (10,))
    def test_dimshuffle(self):
        self.check_rop_lop(self.x[:4].dimshuffle('x', 0).sum(axis=0), (4,))
    def test_unbroadcast(self):
        self.check_rop_lop(unbroadcast(self.x[:4].dimshuffle('x', 0), 0).sum(axis=1), (1,))
    def test_join(self):
        tv = np.asarray(self.rng.uniform(size=(10,)), pytensor.config.floatX)
        t = pytensor.shared(tv)
        out = pt.join(0, self.x, t)
        self.check_rop_lop(out, ((self.in_shape[0] + 10),))
    def test_dot(self):
        insh = self.in_shape[0]
        vW = np.asarray(self.rng.uniform(size=(insh, insh)), pytensor.config.floatX)
        W = pytensor.shared(vW)
        self.check_rop_lop(dot(self.x, W), self.in_shape)
    def test_elemwise0(self):
        self.check_rop_lop(((self.x + 1) ** 2), self.in_shape)
    def test_elemwise1(self):
        self.check_rop_lop((self.x + pt.cast(self.x, 'int32')), self.in_shape)
    def test_flatten(self):
        self.check_mat_rop_lop(self.mx.flatten(), ((self.mat_in_shape[0] * self.mat_in_shape[1]),))
    def test_sum(self):
        self.check_mat_rop_lop(self.mx.sum(axis=1), (self.mat_in_shape[0],))
    def test_softmax(self):
        self.check_rop_lop(pytensor.tensor.special.softmax(self.x, axis=(- 1)), self.in_shape)
    def test_alloc(self):
        # 1d alloc from a scalar, then a 3d alloc broadcasting the vector.
        out1d = pt.alloc(self.x.sum(), self.in_shape[0])
        self.check_rop_lop(out1d, self.in_shape[0])
        out3d = pt.alloc(self.x, self.mat_in_shape[0], self.mat_in_shape[1], self.in_shape[0])
        self.check_rop_lop(out3d.flatten(), ((self.mat_in_shape[0] * self.mat_in_shape[1]) * self.in_shape[0]))
    def test_invalid_input(self):
        # Rop must reject a non-Variable first argument.
        success = False
        try:
            Rop(0.0, [matrix()], [vector()])
            success = True
        except ValueError:
            pass
        assert (not success)
    def test_multiple_outputs(self):
        # Rop should preserve the container type (list vs tuple) of its input.
        m = matrix('m')
        v = vector('v')
        m_ = matrix('m_')
        v_ = vector('v_')
        mval = self.rng.uniform(size=(3, 7)).astype(pytensor.config.floatX)
        vval = self.rng.uniform(size=(7,)).astype(pytensor.config.floatX)
        m_val = self.rng.uniform(size=(3, 7)).astype(pytensor.config.floatX)
        v_val = self.rng.uniform(size=(7,)).astype(pytensor.config.floatX)
        rop_out1 = Rop([m, v, (m + v)], [m, v], [m_, v_])
        assert isinstance(rop_out1, list)
        assert (len(rop_out1) == 3)
        rop_out2 = Rop((m, v, (m + v)), [m, v], [m_, v_])
        assert isinstance(rop_out2, tuple)
        assert (len(rop_out2) == 3)
        all_outs = []
        for o in (rop_out1, rop_out2):
            all_outs.extend(o)
        f = pytensor.function([m, v, m_, v_], all_outs)
        f(mval, vval, m_val, v_val)
    def test_Rop_dot_bug_18Oct2013_Jeremiah(self):
        # Regression test: Rop of a grad involving dot used to fail.
        x = pt.arange(20.0).reshape([1, 20])
        v = pytensor.shared(np.ones([20]))
        d = dot(x, v).sum()
        Rop(grad(d, v), v, v)
class StackAsserter(Provider):
    """Test provider asserting the mediator's request-stack contents.

    When it receives a request of *request_type* it checks that the live
    request stack equals *expected_stack*, then optionally delegates
    *send_next* to continue the chain.
    """
    # Type of request this provider reacts to.
    request_type: Type[Request]
    # Exact stack expected at the moment of the call.
    expected_stack: Sequence[Request]
    # Optional follow-up request to delegate after the assertion.
    send_next: Optional[Request]
    def apply_provider(self, mediator: Mediator, request: Request):
        if (not isinstance(request, self.request_type)):
            raise CannotProvide
        assert (list(self.expected_stack) == list(mediator.request_stack))
        if (self.send_next is not None):
            mediator.delegating_provide(self.send_next)
def draw(response, axes_amplitude=None, axes_phase=None, fmin=0.01, fmax=100.0, nf=100, normalize=False, style={}, label=None, show_breakpoints=False, color_pool=None, label_pool=None):
    """Plot a frequency response (amplitude and/or phase) on the given axes.

    Returns ``((amp_min, amp_max), (phase_min, phase_max))`` with phase in
    units of pi, or None when no finite samples remain after clipping.

    NOTE(review): ``style={}`` is a mutable default argument; the code only
    rebinds it (``style = dict(style)``) in the color_pool branch so it is
    never mutated across calls, but a ``style=None`` default would be safer.
    NOTE(review): ``eff_label`` is assigned only inside the color_pool
    branch; a call with ``color_pool=None`` and an amplitude/phase axes
    would hit a NameError -- confirm intended call pattern.
    """
    # Logarithmically spaced frequency samples between fmin and fmax.
    f = num.exp(num.linspace(num.log(fmin), num.log(fmax), nf))
    resp_fmax = response.get_fmax()
    if (resp_fmax is not None):
        if (fmax > resp_fmax):
            logger.warning(('Maximum frequency above range supported by response. Clipping to supported%s.' % ((' (%s)' % label) if label else '')))
        f = f[(f <= resp_fmax)]
        if (f.size == 0):
            return
    tf = response.evaluate(f)
    # Drop non-finite samples before plotting.
    ok = num.isfinite(tf)
    if (not num.all(ok)):
        logger.warning(('NaN values present in evaluated response%s.' % ((' (%s)' % label) if label else '')))
    f = f[ok]
    tf = tf[ok]
    if normalize:
        tf = normalize_on_flat(f, tf)
    ta = num.abs(tf)
    if (color_pool is not None):
        # Key the color on the actual curve data so identical responses
        # share one color (and one legend entry) across calls.
        fh = BytesIO()
        f.dump(fh)
        tf.dump(fh)
        c_key = hashlib.sha1(fh.getvalue()).hexdigest()
        fh.close()
        if (c_key not in color_pool[0]):
            color_pool[0][c_key] = color_pool[1][(len(color_pool[0]) % len(color_pool[1]))]
            new = True
        else:
            new = False
        # Copy before mutating so the caller's style dict is untouched.
        style = dict(style)
        style['color'] = color_pool[0][c_key]
        ikey = (list(color_pool[0].keys()).index(c_key) + 1)
        slabel = ('[%i]' % ikey)
        if (label_pool is not None):
            label_pool.append(('%s %s' % (slabel, label)))
            eff_label = (slabel if new else None)
        else:
            eff_label = ('%s %s' % (slabel, label))
    if axes_amplitude:
        axes_amplitude.plot(f, ta, label=eff_label, **style)
        # Mark and annotate any response checkpoints.
        for checkpoint in response.checkpoints:
            axes_amplitude.plot(checkpoint.frequency, checkpoint.value, 'o', color=style.get('color', 'black'))
            axes_amplitude.annotate((('%.3g s' % (1.0 / checkpoint.frequency)) if (checkpoint.frequency < 1.0) else ('%.3g Hz' % checkpoint.frequency)), xy=(checkpoint.frequency, checkpoint.value), xytext=(10, 10), textcoords='offset points', color=style.get('color', 'black'))
        if show_breakpoints:
            # Mark pole/zero-induced slope changes within the plotted band.
            for (br_frequency, br_change) in response.construction():
                if (not (fmin <= br_frequency <= fmax)):
                    continue
                br_value = abs(response.evaluate1(br_frequency))
                axes_amplitude.plot(br_frequency, br_value, ('v' if (br_change < 0) else '^'), mec=style.get('color', 'black'), color='none', ms=10)
                axes_amplitude.annotate((('%.3g s (%i)' % ((1.0 / br_frequency), br_change)) if (br_frequency < 1.0) else ('%.3g Hz' % br_frequency)), xy=(br_frequency, br_value), xytext=(10, 10), textcoords='offset points', color=style.get('color', 'black'))
    if axes_phase:
        # Anchor phase unwrapping at the flattest part of the amplitude curve.
        dta = num.diff(num.log(ta))
        iflat = num.nanargmin((num.abs(num.diff(dta)) + num.abs(dta[:(- 1)])))
        tp = num.unwrap(num.angle(tf))
        ioff = int(num.round((tp[iflat] / (2.0 * num.pi))))
        tp -= ((ioff * 2.0) * num.pi)
        axes_phase.plot(f, (tp / num.pi), label=eff_label, **style)
    else:
        tp = [0.0]
    return ((num.min(ta), num.max(ta)), ((num.min(tp) / num.pi), (num.max(tp) / num.pi)))
def save_dataset(data_items, name):
    """Write *data_items* as ``{"links": data_items}`` JSON into the dataset folder.

    Does nothing when *data_items* is empty or None.  The output path is
    ``settings.DATASET_FOLDER`` joined with *name*; parent directories are
    created as needed.
    """
    if not data_items:
        return
    out_filepath = os.path.join(settings.DATASET_FOLDER, name)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists()/os.makedirs() pair.
    os.makedirs(os.path.dirname(out_filepath), exist_ok=True)
    with open(out_filepath, 'w') as fio:
        json.dump({'links': data_items}, fio)
_sentencepiece
class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for BertGenerationTokenizer (sentencepiece-based).

    NOTE(review): the bare '_property' and '_torch' names below look like
    mangled decorators (plausibly '@cached_property' and '@require_torch')
    -- confirm against the upstream transformers test file.
    """
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # Persist a sample-vocab tokenizer so the mixin helpers can reload it.
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Token <-> id conversion round-trips for '<s>'."""
        token = '<s>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<unk>')
        self.assertEqual(vocab_keys[1], '<s>')
        self.assertEqual(vocab_keys[(- 1)], '<pad>')
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['This', 'is', 'a', 't', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize('I was born in 92000, and this is false.')
        self.assertListEqual(tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '9', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', 'e', '.'])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        # Out-of-vocab ids ('9', 'e') convert back to '<unk>'.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, [(SPIECE_UNDERLINE + 'I'), (SPIECE_UNDERLINE + 'was'), (SPIECE_UNDERLINE + 'b'), 'or', 'n', (SPIECE_UNDERLINE + 'in'), (SPIECE_UNDERLINE + ''), '<unk>', '2', '0', '0', '0', ',', (SPIECE_UNDERLINE + 'and'), (SPIECE_UNDERLINE + 'this'), (SPIECE_UNDERLINE + 'is'), (SPIECE_UNDERLINE + 'f'), 'al', 's', '<unk>', '.'])
    _property
    def big_tokenizer(self):
        # Full pretrained tokenizer used by the integration tests below.
        return BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder')
    def test_tokenization_base_easy_symbols(self):
        symbols = 'Hello World!'
        original_tokenizer_encodings = [18536, 2260, 101]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    def test_tokenization_base_hard_symbols(self):
        symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
        original_tokenizer_encodings = [871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    _torch
    def test_torch_encode_plus_sent_to_model(self):
        """Encoded output must be accepted by a freshly initialized encoder."""
        import torch
        from transformers import BertGenerationConfig, BertGenerationEncoder
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = ' '.join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors='pt', return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([((sequence + ' ') + sequence)], return_tensors='pt', return_token_type_ids=False)
        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)
        # The embedding table must cover the tokenizer's vocabulary.
        assert (model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size)
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
    def test_tokenizer_integration(self):
        # Golden encodings (ids + attention masks, padded to length 94).
        expected_encoding = {'input_ids': [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        self.tokenizer_integration_test_util(expected_encoding=expected_encoding, model_name='google/bert_for_seq_generation_L-24_bbc_encoder', revision='c817d1fd1be2ffaa1fed4db')
def test_multiple_root_event_handlers():
    """Events dispatched on one RootEventHandler must not leak to another."""
    root_called = 0
    def pointer_leave_callback(event):
        nonlocal root_called
        root_called += 1
    root_handler = RootEventHandler()
    root_handler.add_event_handler(pointer_leave_callback, 'pointer_leave')
    alt_handler = RootEventHandler()
    # pointer_move on the registered handler: no pointer_leave expected.
    root_handler.dispatch_event(PointerEvent(type='pointer_move', x=0, y=0, target=root_handler))
    assert (root_called == 0)
    # Dispatch on an unrelated handler must also leave the counter untouched.
    alt_handler.dispatch_event(PointerEvent(type='pointer_move', x=0, y=0, target=alt_handler))
    assert (root_called == 0)
# NOTE(review): the '.parametrize(...)' line below looks like a mangled
# '@pytest.mark.parametrize' decorator -- confirm against upstream.
.parametrize('actual, expected', [(reactpy.vdom('div', [reactpy.vdom('div')]), {'tagName': 'div', 'children': [{'tagName': 'div'}]}), (reactpy.vdom('div', {'style': {'backgroundColor': 'red'}}), {'tagName': 'div', 'attributes': {'style': {'backgroundColor': 'red'}}}), (reactpy.vdom('div', [reactpy.vdom('div'), 1], (reactpy.vdom('div'), 2)), {'tagName': 'div', 'children': [{'tagName': 'div'}, 1, {'tagName': 'div'}, 2]}), (reactpy.vdom('div', {'on_event': FAKE_EVENT_HANDLER}), {'tagName': 'div', 'eventHandlers': FAKE_EVENT_HANDLER_DICT}), (reactpy.vdom('div', reactpy.html.h1('hello'), reactpy.html.h2('world')), {'tagName': 'div', 'children': [{'tagName': 'h1', 'children': ['hello']}, {'tagName': 'h2', 'children': ['world']}]}), (reactpy.vdom('div', {'tagName': 'div'}), {'tagName': 'div', 'children': [{'tagName': 'div'}]}), (reactpy.vdom('div', (i for i in range(3))), {'tagName': 'div', 'children': [0, 1, 2]}), (reactpy.vdom('div', ((x ** 2) for x in [1, 2, 3])), {'tagName': 'div', 'children': [1, 4, 9]})])
def test_simple_node_construction(actual, expected):
    """Each parametrized vdom(...) construction must equal its plain-dict form."""
    assert (actual == expected)
def evaluate(args):
    """Evaluate a squeeze-excite EfficientNet variant on an ImageNet-style val set.

    Builds the architecture from string block specs, loads weights from
    ``args.model_path``, reports params/FLOPs, then prints mean top-1/top-5
    accuracy over the validation dataloader.
    """
    torch.cuda.set_device(args.gpu)
    s_r = args.se_ratio
    # Block spec strings encode type/repeat/kernel/stride/expansion/channels
    # plus the SE ratio injected from args.
    arch_def = [['ds_r1_k3_s1_e1_c16'], [('ir_r1_k3_s2_e6_c32_se%f_nsw' % s_r)], [('ir_r1_k3_s1_e3_c32_se%f_nsw' % s_r)], [('ir_r1_k5_s2_e6_c40_se%f_nsw' % s_r), ('ir_r3_k3_s1_e6_c40_se%f_nsw' % s_r)], [('ir_r1_k5_s2_e6_c80_se%f_nsw' % s_r), ('ir_r1_k7_s1_e6_c80_se%f_nsw' % s_r), ('ir_r2_k3_s1_e6_c80_se%f_nsw' % s_r), ('ir_r4_k3_s1_e6_c96_se%f_nsw' % s_r)], [('ir_r1_k3_s2_e6_c192_se%f_nsw' % s_r), ('ir_r3_k7_s1_e6_c192_se%f_nsw' % s_r), ('ir_r1_k7_s1_e6_c320_se%f_nsw' % s_r)]]
    model_kwargs = dict(block_args=decode_arch_def(arch_def, 1.0, depth_trunc='round'), num_features=1280, stem_size=32, channel_multiplier=1.0, act_layer=nn.ReLU)
    model = EfficientNet(**model_kwargs)
    device = torch.device(args.device)
    if (args.device == 'cuda'):
        model.cuda()
    state = torch.load(f'{args.model_path}', map_location=device)
    model.load_state_dict(state)
    # Profile FLOPs/params with a dummy single-image 224x224 batch.
    _input = torch.randn(1, 3, 224, 224).to(device)
    (flops, params) = profile(model, inputs=(_input,), verbose=False)
    print('Model: {}, params: {}M, flops: {}M'.format(args.model, (params / 1000000.0), (flops / 1000000.0)))
    model.eval()
    # 'dataset_tpye' appears to match the (misspelled) keyword accepted by
    # get_imagenet_dataset -- confirm before "fixing" it.
    val_dataloader = get_imagenet_dataset(batch_size=args.batch_size, dataset_root=args.val_dataset_root, dataset_tpye='valid')
    print('Start to evaluate ...')
    total_top1 = 0.0
    total_top5 = 0.0
    total_counter = 0.0
    # NOTE(review): no torch.no_grad() around the loop; gradients are unused
    # here, so wrapping it would save memory -- confirm upstream intent.
    for (image, label) in val_dataloader:
        (image, label) = (image.to(device), label.to(device))
        result = model(image)
        (top1, top5) = accuracy(result, label, topk=(1, 5))
        if (device.type == 'cuda'):
            total_counter += image.cpu().data.shape[0]
            total_top1 += top1.cpu().data.numpy()
            total_top5 += top5.cpu().data.numpy()
        else:
            total_counter += image.data.shape[0]
            total_top1 += top1.data.numpy()
            total_top5 += top5.data.numpy()
    mean_top1 = (total_top1 / total_counter)
    mean_top5 = (total_top5 / total_counter)
    print(('Evaluate Result: Total: %d\tmTop1: %.4f\tmTop5: %.6f' % (total_counter, mean_top1, mean_top5)))
class UFID(Frame):
    """ID3 "Unique file identifier" frame: an owner identifier plus opaque
    binary data."""
    _framespec = [Latin1TextSpec('owner'), BinaryDataSpec('data')]
    def HashKey(self):
        # NOTE(review): upstream mutagen exposes HashKey as a property; the
        # decorator appears to have been stripped in this copy.
        return ('%s:%s' % (self.FrameID, self.owner))
    def __eq__(self, other):
        # Compare owner+data against other UFID frames; anything else is
        # compared against the raw data.  The original tested
        # isinstance(o, UFI), which made two UFID frames fall through to
        # `data == other-frame` (always False) -- upstream mutagen uses UFID
        # here, and UFI (the v2.2 variant) is a UFID subclass so it is still
        # covered.
        if isinstance(other, UFID):
            return ((self.owner == other.owner) and (self.data == other.data))
        else:
            return (self.data == other)
    __hash__ = Frame.__hash__
    def _pprint(self):
        """Human-readable 'owner=data' rendering for debugging output."""
        return ('%s=%r' % (self.owner, self.data))
def validate_dicts(ground_truth: dict, predicted: dict) -> bool:
    """Check that *predicted* contains exactly the agent keys of *ground_truth*.

    Prints a diagnostic line for every discrepancy (row-count mismatch,
    missing agent, unknown agent) and returns True only when the key sets
    match.
    """
    valid = True
    num_agents_gt = len(ground_truth)
    num_agents_pred = len(predicted)
    if num_agents_gt != num_agents_pred:
        print(f'Incorrect number of rows in inference csv. Expected {num_agents_gt}, Got {num_agents_pred}')
        valid = False
    missing_agents = ground_truth.keys() - predicted.keys()
    if missing_agents:
        valid = False
        # The original shadowed the set with its own loop variable
        # (`for missing_agents in missing_agents`); use a distinct name.
        for missing_agent in missing_agents:
            print(f'Missing agents: {missing_agent}')
    unknown_agents = predicted.keys() - ground_truth.keys()
    if unknown_agents:
        valid = False
        for unknown_agent in unknown_agents:
            print(f'Unknown agents: {unknown_agent}')
    return valid
class CheckAndRaise(COp):
    """Op that forwards its first input unchanged, raising ``exc_type`` with
    ``msg`` unless all remaining scalar condition inputs are true.
    """
    _f16_ok = True
    __props__ = ('msg', 'exc_type')
    # Output 0 is a view of input 0: the value passes straight through.
    view_map = {0: [0]}
    check_input = False
    params_type = ParamsType(exc_type=exception_type)
    def __init__(self, exc_type, msg=''):
        if (not issubclass(exc_type, Exception)):
            raise ValueError('`exc_type` must be an Exception subclass')
        self.exc_type = exc_type
        self.msg = msg
    def __str__(self):
        return f'CheckAndRaise{{{self.exc_type}({self.msg})}}'
    def __eq__(self, other):
        # Two ops are interchangeable when they raise the same exception
        # type with the same message.
        if (type(self) != type(other)):
            return False
        if ((self.msg == other.msg) and (self.exc_type == other.exc_type)):
            return True
        return False
    def __hash__(self):
        return hash((self.msg, self.exc_type))
    def make_node(self, value: Variable, *conds: Variable):
        """Create an Apply node checking *conds* and passing *value* through."""
        import pytensor.tensor as pt
        if (not isinstance(value, Variable)):
            value = pt.as_tensor_variable(value)
        conds = [(pt.as_tensor_variable(c) if (not isinstance(c, Variable)) else c) for c in conds]
        # Conditions must be scalars (ndim 0).
        assert all(((c.type.ndim == 0) for c in conds))
        return Apply(self, ([value] + conds), [value.type()])
    def perform(self, node, inputs, outputs):
        (out,) = outputs
        (val, *conds) = inputs
        out[0] = val
        if (not np.all(conds)):
            raise self.exc_type(self.msg)
    def grad(self, input, output_gradients):
        # Gradient flows only through the value; conditions are disconnected.
        return (output_gradients + ([DisconnectedType()()] * (len(input) - 1)))
    def connection_pattern(self, node):
        return ([[1]] + ([[0]] * (len(node.inputs) - 1)))
    def c_code(self, node, name, inames, onames, props):
        """Emit C that tests each condition and forwards the value on success."""
        if (not isinstance(node.inputs[0].type, (DenseTensorType, ScalarType))):
            raise NotImplementedError(f'CheckAndRaise c_code not implemented for input type {node.inputs[0].type}')
        (value_name, *cond_names) = inames
        out_name = onames[0]
        check = []
        fail_code = props['fail']
        param_struct_name = props['params']
        # Escape the message for embedding in a C string literal.
        msg = self.msg.replace('"', '\\"').replace('\n', '\\n')
        for (idx, cond_name) in enumerate(cond_names):
            # Dense tensor conditions arrive as PyObjects; scalar conditions
            # are raw C values testable with == 0.
            if isinstance(node.inputs[0].type, DenseTensorType):
                check.append(f'''
if(PyObject_IsTrue((PyObject *){cond_name}) == 0) {{
PyObject * exc_type = {param_struct_name}->exc_type;
Py_INCREF(exc_type);
PyErr_SetString(exc_type, "{msg}");
Py_XDECREF(exc_type);
{indent(fail_code, (' ' * 4))}
}}
''')
            else:
                check.append(f'''
if({cond_name} == 0) {{
PyObject * exc_type = {param_struct_name}->exc_type;
Py_INCREF(exc_type);
PyErr_SetString(exc_type, "{msg}");
Py_XDECREF(exc_type);
{indent(fail_code, (' ' * 4))}
}}
''')
        check = '\n'.join(check)
        # Tensor outputs need refcount management; plain scalars are copied.
        if isinstance(node.inputs[0].type, DenseTensorType):
            res = f'''
{check}
Py_XDECREF({out_name});
{out_name} = {value_name};
Py_INCREF({value_name});
'''
        else:
            res = f'''
{check}
{out_name} = {value_name};
'''
        return res
    def c_code_cache_version(self):
        return (1, 1)
    def infer_shape(self, fgraph, node, input_shapes):
        # Output shape equals the value input's shape.
        return [input_shapes[0]]
# NOTE(review): the two '.parametrize(...)' lines below look like mangled
# '@pytest.mark.parametrize' decorators -- confirm against upstream.
.parametrize('pretty_json', (True, False))
.parametrize('verbosity', (0, 1, 2))
def test_json_format_validation_error_nested(capsys, pretty_json, verbosity):
    """JsonReporter output for a nested anyOf/oneOf validation error.

    verbosity 0: bare fail status; 1: top-level error with sub-error count;
    2: full sub_errors breakdown grouped by JSON path.
    """
    validator = Draft7Validator({'anyOf': [{'properties': {'foo': {'oneOf': [{'type': 'string'}, {'type': 'integer'}]}}}, {'properties': {'bar': {'oneOf': [{'type': 'string'}, {'type': 'object', 'properties': {'baz': {'type': 'integer'}}}]}}}]})
    err = next(validator.iter_errors({'foo': {}, 'bar': {'baz': 'buzz'}}))
    result = CheckResult()
    result.record_validation_error('foo.json', err)
    json_reporter = JsonReporter(pretty=pretty_json, verbosity=verbosity)
    json_reporter.report_result(result)
    captured = capsys.readouterr()
    # The report goes to stdout only.
    assert (captured.err == '')
    data = json.loads(captured.out)
    assert (data['status'] == 'fail')
    if (verbosity < 1):
        assert (data == {'status': 'fail'})
        return
    assert (len(data['errors']) == 1)
    assert ('is not valid under any of the given schemas' in data['errors'][0]['message'])
    assert data['errors'][0]['has_sub_errors']
    assert (data['errors'][0]['num_sub_errors'] == 5)
    if (verbosity < 2):
        assert ('sub_errors' not in data['errors'][0])
        return
    else:
        assert ('sub_errors' in data['errors'][0])
    sub_errors = data['errors'][0]['sub_errors']
    # Group sub-errors by the JSON path they apply to.
    (foo_errors, bar_errors, bar_baz_errors) = ([], [], [])
    for error_item in sub_errors:
        if (error_item['path'] == '$.foo'):
            foo_errors.append(error_item)
        elif (error_item['path'] == '$.bar'):
            bar_errors.append(error_item)
        elif (error_item['path'] == '$.bar.baz'):
            bar_baz_errors.append(error_item)
    assert (len(foo_errors) == 3)
    assert (len(bar_baz_errors) == 1)
    assert (len(bar_errors) == 2)
    assert ("'buzz' is not of type 'integer'" == bar_baz_errors[0]['message'])
    assert ({item['message'] for item in foo_errors} == {"{} is not of type 'string'", "{} is not of type 'integer'", '{} is not valid under any of the given schemas'})
    assert ("{'baz': 'buzz'} is not of type 'string'" in [item['message'] for item in bar_errors])
(fov=ShowInInspector(int), orthoSize=ShowInInspector(float))
class Camera(SingleComponent):
near = ShowInInspector(float, 0.05)
far = ShowInInspector(float, 200)
clearColor = ShowInInspector(Color, RGB(0, 0, 0))
shader = ShowInInspector(Shader, shaders['Standard'])
skyboxEnabled = ShowInInspector(bool, True)
skybox = ShowInInspector(Skybox, skyboxes['Water'])
ortho = ShowInInspector(bool, False, 'Orthographic')
shadows = ShowInInspector(bool, False)
depthMapSize = ShowInInspector(int, 1024)
def __init__(self):
super(Camera, self).__init__()
self.size = Screen.size.copy()
self.guiShader = shaders['GUI']
self.skyboxShader = shaders['Skybox']
self.depthShader = shaders['Depth']
self.customProjMat = None
self.fov = 90
self.orthoSize = 5
self.viewMat = glm.lookAt([0, 0, 0], [0, 0, (- 1)], [0, 1, 0])
self.renderPass = False
def setupBuffers(self):
if (hasattr(self, 'guiVBO') and hasattr(self, 'guiVAO')):
return
data = [0.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0]
self.guiVBO = gl.glGenBuffers(1)
self.guiVAO = gl.glGenVertexArrays(1)
gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.guiVBO)
gl.glBufferData(gl.GL_ARRAY_BUFFER, (len(data) * floatSize), convert(c_float, data), gl.GL_STATIC_DRAW)
gl.glBindVertexArray(self.guiVAO)
gl.glEnableVertexAttribArray(0)
gl.glVertexAttribPointer(0, 2, gl.GL_FLOAT, gl.GL_FALSE, (2 * floatSize), None)
def fov(self):
return self._fov
def fov(self, value):
self._fov = value
self.projMat = glm.perspective(glm.radians(((self._fov / self.size.x) * self.size.y)), (self.size.x / self.size.y), self.near, self.far)
def orthoSize(self):
return self._orthoSize
def orthoSize(self, value):
self._orthoSize = value
width = ((value * self.size.x) / self.size.y)
self.orthoMat = glm.ortho((- width), width, (- value), value, self.near, self.far)
def Resize(self, width, height):
if ((width == 0) or (height == 0)):
return
gl.glViewport(0, 0, width, height)
self.size = Vector2(width, height)
self.projMat = glm.perspective(glm.radians(((self._fov / self.size.x) * self.size.y)), (self.size.x / self.size.y), self.near, self.far)
if (self.scene.mainCamera is self):
Screen._edit(width, height)
def getMatrix(self, transform):
if ((not transform.hasChanged) and (transform.modelMatrix is not None)):
return transform.modelMatrix
(angle, axis) = transform.rotation.angleAxisPair
angle = (- glm.radians(angle))
axis = (Vector3(1, 1, (- 1)) * axis.normalized())
position = glm.translate(glm.mat4(), list((transform.position * Vector3(1, 1, (- 1)))))
rotated = (position * glm.mat4_cast(glm.angleAxis(angle, list(axis))))
scaled = glm.scale(rotated, list(transform.scale))
transform.modelMatrix = scaled
transform.hasChanged = False
return scaled
def get2DMatrix(self, rectTransform):
rect = (rectTransform.GetRect(self.size) + rectTransform.offset)
rectMin = Vector2.min(rect.min, rect.max)
size = (rect.max - rect.min).abs()
pivot = (size * rectTransform.pivot)
model = glm.translate(glm.mat4(1), glm.vec3(*(rectMin + pivot), 0))
model = glm.rotate(model, glm.radians(rectTransform.rotation), glm.vec3(0, 0, 1))
model = glm.translate(model, glm.vec3(*(- pivot), 0))
model = glm.scale(model, glm.vec3(*(size / 2), 1))
return model
def getViewMat(self):
if (self.renderPass and self.transform.hasChanged):
(angle, axis) = self.transform.rotation.angleAxisPair
angle = glm.radians((- angle))
axis = (axis.normalized() * Vector3((- 1), (- 1), 1))
rot = glm.angleAxis(angle, list(axis))
self.viewMat = glm.translate(glm.mat4_cast(rot), list((self.transform.position * Vector3((- 1), (- 1), 1))))
self.renderPass = False
self.transform.hasChanged = False
return self.viewMat
def UseShader(self, name):
    """Select the named shader from the global ``shaders`` registry."""
    self.shader = shaders[name]
def SetupShader(self, lights):
    """Bind the main shader and upload camera, projection and light uniforms.

    Projection priority: a custom matrix if set, else the orthographic
    matrix when ``self.ortho``, else the perspective matrix. When shadows
    are enabled, each light's depth map is bound to texture unit 1+i.
    """
    self.shader.use()
    if (self.customProjMat is not None):
        self.shader.setMat4(b'projection', self.customProjMat)
    elif self.ortho:
        self.shader.setMat4(b'projection', self.orthoMat)
    else:
        self.shader.setMat4(b'projection', self.projMat)
    self.shader.setInt(b'useShadowMap', int(self.shadows))
    self.shader.setMat4(b'view', self.getViewMat())
    # Positions/directions are Z-flipped into OpenGL's coordinate system.
    self.shader.setVec3(b'viewPos', list((self.transform.position * Vector3(1, 1, (- 1)))))
    self.shader.setInt(b'numLights', len(lights))
    for (i, light) in enumerate(lights):
        lightName = f'lights[{i}].'.encode()
        self.shader.setVec3((lightName + b'pos'), (light.transform.position * Vector3(1, 1, (- 1))))
        self.shader.setFloat((lightName + b'strength'), (light.intensity * 10))
        self.shader.setVec3((lightName + b'color'), (light.color.toRGB() / 255))
        self.shader.setInt((lightName + b'type'), int(light.type))
        direction = light.transform.forward
        self.shader.setVec3((lightName + b'dir'), (direction * Vector3(1, 1, (- 1))))
        if self.shadows:
            # Texture unit 0 is left for the material texture.
            gl.glActiveTexture((gl.GL_TEXTURE1 + i))
            gl.glBindTexture(gl.GL_TEXTURE_2D, light.depthMap)
            self.shader.setInt(f'shadowMaps[{i}]'.encode(), (i + 1))
            location = f'lightSpaceMatrices[{i}]'.encode()
            self.shader.setMat4(location, light.lightSpaceMatrix)
def SetupDepthShader(self, light):
    """Bind the shadow-pass shader and upload *light*'s light-space matrix.

    Uses a fixed 10x10 orthographic volume around the light; the matrix
    is also stored on the light so the main pass can reuse it.
    """
    self.depthShader.use()
    proj = glm.ortho((- 10), 10, (- 10), 10, light.near, light.far)
    pos = (light.transform.position * Vector3(1, 1, (- 1)))
    look = (pos + (light.transform.forward * Vector3(1, 1, (- 1))))
    up = (light.transform.up * Vector3(1, 1, (- 1)))
    view = glm.lookAt(list(pos), list(look), list(up))
    light.lightSpaceMatrix = (proj * view)
    location = b'lightSpaceMatrix'
    self.depthShader.setMat4(location, light.lightSpaceMatrix)
def Draw(self, renderers):
    """Draw each renderer with the active shader.

    Uploads the per-object model/normal matrices and material colour,
    binds the material texture when present, and resets the ``textured``
    uniform when absent so a textured object drawn earlier cannot leak
    its state onto an untextured one.
    """
    self.shader.use()
    for renderer in renderers:
        model = self.getMatrix(renderer.transform)
        # Normal matrix: inverse-transpose of the model's 3x3 part.
        normModel = glm.transpose(glm.inverse(glm.mat3(model)))
        self.shader.setMat4(b'model', model)
        self.shader.setMat3(b'normModel', normModel)
        self.shader.setVec3(b'objectColor', (renderer.mat.color.toRGB() / 255))
        if (renderer.mat.texture is not None):
            self.shader.setInt(b'textured', 1)
            renderer.mat.texture.use()
        else:
            # Bug fix: previously the uniform kept its last value, so an
            # untextured renderer following a textured one still sampled
            # the stale texture flag.
            self.shader.setInt(b'textured', 0)
        renderer.Render()
def DrawDepth(self, renderers):
    """Draw all renderers with the depth-only shader (shadow pass)."""
    self.depthShader.use()
    for renderer in renderers:
        model = self.getMatrix(renderer.transform)
        self.depthShader.setMat4(b'model', model)
        renderer.Render()
def RenderDepth(self, renderers, lights):
    """Render each light's shadow depth map, then restore GL state.

    The previously bound framebuffer and viewport are saved up front and
    restored at the end so the main pass is unaffected, even when
    shadows are disabled.
    """
    previousFBO = gl.glGetIntegerv(gl.GL_DRAW_FRAMEBUFFER_BINDING)
    previousViewport = gl.glGetIntegerv(gl.GL_VIEWPORT)
    if self.shadows:
        # Face culling is disabled for the whole depth pass.
        gl.glDisable(gl.GL_CULL_FACE)
        for light in lights:
            if (not hasattr(light, 'depthFBO')):
                # Lazily create the light's depth framebuffer/texture.
                light.setupBuffers(self.depthMapSize)
            gl.glViewport(0, 0, self.depthMapSize, self.depthMapSize)
            gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, light.depthFBO)
            self.SetupDepthShader(light)
            gl.glClear(gl.GL_DEPTH_BUFFER_BIT)
            self.DrawDepth(renderers)
        gl.glEnable(gl.GL_CULL_FACE)
    gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, previousFBO)
    gl.glViewport(*previousViewport)
def RenderScene(self, renderers, lights):
    """Clear the framebuffer and draw the scene with the main shader."""
    gl.glClearColor(*(self.clearColor.toRGB() / 255), 1)
    gl.glClear((gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT))
    self.SetupShader(lights)
    self.Draw(renderers)
def Render(self, renderers, lights):
    """Full frame: shadow pass, scene pass, skybox, then the 2D GUI."""
    self.RenderDepth(renderers, lights)
    self.RenderScene(renderers, lights)
    self.RenderSkybox()
    self.Render2D()
def RenderSkybox(self):
    """Draw the skybox cube behind all geometry.

    The view matrix is truncated to its rotation part (mat3 round-trip)
    so the skybox stays centered on the camera; GL_LEQUAL lets its
    far-plane depth values pass the depth test.
    """
    if self.skyboxEnabled:
        gl.glDepthFunc(gl.GL_LEQUAL)
        self.skyboxShader.use()
        self.skyboxShader.setMat4(b'view', glm.mat4(glm.mat3(self.getViewMat())))
        self.skyboxShader.setMat4(b'projection', self.projMat)
        self.skybox.use()
        gl.glDrawArrays(gl.GL_TRIANGLES, 0, 36)
        gl.glDepthFunc(gl.GL_LESS)
def Render2D(self):
    """Render the GUI canvas, if one is assigned to this camera."""
    if (self.canvas is None):
        return
    from .gui import GuiRenderComponent
    self.Setup2D()
    guiRenderers = [component
                    for gameObject in self.canvas.transform.GetDescendants()
                    for component in gameObject.GetComponents(GuiRenderComponent)]
    self.Draw2D(guiRenderers)
def Setup2D(self):
    """Prepare GUI rendering: buffers, shader and pixel-space projection."""
    self.setupBuffers()
    self.guiShader.use()
    # Ortho args are (0, size.x, size.y, 0, ...): top-left origin with
    # the y axis running downward.
    self.guiShader.setMat4(b'projection', glm.ortho(0, *self.size, 0, 10, (- 10)))
    gl.glBindVertexArray(self.guiVAO)
def Draw2D(self, renderers):
    """Draw each GUI renderer as a textured quad.

    Skips renderers that have no RectTransform or no texture; a
    non-None return from ``PreRender()`` also skips the element
    (an early-out hook for the renderer).
    """
    from .gui import RectTransform
    for renderer in renderers:
        rectTransform = renderer.GetComponent(RectTransform)
        if (rectTransform is None):
            continue
        if (renderer.PreRender() is not None):
            continue
        if (renderer.texture is None):
            continue
        renderer.texture.use()
        self.guiShader.setMat4(b'model', self.get2DMatrix(rectTransform))
        self.guiShader.setFloat(b'depth', renderer.depth)
        self.guiShader.setInt(b'image', 0)
        self.guiShader.setInt(b'flipX', renderer.flipX)
        self.guiShader.setInt(b'flipY', renderer.flipY)
        gl.glDrawArrays(gl.GL_QUADS, 0, 4)
def test_log_vehicle_leave():
    """Validate the typed fields of a LogVehicleLeave telemetry event.

    Picks an event that has fellow passengers and non-zero fuel so the
    optional fields under test are all populated.
    """
    events = telemetry.events_from_type('LogVehicleLeave')
    for (idx, ev) in enumerate(events):
        if (ev.fellow_passengers and (ev.vehicle.fuel_percent != 0)):
            data = events[idx]
            break
    else:
        # Was a bare `assert False`; a message makes the failure diagnosable.
        raise AssertionError('no LogVehicleLeave event with fellow passengers and non-zero fuel')
    assert isinstance(data, LogVehicleLeave)
    assert isinstance(data.character, Character)
    assert isinstance(data.vehicle, Vehicle)
    # Presumably health_percent can be a non-float 100 when undamaged;
    # the guard skips that case -- confirm against the telemetry schema.
    if (data.vehicle.health_percent != 100):
        assert isinstance(data.vehicle.health_percent, float)
    assert isinstance(data.vehicle.fuel_percent, float)
    assert isinstance(data.vehicle.vehicle_id, str)
    assert isinstance(data.vehicle.vehicle_unique_id, int)
    assert isinstance(data.vehicle.vehicle_is_wheels_in_air, bool)
    assert isinstance(data.vehicle.vehicle_is_in_water_volume, bool)
    assert isinstance(data.fellow_passengers[0], Character)
    assert isinstance(data.seat_index, int)
    assert isinstance(data.ride_distance, float)
    assert isinstance(data.max_speed, float)
    assert (str(data.vehicle) in VEHICLE_MAP_VALUES)
def get_batches(targets, sources, batch_size, source_pad_int, target_pad_int):
    """Yield padded (targets, sources, target_lengths, source_lengths) batches.

    Sentences are taken ``batch_size`` at a time; a trailing partial
    batch is dropped (floor division), matching the original behaviour.
    Each batch is padded to its own longest sentence by
    ``pad_sentence_batch``, and the unpadded lengths are yielded
    alongside for downstream masking.
    """
    for batch_i in range(0, (len(sources) // batch_size)):
        start_i = (batch_i * batch_size)
        sources_batch = sources[start_i:(start_i + batch_size)]
        targets_batch = targets[start_i:(start_i + batch_size)]
        pad_sources_batch = np.array(pad_sentence_batch(sources_batch, source_pad_int))
        pad_targets_batch = np.array(pad_sentence_batch(targets_batch, target_pad_int))
        # Unpadded lengths of each sentence in the batch.
        targets_lengths = [len(target) for target in targets_batch]
        source_lengths = [len(source) for source in sources_batch]
        yield (pad_targets_batch, pad_sources_batch, targets_lengths, source_lengths)
def get_files_in_tree(tree, repo):
    """Recursively collect (hex, path) pairs for non-binary java blobs in *tree*."""
    collected = set()
    for entry in tree:
        if (entry.type == 'tree'):
            # Recurse into the subtree and prefix each path with this
            # directory's name.
            nested = get_files_in_tree(repo[entry.id], repo)
            collected.update(((blob_hex, '{}/{}'.format(entry.name, rel_path)) for (blob_hex, rel_path) in nested))
        else:
            blob = repo[entry.id]
            if ((not blob.is_binary) and entry.name.endswith('java')):
                collected.add((entry.hex, entry.name))
    return collected
class Tunnel(XodrBase):
    """OpenDRIVE road tunnel description (position, extent and lighting)."""

    def __init__(self, s: float, length: float, id: str, name: str, tunnel_type: TunnelType=TunnelType.standard, daylight: float=0.5, lighting: float=0.5):
        """Store the tunnel's s-position, length, identity and lighting values."""
        super().__init__()
        self.s = s
        self.length = length
        self.id = id
        self.name = name
        self.tunnel_type = tunnel_type
        self.daylight = daylight
        self.lighting = lighting

    def __eq__(self, other):
        """Tunnels compare equal when the base class agrees and all attributes match."""
        return (isinstance(other, Tunnel) and super().__eq__(other) and (self.get_attributes() == other.get_attributes()))

    def get_attributes(self):
        """Return the tunnel's XML attributes as a dict of strings."""
        return {
            's': str(self.s),
            'length': str(self.length),
            'id': str(self.id),
            'name': str(self.name),
            'type': enum2str(self.tunnel_type),
            'daylight': str(self.daylight),
            'lighting': str(self.lighting),
        }

    def get_element(self):
        """Create the ``<tunnel>`` ElementTree element."""
        return ET.Element('tunnel', attrib=self.get_attributes())
class Product(Space):
    """Cartesian product of component spaces.

    The flat representation is the concatenation of each component's
    flat representation, in component order.

    NOTE(review): ``components`` and ``flat_dim`` read like they were
    ``@property``-decorated upstream (decorators possibly stripped);
    internal code below therefore uses ``self._components`` directly,
    which also fixes TypeErrors from iterating the bound method.
    """

    def __init__(self, *components):
        """Accept either ``Product(a, b, ...)`` or ``Product([a, b, ...])``."""
        if isinstance(components[0], (list, tuple)):
            assert (len(components) == 1)
            components = components[0]
        self._components = tuple(components)
        dtypes = [c.new_tensor_variable('tmp', extra_dims=0).dtype for c in components]
        if ((len(dtypes) > 0) and hasattr(dtypes[0], 'as_numpy_dtype')):
            # TensorFlow dtype objects: unwrap to their numpy equivalents.
            dtypes = [d.as_numpy_dtype for d in dtypes]
        self._common_dtype = np.core.numerictypes.find_common_type([], dtypes)

    def sample(self):
        """Sample one value from each component, as a tuple."""
        return tuple((x.sample() for x in self._components))

    def components(self):
        """Return the tuple of component spaces."""
        return self._components

    def contains(self, x):
        """True when *x* is a tuple whose items lie in the matching components."""
        return (isinstance(x, tuple) and all((c.contains(xi) for (c, xi) in zip(self._components, x))))

    def new_tensor_variable(self, name, extra_dims):
        """Create a symbolic variable with one extra flat trailing dimension."""
        return ext.new_tensor(name=name, ndim=(extra_dims + 1), dtype=self._common_dtype)

    def flat_dim(self):
        """Total flattened dimension: sum over the components."""
        return np.sum([c.flat_dim for c in self._components])

    def flatten(self, x):
        """Flatten one product sample by concatenating component flats."""
        return np.concatenate([c.flatten(xi) for (c, xi) in zip(self._components, x)])

    def flatten_n(self, xs):
        """Flatten a batch of product samples into a 2-D array.

        Bug fix: previously zipped over ``self.components`` (the bound
        method object itself), which raised TypeError; use the stored
        tuple instead.
        """
        xs_regrouped = [[x[i] for x in xs] for i in range(len(xs[0]))]
        flat_regrouped = [c.flatten_n(xi) for (c, xi) in zip(self._components, xs_regrouped)]
        return np.concatenate(flat_regrouped, axis=(- 1))

    def unflatten(self, x):
        """Split a flat vector back into per-component unflattened values."""
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(x, np.cumsum(dims)[:(- 1)])
        return tuple((c.unflatten(xi) for (c, xi) in zip(self._components, flat_xs)))

    def unflatten_n(self, xs):
        """Unflatten a batch: returns a list of per-sample tuples.

        Bug fix: same ``self.components`` -> ``self._components`` issue
        as in ``flatten_n``.
        """
        dims = [c.flat_dim for c in self._components]
        flat_xs = np.split(xs, np.cumsum(dims)[:(- 1)], axis=(- 1))
        unflat_xs = [c.unflatten_n(xi) for (c, xi) in zip(self._components, flat_xs)]
        unflat_xs_grouped = list(zip(*unflat_xs))
        return unflat_xs_grouped

    def __eq__(self, other):
        """Products are equal when their component tuples are equal."""
        if (not isinstance(other, Product)):
            return False
        # Bug fix: ``tuple(self.components)`` tried to iterate the bound
        # method; compare the stored tuples directly.
        return (self._components == other._components)

    def __hash__(self):
        return hash(self._components)
class TestSklearnSVM(QiskitAquaTestCase):
    """Tests for the classical SklearnSVM baseline.

    The training/test samples are shared class-level fixtures so the
    three multiclass tests no longer duplicate the same large literals,
    and all tests consistently skip on MissingOptionalLibraryError.
    """

    # Per-class training samples (20 two-feature points each).
    _TRAIN_A = np.asarray([[0.6560706, 0.], [0., 0.], [0., 0.], [0., (- 0.)], [0.3994399, 0.], [0., (- 0.)], [0., 0.], [0., 0.], [0., 0.5323737], [0., 0.], [0., 0.], [0.6259567, 0.], [0., 0.], [0.3938784, 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.], [0.8690704, 0.]])
    _TRAIN_B = np.asarray([[0., (- 0.)], [0., (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [0., (- 0.)], [0., (- 0.)], [0., 0.], [(- 0.), (- 0.)], [0., (- 0.)], [(- 0.), (- 0.)], [0., (- 0.1922198)], [0., (- 0.)], [0., (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [0., (- 0.3660534)]])
    _TRAIN_C = np.asarray([[(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), (- 0.)], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 1.0], [(- 0.), 0.5564536], [(- 0.), 0.], [(- 0.), 0.], [(- 1.0), 0.], [(- 0.), 0.], [(- 0.), 0.]])
    # Per-class test samples (10 points each).
    _TEST_A = np.asarray([[0., 0.], [0., 0.], [0., 0.], [0., 0.], [0., 0.], [0.5073321, 0.], [0., 0.], [1.0, 0.], [0.630973, 0.], [0., 0.]])
    _TEST_B = np.asarray([[(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [0., (- 0.)], [(- 0.), (- 0.)], [(- 0.), (- 0.)], [(- 0.394202), (- 0.)], [0., (- 0.)], [0., (- 0.)], [0., (- 0.)]])
    _TEST_C = np.asarray([[(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.477976], [(- 0.), 0.], [(- 0.), 0.], [(- 0.), 0.1181712]])
    # Expected predictions: ten samples per class, in sorted-label order.
    _MULTI_EXPECTED = ((['A'] * 10) + (['B'] * 10) + (['C'] * 10))

    def setUp(self):
        super().setUp()
        aqua_globals.random_seed = 10598

    @staticmethod
    def _sorted_test_array(test_input):
        """Concatenate the test samples in sorted-label order (SklearnSVM's layout)."""
        return np.concatenate([test_input[k] for k in sorted(test_input)])

    @classmethod
    def _multiclass_data(cls):
        """Return (training_input, test_input) for the 3-class fixture."""
        training_input = {'A': cls._TRAIN_A, 'B': cls._TRAIN_B, 'C': cls._TRAIN_C}
        test_input = {'A': cls._TEST_A, 'B': cls._TEST_B, 'C': cls._TEST_C}
        return (training_input, test_input)

    def _run_multiclass(self, multiclass_extension):
        """Run SklearnSVM with *multiclass_extension* and check perfect accuracy."""
        (training_input, test_input) = self._multiclass_data()
        total_array = self._sorted_test_array(test_input)
        try:
            result = SklearnSVM(training_input, test_input, total_array, multiclass_extension=multiclass_extension).run()
        except MissingOptionalLibraryError as ex:
            self.skipTest(str(ex))
        self.assertEqual(result['testing_accuracy'], 1.0)
        self.assertEqual(result['predicted_classes'], self._MULTI_EXPECTED)

    def test_binary(self):
        """Binary classification reaches perfect accuracy on the toy data."""
        training_input = {'A': self._TRAIN_A, 'B': self._TRAIN_B}
        test_input = {'A': self._TEST_A, 'B': self._TEST_B}
        total_array = self._sorted_test_array(test_input)
        try:
            result = SklearnSVM(training_input, test_input, total_array).run()
        except MissingOptionalLibraryError as ex:
            self.skipTest(str(ex))
        self.assertEqual(result['testing_accuracy'], 1.0)
        self.assertEqual(result['predicted_classes'], ((['A'] * 10) + (['B'] * 10)))

    def test_multiclass_one_against_all(self):
        """One-against-rest multiclass extension."""
        self._run_multiclass(OneAgainstRest())

    def test_multiclass_all_pairs(self):
        """All-pairs multiclass extension."""
        self._run_multiclass(AllPairs())

    def test_multiclass_error_correcting_code(self):
        """Error-correcting-code multiclass extension."""
        self._run_multiclass(ErrorCorrectingCode(code_size=5))
class F9_Network(F8_Network):
    """Fedora 9 ``network`` command: adds the 'query' bootproto value."""
    removedKeywords = F8_Network.removedKeywords
    removedAttrs = F8_Network.removedAttrs

    def __init__(self, writePriority=0, *args, **kwargs):
        F8_Network.__init__(self, writePriority, *args, **kwargs)
        self.bootprotoList.append(BOOTPROTO_QUERY)

    def _getParser(self):
        op = F8_Network._getParser(self)
        # Extend the --bootproto help text with the version note.
        for action in op._actions:
            if ('--bootproto' not in action.option_strings):
                continue
            action.help += dedent(("\n\n .. versionchanged:: %s\n\n The 'query' value was added." % versionToLongString(F9)))
            break
        return op
# NOTE(review): the next line looks like a truncated
# ``@pytest.mark.skipif(...)`` decorator (the ``@pytest.mark`` prefix
# appears lost) -- confirm against upstream.
.skipif((sys.platform == 'win32'), reason='Windows only applies R/O to files')
def test_populated_read_only_cache_and_copied_app_data(tmp_path, current_fastest, temp_app_data):
    """Seeding a venv from a read-only app-data cache must still succeed.

    First populates the app-data cache with a normal run, clears the
    interpreter-info cache and removes the venv, then re-runs with the
    cache directory made read-only plus ``--read-only-app-data``.
    """
    dest = (tmp_path / 'venv')
    cmd = ['--seeder', 'app-data', '--creator', current_fastest, '-vv', '-p', 'python', str(dest)]
    assert cli_run(cmd)
    # Drop cached interpreter discovery so the second run resolves afresh.
    cached_py_info._CACHE.clear()
    safe_delete(dest)
    with read_only_dir(temp_app_data):
        assert cli_run(['--read-only-app-data', *cmd])
class Room(models.Model):
    """A conference room, used either for talks or for trainings."""
    TYPES = Choices(('talk', _('Talk room')), ('training', _('Training room')))
    name = models.CharField(_('name'), max_length=100)
    type = models.CharField(_('type'), choices=TYPES, max_length=10, default=TYPES.talk)

    class Meta:
        verbose_name = _('Room')
        verbose_name_plural = _('Rooms')

    def __str__(self):
        return self.name
def imread(filename, flags=cv2.IMREAD_COLOR):
    """Read an image that lives inside a zip archive.

    ``filename`` has the form ``<archive>.zip@/<path/inside/zip>``; the
    opened ZipFile handles are cached in the module-level ``_im_zfile``
    list so each archive is opened only once.
    """
    global _im_zfile
    path = filename
    # Bug fix: the separator literal was empty (``path.index('')`` is
    # always 0) and ``str.index`` raises instead of returning -1; use
    # ``find('@')`` so the not-found check below actually works.
    pos_at = path.find('@')
    if (pos_at == (- 1)):
        print(("character '@' is not found from the given path '%s'" % path))
        assert 0
    path_zip = path[0:pos_at]
    if (not os.path.isfile(path_zip)):
        print(("zip file '%s' is not found" % path_zip))
        assert 0
    # Reuse an already-open archive when possible.
    for i in range(len(_im_zfile)):
        if (_im_zfile[i]['path'] == path_zip):
            # pos_at + 2 skips the '@' and the leading '/' of the inner path.
            path_img = os.path.join(_im_zfile[i]['zipfile'].namelist()[0], path[(pos_at + 2):])
            data = _im_zfile[i]['zipfile'].read(path_img)
            return cv2.imdecode(np.frombuffer(data, np.uint8), flags)
    _im_zfile.append({'path': path_zip, 'zipfile': zipfile.ZipFile(path_zip, 'r')})
    path_img = os.path.join(_im_zfile[(- 1)]['zipfile'].namelist()[0], path[(pos_at + 2):])
    data = _im_zfile[(- 1)]['zipfile'].read(path_img)
    return cv2.imdecode(np.frombuffer(data, np.uint8), flags)
class DeployLog(models.Model):
    # Deployment / rollback history record for a project configuration.
    # NOTE(review): the human-readable choice labels and verbose_name
    # strings appear stripped (likely originally non-ASCII text);
    # restore them from upstream if available.
    d_types = (('deploy', ''), ('rollback', ''))
    project_config = models.ForeignKey('ProjectConfig', on_delete=models.CASCADE)
    deploy_user = models.ForeignKey('users.UserProfile', on_delete=models.CASCADE)
    # NOTE(review): default=0 is not one of the declared choices
    # ('deploy'/'rollback') -- probably should be default='deploy'; confirm.
    d_type = models.CharField(max_length=10, choices=d_types, verbose_name='', default=0)
    branch_tag = models.CharField(max_length=16, verbose_name='', default='master')
    release_name = models.CharField(max_length=100, verbose_name='')
    release_desc = models.CharField(max_length=100, verbose_name='')
    result = models.TextField(verbose_name='')
    # Creation timestamp, set once on insert.
    c_time = models.DateTimeField(auto_now_add=True, verbose_name='')

    class Meta():
        db_table = 'ops_deploy_log'
        verbose_name = ''
        verbose_name_plural = ''
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float=0.0, adam_beta1: float=0.9, adam_beta2: float=0.999, adam_epsilon: float=1e-08, weight_decay_rate: float=0.0, power: float=1.0, include_in_weight_decay: Optional[List[str]]=None):
    """Build a Keras optimizer with polynomial LR decay and optional warmup.

    Returns an ``(optimizer, lr_schedule)`` tuple. The optimizer is
    AdamWeightDecay when ``weight_decay_rate`` > 0, otherwise plain Adam.
    """
    decay_steps = (num_train_steps - num_warmup_steps)
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(initial_learning_rate=init_lr, decay_steps=decay_steps, end_learning_rate=(init_lr * min_lr_ratio), power=power)
    if num_warmup_steps:
        # Linear warmup wraps the decay schedule for the first steps.
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if (weight_decay_rate <= 0.0):
        optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon)
    else:
        optimizer = AdamWeightDecay(learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'], include_in_weight_decay=include_in_weight_decay)
    return (optimizer, lr_schedule)
def get_lr_scheduler(scheduler_type: str, optimizer: torch.optim.Optimizer, warmup_steps: Optional[int]=0, max_steps: Optional[int]=None, base_lr: float=0.0001, max_lr: float=0.001, step_size_up: int=2000) -> 'torch.optim.lr_scheduler._LRScheduler':
    """Create a learning-rate scheduler for *optimizer*.

    Args:
        scheduler_type: one of 'linear', 'cos_hard_restart', 'cyclic'.
        optimizer: the optimizer to schedule.
        warmup_steps: warmup steps for the transformers schedules.
        max_steps: total training steps for the transformers schedules.
            (Annotation fixed: was mistyped ``Optional[bool]``.)
        base_lr / max_lr / step_size_up: CyclicLR parameters.

    Raises:
        ValueError: for an unrecognized ``scheduler_type`` (previously
            the function silently returned ``None``).
    """
    if (scheduler_type == 'linear'):
        return get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps)
    elif (scheduler_type == 'cos_hard_restart'):
        return get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=max_steps, num_cycles=3)
    elif (scheduler_type == 'cyclic'):
        return torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr, max_lr, step_size_up=step_size_up, cycle_momentum=False)
    raise ValueError('unknown scheduler_type: {!r}'.format(scheduler_type))
def cache_data(hparams, filename, flag):
    """Cache a raw data file as TFRecords and record split metadata.

    Side effects: sets ``hparams.<flag>_file_cache`` to the cached file
    name, and (when the cache is first built) writes the sample count
    and impression ids to the split's util paths.

    Raises:
        ValueError: for an unsupported data format or an unknown flag.
    """
    # Dispatch the cache writer by the configured data format.
    format_caches = {'ffm': FfmCache, 'din': DinCache, 'cccfnet': CCCFNetCache}
    if (hparams.data_format not in format_caches):
        raise ValueError('data format must be ffm, din, cccfnet, this format not defined {0}'.format(hparams.data_format))
    cache_obj = format_caches[hparams.data_format]()
    if (not os.path.exists(util.CACHE_DIR)):
        os.mkdir(util.CACHE_DIR)
    # Per-split source attribute name and metadata output paths; this
    # replaces four near-identical if/elif branches.
    flag_config = {
        'train': ('train_file', util.TRAIN_NUM, util.TRAIN_IMPRESSION_ID),
        'eval': ('eval_file', util.EVAL_NUM, util.EVAL_IMPRESSION_ID),
        'test': ('test_file', util.TEST_NUM, util.TEST_IMPRESSION_ID),
        'infer': ('infer_file', util.INFER_NUM, util.INFER_IMPRESSION_ID),
    }
    if (flag not in flag_config):
        raise ValueError('flag must be train, eval, test, infer')
    (file_attr, sample_num_path, impression_id_path) = flag_config[flag]
    cached_name = util.convert_cached_name(getattr(hparams, file_attr), hparams.batch_size)
    setattr(hparams, (file_attr + '_cache'), cached_name)
    print('cache filename:', filename)
    if (not os.path.isfile(cached_name)):
        print('has not cached file, begin cached...')
        start_time = time.time()
        (sample_num, impression_id_list) = cache_obj.write_tfrecord(filename, cached_name, hparams)
        # (typo fix in the log label: was 'caced file used time')
        util.print_time('cached file used time', start_time)
        print('data sample num:{0}'.format(sample_num))
        with open(sample_num_path, 'w') as f:
            f.write((str(sample_num) + '\n'))
        with open(impression_id_path, 'w') as f:
            for impression_id in impression_id_list:
                f.write((str(impression_id) + '\n'))
# NOTE(review): this bare call looks like a mangled registration
# decorator (upstream BasicSR uses ``@MODEL_REGISTRY.register()``);
# restore the '@' and full registry name from upstream.
_REGISTRY.register()
class HiFaceGANModel(SRModel):
    """HiFaceGAN face-renovation model: SRModel plus a GAN discriminator."""

    def init_training_settings(self):
        """Build the discriminator, losses and optimizers for training."""
        train_opt = self.opt['train']
        self.ema_decay = train_opt.get('ema_decay', 0)
        if (self.ema_decay > 0):
            raise NotImplementedError('HiFaceGAN does not support EMA now. Pass')
        self.net_g.train()
        # Discriminator network.
        self.net_d = build_network(self.opt['network_d'])
        self.net_d = self.model_to_device(self.net_d)
        self.print_network(self.net_d)
        # Optional losses: pixel, perceptual, feature matching.
        if train_opt.get('pixel_opt'):
            self.cri_pix = build_loss(train_opt['pixel_opt']).to(self.device)
        else:
            self.cri_pix = None
        if train_opt.get('perceptual_opt'):
            self.cri_perceptual = build_loss(train_opt['perceptual_opt']).to(self.device)
        else:
            self.cri_perceptual = None
        if train_opt.get('feature_matching_opt'):
            self.cri_feat = build_loss(train_opt['feature_matching_opt']).to(self.device)
        else:
            self.cri_feat = None
        if ((self.cri_pix is None) and (self.cri_perceptual is None)):
            raise ValueError('Both pixel and perceptual losses are None.')
        if train_opt.get('gan_opt'):
            self.cri_gan = build_loss(train_opt['gan_opt']).to(self.device)
        self.net_d_iters = train_opt.get('net_d_iters', 1)
        self.net_d_init_iters = train_opt.get('net_d_init_iters', 0)
        self.setup_optimizers()
        self.setup_schedulers()

    def setup_optimizers(self):
        """Create one optimizer each for the generator and discriminator."""
        train_opt = self.opt['train']
        optim_type = train_opt['optim_g'].pop('type')
        self.optimizer_g = self.get_optimizer(optim_type, self.net_g.parameters(), **train_opt['optim_g'])
        self.optimizers.append(self.optimizer_g)
        optim_type = train_opt['optim_d'].pop('type')
        self.optimizer_d = self.get_optimizer(optim_type, self.net_d.parameters(), **train_opt['optim_d'])
        self.optimizers.append(self.optimizer_d)

    def discriminate(self, input_lq, output, ground_truth):
        """Run the conditional discriminator on fake and real pairs.

        Fake and real (lq, image) pairs are concatenated along the batch
        dimension so the discriminator sees both in one forward pass;
        inputs are resized to the output resolution when they differ.
        """
        (h, w) = output.shape[(- 2):]
        if (output.shape[(- 2):] != input_lq.shape[(- 2):]):
            lq = torch.nn.functional.interpolate(input_lq, (h, w))
            real = torch.nn.functional.interpolate(ground_truth, (h, w))
            fake_concat = torch.cat([lq, output], dim=1)
            real_concat = torch.cat([lq, real], dim=1)
        else:
            fake_concat = torch.cat([input_lq, output], dim=1)
            real_concat = torch.cat([input_lq, ground_truth], dim=1)
        fake_and_real = torch.cat([fake_concat, real_concat], dim=0)
        discriminator_out = self.net_d(fake_and_real)
        (pred_fake, pred_real) = self._divide_pred(discriminator_out)
        return (pred_fake, pred_real)

    # Bug fix: this is called as ``self._divide_pred(x)`` above, but was
    # declared with a single ``pred`` parameter and no @staticmethod, so
    # every call raised TypeError (the decorator was likely stripped).
    @staticmethod
    def _divide_pred(pred):
        """Split a batch-concatenated prediction into (fake, real) halves."""
        if isinstance(pred, list):
            # Multi-scale discriminator: split each scale's feature list.
            fake = []
            real = []
            for p in pred:
                fake.append([tensor[:(tensor.size(0) // 2)] for tensor in p])
                real.append([tensor[(tensor.size(0) // 2):] for tensor in p])
        else:
            fake = pred[:(pred.size(0) // 2)]
            real = pred[(pred.size(0) // 2):]
        return (fake, real)

    def optimize_parameters(self, current_iter):
        """One training step: update the generator, then the discriminator."""
        # Generator step: freeze D's parameters first.
        for p in self.net_d.parameters():
            p.requires_grad = False
        self.optimizer_g.zero_grad()
        self.output = self.net_g(self.lq)
        l_g_total = 0
        loss_dict = OrderedDict()
        if (((current_iter % self.net_d_iters) == 0) and (current_iter > self.net_d_init_iters)):
            if self.cri_pix:
                l_g_pix = self.cri_pix(self.output, self.gt)
                l_g_total += l_g_pix
                loss_dict['l_g_pix'] = l_g_pix
            if self.cri_perceptual:
                (l_g_percep, l_g_style) = self.cri_perceptual(self.output, self.gt)
                if (l_g_percep is not None):
                    l_g_total += l_g_percep
                    loss_dict['l_g_percep'] = l_g_percep
                if (l_g_style is not None):
                    l_g_total += l_g_style
                    loss_dict['l_g_style'] = l_g_style
            (pred_fake, pred_real) = self.discriminate(self.lq, self.output, self.gt)
            l_g_gan = self.cri_gan(pred_fake, True, is_disc=False)
            l_g_total += l_g_gan
            loss_dict['l_g_gan'] = l_g_gan
            if self.cri_feat:
                l_g_feat = self.cri_feat(pred_fake, pred_real)
                l_g_total += l_g_feat
                loss_dict['l_g_feat'] = l_g_feat
            l_g_total.backward()
            self.optimizer_g.step()
        # Discriminator step: unfreeze D, detach the generator output.
        for p in self.net_d.parameters():
            p.requires_grad = True
        self.optimizer_d.zero_grad()
        (pred_fake, pred_real) = self.discriminate(self.lq, self.output.detach(), self.gt)
        l_d_real = self.cri_gan(pred_real, True, is_disc=True)
        loss_dict['l_d_real'] = l_d_real
        l_d_fake = self.cri_gan(pred_fake, False, is_disc=True)
        loss_dict['l_d_fake'] = l_d_fake
        l_d_total = ((l_d_real + l_d_fake) / 2)
        l_d_total.backward()
        self.optimizer_d.step()
        self.log_dict = self.reduce_loss_dict(loss_dict)
        if (self.ema_decay > 0):
            print('HiFaceGAN does not support EMA now. pass')

    def validation(self, dataloader, current_iter, tb_logger, save_img=False):
        """Dispatch validation to the distributed or single-process path."""
        # SPADE-style generators are kept in train mode during validation.
        if (self.opt['network_g']['type'] in ('HiFaceGAN', 'SPADEGenerator')):
            self.net_g.train()
        if self.opt['dist']:
            self.dist_validation(dataloader, current_iter, tb_logger, save_img)
        else:
            print(('In HiFaceGANModel: The new metrics package is under development.' + 'Using super method now (Only PSNR & SSIM are supported)'))
            super().nondist_validation(dataloader, current_iter, tb_logger, save_img)

    def nondist_validation(self, dataloader, current_iter, tb_logger, save_img):
        """Single-process validation: collect SR/GT tensors, save images, compute metrics."""
        dataset_name = dataloader.dataset.opt['name']
        with_metrics = (self.opt['val'].get('metrics') is not None)
        if with_metrics:
            self.metric_results = dict()
        sr_tensors = []
        gt_tensors = []
        pbar = tqdm(total=len(dataloader), unit='image')
        for val_data in dataloader:
            img_name = osp.splitext(osp.basename(val_data['lq_path'][0]))[0]
            self.feed_data(val_data)
            self.test()
            visuals = self.get_current_visuals()
            sr_tensors.append(visuals['result'])
            if ('gt' in visuals):
                gt_tensors.append(visuals['gt'])
                del self.gt
            # Free GPU memory between samples.
            del self.lq
            del self.output
            torch.cuda.empty_cache()
            if save_img:
                if self.opt['is_train']:
                    save_img_path = osp.join(self.opt['path']['visualization'], img_name, f'{img_name}_{current_iter}.png')
                elif self.opt['val']['suffix']:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['val']['suffix']}.png")
                else:
                    save_img_path = osp.join(self.opt['path']['visualization'], dataset_name, f"{img_name}_{self.opt['name']}.png")
                imwrite(tensor2img(visuals['result']), save_img_path)
            pbar.update(1)
            pbar.set_description(f'Test {img_name}')
        pbar.close()
        if with_metrics:
            # Metrics are computed over the whole packed batch at once.
            sr_pack = torch.cat(sr_tensors, dim=0)
            gt_pack = torch.cat(gt_tensors, dim=0)
            for (name, opt_) in self.opt['val']['metrics'].items():
                self.metric_results[name] = calculate_metric(dict(sr_pack=sr_pack, gt_pack=gt_pack), opt_)
            self._log_validation_metric_values(current_iter, dataset_name, tb_logger)

    def save(self, epoch, current_iter):
        """Save generator, discriminator and training state (no EMA support)."""
        if hasattr(self, 'net_g_ema'):
            print('HiFaceGAN does not support EMA now. Fallback to normal mode.')
        self.save_network(self.net_g, 'net_g', current_iter)
        self.save_network(self.net_d, 'net_d', current_iter)
        self.save_training_state(epoch, current_iter)
class TestStripPickler():
    # Tests for StripPickler; each test runs in a fresh temp directory.

    def setup_method(self):
        # Switch into a throwaway working directory for file output.
        self.origdir = os.getcwd()
        self.tmpdir = mkdtemp()
        os.chdir(self.tmpdir)

    def teardown_method(self):
        # Restore the original cwd before removing the temp directory.
        os.chdir(self.origdir)
        if (self.tmpdir is not None):
            shutil.rmtree(self.tmpdir)

    def test_basic(self):
        # Dumping a symbolic matrix through StripPickler must not raise.
        # NOTE(review): the outer `open('test.pkl', ...)` is never
        # written to and looks vestigial -- confirm against upstream.
        with open('test.pkl', 'wb') as f:
            m = matrix()
            dest_pkl = 'my_test.pkl'
            with open(dest_pkl, 'wb') as f:
                strip_pickler = StripPickler(f, protocol=(- 1))
                strip_pickler.dump(m)
class TestBrowserCrash(unittest.TestCase):
    # NOTE(review): this coroutine test is presumably driven by a sync
    # decorator (e.g. pyppeteer's test helper) that appears stripped.

    async def test_browser_crash_send(self):
        """After the browser process dies, page/browser calls must raise."""
        browser = (await launch(args=['--no-sandbox']))
        page = (await browser.newPage())
        (await page.goto('about:blank'))
        (await page.querySelector('title'))
        # Kill the Chromium process out from under the live connection.
        browser.process.terminate()
        browser.process.wait()
        if current_platform().startswith('win'):
            # Give Windows a moment to actually close the pipe.
            (await asyncio.sleep(1))
        with self.assertRaises(NetworkError):
            (await page.querySelector('title'))
        with self.assertRaises(NetworkError):
            with self.assertLogs('pyppeteer', logging.ERROR):
                (await page.querySelector('title'))
        with self.assertRaises(ConnectionError):
            (await browser.newPage())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.