code stringlengths 281 23.7M |
|---|
class _GlobalConvModule(nn.Module):
def __init__(self, in_dim, out_dim, kernel_size):
super(_GlobalConvModule, self).__init__()
pad0 = ((kernel_size[0] - 1) // 2)
pad1 = ((kernel_size[1] - 1) // 2)
super(_GlobalConvModule, self).__init__()
self.conv_l1 = nn.Conv2d(in_dim, out_dim, kernel_size=(kernel_size[0], 1), padding=(pad0, 0))
self.conv_l2 = nn.Conv2d(out_dim, out_dim, kernel_size=(1, kernel_size[1]), padding=(0, pad1))
self.conv_r1 = nn.Conv2d(in_dim, out_dim, kernel_size=(1, kernel_size[1]), padding=(0, pad1))
self.conv_r2 = nn.Conv2d(out_dim, out_dim, kernel_size=(kernel_size[0], 1), padding=(pad0, 0))
def forward(self, x):
x_l = self.conv_l1(x)
x_l = self.conv_l2(x_l)
x_r = self.conv_r1(x)
x_r = self.conv_r2(x_r)
x = (x_l + x_r)
return x |
class WritePoTestCase(unittest.TestCase):
    """Tests for ``pofile.write_po`` serialization: location merging,
    charsets, comment handling, line wrapping, obsolete messages,
    previous msgids, output sorting, and line-number options."""

    def test_join_locations(self):
        # Adding the same msgid twice merges both locations onto one "#:" line.
        catalog = Catalog()
        catalog.add('foo', locations=[('main.py', 1)])
        catalog.add('foo', locations=[('utils.py', 3)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#: main.py:1 utils.py:3\nmsgid "foo"\nmsgstr ""')

    def test_write_po_file_with_specified_charset(self):
        # The catalog charset must appear in the header and encode the body.
        catalog = Catalog(charset='iso-8859-1')
        catalog.add('foo', 'aou', locations=[('main.py', 1)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=False)
        po_file = buf.getvalue().strip()
        assert (b'"Content-Type: text/plain; charset=iso-8859-1\\n"' in po_file)
        assert ('msgstr "aou"'.encode('iso-8859-1') in po_file)

    def test_duplicate_comments(self):
        # Identical auto comments are emitted only once.
        catalog = Catalog()
        catalog.add('foo', auto_comments=['A comment'])
        catalog.add('foo', auto_comments=['A comment'])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#. A comment\nmsgid "foo"\nmsgstr ""')

    def test_wrap_long_lines(self):
        # Wrapping preserves embedded newlines and significant whitespace.
        text = "Here's some text where\nwhite space and line breaks matter, and should\n\nnot be removed\n\n"
        catalog = Catalog()
        catalog.add(text, locations=[('main.py', 1)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, no_location=True, omit_header=True, width=42)
        assert (buf.getvalue().strip() == b'msgid ""\n"Here\'s some text where\\n"\n"white space and line breaks matter, and"\n" should\\n"\n"\\n"\n"not be removed\\n"\n"\\n"\nmsgstr ""')

    def test_wrap_long_lines_with_long_word(self):
        # A word longer than the wrap width must not cause an infinite loop.
        text = "Here's some text that\nincludesareallylongwordthatmightbutshouldnt throw us into an infinite loop\n"
        catalog = Catalog()
        catalog.add(text, locations=[('main.py', 1)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, no_location=True, omit_header=True, width=32)
        assert (buf.getvalue().strip() == b'msgid ""\n"Here\'s some text that\\n"\n"includesareallylongwordthatmightbutshouldnt"\n" throw us into an infinite "\n"loop\\n"\nmsgstr ""')

    def test_wrap_long_lines_in_header(self):
        # Long project names wrap across header comment lines.
        catalog = Catalog(project='AReallyReallyLongNameForAProject', revision_date=datetime(2007, 4, 1))
        buf = BytesIO()
        pofile.write_po(buf, catalog)
        assert (b'\n'.join(buf.getvalue().splitlines()[:7]) == b'# Translations template for AReallyReallyLongNameForAProject.\n# Copyright (C) 2007 ORGANIZATION\n# This file is distributed under the same license as the\n# AReallyReallyLongNameForAProject project.\n# FIRST AUTHOR <>, 2007.\n#\n#, fuzzy')

    def test_wrap_locations_with_hyphens(self):
        # Hyphenated paths must not be split when wrapping location comments.
        catalog = Catalog()
        catalog.add('foo', locations=[('doupy/templates/base/navmenu.inc.html.py', 60)])
        catalog.add('foo', locations=[('doupy/templates/job-offers/helpers.html', 22)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#: doupy/templates/base/navmenu.inc.html.py:60\n#: doupy/templates/job-offers/helpers.html:22\nmsgid "foo"\nmsgstr ""')

    def test_no_wrap_and_width_behaviour_on_comments(self):
        # width=None leaves the msgid unwrapped; width=100 wraps both the
        # location comments and the msgid at 100 columns.
        catalog = Catalog()
        catalog.add("Pretty dam long message id, which must really be big to test this wrap behaviour, if not it won't work.", locations=[('fake.py', n) for n in range(1, 30)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, width=None, omit_header=True)
        assert (buf.getvalue().lower() == b'#: fake.py:1 fake.py:2 fake.py:3 fake.py:4 fake.py:5 fake.py:6 fake.py:7\n#: fake.py:8 fake.py:9 fake.py:10 fake.py:11 fake.py:12 fake.py:13 fake.py:14\n#: fake.py:15 fake.py:16 fake.py:17 fake.py:18 fake.py:19 fake.py:20 fake.py:21\n#: fake.py:22 fake.py:23 fake.py:24 fake.py:25 fake.py:26 fake.py:27 fake.py:28\n#: fake.py:29\nmsgid "pretty dam long message id, which must really be big to test this wrap behaviour, if not it won\'t work."\nmsgstr ""\n\n')
        buf = BytesIO()
        pofile.write_po(buf, catalog, width=100, omit_header=True)
        assert (buf.getvalue().lower() == b'#: fake.py:1 fake.py:2 fake.py:3 fake.py:4 fake.py:5 fake.py:6 fake.py:7 fake.py:8 fake.py:9 fake.py:10\n#: fake.py:11 fake.py:12 fake.py:13 fake.py:14 fake.py:15 fake.py:16 fake.py:17 fake.py:18 fake.py:19\n#: fake.py:20 fake.py:21 fake.py:22 fake.py:23 fake.py:24 fake.py:25 fake.py:26 fake.py:27 fake.py:28\n#: fake.py:29\nmsgid ""\n"pretty dam long message id, which must really be big to test this wrap behaviour, if not it won\'t"\n" work."\nmsgstr ""\n\n')

    def test_pot_with_translator_comments(self):
        # Auto comments use "#."; user comments use "#" and may span lines.
        catalog = Catalog()
        catalog.add('foo', locations=[('main.py', 1)], auto_comments=['Comment About `foo`'])
        catalog.add('bar', locations=[('utils.py', 3)], user_comments=['Comment About `bar` with', 'multiple lines.'])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#. Comment About `foo`\n#: main.py:1\nmsgid "foo"\nmsgstr ""\n\n# Comment About `bar` with\n# multiple lines.\n#: utils.py:3\nmsgid "bar"\nmsgstr ""')

    def test_po_with_obsolete_message(self):
        # Obsolete entries are prefixed with "#~" and keep user comments.
        catalog = Catalog()
        catalog.add('foo', 'Voh', locations=[('main.py', 1)])
        catalog.obsolete['bar'] = Message('bar', 'Bahr', locations=[('utils.py', 3)], user_comments=['User comment'])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#: main.py:1\nmsgid "foo"\nmsgstr "Voh"\n\n# User comment\n#~ msgid "bar"\n#~ msgstr "Bahr"')

    def test_po_with_multiline_obsolete_message(self):
        # Each wrapped line of a multiline obsolete entry gets the "#~" prefix.
        catalog = Catalog()
        catalog.add('foo', 'Voh', locations=[('main.py', 1)])
        msgid = "Here's a message that covers\nmultiple lines, and should still be handled\ncorrectly.\n"
        msgstr = "Here's a message that covers\nmultiple lines, and should still be handled\ncorrectly.\n"
        catalog.obsolete[msgid] = Message(msgid, msgstr, locations=[('utils.py', 3)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True)
        assert (buf.getvalue().strip() == b'#: main.py:1\nmsgid "foo"\nmsgstr "Voh"\n\n#~ msgid ""\n#~ "Here\'s a message that covers\\n"\n#~ "multiple lines, and should still be handled\\n"\n#~ "correctly.\\n"\n#~ msgstr ""\n#~ "Here\'s a message that covers\\n"\n#~ "multiple lines, and should still be handled\\n"\n#~ "correctly.\\n"')

    def test_po_with_obsolete_message_ignored(self):
        # ignore_obsolete=True drops obsolete entries from the output.
        catalog = Catalog()
        catalog.add('foo', 'Voh', locations=[('main.py', 1)])
        catalog.obsolete['bar'] = Message('bar', 'Bahr', locations=[('utils.py', 3)], user_comments=['User comment'])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True, ignore_obsolete=True)
        assert (buf.getvalue().strip() == b'#: main.py:1\nmsgid "foo"\nmsgstr "Voh"')

    def test_po_with_previous_msgid(self):
        # include_previous=True emits "#| msgid" lines for fuzzy matching.
        catalog = Catalog()
        catalog.add('foo', 'Voh', locations=[('main.py', 1)], previous_id='fo')
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True, include_previous=True)
        assert (buf.getvalue().strip() == b'#: main.py:1\n#| msgid "fo"\nmsgid "foo"\nmsgstr "Voh"')

    def test_po_with_previous_msgid_plural(self):
        # Previous plural ids are emitted as "#| msgid_plural".
        catalog = Catalog()
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)], previous_id=('fo', 'fos'))
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True, include_previous=True)
        assert (buf.getvalue().strip() == b'#: main.py:1\n#| msgid "fo"\n#| msgid_plural "fos"\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Voeh"')

    def test_sorted_po(self):
        # sort_output=True sorts entries by msgid (header first).
        catalog = Catalog()
        catalog.add('bar', locations=[('utils.py', 3)], user_comments=['Comment About `bar` with', 'multiple lines.'])
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, sort_output=True)
        value = buf.getvalue().strip()
        assert (b'# Comment About `bar` with\n# multiple lines.\n#: utils.py:3\nmsgid "bar"\nmsgstr ""\n\n#: main.py:1\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Voeh"' in value)
        assert (value.find(b'msgid ""') < value.find(b'msgid "bar"') < value.find(b'msgid "foo"'))

    def test_sorted_po_context(self):
        # Entries with equal msgids are ordered by context (none < here < there).
        catalog = Catalog()
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)], context='there')
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)])
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)], context='here')
        buf = BytesIO()
        pofile.write_po(buf, catalog, sort_output=True)
        value = buf.getvalue().strip()
        assert (b'#: main.py:1\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Voeh"\n\n#: main.py:1\nmsgctxt "here"\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Voeh"\n\n#: main.py:1\nmsgctxt "there"\nmsgid "foo"\nmsgid_plural "foos"\nmsgstr[0] "Voh"\nmsgstr[1] "Voeh"' in value)

    def test_file_sorted_po(self):
        # sort_by_file=True orders entries by their source file names.
        catalog = Catalog()
        catalog.add('bar', locations=[('utils.py', 3)])
        catalog.add(('foo', 'foos'), ('Voh', 'Voeh'), locations=[('main.py', 1)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, sort_by_file=True)
        value = buf.getvalue().strip()
        assert (value.find(b'main.py') < value.find(b'utils.py'))

    def test_file_with_no_lineno(self):
        # Locations with lineno=None are written without the ":lineno" suffix.
        catalog = Catalog()
        catalog.add('bar', locations=[('utils.py', None)], user_comments=['Comment About `bar` with', 'multiple lines.'])
        buf = BytesIO()
        pofile.write_po(buf, catalog, sort_output=True)
        value = buf.getvalue().strip()
        assert (b'# Comment About `bar` with\n# multiple lines.\n#: utils.py\nmsgid "bar"\nmsgstr ""' in value)

    def test_silent_location_fallback(self):
        # Unparsable location lines degrade gracefully when reading back.
        buf = BytesIO(b'#: broken_file.py\nmsgid "missing line number"\nmsgstr ""\n\n#: broken_file.py:broken_line_number\nmsgid "broken line number"\nmsgstr ""')
        catalog = pofile.read_po(buf)
        assert (catalog['missing line number'].locations == [('broken_file.py', None)])
        assert (catalog['broken line number'].locations == [])

    def test_include_lineno(self):
        # include_lineno=True (the default behaviour) keeps ":lineno" suffixes.
        catalog = Catalog()
        catalog.add('foo', locations=[('main.py', 1)])
        catalog.add('foo', locations=[('utils.py', 3)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True, include_lineno=True)
        assert (buf.getvalue().strip() == b'#: main.py:1 utils.py:3\nmsgid "foo"\nmsgstr ""')

    def test_no_include_lineno(self):
        # include_lineno=False drops line numbers and deduplicates file names.
        catalog = Catalog()
        catalog.add('foo', locations=[('main.py', 1)])
        catalog.add('foo', locations=[('main.py', 2)])
        catalog.add('foo', locations=[('utils.py', 3)])
        buf = BytesIO()
        pofile.write_po(buf, catalog, omit_header=True, include_lineno=False)
        assert (buf.getvalue().strip() == b'#: main.py utils.py\nmsgid "foo"\nmsgstr ""')
def test_get_and_update_iou(one_to_n_address):
    """Exercise get_last_iou error handling and update_iou field updates."""
    request_args = dict(url='url', token_network_address=factories.UNIT_TOKEN_NETWORK_ADDRESS, sender=factories.make_address(), receiver=factories.make_address(), privkey=PRIVKEY)
    # Transport errors surface as ServiceRequestFailed.
    with pytest.raises(ServiceRequestFailed):
        with patch.object(session, 'get', side_effect=requests.RequestException):
            get_last_iou(**request_args)
    # An unparsable response body also raises ServiceRequestFailed.
    response = mocked_failed_response(error=ValueError)
    with pytest.raises(ServiceRequestFailed):
        with patch.object(session, 'get', return_value=response):
            get_last_iou(**request_args)
    # Valid JSON without a 'last_iou' key yields None.
    response = mocked_json_response(response_data={'other_key': 'other_value'})
    with patch.object(session, 'get', return_value=response):
        iou = get_last_iou(**request_args)
    assert (iou is None), 'get_pfs_iou should return None if pfs returns no iou.'
    # A proper 'last_iou' payload round-trips to an equal IOU object.
    last_iou = make_iou(pfs_config=PFS_CONFIG, our_address=factories.UNIT_TRANSFER_INITIATOR, privkey=PRIVKEY, block_number=10, one_to_n_address=one_to_n_address, chain_id=4, offered_fee=TokenAmount(1))
    response = mocked_json_response(response_data=dict(last_iou=last_iou.as_json()))
    with patch.object(session, 'get', return_value=response):
        iou = get_last_iou(**request_args)
    assert (iou == last_iou)
    # Adding an amount re-signs and preserves all other fields.
    new_iou_1 = update_iou(replace(iou), PRIVKEY, added_amount=10)
    assert (new_iou_1.amount == (last_iou.amount + 10))
    assert (new_iou_1.sender == last_iou.sender)
    assert (new_iou_1.receiver == last_iou.receiver)
    assert (new_iou_1.expiration_block == last_iou.expiration_block)
    assert (new_iou_1.signature is not None)
    # Changing the expiration re-signs and preserves sender/receiver.
    # BUGFIX: these assertions previously re-checked new_iou_1 instead of
    # the freshly created new_iou_2 (and compared the already-changed
    # expiration_block against the old value).
    new_iou_2 = update_iou(replace(iou), PRIVKEY, expiration_block=45)
    assert (new_iou_2.expiration_block == 45)
    assert (new_iou_2.sender == iou.sender)
    assert (new_iou_2.receiver == iou.receiver)
    assert (new_iou_2.signature is not None)
class MobileNetV3_Small(nn.Module):
    """MobileNetV3-Small classifier: stem conv -> bottleneck stack ->
    1x1 conv -> global average pool -> classifier head."""

    def __init__(self, num_classes=1000, act=nn.Hardswish):
        super(MobileNetV3_Small, self).__init__()
        # Stem: 3x3 stride-2 convolution.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.hs1 = act(inplace=True)
        # Bottleneck configuration, one tuple per Block:
        # (kernel, in_ch, hidden_ch, out_ch, nonlinearity, flag, stride).
        # NOTE(review): the boolean flag's meaning is defined by Block
        # (presumably squeeze-excite) — confirm against its definition.
        bneck_cfg = [
            (3, 16, 16, 16, nn.ReLU, True, 2),
            (3, 16, 72, 24, nn.ReLU, False, 2),
            (3, 24, 88, 24, nn.ReLU, False, 1),
            (5, 24, 96, 40, act, True, 2),
            (5, 40, 240, 40, act, True, 1),
            (5, 40, 240, 40, act, True, 1),
            (5, 40, 120, 48, act, True, 1),
            (5, 48, 144, 48, act, True, 1),
            (5, 48, 288, 96, act, True, 2),
            (5, 96, 576, 96, act, True, 1),
            (5, 96, 576, 96, act, True, 1),
        ]
        self.bneck = nn.Sequential(*[Block(*cfg) for cfg in bneck_cfg])
        # Head: 1x1 conv, pooling, then two linear layers with dropout.
        self.conv2 = nn.Conv2d(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn2 = nn.BatchNorm2d(576)
        self.hs2 = act(inplace=True)
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.linear3 = nn.Linear(576, 1280, bias=False)
        self.bn3 = nn.BatchNorm1d(1280)
        self.hs3 = act(inplace=True)
        self.drop = nn.Dropout(0.2)
        self.linear4 = nn.Linear(1280, num_classes)
        self.init_params()

    def init_params(self):
        """Kaiming init for convs, constant init for BN, small-normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                init.kaiming_normal_(module.weight, mode='fan_out')
                if module.bias is not None:
                    init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                init.constant_(module.weight, 1)
                init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                init.normal_(module.weight, std=0.001)
                if module.bias is not None:
                    init.constant_(module.bias, 0)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        x = self.hs1(self.bn1(self.conv1(x)))
        x = self.bneck(x)
        x = self.hs2(self.bn2(self.conv2(x)))
        x = self.gap(x).flatten(1)
        x = self.drop(self.hs3(self.bn3(self.linear3(x))))
        return self.linear4(x)
def _random_config() -> dict:
    """Build a pseudo-random tracker configuration.

    Seeded with RANDOM_SEED, so every call returns the same values.  The
    draw order below is significant: changing it changes the values.
    """
    rng = np.random.default_rng(seed=RANDOM_SEED)
    config = {}
    config['max_search_radius'] = rng.uniform(1, 100)
    config['update_method'] = rng.choice(list(btrack.constants.BayesianUpdates))
    config['return_kalman'] = bool(rng.uniform(0, 2))
    config['store_candidate_graph'] = bool(rng.uniform(0, 2))
    config['verbose'] = bool(rng.uniform(0, 2))
    # Three (0, upper) extents, one per spatial dimension.
    config['volume'] = tuple((0, rng.uniform(1, 100)) for _ in range(3))
    return config
class SWA(Optimizer):
    """Stochastic Weight Averaging wrapper around a base optimizer.

    In "auto" mode (``swa_freq`` given) a running average of the parameters
    is updated every ``swa_freq`` steps; otherwise the caller drives
    averaging manually via :meth:`update_swa`.  :meth:`swap_swa_param`
    exchanges the live parameters with the averaged ones.
    """

    def __init__(self, optimizer, swa_freq=None, swa_lr_factor=None):
        self._auto_mode, (self.swa_freq,) = self._check_params(swa_freq)
        self.swa_lr_factor = swa_lr_factor
        if self._auto_mode:
            if swa_freq < 1:
                raise ValueError('Invalid swa_freq: {}'.format(swa_freq))
        else:
            if self.swa_lr_factor is not None:
                warnings.warn('Swa_freq is None, ignoring swa_lr')
                self.swa_lr_factor = None
                self.swa_freq = None
        if (self.swa_lr_factor is not None) and (self.swa_lr_factor < 0):
            raise ValueError('Invalid SWA learning rate factor: {}'.format(swa_lr_factor))
        self.optimizer = optimizer
        # Mirror the wrapped optimizer's public attributes so schedulers
        # and callers keep working against this wrapper.
        self.defaults = self.optimizer.defaults
        self.param_groups = self.optimizer.param_groups
        self.state = defaultdict(dict)  # SWA-only state (averaging buffers)
        self.opt_state = self.optimizer.state
        for group in self.param_groups:
            group['n_avg'] = 0
            group['step_counter'] = 0

    @staticmethod
    def _check_params(swa_freq):
        """Normalize swa_freq; return (auto_mode, [swa_freq]).

        BUGFIX: this helper takes no ``self`` but is invoked as
        ``self._check_params(...)`` in ``__init__``; without @staticmethod
        the instance was bound to ``swa_freq`` and every construction
        raised TypeError.
        """
        params = [swa_freq]
        params_none = [(param is None) for param in params]
        if (not all(params_none)) and any(params_none):
            warnings.warn('Some of swa_start, swa_freq is None, ignoring other')
        for i, param in enumerate(params):
            if (param is not None) and (not isinstance(param, int)):
                params[i] = int(param)
                warnings.warn('Casting swa_start, swa_freq to int')
        return ((not any(params_none)), params)

    def reset_lr_to_swa(self):
        """Write swa_lr_factor * lr into each group's 'initial_lr' (read by
        LR schedulers)."""
        for param_group in self.param_groups:
            param_group['initial_lr'] = (self.swa_lr_factor * param_group['lr'])

    def update_swa_group(self, group):
        """Fold the group's current parameters into its running average."""
        for p in group['params']:
            param_state = self.state[p]
            if 'swa_buffer' not in param_state:
                param_state['swa_buffer'] = torch.zeros_like(p.data)
            buf = param_state['swa_buffer']
            # Incremental mean: buf += (p - buf) / (n_avg + 1)
            virtual_decay = 1 / float(group['n_avg'] + 1)
            diff = (p.data - buf) * virtual_decay
            buf.add_(diff)
        group['n_avg'] += 1

    def update_swa(self):
        """Update the running average for every parameter group."""
        for group in self.param_groups:
            self.update_swa_group(group)

    def swap_swa_param(self):
        """Swap live parameters with their SWA averages (in place)."""
        for group in self.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                if 'swa_buffer' not in param_state:
                    warnings.warn("SWA wasn't applied to param {}; skipping it".format(p))
                    continue
                buf = param_state['swa_buffer']
                tmp = torch.empty_like(p.data)
                tmp.copy_(p.data)
                p.data.copy_(buf)
                buf.copy_(tmp)

    def step(self, closure=None):
        """Delegate to the wrapped optimizer; in auto mode also update the
        SWA averages every swa_freq steps."""
        loss = self.optimizer.step(closure)
        for group in self.param_groups:
            group['step_counter'] += 1
            steps = group['step_counter']
            if self._auto_mode:
                if (steps % self.swa_freq) == 0:
                    self.update_swa_group(group)
        return loss

    def state_dict(self):
        """Combine the wrapped optimizer's state with the SWA state."""
        opt_state_dict = self.optimizer.state_dict()
        swa_state = {(id(k) if isinstance(k, torch.Tensor) else k): v for (k, v) in self.state.items()}
        opt_state = opt_state_dict['state']
        param_groups = opt_state_dict['param_groups']
        return {'opt_state': opt_state, 'swa_state': swa_state, 'param_groups': param_groups}

    def load_state_dict(self, state_dict):
        """Restore both the SWA state and the wrapped optimizer's state."""
        swa_state_dict = {'state': state_dict['swa_state'], 'param_groups': state_dict['param_groups']}
        opt_state_dict = {'state': state_dict['opt_state'], 'param_groups': state_dict['param_groups']}
        super(SWA, self).load_state_dict(swa_state_dict)
        self.optimizer.load_state_dict(opt_state_dict)
        self.opt_state = self.optimizer.state

    def add_param_group(self, param_group):
        """Add a group to the wrapped optimizer, with SWA bookkeeping fields."""
        param_group['n_avg'] = 0
        param_group['step_counter'] = 0
        self.optimizer.add_param_group(param_group)
def test_profile(testdir):
    # Writes a temporary test module and runs it through the plugin's
    # quick_qa helper, expecting exactly one passing test.
    # NOTE(review): the embedded source string looks mangled — the blank
    # slot above `def firefox_options` and the bare `.nondestructive` line
    # suggest `@pytest.fixture` / `@pytest.mark.nondestructive` decorators
    # (and the original indentation) were stripped.  The string is runtime
    # data, so it is left untouched here; verify against the original test.
    file_test = testdir.makepyfile('\n import pytest\n from selenium.webdriver.common.by import By\n\n \n def firefox_options(firefox_options):\n firefox_options.set_preference("browser.anchor_color", "#FF69B4")\n firefox_options.set_preference("browser.display.foreground_color",\n "#FF0000")\n firefox_options.set_preference("browser.display.use_document_colors",\n False)\n return firefox_options\n\n .nondestructive\n def test_profile(base_url, selenium):\n selenium.get(base_url)\n header = selenium.find_element(By.TAG_NAME, \'h1\')\n anchor = selenium.find_element(By.TAG_NAME, \'a\')\n header_color = header.value_of_css_property(\'color\')\n anchor_color = anchor.value_of_css_property(\'color\')\n assert header_color == \'rgb(255, 0, 0)\'\n assert anchor_color == \'rgb(255, 105, 180)\'\n ')
    testdir.quick_qa(file_test, passed=1)
def grad_elec(cc_grad, t1=None, t2=None, l1=None, l2=None, eris=None, atmlst=None, verbose=lib.logger.INFO):
    """Electronic part of the UCCSD(T) nuclear gradient.

    Amplitudes, lambdas, and integrals default to those stored on the
    underlying CC object when not supplied.  Note that ``cc_grad`` is
    rebound to a fresh ``uccsd_grad.Gradients`` instance before delegating
    (only its ``.base`` attribute is read from the argument).
    """
    mycc = cc_grad.base
    # Fall back to the CC object's stored quantities for anything omitted.
    t1 = mycc.t1 if t1 is None else t1
    t2 = mycc.t2 if t2 is None else t2
    l1 = mycc.l1 if l1 is None else l1
    l2 = mycc.l2 if l2 is None else l2
    eris = mycc.ao2mo() if eris is None else eris
    # 1-particle density-matrix intermediates; the 2-pdm is built out-of-core
    # into a temporary HDF5 file to bound memory use.
    d1 = uccsd_t_rdm._gamma1_intermediates(mycc, t1, t2, l1, l2, eris, for_grad=True)
    fd2intermediate = lib.H5TmpFile()
    d2 = uccsd_t_rdm._gamma2_outcore(mycc, t1, t2, l1, l2, eris, fd2intermediate, True)
    cc_grad = uccsd_grad.Gradients(mycc)
    return uccsd_grad.grad_elec(cc_grad, t1, t2, l1, l2, eris, atmlst, d1, d2, verbose)
class Player(QWidget):
    """Media player widget: video display, playlist view, transport
    controls, a seek slider, a frame histogram, and a color dialog.

    Fixes applied: ``'Buffering %d%'`` was an incomplete %-format string
    (raises ValueError at runtime) and several ``/`` divisions produced
    floats where Qt APIs (QSlider ranges/values, QTime components) require
    ints under Python 3; those now use ``//``.
    """

    fullScreenChanged = pyqtSignal(bool)

    def __init__(self, playlist, parent=None):
        super(Player, self).__init__(parent)
        self.colorDialog = None
        self.trackInfo = ''
        self.statusInfo = ''
        self.duration = 0  # track length in whole seconds
        # Media pipeline: player + playlist + video output widget.
        self.player = QMediaPlayer()
        self.playlist = QMediaPlaylist()
        self.player.setPlaylist(self.playlist)
        self.player.durationChanged.connect(self.durationChanged)
        self.player.positionChanged.connect(self.positionChanged)
        self.player.metaDataChanged.connect(self.metaDataChanged)
        self.playlist.currentIndexChanged.connect(self.playlistPositionChanged)
        self.player.mediaStatusChanged.connect(self.statusChanged)
        self.player.bufferStatusChanged.connect(self.bufferingProgress)
        self.player.videoAvailableChanged.connect(self.videoAvailableChanged)
        self.player.error.connect(self.displayErrorMessage)
        self.videoWidget = VideoWidget()
        self.player.setVideoOutput(self.videoWidget)
        self.playlistModel = PlaylistModel()
        self.playlistModel.setPlaylist(self.playlist)
        self.playlistView = QListView()
        self.playlistView.setModel(self.playlistModel)
        self.playlistView.setCurrentIndex(self.playlistModel.index(self.playlist.currentIndex(), 0))
        self.playlistView.activated.connect(self.jump)
        # Seek slider works in whole seconds; Qt requires int range values
        # (BUGFIX: was float true division).
        self.slider = QSlider(Qt.Horizontal)
        self.slider.setRange(0, self.player.duration() // 1000)
        self.labelDuration = QLabel()
        self.slider.sliderMoved.connect(self.seek)
        # Histogram of probed video frames.
        self.labelHistogram = QLabel()
        self.labelHistogram.setText('Histogram:')
        self.histogram = HistogramWidget()
        histogramLayout = QHBoxLayout()
        histogramLayout.addWidget(self.labelHistogram)
        histogramLayout.addWidget(self.histogram, 1)
        self.probe = QVideoProbe()
        self.probe.videoFrameProbed.connect(self.histogram.processFrame)
        self.probe.setSource(self.player)
        # Transport controls wired to the player.
        openButton = QPushButton('Open', clicked=self.open)
        controls = PlayerControls()
        controls.setState(self.player.state())
        controls.setVolume(self.player.volume())
        controls.setMuted(controls.isMuted())
        controls.play.connect(self.player.play)
        controls.pause.connect(self.player.pause)
        controls.stop.connect(self.player.stop)
        controls.next.connect(self.playlist.next)
        controls.previous.connect(self.previousClicked)
        controls.changeVolume.connect(self.player.setVolume)
        controls.changeMuting.connect(self.player.setMuted)
        controls.changeRate.connect(self.player.setPlaybackRate)
        controls.stop.connect(self.videoWidget.update)
        self.player.stateChanged.connect(controls.setState)
        self.player.volumeChanged.connect(controls.setVolume)
        self.player.mutedChanged.connect(controls.setMuted)
        self.fullScreenButton = QPushButton('FullScreen')
        self.fullScreenButton.setCheckable(True)
        # Color options only become available once video is present.
        self.colorButton = QPushButton('Color Options...')
        self.colorButton.setEnabled(False)
        self.colorButton.clicked.connect(self.showColorDialog)
        # Assemble layouts: video + playlist, slider row, controls, histogram.
        displayLayout = QHBoxLayout()
        displayLayout.addWidget(self.videoWidget, 2)
        displayLayout.addWidget(self.playlistView)
        controlLayout = QHBoxLayout()
        controlLayout.setContentsMargins(0, 0, 0, 0)
        controlLayout.addWidget(openButton)
        controlLayout.addStretch(1)
        controlLayout.addWidget(controls)
        controlLayout.addStretch(1)
        controlLayout.addWidget(self.fullScreenButton)
        controlLayout.addWidget(self.colorButton)
        layout = QVBoxLayout()
        layout.addLayout(displayLayout)
        hLayout = QHBoxLayout()
        hLayout.addWidget(self.slider)
        hLayout.addWidget(self.labelDuration)
        layout.addLayout(hLayout)
        layout.addLayout(controlLayout)
        layout.addLayout(histogramLayout)
        self.setLayout(layout)
        if not self.player.isAvailable():
            QMessageBox.warning(self, 'Service not available', 'The QMediaPlayer object does not have a valid service.\nPlease check the media service plugins are installed.')
            controls.setEnabled(False)
            self.playlistView.setEnabled(False)
            openButton.setEnabled(False)
            self.colorButton.setEnabled(False)
            self.fullScreenButton.setEnabled(False)
        self.metaDataChanged()
        self.addToPlaylist(playlist)

    def open(self):
        """Prompt for media files and append them to the playlist."""
        (fileNames, _) = QFileDialog.getOpenFileNames(self, 'Open Files')
        self.addToPlaylist(fileNames)

    def addToPlaylist(self, fileNames):
        """Add local files (loading .m3u playlists in place) or URLs."""
        for name in fileNames:
            fileInfo = QFileInfo(name)
            if fileInfo.exists():
                url = QUrl.fromLocalFile(fileInfo.absoluteFilePath())
                if fileInfo.suffix().lower() == 'm3u':
                    self.playlist.load(url)
                else:
                    self.playlist.addMedia(QMediaContent(url))
            else:
                url = QUrl(name)
                if url.isValid():
                    self.playlist.addMedia(QMediaContent(url))

    def durationChanged(self, duration):
        """Track duration changes; slider works in whole seconds.

        BUGFIX: integer division — setMaximum requires an int.
        """
        duration = duration // 1000
        self.duration = duration
        self.slider.setMaximum(duration)

    def positionChanged(self, progress):
        """Follow playback position unless the user is dragging the slider."""
        progress = progress // 1000  # BUGFIX: int seconds, not float
        if not self.slider.isSliderDown():
            self.slider.setValue(progress)
        self.updateDurationInfo(progress)

    def metaDataChanged(self):
        """Refresh the window title from artist/title metadata."""
        if self.player.isMetaDataAvailable():
            self.setTrackInfo(('%s - %s' % (self.player.metaData(QMediaMetaData.AlbumArtist), self.player.metaData(QMediaMetaData.Title))))

    def previousClicked(self):
        """Jump to the previous track, or restart the current one if more
        than 5 seconds in."""
        if self.player.position() <= 5000:
            self.playlist.previous()
        else:
            self.player.setPosition(0)

    def jump(self, index):
        """Play the playlist entry activated in the list view."""
        if index.isValid():
            self.playlist.setCurrentIndex(index.row())
            self.player.play()

    def playlistPositionChanged(self, position):
        """Keep the list-view selection in sync with the playlist."""
        self.playlistView.setCurrentIndex(self.playlistModel.index(position, 0))

    def seek(self, seconds):
        """Seek the player; the slider reports seconds, the player wants ms."""
        self.player.setPosition(seconds * 1000)

    def statusChanged(self, status):
        """Map media status changes to cursor and status-bar updates."""
        self.handleCursor(status)
        if status == QMediaPlayer.LoadingMedia:
            self.setStatusInfo('Loading...')
        elif status == QMediaPlayer.StalledMedia:
            self.setStatusInfo('Media Stalled')
        elif status == QMediaPlayer.EndOfMedia:
            QApplication.alert(self)
        elif status == QMediaPlayer.InvalidMedia:
            self.displayErrorMessage()
        else:
            self.setStatusInfo('')

    def handleCursor(self, status):
        """Show a busy cursor while loading/buffering/stalled."""
        if status in (QMediaPlayer.LoadingMedia, QMediaPlayer.BufferingMedia, QMediaPlayer.StalledMedia):
            self.setCursor(Qt.BusyCursor)
        else:
            self.unsetCursor()

    def bufferingProgress(self, progress):
        # BUGFIX: the trailing literal percent sign must be escaped as '%%';
        # '%d%' is an incomplete format and raised ValueError.
        self.setStatusInfo('Buffering %d%%' % progress)

    def videoAvailableChanged(self, available):
        """Connect/disconnect full-screen handling as video (dis)appears."""
        if available:
            self.fullScreenButton.clicked.connect(self.videoWidget.setFullScreen)
            self.videoWidget.fullScreenChanged.connect(self.fullScreenButton.setChecked)
            if self.fullScreenButton.isChecked():
                self.videoWidget.setFullScreen(True)
        else:
            self.fullScreenButton.clicked.disconnect(self.videoWidget.setFullScreen)
            self.videoWidget.fullScreenChanged.disconnect(self.fullScreenButton.setChecked)
            self.videoWidget.setFullScreen(False)
        self.colorButton.setEnabled(available)

    def setTrackInfo(self, info):
        """Update the track label and refresh the window title."""
        self.trackInfo = info
        if self.statusInfo != '':
            self.setWindowTitle(('%s | %s' % (self.trackInfo, self.statusInfo)))
        else:
            self.setWindowTitle(self.trackInfo)

    def setStatusInfo(self, info):
        """Update the status text and refresh the window title."""
        self.statusInfo = info
        if self.statusInfo != '':
            self.setWindowTitle(('%s | %s' % (self.trackInfo, self.statusInfo)))
        else:
            self.setWindowTitle(self.trackInfo)

    def displayErrorMessage(self):
        """Show the player's last error string in the status area."""
        self.setStatusInfo(self.player.errorString())

    def updateDurationInfo(self, currentInfo):
        """Render 'elapsed / total' next to the slider (currentInfo in s).

        BUGFIX: QTime components must be ints — use floor division.
        """
        duration = self.duration
        if currentInfo or duration:
            currentTime = QTime(((currentInfo // 3600) % 60), ((currentInfo // 60) % 60), (currentInfo % 60), ((currentInfo * 1000) % 1000))
            totalTime = QTime(((duration // 3600) % 60), ((duration // 60) % 60), (duration % 60), ((duration * 1000) % 1000))
            format = ('hh:mm:ss' if (duration > 3600) else 'mm:ss')
            tStr = ((currentTime.toString(format) + ' / ') + totalTime.toString(format))
        else:
            tStr = ''
        self.labelDuration.setText(tStr)

    def showColorDialog(self):
        """Lazily build and show the brightness/contrast/hue/saturation
        dialog bound to the video widget."""
        if self.colorDialog is None:
            brightnessSlider = QSlider(Qt.Horizontal)
            brightnessSlider.setRange(-100, 100)
            brightnessSlider.setValue(self.videoWidget.brightness())
            brightnessSlider.sliderMoved.connect(self.videoWidget.setBrightness)
            self.videoWidget.brightnessChanged.connect(brightnessSlider.setValue)
            contrastSlider = QSlider(Qt.Horizontal)
            contrastSlider.setRange(-100, 100)
            contrastSlider.setValue(self.videoWidget.contrast())
            contrastSlider.sliderMoved.connect(self.videoWidget.setContrast)
            self.videoWidget.contrastChanged.connect(contrastSlider.setValue)
            hueSlider = QSlider(Qt.Horizontal)
            hueSlider.setRange(-100, 100)
            hueSlider.setValue(self.videoWidget.hue())
            hueSlider.sliderMoved.connect(self.videoWidget.setHue)
            self.videoWidget.hueChanged.connect(hueSlider.setValue)
            saturationSlider = QSlider(Qt.Horizontal)
            saturationSlider.setRange(-100, 100)
            saturationSlider.setValue(self.videoWidget.saturation())
            saturationSlider.sliderMoved.connect(self.videoWidget.setSaturation)
            self.videoWidget.saturationChanged.connect(saturationSlider.setValue)
            layout = QFormLayout()
            layout.addRow('Brightness', brightnessSlider)
            layout.addRow('Contrast', contrastSlider)
            layout.addRow('Hue', hueSlider)
            layout.addRow('Saturation', saturationSlider)
            button = QPushButton('Close')
            layout.addRow(button)
            self.colorDialog = QDialog(self)
            self.colorDialog.setWindowTitle('Color Options')
            self.colorDialog.setLayout(layout)
            button.clicked.connect(self.colorDialog.close)
        self.colorDialog.show()
# BUGFIX: the marker lines were bare attribute accesses (`.remote_data`,
# `.flaky(...)`) — a syntax error; restore the pytest.mark decorators.
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_solaranywhere_bad_probability_of_exceedance():
    """A non-integer probability_of_exceedance must raise ValueError."""
    with pytest.raises(ValueError, match='must be an integer'):
        pvlib.iotools.get_solaranywhere(latitude=44, longitude=(- 73), api_key='empty', source='SolarAnywherePOELatest', probability_of_exceedance=0.5)
class VendorImporter():
    """Meta-path importer that resolves ``<root_name>.X`` to a vendored copy
    (``<vendor_pkg>.X``) and falls back to the top-level ``X``."""

    def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
        self.root_name = root_name
        self.vendored_names = set(vendored_names)
        self.vendor_pkg = (vendor_pkg or root_name.replace('extern', '_vendor'))

    @property
    def search_path(self):
        """Yield the prefixes to try: the vendor package first, then the
        global namespace.

        BUGFIX: must be a property — ``load_module`` iterates
        ``self.search_path``; without the decorator it iterated the bound
        method object and raised TypeError.
        """
        (yield (self.vendor_pkg + '.'))
        (yield '')

    def _module_matches_namespace(self, fullname):
        """Return True if fullname names one of our vendored modules."""
        (root, base, target) = fullname.partition((self.root_name + '.'))
        return ((not root) and any(map(target.startswith, self.vendored_names)))

    def load_module(self, fullname):
        """Import the target module (vendored copy preferred) and alias it
        in sys.modules under ``fullname``."""
        (root, base, target) = fullname.partition((self.root_name + '.'))
        for prefix in self.search_path:
            try:
                extant = (prefix + target)
                __import__(extant)
                mod = sys.modules[extant]
                sys.modules[fullname] = mod
                return mod
            except ImportError:
                pass
        else:
            # Reached only when every prefix failed to import.
            raise ImportError("The '{target}' package is required; normally this is bundled with this package so if you get this warning, consult the packager of your distribution.".format(**locals()))

    def create_module(self, spec):
        return self.load_module(spec.name)

    def exec_module(self, module):
        pass  # module was fully initialized by load_module

    def find_spec(self, fullname, path=None, target=None):
        """Return a spec only for modules within our namespace."""
        return (importlib.util.spec_from_loader(fullname, self) if self._module_matches_namespace(fullname) else None)

    def install(self):
        """Idempotently register this importer on sys.meta_path."""
        if (self not in sys.meta_path):
            sys.meta_path.append(self)
class OnClosedVoiceChat(Scaffold):
    """Scaffold mixin exposing a decorator for voice-chat-closed events."""

    def on_closed_voice_chat(self) -> Callable:
        """Return a decorator that registers its target as the handler for
        the 'CLOSED_HANDLER' event and hands the function back unchanged."""
        event_key = 'CLOSED_HANDLER'

        def decorator(func: Callable) -> Callable:
            if self is not None:
                self._on_event_update.add_handler(event_key, func)
            return func

        return decorator
def run():
    """Run E2Style inference over a dataset, saving per-image results,
    optional coupled comparisons, inverted latent codes, and timing stats.

    Fixes: removed redundant ``f.close()`` calls after ``with`` blocks
    (the context manager already closes the file) and dropped a
    placeholder-free f-string.
    """
    test_opts = TestOptions().parse()
    # Output directories; a downsampling suffix is added when a single
    # resize factor is given.
    if test_opts.resize_factors is not None:
        assert (len(test_opts.resize_factors.split(',')) == 1), 'When running inference, provide a single downsampling factor!'
        out_path_results = os.path.join(test_opts.exp_dir, 'inference_results', 'downsampling_{}'.format(test_opts.resize_factors))
        out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled', 'downsampling_{}'.format(test_opts.resize_factors))
    else:
        out_path_results = os.path.join(test_opts.exp_dir, 'inference_results')
        out_path_coupled = os.path.join(test_opts.exp_dir, 'inference_coupled')
    os.makedirs(out_path_results, exist_ok=True)
    os.makedirs(out_path_coupled, exist_ok=True)
    # Merge the training-time options stored in the checkpoint with the
    # current test options (test options take precedence).
    ckpt = torch.load(test_opts.checkpoint_path, map_location='cpu')
    opts = ckpt['opts']
    opts.update(vars(test_opts))
    if 'learn_in_w' not in opts:
        opts['learn_in_w'] = False
    opts = Namespace(**opts)
    net = E2Style(opts)
    net.eval()
    net.cuda()
    print('Loading dataset for {}'.format(opts.dataset_type))
    dataset_args = data_configs.DATASETS[opts.dataset_type]
    transforms_dict = dataset_args['transforms'](opts).get_transforms()
    dataset = InferenceDataset(root=opts.data_path, transform=transforms_dict['transform_inference'], opts=opts)
    dataloader = DataLoader(dataset, batch_size=opts.test_batch_size, shuffle=False, num_workers=int(opts.test_workers), drop_last=True)
    if opts.n_images is None:
        opts.n_images = len(dataset)
    global_i = 0
    global_time = []
    latent_list = []
    image_list_path = os.path.join(opts.exp_dir, 'image_list.txt')
    with open(image_list_path, 'w') as f:
        for input_batch in tqdm(dataloader):
            if global_i >= opts.n_images:
                break
            with torch.no_grad():
                input_cuda = input_batch.cuda().float()
                tic = time.time()
                (result_batch, latent_batch) = net(input_cuda, randomize_noise=False, resize=opts.resize_outputs, return_latents=True)
                latent_list.append(latent_batch)
                toc = time.time()
                global_time.append(toc - tic)
            for i in range(opts.test_batch_size):
                result = tensor2im(result_batch[i])
                im_path = dataset.paths[global_i]
                f.write(im_path + '\r\n')
                # Save a side-by-side comparison for every 100th image (or
                # all images when couple_outputs is set).
                if opts.couple_outputs or ((global_i % 100) == 0):
                    input_im = log_input_image(input_batch[i], opts)
                    resize_amount = ((256, 256) if opts.resize_outputs else (1024, 1024))
                    if opts.resize_factors is not None:
                        # source / downsampled input / reconstruction
                        source = Image.open(im_path)
                        res = np.concatenate([np.array(source.resize(resize_amount)), np.array(input_im.resize(resize_amount, resample=Image.NEAREST)), np.array(result.resize(resize_amount))], axis=1)
                    else:
                        res = np.concatenate([np.array(input_im.resize(resize_amount)), np.array(result.resize(resize_amount))], axis=1)
                    Image.fromarray(res).save(os.path.join(out_path_coupled, os.path.basename(im_path)))
                im_save_path = os.path.join(out_path_results, os.path.basename(im_path))
                Image.fromarray(np.array(result)).save(im_save_path)
                global_i += 1
    stats_path = os.path.join(opts.exp_dir, 'stats.txt')
    if opts.save_inverted_codes:
        np.save(os.path.join(opts.exp_dir, 'latent_code.npy'), torch.cat(latent_list, 0).detach().cpu().numpy())
    result_str = 'Runtime {:.4f}+-{:.4f}'.format(np.mean(global_time), np.std(global_time))
    print(result_str)
    with open(stats_path, 'w') as f:
        f.write(result_str)
class LazyBatcher():
    """Batched iterator over training triples.

    Materializes query/passage text from ids lazily, one batch at a time,
    and tokenizes each batch via ``tensorize_triples``.  Triples are sharded
    across distributed ranks; trailing partial batches are dropped.
    """

    def __init__(self, config: ColBERTConfig, triples, queries, collection, rank=0, nranks=1):
        (self.bsize, self.accumsteps) = (config.bsize, config.accumsteps)
        self.nway = config.nway
        self.query_tokenizer = QueryTokenizer(config)
        self.doc_tokenizer = DocTokenizer(config)
        self.tensorize_triples = partial(tensorize_triples, self.query_tokenizer, self.doc_tokenizer)
        self.position = 0
        # Shard the triples across ranks; queries/collection stay shared.
        self.triples = Examples.cast(triples, nway=self.nway).tolist(rank, nranks)
        self.queries = Queries.cast(queries)
        self.collection = Collection.cast(collection)

    def __iter__(self):
        return self

    def __len__(self):
        # Number of triples, not number of batches.
        return len(self.triples)

    def __next__(self):
        """Return the next tokenized batch.

        Raises StopIteration once fewer than ``bsize`` triples remain.
        """
        (offset, endpos) = (self.position, min((self.position + self.bsize), len(self.triples)))
        self.position = endpos
        if ((offset + self.bsize) > len(self.triples)):
            raise StopIteration
        (all_queries, all_passages, all_scores) = ([], [], [])
        for position in range(offset, endpos):
            (query, *pids) = self.triples[position]
            pids = pids[:self.nway]
            query = self.queries[query]
            try:
                (pids, scores) = zipstar(pids)
            except Exception:
                # Triples without distillation scores are plain pid lists and
                # make zipstar fail; fall back to score-less batches.
                # (Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit.)
                scores = []
            passages = [self.collection[pid] for pid in pids]
            all_queries.append(query)
            all_passages.extend(passages)
            all_scores.extend(scores)
        # Either every passage has a score, or none do.
        assert (len(all_scores) in [0, len(all_passages)]), len(all_scores)
        return self.collate(all_queries, all_passages, all_scores)

    def collate(self, queries, passages, scores):
        """Tokenize one batch, split into ``accumsteps`` accumulation chunks."""
        assert (len(queries) == self.bsize)
        assert (len(passages) == (self.nway * self.bsize))
        return self.tensorize_triples(queries, passages, scores, (self.bsize // self.accumsteps), self.nway)
class TestDBShellout(_BaseTestDB):
    """Integration tests for the TaskWarriorShellout backend.

    Covers filter escaping of characters that are special to the ``task``
    command line (brackets, quotes, +, -, :, ?, ;, /), logical and/or filter
    composition, annotation escaping, and UDA round-tripping.
    """

    class_to_test = TaskWarriorShellout

    def should_skip(self):
        # Skip the whole suite when the `task` binary is unavailable.
        return (not TaskWarriorShellout.can_use())

    def test_filtering_simple(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        tasks = self.tw.filter_tasks({'description.contains': 'foobar2'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 2)

    def test_filtering_brace(self):
        self.tw.task_add('[foobar1]')
        self.tw.task_add('[foobar2]')
        tasks = self.tw.filter_tasks({'description.contains': '[foobar2]'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 2)

    def test_filtering_quote(self):
        self.tw.task_add('[foobar1]')
        self.tw.task_add('"foobar2"')
        tasks = self.tw.filter_tasks({'description.contains': '"foobar2"'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 2)

    def test_filtering_plus(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar+')
        tasks = self.tw.filter_tasks({'description.contains': '"foobar+"'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_minus(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar-')
        tasks = self.tw.filter_tasks({'description.contains': '"foobar-"'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_colon(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar:')
        tasks = self.tw.filter_tasks({'description.contains': 'foobar:'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_qmark(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foo?bar')
        tasks = self.tw.filter_tasks({'description.contains': 'oo?ba'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 2)

    def test_filtering_qmark_not_contains(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foo?bar')
        tasks = self.tw.filter_tasks({'description': 'foo?bar'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 2)

    def test_filtering_semicolon(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foo;bar')
        tasks = self.tw.filter_tasks({'description.contains': 'foo;bar'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_question_mark(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foo?bar')
        tasks = self.tw.filter_tasks({'description.contains': 'foo?bar'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_slash(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foo/bar')
        tasks = self.tw.filter_tasks({'description.contains': '"foo/bar"'})
        assert (len(tasks) == 1)
        assert (tasks[0]['id'] == 3)

    def test_filtering_logic_disjunction(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar3')
        tasks = self.tw.filter_tasks({'or': [('description.has', 'foobar1'), ('description.has', 'foobar3')]})
        assert (len(tasks) == 2)
        assert (tasks[0]['id'] == 1)
        assert (tasks[1]['id'] == 3)

    def test_filtering_logic_conjunction(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar3')
        # A description cannot contain both strings, so the result is empty.
        tasks = self.tw.filter_tasks({'and': [('description.has', 'foobar1'), ('description.has', 'foobar3')]})
        assert (len(tasks) == 0)

    def test_filtering_logic_conjunction_junction_whats_your_function(self):
        self.tw.task_add('foobar1')
        self.tw.task_add('foobar2')
        self.tw.task_add('foobar3')
        tasks = self.tw.filter_tasks({'and': [('description', 'foobar1')], 'or': [('status', 'pending'), ('status', 'waiting')]})
        assert (len(tasks) == 1)

    def test_annotation_escaping(self):
        # A round-trip through update must not mangle or duplicate annotations.
        original = {'description': 're-opening the issue'}
        self.tw.task_add('foobar')
        task = self.tw.load_tasks()['pending'][0]
        task['annotations'] = [original]
        self.tw.task_update(task)
        task = self.tw.load_tasks()['pending'][0]
        self.tw.task_update(task)
        assert (len(task['annotations']) == 1)
        assert (task['annotations'][0]['description'] == original['description'])

    def test_remove_uda_string_marshal(self):
        # Setting a UDA to None removes the attribute from the task.
        task = self.tw_marshal.task_add('foobar', somestring='this is a uda')
        task['somestring'] = None
        (id, task) = self.tw_marshal.task_update(task)
        with pytest.raises(KeyError):
            task['somestring']

    def test_remove_uda_date_marshal(self):
        task = self.tw_marshal.task_add('foobar', somedate=datetime.datetime(2011, 1, 1))
        task['somedate'] = None
        (id, task) = self.tw_marshal.task_update(task)
        with pytest.raises(KeyError):
            task['somedate']

    def test_remove_uda_numeric_marshal(self):
        task = self.tw_marshal.task_add('foobar', somenumber=15)
        task['somenumber'] = None
        (id, task) = self.tw_marshal.task_update(task)
        with pytest.raises(KeyError):
            task['somenumber']

    def test_add_and_retrieve_uda_string_url(self):
        # NOTE(review): the original URL literal was lost during extraction
        # (unterminated string); this placeholder restores valid syntax --
        # confirm the exact value against upstream history.
        arbitrary_url = 'http://example.com/some/path?foo=bar'
        self.tw.config_overrides['uda'] = {'someurl': {'label': 'Some URL', 'type': 'string'}}
        self.tw.task_add('foobar', someurl=arbitrary_url)
        results = self.tw.filter_tasks({'someurl.is': arbitrary_url})
        assert (len(results) == 1)
        task = results[0]
        assert (task['someurl'] == arbitrary_url)

    def test_add_and_retrieve_uda_string_url_in_parens(self):
        # NOTE(review): placeholder URL, see test_add_and_retrieve_uda_string_url.
        arbitrary_url = 'http://example.com/some/path?foo=bar'
        self.tw.config_overrides['uda'] = {'someurl': {'label': 'Some URL', 'type': 'string'}}
        self.tw.task_add('foobar', someurl=arbitrary_url)
        results = self.tw.filter_tasks({'and': [('someurl.is', arbitrary_url)]})
        assert (len(results) == 1)
        task = results[0]
        assert (task['someurl'] == arbitrary_url)
class TestComplexPackage():
    """Checks the rendered HTML output of autoapi on the 'pypackagecomplex' fixture."""

    # FIX: the fixture decorator had lost its `@pytest.fixture` prefix
    # (`(autouse=True, scope='class')` alone is a syntax error).
    @pytest.fixture(autouse=True, scope='class')
    def built(self, builder):
        # Build the docs once per test class.
        builder('pypackagecomplex')

    def test_public_chain_resolves(self, parse):
        # A name re-exported up the package chain is documented at every level.
        submodule_file = parse('_build/html/autoapi/complex/subpackage/submodule/index.html')
        assert submodule_file.find(id='complex.subpackage.submodule.public_chain')
        subpackage_file = parse('_build/html/autoapi/complex/subpackage/index.html')
        assert subpackage_file.find(id='complex.subpackage.public_chain')
        package_file = parse('_build/html/autoapi/complex/index.html')
        assert package_file.find(id='complex.public_chain')

    def test_private_made_public(self, parse):
        submodule_file = parse('_build/html/autoapi/complex/subpackage/index.html')
        assert submodule_file.find(id='complex.subpackage.now_public_function')

    def test_multiple_import_locations(self, parse):
        submodule_file = parse('_build/html/autoapi/complex/subpackage/submodule/index.html')
        assert submodule_file.find(id='complex.subpackage.submodule.public_multiple_imports')
        subpackage_file = parse('_build/html/autoapi/complex/subpackage/index.html')
        assert subpackage_file.find(id='complex.subpackage.public_multiple_imports')
        package_file = parse('_build/html/autoapi/complex/index.html')
        assert package_file.find(id='complex.public_multiple_imports')

    def test_simple_wildcard_imports(self, parse):
        wildcard_file = parse('_build/html/autoapi/complex/wildcard/index.html')
        assert wildcard_file.find(id='complex.wildcard.public_chain')
        assert wildcard_file.find(id='complex.wildcard.now_public_function')
        assert wildcard_file.find(id='complex.wildcard.public_multiple_imports')
        assert wildcard_file.find(id='complex.wildcard.module_level_method')

    def test_wildcard_all_imports(self, parse):
        # `from x import *` honours __all__: excluded names must not appear.
        wildcard_file = parse('_build/html/autoapi/complex/wildall/index.html')
        assert (not wildcard_file.find(id='complex.wildall.not_all'))
        assert (not wildcard_file.find(id='complex.wildall.NotAllClass'))
        assert (not wildcard_file.find(id='complex.wildall.does_not_exist'))
        assert wildcard_file.find(id='complex.wildall.SimpleClass')
        assert wildcard_file.find(id='complex.wildall.simple_function')
        assert wildcard_file.find(id='complex.wildall.public_chain')
        assert wildcard_file.find(id='complex.wildall.module_level_method')

    def test_no_imports_in_module_with_all(self, parse):
        foo_file = parse('_build/html/autoapi/complex/foo/index.html')
        assert (not foo_file.find(id='complex.foo.module_level_method'))

    def test_all_overrides_import_in_module_with_all(self, parse):
        foo_file = parse('_build/html/autoapi/complex/foo/index.html')
        assert foo_file.find(id='complex.foo.PublicClass')

    def test_parses_unicode_file(self, parse):
        foo_file = parse('_build/html/autoapi/complex/unicode_data/index.html')
        assert foo_file.find(id='complex.unicode_data.unicode_str')
class Effect4810(BaseEffect):
    """Passive effect: boosts LADAR scan strength on fitted ECM modules."""

    type = 'passive'

    def handler(fit, module, context, projectionRange, **kwargs):
        def is_ecm(mod):
            return mod.item.group.name == 'ECM'

        bonus = module.getModifiedItemAttr('ecmStrengthBonusPercent')
        fit.modules.filteredItemBoost(
            is_ecm,
            'scanLadarStrengthBonus',
            bonus,
            stackingPenalties=True,
            **kwargs,
        )
# FIX: the decorator had lost its `@pytest.mark` prefix (a leading
# `.parametrize(...)` is a syntax error).
@pytest.mark.parametrize(
    'cap_fees, flat_fee, prop_fee, imbalance_fee, initial_amount, expected_amount',
    [
        # Uncapped: the imbalance penalty yields a fee rebate (amount grows).
        (False, 0, 0, 10000, 50000, (50000 + 2000)),
        (False, 0, 0, 20000, 50000, (50000 + 3995)),
        (False, 0, 0, 30000, 50000, (50000 + 5910)),
        (False, 0, 0, 40000, 50000, (50000 + 7613)),
        (False, 0, 0, 50000, 50000, (50000 + 9091)),
        # Capped: negative fees are clamped, so the amount is unchanged.
        (True, 0, 0, 10000, 50000, 50000),
        (True, 0, 0, 20000, 50000, 50000),
        (True, 0, 0, 30000, 50000, 50000),
        (True, 0, 0, 40000, 50000, 50000),
        (True, 0, 0, 50000, 50000, 50000),
    ],
)
def test_get_lock_amount_after_fees_imbalanced_channel(cap_fees, flat_fee, prop_fee, imbalance_fee, initial_amount, expected_amount):
    """Fee deduction on a maximally imbalanced channel pair, with and without fee capping."""
    balance = TokenAmount(100000)
    prop_fee_per_channel = ppm_fee_per_channel(ProportionalFeeAmount(prop_fee))
    imbalance_fee = calculate_imbalance_fees(channel_capacity=balance, proportional_imbalance_fee=ProportionalFeeAmount(imbalance_fee))
    lock = make_hash_time_lock_state(amount=initial_amount)
    # Incoming channel: all capacity on the partner side.
    channel_in = factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=TokenAmount(0)), partner_state=NettingChannelEndStateProperties(balance=balance), fee_schedule=FeeScheduleState(cap_fees=cap_fees, flat=FeeAmount(flat_fee), proportional=prop_fee_per_channel, imbalance_penalty=imbalance_fee)))
    # Outgoing channel: all capacity on our side.
    channel_out = factories.create(NettingChannelStateProperties(our_state=NettingChannelEndStateProperties(balance=balance), partner_state=NettingChannelEndStateProperties(balance=TokenAmount(0)), fee_schedule=FeeScheduleState(cap_fees=cap_fees, flat=FeeAmount(flat_fee), proportional=prop_fee_per_channel, imbalance_penalty=imbalance_fee)))
    locked_after_fees = get_amount_without_fees(amount_with_fees=lock.amount, channel_in=channel_in, channel_out=channel_out)
    assert (locked_after_fees == expected_amount)
class TestDescription:
    """Tests for the `description` property of an environment."""

    def test_default(self, isolation, isolated_data_dir, platform):
        # No description configured -> empty string (and stable across reads).
        cfg = {'project': {'name': 'my_app', 'version': '0.0.1'}}
        proj = Project(isolation, config=cfg)
        env = MockEnvironment(isolation, proj.metadata, 'default', proj.config.envs['default'], {}, isolated_data_dir, isolated_data_dir, platform, 0)
        assert env.description == env.description == ''

    def test_not_string(self, isolation, isolated_data_dir, platform):
        # A non-string description is rejected with a TypeError.
        cfg = {'project': {'name': 'my_app', 'version': '0.0.1'}, 'tool': {'hatch': {'envs': {'default': {'description': 9000}}}}}
        proj = Project(isolation, config=cfg)
        env = MockEnvironment(isolation, proj.metadata, 'default', proj.config.envs['default'], {}, isolated_data_dir, isolated_data_dir, platform, 0)
        with pytest.raises(TypeError, match='Field `tool.hatch.envs.default.description` must be a string'):
            _ = env.description

    def test_correct(self, isolation, isolated_data_dir, platform):
        # A configured string is returned unchanged (same object).
        description = 'foo'
        cfg = {'project': {'name': 'my_app', 'version': '0.0.1'}, 'tool': {'hatch': {'envs': {'default': {'description': description}}}}}
        proj = Project(isolation, config=cfg)
        env = MockEnvironment(isolation, proj.metadata, 'default', proj.config.envs['default'], {}, isolated_data_dir, isolated_data_dir, platform, 0)
        assert env.description is description
class BaseDownloadTests(DownloadMixin, TestCase):
    """Shared fixtures for the download views: one published 2.7.5 release
    with files for each OS, plus draft/hidden/pre-release variants."""

    def setUp(self):
        self.release_275_page = Page.objects.create(title='Python 2.7.5 Release', path='download/releases/2.7.5', content='whatever', is_published=True)
        # NOTE(review): subtracting timedelta(days=-1) puts release_date one
        # day in the FUTURE (now - (-1 day) == now + 1 day) -- confirm this
        # double negative is intentional and not meant to be `days=1`.
        self.release_275 = Release.objects.create(version=Release.PYTHON2, name='Python 2.7.5', is_latest=True, is_published=True, release_page=self.release_275_page, release_date=(timezone.now() - datetime.timedelta(days=(- 1))))
        # One downloadable file per platform for the published release.
        self.release_275_windows_32bit = ReleaseFile.objects.create(os=self.windows, release=self.release_275, name='Windows x86 MSI Installer (2.7.5)', description='Windows binary -- does not include source', url='ftp/python/2.7.5/python-2.7.5.msi')
        self.release_275_windows_64bit = ReleaseFile.objects.create(os=self.windows, release=self.release_275, name='Windows X86-64 MSI Installer (2.7.5)', description='Windows AMD64 / Intel 64 / X86-64 binary -- does not include source', url='ftp/python/2.7.5/python-2.7.5.amd64.msi')
        self.release_275_osx = ReleaseFile.objects.create(os=self.osx, release=self.release_275, name='Mac OSX 64-bit/32-bit', description='Mac OS X 10.6 and later', url='ftp/python/2.7.5/python-2.7.5-macosx10.6.dmg')
        self.release_275_linux = ReleaseFile.objects.create(name='Source tarball', os=self.linux, release=self.release_275, is_source=True, description='Gzipped source', url='ftp/python/2.7.5/Python-2.7.5.tgz')
        # Unpublished draft release -- must never be shown.
        self.draft_release = Release.objects.create(version=Release.PYTHON3, name='Python 9.7.2', is_published=False, release_page=self.release_275_page)
        self.draft_release_linux = ReleaseFile.objects.create(name='Source tarball for a draft release', os=self.linux, release=self.draft_release, is_source=True, description='Gzipped source', url='ftp/python/9.7.2/Python-9.7.2.tgz')
        # Published but explicitly excluded from the download page.
        self.hidden_release = Release.objects.create(version=Release.PYTHON3, name='Python 0.0.0', is_published=True, show_on_download_page=False, release_page=self.release_275_page)
        # Pre-release, shown on the download page.
        self.pre_release = Release.objects.create(version=Release.PYTHON3, name='Python 3.9.90', is_published=True, pre_release=True, show_on_download_page=True, release_page=self.release_275_page)
        # Latest Python 3 release.
        self.python_3 = Release.objects.create(version=Release.PYTHON3, name='Python 3.10', is_latest=True, is_published=True, show_on_download_page=True, release_page=self.release_275_page)
class CompactLatticeConstFst(_FstBase, _const_fst.CompactLatticeConstFst):
    """Constant (immutable) FST over the compact-lattice semiring.

    If *fst* is given, its contents are copied into this new instance;
    both mutable and constant compact-lattice FSTs are accepted.
    """

    _ops = _clat_ops
    _drawer_type = _CompactLatticeFstDrawer
    _printer_type = _CompactLatticeFstPrinter
    _weight_factory = CompactLatticeWeight
    _state_iterator_type = CompactLatticeConstFstStateIterator
    _arc_iterator_type = CompactLatticeConstFstArcIterator

    def __init__(self, fst=None):
        super(CompactLatticeConstFst, self).__init__()
        if fst is None:
            # Leave the FST empty.
            return
        if isinstance(fst, _const_fst.CompactLatticeConstFst):
            _fstext_shims._assign_compact_lattice_const_fst(fst, self)
        elif isinstance(fst, _fst.CompactLatticeFst):
            # Mutable source: convert to the constant representation.
            _fstext_shims._assign_compact_lattice_fst_to_const_fst(fst, self)
        else:
            raise TypeError('fst should be an FST over the compact lattice semiring')
class CheckingFinderTest(unittest.TestCase):
    """Tests for SimilarFinder template matching with name/object/type/instance
    constraints on template variables (the `${var}` placeholders)."""

    def setUp(self):
        super().setUp()
        self.project = testutils.sample_project()
        self.mod1 = testutils.create_module(self.project, 'mod1')

    def tearDown(self):
        testutils.remove_project(self.project)
        super().tearDown()

    def test_trivial_case(self):
        # An empty module yields no matches for any pattern.
        self.mod1.write('')
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        self.assertEqual([], list(finder.get_matches('10', {})))

    def test_simple_finding(self):
        # Unconstrained wildcards match any assignment of a call result.
        self.mod1.write(dedent(' class A(object):\n pass\n a = A()\n '))
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${anything} = ${A}()', {}))
        self.assertEqual(1, len(result))

    def test_not_matching_when_the_name_does_not_match(self):
        # `name=mod1.A` constrains ${C} to the name A; `list` must not match.
        self.mod1.write(dedent(' class A(object):\n pass\n a = list()\n '))
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${anything} = ${C}()', {'C': 'name=mod1.A'}))
        self.assertEqual(0, len(result))

    def test_not_matching_unknowns_finding(self):
        # An unresolvable name cannot satisfy a name constraint.
        self.mod1.write(dedent(' class A(object):\n pass\n a = unknown()\n '))
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${anything} = ${C}()', {'C': 'name=mod1.A'}))
        self.assertEqual(0, len(result))

    def test_finding_and_matching_pyobjects(self):
        # `object=` follows aliases: NewA resolves to the same pyobject as A.
        source = dedent(' class A(object):\n pass\n NewA = A\n a = NewA()\n ')
        self.mod1.write(source)
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${anything} = ${A}()', {'A': 'object=mod1.A'}))
        self.assertEqual(1, len(result))
        start = source.rindex('a =')
        self.assertEqual((start, (len(source) - 1)), result[0].get_region())

    def test_finding_and_matching_types(self):
        # `type=` constrains the inferred type of the matched expression.
        source = dedent(' class A(object):\n def f(self):\n pass\n a = A()\n b = a.f()\n ')
        self.mod1.write(source)
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${anything} = ${inst}.f()', {'inst': 'type=mod1.A'}))
        self.assertEqual(1, len(result))
        start = source.rindex('b')
        self.assertEqual((start, (len(source) - 1)), result[0].get_region())

    def test_checking_the_type_of_an_ass_name_node(self):
        # Type constraints also apply to assignment-target (AssName) nodes.
        self.mod1.write(dedent(' class A(object):\n pass\n an_a = A()\n '))
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${a} = ${assigned}', {'a': 'type=mod1.A'}))
        self.assertEqual(1, len(result))

    def test_checking_instance_of_an_ass_name_node(self):
        # `instance=` accepts subclasses: a B instance satisfies instance=mod1.A.
        self.mod1.write(dedent(' class A(object):\n pass\n class B(A):\n pass\n b = B()\n '))
        pymodule = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymodule)
        result = list(finder.get_matches('${a} = ${assigned}', {'a': 'instance=mod1.A'}))
        self.assertEqual(1, len(result))

    def test_checking_equality_of_imported_pynames(self):
        # A name imported into mod1 still satisfies a constraint on mod2.A.
        mod2 = testutils.create_module(self.project, 'mod2')
        mod2.write(dedent(' class A(object):\n pass\n '))
        self.mod1.write(dedent(' from mod2 import A\n an_a = A()\n '))
        pymod1 = self.project.get_pymodule(self.mod1)
        finder = similarfinder.SimilarFinder(pymod1)
        result = list(finder.get_matches('${a_class}()', {'a_class': 'name=mod2.A'}))
        self.assertEqual(1, len(result))
class TestMessageHandler():
    """Tests for telegram.ext.MessageHandler: filter evaluation, context
    population, and regex match propagation."""

    test_flag = False
    # Type of a compiled-regex match object, used to validate context.matches.
    SRE_TYPE = type(re.match('', ''))

    def test_slot_behaviour(self):
        handler = MessageHandler(filters.ALL, self.callback)
        for attr in handler.__slots__:
            assert (getattr(handler, attr, 'err') != 'err'), f"got extra slot '{attr}'"
        assert (len(mro_slots(handler)) == len(set(mro_slots(handler)))), 'duplicate slot'

    # FIX: the fixture decorator had lost its `@pytest.fixture` prefix
    # (`(autouse=True)` alone is a syntax error).
    @pytest.fixture(autouse=True)
    def _reset(self):
        self.test_flag = False

    async def callback(self, update, context):
        # True only when the context is fully populated and the update kind is
        # consistent with the presence/absence of user_data.
        self.test_flag = (isinstance(context, CallbackContext) and isinstance(context.bot, Bot) and isinstance(update, Update) and isinstance(context.update_queue, asyncio.Queue) and isinstance(context.job_queue, JobQueue) and isinstance(context.chat_data, dict) and isinstance(context.bot_data, dict) and ((isinstance(context.user_data, dict) and (isinstance(update.message, Message) or isinstance(update.edited_message, Message))) or ((context.user_data is None) and (isinstance(update.channel_post, Message) or isinstance(update.edited_channel_post, Message)))))

    def callback_regex1(self, update, context):
        # Expects exactly one regex match object in context.matches.
        if context.matches:
            types = all(((type(res) is self.SRE_TYPE) for res in context.matches))
            num = (len(context.matches) == 1)
            self.test_flag = (types and num)

    def callback_regex2(self, update, context):
        # Expects exactly two regex match objects in context.matches.
        if context.matches:
            types = all(((type(res) is self.SRE_TYPE) for res in context.matches))
            num = (len(context.matches) == 2)
            self.test_flag = (types and num)

    def test_with_filter(self, message):
        handler = MessageHandler(filters.ChatType.GROUP, self.callback)
        message.chat.type = 'group'
        assert handler.check_update(Update(0, message))
        message.chat.type = 'private'
        assert (not handler.check_update(Update(0, message)))

    def test_callback_query_with_filter(self, message):
        # Callback-query updates are not messages: the filter must not even run.
        class TestFilter(filters.UpdateFilter):
            flag = False

            def filter(self, u):
                self.flag = True

        test_filter = TestFilter()
        handler = MessageHandler(test_filter, self.callback)
        update = Update(1, callback_query=CallbackQuery(1, None, None, message=message))
        assert update.effective_message
        assert (not handler.check_update(update))
        assert (not test_filter.flag)

    def test_specific_filters(self, message):
        f = (((~ filters.UpdateType.MESSAGES) & (~ filters.UpdateType.CHANNEL_POST)) & filters.UpdateType.EDITED_CHANNEL_POST)
        handler = MessageHandler(f, self.callback)
        assert (not handler.check_update(Update(0, edited_message=message)))
        assert (not handler.check_update(Update(0, message=message)))
        assert (not handler.check_update(Update(0, channel_post=message)))
        assert handler.check_update(Update(0, edited_channel_post=message))

    def test_other_update_types(self, false_update):
        handler = MessageHandler(None, self.callback)
        assert (not handler.check_update(false_update))
        assert (not handler.check_update('string'))

    # FIX: the method used the `message` fixture without declaring it,
    # which would raise NameError at runtime.
    def test_filters_returns_empty_dict(self, message):
        class DataFilter(MessageFilter):
            data_filter = True

            def filter(self, msg: Message):
                return {}

        handler = MessageHandler(DataFilter(), self.callback)
        assert (handler.check_update(Update(0, message)) is False)

    async def test_context(self, app, message):
        handler = MessageHandler(None, self.callback)
        app.add_handler(handler)
        async with app:
            (await app.process_update(Update(0, message=message)))
            assert self.test_flag
            self.test_flag = False
            (await app.process_update(Update(0, edited_message=message)))
            assert self.test_flag
            self.test_flag = False
            (await app.process_update(Update(0, channel_post=message)))
            assert self.test_flag
            self.test_flag = False
            (await app.process_update(Update(0, edited_channel_post=message)))
            assert self.test_flag

    async def test_context_regex(self, app, message):
        handler = MessageHandler(filters.Regex('one two'), self.callback_regex1)
        app.add_handler(handler)
        async with app:
            message.text = 'not it'
            (await app.process_update(Update(0, message)))
            assert (not self.test_flag)
            message.text += ' one two now it is'
            (await app.process_update(Update(0, message)))
            assert self.test_flag

    async def test_context_multiple_regex(self, app, message):
        handler = MessageHandler((filters.Regex('one') & filters.Regex('two')), self.callback_regex2)
        app.add_handler(handler)
        async with app:
            message.text = 'not it'
            (await app.process_update(Update(0, message)))
            assert (not self.test_flag)
            message.text += ' one two now it is'
            (await app.process_update(Update(0, message)))
            assert self.test_flag
def test_dead_default() -> None:
    """An FSM recognizing C-style block comments converts to the expected regex."""
    slash = Charclass('/')
    star = Charclass('*')
    other = ~Charclass('/*')  # any character other than '/' or '*'
    # State 4 is accepting; state 5 is the dead state.
    comment_fsm = Fsm(
        alphabet={slash, star, other},
        states={0, 1, 2, 3, 4, 5},
        initial=0,
        finals={4},
        map={
            0: {slash: 1, other: 5, star: 5},
            1: {slash: 5, other: 5, star: 2},
            2: {slash: 2, other: 2, star: 3},
            3: {slash: 4, other: 2, star: 3},
            4: {slash: 5, other: 5, star: 5},
            5: {slash: 5, other: 5, star: 5},
        },
    )
    blockquote = from_fsm(comment_fsm)
    assert str(blockquote) == '/\\*([^*]|\\*+[^*/])*\\*+/'
def Match(Expr, *, Cases):
    """Build constraint clauses encoding a match/case construct over *Expr*.

    *Cases* maps a pattern (a Condition, a collection of values, or a single
    value) to one outcome or a collection of outcomes.  Each resulting clause
    has the form "pattern does not hold OR outcome", so that whenever a
    pattern holds its outcome(s) are enforced.
    """
    assert isinstance(Cases, dict)
    # Flatten: a collection of outcomes becomes several (pattern, outcome) pairs.
    t = [(k, w) for (k, v) in Cases.items() for w in (v if isinstance(v, (tuple, list, set, frozenset)) else [v])]
    if isinstance(Expr, (Variable, Node)):
        # For a Condition pattern, negate it by flipping its operator; for a
        # collection pattern, use not_belong; otherwise use plain inequality.
        # The `|` then disjoins the negated pattern with the outcome.
        return [((expr((~ k.operator), Expr, k.right_operand()) | v) if isinstance(k, Condition) else (((Expr != k) if (not isinstance(k, (tuple, list, set, frozenset))) else not_belong(Expr, k)) | v)) for (k, v) in t]
    else:
        assert False, 'Bad construction with Match: the expression must be a variable or an expression involving a variable'
class Effect991(BaseEffect):
    """Passive effect: Assault Frigates hull bonus to small energy turret range."""

    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        def uses_small_energy_turret(mod):
            return mod.item.requiresSkill('Small Energy Turret')

        bonus = ship.getModifiedItemAttr('eliteBonusGunship1')
        fit.modules.filteredItemBoost(
            uses_small_energy_turret,
            'maxRange',
            bonus,
            skill='Assault Frigates',
            **kwargs,
        )
class LogosLexer(ObjectiveCppLexer):
    """Pygments lexer for Logos (`%hook`/`%group` preprocessor directives on
    top of Objective-C++), used by jailbreak tweak sources."""

    name = 'Logos'
    aliases = ['logos']
    filenames = ['*.x', '*.xi', '*.xm', '*.xmi']
    mimetypes = ['text/x-logos']
    version_added = '1.6'
    # Slightly below default so plain Objective-C++ files are not claimed.
    priority = 0.25

    # Extends the inherited Objective-C++ states with Logos %-directives;
    # `inherit` keeps the base lexer's rules after the additions.
    tokens = {'statements': [('(%orig|%log)\\b', Keyword), ('(%c)\\b(\\()(\\s*)([a-zA-Z$_][\\w$]*)(\\s*)(\\))', bygroups(Keyword, Punctuation, Text, Name.Class, Text, Punctuation)), ('(%init)\\b(\\()', bygroups(Keyword, Punctuation), 'logos_init_directive'), ('(%init)(?=\\s*;)', bygroups(Keyword)), ('(%hook|%group)(\\s+)([a-zA-Z$_][\\w$]+)', bygroups(Keyword, Text, Name.Class), '#pop'), ('(%subclass)(\\s+)', bygroups(Keyword, Text), ('#pop', 'logos_classname')), inherit], 'logos_init_directive': [('\\s+', Text), (',', Punctuation, ('logos_init_directive', '#pop')), ('([a-zA-Z$_][\\w$]*)(\\s*)(=)(\\s*)([^);]*)', bygroups(Name.Class, Text, Punctuation, Text, Text)), ('([a-zA-Z$_][\\w$]*)', Name.Class), ('\\)', Punctuation, '#pop')], 'logos_classname': [('([a-zA-Z$_][\\w$]*)(\\s*:\\s*)([a-zA-Z$_][\\w$]*)?', bygroups(Name.Class, Text, Name.Class), '#pop'), ('([a-zA-Z$_][\\w$]*)', Name.Class, '#pop')], 'root': [('(%subclass)(\\s+)', bygroups(Keyword, Text), 'logos_classname'), ('(%hook|%group)(\\s+)([a-zA-Z$_][\\w$]+)', bygroups(Keyword, Text, Name.Class)), ('(%config)(\\s*\\(\\s*)(\\w+)(\\s*=)(.*?)(\\)\\s*)', bygroups(Keyword, Text, Name.Variable, Text, String, Text)), ('(%ctor)(\\s*)(\\{)', bygroups(Keyword, Text, Punctuation), 'function'), ('(%new)(\\s*)(\\()(.*?)(\\))', bygroups(Keyword, Text, Keyword, String, Keyword)), ('(\\s*)(%end)(\\s*)', bygroups(Text, Keyword, Text)), inherit]}

    # Quick probe used by analyse_text to detect Logos keywords.
    _logos_keywords = re.compile('%(?:hook|ctor|init|c\\()')

    def analyse_text(text):
        # Pygments convention: analyse_text takes the text only (no self);
        # returns 1.0 (certain) when any Logos keyword is present, else 0.
        if LogosLexer._logos_keywords.search(text):
            return 1.0
        return 0
def _ignore(module, root):
    """Return True if *module* should be skipped.

    A module is ignored when its name does not live under *root*, or when
    its root-relative name -- or any dotted ancestor of it -- appears in the
    module-level ``modules_ignored`` set.
    """
    full_name = module.__name__
    if not full_name.startswith(root):
        return True
    # Check the relative name, then successively strip trailing components.
    name = full_name[len(root):]
    while name:
        if name in modules_ignored:
            return True
        dot = name.rfind('.')
        if dot <= 0:
            break
        name = name[:dot]
    return False
# FIX: the decorators had lost their `@pytest.mark` prefix (leading
# `.skipif(...)` / `.parametrize(...)` are syntax errors).
@pytest.mark.skipif(randovania.is_frozen(), reason='graphviz not included in executable')
@pytest.mark.parametrize('single_image', [False, True])
@pytest.mark.parametrize('include_pickups', [False, True])
def test_render_region_graph_logic(mocker, single_image, include_pickups, blank_game_description):
    """render_region_graph_logic builds the expected graphviz Digraph nodes
    for every area (and pickup nodes when requested)."""
    gd = blank_game_description
    args = MagicMock()
    args.single_image = single_image
    args.json_database = None
    args.game = gd.game.value
    args.include_pickups = include_pickups
    args.include_teleporters = True
    import graphviz
    mock_digraph: MagicMock = mocker.patch.object(graphviz, 'Digraph')
    render_regions.render_region_graph_logic(args)
    if single_image:
        # One graph for the whole game.
        mock_digraph.assert_called_once_with(name=gd.game.short_name, comment=gd.game.long_name)
    else:
        # One graph per region; the blank game has only 'Intro'.
        mock_digraph.assert_called_once_with(name='Intro')

    def calls_for(region, area: Area):
        # Expected node call for the area itself...
        (yield call(f'{region.name}-{area.name}', area.name, color=ANY, fillcolor=ANY, style='filled', fontcolor='#ffffff', shape=ANY, penwidth='3.0'))
        # ...plus one per pickup node when pickups are included.
        if include_pickups:
            for node in area.nodes:
                if isinstance(node, PickupNode):
                    (yield call(str(node.pickup_index), ANY, shape='house'))

    area_node = list(itertools.chain.from_iterable((calls_for(region, area) for region in gd.region_list.regions for area in region.areas)))
    dot = mock_digraph.return_value
    dot.node.assert_has_calls(area_node)
    dot.render.assert_called_once_with(format='png', view=True, cleanup=True)
class BatchHardTripletLossDistanceFunction():
    """Pairwise distance functions for batch-hard triplet loss.

    Each function takes a (batch, dim) embedding tensor and returns a
    (batch, batch) matrix of pairwise distances.
    """

    # FIX: these were plain functions defined in a class body without `self`,
    # so accessing them through an instance would mis-bind the first argument.
    # @staticmethod keeps class-level access (`Cls.fn(x)`) working unchanged
    # and additionally makes instance access safe.
    @staticmethod
    def cosine_distance(embeddings):
        """Return pairwise cosine distance (1 - cosine similarity)."""
        return (1 - util.pytorch_cos_sim(embeddings, embeddings))

    @staticmethod
    def eucledian_distance(embeddings, squared=False):
        """Return the pairwise (squared) Euclidean distance matrix.

        Name kept as 'eucledian' (sic) for backward compatibility.

        Args:
            embeddings: (batch, dim) tensor.
            squared: if True, return squared distances (skips the sqrt).
        """
        # ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2, computed for all pairs.
        dot_product = torch.matmul(embeddings, embeddings.t())
        square_norm = torch.diag(dot_product)
        distances = ((square_norm.unsqueeze(0) - (2.0 * dot_product)) + square_norm.unsqueeze(1))
        # Clamp tiny negatives caused by floating-point error.
        distances[(distances < 0)] = 0
        if (not squared):
            # Add a small epsilon where distance == 0 so sqrt has a finite
            # gradient, then mask those entries back to exactly 0.
            mask = distances.eq(0).float()
            distances = (distances + (mask * 1e-16))
            distances = ((1.0 - mask) * torch.sqrt(distances))
        return distances
class Recorder(object):
    """Writes timestamped log lines to stdout (and optionally a file) and
    tracks wall-clock time spent in the dataloader/device/forward/backward
    phases of a training step."""

    def __init__(self, work_dir, print_log, log_interval):
        self.print_log_flag = print_log
        self.log_interval = log_interval
        self.log_path = '{}/log.txt'.format(work_dir)
        # Initializes cur_time and the per-phase timer dict.
        self.timer_reset()

    def print_time(self):
        localtime = time.asctime(time.localtime(time.time()))
        self.print_log('Local current time : ' + localtime)

    def print_log(self, str, path=None, print_time=True):
        # NOTE: parameter name `str` shadows the builtin but is kept for
        # backward compatibility with keyword callers.
        if path is None:
            path = self.log_path
        if print_time:
            stamp = time.asctime(time.localtime(time.time()))
            str = '[ ' + stamp + ' ] ' + str
        print(str)
        if self.print_log_flag:
            with open(path, 'a') as f:
                f.write(str)
                f.write('\n')

    def record_time(self):
        """Reset the reference timestamp and return it."""
        self.cur_time = time.time()
        return self.cur_time

    def split_time(self):
        """Return seconds elapsed since the last record, then reset it."""
        elapsed = time.time() - self.cur_time
        self.record_time()
        return elapsed

    def timer_reset(self):
        self.cur_time = time.time()
        # Seed with a small epsilon so percentage division never hits zero.
        self.timer = dict(dataloader=0.001, device=0.001, forward=0.001, backward=0.001)

    def record_timer(self, key):
        self.timer[key] += self.split_time()

    def print_time_statistics(self):
        total = sum(self.timer.values())
        proportion = {
            phase: '{:02d}%'.format(int(round(spent * 100 / total)))
            for phase, spent in self.timer.items()
        }
        self.print_log('\tTime consumption: [Data]{dataloader}, [GPU]{device}, [Forward]{forward}, [Backward]{backward}'.format(**proportion))
class AMPTrainer(SimpleTrainer):
    """SimpleTrainer variant that runs forward/backward under automatic mixed
    precision (torch.cuda.amp), using a GradScaler for loss scaling."""

    def __init__(self, model, data_loader, optimizer, grad_scaler=None):
        """
        Args:
            model: the model to train; may be a DistributedDataParallel wrapper
                but only with a single device per process.
            data_loader: iterable yielding training batches.
            optimizer: the optimizer to step.
            grad_scaler: optional torch.cuda.amp.GradScaler; a default-configured
                one is created when omitted.
        """
        unsupported = 'AMPTrainer does not support single-process multi-device training!'
        if isinstance(model, DistributedDataParallel):
            # DDP with multiple device_ids means single-process multi-device.
            assert (not (model.device_ids and (len(model.device_ids) > 1))), unsupported
        assert (not isinstance(model, DataParallel)), unsupported
        super().__init__(model, data_loader, optimizer)
        if (grad_scaler is None):
            from torch.cuda.amp import GradScaler
            grad_scaler = GradScaler()
        self.grad_scaler = grad_scaler

    def run_step(self):
        """Run one iteration: fetch data, autocast forward, scaled backward,
        then optimizer step via the grad scaler."""
        assert self.model.training, '[AMPTrainer] model was changed to eval mode!'
        assert torch.cuda.is_available(), '[AMPTrainer] CUDA is required for AMP training!'
        from torch.cuda.amp import autocast
        start = time.perf_counter()
        data = next(self._data_loader_iter)
        data_time = (time.perf_counter() - start)
        with autocast():
            loss_dict = self.model(data)
            if isinstance(loss_dict, torch.Tensor):
                # Model returned a single scalar loss.
                losses = loss_dict
                loss_dict = {'total_loss': loss_dict}
            else:
                losses = sum(loss_dict.values())
        self.optimizer.zero_grad()
        # Scale the loss before backward to avoid fp16 gradient underflow.
        self.grad_scaler.scale(losses).backward()
        self._write_metrics(loss_dict, data_time)
        # step() unscales gradients and skips the update on inf/NaN;
        # update() then adapts the scale factor.
        self.grad_scaler.step(self.optimizer)
        self.grad_scaler.update()

    def state_dict(self):
        # Include the scaler state so resumed training keeps its loss scale.
        ret = super().state_dict()
        ret['grad_scaler'] = self.grad_scaler.state_dict()
        return ret

    def load_state_dict(self, state_dict):
        super().load_state_dict(state_dict)
        self.grad_scaler.load_state_dict(state_dict['grad_scaler'])
class CCBlock(dict):
 """Conversion (CC) block parser — presumably for the ASAM MDF 4.x format,
 given the '##TX'/'##MD'/'##CC' block identifiers checked below."""
 def read_cc(self, fid, pointer):
  """Read a CC block from file object *fid* at byte offset *pointer*.

  Populates this dict with the header fields, link list, conversion
  parameters, and any referenced text/comment/nested-CC blocks.
  A null/None pointer yields the default identity conversion (cc_type 0).
  """
  if ((pointer != 0) and (pointer is not None)):
   fid.seek(pointer)
   self['pointer'] = pointer
   # Fixed 56-byte header: id, reserved, length, link count, 4 standard links.
   (self['id'], reserved, self['length'], self['link_count'], self['cc_tx_name'], self['cc_md_unit'], self['cc_md_comment'], self['cc_cc_inverse']) = _CCStruct1.unpack(fid.read(56))
   if ((self['link_count'] - 4) > 0):
    # Extra links beyond the 4 standard ones (references for the conversion).
    self['cc_ref'] = _mdf_block_read(fid, _LINK, (self['link_count'] - 4))
   (self['cc_type'], self['cc_precision'], self['cc_flags'], self['cc_ref_count'], self['cc_val_count'], self['cc_phy_range_min'], self['cc_phy_range_max']) = _CCStruct2.unpack(fid.read(24))
   if self['cc_val_count']:
    self['cc_val'] = _mdf_block_read(fid, _REAL, self['cc_val_count'])
   if (self['cc_type'] == 3):
    # Single text reference: replace the link with the text block's content.
    pointer = self['cc_ref']
    self['cc_ref'] = {}
    cc = CommentBlock()
    cc.read_tx(fid=fid, pointer=pointer)
    self['cc_ref'].update(cc)
   elif (self['cc_type'] in (7, 8, 9, 10, 11)):
    # Table-like conversions: resolve each referenced block by its identifier.
    self['cc_ref'] = list(self['cc_ref'])
    for i in range(self['cc_ref_count']):
     fid.seek(self['cc_ref'][i])
     identifier = unpack('4s', fid.read(4))[0]
     # Identifier may decode as str or bytes depending on source; accept both.
     if (identifier in ('##TX', '##MD', b'##TX', b'##MD')):
      # Text/metadata block: keep only the comment string.
      temp = CommentBlock()
      temp.read_tx(fid=fid, pointer=self['cc_ref'][i])
      self['cc_ref'][i] = temp['Comment']
     elif (identifier in ('##CC', b'##CC')):
      # Nested conversion block: parse recursively.
      cc = CCBlock()
      cc.read_cc(fid, self['cc_ref'][i])
      self['cc_ref'][i] = cc
   if self['cc_md_comment']:
    self['Comment'] = CommentBlock()
    self['Comment'].read_cm_cc(fid=fid, pointer=self['cc_md_comment'])
   if self['cc_md_unit']:
    self['unit'] = CommentBlock()
    self['unit'].read_cm_cc_unit(fid=fid, pointer=self['cc_md_unit'])
   if self['cc_tx_name']:
    self['name'] = CommentBlock()
    self['name'].read_tx(fid=fid, pointer=self['cc_tx_name'])
  else:
   # No conversion block present: identity conversion.
   self['cc_type'] = 0
class BasicBlock(nn.Module):
 """Two-conv residual block (ResNet style) with an optional downsample shortcut."""
 expansion = 1
 def __init__(self, inplanes, planes, stride=1, downsample=None):
  super(BasicBlock, self).__init__()
  # Main path: conv-bn-relu-conv-bn.
  self.conv1 = conv3x3(inplanes, planes, stride)
  self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
  self.relu = nn.ReLU(inplace=True)
  self.conv2 = conv3x3(planes, planes)
  self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
  # Shortcut projection used when shape/stride changes.
  self.downsample = downsample
  self.stride = stride
 def forward(self, x):
  shortcut = (x if (self.downsample is None) else self.downsample(x))
  out = self.relu(self.bn1(self.conv1(x)))
  out = self.bn2(self.conv2(out))
  out += shortcut
  return self.relu(out)
def register_optics(name: str, overwrite: bool=False, reason_to_exclude: Optional[str]=None) -> Callable:
 """Decorator factory registering an optics solver under *name* in
 OPTICS_METHOD_REGISTRY (signature checked against OPTICS_METHOD_SIGNATURE)."""
 registration = dict(name=name, registrator_name='Optics solver', registry=OPTICS_METHOD_REGISTRY, signature=OPTICS_METHOD_SIGNATURE, overwrite=overwrite, reason_to_exclude=reason_to_exclude)
 return generic_register(**registration)
class Movie(Cog):
 """Cog providing TMDB-backed random-movie commands.

 NOTE(review): this block is garbled — several decorators (command
 registrations) and at least two statements were stripped or truncated;
 the flagged lines below do not parse as written.
 """
 def __init__(self, bot: Bot):
  self.bot = bot
  # NOTE(review): truncated statement — presumably `self.http_session = bot.http_session`.
  self. ClientSession = bot.
 # NOTE(review): stripped decorator — presumably `@commands.group(name='movies', ...)`.
 (name='movies', aliases=('movie',), invoke_without_command=True)
 async def movies(self, ctx: Context, genre: str='', amount: int=5) -> None:
  """Send `amount` random movies of `genre` as a paginated embed."""
  # TMDB pages hold at most 20 results; refuse out-of-range requests early.
  if (amount > 20):
   (await ctx.send("You can't get more than 20 movies at once. (TMDB limits)"))
   return
  if (amount < 1):
   (await ctx.send("You can't get less than 1 movie."))
   return
  genre = genre.capitalize()
  try:
   # NOTE(review): `self. MovieGenres` is garbled, and get_movies_data expects a
   # client as its first argument — a ClientSession argument was presumably lost.
   result = (await self.get_movies_data(self. MovieGenres[genre].value, 1))
  except KeyError:
   # Unknown genre: show the help text instead of erroring out.
   (await self.bot.invoke_help_command(ctx))
   return
  # Pick a random page within TMDB's reachable range.
  page = random.randint(1, min(result['total_pages'], MAX_PAGES))
  movies = (await self.get_movies_data(self. MovieGenres[genre].value, page))
  pages = (await self.get_pages(self. movies, amount))
  embed = (await self.get_embed(genre))
  (await ImagePaginator.paginate(pages, ctx, embed))
 # NOTE(review): stripped decorator — presumably `@movies.command(name='genres', ...)`.
 (name='genres', aliases=('genre', 'g'))
 async def genres(self, ctx: Context) -> None:
  """List all currently selectable movie genres."""
  (await ctx.send(f"Current available genres: {', '.join(((('`' + genre.name) + '`') for genre in MovieGenres))}"))
 async def get_movies_data(self, client: ClientSession, genre_id: str, page: int) -> list[dict[(str, Any)]]:
  """Fetch one page of movies for a genre from TMDB's discover endpoint."""
  params = {'api_key': Tokens.tmdb.get_secret_value(), 'language': 'en-US', 'sort_by': 'popularity.desc', 'include_adult': 'false', 'include_video': 'false', 'page': page, 'with_genres': genre_id}
  url = (BASE_URL + 'discover/movie')
  async with client.get(url, params=params) as resp:
   (result, status) = ((await resp.json()), resp.status)
   if ('results' not in result):
    err_msg = f"There was a problem making the TMDB API request. Response Code: {status}, TMDB: Status Code: {result.get('status_code', None)} TMDB: Status Message: {result.get('status_message', None)}, TMDB: Errors: {result.get('errors', None)}, "
    logger.error(err_msg)
    raise APIError('TMDB API', status, err_msg)
   return result
 async def get_pages(self, client: ClientSession, movies: dict[(str, Any)], amount: int) -> list[tuple[(str, str)]]:
  """Build (description, image URL) pages for the first `amount` movies."""
  pages = []
  for i in range(amount):
   movie_id = movies['results'][i]['id']
   movie = (await self.get_movie(client, movie_id))
   (page, img) = (await self.create_page(movie))
   pages.append((page, img))
  return pages
 async def get_movie(self, client: ClientSession, movie: int) -> dict[(str, Any)]:
  """Fetch full details of a single movie by TMDB id."""
  if (not isinstance(movie, int)):
   raise ValueError('Error while fetching movie from TMDB, movie argument must be integer. ')
  url = (BASE_URL + f'movie/{movie}')
  async with client.get(url, params=MOVIE_PARAMS) as resp:
   return (await resp.json())
 async def create_page(self, movie: dict[(str, Any)]) -> tuple[(str, str)]:
  """Format one movie's details into (markdown text, poster image URL)."""
  text = ''
  text += f'''**{movie['title']}**
'''
  if movie['tagline']:
   text += f'''{movie['tagline']}
'''
  else:
   text += '\n'
  text += f'''**Rating:** {movie['vote_average']}/10 :star:
'''
  text += f'''**Release Date:** {movie['release_date']}
'''
  text += '__**Production Information**__\n'
  companies = movie['production_companies']
  countries = movie['production_countries']
  text += f'''**Made by:** {', '.join((company['name'] for company in companies))}
'''
  text += f'''**Made in:** {', '.join((country['name'] for country in countries))}
'''
  text += '__**Some Numbers**__\n'
  # '?' placeholder when TMDB reports 0/None for the figure.
  budget = (f"{movie['budget']:,d}" if movie['budget'] else '?')
  revenue = (f"{movie['revenue']:,d}" if movie['revenue'] else '?')
  if (movie['runtime'] is not None):
   duration = divmod(movie['runtime'], 60)
  else:
   duration = ('?', '?')
  text += f'''**Budget:** ${budget}
'''
  text += f'''**Revenue:** ${revenue}
'''
  text += f'''**Duration:** {f'{duration[0]} hour(s) {duration[1]} minute(s)'}
'''
  text += movie['overview']
  # NOTE(review): truncated f-string — presumably the poster URL, e.g.
  # f"{IMAGE_URL}{movie['poster_path']}".
  img = f"
  return (text, img)
 async def get_embed(self, name: str) -> Embed:
  """Build the base embed (title, footer, TMDB thumbnail) for the paginator."""
  embed = Embed(title=f'Random {name} Movies')
  embed.set_footer(text='This product uses the TMDb API but is not endorsed or certified by TMDb.')
  embed.set_thumbnail(url=THUMBNAIL_URL)
  return embed
_test
def test_simplernn_legacy_interface():
 """Legacy (Keras 1) SimpleRNN kwargs must map onto the current interface."""
 def _same_config(a, b):
  # Serialized layer configs must match exactly.
  return (json.dumps(a.get_config()) == json.dumps(b.get_config()))
 legacy = keras.layers.SimpleRNN(input_shape=[3, 5], output_dim=2, name='d')
 modern = keras.layers.SimpleRNN(2, input_shape=[3, 5], name='d')
 assert _same_config(legacy, modern)
 legacy = keras.layers.SimpleRNN(2, init='normal', inner_init='glorot_uniform', W_regularizer='l1', U_regularizer='l1', b_regularizer='l1', dropout_W=0.1, dropout_U=0.1, name='SimpleRNN')
 modern = keras.layers.SimpleRNN(2, kernel_initializer='normal', recurrent_initializer='glorot_uniform', kernel_regularizer='l1', recurrent_regularizer='l1', bias_regularizer='l1', dropout=0.1, recurrent_dropout=0.1, name='SimpleRNN')
 assert _same_config(legacy, modern)
class IDXGIResource(IDXGIDeviceSubObject):
 """comtypes COM wrapper for DXGI's IDXGIResource interface.

 The _methods_ list mirrors the COM vtable, so its order must not change.
 """
 _iid_ = comtypes.GUID('{035f3ab4-482e-4e50-b41f-8a7f8bd8960b}')
 _methods_ = [
  comtypes.STDMETHOD(comtypes.HRESULT, 'GetSharedHandle'),
  comtypes.STDMETHOD(comtypes.HRESULT, 'GetUsage'),
  comtypes.STDMETHOD(comtypes.HRESULT, 'SetEvictionPriority'),
  comtypes.STDMETHOD(comtypes.HRESULT, 'GetEvictionPriority'),
 ]
def layer(op):
 """Decorator turning a network op into a chainable layer method.

 The wrapped op receives the current input(s); its output is stored under a
 unique name in ``self.layers`` and fed as the next input. The network
 object itself is returned so calls can be chained.
 """
 def layer_decorated(self, *args, **kwargs):
  # Pick (and remember in kwargs) a unique name for this layer.
  name = kwargs.setdefault('name', self.get_unique_name(op.__name__))
  num_inputs = len(self.inputs)
  if (num_inputs == 0):
   raise RuntimeError(('No input variables found for layer %s.' % name))
  # A single input is passed bare; multiple inputs as a list.
  layer_input = (self.inputs[0] if (num_inputs == 1) else list(self.inputs))
  layer_output = op(self, layer_input, *args, **kwargs)
  self.layers[name] = layer_output
  self.feed(layer_output)
  return self
 return layer_decorated
def main():
 """Parse command-line options, read a profile in the selected format, prune
 it, and emit a Graphviz dot graph.

 Writes to --output (or stdout); exits via optparser.error / sys.exit(1) on
 invalid usage or unknown root/leaf functions.
 """
 global totalMethod
 formatNames = list(formats.keys())
 formatNames.sort()
 optparser = optparse.OptionParser(usage='\n\t%prog [options] [file] ...')
 optparser.add_option('-o', '--output', metavar='FILE', type='string', dest='output', help='output filename [stdout]')
 optparser.add_option('-n', '--node-thres', metavar='PERCENTAGE', type='float', dest='node_thres', default=0.5, help='eliminate nodes below this threshold [default: %default]')
 optparser.add_option('-e', '--edge-thres', metavar='PERCENTAGE', type='float', dest='edge_thres', default=0.1, help='eliminate edges below this threshold [default: %default]')
 optparser.add_option('-f', '--format', type='choice', choices=formatNames, dest='format', default='prof', help=('profile format: %s [default: %%default]' % naturalJoin(formatNames)))
 optparser.add_option('--total', type='choice', choices=('callratios', 'callstacks'), dest='totalMethod', default=totalMethod, help='preferred method of calculating total time: callratios or callstacks (currently affects only perf format) [default: %default]')
 optparser.add_option('-c', '--colormap', type='choice', choices=('color', 'pink', 'gray', 'bw', 'print'), dest='theme', default='color', help='color map: color, pink, gray, bw, or print [default: %default]')
 optparser.add_option('-s', '--strip', action='store_true', dest='strip', default=False, help='strip function parameters, template parameters, and const modifiers from demangled C++ function names')
 optparser.add_option('-w', '--wrap', action='store_true', dest='wrap', default=False, help='wrap function names')
 optparser.add_option('--show-samples', action='store_true', dest='show_samples', default=False, help='show function samples')
 optparser.add_option('-z', '--root', type='string', dest='root', default='', help='prune call graph to show only descendants of specified root function')
 optparser.add_option('-l', '--leaf', type='string', dest='leaf', default='', help='prune call graph to show only ancestors of specified leaf function')
 optparser.add_option('--skew', type='float', dest='theme_skew', default=1.0, help='skew the colorization curve. Values < 1.0 give more variety to lower percentages. Values > 1.0 give less variety to lower percentages')
 (options, args) = optparser.parse_args(sys.argv[1:])
 # Only pstats input supports multiple files.
 if ((len(args) > 1) and (options.format != 'pstats')):
  optparser.error('incorrect number of arguments')
 try:
  theme = themes[options.theme]
 except KeyError:
  optparser.error(("invalid colormap '%s'" % options.theme))
 # NOTE(review): a --skew of 0.0 is silently ignored by this truthiness
 # test — confirm whether that is intended.
 if options.theme_skew:
  theme.skew = options.theme_skew
 totalMethod = options.totalMethod
 try:
  Format = formats[options.format]
 except KeyError:
  optparser.error(("invalid format '%s'" % options.format))
 # Build the parser according to the format's input capabilities.
 if Format.stdinInput:
  if (not args):
   fp = sys.stdin
  else:
   fp = open(args[0], 'rt')
  parser = Format(fp)
 elif Format.multipleInput:
  if (not args):
   optparser.error(('at least a file must be specified for %s input' % options.format))
  parser = Format(*args)
 else:
  if (len(args) != 1):
   optparser.error(('exactly one file must be specified for %s input' % options.format))
  parser = Format(args[0])
 profile = parser.parse()
 if (options.output is None):
  output = sys.stdout
 elif PYTHON_3:
  output = open(options.output, 'wt', encoding='UTF-8')
 else:
  output = open(options.output, 'wt')
 dot = DotWriter(output)
 dot.strip = options.strip
 dot.wrap = options.wrap
 if options.show_samples:
  dot.show_function_events.append(SAMPLES)
 # (removed a no-op `profile = profile` self-assignment here)
 profile.prune((options.node_thres / 100.0), (options.edge_thres / 100.0))
 if options.root:
  rootId = profile.getFunctionId(options.root)
  if (not rootId):
   sys.stderr.write((('root node ' + options.root) + ' not found (might already be pruned : try -e0 -n0 flags)\n'))
   sys.exit(1)
  profile.prune_root(rootId)
 if options.leaf:
  leafId = profile.getFunctionId(options.leaf)
  if (not leafId):
   sys.stderr.write((('leaf node ' + options.leaf) + ' not found (maybe already pruned : try -e0 -n0 flags)\n'))
   sys.exit(1)
  profile.prune_leaf(leafId)
 dot.graph(profile, theme)
def _make_args_class(base, argnames):
 """Dynamically build an Args subclass of *base* with one attribute per name
 in *argnames*.

 Uses rpython's ``unroll`` so the per-attribute loops are unrolled at
 translation time; the loop shape is therefore translation-significant.
 """
 unroll_argnames = unroll.unrolling_iterable(enumerate(argnames))
 class Args(base):
  # RPython hints: fixed attribute set, all attributes immutable.
  _attrs_ = _immutable_fields_ = argnames
  def _init_args(self, *args):
   # Positional args map one-to-one onto argnames.
   for (i, name) in unroll_argnames:
    setattr(self, name, args[i])
  def _copy_args(self, other):
   # Copy every named attribute onto *other*.
   for (_, name) in unroll_argnames:
    val = getattr(self, name)
    setattr(other, name, val)
  def _get_args(self):
   # Rebuild the args tuple in declaration order.
   args = ()
   for (i, name) in unroll_argnames:
    args += (getattr(self, name),)
   return args
  def tostring(self):
   # e.g. 'Args3' — class name followed by the argument count.
   return ('%s%s' % (self.__class__.__name__, len(self._get_args())))
 return Args
def compute_gradient_penalty(discriminator, real_samples, fake_samples, device='cuda'):
 """WGAN-GP gradient penalty: E[(||grad D(x_hat)||_2 - 1)^2] over random
 interpolates between real and fake samples.

 NOTE(review): the mixing weights have shape (N, 1), so samples appear to be
 assumed 2-D (N, features) — confirm before using with image tensors.
 """
 batch = real_samples.size(0)
 mix = torch.rand([batch, 1], device=device)
 # Random points on the line segments between real and fake samples.
 x_hat = ((mix * real_samples) + ((1.0 - mix) * fake_samples)).requires_grad_(True)
 critic_out = discriminator(x_hat)
 grad_target = torch.ones([real_samples.shape[0], 1], requires_grad=False, device=device)
 (grads,) = torch.autograd.grad(outputs=critic_out, inputs=x_hat, grad_outputs=grad_target, create_graph=True, retain_graph=True, only_inputs=True)
 grads = grads.view(grads.size(0), (- 1))
 return ((grads.norm(2, dim=1) - 1) ** 2).mean()
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
 """In-place cast of conv/linear/attention weights (and select projection
 attributes) of *model* to *dtype* (low precision by default)."""
 def _convert_weights(l):
  if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
   l.weight.data = l.weight.data.to(dtype)
   if (l.bias is not None):
    l.bias.data = l.bias.data.to(dtype)
  if isinstance(l, (nn.MultiheadAttention, Attention)):
   # Attention parameter tensors stored as plain attributes.
   attn_attrs = [f'{s}_proj_weight' for s in ['in', 'q', 'k', 'v']]
   attn_attrs += ['in_proj_bias', 'bias_k', 'bias_v']
   for attr in attn_attrs:
    tensor = getattr(l, attr)
    if (tensor is not None):
     tensor.data = tensor.data.to(dtype)
  # CLIP-style projection attributes present on some modules.
  for name in ['text_projection', 'proj']:
   proj = getattr(l, name, None)
   if (proj is not None):
    proj.data = proj.data.to(dtype)
 model.apply(_convert_weights)
def get_sufficient_info_reward_location(reward_helper_info):
 """Per-game binary reward: 1.0 iff every token of the asked entity and of
 its answer appears in the final observation; masked by game_finishing_mask.

 Returns a column array of shape (num_games, 1) after masking.
 """
 entities = reward_helper_info['_entities']
 answers = reward_helper_info['_answers']
 observations = reward_helper_info['observation_before_finish']
 finishing_mask = reward_helper_info['game_finishing_mask']
 rewards = []
 for (entity, answer, obs) in zip(entities, answers, observations):
  tokens = set(obs.split())
  needed = (entity.split() + answer.split())
  rewards.append(1.0 if all((w in tokens) for w in needed) else 0.0)
 rewards = np.array(rewards)
 rewards = (rewards.reshape((1, rewards.shape[0])) * finishing_mask)
 return rewards.T
class NormalizeCaseTest(TestCase):
 """Tests for FakeFilesystem case normalisation (_original_path) on a
 case-insensitive fake filesystem."""
 def setUp(self):
  self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
  # Every test here exercises case-insensitive behaviour.
  self.filesystem.is_case_sensitive = False
 def test_normalize_case(self):
  self.filesystem.create_file('/Foo/Bar')
  # _original_path must recover the casing the file was created with.
  self.assertEqual(f'{self.filesystem.root_dir_name}Foo/Bar', self.filesystem._original_path('/foo/bar'))
  self.assertEqual(f'{self.filesystem.root_dir_name}Foo/Bar', self.filesystem._original_path('/FOO/BAR'))
 def test_normalize_case_for_drive(self):
  self.filesystem.is_windows_fs = True
  self.filesystem.create_file('C:/Foo/Bar')
  self.assertEqual('C:/Foo/Bar', self.filesystem._original_path('c:/foo/bar'))
  self.assertEqual('C:/Foo/Bar', self.filesystem._original_path('C:/FOO/BAR'))
 def test_normalize_case_for_non_existing_file(self):
  self.filesystem.create_dir('/Foo/Bar')
  # Existing path components are case-corrected; the missing tail keeps
  # the caller's casing.
  self.assertEqual(f'{self.filesystem.root_dir_name}Foo/Bar/baz', self.filesystem._original_path('/foo/bar/baz'))
  self.assertEqual(f'{self.filesystem.root_dir_name}Foo/Bar/BAZ', self.filesystem._original_path('/FOO/BAR/BAZ'))
 # NOTE(review): stripped decorator — presumably
 # @unittest.skipIf(not TestCase.is_windows, 'Regression test for Windows problem only').
 ((not TestCase.is_windows), 'Regression test for Windows problem only')
 def test_normalize_case_for_lazily_added_empty_file(self):
  filesystem = fake_filesystem.FakeFilesystem()
  real_dir_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
  filesystem.add_real_directory(real_dir_path)
  initPyPath = os.path.join(real_dir_path, '__init__.py')
  self.assertEqual(initPyPath, filesystem._original_path(initPyPath.upper()))
def imagenet_pretrain_rcrop(mean=None, std=None):
 """Contrastive-pretraining augmentation pipeline (random-resized-crop
 variant) for ImageNet, producing two augmented views per image."""
 augmentations = [
  transforms.RandomResizedCrop(size=224, scale=(0.2, 1.0)),
  transforms.RandomHorizontalFlip(p=0.5),
  transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
  transforms.RandomGrayscale(p=0.2),
  transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
  transforms.ToTensor(),
  transforms.Normalize(mean=mean, std=std),
 ]
 # Wrap so each call yields two independently augmented views.
 return MultiViewTransform(transforms.Compose(augmentations), num_views=2)
def viz_gen_and_dis_losses(all_D_losses, all_G_losses, save_dir=None):
 """Plot discriminator and generator loss curves; optionally save to disk.

 Args:
  all_D_losses: sequence of discriminator losses per step.
  all_G_losses: sequence of generator losses per step.
  save_dir: directory to write '_conv.png' into; nothing is written when falsy.
 """
 plt.plot(all_D_losses, 'r')
 plt.plot(all_G_losses, 'g')
 plt.title('Model convergence')
 plt.ylabel('Losses')
 plt.xlabel('# of steps')
 plt.legend(['Discriminator network', 'Generator network'], loc='upper right')
 if save_dir:
  # BUG FIX: condition was inverted (`if not save_dir`), which attempted
  # os.path.join(None, ...) and never saved when a directory was supplied.
  # Save before show(): show() may leave an empty figure afterwards in
  # interactive backends.
  plt.savefig(os.path.join(save_dir, '_conv.png'))
 plt.show()
def test_transformer_force_over():
 """With force_over=True, longitudes past the antimeridian survive a
 forward/inverse EPSG:4326 -> EPSG:3857 round trip with sign intact."""
 transformer = Transformer.from_crs('EPSG:4326', 'EPSG:3857', force_over=True)
 (fwd_x, fwd_y) = transformer.transform(0, 140)
 (over_x, over_y) = transformer.transform(0, (- 220))
 # 140 projects east of the meridian; -220 (same meridian, wrapped) stays west.
 assert (fwd_x > 0)
 assert (over_x < 0)
 (inv_x, inv_y) = transformer.transform(fwd_x, fwd_y, direction=TransformDirection.INVERSE)
 (inv_over_x, inv_over_y) = transformer.transform(over_x, over_y, direction=TransformDirection.INVERSE)
 assert_almost_equal(inv_x, 0)
 assert_almost_equal(inv_over_x, 0)
 assert_almost_equal(inv_y, 140)
 assert_almost_equal(inv_over_y, (- 220))
class OpenImagesChallengeEvaluator(OpenImagesDetectionEvaluator):
 """Evaluator for the Open Images Challenge (detection or instance masks).

 Tracks, per image, the set of labels that are actually evaluatable
 (verified image-level labels plus box-level ground truth classes) and
 drops detections outside that set.
 """
 def __init__(self, categories, evaluate_masks=False, matching_iou_threshold=0.5, evaluate_corlocs=False, group_of_weight=1.0):
  prefix = ('OpenImagesInstanceSegmentationChallenge' if evaluate_masks else 'OpenImagesDetectionChallenge')
  super(OpenImagesChallengeEvaluator, self).__init__(categories, matching_iou_threshold, evaluate_masks=evaluate_masks, evaluate_corlocs=evaluate_corlocs, group_of_weight=group_of_weight, metric_prefix=prefix)
  # image_id -> np.array of class ids that may be evaluated for that image.
  self._evaluatable_labels = {}
 def add_single_ground_truth_image_info(self, image_id, gt_dict):
  """Register ground truth and record which labels are evaluatable."""
  super(OpenImagesChallengeEvaluator, self).add_single_ground_truth_image_info(image_id, gt_dict)
  fields = InputDataFields
  box_classes = (gt_dict[fields.gt_classes] - self._label_id_offset)
  verified_classes = np.array([], dtype=int)
  if (fields.gt_image_classes in gt_dict):
   verified_classes = gt_dict[fields.gt_image_classes]
  elif (fields.gt_labeled_classes in gt_dict):
   verified_classes = gt_dict[fields.gt_labeled_classes]
  verified_classes -= self._label_id_offset
  self._evaluatable_labels[image_id] = np.unique(np.concatenate((verified_classes, box_classes)))
 def add_single_detected_image_info(self, image_id, detections_dict):
  """Add detections, keeping only classes evaluatable for this image."""
  if (image_id not in self._image_ids):
   # Detection arrived before (or without) ground truth: nothing evaluatable.
   self._image_ids.update([image_id])
   self._evaluatable_labels[image_id] = np.array([])
  det_classes = (detections_dict[DetectionResultFields.detection_classes] - self._label_id_offset)
  keep = np.where(np.isin(det_classes, self._evaluatable_labels[image_id]))
  det_classes = det_classes[keep]
  det_boxes = detections_dict[DetectionResultFields.detection_boxes][keep]
  det_scores = detections_dict[DetectionResultFields.detection_scores][keep]
  if self._evaluate_masks:
   det_masks = detections_dict[DetectionResultFields.detection_masks][keep]
  else:
   det_masks = None
  self._evaluation.add_single_detected_image_info(image_key=image_id, detected_boxes=det_boxes, detected_scores=det_scores, detected_class_labels=det_classes, detected_masks=det_masks)
 def clear(self):
  """Reset all accumulated evaluation state."""
  super(OpenImagesChallengeEvaluator, self).clear()
  self._evaluatable_labels.clear()
def connect_to_wifi(request: WSGIRequest) -> HttpResponse:
 """Connect the system to a wifi network via a privileged helper script.

 Expects POSTed 'ssid' and 'password'; returns the helper's output on
 success, or a 400 response when input is missing or the helper fails.
 """
 ssid = request.POST.get('ssid')
 password = request.POST.get('password')
 missing = ((ssid is None) or (password is None) or (ssid == '') or (password == ''))
 if missing:
  return HttpResponseBadRequest('Please provide both SSID and password')
 cmd = ['sudo', '/usr/local/sbin/raveberry/connect_to_wifi', ssid, password]
 try:
  output = subprocess.check_output(cmd).decode()
 except subprocess.CalledProcessError as error:
  return HttpResponseBadRequest(error.output.decode())
 return HttpResponse(output)
.parametrize('is_locked', [False, True])
def test_update_with_use_latest_vs_lock(package: ProjectPackage, repo: Repository, pool: RepositoryPool, io: NullIO, is_locked: bool) -> None:
 """Locked packages must stay at their locked versions even when every
 locked name is marked use_latest."""
 for dep_name in ('A', 'B', 'C'):
  package.add_dependency(Factory.create_dependency(dep_name, '*'))
 # (name, version) -> list of (dep_name, constraint); insertion order
 # matches the original repo population order.
 graph = {
  ('A', '1'): [('B', '3')],
  ('A', '2'): [('B', '1')],
  ('B', '1'): [('A', '2'), ('C', '2')],
  ('B', '2'): [('A', '1'), ('C', '1')],
  ('B', '3'): [('A', '1'), ('C', '1')],
  ('C', '1'): [('B', '3')],
  ('C', '2'): [('B', '1')],
 }
 packages = {}
 for ((pkg_name, version), deps) in graph.items():
  pkg = get_package(pkg_name, version)
  for (dep_name, constraint) in deps:
   pkg.add_dependency(Factory.create_dependency(dep_name, constraint))
  repo.add_package(pkg)
  packages[(pkg_name, version)] = pkg
 if is_locked:
  locked = [packages[('A', '1')], packages[('B', '3')], packages[('C', '1')]]
  use_latest = [pkg.name for pkg in locked]
 else:
  (locked, use_latest) = ([], [])
 solver = Solver(package, pool, [], locked, io)
 transaction = solver.solve(use_latest)
 check_solver_result(transaction, [{'job': 'install', 'package': packages[('C', '1')]}, {'job': 'install', 'package': packages[('B', '3')]}, {'job': 'install', 'package': packages[('A', '1')]}])
class PartialTxInput(TxInput, PSBTSection):
 def __init__(self, *args, **kwargs):
  """Create a partial (PSBT) transaction input wrapping a plain TxInput."""
  TxInput.__init__(self, *args, **kwargs)
  # Full previous transaction (PSBT NON_WITNESS_UTXO), if known.
  self._utxo = None
  # Just the spent output (PSBT WITNESS_UTXO), if known.
  self._witness_utxo = None
  # pubkey (bytes) -> signature (bytes); PSBT PARTIAL_SIG entries.
  self.part_sigs = {}
  # Requested sighash type (int), or None.
  self.sighash = None
  # pubkey (bytes) -> (root fingerprint, int derivation path).
  self.bip32_paths = {}
  self.redeem_script = None
  self.witness_script = None
  # Unrecognised PSBT key/value pairs, preserved for round-tripping.
  self._unknown = {}
  self.script_type = 'unknown'
  # Signatures required for completeness (used as threshold in is_complete()).
  self.num_sig = 0
  self.pubkeys = []
  self._script_descriptor = None
  # Wallet-supplied ("trusted") metadata, preferred over PSBT-parsed data.
  self._trusted_value_sats = None
  self._trusted_address = None
  self.block_height = None
  self.spent_height = None
  # Cached segwit-type determinations (None = not yet determined).
  self._is_p2sh_segwit = None
  self._is_native_segwit = None
 def utxo(self):
  # Getter for the full previous transaction.
  # NOTE(review): presumably decorated with @property in the original.
  return self._utxo
 def utxo(self, value: Optional[Transaction]):
  # Setter for the full previous transaction; re-validates and resolves
  # utxo/witness_utxo redundancy.
  # NOTE(review): presumably decorated with @utxo.setter in the original.
  self._utxo = value
  self.validate_data()
  self.ensure_there_is_only_one_utxo()
 def witness_utxo(self):
  # Getter for the spent output (witness UTXO).
  # NOTE(review): presumably decorated with @property in the original.
  return self._witness_utxo
_utxo.setter
 def witness_utxo(self, value: Optional[TxOutput]):
  # Setter for the spent output; re-validates and resolves redundancy.
  # NOTE(review): the stray `_utxo.setter` line above looks like a stripped
  # decorator — presumably @witness_utxo.setter in the original.
  self._witness_utxo = value
  self.validate_data()
  self.ensure_there_is_only_one_utxo()
 def script_descriptor(self):
  # Getter for the output-script descriptor.
  # NOTE(review): presumably decorated with @property in the original.
  return self._script_descriptor
_descriptor.setter
def script_descriptor(self, desc: Optional[Descriptor]):
self._script_descriptor = desc
if desc:
if (self.redeem_script is None):
self.redeem_script = desc.expand().redeem_script
if (self.witness_script is None):
self.witness_script = desc.expand().witness_script
 def to_json(self):
  """Return a JSON-serializable dict of this input, extending TxInput.to_json()
  with PSBT-specific fields (hex-encoded where binary)."""
  d = super().to_json()
  d.update({'height': self.block_height, 'value_sats': self.value_sats(), 'address': self.address, 'desc': (self.script_descriptor.to_string() if self.script_descriptor else None), 'utxo': (str(self.utxo) if self.utxo else None), 'witness_utxo': (self.witness_utxo.serialize_to_network().hex() if self.witness_utxo else None), 'sighash': self.sighash, 'redeem_script': (self.redeem_script.hex() if self.redeem_script else None), 'witness_script': (self.witness_script.hex() if self.witness_script else None), 'part_sigs': {pubkey.hex(): sig.hex() for (pubkey, sig) in self.part_sigs.items()}, 'bip32_paths': {pubkey.hex(): (xfp.hex(), bip32.convert_bip32_intpath_to_strpath(path)) for (pubkey, (xfp, path)) in self.bip32_paths.items()}, 'unknown_psbt_fields': {key.hex(): val.hex() for (key, val) in self._unknown.items()}})
  return d
 def from_txin(cls, txin: TxInput, *, strip_witness: bool=True) -> 'PartialTxInput':
  """Build a PartialTxInput from a plain TxInput, optionally dropping
  script_sig/witness so the input can be re-signed.

  NOTE(review): takes `cls` but constructs PartialTxInput directly — the
  @classmethod decorator appears to have been stripped.
  """
  res = PartialTxInput(prevout=txin.prevout, script_sig=(None if strip_witness else txin.script_sig), nsequence=txin.nsequence, witness=(None if strip_witness else txin.witness), is_coinbase_output=txin.is_coinbase_output())
  return res
 def validate_data(self, *, for_signing=False) -> None:
  """Check internal consistency of PSBT-provided data per BIP-174 rules.

  Raises PSBTInputConsistencyFailure on any mismatch; returns None otherwise.
  """
  if self.utxo:
   # Full previous tx must actually be the tx referenced by the prevout.
   if (self.prevout.txid.hex() != self.utxo.txid()):
    raise PSBTInputConsistencyFailure(f'PSBT input validation: If a non-witness UTXO is provided, its hash must match the hash specified in the prevout')
   if self.witness_utxo:
    if (self.utxo.outputs()[self.prevout.out_idx] != self.witness_utxo):
     raise PSBTInputConsistencyFailure(f'PSBT input validation: If both non-witness UTXO and witness UTXO are provided, they must be consistent')
  # NOTE(review): this check is deliberately disabled via `and False`; kept as-is.
  if (for_signing and False):
   if ((not self.is_segwit()) and self.witness_utxo):
    raise PSBTInputConsistencyFailure(f'PSBT input validation: If a witness UTXO is provided, no non-witness signature may be created')
  if (self.redeem_script and self.address):
   # The scriptPubKey's address must be the P2SH of the redeem script.
   addr = hash160_to_p2sh(hash_160(self.redeem_script))
   if (self.address != addr):
    raise PSBTInputConsistencyFailure(f'PSBT input validation: If a redeemScript is provided, the scriptPubKey must be for that redeemScript')
  if self.witness_script:
   if self.redeem_script:
    # p2wsh-in-p2sh: redeem script must wrap this witness script.
    if (self.redeem_script != bfh(bitcoin.p2wsh_nested_script(self.witness_script.hex()))):
     raise PSBTInputConsistencyFailure(f'PSBT input validation: If a witnessScript is provided, the redeemScript must be for that witnessScript')
   elif self.address:
    # native p2wsh: address must commit to this witness script.
    if (self.address != bitcoin.script_to_p2wsh(self.witness_script.hex())):
     raise PSBTInputConsistencyFailure(f'PSBT input validation: If a witnessScript is provided, the scriptPubKey must be for that witnessScript')
 def parse_psbt_section_kv(self, kt, key, val):
  """Consume one PSBT key/value pair for this input (BIP-174).

  Duplicate keys and malformed key lengths raise SerializationError.
  Unknown key types are preserved verbatim in self._unknown so that they
  round-trip on re-serialization.
  """
  try:
   kt = PSBTInputType(kt)
  except ValueError:
   # Unknown key type: falls through to the catch-all branch at the end.
   pass
  if DEBUG_PSBT_PARSING:
   print(f'{repr(kt)} {key.hex()} {val.hex()}')
  if (kt == PSBTInputType.NON_WITNESS_UTXO):
   if (self.utxo is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.utxo = Transaction(val)
   self.utxo.deserialize()
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.WITNESS_UTXO):
   if (self.witness_utxo is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.witness_utxo = TxOutput.from_network_bytes(val)
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.PARTIAL_SIG):
   if (key in self.part_sigs):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   # Key is the pubkey: 33 bytes compressed or 65 uncompressed.
   if (len(key) not in (33, 65)):
    raise SerializationError(f'key for {repr(kt)} has unexpected length: {len(key)}')
   self.part_sigs[key] = val
  elif (kt == PSBTInputType.SIGHASH_TYPE):
   if (self.sighash is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   if (len(val) != 4):
    raise SerializationError(f'value for {repr(kt)} has unexpected length: {len(val)}')
   # Little-endian uint32.
   self.sighash = struct.unpack('<I', val)[0]
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.BIP32_DERIVATION):
   if (key in self.bip32_paths):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   # Key is the pubkey: 33 bytes compressed or 65 uncompressed.
   if (len(key) not in (33, 65)):
    raise SerializationError(f'key for {repr(kt)} has unexpected length: {len(key)}')
   self.bip32_paths[key] = unpack_bip32_root_fingerprint_and_int_path(val)
  elif (kt == PSBTInputType.REDEEM_SCRIPT):
   if (self.redeem_script is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.redeem_script = val
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.WITNESS_SCRIPT):
   if (self.witness_script is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.witness_script = val
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.FINAL_SCRIPTSIG):
   if (self.script_sig is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.script_sig = val
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  elif (kt == PSBTInputType.FINAL_SCRIPTWITNESS):
   if (self.witness is not None):
    raise SerializationError(f'duplicate key: {repr(kt)}')
   self.witness = val
   if key:
    raise SerializationError(f'key for {repr(kt)} must be empty')
  else:
   # Unknown type: store under the combined (type, key) so it round-trips.
   full_key = self.get_fullkey_from_keytype_and_key(kt, key)
   if (full_key in self._unknown):
    raise SerializationError(f'duplicate key. PSBT input key for unknown type: {full_key}')
   self._unknown[full_key] = val
 def serialize_psbt_section_kvs(self, wr):
  """Write this input's PSBT key/value pairs via the writer callback *wr*.

  Dict-valued fields are emitted in sorted order for deterministic output.
  """
  self.ensure_there_is_only_one_utxo()
  if self.witness_utxo:
   wr(PSBTInputType.WITNESS_UTXO, self.witness_utxo.serialize_to_network())
  if self.utxo:
   wr(PSBTInputType.NON_WITNESS_UTXO, bfh(self.utxo.serialize_to_network(include_sigs=True)))
  for (pk, val) in sorted(self.part_sigs.items()):
   wr(PSBTInputType.PARTIAL_SIG, val, pk)
  if (self.sighash is not None):
   wr(PSBTInputType.SIGHASH_TYPE, struct.pack('<I', self.sighash))
  if (self.redeem_script is not None):
   wr(PSBTInputType.REDEEM_SCRIPT, self.redeem_script)
  if (self.witness_script is not None):
   wr(PSBTInputType.WITNESS_SCRIPT, self.witness_script)
  for k in sorted(self.bip32_paths):
   packed_path = pack_bip32_root_fingerprint_and_int_path(*self.bip32_paths[k])
   wr(PSBTInputType.BIP32_DERIVATION, packed_path, k)
  if (self.script_sig is not None):
   wr(PSBTInputType.FINAL_SCRIPTSIG, self.script_sig)
  if (self.witness is not None):
   wr(PSBTInputType.FINAL_SCRIPTWITNESS, self.witness)
  # Preserved unknown fields round-trip unchanged.
  for (full_key, val) in sorted(self._unknown.items()):
   (key_type, key) = self.get_keytype_and_key_from_fullkey(full_key)
   wr(key_type, val, key=key)
def value_sats(self) -> Optional[int]:
if (self._trusted_value_sats is not None):
return self._trusted_value_sats
if self.utxo:
out_idx = self.prevout.out_idx
return self.utxo.outputs()[out_idx].value
if self.witness_utxo:
return self.witness_utxo.value
return None
def address(self) -> Optional[str]:
if (self._trusted_address is not None):
return self._trusted_address
scriptpubkey = self.scriptpubkey
if scriptpubkey:
return get_address_from_output_script(scriptpubkey)
return None
def scriptpubkey(self) -> Optional[bytes]:
if (self._trusted_address is not None):
return bfh(bitcoin.address_to_script(self._trusted_address))
if self.utxo:
out_idx = self.prevout.out_idx
return self.utxo.outputs()[out_idx].scriptpubkey
if self.witness_utxo:
return self.witness_utxo.scriptpubkey
return None
def is_complete(self) -> bool:
if ((self.script_sig is not None) and (self.witness is not None)):
return True
if self.is_coinbase_input():
return True
if ((self.script_sig is not None) and (not self.is_segwit())):
return True
signatures = list(self.part_sigs.values())
s = len(signatures)
if (self.script_type in ('p2pk', 'p2pkh', 'p2wpkh', 'p2wpkh-p2sh')):
return (s >= 1)
if (self.script_type in ('p2sh', 'p2wsh', 'p2wsh-p2sh')):
return (s >= self.num_sig)
return False
 def finalize(self) -> None:
  """If the input has all required signatures, construct the final
  script_sig/witness and drop now-redundant PSBT metadata."""
  def clear_fields_when_finalized():
   # These fields are only needed while the input is still being signed.
   self.part_sigs = {}
   self.sighash = None
   self.bip32_paths = {}
   self.redeem_script = None
   self.witness_script = None
  if ((self.script_sig is not None) and (self.witness is not None)):
   # Already finalized: just clean up.
   clear_fields_when_finalized()
   return
  if self.is_complete():
   self.script_sig = bfh(Transaction.input_script(self))
   self.witness = bfh(Transaction.serialize_witness(self))
   clear_fields_when_finalized()
def combine_with_other_txin(self, other_txin: 'TxInput') -> None:
    """Merge signing data from another txin spending the same prevout.

    Scalar fields set on ``other_txin`` override ours; mapping fields are
    merged.  Attempts to finalize afterwards in case enough data is present.
    """
    assert self.prevout == other_txin.prevout

    def adopt(name: str) -> None:
        # Copy attribute `name` from the other input if it is set there.
        val = getattr(other_txin, name)
        if val is not None:
            setattr(self, name, val)

    adopt('script_sig')
    adopt('witness')
    if isinstance(other_txin, PartialTxInput):
        # PSBT-specific fields only exist on PartialTxInput.
        if other_txin.witness_utxo:
            self.witness_utxo = other_txin.witness_utxo
        if other_txin.utxo:
            self.utxo = other_txin.utxo
        self.part_sigs.update(other_txin.part_sigs)
        adopt('sighash')
        self.bip32_paths.update(other_txin.bip32_paths)
        adopt('redeem_script')
        adopt('witness_script')
        self._unknown.update(other_txin._unknown)
    self.ensure_there_is_only_one_utxo()
    # Enough data may now be present to finalize this input.
    self.finalize()
def ensure_there_is_only_one_utxo(self):
    """Keep at most one UTXO representation: when the full previous tx is
    available, the (redundant) witness UTXO is dropped."""
    if self.utxo is not None and self.witness_utxo is not None:
        self.witness_utxo = None
def convert_utxo_to_witness_utxo(self) -> None:
    """Shrink the stored UTXO data: keep only the spent output, not the
    whole previous transaction."""
    if self.utxo:
        # Writes the private fields directly, bypassing any property setters.
        self._witness_utxo = self.utxo.outputs()[self.prevout.out_idx]
        self._utxo = None
def is_native_segwit(self) -> Optional[bool]:
    """Whether this input spends a native-segwit output (cached).

    Returns None when it cannot be decided (no address known yet).
    """
    if self._is_native_segwit is None and self.address:
        self._is_native_segwit = bitcoin.is_segwit_address(self.address)
    return self._is_native_segwit
def is_p2sh_segwit(self) -> Optional[bool]:
    """Whether this input spends a p2sh-wrapped segwit output (cached).

    Returns None when there is not enough information to decide.
    """
    if (self._is_p2sh_segwit is None):
        def calc_if_p2sh_segwit_now():
            if (not (self.address and self.redeem_script)):
                # Not enough information to decide either way.
                return None
            if (self.address != bitcoin.hash160_to_p2sh(hash_160(self.redeem_script))):
                # Redeem script does not hash to our address -> not even p2sh.
                return False
            try:
                decoded = [x for x in script_GetOp(self.redeem_script)]
            except MalformedBitcoinScript:
                decoded = None
            # Witness v0 program embedded in the redeem script.
            if match_script_against_template(decoded, SCRIPTPUBKEY_TEMPLATE_WITNESS_V0):
                return True
            # Future witness versions (v1..v16): OP_n followed by a 2-40 byte push.
            # NOTE(review): `witver` is unused in the loop body; `opcode` alone
            # drives the template match.
            future_witness_versions = list(range(opcodes.OP_1, (opcodes.OP_16 + 1)))
            for (witver, opcode) in enumerate(future_witness_versions, start=1):
                match = [opcode, OPPushDataGeneric((lambda x: (2 <= x <= 40)))]
                if match_script_against_template(decoded, match):
                    return True
            return False
        self._is_p2sh_segwit = calc_if_p2sh_segwit_now()
    return self._is_p2sh_segwit
def is_segwit(self, *, guess_for_address=False) -> bool:
    """Best-effort answer to whether spending this input uses segwit."""
    # Definitive answers first: base class, then address/redeem-script checks.
    if super().is_segwit():
        return True
    if self.is_native_segwit() or self.is_p2sh_segwit():
        return True
    if self.is_native_segwit() is False and self.is_p2sh_segwit() is False:
        # Both possibilities definitively ruled out.
        return False
    # Heuristics from here on.
    if self.witness_script:
        return True
    txin_type = self.script_type
    if txin_type == 'address' and guess_for_address:
        txin_type = Transaction.guess_txintype_from_address(self.address)
    return is_segwit_script_type(txin_type)
def already_has_some_signatures(self) -> bool:
    """Return True if this input carries any signature data at all.

    Fix: the original returned the raw ``part_sigs`` dict when it was
    non-empty, violating the declared ``-> bool``; wrap in ``bool()`` so
    callers always get an actual boolean.
    """
    return bool(self.part_sigs
                or self.script_sig is not None
                or self.witness is not None)
class ExternalMultiKernelManager(MultiKernelManager):
    """Kernel manager for kernels whose lifecycle is owned externally (Excel).

    Restart and shutdown must be performed from the Excel side, so every
    sync and async lifecycle override below refuses with NotImplementedError.
    """

    def restart_kernel(self, *args, **kwargs):
        raise NotImplementedError('Restarting a kernel running in Excel is not supported.')

    async def _async_restart_kernel(self, *args, **kwargs):
        raise NotImplementedError('Restarting a kernel running in Excel is not supported.')

    def shutdown_kernel(self, *args, **kwargs):
        raise NotImplementedError('Shutting down a kernel running in Excel is not supported.')

    async def _async_shutdown_kernel(self, *args, **kwargs):
        raise NotImplementedError('Shutting down a kernel running in Excel is not supported.')

    def shutdown_all(self, *args, **kwargs):
        raise NotImplementedError('Shutting down a kernel running in Excel is not supported.')

    async def _async_shutdown_all(self, *args, **kwargs):
        raise NotImplementedError('Shutting down a kernel running in Excel is not supported.')
def compute_dense_reward(self, action):
    """Dense reward for pick-and-place: approach the cube, carry it to the
    goal, and maintain the grasp, minus a small action-magnitude penalty.
    """
    action = np.clip(action, -1, 1)
    # Read poses once; both distance terms use the same cube position.
    ee_pos = self.robot.ee_pose.p
    cube_pos = self.cubeA.pose.p
    dist_ee_to_cube = np.linalg.norm(ee_pos - cube_pos)
    dist_cube_to_goal = np.linalg.norm(self.goal_position - cube_pos)
    # +1 while the cube is grasped, -1 otherwise.
    grasp_term = 1.0 if self.robot.check_grasp(self.cubeA) else -1.0
    # Weighted sum of the three shaped terms (weights sum to 1).
    reward = (0.3 * (-1.0) * dist_ee_to_cube) + (0.5 * (-1.0) * dist_cube_to_goal) + (0.2 * grasp_term)
    # Quadratic penalty on action magnitude discourages jerky control.
    reward -= 0.01 * (action ** 2).sum()
    return reward
class TCustomCommands(PluginTestCase):
    """Tests for the CustomCommands plugin: editor UI, playlist hook, and
    the class-level JSON command cache."""

    def setUp(self):
        # Pull the plugin module's names into this test module's globals so
        # the tests below can refer to them unqualified.
        module = self.modules['CustomCommands']
        globals().update(vars(module))
        self.plugin = self.plugins['CustomCommands'].cls
        config.init()
        self.cmd_list = CustomCommands.DEFAULT_COMS
        self.commands = JSONObjectDict.from_list(self.cmd_list)
        init_fake_app()

    def tearDown(self):
        config.quit()
        destroy_fake_app()

    def test_JSONBasedEditor(self):
        # Smoke test: the editor window can be created, shown and torn down.
        ed = JSONBasedEditor(Command, self.commands, None, 'title')
        ed.show_now()
        ed.destroy()

    def test_playlist_plugin(self):
        pl = Playlist('foo', songs_lib=app.library)
        pl.extend([AudioFile({'~filename': '/dev/null'})])
        self.called_pl = None
        self.called_songs = None
        def proxy(songs, playlist=None):
            # Capture what the plugin forwards to _handle_songs.
            self.called_pl = playlist
            self.called_songs = songs
        plugin = self.plugin(playlists=[pl])
        plugin._handle_songs = proxy
        plugin.plugin_playlist(pl)
        # The plugin must pass both the playlist's songs and the playlist.
        self.assertTrue(self.called_songs)
        self.assertEqual(self.called_pl, pl)
        self.assertEqual(self.called_songs, pl.songs)

    def test_plugin_loads_json_once(self):
        plugin = self.plugin()
        self.assertTrue(plugin._commands)
        # Replace the class-level cache; a new instance must reuse it
        # instead of re-reading the JSON source.
        fake = {'songs': Command(name='bar')}
        self.plugin._commands = fake
        plugin = self.plugin()
        self.assertEqual(plugin._commands, fake)
# Fix: the decorator was mangled to a bare "(frozen=True)" (a SyntaxError);
# restored as @dataclass(frozen=True), which matches the annotated fields
# and the frozen=True argument.
@dataclass(frozen=True)
class Result:
    """Outcome of running one test case: exit status, captured output, paths."""
    code: int            # process exit code; non-zero means the case failed
    command_run: str     # the command line that was executed
    stderr: str
    stdout: str
    test_case_dir: Path  # original location of the test case
    tempdir: Path        # scratch dir the case actually ran in

    def print_description(self, *, verbosity: 'Verbosity') -> None:
        """Print a human-readable failure report; silent when the run succeeded.

        NOTE(review): `verbosity` is currently unused in this body — confirm
        whether callers rely on it being accepted.
        """
        if self.code:
            print(f'{self.command_run}:', end=' ')
            print_error('FAILURE\n')
            # Map tempdir paths in the output back to the real test-case dir.
            replacements = (str(self.tempdir / TEST_CASES), str(self.test_case_dir))
            if self.stderr:
                print_error(self.stderr, fix_path=replacements)
            if self.stdout:
                print_error(self.stdout, fix_path=replacements)
class MessageEntityType(StringEnum):
    """Message-entity types; each value is the wire string used by the Bot API."""
    # No instance attributes beyond the enum machinery.
    __slots__ = ()
    MENTION = 'mention'
    HASHTAG = 'hashtag'
    CASHTAG = 'cashtag'
    PHONE_NUMBER = 'phone_number'
    BOT_COMMAND = 'bot_command'
    URL = 'url'
    EMAIL = 'email'
    BOLD = 'bold'
    ITALIC = 'italic'
    CODE = 'code'
    PRE = 'pre'
    TEXT_LINK = 'text_link'
    TEXT_MENTION = 'text_mention'
    UNDERLINE = 'underline'
    STRIKETHROUGH = 'strikethrough'
    SPOILER = 'spoiler'
    CUSTOM_EMOJI = 'custom_emoji'
class LongNameTest(unittest.TestCase):
    """Backup/restore/regress tests for file names at the OS length limit."""
    # Shared roots for all tests; `output` receives the backup repositories.
    root_rp = rpath.RPath(Globals.local_connection, abs_test_dir)
    out_rp = root_rp.append_path('output')

    def test_length_limit(self):
        """Confirm the filesystem enforces NAME_MAX_LEN as expected."""
        Myrm(self.out_rp.path)
        self.out_rp.mkdir()
        # A name of exactly the maximum length must succeed...
        really_long = self.out_rp.append(('a' * NAME_MAX_LEN))
        really_long.touch()
        # ...and one character more must fail with ENAMETOOLONG.
        with self.assertRaises(OSError, msg="File name could exceed max length '{max}'.".format(max=NAME_MAX_LEN)) as cm:
            self.out_rp.append(('a' * (NAME_MAX_LEN + 1))).touch()
        self.assertEqual(cm.exception.errno, errno.ENAMETOOLONG)

    def make_input_dirs(self):
        """Build two source trees full of max-length names; return their rpaths."""
        dir1 = self.root_rp.append('longname1')
        dir2 = self.root_rp.append('longname2')
        Myrm(dir1.path)
        Myrm(dir2.path)
        dir1.mkdir()
        rp11 = dir1.append(('A' * NAME_MAX_LEN))
        rp11.write_string('foobar')
        rp12 = dir1.append(('B' * NAME_MAX_LEN))
        rp12.mkdir()
        rp121 = rp12.append(('C' * NAME_MAX_LEN))
        rp121.touch()
        dir2.mkdir()
        # dir2 shares the 'A'-file name with dir1 but has different content,
        # so incremental backups of dir2 over dir1 actually produce diffs.
        rp21 = dir2.append(('A' * NAME_MAX_LEN))
        rp21.write_string('Hello, world')
        rp22 = dir2.append(('D' * NAME_MAX_LEN))
        rp22.mkdir()
        rp221 = rp22.append(('C' * NAME_MAX_LEN))
        rp221.touch()
        return (dir1, dir2)

    def check_dir1(self, dirrp):
        """Assert `dirrp` matches the tree built as dir1 in make_input_dirs."""
        rp1 = dirrp.append(('A' * NAME_MAX_LEN))
        self.assertEqual(rp1.get_string(), 'foobar')
        rp2 = dirrp.append(('B' * NAME_MAX_LEN))
        self.assertTrue(rp2.isdir())
        rp21 = rp2.append(('C' * NAME_MAX_LEN))
        self.assertTrue(rp21.isreg())

    def check_dir2(self, dirrp):
        """Assert `dirrp` matches the tree built as dir2 in make_input_dirs."""
        rp1 = dirrp.append(('A' * NAME_MAX_LEN))
        self.assertEqual(rp1.get_string(), 'Hello, world')
        rp2 = dirrp.append(('D' * NAME_MAX_LEN))
        self.assertTrue(rp2.isdir())
        rp21 = rp2.append(('C' * NAME_MAX_LEN))
        self.assertTrue(rp21.isreg())

    def generic_test(self, inlocal, outlocal, extra_args, compare_back):
        """Back up dir1 then dir2, then restore both the latest and the
        first session and verify each against its source tree."""
        (in1, in2) = self.make_input_dirs()
        Myrm(self.out_rp.path)
        restore_dir = self.root_rp.append('longname_out')
        # Two backup sessions at increasing (fake) timestamps.
        rdiff_backup(inlocal, outlocal, in1.path, self.out_rp.path, 10000, extra_options=(extra_args + (b'backup',)))
        if compare_back:
            self.check_dir1(self.out_rp)
        rdiff_backup(inlocal, outlocal, in2.path, self.out_rp.path, 20000, extra_options=(extra_args + (b'backup',)))
        if compare_back:
            self.check_dir2(self.out_rp)
        # Restoring "now" must give the second tree...
        Myrm(restore_dir.path)
        rdiff_backup(inlocal, outlocal, self.out_rp.path, restore_dir.path, 30000, extra_options=(extra_args + (b'restore', b'--at', b'now')))
        self.check_dir2(restore_dir)
        # ...and restoring the first session's time must give the first tree.
        Myrm(restore_dir.path)
        rdiff_backup(1, 1, self.out_rp.path, restore_dir.path, 30000, extra_options=(extra_args + (b'restore', b'--at', b'10000')))
        self.check_dir1(restore_dir)

    def test_basic_local(self):
        self.generic_test(1, 1, (), 1)

    def test_quoting_local(self):
        # With quoting of A-Z the mirror is rewritten, so skip mirror checks.
        self.generic_test(1, 1, (b'--chars-to-quote', b'A-Z'), 0)

    def test_regress_basic(self):
        """A forged extra current_mirror marker must be regressable, leaving
        the repository at the first session's state."""
        (in1, in2) = self.make_input_dirs()
        Myrm(self.out_rp.path)
        restore_dir = self.root_rp.append('longname_out')
        Myrm(restore_dir.path)
        rdiff_backup(1, 1, in1.path, self.out_rp.path, 10000)
        rdiff_backup(1, 1, in2.path, self.out_rp.path, 20000)
        # Fake an interrupted second session, then regress it away.
        self.add_current_mirror(self.out_rp, 10000)
        comtst.rdiff_backup_action(True, True, self.out_rp, None, (), b'regress', ())
        rdiff_backup(1, 1, self.out_rp.path, restore_dir.path, 30000, extra_options=(b'restore', b'--at', b'now'))
        self.check_dir1(restore_dir)

    def add_current_mirror(self, out_rp, time):
        """Plant an extra current_mirror marker so the repo looks interrupted."""
        data_rp = self.out_rp.append_path('rdiff-backup-data')
        cur_mirror_rp = data_rp.append(('current_mirror.%s.data' % (Time.timetostring(time),)))
        cur_mirror_rp.touch()

    def test_long_socket_name(self):
        """Sockets under a very long directory path must survive backup/restore."""
        input_dir = os.path.join(old_test_dir, b'select', b'filetypes')
        output_dir = os.path.join(abs_test_dir, (b'tenletters' * 10))
        Myrm(output_dir)
        restore_dir = os.path.join(abs_test_dir, (b'restoresme' * 10))
        Myrm(restore_dir)
        rdiff_backup(True, True, input_dir, output_dir)
        rdiff_backup(True, True, output_dir, restore_dir, extra_options=(b'restore', b'--at', b'0'))
        compare_recursive(rpath.RPath(Globals.local_connection, input_dir), rpath.RPath(Globals.local_connection, restore_dir))
def _convert_dict_to_message(_dict: dict) -> BaseMessage:
    """Convert an OpenAI-style message dict into a LangChain message object.

    Known roles map to their dedicated message classes; anything else
    becomes a generic ChatMessage carrying the original role.
    """
    role = _dict['role']
    content = _dict['content']
    if role == 'user':
        return HumanMessage(content=content)
    if role == 'assistant':
        return AIMessage(content=content)
    if role == 'system':
        return SystemMessage(content=content)
    return ChatMessage(content=content, role=role)
class SetBotCommands:
    """Mixin adding :meth:`set_bot_commands` to pyrogram.Client."""

    async def set_bot_commands(self: 'pyrogram.Client', commands: List['types.BotCommand'], scope: 'types.BotCommandScope'=types.BotCommandScopeDefault(), language_code: str='') -> bool:
        """Register the bot's command list for the given scope and language."""
        raw_commands = [command.write() for command in commands]
        request = raw.functions.bots.SetBotCommands(
            commands=raw_commands,
            scope=await scope.write(self),
            lang_code=language_code,
        )
        return await self.invoke(request)
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
if (data_args.selected_indices_store_path and (not os.path.isdir(data_args.selected_indices_store_path))):
os.makedirs(data_args.selected_indices_store_path, exist_ok=True)
elif (not data_args.selected_indices_store_path):
print('data_args.selected_indices_store_path is ', data_args.selected_indices_store_path)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.my_task_name in ['hans']):
raw_datasets = load_dataset(data_args.my_task_name, cache_dir=model_args.cache_dir)
elif (data_args.task_name is not None):
raw_datasets = load_dataset('glue', data_args.task_name, cache_dir=model_args.cache_dir)
elif (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {'train': data_args.train_file, 'validation': data_args.validation_file}
if training_args.do_predict:
if (data_args.test_file is not None):
train_extension = data_args.train_file.split('.')[(- 1)]
test_extension = data_args.test_file.split('.')[(- 1)]
assert (test_extension == train_extension), '`test_file` should have the same extension (csv or json) as `train_file`.'
data_files['test'] = data_args.test_file
else:
raise ValueError('Need either a GLUE task or a test file for `do_predict`.')
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}')
if data_args.train_file.endswith('.csv'):
raw_datasets = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
else:
raw_datasets = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
if (data_args.task_name is not None):
is_regression = (data_args.task_name == 'stsb')
if (not is_regression):
label_list = raw_datasets['train'].features['label'].names
num_labels = len(label_list)
else:
num_labels = 1
else:
is_regression = (raw_datasets['train'].features['label'].dtype in ['float32', 'float64'])
if is_regression:
num_labels = 1
else:
label_list = raw_datasets['train'].unique('label')
label_list.sort()
num_labels = len(label_list)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForSequenceClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
if (data_args.task_name is not None):
(sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
else:
non_label_column_names = [name for name in raw_datasets['train'].column_names if (name != 'label')]
if (('sentence1' in non_label_column_names) and ('sentence2' in non_label_column_names)):
(sentence1_key, sentence2_key) = ('sentence1', 'sentence2')
elif (len(non_label_column_names) >= 2):
(sentence1_key, sentence2_key) = non_label_column_names[:2]
else:
(sentence1_key, sentence2_key) = (non_label_column_names[0], None)
if data_args.pad_to_max_length:
padding = 'max_length'
else:
padding = False
label_to_id = None
if ((model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (data_args.task_name is not None) and (not is_regression)):
label_name_to_id = {k.lower(): v for (k, v) in model.config.label2id.items()}
if (list(sorted(label_name_to_id.keys())) == list(sorted(label_list))):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}.
Ignoring the model labels as a result.''')
elif ((data_args.task_name is None) and (not is_regression)):
label_to_id = {v: i for (i, v) in enumerate(label_list)}
if (label_to_id is not None):
model.config.label2id = label_to_id
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
elif ((data_args.task_name is not None) and (not is_regression)):
model.config.label2id = {l: i for (i, l) in enumerate(label_list)}
model.config.id2label = {id: label for (label, id) in config.label2id.items()}
if (data_args.max_seq_length > tokenizer.model_max_length):
logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
if ((label_to_id is not None) and ('label' in examples)):
result['label'] = [(label_to_id[l] if (l != (- 1)) else (- 1)) for l in examples['label']]
return result
with training_args.main_process_first(desc='dataset map pre-processing'):
raw_datasets = raw_datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
if ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
mnli_dataset = load_dataset('glue', 'mnli', cache_dir=model_args.cache_dir)
with training_args.main_process_first(desc='dataset map pre-processing'):
mnli_dataset = mnli_dataset.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on dataset')
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
import json
full_train_dataset = raw_datasets['train']
if (data_args.cur_mode == 'default'):
if os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'qqp_paws_train_{data_args.cur_mode}_{training_args.seed}.json')):
with open(os.path.join(data_args.selected_indices_store_path, f'qqp_paws_train_{data_args.cur_mode}_{training_args.seed}.json')) as f:
selected_indices = json.load(f)
else:
selected_indices = random.sample(range(len(full_train_dataset)), 10000)
with open(os.path.join(data_args.selected_indices_store_path, f'qqp_paws_train_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(selected_indices, f)
else:
all_no_paraphrase_examples = []
all_paraphrase_examples = []
ori_seed = training_args.seed
if (data_args.acquisition_function in ['upperbound_checking']):
training_args.seed = 2
if (os.path.isfile(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode}_{training_args.seed}.json')) and ((data_args.cur_mode_1 is None) or ((data_args.cur_mode_1 is not None) and os.path.isfile(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode_1}_{training_args.seed}.json'))))):
with open(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode}_{training_args.seed}.json')) as f:
pre_full_train_dataset = json.load(f)
training_args.seed = ori_seed
print(f'train seed is back to {training_args.seed}')
elif (not (data_args.acquisition_function in ['upperbound_checking'])):
ratio = float(data_args.cur_mode.split('_')[1])
no_paraphrase = int(((10000 * 1) / (ratio + 1)))
paraphrase = (10000 - no_paraphrase)
if data_args.cur_mode.startswith('spurious'):
with open('paws_train.tsv') as f:
lines = f.readlines()
for l in lines[1:]:
(idx, sentence1, sentence2, label) = l.split('\t')
label = label.strip()
sentence1 = sentence1[2:(- 1)]
sentence2 = sentence2[2:(- 1)]
if ((calculate_overlap_ratio(sentence1, sentence2) > 0.9) and (label == '0')):
all_no_paraphrase_examples.append({'question1': sentence1, 'question2': sentence2, 'label': int(label)})
else:
for e in full_train_dataset:
if ((calculate_overlap_ratio(e['question1'], e['question2']) > 0.9) and (e['label'] == 0)):
all_no_paraphrase_examples.append({'question1': e['question1'], 'question2': e['question2'], 'label': e['label']})
for e in full_train_dataset:
if ((calculate_overlap_ratio(e['question1'], e['question2']) > 0.9) and (e['label'] == 1)):
all_paraphrase_examples.append({'question1': e['question1'], 'question2': e['question2'], 'label': e['label']})
pre_full_train_dataset = (random.sample(all_no_paraphrase_examples, no_paraphrase) + random.sample(all_paraphrase_examples, paraphrase))
with open(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(pre_full_train_dataset, f)
else:
raise ValueError('not implemented')
if (data_args.cur_mode_1 is not None):
if os.path.isfile(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode_1}_{training_args.seed}.json')):
with open(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode_1}_{training_args.seed}.json')) as f:
pre_full_train_dataset_1 = json.load(f)
else:
import copy
ratio_1 = float(data_args.cur_mode_1.split('_')[(- 1)])
no_paraphrase_1 = int(((10000 * 1) / (ratio_1 + 1)))
paraphrase_1 = (10000 - no_paraphrase_1)
all_no_paraphrase_examples_1 = copy.deepcopy(all_no_paraphrase_examples)
all_paraphrase_examples_1 = copy.deepcopy(all_paraphrase_examples)
pre_full_train_dataset_1 = (random.sample(all_no_paraphrase_examples_1, no_paraphrase_1) + random.sample(all_paraphrase_examples_1, paraphrase_1))
print(no_paraphrase_1)
for (i, e) in enumerate(pre_full_train_dataset_1):
if (i < no_paraphrase_1):
assert (e['label'] == 0)
else:
assert (e['label'] == 1)
with open(os.path.join(model_args.cache_dir, f'qqp_paws_train_examples_{data_args.cur_mode_1}_{training_args.seed}.json'), 'w') as f:
json.dump(pre_full_train_dataset_1, f)
full_train_dataset_1 = []
for (i, e) in enumerate(pre_full_train_dataset_1):
my_label = e['label']
e = preprocess_function(e)
e['label'] = my_label
e['idx'] = i
full_train_dataset_1.append(e)
full_train_dataset = []
for (i, e) in enumerate(pre_full_train_dataset):
my_label = e['label']
e = preprocess_function(e)
e['label'] = my_label
e['idx'] = i
full_train_dataset.append(e)
selected_indices = list(range(len(full_train_dataset)))
if data_args.check_token_coverage:
import nltk
store = []
for e in pre_full_train_dataset:
s1 = set(nltk.word_tokenize(e['question1'].lower()))
s2 = set(nltk.word_tokenize(e['question2'].lower()))
s1.union(s2)
store.append(s1)
all_tokens = set()
for s in store:
all_tokens = all_tokens.union(s)
if (data_args.acquisition_function in ['random']):
all_indices = list(range(10000))
random.shuffle(all_indices)
elif (data_args.acquisition_function in ['least-confidence']):
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')) as f:
all_indices = json.load(f)
assert (len(all_indices) >= 1000)
elif (data_args.acquisition_function in ['self_dissimilar']):
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')) as f:
all_indices = json.load(f)
assert (len(all_indices) >= 1000)
cur = set()
coverage = []
for i in range(50, 1050, 50):
for idx in all_indices[(i - 50):i]:
cur = cur.union(store[idx])
coverage.append((len(cur) / len(all_tokens)))
with open(os.path.join(data_args.selected_indices_store_path, f'coverage_{data_args.tag}.json'), 'w') as f:
json.dump(coverage, f)
exit(0)
if (data_args.acquisition_function in ['random']):
final_selected_indices = random.sample(selected_indices, data_args.select_num)
elif (data_args.acquisition_function in ['least-confidence']):
if os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')):
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')) as f:
final_selected_indices = json.load(f)
else:
final_selected_indices = random.sample(selected_indices, data_args.seed_set_size)
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json'), 'w') as f:
json.dump(final_selected_indices, f)
elif (data_args.acquisition_function in ['self_dissimilar']):
if os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')):
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')) as f:
final_selected_indices = json.load(f)[:data_args.select_num]
else:
final_selected_indices = random.sample(selected_indices, data_args.seed_set_size)
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json'), 'w') as f:
json.dump(final_selected_indices, f)
elif (data_args.acquisition_function in ['all_plus_test']):
final_selected_indices = selected_indices
elif (data_args.acquisition_function in ['upperbound_checking']):
from collections import defaultdict
with open('/home/sysuser/domain/Github/my_scripts_0505/indices/0519/upper_bound/all_plus_test_no_paraphrase_all_plus_test_upper_bound_2.json') as f:
ori_no_paraphrase = json.load(f)
with open('/home/sysuser/domain/Github/my_scripts_0505/indices/0519/upper_bound/all_plus_test_paraphrase_all_plus_test_upper_bound_2.json') as f:
ori_paraphrase = json.load(f)
print('len(ori_no_paraphrase): ', len(ori_no_paraphrase))
print('len(ori_paraphrase): ', len(ori_paraphrase))
c = defaultdict(int)
for (k, v) in ori_no_paraphrase.items():
for p in v[:50]:
c[p[0]] += 1
c = sorted(c.items(), key=(lambda x: x[1]), reverse=True)
no_paraphrase = [i[0] for i in c]
cc = defaultdict(int)
for (k, v) in ori_paraphrase.items():
for p in v[:50]:
cc[p[0]] += 1
cc = sorted(cc.items(), key=(lambda x: x[1]), reverse=True)
paraphrase = [i[0] for i in cc]
for idx in paraphrase:
assert (full_train_dataset[idx]['label'] == 1), f'idx={idx}'
for idx in no_paraphrase:
assert (full_train_dataset[idx]['label'] == 0), f'idx={idx}'
ratio = 0.393
no_paraphrase_num = int(((data_args.select_num * 1) / (ratio + 1)))
paraphrase_num = (data_args.select_num - no_paraphrase_num)
print(f'paraphrase num: {paraphrase_num}, non-paraphrase num: {no_paraphrase_num}')
final_selected_indices = (paraphrase[:paraphrase_num] + no_paraphrase[:no_paraphrase_num])
train_dataset = [full_train_dataset[i] for i in final_selected_indices]
print(f'There are {len(train_dataset)} examples to train now')
ratios = []
label_distribute = {0: 0, 1: 1}
for e in train_dataset:
if (data_args.cur_mode == 'default'):
ratios.append(calculate_overlap_ratio(e['question1'], e['question2']))
label_distribute[e['label']] += 1
if (data_args.cur_mode == 'default'):
with open(os.path.join(data_args.selected_indices_store_path, f'{data_args.acquisition_function}_ratio_{data_args.tag}.json'), 'w') as f:
json.dump(ratios, f)
with open(os.path.join(data_args.selected_indices_store_path, f'{data_args.acquisition_function}_label_distribute_{data_args.tag}.json'), 'w') as f:
json.dump(label_distribute, f)
elif ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
from collections import defaultdict
import json
hans_train_dataset = raw_datasets['train']
old_mnli_dataset = mnli_dataset['train']
mnli_dataset = []
for e in old_mnli_dataset:
e.pop('idx')
mnli_dataset.append(e)
offset = len(hans_train_dataset)
if (not os.path.isdir(data_args.selected_indices_store_path)):
os.makedirs(data_args.selected_indices_store_path, exist_ok=True)
print('train indices: ')
print(os.path.join(data_args.selected_indices_store_path, f'hans_train_{data_args.cur_mode}_{training_args.seed}.json'))
print(os.path.join(data_args.selected_indices_store_path, f'mnli_train_{data_args.cur_mode}_{training_args.seed}.json'))
if (os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'hans_train_{data_args.cur_mode}_{training_args.seed}.json')) and os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'mnli_train_{data_args.cur_mode}_{training_args.seed}.json'))):
with open(os.path.join(data_args.selected_indices_store_path, f'hans_train_{data_args.cur_mode}_{training_args.seed}.json')) as f_hans:
hans_selected_indices = json.load(f_hans)
with open(os.path.join(data_args.selected_indices_store_path, f'mnli_train_{data_args.cur_mode}_{training_args.seed}.json')) as f_mnli:
mnli_selected_indices = json.load(f_mnli)
else:
hans_selected_indices = []
hans_selected_indices_entailment = []
nd1 = 5000
ratio = (float(data_args.cur_mode.split('_')[(- 1)]) / 100)
e_num = int((nd1 * ratio))
ne_num = (nd1 - e_num)
label_format = {0: 'entailment', 1: 'not entailment', 2: 'not entailment'}
group_indices = {'entailment': defaultdict(list), 'not entailment': defaultdict(list)}
for (idx, e) in enumerate(hans_train_dataset):
if (lexical_overlap(e['premise'], e['hypothesis']) and (spurious_correlation_type_not_present(e) == 'unknown')):
group_indices[label_format[e['label']]][e['subcase']].append(idx)
for (k1, v1) in group_indices.items():
for (k2, v2) in v1.items():
print(f'train subcase {k2} has {len(group_indices[k1][k2])} examples with label {k1}')
e_remain = (e_num % 15)
e_sub = (e_num // 15)
count = 0
for (k, v) in group_indices['entailment'].items():
hans_selected_indices_entailment += v
if (count < e_remain):
hans_selected_indices += random.sample(v, (e_sub + 1))
print(f'{k}: {(e_sub + 1)}', end=' ')
else:
hans_selected_indices += random.sample(v, e_sub)
print(f'{k}: {e_sub}', end=' ')
count += 1
index_map = {}
if (data_args.index_map_store_path is not None):
for i in hans_selected_indices_entailment:
index_map[i] = ['lexical overlap entailment']
ne_remain = (ne_num % 15)
ne_sub = (ne_num // 15)
count = 0
hans_selected_indices_not_entailment = []
for (k, v) in group_indices['not entailment'].items():
hans_selected_indices_not_entailment += v
if (count < ne_remain):
hans_selected_indices += random.sample(v, (ne_sub + 1))
print(f'{k}: {(ne_sub + 1)}', end=' ')
else:
hans_selected_indices += random.sample(v, ne_sub)
print(f'{k}: {ne_sub}', end=' ')
count += 1
if (data_args.index_map_store_path is not None):
for i in hans_selected_indices_not_entailment:
index_map[i] = ['lexical overlap not entailment']
print(f'''
Train, lexical overlap with spurious contradiction selects {ne_num} not entailment, {e_num} entailment''')
mnli_selected_indices = []
nd2 = 5000
ne_num = int((nd2 * ratio))
e_num = (nd2 - ne_num)
group_indices = defaultdict(list)
for (idx, e) in enumerate(mnli_dataset):
if ((not lexical_overlap(e['premise'], e['hypothesis'])) and (spurious_correlation_type_not_present(e) != 'unknown')):
group_indices[label_format[e['label']]].append(idx)
mnli_selected_indices += random.sample(group_indices['not entailment'], ne_num)
if (data_args.index_map_store_path is not None):
for i in group_indices['not entailment']:
index_map[(i + offset)] = ['spurious contradiction not entailment']
mnli_selected_indices += random.sample(group_indices['entailment'], e_num)
print(f'Train, spurious contradiction selects {ne_num} not entailment, {e_num} entailment')
if (data_args.index_map_store_path is not None):
for i in group_indices['entailment']:
index_map[(i + offset)] = ['spurious contradiction entailment']
with open(data_args.index_map_store_path, 'w') as f:
json.dump(index_map, f)
exit(0)
with open(os.path.join(data_args.selected_indices_store_path, f'hans_train_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(hans_selected_indices, f)
with open(os.path.join(data_args.selected_indices_store_path, f'mnli_train_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(mnli_selected_indices, f)
hans_train_selected_indices = hans_selected_indices
mnli_train_selected_indices = mnli_selected_indices
print(f'Total training pool: {(len(hans_selected_indices) + len(mnli_selected_indices))}')
agg_dataset = [e for e in hans_train_dataset]
agg_dataset += [e for e in mnli_dataset]
agg_indices = (hans_selected_indices + [(i + offset) for i in mnli_selected_indices])
if data_args.check_token_coverage:
import nltk
def get_token_set(e):
s1 = set(nltk.word_tokenize(e['premise'].lower()))
s2 = set(nltk.word_tokenize(e['hypothesis'].lower()))
s1.union(s2)
return s1
store = []
pre_full_train_dataset = [agg_dataset[i] for i in agg_indices]
for e in pre_full_train_dataset:
s1 = set(nltk.word_tokenize(e['premise'].lower()))
s2 = set(nltk.word_tokenize(e['hypothesis'].lower()))
s1.union(s2)
store.append(s1)
all_tokens = set()
for s in store:
all_tokens = all_tokens.union(s)
if (data_args.acquisition_function in ['random']):
all_indices = random.sample(agg_indices, 10000)
random.shuffle(all_indices)
elif (data_args.acquisition_function in ['least-confidence']):
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')) as f:
all_indices = json.load(f)
assert (len(all_indices) >= 1000)
elif (data_args.acquisition_function in ['self_dissimilar']):
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')) as f:
all_indices = json.load(f)
assert (len(all_indices) >= 1000)
cur = set()
coverage = []
for i in range(50, 1050, 50):
for idx in all_indices[(i - 50):i]:
cur = cur.union(get_token_set(agg_dataset[idx]))
coverage.append((len(cur) / len(all_tokens)))
with open(os.path.join(data_args.selected_indices_store_path, f'coverage_{data_args.tag}.json'), 'w') as f:
json.dump(coverage, f)
exit(0)
if (data_args.acquisition_function in ['random']):
my_hans_train_selected_indices = random.sample(agg_indices, data_args.select_num)
elif (data_args.acquisition_function in ['least-confidence']):
if os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')):
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json')) as f:
my_hans_train_selected_indices = json.load(f)
else:
my_hans_train_selected_indices = random.sample(agg_indices, data_args.seed_set_size)
with open(os.path.join(data_args.selected_indices_store_path, f'confidence_{data_args.tag}.json'), 'w') as f:
json.dump(my_hans_train_selected_indices, f)
elif (data_args.acquisition_function in ['self_dissimilar']):
if os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')):
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json')) as f:
my_hans_train_selected_indices = json.load(f)[:data_args.select_num]
else:
my_hans_train_selected_indices = random.sample(agg_indices, data_args.seed_set_size)
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json'), 'w') as f:
json.dump(my_hans_train_selected_indices, f)
my_hans_train_selected_indices = sorted(my_hans_train_selected_indices)
train_dataset = [agg_dataset[i] for i in my_hans_train_selected_indices]
label_format = {0: 0, 1: 1, 2: 1}
for e in train_dataset:
e['label'] = label_format[e['label']]
print(f'final training set size {len(train_dataset)}')
elif ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major')):
aug_final_selected_indices = random.sample(list(range(len(raw_datasets['train']))), 50)
train_dataset = [raw_datasets['train'][idx] for idx in aug_final_selected_indices]
else:
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
if isinstance(train_dataset, list):
train_dataset = train_dataset[:data_args.max_train_samples]
else:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if (('validation' not in raw_datasets) and ('validation_matched' not in raw_datasets)):
raise ValueError('--do_eval requires a validation dataset')
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
with open('paws_dev_and_test.tsv') as f:
lines = f.readlines()
eval_dataset_1 = []
eval_dataset_0 = []
train_eval_dataset_1 = []
train_eval_dataset_0 = []
import copy
for l in lines[1:]:
(idx, question1, question2, label) = l.split('\t')
e = preprocess_function({'question1': question1[2:(- 1)], 'question2': question2[2:(- 1)], 'label': int(label.strip()), 'idx': idx})
e['label'] = int(label.strip())
if (e['label'] == 1):
eval_dataset_1.append(e)
train_eval_dataset_1.append(copy.deepcopy(e))
train_eval_dataset_1[(- 1)]['idx'] = idx
elif (e['label'] == 0):
eval_dataset_0.append(e)
train_eval_dataset_0.append(copy.deepcopy(e))
train_eval_dataset_0[(- 1)]['idx'] = idx
else:
raise ValueError('unrecognized label')
print(f'In evaluation, label 1 has {len(eval_dataset_1)} examples, label 0 has {len(eval_dataset_0)} examples')
offset = len(train_dataset)
for i in range(len(train_eval_dataset_0)):
train_eval_dataset_0[i]['idx'] = (offset + i)
offset = (len(train_dataset) + len(train_eval_dataset_0))
for i in range(len(train_eval_dataset_1)):
train_eval_dataset_1[i]['idx'] = (offset + i)
print(f'first {len(train_eval_dataset_0)} second {len(train_eval_dataset_1)}')
if (data_args.acquisition_function in ['all_plus_test']):
if (data_args.max_train_samples is not None):
train_dataset += (train_eval_dataset_0[:data_args.max_train_samples] + train_eval_dataset_1[:data_args.max_train_samples])
full_train_dataset_1 += (train_eval_dataset_0[:data_args.max_train_samples] + train_eval_dataset_1[:data_args.max_train_samples])
else:
train_dataset += (train_eval_dataset_0[:] + train_eval_dataset_1[:])
full_train_dataset_1 += (train_eval_dataset_0[:] + train_eval_dataset_1[:])
elif ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
from collections import defaultdict
import json
if (os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'hans_test_{data_args.cur_mode}_{training_args.seed}.json')) and os.path.isfile(os.path.join(data_args.selected_indices_store_path, f'mnli_test_{data_args.cur_mode}_{training_args.seed}.json'))):
with open(os.path.join(data_args.selected_indices_store_path, f'hans_test_{data_args.cur_mode}_{training_args.seed}.json')) as f_hans:
hans_selected_indices = json.load(f_hans)
with open(os.path.join(data_args.selected_indices_store_path, f'mnli_test_{data_args.cur_mode}_{training_args.seed}.json')) as f_mnli:
mnli_selected_indices = json.load(f_mnli)
else:
hans_selected_indices = []
nd1 = 1500
ratio = 0.5
e_num = int((nd1 * ratio))
ne_num = (nd1 - e_num)
label_format = {0: 'entailment', 1: 'not entailment', 2: 'not entailment'}
group_indices = {'entailment': defaultdict(list), 'not entailment': defaultdict(list)}
for (idx, e) in enumerate(hans_train_dataset):
if (lexical_overlap(e['premise'], e['hypothesis']) and (spurious_correlation_type_not_present(e) == 'unknown') and (not (idx in hans_train_selected_indices))):
group_indices[label_format[e['label']]][e['subcase']].append(idx)
for (k1, v1) in group_indices.items():
for (k2, v2) in v1.items():
print(f'test subcase {k2} has {len(group_indices[k1][k2])} examples with label {k1}')
e_remain = (e_num % 15)
e_sub = (e_num // 15)
count = 0
for (k, v) in group_indices['entailment'].items():
if (count < e_remain):
hans_selected_indices += random.sample(v, (e_sub + 1))
print(f'{k}: {(e_sub + 1)}', end=' ')
else:
hans_selected_indices += random.sample(v, e_sub)
print(f'{k}: {e_sub}', end=' ')
count += 1
ne_remain = (ne_num % 15)
ne_sub = (ne_num // 15)
count = 0
for (k, v) in group_indices['not entailment'].items():
if (count < ne_remain):
hans_selected_indices += random.sample(v, (ne_sub + 1))
print(f'{k}: {(ne_sub + 1)}', end=' ')
else:
hans_selected_indices += random.sample(v, ne_sub)
print(f'{k}: {ne_sub}', end=' ')
count += 1
print(f'''
Test, lexical overlap with spurious contradiction selects {len(hans_selected_indices)} examples''')
mnli_selected_indices = []
nd2 = 1500
ne_num = int((nd2 * ratio))
e_num = (nd2 - ne_num)
group_indices = defaultdict(list)
for (idx, e) in enumerate(mnli_dataset):
if ((not lexical_overlap(e['premise'], e['hypothesis'])) and (spurious_correlation_type_not_present(e) != 'unknown') and (not (idx in mnli_train_selected_indices))):
group_indices[label_format[e['label']]].append(idx)
mnli_selected_indices += random.sample(group_indices['not entailment'], ne_num)
mnli_selected_indices += random.sample(group_indices['entailment'], e_num)
print(f'Test, spurious contradiction selects {ne_num} not entailment, {e_num} entailment')
with open(os.path.join(data_args.selected_indices_store_path, f'hans_test_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(hans_selected_indices, f)
with open(os.path.join(data_args.selected_indices_store_path, f'mnli_test_{data_args.cur_mode}_{training_args.seed}.json'), 'w') as f:
json.dump(mnli_selected_indices, f)
eval_dataset = []
for i in hans_selected_indices:
eval_dataset.append(hans_train_dataset[i])
for i in mnli_selected_indices:
eval_dataset.append(mnli_dataset[i])
label_format = {0: 0, 1: 1, 2: 1}
for e in eval_dataset:
e['label'] = label_format[e['label']]
print(f'Total evaluation pool: {len(eval_dataset)}')
else:
eval_dataset = raw_datasets[('validation_matched' if ((data_args.task_name == 'mnli') and (data_args.my_task_name is None)) else 'validation')]
if (data_args.max_eval_samples is not None):
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
eval_dataset_0 = eval_dataset_0[:data_args.max_eval_samples]
eval_dataset_1 = eval_dataset_1[:data_args.max_eval_samples]
elif isinstance(eval_dataset, list):
eval_dataset = eval_dataset[:data_args.max_eval_samples]
else:
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if (training_args.do_predict or (data_args.acquisition_function in ['least-confidence', 'self_dissimilar', 'all_plus_test'])):
if ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
selected_indices = agg_indices
final_selected_indices = my_hans_train_selected_indices
full_train_dataset = agg_dataset
if ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major')):
import copy
available_indices = list((set(list(range(len(raw_datasets['train'])))) - set(aug_final_selected_indices)))
predict_indices = random.sample(available_indices, 50)
ori_predict_dataset = [raw_datasets['train'][idx] for idx in predict_indices]
predict_dataset = []
def add_comma(s):
components = s.split(' ')
random_indices = random.sample(list(range(len(components))), 2)
components[random_indices[0]] = (components[random_indices[0]] + ',')
components[random_indices[1]] = (components[random_indices[1]] + ',')
return ' '.join(components)
def add_period(s):
components = s.split(' ')
random_indices = random.sample(list(range(len(components))), 2)
components[random_indices[0]] = (components[random_indices[0]] + '.')
return ' '.join(components)
def add_word(s):
components = s.split(' ')
random_indices = random.sample(list(range(len(components))), 2)
components[random_indices[0]] = (components[random_indices[0]] + ', huh,')
return ' '.join(components)
for e in ori_predict_dataset:
e.pop('label')
predict_dataset.append(e)
comma = copy.deepcopy(e)
comma['premise'] = add_comma(comma['premise'])
comma['hypothesis'] = add_comma(comma['hypothesis'])
predict_dataset.append(comma)
period = copy.deepcopy(e)
period['premise'] = add_period(period['premise'])
period['hypothesis'] = add_period(period['hypothesis'])
predict_dataset.append(period)
prefix1 = copy.deepcopy(e)
prefix1['premise'] = ('The premise: ' + prefix1['premise'])
prefix1['hypothesis'] = ('The hypothesis: ' + prefix1['hypothesis'])
predict_dataset.append(prefix1)
prefix2 = copy.deepcopy(e)
prefix2['premise'] = ('Sentence 1: ' + prefix2['premise'])
prefix2['hypothesis'] = ('Sentence 2: ' + prefix2['hypothesis'])
predict_dataset.append(prefix2)
word_added = copy.deepcopy(e)
word_added['premise'] = add_word(word_added['premise'])
word_added['hypothesis'] = add_word(word_added['hypothesis'])
predict_dataset.append(word_added)
if (data_args.acquisition_function in ['least-confidence', 'self_dissimilar']):
if (data_args.acquisition_function in ['least-confidence']):
training_args.do_predict = True
indices_to_predict = []
predict_dataset = []
for idx in final_selected_indices:
if (not (idx in selected_indices)):
print('not in')
for idx in selected_indices:
if (data_args.acquisition_function in ['least-confidence']):
if (not (idx in final_selected_indices)):
indices_to_predict.append(idx)
if (data_args.acquisition_function in ['self_dissimilar']):
indices_to_predict.append(idx)
if (data_args.acquisition_function in ['all_plus_test']):
indices_to_predict = range(len(full_train_dataset))
import copy
for idx in indices_to_predict:
predict_dataset.append(copy.deepcopy(full_train_dataset[idx]))
if ('idx' not in predict_dataset[(- 1)]):
predict_dataset[(- 1)]['idx'] = idx
else:
assert (idx == predict_dataset[(- 1)]['idx'])
if (not (data_args.acquisition_function in ['all_plus_test'])):
predict_dataset[(- 1)].pop('label')
print(f'{data_args.acquisition_function} prediction pool: {len(predict_dataset)}')
elif (data_args.acquisition_function in ['all_plus_test']):
predict_dataset = full_train_dataset_1
print(f'all plus test has {len(full_train_dataset_1)} examples')
elif (not ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major'))):
if (('test' not in raw_datasets) and ('test_matched' not in raw_datasets)):
raise ValueError('--do_predict requires a test dataset')
predict_dataset = raw_datasets[('test_matched' if (data_args.task_name == 'mnli') else 'test')]
if (data_args.max_predict_samples is not None):
if isinstance(predict_dataset, list):
predict_dataset = predict_dataset[:data_args.max_predict_samples]
else:
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
if (data_args.task_name is not None):
metric = load_metric('glue', data_args.task_name)
else:
metric = load_metric('accuracy')
    def compute_metrics(p: EvalPrediction):
        """Score a HF Trainer ``EvalPrediction`` for the current task.

        Regression tasks squeeze the logits to scalars; classification takes
        the argmax over the class dimension. For the MNLI+HANS setup the
        three-way NLI labels are collapsed to binary before scoring.
        """
        # Some models return a tuple (logits, hidden_states, ...); keep logits only.
        preds = (p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions)
        preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
        if (data_args.task_name is not None):
            if ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
                print('label is formatted')
                # Collapse 3-way NLI predictions: 0 -> entailment, 1/2 -> not entailment.
                label_format = {0: 0, 1: 1, 2: 1}
                preds = [label_format[i] for i in preds]
            result = metric.compute(predictions=preds, references=p.label_ids)
            if (len(result) > 1):
                # When the GLUE metric reports several values (e.g. acc + F1),
                # also expose their mean as a single summary number.
                result['combined_score'] = np.mean(list(result.values())).item()
            return result
        elif is_regression:
            return {'mse': ((preds - p.label_ids) ** 2).mean().item()}
        else:
            return {'accuracy': (preds == p.label_ids).astype(np.float32).mean().item()}
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
trainer = Trainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset_1 if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
else:
trainer = Trainer(model=model, args=training_args, data_args=data_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator)
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.save_model()
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
if training_args.do_eval:
logger.info('*** Evaluate ***')
tasks = [data_args.task_name]
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
eval_datasets = [eval_dataset_1, eval_dataset_0]
tasks.append(data_args.task_name)
else:
eval_datasets = [eval_dataset]
if ((data_args.task_name == 'mnli') and (data_args.my_task_name is None)):
tasks.append('mnli-mm')
eval_datasets.append(raw_datasets['validation_mismatched'])
for (eval_dataset, task) in zip(eval_datasets, tasks):
if ((data_args.task_name == 'qqp') and (data_args.my_task_name == 'paws')):
print(f"current group has label {eval_dataset[0]['label']}")
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
tasks = [data_args.task_name]
predict_datasets = [predict_dataset]
if ((data_args.task_name == 'mnli') and (data_args.my_task_name is None)):
tasks.append('mnli-mm')
predict_datasets.append(raw_datasets['test_mismatched'])
if (data_args.acquisition_function in ['least-confidence']):
data_args.confidence_prediction = True
for (predict_dataset, task) in zip(predict_datasets, tasks):
if ((not (data_args.acquisition_function in ['least-confidence'])) and (not ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major')))):
predict_dataset = predict_dataset.remove_columns('label')
predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
if (((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major')) or ((data_args.task_name != 'mnli') and (not (data_args.acquisition_function in ['least-confidence'])))):
predictions = (np.squeeze(predictions) if is_regression else np.argmax(predictions[0], axis=1))
output_predict_file = os.path.join(training_args.output_dir, f'predict_results_{task}.txt')
if trainer.is_world_process_zero():
with open(output_predict_file, 'w') as writer:
logger.info(f'***** Predict results {task} *****')
writer.write('index\tprediction\n')
for (index, item) in enumerate(predictions):
if is_regression:
writer.write(f'''{index} {item:3.3f}
''')
else:
item = label_list[item]
writer.write(f'''{index} {item}
''')
if (data_args.my_task_name in ['aug_major']):
set_seed((training_args.seed + 10))
predictions = trainer.predict(predict_dataset, metric_key_prefix='predict').predictions
if (((data_args.task_name == 'mnli') and (data_args.my_task_name == 'aug_major')) or ((data_args.task_name != 'mnli') and (not (data_args.acquisition_function in ['least-confidence'])))):
predictions = (np.squeeze(predictions) if is_regression else np.argmax(predictions[0], axis=1))
output_predict_file = os.path.join(training_args.output_dir, f'predict_results_{task}_1.txt')
if trainer.is_world_process_zero():
with open(output_predict_file, 'w') as writer:
logger.info(f'***** Predict results {task} *****')
writer.write('index\tprediction\n')
for (index, item) in enumerate(predictions):
if is_regression:
writer.write(f'''{index} {item:3.3f}
''')
else:
item = label_list[item]
writer.write(f'''{index} {item}
''')
data_args.confidence_prediction = False
kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
if (data_args.task_name is not None):
kwargs['language'] = 'en'
kwargs['dataset_tags'] = 'glue'
kwargs['dataset_args'] = data_args.task_name
kwargs['dataset'] = f'GLUE {data_args.task_name.upper()}'
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
if ((data_args.acquisition_function in ['self_dissimilar']) and data_args.calculate_dissimilar_embeds):
import torch
from tqdm import tqdm
import json
from sklearn.metrics.pairwise import cosine_similarity
dataloader = trainer.get_test_dataloader(predict_dataset)
trainer.model.eval()
local_indices = []
embeds = []
for (step, inputs) in enumerate(dataloader):
local_indices += inputs.pop('idx').tolist()
inputs = trainer._prepare_input(inputs)
outputs = model(**inputs, return_dict=True)
embeds += outputs.pooled_output.cpu().tolist()
indices_map = {}
for (i, idx) in enumerate(local_indices):
indices_map[idx] = i
downstream_representations = torch.tensor(embeds, dtype=torch.float)
downstream_representations_mean = torch.mean(downstream_representations, 0, True)
downstream_representations = (downstream_representations - downstream_representations_mean)
if ((data_args.task_name == 'mnli') and (data_args.my_task_name == 'hans')):
final_selected_indices = my_hans_train_selected_indices
selected_indices = [indices_map[i] for i in final_selected_indices]
num_instance = min(len(downstream_representations), data_args.self_dissimilar_cap)
newly_selected_representations = downstream_representations[selected_indices]
scores = np.array([0 for i in range(len(downstream_representations))], dtype=np.float64)
progress_bar = tqdm(range(num_instance), desc='calculate self similarity')
for i in selected_indices:
scores[i] = float('inf')
for count in range(num_instance):
scores += np.sum(cosine_similarity(downstream_representations, newly_selected_representations), axis=1)
min_idx = np.argmin(scores)
newly_selected_representations = downstream_representations[min_idx].reshape(1, (- 1))
selected_indices.append(min_idx.item())
scores[min_idx.item()] = float('inf')
progress_bar.update(1)
selected_indices = [local_indices[i] for i in selected_indices]
with open(os.path.join(data_args.selected_indices_store_path, f'self_dissimilar_{data_args.tag}.json'), 'w') as f:
json.dump(selected_indices, f)
if ((data_args.acquisition_function in ['all_plus_test']) and data_args.calculate_dissimilar_embeds):
ratio_1 = float(data_args.cur_mode_1.split('_')[(- 1)])
no_paraphrase_1 = int(((10000 * 1) / (ratio_1 + 1)))
no_paraphrase_train = predict_dataset[:no_paraphrase_1]
paraphrase_train = predict_dataset[no_paraphrase_1:10000]
print('no_paraphrase_1: ', no_paraphrase_1)
for e in no_paraphrase_train:
assert (e['label'] == 0)
for e in paraphrase_train:
assert (e['label'] == 1)
no_paraphrase_test = predict_dataset[10000:10486]
paraphrase_test = predict_dataset[10486:10677]
for e in no_paraphrase_test:
assert (e['label'] == 0)
for e in paraphrase_test:
assert (e['label'] == 1)
import torch
from tqdm import tqdm
import json
from sklearn.metrics.pairwise import cosine_similarity
def get_indices(train_split, test_split, offset):
dataloader = trainer.get_test_dataloader((train_split + test_split))
trainer.model.eval()
local_indices = []
embeds = []
for (step, inputs) in enumerate(dataloader):
local_indices += inputs.pop('idx').tolist()
inputs = trainer._prepare_input(inputs)
outputs = model(**inputs, return_dict=True)
embeds += outputs.pooled_output.cpu().tolist()
downstream_representations = torch.tensor(embeds, dtype=torch.float)
downstream_representations_mean = torch.mean(downstream_representations, 0, True)
downstream_representations = (downstream_representations - downstream_representations_mean)
train_embs = downstream_representations[:len(train_split)]
test_embs = downstream_representations[len(train_split):]
scores = np.array([0 for i in range(len(train_embs))], dtype=np.float64)
top_scores_indices = {}
for (idx, one_test_emb) in enumerate(test_embs):
scores += np.sum(cosine_similarity(train_embs, one_test_emb.reshape(1, (- 1))), axis=1)
processed_scores = sorted([[(i + offset), s] for (i, s) in enumerate(scores)], key=(lambda x: x[1]))
top_scores_indices[idx] = processed_scores[(- 500):]
return top_scores_indices
no_paraphrase_indices = get_indices(no_paraphrase_train, no_paraphrase_test, 0)
paraphrase_indices = get_indices(paraphrase_train, paraphrase_test, no_paraphrase_1)
with open(os.path.join(data_args.selected_indices_store_path, f'all_plus_test_paraphrase_{data_args.tag}.json'), 'w') as f:
json.dump(paraphrase_indices, f)
with open(os.path.join(data_args.selected_indices_store_path, f'all_plus_test_no_paraphrase_{data_args.tag}.json'), 'w') as f:
json.dump(no_paraphrase_indices, f) |
class TestPack(unittest.TestCase):
    """Black-box tests of the rectangle allocator, driven through the
    AllocatorEnvironment fixture: ``add`` must succeed, ``add_fail`` must
    be rejected by the allocator."""

    def test_over_x(self):
        """An allocation exceeding one arena dimension is rejected."""
        arena = AllocatorEnvironment(self, 3, 3)
        arena.add_fail(3, 4)

    def test_over_y(self):
        """An allocation exceeding the other arena dimension is rejected."""
        arena = AllocatorEnvironment(self, 3, 3)
        arena.add_fail(4, 3)

    def test_1(self):
        """Fill a 4x4 arena with unit cells; the seventeenth must fail."""
        arena = AllocatorEnvironment(self, 4, 4)
        for _ in range(16):
            arena.add(1, 1)
        arena.add_fail(1, 1)

    def test_2(self):
        """A 2x2 block still leaves room for four unit cells in a 3x3 arena."""
        arena = AllocatorEnvironment(self, 3, 3)
        arena.add(2, 2)
        for _ in range(4):
            arena.add(1, 1)

    def test_3(self):
        """A block covering the whole arena leaves no space for anything else."""
        arena = AllocatorEnvironment(self, 3, 3)
        arena.add(3, 3)
        arena.add_fail(1, 1)

    def test_4(self):
        """Mixed shapes in a 5x4 arena: exercise the exact leftover capacity."""
        arena = AllocatorEnvironment(self, 5, 4)
        for _ in range(4):
            arena.add(2, 2)
        arena.add_fail(2, 1)
        arena.add(1, 2)
        arena.add(1, 2)
        arena.add_fail(1, 1)

    def test_5(self):
        """Stacked wide blocks in a 4x4 arena until it is exactly full."""
        arena = AllocatorEnvironment(self, 4, 4)
        arena.add(3, 2)
        arena.add(4, 2)
        arena.add(1, 2)
        arena.add_fail(1, 1)
def supervised(args):
    """Run supervised training and evaluation for an EEG classifier.

    Builds dataloaders for ``args.dataset``, instantiates the network named
    by ``args.model``, fine-tunes it with PyTorch Lightning (early stopping
    on validation AUROC), and prints the test metrics of the best checkpoint.

    Raises:
        NotImplementedError: for an unsupported dataset or model name.
    """
    if (args.dataset == 'TUAB'):
        (train_loader, test_loader, val_loader) = prepare_TUAB_dataloader(args)
    else:
        raise NotImplementedError
    if (args.model == 'SPaRCNet'):
        model = SPaRCNet(in_channels=args.in_channels, sample_length=int((args.sampling_rate * args.sample_length)), n_classes=args.n_classes, block_layers=4, growth_rate=16, bn_size=16, drop_rate=0.5, conv_bias=True, batch_norm=True)
    elif (args.model == 'ContraWR'):
        model = ContraWR(in_channels=args.in_channels, n_classes=args.n_classes, fft=args.token_size, steps=(args.hop_length // 5))
    elif (args.model == 'CNNTransformer'):
        model = CNNTransformer(in_channels=args.in_channels, n_classes=args.n_classes, fft=args.sampling_rate, steps=(args.hop_length // 5), dropout=0.2, nhead=4, emb_size=256)
    elif (args.model == 'FFCL'):
        model = FFCL(in_channels=args.in_channels, n_classes=args.n_classes, fft=args.token_size, steps=(args.hop_length // 5), sample_length=int((args.sampling_rate * args.sample_length)), shrink_steps=20)
    elif (args.model == 'STTransformer'):
        model = STTransformer(emb_size=256, depth=4, n_classes=args.n_classes, channel_legnth=int((args.sampling_rate * args.sample_length)), n_channels=args.in_channels)
    elif (args.model == 'BIOT'):
        model = BIOTClassifier(n_classes=args.n_classes, n_channels=args.in_channels, n_fft=args.token_size, hop_length=args.hop_length)
        # Warm-starting the encoder is optional: only load a checkpoint when a
        # path is given and the sampling rate matches the 200 Hz checkpoints.
        if (args.pretrain_model_path and (args.sampling_rate == 200)):
            model.biot.load_state_dict(torch.load(args.pretrain_model_path))
            print(f'load pretrain model from {args.pretrain_model_path}')
    else:
        # BUG FIX: this `else` was previously attached to the pretrain-path
        # check above, so an unrecognized model name left `model` unbound
        # (NameError) while a BIOT run without a checkpoint wrongly raised
        # NotImplementedError. It now terminates the model-selection chain.
        raise NotImplementedError
    lightning_model = LitModel_finetune(args, model)
    version = f'{args.dataset}-{args.model}-{args.lr}-{args.batch_size}-{args.sampling_rate}-{args.token_size}-{args.hop_length}'
    logger = TensorBoardLogger(save_dir='./', version=version, name='log')
    early_stop_callback = EarlyStopping(monitor='val_auroc', patience=5, verbose=False, mode='max')
    trainer = pl.Trainer(devices=[0], accelerator='gpu', strategy=DDPStrategy(find_unused_parameters=False), auto_select_gpus=True, benchmark=True, enable_checkpointing=True, logger=logger, max_epochs=args.epochs, callbacks=[early_stop_callback])
    trainer.fit(lightning_model, train_dataloaders=train_loader, val_dataloaders=val_loader)
    pretrain_result = trainer.test(model=lightning_model, ckpt_path='best', dataloaders=test_loader)[0]
    print(pretrain_result)
class AbstractPlot(object):
    """Base class for plots renderable via matplotlib, bokeh, or ROOT/TMVA.

    Subclasses implement ``_plot`` (matplotlib), ``_plot_tmva`` (ROOT) and
    ``_plot_bokeh``; this class handles figure creation, axis limits, labels,
    legends and per-backend sizing.
    """
    # NOTE(review): Python-2 style metaclass declaration; on Python 3 this
    # attribute is inert and the class is not actually abstract.
    __metaclass__ = ABCMeta
    def __init__(self):
        # Pixels-per-figsize-unit scale factors for the bokeh/TMVA backends.
        self.BOKEH_RESIZE = 50
        self.TMVA_RESIZE = 80
        # Default axis limits/labels/title; plot-time keyword args override these.
        self.xlim = None
        self.ylim = None
        self.xlabel = ''
        self.ylabel = ''
        self.title = ''
        self.figsize = (13, 7)
        self.fontsize = 14
        self.new_plot = False
        # ROOT canvas, created lazily by plot_tmva().
        self.canvas = None
        # Keeps references to TMVA graphs/legends so ROOT does not GC them.
        self._tmva_keeper = []
    def _plot(self):
        """Backend hook: draw onto the current matplotlib figure."""
        pass
    def _plot_tmva(self):
        """Backend hook: return a ``(graph, legend)`` pair for ROOT/TMVA."""
        pass
    def _plot_bokeh(self, current_plot, show_legend=True):
        """Backend hook: draw onto *current_plot* and return it."""
        pass
    def _repr_html_(self):
        # IPython rich display: render via matplotlib as a side effect.
        self.plot()
        return ''
    def plot(self, new_plot=False, xlim=None, ylim=None, title=None, figsize=None, xlabel=None, ylabel=None, fontsize=None, show_legend=True, grid=True):
        """Render with matplotlib; any ``None`` argument falls back to the
        corresponding instance default set in ``__init__``."""
        xlabel = (self.xlabel if (xlabel is None) else xlabel)
        ylabel = (self.ylabel if (ylabel is None) else ylabel)
        figsize = (self.figsize if (figsize is None) else figsize)
        fontsize = (self.fontsize if (fontsize is None) else fontsize)
        # Stash resolved values for the _plot() hook to consult.
        self.fontsize_ = fontsize
        self.show_legend_ = show_legend
        title = (self.title if (title is None) else title)
        xlim = (self.xlim if (xlim is None) else xlim)
        ylim = (self.ylim if (ylim is None) else ylim)
        new_plot = (self.new_plot or new_plot)
        if new_plot:
            plt.figure(figsize=figsize)
        plt.xlabel(xlabel, fontsize=fontsize)
        plt.ylabel(ylabel, fontsize=fontsize)
        plt.title(title, fontsize=fontsize)
        plt.tick_params(axis='both', labelsize=fontsize)
        plt.grid(grid)
        if (xlim is not None):
            plt.xlim(xlim)
        if (ylim is not None):
            plt.ylim(ylim)
        self._plot()
        if show_legend:
            plt.legend(loc='best', scatterpoints=1)
    def plot_bokeh(self, xlim=None, ylim=None, title=None, figsize=None, xlabel=None, ylabel=None, fontsize=None, show_legend=True):
        """Render with bokeh inside a notebook; ``None`` arguments fall back
        to the instance defaults. Activates bokeh notebook output once."""
        global _COLOR_CYCLE_BOKEH
        global _BOKEH_OUTPUT_NOTEBOOK_ACTIVATED
        import bokeh.plotting as bkh
        from bokeh.models import Range1d
        from bokeh.core.properties import value
        figsize = (self.figsize if (figsize is None) else figsize)
        xlabel = (self.xlabel if (xlabel is None) else xlabel)
        ylabel = (self.ylabel if (ylabel is None) else ylabel)
        title = (self.title if (title is None) else title)
        xlim = (self.xlim if (xlim is None) else xlim)
        ylim = (self.ylim if (ylim is None) else ylim)
        fontsize = (self.fontsize if (fontsize is None) else fontsize)
        self.fontsize_ = fontsize
        self.show_legend_ = show_legend
        # Convert figsize (matplotlib inches) to bokeh pixel dimensions.
        figsize = ((figsize[0] * self.BOKEH_RESIZE), (figsize[1] * self.BOKEH_RESIZE))
        if (not _BOKEH_OUTPUT_NOTEBOOK_ACTIVATED):
            bkh.output_notebook()
            _BOKEH_OUTPUT_NOTEBOOK_ACTIVATED = True
        current_plot = bkh.figure(title=title, plot_width=figsize[0], plot_height=figsize[1])
        # Restart the shared color cycle so every figure starts at the same color.
        _COLOR_CYCLE_BOKEH = itertools.cycle(COLOR_ARRAY_BOKEH)
        if (xlim is not None):
            current_plot.x_range = Range1d(start=xlim[0], end=xlim[1])
        if (ylim is not None):
            current_plot.y_range = Range1d(start=ylim[0], end=ylim[1])
        current_plot.title_text_font_size = value('{}pt'.format(fontsize))
        current_plot.xaxis.axis_label = xlabel
        current_plot.yaxis.axis_label = ylabel
        current_plot.legend.orientation = 'top_right'
        current_plot = self._plot_bokeh(current_plot, show_legend)
        bkh.show(current_plot)
    def plot_tmva(self, new_plot=False, style_file=None, figsize=None, xlim=None, ylim=None, title=None, xlabel=None, ylabel=None, show_legend=True):
        """Render with ROOT/TMVA and return the canvas; ``None`` arguments
        fall back to the instance defaults. Reuses ``self.canvas`` unless
        *new_plot* is set or no canvas exists yet."""
        import ROOT
        global _COLOR_CYCLE_TMVA
        # Drop references from a previous render; repopulated below.
        self._tmva_keeper = []
        xlabel = (self.xlabel if (xlabel is None) else xlabel)
        ylabel = (self.ylabel if (ylabel is None) else ylabel)
        figsize = (self.figsize if (figsize is None) else figsize)
        title = (self.title if (title is None) else title)
        xlim = (self.xlim if (xlim is None) else xlim)
        ylim = (self.ylim if (ylim is None) else ylim)
        if (new_plot or (self.canvas is None)):
            _COLOR_CYCLE_TMVA = itertools.cycle(COLOR_ARRAY_TMVA)
            # Random suffix so each ROOT canvas gets a unique name.
            t = numpy.random.randint(low=100, high=100000)
            figsize = ((figsize[0] * self.TMVA_RESIZE), (figsize[1] * self.TMVA_RESIZE))
            self.canvas = canvas('canvas{}'.format(t), figsize)
        if (style_file is not None):
            ROOT.gROOT.LoadMacro(style_file)
        else:
            self.canvas.SetFillColor(0)
            self.canvas.SetGrid()
            self.canvas.GetFrame().SetFillColor(21)
            self.canvas.GetFrame().SetBorderSize(12)
        (graph, leg) = self._plot_tmva()
        graph.SetTitle(title)
        graph.GetXaxis().SetTitle(xlabel)
        if (xlim is not None):
            graph.GetXaxis().SetLimits(xlim[0], xlim[1])
        graph.GetYaxis().SetTitle(ylabel)
        if (ylim is not None):
            graph.SetMinimum(ylim[0])
            graph.SetMaximum(ylim[1])
        if show_legend:
            leg.Draw()
        # Keep graph/legend alive; ROOT only holds weak references to them.
        self._tmva_keeper.append((graph, leg))
        return self.canvas
class BalancedSampler(Sampler):
    """Class-balanced index sampler.

    Each batch is built from ``batch_size // images_per_class`` randomly
    drawn classes, contributing ``images_per_class`` (possibly repeated)
    samples apiece. Note that ``__len__`` reports the dataset size, not
    the number of yielded indices.
    """

    def __init__(self, data_source, batch_size, images_per_class=3):
        self.data_source = data_source
        self.ys = data_source.ys
        self.num_groups = batch_size // images_per_class
        self.batch_size = batch_size
        self.num_instances = images_per_class
        self.num_samples = len(self.ys)
        self.num_classes = len(set(self.ys))

    def __len__(self):
        return self.num_samples

    def __iter__(self):
        order = []
        # One pass per full batch that fits in the dataset.
        for _ in range(len(self.data_source) // self.batch_size):
            chosen = np.random.choice(self.num_classes, self.num_groups, replace=False)
            for cls in chosen:
                # All dataset positions carrying this class label.
                pool = np.nonzero((np.array(self.ys) == cls))[0]
                picks = np.random.choice(pool, size=self.num_instances, replace=True)
                order.extend(np.random.permutation(picks))
        return iter(order)
def get_model_cache(config):
    """Instantiate the data-model cache selected by config['DATA_MODEL_CACHE_CONFIG'].

    Supported engines: 'noop' (default), 'inmemory', 'memcached' (optionally
    wrapped to disconnect from the DB around cache calls), and
    'redis'/'rediscluster'.  Raises for unknown engines or a memcached
    config missing its endpoint.
    """
    cache_config = config.get('DATA_MODEL_CACHE_CONFIG', {})
    engine = cache_config.get('engine', 'noop')
    if engine == 'noop':
        return NoopDataModelCache(cache_config)
    if engine == 'inmemory':
        return InMemoryDataModelCache(cache_config)
    if engine == 'memcached':
        endpoint = cache_config.get('endpoint', None)
        if endpoint is None:
            raise Exception('Missing `endpoint` for memcached model cache configuration')
        model_cache = MemcachedModelCache(
            cache_config,
            endpoint,
            timeout=cache_config.get('timeout'),
            connect_timeout=cache_config.get('connect_timeout'),
        )
        # Optionally drop the DB connection while talking to memcached.
        if cache_config.get('predisconnect_from_db'):
            model_cache = DisconnectWrapper(model_cache, config)
        return model_cache
    if engine in ('redis', 'rediscluster'):
        return RedisDataModelCache(cache_config, redis_cache_from_config(cache_config))
    raise Exception('Unknown model cache engine `%s`' % engine)
def run_one_test(pm, args, index, tidx):
    """Execute one test-case dict ``tidx`` and return its TestResult.

    Stages: plugin pre-case hook -> setup commands -> command under test
    (exit-code check) -> verify command (regex match-count check) ->
    teardown -> plugin post-case hook.  ``pm`` is the plugin manager.
    """
    global NAMES
    # NOTE(review): result/tresult/tap are never read below - presumably
    # leftovers from an earlier reporting scheme; confirm before removing.
    result = True
    tresult = ''
    tap = ''
    res = TestResult(tidx['id'], tidx['name'])
    if (args.verbose > 0):
        print('\t\n=====> ', end='')
        print(((('Test ' + tidx['id']) + ': ') + tidx['name']))
    # Honor an explicit per-test skip flag without running setup/execute.
    if ('skip' in tidx):
        if (tidx['skip'] == 'yes'):
            res = TestResult(tidx['id'], tidx['name'])
            res.set_result(ResultState.skip)
            res.set_errormsg('Test case designated as skipped.')
            pm.call_pre_case(tidx, test_skip=True)
            pm.call_post_execute()
            return res
    # Expose the test id to command templates via the NAMES substitutions.
    NAMES['TESTID'] = tidx['id']
    pm.call_pre_case(tidx)
    prepare_env(args, pm, 'setup', '-----> prepare stage', tidx['setup'])
    if (args.verbose > 0):
        print('-----> execute stage')
    pm.call_pre_execute()
    (p, procout) = exec_cmd(args, pm, 'execute', tidx['cmdUnderTest'])
    # p can be None when the command could not be launched at all.
    if p:
        exit_code = p.returncode
    else:
        exit_code = None
    pm.call_post_execute()
    if ((exit_code is None) or (exit_code != int(tidx['expExitCode']))):
        print('exit: {!r}'.format(exit_code))
        print('exit: {}'.format(int(tidx['expExitCode'])))
        res.set_result(ResultState.fail)
        res.set_failmsg('Command exited with {}, expected {}\n{}'.format(exit_code, tidx['expExitCode'], procout))
        print(procout)
    else:
        # Exit code matched; now check the verify command's output against
        # the expected regex match count.
        if (args.verbose > 0):
            print('-----> verify stage')
        match_pattern = re.compile(str(tidx['matchPattern']), (re.DOTALL | re.MULTILINE))
        (p, procout) = exec_cmd(args, pm, 'verify', tidx['verifyCmd'])
        if procout:
            match_index = re.findall(match_pattern, procout)
            if (len(match_index) != int(tidx['matchCount'])):
                res.set_result(ResultState.fail)
                res.set_failmsg('Could not match regex pattern. Verify command output:\n{}'.format(procout))
            else:
                res.set_result(ResultState.success)
        elif (int(tidx['matchCount']) != 0):
            # No output but matches were expected.
            res.set_result(ResultState.fail)
            res.set_failmsg('No output generated by verify command.')
        else:
            res.set_result(ResultState.success)
    prepare_env(args, pm, 'teardown', '-----> teardown stage', tidx['teardown'], procout)
    pm.call_post_case()
    # NOTE(review): index is a local; this increment has no effect outside
    # the function.
    index += 1
    del NAMES['TESTID']
    return res
def original_initialization(mask_temp, initial_state_dict):
    """Rewind the global ``model`` to its initial weights, re-applying the
    pruning masks to weight tensors (lottery-ticket style reset).

    mask_temp: sequence of numpy masks, one per ``*weight*`` parameter, in
    ``named_parameters()`` order.  initial_state_dict: saved initial tensors
    keyed by parameter name.
    """
    global model
    step = 0
    for (name, param) in model.named_parameters():
        if ('weight' in name):
            weight_dev = param.device
            # Masked initial weights, converted via numpy on CPU then moved
            # back to the parameter's original device.
            param.data = torch.from_numpy((mask_temp[step] * initial_state_dict[name].cpu().numpy())).to(weight_dev)
            step = (step + 1)
        if ('bias' in name):
            # Biases are restored unmasked.
            param.data = initial_state_dict[name]
    # No effect - leftover reset of the mask counter.
    step = 0
def hsv_to_rgb(h, s, v):
    """Convert HSV components in [0, 1] to an (r, g, b) tuple of ints in [0, 255].

    h is the hue (wraps at 1.0), s the saturation, v the value/brightness.
    """
    if (s == 0.0):
        # Achromatic (grey): scale to 0-255 like the chromatic branches
        # below.  (Bug fix: previously returned unscaled floats in [0, 1].)
        v = int((v * 255))
        return (v, v, v)
    i = int((h * 6.0))  # colour-wheel sector 0..5
    f = ((h * 6.0) - i)  # fractional position inside the sector
    p = (v * (1.0 - s))
    q = (v * (1.0 - (s * f)))
    t = (v * (1.0 - (s * (1.0 - f))))
    i = (i % 6)  # h == 1.0 wraps around to sector 0
    v = int((v * 255))
    t = int((t * 255))
    p = int((p * 255))
    q = int((q * 255))
    if (i == 0):
        return (v, t, p)
    if (i == 1):
        return (q, v, p)
    if (i == 2):
        return (p, v, t)
    if (i == 3):
        return (p, q, v)
    if (i == 4):
        return (t, p, v)
    # i == 5 is the only remaining case, so always return a tuple.
    return (v, p, q)
def _anneal_pass(s, result, e, sn, kmax):
    """One simulated-annealing sweep: propose images round-robin from `sn`,
    accept improvements (or occasionally worse moves, Metropolis-style).

    Returns (best_energy, last_candidate_index)."""
    k = 0
    while ((k < kmax) and (len(result) < (kmax // 2))):
        # Advance to the next candidate not already selected, wrapping at kmax.
        while True:
            sn += 1
            if (sn == kmax):
                sn = 0
            if (sn not in result):
                break
        # Score the candidate tentatively without keeping it.
        s.add_to_set(sn)
        en = s.get_fsc_sum()
        s.remove_from_set(sn)
        if (en > e):
            # Improvement: accept and raise the acceptance energy.
            result.append(sn)
            s.set_img_set(result)
            e = en
        elif (math.exp((- abs((en - e))) / (k + 1)) < random.random()):
            # Probabilistic acceptance of a non-improving move; note the
            # acceptance energy `e` is intentionally left unchanged here.
            result.append(sn)
            s.set_img_set(result)
        k += 1
    return (e, sn)


def _prune_pass(s, result, e):
    """Greedily drop images whose removal raises the FSC sum.

    Iterates over a snapshot because `result` is mutated inside the loop
    (the original iterated the live list, silently skipping elements)."""
    for sn in list(result):
        s.remove_from_set(sn)
        en = s.get_fsc_sum()
        s.add_to_set(sn)
        if (en > e):
            result.remove(sn)
            s.set_img_set(result)
            e = en
    return e


def simulated_annealing(ratio):
    """Select an image subset maximizing the FSC sum via simulated annealing.

    Mixes 100 rotation-variant images with int(100*ratio) true images,
    seeds the selection with a random accepted pair, then runs ITERATION
    anneal+prune passes, recording resolution/accuracy/timing to Excel.
    """
    imgs = {}
    masks = {}
    t = []
    measure = []
    true_image_set = []
    resolution = []
    img_set = []
    mask = []
    img_true = img('true__out__images.pickle')[0]
    mask_true = img('true__out__images.pickle')[1]
    img_rotate = img('rotation_var__out__images.pickle')[0]
    mask_rotate = img('rotation_var__out__images.pickle')[1]
    # Interleave rotated images with true images; remember the indices of
    # the true ones for the accuracy measure.
    for i in range(100):
        img_set.append(img_rotate[i])
        mask.append(mask_rotate[i])
        if (i < int((100 * ratio))):
            img_set.append(img_true[i])
            mask.append(mask_true[i])
            true_image_set.append((len(img_set) - 1))
    for i in range(len(img_set)):
        imgs[i] = img_set[i]
        masks[i] = mask[i]
    s = SSNR3D(imgs, masks)
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented replacement for elapsed-time measurement.
    start = time.perf_counter()
    # Seed the selection with a random distinct pair whose combined FSC
    # sum clears the acceptance threshold.
    while True:
        result = []
        center_num1 = random.randrange(0, len(img_set), 1)
        center_num2 = random.randrange(0, len(img_set), 1)
        if (center_num1 != center_num2):
            result.append(center_num1)
            result.append(center_num2)
            s.set_img_set(result)
            e = s.get_fsc_sum()
            if (e > 6.8):
                break
    print(result)
    kmax = (100 + int((100 * ratio)))
    s_record = center_num2
    for _ in range(ITERATION):
        (e, s_record) = _anneal_pass(s, result, e, s_record, kmax)
        e = _prune_pass(s, result, e)
        resolution.append(e)
        measure.append(cal_accuracy(result, true_image_set))
        end = time.perf_counter()
        t.append((end - start))
        print(('Resolution = ' + str(e)))
    print(('Time = ' + str((end - start))))
    # `result` holds ints; str.join needs strings (the original crashed here).
    print(('Result: ' + ', '.join(map(str, result))))
    start_y = 1
    sheet = 0
    for i in range(start_y, (ITERATION + start_y)):
        write_to_excel(sheet, 1, i, resolution[(i - start_y)])
        write_to_excel(sheet, 2, i, measure[(i - start_y)][0])
        write_to_excel(sheet, 3, i, measure[(i - start_y)][1])
        write_to_excel(sheet, 4, i, measure[(i - start_y)][2])
        write_to_excel(sheet, 5, i, t[(i - start_y)])
        if (i == start_y):
            write_to_excel(sheet, 0, i, ratio)
# NOTE(review): the bare `_model` below looks like a class decorator whose
# `@...` prefix was lost in extraction - confirm against the original source.
_model
class ProjectUser(User):
    """A User enriched with its membership data for a specific project."""
    # Membership fields come from the project-membership payload, not the
    # nested user blob.  NOTE(review): annotated int/str but defaulted to
    # None - presumably "unset" sentinels; confirm.
    project_id: int = field(default=None)
    project_user_id: int = field(default=None)
    role: str = field(default=None)
    def from_json(cls, value: JsonResponse, **kwargs) -> 'ProjectUser':
        # NOTE(review): takes `cls` and calls super().from_json - this reads
        # like a @classmethod whose decorator was lost in extraction; confirm.
        # Flatten: graft the membership fields onto the nested 'user'
        # payload, then delegate to User.from_json.
        user = value.get('user', {})
        user['project_id'] = value['project_id']
        user['project_user_id'] = value['id']
        user['role'] = value['role']
        return super(ProjectUser, cls).from_json(user, **kwargs)
    def _str_attrs(self) -> List[str]:
        # Extra attribute names to include in the string representation.
        return ['project_id', 'project_user_id', 'role']
class TestSpanObserver(SpanObserver):
    """Span observer that records every callback so tests can inspect
    start/finish state, tags, logs, and recursively-observed children."""

    def __init__(self, span):
        self.span = span
        self.on_start_called = False
        self.on_finish_called = False
        self.on_finish_exc_info = None
        self.tags = {}
        self.logs = []
        self.children = []

    def on_start(self):
        # A span may be started at most once.
        assert not self.on_start_called, 'start was already called on this span'
        self.on_start_called = True

    def on_set_tag(self, key, value):
        self.tags[key] = value

    def assert_tag(self, key, value):
        """Fail with a readable message when `key` is absent or differs."""
        assert key in self.tags, f'{key!r} not found in tags ({list(self.tags.keys())!r})'
        assert self.tags[key] == value, 'tag {!r}: expected value {!r} but found {!r}'.format(key, value, self.tags[key])

    def on_log(self, name, payload):
        self.logs.append((name, payload))

    def on_finish(self, exc_info):
        # A span may be finished at most once; remember any exception info.
        assert not self.on_finish_called, 'finish was already called on this span'
        self.on_finish_called = True
        self.on_finish_exc_info = exc_info

    def on_child_span_created(self, span):
        # Wrap each child span in its own recording observer.
        observer = TestSpanObserver(span)
        self.children.append(observer)
        span.register(observer)

    def get_only_child(self):
        assert len(self.children) == 1, 'observer has wrong number of children'
        return self.children[0]
# NOTE(review): the line below looks like the argument of a webhook-handler
# decorator (e.g. djstripe's) whose `@name` prefix was lost in extraction -
# confirm against the original source.
('invoice.payment_succeeded')
def invoice_paid_to_slack(event, **kwargs):
    """Stripe webhook handler: announce a paid invoice in Slack.

    Re-fetches the invoice from Stripe and syncs it locally before posting.
    """
    data = event.data['object']
    invoice_id = data['id']
    # Sync the local Invoice record from the authoritative Stripe object.
    invoice = Invoice.sync_from_stripe_data(stripe.Invoice.retrieve(invoice_id))
    log.debug('Stripe invoice %s is paid. Posting to Slack...', invoice)
    slack_message('adserver/slack/invoice-paid.slack', {'customer': invoice.customer, 'invoice': invoice})
# NOTE(review): `draw` as the sole argument suggests this is a hypothesis
# @composite strategy whose decorator was lost in extraction - confirm.
def dataset_utm_north_down(draw):
    """Hypothesis-style strategy: build a FakeDataset with a translation+scale
    affine transform (origin within +/-1e6 m, pixel size 0.1-30 m) and a
    random height/width of 1-1000 pixels.
    """
    x = draw(floats(min_value=(- 1000000.0), max_value=1000000.0, allow_nan=False, allow_infinity=False))
    y = draw(floats(min_value=(- 1000000.0), max_value=1000000.0, allow_nan=False, allow_infinity=False))
    res = draw(floats(min_value=0.1, max_value=30, allow_nan=False, allow_infinity=False))
    h = draw(integers(min_value=1, max_value=1000))
    w = draw(integers(min_value=1, max_value=1000))
    return FakeDataset(transform=((windows.Affine.identity() * windows.Affine.translation(x, y)) * windows.Affine.scale(res)), height=h, width=w)
# NOTE(review): the call below reads like decorator/configuration residue
# from extraction - confirm against the original source.
_execution_thread(None)
class TestStyle(PyScriptTest):
    """Checks that core.css hides <py-config>/<py-script> elements even when
    the pyscript JS module is not loaded."""
    def test_pyscript_not_defined(self):
        # NOTE(review): the fixture closes <py-script> with </script>; looks
        # like a typo in the HTML but is preserved verbatim here - confirm.
        doc = '\n <html>\n <head>\n <link rel="stylesheet" href="build/core.css" />\n </head>\n <body>\n <py-config>hello</py-config>\n <py-script>hello</script>\n </body>\n </html>\n '
        self.writefile('test-not-defined-css.html', doc)
        self.goto('test-not-defined-css.html')
        # The stylesheet alone must hide both custom elements.
        expect(self.page.locator('py-config')).to_be_hidden()
        expect(self.page.locator('py-script')).to_be_hidden()
class TestGenericTags():
    """Tests for packaging.tags' generic (non-interpreter-specific) ABI and
    platform tag computation.

    NOTE(review): the lines starting with `.parametrize(...)` / `.skipif(...)`
    below look like `@pytest.mark.` decorators whose prefix was lost in
    extraction - confirm against the original test module.
    """
    def test__generic_abi_macos(self, monkeypatch):
        # EXT_SUFFIX alone drives the ABI on macOS.
        monkeypatch.setattr(sysconfig, 'get_config_var', (lambda key: '.cpython-37m-darwin.so'))
        monkeypatch.setattr(tags, 'interpreter_name', (lambda : 'cp'))
        assert (tags._generic_abi() == ['cp37m'])
    def test__generic_abi_linux_cpython(self, monkeypatch):
        # The generic ABI should agree with the CPython-specific one.
        config = {'Py_DEBUG': False, 'WITH_PYMALLOC': True, 'EXT_SUFFIX': '.cpython-37m-x86_64-linux-gnu.so'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        monkeypatch.setattr(tags, 'interpreter_name', (lambda : 'cp'))
        assert (tags._cpython_abis((3, 7)) == ['cp37m'])
        assert (tags._generic_abi() == ['cp37m'])
    def test__generic_abi_jp(self, monkeypatch):
        # Unrecognized suffixes are passed through verbatim.
        config = {'EXT_SUFFIX': '.return_exactly_this.so'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == ['return_exactly_this'])
    def test__generic_abi_graal(self, monkeypatch):
        # Dashes in the suffix become underscores in the tag.
        config = {'EXT_SUFFIX': '.graalpy-38-native-x86_64-darwin.so'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == ['graalpy_38_native'])
    def test__generic_abi_disable_gil(self, monkeypatch):
        # Free-threaded CPython ("t" ABI flag) round-trips.
        config = {'Py_DEBUG': False, 'EXT_SUFFIX': '.cpython-313t-x86_64-linux-gnu.so', 'WITH_PYMALLOC': 0, 'Py_GIL_DISABLED': 1}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == ['cp313t'])
        assert (tags._generic_abi() == tags._cpython_abis((3, 13)))
    def test__generic_abi_none(self, monkeypatch):
        # An empty ABI portion yields no tags.
        config = {'EXT_SUFFIX': '..so'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == [])
    .parametrize('ext_suffix', ['invalid', None])
    def test__generic_abi_error(self, ext_suffix, monkeypatch):
        # Malformed/missing EXT_SUFFIX is a SystemError naming the variable.
        config = {'EXT_SUFFIX': ext_suffix}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        with pytest.raises(SystemError) as e:
            tags._generic_abi()
        assert ('EXT_SUFFIX' in str(e.value))
    def test__generic_abi_linux_pypy(self, monkeypatch):
        config = {'Py_DEBUG': False, 'EXT_SUFFIX': '.pypy39-pp73-x86_64-linux-gnu.so'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        monkeypatch.setattr(tags, 'interpreter_name', (lambda : 'pp'))
        assert (tags._generic_abi() == ['pypy39_pp73'])
    def test__generic_abi_old_windows(self, monkeypatch):
        # Bare '.pyd' (pre-3.8 Windows) falls back to the CPython ABIs.
        config = {'EXT_SUFFIX': '.pyd', 'Py_DEBUG': 0, 'WITH_PYMALLOC': 0, 'Py_GIL_DISABLED': 0}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == tags._cpython_abis(sys.version_info[:2]))
    def test__generic_abi_windows(self, monkeypatch):
        config = {'EXT_SUFFIX': '.cp310-win_amd64.pyd'}
        monkeypatch.setattr(sysconfig, 'get_config_var', config.__getitem__)
        assert (tags._generic_abi() == ['cp310'])
    .skipif((sys.implementation.name != 'cpython'), reason='CPython-only')
    def test__generic_abi_agree(self):
        # On a real CPython, the generic and CPython-specific ABIs agree.
        assert (tags._generic_abi() == tags._cpython_abis(sys.version_info[:2]))
    def test_generic_platforms(self):
        # Platform tag is sysconfig's platform with '-' and '.' normalized.
        platform = sysconfig.get_platform().replace('-', '_')
        platform = platform.replace('.', '_')
        assert (list(tags._generic_platforms()) == [platform])
    def test_generic_platforms_space(self, monkeypatch):
        # Spaces are normalized to underscores too.
        platform_ = 'isilon onefs'
        monkeypatch.setattr(sysconfig, 'get_platform', (lambda : platform_))
        assert (list(tags._generic_platforms()) == [platform_.replace(' ', '_')])
    def test_iterator_returned(self):
        result_iterator = tags.generic_tags('sillywalk33', ['abi'], ['plat1', 'plat2'])
        assert isinstance(result_iterator, collections.abc.Iterator)
    def test_all_args(self):
        # Tags are ordered: given abi first, then the 'none' fallback.
        result_iterator = tags.generic_tags('sillywalk33', ['abi'], ['plat1', 'plat2'])
        result = list(result_iterator)
        assert (result == [tags.Tag('sillywalk33', 'abi', 'plat1'), tags.Tag('sillywalk33', 'abi', 'plat2'), tags.Tag('sillywalk33', 'none', 'plat1'), tags.Tag('sillywalk33', 'none', 'plat2')])
    .parametrize('abi', [[], ['none']])
    def test_abi_unspecified(self, abi):
        # [] and ['none'] are equivalent: only 'none' tags are produced.
        no_abi = list(tags.generic_tags('sillywalk34', abi, ['plat1', 'plat2']))
        assert (no_abi == [tags.Tag('sillywalk34', 'none', 'plat1'), tags.Tag('sillywalk34', 'none', 'plat2')])
    def test_interpreter_default(self, monkeypatch):
        # Default interpreter is name + version.
        monkeypatch.setattr(tags, 'interpreter_name', (lambda : 'sillywalk'))
        monkeypatch.setattr(tags, 'interpreter_version', (lambda warn: 'NN'))
        result = list(tags.generic_tags(abis=['none'], platforms=['any']))
        assert (result == [tags.Tag('sillywalkNN', 'none', 'any')])
    def test_abis_default(self, monkeypatch):
        # Default abis come from _generic_abi(), plus the 'none' fallback.
        monkeypatch.setattr(tags, '_generic_abi', (lambda : ['abi']))
        result = list(tags.generic_tags(interpreter='sillywalk', platforms=['any']))
        assert (result == [tags.Tag('sillywalk', 'abi', 'any'), tags.Tag('sillywalk', 'none', 'any')])
    def test_platforms_default(self, monkeypatch):
        monkeypatch.setattr(tags, 'platform_tags', (lambda : ['plat']))
        result = list(tags.generic_tags(interpreter='sillywalk', abis=['none']))
        assert (result == [tags.Tag('sillywalk', 'none', 'plat')])
# NOTE(review): the call below reads like decorator residue from extraction
# (possibly `@error_logging()` or similar) - confirm against the original.
_error_logging()
class SignalsPlotter(AbstractDocument):
    """PDF report that plots, per ticker, candlestick price charts highlighted
    with the exposures suggested by each alpha model, plus the models' source
    code for reference."""
    def __init__(self, tickers: Union[(Ticker, Sequence[Ticker])], start_date: datetime, end_date: datetime, data_handler: DataHandler, alpha_models: Union[(AlphaModel, Sequence[AlphaModel])], settings: Settings, pdf_exporter: PDFExporter, title: str='Signals Plotter', signal_frequency: Frequency=Frequency.DAILY, data_frequency: Frequency=Frequency.DAILY):
        super().__init__(settings, pdf_exporter, title)
        # Accept scalars or sequences uniformly.
        (self.tickers, _) = convert_to_list(tickers, Ticker)
        (self.alpha_models, _) = convert_to_list(alpha_models, AlphaModel)
        self.start_date = start_date
        self.end_date = end_date
        self.data_handler = data_handler
        # Signals are generated by replaying time, so the handler's timer
        # must be settable.
        assert isinstance(self.data_handler.timer, SettableTimer)
        self.timer: SettableTimer = self.data_handler.timer
        self.signal_frequency = signal_frequency
        self.data_frequency = data_frequency
        for ticker in self.tickers:
            if isinstance(ticker, FutureTicker):
                # Future tickers need a data provider to resolve contracts.
                ticker.initialize_data_provider(SettableTimer(end_date), self.data_handler.data_provider)
    def build_document(self):
        """Assemble the document: header, per-ticker analyses, model sources."""
        self._add_header()
        for ticker in self.tickers:
            self.create_tickers_analysis(ticker)
        self.document.add_element(NewPageElement())
        self.add_models_implementation()
    def create_tickers_analysis(self, ticker: Ticker):
        """Add one candlestick chart per alpha model (exposures highlighted)
        plus a combined summary chart for the given ticker."""
        prices_df = self.get_prices(ticker)
        alpha_model_signals: Dict[(AlphaModel, QFSeries)] = {}
        self.document.add_element(HeadingElement(level=2, text=ticker.name))
        for alpha_model in self.alpha_models:
            exposures = []
            dates = []
            prev_exposure = Exposure.OUT
            # Replay each signal date, feeding the previous exposure back in.
            for date in self._get_signals_dates():
                try:
                    self.timer.set_current_time(date)
                    new_exposure = alpha_model.get_signal(ticker, prev_exposure, date, self.data_frequency).suggested_exposure
                    exposures.append(new_exposure.value)
                    dates.append(date)
                    prev_exposure = new_exposure
                except NoValidTickerException as e:
                    # Dates with no valid contract are simply skipped.
                    print(e)
            exposures_series = QFSeries(data=exposures, index=dates)
            alpha_model_signals[alpha_model] = exposures_series
            candlestick_chart = CandlestickChart(prices_df, title=str(alpha_model))
            candlestick_chart.add_highlight(exposures_series)
            position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
            candlestick_chart.add_decorator(position_decorator)
            self.document.add_element(ChartElement(candlestick_chart, figsize=self.full_image_size, dpi=self.dpi))
        # Summary chart overlaying every model's exposure highlights.
        candlestick_chart = CandlestickChart(prices_df, title='All models summary')
        for (model, exposures_series) in alpha_model_signals.items():
            candlestick_chart.add_highlight(exposures_series)
        position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
        candlestick_chart.add_decorator(position_decorator)
        self.document.add_element(ChartElement(candlestick_chart, figsize=self.full_image_size, dpi=self.dpi))
    def get_prices(self, ticker: Ticker):
        """Return OHLC prices; future tickers go through a futures chain."""
        if isinstance(ticker, FutureTicker):
            futures_chain = FuturesChain(ticker, self.data_handler.data_provider, FuturesAdjustmentMethod.NTH_NEAREST)
            prices_df = futures_chain.get_price(PriceField.ohlc(), start_date=self.start_date, end_date=self.end_date, frequency=self.data_frequency)
        else:
            prices_df = self.data_handler.data_provider.get_price(ticker, PriceField.ohlc(), start_date=self.start_date, end_date=self.end_date, frequency=self.data_frequency)
        return prices_df
    def add_models_implementation(self):
        """Append each distinct model class's source file content (from the
        class definition onward) as preformatted text."""
        alpha_model_types = {alpha_model.__class__ for alpha_model in self.alpha_models}
        for model_type in alpha_model_types:
            self.document.add_element(HeadingElement(2, 'Implementation of {}'.format(model_type.__name__)))
            self.document.add_element(ParagraphElement('\n'))
            with open(inspect.getfile(model_type)) as f:
                class_implementation = f.read()
            # Keep only the text from "class <Name>" to the end of the file.
            class_implementation = (('<pre>class {}'.format(model_type.__name__) + class_implementation.split('class {}'.format(model_type.__name__))[1]) + '</pre>')
            self.document.add_element(CustomElement(class_implementation))
    def save(self, report_dir: str=''):
        """Export the document as a timestamped PDF and return its path."""
        plt.style.use(['tearsheet'])
        file_name = '%Y_%m_%d-%H%M {}.pdf'.format(self.title)
        file_name = datetime.now().strftime(file_name)
        if (not file_name.endswith('.pdf')):
            file_name = '{}.pdf'.format(file_name)
        return self.pdf_exporter.generate([self.document], report_dir, file_name)
    def _get_signals_dates(self):
        """Dates/times at which signals are evaluated: calendar days for DAILY,
        or intraday timestamps between market open and close for higher
        frequencies."""
        evaluation_dates = []
        if (self.signal_frequency == Frequency.DAILY):
            evaluation_dates = date_range(self.start_date, self.end_date, freq=self.signal_frequency.to_pandas_freq())
        elif (self.signal_frequency > Frequency.DAILY):
            days_dates = date_range(self.start_date, self.end_date, freq='D')
            signal_time_holder = []
            for day in days_dates:
                start_time = (day + MarketOpenEvent.trigger_time())
                end_time = (day + MarketCloseEvent.trigger_time())
                signal_times_for_a_day = date_range(start_time, end_time, freq=self.signal_frequency.to_pandas_freq())
                signal_time_holder.append(signal_times_for_a_day)
            # NOTE(review): Index.union_many was removed in modern pandas -
            # confirm the pinned pandas version supports it.
            evaluation_dates = signal_time_holder[0].union_many(signal_time_holder[1:])
        return evaluation_dates
class Migration(migrations.Migration):
    # Adds a DB-level uniqueness guarantee for "null offer" impressions:
    # at most one AdImpression row per (publisher, date) where the
    # advertisement is NULL.
    dependencies = [('adserver', '0047_breakout_ad_parts')]
    operations = [migrations.AddConstraint(model_name='adimpression', constraint=models.UniqueConstraint(condition=models.Q(advertisement=None), fields=('publisher', 'date'), name='null_offer_unique'))]
# NOTE(review): the call below appears to be `@functional_datapipe(...)` with
# its prefix lost in extraction - confirm against the original source.
_datapipe('save_by_iopath')
class IoPathSaverIterDataPipe(IterDataPipe[str]):
    """Iterable DataPipe that writes each (metadata, payload) item to a file
    through iopath's PathManager and yields the destination path."""
    def __init__(self, source_datapipe: IterDataPipe[Tuple[(Any, U)]], mode: str='w', filepath_fn: Optional[Callable]=None, *, pathmgr=None, handler=None):
        # iopath is an optional dependency; fail fast with install hints.
        # NOTE(review): the message is missing spaces around 'datapipe.Please'
        # and 'iopath`to' - looks like a typo in the original.
        if (iopath is None):
            raise ModuleNotFoundError('Package `iopath` is required to be installed to use this datapipe.Please use `pip install iopath` or `conda install -c conda-forge iopath`to install the package')
        self.source_datapipe: IterDataPipe[Tuple[(Any, U)]] = source_datapipe
        self.mode: str = mode
        # Maps an item's metadata to its output path; the metadata itself is
        # used as the path when no function is given.
        self.filepath_fn: Optional[Callable] = filepath_fn
        self.pathmgr = (_create_default_pathmanager() if (pathmgr is None) else pathmgr)
        if (handler is not None):
            self.register_handler(handler, allow_override=True)
    def __iter__(self) -> Iterator[str]:
        for (meta, data) in self.source_datapipe:
            filepath = (meta if (self.filepath_fn is None) else self.filepath_fn(meta))
            # File lock + existence check: only the first worker writes a
            # given path; later workers just yield it.
            with iopath.file_lock(filepath):
                if (not os.path.exists(filepath)):
                    with self.pathmgr.open(filepath, self.mode) as f:
                        f.write(data)
            (yield filepath)
    def register_handler(self, handler, allow_override=False):
        # Delegate custom URI-scheme handlers to the PathManager.
        self.pathmgr.register_handler(handler, allow_override=allow_override)
    def __len__(self) -> int:
        return len(self.source_datapipe)
class Memory(MemoryAPI):
    """Byte-addressable EVM memory backed by a growable bytearray."""

    __slots__ = ['_bytes']
    logger = logging.getLogger('eth.vm.memory.Memory')

    def __init__(self) -> None:
        self._bytes = bytearray()

    def extend(self, start_position: int, size: int) -> None:
        """Grow memory (zero-filled) to a 32-byte-aligned length that covers
        [start_position, start_position + size)."""
        if (size == 0):
            return
        new_size = ceil32((start_position + size))
        if (new_size <= len(self)):
            return
        grow_by = new_size - len(self)
        try:
            self._bytes.extend(itertools.repeat(0, grow_by))
        except BufferError:
            # An exported memoryview pins the buffer against in-place
            # resizing; fall back to building a new, larger bytearray.
            self._bytes = self._bytes + bytearray(grow_by)

    def __len__(self) -> int:
        return len(self._bytes)

    def write(self, start_position: int, size: int, value: bytes) -> None:
        """Copy `value` (exactly `size` bytes) into already-extended memory."""
        if not size:
            return
        validate_uint256(start_position)
        validate_uint256(size)
        validate_is_bytes(value)
        validate_length(value, length=size)
        # The target range must already fit; extend() is the caller's job.
        validate_lte((start_position + size), maximum=len(self))
        self._bytes[start_position:(start_position + size)] = value

    def read(self, start_position: int, size: int) -> memoryview:
        # Zero-copy view of the requested slice.
        return memoryview(self._bytes)[start_position:(start_position + size)]

    def read_bytes(self, start_position: int, size: int) -> bytes:
        # Independent copy of the requested slice.
        return bytes(self._bytes[start_position:(start_position + size)])
class ResNet(nn.Module):
    """Width-parameterized ResNet: stage widths k, 2k, 4k, 8k, ending in
    global average pooling and a linear classifier."""

    def __init__(self, block, num_block, k=10, num_classes=100):
        super(ResNet, self).__init__()
        self.in_channels = 1 * k
        # Stem: 3x3 conv keeping spatial size, then BN + ReLU.
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 1 * k, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(1 * k),
            nn.ReLU(inplace=True),
        )
        # Four residual stages; all but the first halve the resolution.
        self.conv2_x = self._make_layer(block, 1 * k, num_block[0], 1)
        self.conv3_x = self._make_layer(block, 2 * k, num_block[1], 2)
        self.conv4_x = self._make_layer(block, 4 * k, num_block[2], 2)
        self.conv5_x = self._make_layer(block, 8 * k, num_block[3], 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(8 * k * block.expansion, num_classes)

    def _make_layer(self, block, out_channels, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_channels, out_channels, s))
            self.in_channels = out_channels * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.conv2_x, self.conv3_x, self.conv4_x, self.conv5_x):
            out = stage(out)
        out = self.avg_pool(out)
        out = out.view(out.size(0), -1)
        return self.fc(out)
def load(filename, file=None, mimetype=None):
    """Decode a document from `filename`, or from an already-open `file`.

    The decoder is chosen from the filename/mimetype; a caller-supplied file
    object is consumed and closed.  Byte content is decoded to text before
    being handed to the decoder.
    """
    decoder = get_decoder(filename, mimetype)
    if file:
        file_contents = file.read()
        file.close()
    else:
        with open(filename) as f:
            file_contents = f.read()
    if hasattr(file_contents, 'decode'):
        # A binary stream was supplied: turn bytes into text.
        file_contents = file_contents.decode()
    location = pyglet.resource.FileLocation(_dirname(filename))
    return decoder.decode(file_contents, location)
def wps_webpage_taskreward(sid: str):
    """Fetch the WPS task-reward list for session `sid` and check each task.

    Returns 0 when the sid is rejected (redirected to login); otherwise
    returns None after checking the three task groups.
    """
    # NOTE(review): the URL literal below was stripped in extraction
    # (unterminated string) - restore it from the original source.
    tasklist_url = '
    r = s.post(tasklist_url, headers={'sid': sid})
    # Any redirect in the history means the sid was not accepted.
    if (len(r.history) != 0):
        if (r.history[0].status_code == 302):
            # NOTE(review): the message text appears to have lost its
            # non-ASCII content in extraction - confirm.
            sio.write(': sid, \n\n')
            return 0
    resp = json.loads(r.text)
    # The three task groups live under data['1'..'3'].task.
    resplist = [resp['data']['1']['task'], resp['data']['2']['task'], resp['data']['3']['task']]
    # NOTE(review): statustask is assigned but never used.
    statustask = 1
    for i in range(len(resplist)):
        checkinformation(resplist[i], sid)
def parse_args():
    """Build the CLI for the transpose-on-LocalCUDACluster benchmark.

    Declares the benchmark-specific options (problem size, cpu/gpu arrays,
    chunk and kernel sizes, message-size cutoff, run count, compute backend)
    and delegates the shared options and parsing to parse_benchmark_args.
    """
    special_args = [{'name': ['-s', '--size'], 'default': '10000', 'metavar': 'n', 'type': int, 'help': 'The size n in n^2 (default 10000)'}, {'name': ['-t', '--type'], 'choices': ['cpu', 'gpu'], 'default': 'gpu', 'type': str, 'help': 'Use GPU or CPU arrays'}, {'name': ['-c', '--chunk-size'], 'default': '128 MiB', 'metavar': 'nbytes', 'type': str, 'help': "Chunk size (default '128 MiB')"}, {'name': ['-k', '--kernel-size'], 'default': '1', 'metavar': 'k', 'type': int, 'help': 'Kernel size, 2*k+1, in each dimension (default 1)'}, {'name': '--ignore-size', 'default': '1 MiB', 'metavar': 'nbytes', 'type': parse_bytes, 'help': "Ignore messages smaller than this (default '1 MB')"}, {'name': '--runs', 'default': 3, 'type': int, 'help': 'Number of runs'}, {'name': ['-b', '--backend'], 'choices': ['dask', 'dask-noop'], 'default': 'dask', 'type': str, 'help': 'Compute backend to use.'}]
    return parse_benchmark_args(description='Transpose on LocalCUDACluster benchmark', args_list=special_args)
def listRedundantModules():
    """Print modules reachable in sys.modules under more than one name.

    Maps each module's (normalized) source file to the first name it was
    seen under and reports any later name resolving to the same file.
    Returns None; output goes to stdout.
    """
    mods = {}
    # Snapshot the items: sys.modules can mutate if anything imports lazily
    # while we iterate, which would raise RuntimeError.
    for (name, mod) in list(sys.modules.items()):
        # getattr guard: built-ins lack __file__, and namespace packages can
        # expose __file__ = None (abspath(None) would crash).
        mfile = getattr(mod, '__file__', None)
        if not mfile:
            continue
        mfile = os.path.abspath(mfile)
        if (mfile[(- 1)] == 'c'):
            # Normalize compiled files (.pyc) to their source (.py).
            mfile = mfile[:(- 1)]
        if (mfile in mods):
            print(('module at %s has 2 names: %s, %s' % (mfile, name, mods[mfile])))
        else:
            mods[mfile] = name
class Compressor(object):
    """Streaming Brotli compressor built on the CFFI bindings (`lib`/`ffi`)."""
    # NOTE(review): presumably placeholders for a custom-dictionary feature;
    # never set in the visible code - confirm.
    _dictionary = None
    _dictionary_size = None
    def __init__(self, mode=DEFAULT_MODE, quality=lib.BROTLI_DEFAULT_QUALITY, lgwin=lib.BROTLI_DEFAULT_WINDOW, lgblock=0):
        """Create and configure an encoder; mode/quality/lgwin/lgblock follow
        the Brotli C API parameter semantics."""
        enc = lib.BrotliEncoderCreateInstance(ffi.NULL, ffi.NULL, ffi.NULL)
        if (not enc):
            raise RuntimeError('Unable to allocate Brotli encoder!')
        # Tie the native encoder's lifetime to this Python object.
        enc = ffi.gc(enc, lib.BrotliEncoderDestroyInstance)
        _set_parameter(enc, lib.BROTLI_PARAM_MODE, 'mode', mode)
        _set_parameter(enc, lib.BROTLI_PARAM_QUALITY, 'quality', quality)
        _set_parameter(enc, lib.BROTLI_PARAM_LGWIN, 'lgwin', lgwin)
        _set_parameter(enc, lib.BROTLI_PARAM_LGBLOCK, 'lgblock', lgblock)
        self._encoder = enc
    def _compress(self, data, operation):
        """Drive one BrotliEncoderCompressStream call and return its output.

        The output buffer is sized at ~1.25x the input plus 10 KiB on the
        assumption that a single call consumes all input; the assert below
        enforces that assumption.
        """
        original_output_size = int(math.ceil(((len(data) + (len(data) >> 2)) + 10240)))
        available_out = ffi.new('size_t *')
        available_out[0] = original_output_size
        output_buffer = ffi.new('uint8_t []', available_out[0])
        ptr_to_output_buffer = ffi.new('uint8_t **', output_buffer)
        input_size = ffi.new('size_t *', len(data))
        input_buffer = ffi.new('uint8_t []', data)
        ptr_to_input_buffer = ffi.new('uint8_t **', input_buffer)
        rc = lib.BrotliEncoderCompressStream(self._encoder, operation, input_size, ptr_to_input_buffer, available_out, ptr_to_output_buffer, ffi.NULL)
        if (rc != lib.BROTLI_TRUE):
            raise error('Error encountered compressing data.')
        # All input must have been consumed in this single call.
        assert (not input_size[0])
        # The encoder decrements available_out by the bytes it produced.
        size_of_output = (original_output_size - available_out[0])
        return ffi.buffer(output_buffer, size_of_output)[:]
    def compress(self, data):
        """Compress `data`; output may be buffered until flush()/finish()."""
        return self._compress(data, lib.BROTLI_OPERATION_PROCESS)
    # Legacy alias kept for API compatibility.
    process = compress
    def flush(self):
        """Emit all pending output without terminating the stream."""
        chunks = [self._compress(b'', lib.BROTLI_OPERATION_FLUSH)]
        while (lib.BrotliEncoderHasMoreOutput(self._encoder) == lib.BROTLI_TRUE):
            chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FLUSH))
        return b''.join(chunks)
    def finish(self):
        """Terminate the stream and return all remaining output."""
        chunks = []
        while (lib.BrotliEncoderIsFinished(self._encoder) == lib.BROTLI_FALSE):
            chunks.append(self._compress(b'', lib.BROTLI_OPERATION_FINISH))
        return b''.join(chunks)
class PublisherReportView(PublisherAccessMixin, BaseReportView):
    """Ad-performance report page for a single publisher."""
    # View name used to build the export link for this report.
    export_view = 'publisher_report_export'
    template_name = 'adserver/reports/publisher.html'
    # Columns emitted by the report export (presumably CSV - see export_view).
    fieldnames = ['index', 'views', 'clicks', 'ctr', 'ecpm', 'revenue', 'revenue_share']
    def get_context_data(self, **kwargs):
        """Build the template context; 404s on an unknown publisher slug."""
        context = super().get_context_data(**kwargs)
        publisher_slug = kwargs.get('publisher_slug', '')
        publisher = get_object_or_404(Publisher, slug=publisher_slug)
        # campaign_type/start_date/end_date are assumed to be populated by
        # BaseReportView.get_context_data - TODO confirm.
        queryset = self.get_queryset(publisher=publisher, campaign_type=context['campaign_type'], start_date=context['start_date'], end_date=context['end_date'])
        report = PublisherReport(queryset)
        report.generate()
        context.update({'publisher': publisher, 'report': report, 'campaign_types': CAMPAIGN_TYPES, 'export_url': self.get_export_url(publisher_slug=publisher.slug), 'metabase_publisher_dashboard': settings.METABASE_DASHBOARDS.get('PUBLISHER_FIGURES')})
        return context
def read_corpus(corpus_preprocessd_file, id2term_dict):
    """Map each corpus line's id to the space-joined set of its mapped terms.

    Each tab-separated line is: <id> TAB <skipped field> TAB <utterance>...
    Every whitespace token in the utterances is looked up in id2term_dict
    (KeyError for unknown tokens) and deduplicated per line.
    """
    id2corpus_terms = {}
    # `with` guarantees the corpus file is closed (the original leaked the
    # handle by iterating over a bare open()).
    with open(corpus_preprocessd_file) as corpus_file:
        for line in tqdm(corpus_file):
            term_set = set()
            fields = line.strip().split('\t')
            doc_id = fields[0]  # renamed from `id` to avoid shadowing the builtin
            # fields[1] is intentionally skipped, matching the input format.
            for utt in fields[2:]:
                for w in utt.split():
                    term_set.add(id2term_dict[w])
            id2corpus_terms[doc_id] = ' '.join(list(term_set))
    return id2corpus_terms
class MultiHeadedDotAttention(nn.Module):
    """Multi-head dot-product attention with three parallel key/value
    projections (p/n/c streams) and optional attention-on-attention (AoA)."""
    def __init__(self, h, d_model, dropout=0.1, scale=1, project_k_v=1, use_output_layer=1, do_aoa=0, norm_q=0, dropout_aoa=0.3):
        super(MultiHeadedDotAttention, self).__init__()
        # The scaled model width must split evenly across the h heads.
        assert (((d_model * scale) % h) == 0)
        self.d_k = ((d_model * scale) // h)
        self.h = h
        # Whether this module projects keys/values itself (1) or expects
        # pre-projected inputs (0).
        self.project_k_v = project_k_v
        if norm_q:
            self.norm = LayerNorm(d_model)
        else:
            self.norm = (lambda x: x)
        self.q_linears = nn.Linear(d_model, (d_model * scale))
        # Three independent projections each for keys and values (the
        # p/n/c streams consumed by attention()).
        self.k_linears = clones(nn.Linear(d_model, (d_model * scale)), 3)
        self.v_linears = clones(nn.Linear(d_model, (d_model * scale)), 3)
        self.output_layer = nn.Linear((d_model * scale), d_model)
        self.use_aoa = do_aoa
        if self.use_aoa:
            # AoA: GLU over [attended, query] replaces the output layer.
            self.aoa_layer = nn.Sequential(nn.Linear(((1 + scale) * d_model), (2 * d_model)), nn.GLU())
            if (dropout_aoa > 0):
                self.dropout_aoa = nn.Dropout(p=dropout_aoa)
            else:
                self.dropout_aoa = (lambda x: x)
        if (self.use_aoa or (not use_output_layer)):
            del self.output_layer
            self.output_layer = (lambda x: x)
        self.attn = None
        self.dropout = nn.Dropout(p=dropout)
    def forward(self, query, value, key, flag, mask=None):
        if (mask is not None):
            if (len(mask.size()) == 2):
                mask = mask.unsqueeze((- 2))
            # Broadcast the mask over all heads.
            mask = mask.unsqueeze(1)
        # Accept a single (unbatched-time) query and restore its shape at the end.
        single_query = 0
        if (len(query.size()) == 2):
            single_query = 1
            query = query.unsqueeze(1)
        nbatches = query.size(0)
        query = self.norm(query)
        if (self.project_k_v == 0):
            # NOTE(review): this branch is broken as written - `self.linears`
            # is never defined (presumably should be `self.q_linears`), and
            # key_/value_ are computed but the attention() call below expects
            # key_p/key_n/key_c and value_p/value_n/value_c, which only the
            # else-branch defines.  Confirm intent against the original
            # AoA-style implementation before fixing.
            query_ = self.linears[0](query).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
            key_ = key.view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
            value_ = value.view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
        else:
            # Project query once, and key/value three times (p/n/c streams),
            # then split into heads: (batch, h, seq, d_k).
            query_ = self.q_linears(query).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2)
            (key_p, key_n, key_c) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.k_linears, (key, key, key))]
            (value_p, value_n, value_c) = [l(x).view(nbatches, (- 1), self.h, self.d_k).transpose(1, 2) for (l, x) in zip(self.v_linears, (value, value, value))]
        (x, self.attn) = attention(query_, key_p, key_n, key_c, value_p, value_n, value_c, flag, mask=mask, dropout=self.dropout)
        # Merge heads back: (batch, seq, h * d_k).
        x = x.transpose(1, 2).contiguous().view(nbatches, (- 1), (self.h * self.d_k))
        if self.use_aoa:
            x = self.aoa_layer(self.dropout_aoa(torch.cat([x, query], (- 1))))
        x = self.output_layer(x)
        if single_query:
            query = query.squeeze(1)
            x = x.squeeze(1)
        return x
def add_standard_arguments(parser):
    """Attach the options shared by every CLI entry point to `parser`:
    --help, --loglevel, and --progress."""
    group = parser.add_argument_group('General options')
    group.add_argument('--help', '-h', action='help', help='Show this help message and exit.')
    loglevel_choices = ['critical', 'error', 'warning', 'info', 'debug']
    loglevel_default = 'info'
    group.add_argument(
        '--loglevel',
        choices=loglevel_choices,
        default=loglevel_default,
        metavar='LEVEL',
        help='Set logger level. Choices: %s. Default: %s.' % (ldq(loglevel_choices), dq(loglevel_default)),
    )
    progress_choices = ['terminal', 'log', 'off']
    progress_default = 'terminal'
    group.add_argument(
        '--progress',
        choices=progress_choices,
        default=progress_default,
        metavar='DEST',
        help='Set how progress status is reported. Choices: %s. Default: %s.' % (ldq(progress_choices), dq(progress_default)),
    )
def test_ahi_l2_area_def(himl2_filename, caplog):
    """Full-disk geometry must match expectations; a partial scene must raise."""
    from pyproj import CRS

    ps = '+a=6378137 +h= +lon_0=140.7 +no_defs +proj=geos +rf=298. +type=crs +units=m +x_0=0 +y_0=0'

    # A full-disk file yields the expected grid, extent and projection.
    handler = ahil2_filehandler(himl2_filename)
    data_id = make_dataid(name='cloudmask')
    area = handler.get_area_def(data_id)
    assert area.width == dimensions['Columns']
    assert area.height == dimensions['Rows']
    assert np.allclose(area.area_extent, exp_ext)
    assert area.crs == CRS(ps)

    # A non-full-disk line count must be rejected.
    handler = ahil2_filehandler(himl2_filename)
    handler.nlines = 3000
    with pytest.raises(ValueError, match='Input L2 file is not a full disk Himawari scene..*'):
        handler.get_area_def(data_id)
def bot_methods(ext_bot=True, include_camel_case=False):
    """Build a ``pytest.mark.parametrize`` over the public API methods of Bot.

    With ``ext_bot`` the ExtBot subclass is included as well. Private
    helpers, known non-API methods and (unless ``include_camel_case``)
    camelCase aliases are skipped.
    """
    skip = {'de_json', 'de_list', 'to_dict', 'to_json', 'parse_data',
            'get_bot', 'set_bot', 'initialize', 'shutdown',
            'insert_callback_data'}
    values = []
    test_ids = []
    for klass in ((Bot, ExtBot) if ext_bot else (Bot,)):
        for method_name, func in inspect.getmembers(klass, predicate=inspect.isfunction):
            if method_name.startswith('_') or method_name in skip:
                continue
            has_upper = any(ch.isupper() for ch in method_name)
            if has_upper and not include_camel_case:
                continue
            values.append((klass, method_name, func))
            test_ids.append(f'{klass.__name__}.{method_name}')
    return pytest.mark.parametrize(
        argnames='bot_class, bot_method_name,bot_method',
        argvalues=values,
        ids=test_ids,
    )
def retrieve_artifact(name: str, gpu: Optional[str]):
    """Load the text files of a CI artifact directory into a dict.

    Keys are the filenames without their extension, values are the file
    contents. ``gpu`` ('single'/'multi'/None) selects the prefixed
    artifact directory. Raises ValueError for an unknown ``gpu`` flavour
    or a file that is not readable as text.
    """
    if gpu not in [None, 'single', 'multi']:
        raise ValueError(f'Invalid GPU for artifact. Passed GPU: `{gpu}`.')
    if gpu is not None:
        name = f'{gpu}-gpu_{name}'

    contents = {}
    if not os.path.exists(name):
        return contents
    for entry in os.listdir(name):
        path = os.path.join(name, entry)
        try:
            with open(path) as handle:
                # Key on the part before the first dot, mirroring the
                # artifact naming scheme.
                contents[entry.split('.')[0]] = handle.read()
        except UnicodeDecodeError as e:
            raise ValueError(f'Could not open {path}.') from e
    return contents
def save_summaries(summaries, path, original_document_name):
    """Write each summary next to its source document name under *path*.

    A document "report.txt" is saved as "report_summary.txt"; a name
    without an extension gets a plain "_summary" suffix.
    """
    for text, doc_name in zip(summaries, original_document_name):
        if '.' in doc_name:
            # Split on the LAST dot so multi-dot names keep their stem.
            stem, _, ext = doc_name.rpartition('.')
            out_name = f'{stem}_summary.{ext}'
        else:
            out_name = f'{doc_name}_summary'
        with open(os.path.join(path, out_name), 'w') as handle:
            handle.write(text)
def test_move_items_by(qapp):
    """MoveItemsBy shifts every item on redo and restores positions on undo."""
    first = BeePixmapItem(QtGui.QImage())
    first.setPos(0, 0)
    second = BeePixmapItem(QtGui.QImage())
    second.setPos(30, 40)

    move = commands.MoveItemsBy([first, second], QtCore.QPointF(50, 100))

    move.redo()
    assert (first.pos().x(), first.pos().y()) == (50, 100)
    assert (second.pos().x(), second.pos().y()) == (80, 140)

    move.undo()
    assert (first.pos().x(), first.pos().y()) == (0, 0)
    assert (second.pos().x(), second.pos().y()) == (30, 40)
class DiscriminatorPointConv():
    """Point-cloud discriminator built from stacked ``point_conv_module`` layers.

    Variables live under the ``name`` scope; after the first ``__call__``
    the scope is re-entered with ``reuse=True`` so weights are shared
    across invocations.
    """

    def __init__(self, name, sorting_method='cxyz', activation_fn=tf.nn.leaky_relu, bn=True):
        # name: variable scope for all weights.
        # sorting_method: neighbor ordering passed to point_conv_module.
        # bn: whether batch normalisation is applied in every layer.
        self.name = name
        self.sorting_method = sorting_method
        self.activation_fn = activation_fn
        self.bn = bn
        self.reuse = False  # flips to True after the first call (weight sharing)

    def __call__(self, point_cloud, is_training):
        """Return one logit vector per batch element for *point_cloud*."""
        with tf.variable_scope(self.name, reuse=self.reuse):
            batch_size = point_cloud.shape[0]
            npoint = point_cloud.shape[1]
            l0_xyz = point_cloud
            l0_points = None  # first layer consumes raw coordinates only
            sort_mtd = self.sorting_method
            # Five point-conv stages, progressively shrinking the point set
            # while widening the feature channels.
            (l1_xyz, l1_points, l1_indices) = point_conv_module(l0_xyz, l0_points, npoint=1024, c_fts_out=64, k_neighbors=16, d_rate=1, is_training=is_training, sorting_method=sort_mtd, activation=self.activation_fn, bn=self.bn, scope='conv_layer_1', center_patch=False)
            (l2_xyz, l2_points, l2_indices) = point_conv_module(l1_xyz, l1_points, npoint=1024, c_fts_out=64, k_neighbors=1, d_rate=1, is_training=is_training, sorting_method=sort_mtd, activation=self.activation_fn, bn=self.bn, scope='conv_layer_2', center_patch=False)
            (l3_xyz, l3_points, l3_indices) = point_conv_module(l2_xyz, l2_points, npoint=512, c_fts_out=128, k_neighbors=8, d_rate=1, is_training=is_training, sorting_method=sort_mtd, activation=self.activation_fn, bn=self.bn, scope='conv_layer_3', center_patch=False)
            (l4_xyz, l4_points, l4_indices) = point_conv_module(l3_xyz, l3_points, npoint=128, c_fts_out=256, k_neighbors=4, d_rate=1, is_training=is_training, sorting_method=sort_mtd, activation=self.activation_fn, bn=self.bn, scope='conv_layer_4', center_patch=False)
            (l5_xyz, l5_points, l5_indices) = point_conv_module(l4_xyz, l4_points, npoint=64, c_fts_out=512, k_neighbors=2, d_rate=1, is_training=is_training, sorting_method=sort_mtd, activation=self.activation_fn, bn=self.bn, scope='conv_layer_5', center_patch=False)
            # 1x1 conv to a single channel, then flatten to (batch, npoints).
            output = pf.conv1d(l5_points, 1, is_training=is_training, name='output', kernel_size=1, strides=1, with_bn=self.bn, activation=None)
            output = tf.reshape(output, (batch_size, (- 1)))
        self.reuse = True
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        return output

    def __str__(self):
        """Return the trainable variable names of this scope, one per line."""
        train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name)
        # BUG FIX: the old `res[:len(res) - 2]` trimmed two characters but the
        # trailing separator is only one ('\n'), so the last character of the
        # final variable name was chopped off. join() needs no trimming.
        return '\n'.join(tv.name for tv in train_vars)
def test_proj_imshow(data_vda_jybeam_lower, use_dask):
    """Moment-0 projections should be displayable via plt.imshow."""
    plt = pytest.importorskip('matplotlib.pyplot')
    cube, data = cube_and_raw(data_vda_jybeam_lower, use_dask=use_dask)
    mom0 = cube.moment0()
    # Matplotlib < 2.1 cannot render Quantity-like arrays directly, so
    # fall back to the bare ndarray there.
    old_mpl = LooseVersion(plt.matplotlib.__version__) < LooseVersion('2.1')
    plt.imshow(mom0.value if old_mpl else mom0)
def get_lon_lat(pixel, nav_params):
    """Navigate an image *pixel* to geodetic (longitude, latitude).

    Pipeline: image coords -> scanning angles -> satellite frame ->
    earth-fixed frame -> intersection with the ellipsoid -> geodetic.
    """
    proj = nav_params.proj_params
    angles = transform_image_coords_to_scanning_angles(
        pixel, proj.image_offset, proj.scanning_angles)
    sat_vector = transform_scanning_angles_to_satellite_coords(
        angles, proj.scanning_angles.misalignment)
    fixed_vector = transform_satellite_to_earth_fixed_coords(
        sat_vector, nav_params.orbit, nav_params.attitude)
    surface_point = intersect_with_earth(
        fixed_vector, nav_params.orbit.sat_position, proj.earth_ellipsoid)
    return transform_earth_fixed_to_geodetic_coords(
        surface_point, proj.earth_ellipsoid.flattening)
def make_dataset(path, impl, fix_lua_indexing=False, dictionary=None):
    """Instantiate the dataset wrapper matching *impl* for *path*.

    Returns None when the requested implementation's files are absent.
    'raw' additionally requires a *dictionary*.
    """
    if impl == 'raw':
        if not IndexedRawTextDataset.exists(path):
            return None
        assert dictionary is not None
        return IndexedRawTextDataset(path, dictionary)
    if impl in ('lazy', 'cached'):
        if not IndexedDataset.exists(path):
            return None
        # Both variants share the same on-disk format.
        cls = IndexedDataset if impl == 'lazy' else IndexedCachedDataset
        return cls(path, fix_lua_indexing=fix_lua_indexing)
    if impl == 'mmap' and MMapIndexedDataset.exists(path):
        return MMapIndexedDataset(path)
    return None
def ws_sacpz(network=None, station=None, location=None, channel=None, time=None, tmin=None, tmax=None):
    """Query the SACPZ web service, forwarding only the given constraints.

    A missing location is sent as '--' (the blank location code). Either a
    single *time* or a *tmin*/*tmax* window may be supplied.
    """
    params = {}
    if network:
        params['network'] = network
    if station:
        params['station'] = station
    # '--' is the service's encoding of an empty location code.
    params['location'] = location if location else '--'
    if channel:
        params['channel'] = channel
    if tmin is not None and tmax is not None:
        params['starttime'] = sdatetime(tmin)
        params['endtime'] = sdatetime(tmax)
    elif time is not None:
        params['time'] = sdatetime(time)
    return ws_request(base_url + '/sacpz/1/query', **params)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.