code |
|---|
class UserProfileViewTest(TestCase):
@classmethod
def setUpTestData(cls):
add_default_data()
def test_UserProfileViewOk(self):
response = self.client.get(reverse('user_profile', args=['max']))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'petition/user_profile.html')
def test_UserProfileViewKo(self):
response = self.client.get(reverse('user_profile', args=['not_existing_user']))
self.assertEqual(response.status_code, 404) |
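For orientation, a minimal URLconf and view sketch that these tests would exercise. Only the route name `user_profile`, the username argument, and the `petition/user_profile.html` template come from the tests themselves; the view body, the model lookup, and the route path are assumptions.

```python
# Hypothetical urls.py/views.py pairing implied by the tests above.
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render
from django.urls import path

def user_profile(request, username):
    # get_object_or_404 produces the 404 the second test expects.
    user = get_object_or_404(User, username=username)
    return render(request, 'petition/user_profile.html', {'profile_user': user})

urlpatterns = [
    path('user/<str:username>/', user_profile, name='user_profile'),
]
```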
class UserShared(TelegramObject):
__slots__ = ('request_id', 'user_id')
def __init__(self, request_id: int, user_id: int, *, api_kwargs: Optional[JSONDict]=None):
super().__init__(api_kwargs=api_kwargs)
self.request_id: int = request_id
self.user_id: int = user_id
self._id_attrs = (self.request_id, self.user_id)
self._freeze() |
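A brief usage sketch (values are illustrative): after `_freeze()` runs at the end of `__init__`, instances are effectively immutable, and `_id_attrs` means two objects with the same `request_id` and `user_id` compare equal, assuming the usual `TelegramObject` equality semantics.

```python
shared = UserShared(request_id=1, user_id=123456789)
print(shared.request_id, shared.user_id)  # 1 123456789

try:
    shared.user_id = 0  # frozen objects reject attribute assignment
except AttributeError as exc:
    print(exc)

# Equality and hashing follow _id_attrs.
assert shared == UserShared(request_id=1, user_id=123456789)
```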
class MainMenuBar(wx.MenuBar):
def __init__(self, mainFrame):
pyfalog.debug('Initialize MainMenuBar')
self.characterEditorId = wx.NewId()
self.damagePatternEditorId = wx.NewId()
self.targetProfileEditorId = wx.NewId()
self.implantSetEditorId = wx.NewId()
self.graphFrameId = wx.NewId()
self.backupFitsId = wx.NewId()
self.exportSkillsNeededId = wx.NewId()
self.importCharacterId = wx.NewId()
self.exportHtmlId = wx.NewId()
self.wikiId = wx.NewId()
self.forumId = wx.NewId()
self.saveCharId = wx.NewId()
self.saveCharAsId = wx.NewId()
self.revertCharId = wx.NewId()
self.eveFittingsId = wx.NewId()
self.exportToEveId = wx.NewId()
self.ssoLoginId = wx.NewId()
self.attrEditorId = wx.NewId()
self.toggleOverridesId = wx.NewId()
self.toggleIgnoreRestrictionID = wx.NewId()
self.devToolsId = wx.NewId()
self.optimizeFitPrice = wx.NewId()
self.mainFrame = mainFrame
wx.MenuBar.__init__(self)
fileMenu = wx.Menu()
self.Append(fileMenu, _t('&File'))
fileMenu.Append(self.mainFrame.addPageId, (_t('&New Tab') + '\tCTRL+T'), 'Open a new fitting tab')
fileMenu.Append(self.mainFrame.closePageId, (_t('&Close Tab') + '\tCTRL+W'), 'Close the current fit')
fileMenu.Append(self.mainFrame.closeAllPagesId, (_t('&Close All Tabs') + '\tCTRL+ALT+W'), 'Close all open fits')
fileMenu.AppendSeparator()
fileMenu.Append(self.backupFitsId, _t('&Backup All Fittings'), _t('Backup all fittings to an XML file'))
fileMenu.Append(self.exportHtmlId, _t('Export All Fittings to &HTML'), _t('Export fits to HTML file (set in Preferences)'))
fileMenu.AppendSeparator()
fileMenu.Append(wx.ID_EXIT)
fitMenu = wx.Menu()
self.Append(fitMenu, _t('Fi&t'))
fitMenu.Append(wx.ID_UNDO, (_t('&Undo') + '\tCTRL+Z'), _t('Undo the most recent action'))
fitMenu.Append(wx.ID_REDO, (_t('&Redo') + '\tCTRL+Y'), _t('Redo the most recent undone action'))
fitMenu.AppendSeparator()
fitMenu.Append(wx.ID_COPY, (_t('&To Clipboard') + '\tCTRL+C'), _t('Export a fit to the clipboard'))
fitMenu.Append(wx.ID_PASTE, (_t('&From Clipboard') + '\tCTRL+V'), _t('Import a fit from the clipboard'))
fitMenu.AppendSeparator()
fitMenu.Append(wx.ID_OPEN, (_t('&Import Fittings') + '\tCTRL+O'), _t('Import fittings into pyfa'))
fitMenu.Append(wx.ID_SAVEAS, (_t('&Export Fitting') + '\tCTRL+S'), _t('Export fitting to another format'))
fitMenu.AppendSeparator()
fitMenu.Append(self.optimizeFitPrice, (_t('&Optimize Fit Price') + '\tCTRL+D'))
graphFrameItem = wx.MenuItem(fitMenu, self.graphFrameId, (_t('&Graphs') + '\tCTRL+G'))
graphFrameItem.SetBitmap(BitmapLoader.getBitmap('graphs_small', 'gui'))
fitMenu.Append(graphFrameItem)
if (not graphs.graphFrame_enabled):
self.Enable(self.graphFrameId, False)
self.ignoreRestrictionItem = fitMenu.Append(self.toggleIgnoreRestrictionID, _t('Disable Fitting Re&strictions'))
fitMenu.AppendSeparator()
fitMenu.Append(self.eveFittingsId, (_t('&Browse ESI Fittings') + '\tCTRL+B'))
fitMenu.Append(self.exportToEveId, (_t('E&xport to ESI') + '\tCTRL+E'))
self.Enable(self.eveFittingsId, True)
self.Enable(self.exportToEveId, True)
characterMenu = wx.Menu()
self.Append(characterMenu, _t('&Character'))
characterMenu.Append(self.saveCharId, _t('&Save Character'))
characterMenu.Append(self.saveCharAsId, _t('Save Character &As...'))
characterMenu.Append(self.revertCharId, _t('&Revert Character'))
characterMenu.AppendSeparator()
characterMenu.Append(self.importCharacterId, _t('&Import Character File'), _t('Import characters into pyfa from file'))
characterMenu.Append(self.exportSkillsNeededId, _t('&Export Skills Needed'), _t('Export skills needed for this fitting'))
characterMenu.AppendSeparator()
characterMenu.Append(self.ssoLoginId, _t('&Manage ESI Characters'))
globalMenu = wx.Menu()
if (not self.mainFrame.disableOverrideEditor):
attrItem = wx.MenuItem(globalMenu, self.attrEditorId, _t('Attribute &Overrides'))
attrItem.SetBitmap(BitmapLoader.getBitmap('fit_rename_small', 'gui'))
globalMenu.Append(attrItem)
globalMenu.Append(self.toggleOverridesId, _t('&Turn Overrides On'))
globalMenu.AppendSeparator()
self.Append(globalMenu, _t('&Global'))
preferencesShortCut = ('CTRL+,' if ('wxMac' in wx.PlatformInfo) else 'CTRL+P')
preferencesItem = wx.MenuItem(globalMenu, wx.ID_PREFERENCES, ((_t('&Preferences') + '\t') + preferencesShortCut))
preferencesItem.SetBitmap(BitmapLoader.getBitmap('preferences_small', 'gui'))
globalMenu.Append(preferencesItem)
editorsMenu = wx.Menu()
self.Append(editorsMenu, _t('&Editors'))
charEditItem = wx.MenuItem(editorsMenu, self.characterEditorId, (_t('&Character Editor') + '\tCTRL+K'))
charEditItem.SetBitmap(BitmapLoader.getBitmap('character_small', 'gui'))
editorsMenu.Append(charEditItem)
implantSetEditItem = wx.MenuItem(editorsMenu, self.implantSetEditorId, (_t('&Implant Set Editor') + '\tCTRL+I'))
implantSetEditItem.SetBitmap(BitmapLoader.getBitmap('hardwire_small', 'gui'))
editorsMenu.Append(implantSetEditItem)
damagePatternEditItem = wx.MenuItem(editorsMenu, self.damagePatternEditorId, _t('&Damage Pattern Editor'))
damagePatternEditItem.SetBitmap(BitmapLoader.getBitmap('damagePattern_small', 'gui'))
editorsMenu.Append(damagePatternEditItem)
targetProfileEditItem = wx.MenuItem(editorsMenu, self.targetProfileEditorId, _t('&Target Profile Editor'))
targetProfileEditItem.SetBitmap(BitmapLoader.getBitmap('explosive_small', 'gui'))
editorsMenu.Append(targetProfileEditItem)
helpMenu = wx.Menu()
self.Append(helpMenu, _t('&Help'))
helpMenu.Append(self.wikiId, _t('&Wiki'), _t('Go to wiki on GitHub'))
helpMenu.Append(self.forumId, _t('&Forums'), _t('Go to EVE Online Forum thread'))
helpMenu.AppendSeparator()
helpMenu.Append(wx.ID_ABOUT)
if config.debug:
helpMenu.Append(self.mainFrame.widgetInspectMenuID, _t('Open Wid&gets Inspect tool'), _t('Open Widgets Inspect tool'))
helpMenu.Append(self.devToolsId, _t('Open &Dev Tools'), _t('Dev Tools'))
self.mainFrame.Bind(GE.FIT_CHANGED, self.fitChanged)
self.mainFrame.Bind(GE.FIT_RENAMED, self.fitRenamed)
def fitChanged(self, event):
event.Skip()
activeFitID = self.mainFrame.getActiveFit()
if ((activeFitID is not None) and (activeFitID not in event.fitIDs)):
return
enable = (activeFitID is not None)
self.Enable(wx.ID_SAVEAS, enable)
self.Enable(wx.ID_COPY, enable)
self.Enable(self.exportSkillsNeededId, enable)
self.refreshUndo()
sChar = Character.getInstance()
charID = self.mainFrame.charSelection.getActiveCharacter()
char = sChar.getCharacter(charID)
self.Enable(self.saveCharId, ((not char.ro) and char.isDirty))
self.Enable(self.saveCharAsId, char.isDirty)
self.Enable(self.revertCharId, char.isDirty)
self.Enable(self.toggleIgnoreRestrictionID, enable)
if activeFitID:
sFit = Fit.getInstance()
fit = sFit.getFit(activeFitID)
if fit.ignoreRestrictions:
self.ignoreRestrictionItem.SetItemLabel(_t('Enable Fitting Re&strictions'))
else:
self.ignoreRestrictionItem.SetItemLabel(_t('Disable Fitting Re&strictions'))
def fitRenamed(self, event):
self.refreshUndo()
event.Skip()
def refreshUndo(self):
command = self.mainFrame.command
self.Enable(wx.ID_UNDO, False)
self.Enable(wx.ID_REDO, False)
if command.CanUndo():
self.Enable(wx.ID_UNDO, True)
if command.CanRedo():
self.Enable(wx.ID_REDO, True) |
class CfdCommand(object):
def __init__(self):
self.resources = {'Pixmap': 'fem-cfd-analysis', 'MenuText': QtCore.QT_TRANSLATE_NOOP('Cfd_Command', 'Default Cfd Command MenuText'), 'Accel': '', 'ToolTip': QtCore.QT_TRANSLATE_NOOP('Cfd_Command', 'Default Cfd Command ToolTip')}
self.is_active = None
def GetResources(self):
return self.resources
def IsActive(self):
if (not self.is_active):
active = False
elif (self.is_active == 'with_document'):
active = (FreeCADGui.ActiveDocument is not None)
elif (self.is_active == 'with_analysis'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc())
elif (self.is_active == 'with_results'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc() and self.results_present())
elif (self.is_active == 'with_selresult'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc() and self.result_selected())
elif (self.is_active == 'with_part_feature'):
active = ((FreeCADGui.ActiveDocument is not None) and self.part_feature_selected())
elif (self.is_active == 'with_femmesh'):
active = ((FreeCADGui.ActiveDocument is not None) and self.femmesh_selected())
elif (self.is_active == 'with_gmsh_femmesh'):
active = ((FreeCADGui.ActiveDocument is not None) and self.gmsh_femmesh_selected())
elif (self.is_active == 'with_femmesh_andor_res'):
active = ((FreeCADGui.ActiveDocument is not None) and self.with_femmesh_andor_res_selected())
elif (self.is_active == 'with_material'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc() and self.material_selected())
elif (self.is_active == 'with_solver'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc() and self.solver_selected())
elif (self.is_active == 'with_analysis_without_solver'):
active = ((FemGui.getActiveAnalysis() is not None) and self.active_analysis_in_active_doc() and (not self.analysis_has_solver()))
else:
active = False
return active
def results_present(self):
results = False
analysis_members = FemGui.getActiveAnalysis().Group
for o in analysis_members:
if o.isDerivedFrom('Fem::FemResultObject'):
results = True
return results
def result_selected(self):
result_is_in_active_analysis = False
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('Fem::FemResultObject')):
for o in FemGui.getActiveAnalysis().Group:
if (o == sel[0]):
result_is_in_active_analysis = True
break
return result_is_in_active_analysis
def part_feature_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('Part::Feature')):
return True
else:
return False
def femmesh_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('Fem::FemMeshObject')):
return True
else:
return False
def gmsh_femmesh_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and hasattr(sel[0], 'Proxy') and (sel[0].Proxy.Type == 'FemMeshGmsh')):
return True
else:
return False
def material_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('App::MaterialObjectPython')):
return True
else:
return False
def with_femmesh_andor_res_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('Fem::FemMeshObject')):
return True
elif (len(sel) == 2):
if sel[0].isDerivedFrom('Fem::FemMeshObject'):
if sel[1].isDerivedFrom('Fem::FemResultObject'):
return True
else:
return False
elif sel[1].isDerivedFrom('Fem::FemMeshObject'):
if sel[0].isDerivedFrom('Fem::FemResultObject'):
return True
else:
return False
else:
return False
else:
return False
def active_analysis_in_active_doc(self):
return (FemGui.getActiveAnalysis().Document is FreeCAD.ActiveDocument)
def solver_selected(self):
sel = FreeCADGui.Selection.getSelection()
if ((len(sel) == 1) and sel[0].isDerivedFrom('Fem::FemSolverObjectPython')):
return True
else:
return False
def analysis_has_solver(self):
solver = False
analysis_members = FemGui.getActiveAnalysis().Group
for o in analysis_members:
if o.isDerivedFrom('Fem::FemSolverObjectPython'):
solver = True
return solver
def hide_meshes_show_parts_constraints(self):
if FreeCAD.GuiUp:
for acnstrmesh in FemGui.getActiveAnalysis().Group:
if ('Constraint' in acnstrmesh.TypeId):
acnstrmesh.ViewObject.Visibility = True
if ('Mesh' in acnstrmesh.TypeId):
aparttoshow = acnstrmesh.Name.replace('_Mesh', '')
for apart in FreeCAD.activeDocument().Objects:
if (aparttoshow == apart.Name):
apart.ViewObject.Visibility = True
acnstrmesh.ViewObject.Visibility = False |
def load(fp: BinaryIO, *, parse_float: ParseFloat=float) -> Dict[(str, Any)]:
s_bytes = fp.read()
try:
s = s_bytes.decode()
except AttributeError:
warnings.warn('Text file object support is deprecated in favor of binary file objects. Use `open("foo.toml", "rb")` to open the file in binary mode.', DeprecationWarning, stacklevel=2)
s = s_bytes
return loads(s, parse_float=parse_float) |
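Typical call, per the deprecation warning above: open the file in binary mode and let `load` do the decoding. The filename is illustrative, and the function is assumed to be exposed at the package level as in tomli.

```python
import tomli

with open("pyproject.toml", "rb") as f:  # binary mode, as the warning requests
    config = tomli.load(f)
print(config.get("tool", {}))
```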
@st.cache(ttl=86400, hash_funcs={pymedphys.mosaiq.Connection: id})
def _get_all_columns(connection, table):
raw_columns = pymedphys.mosaiq.execute(connection, '\n SELECT COLUMN_NAME, DATA_TYPE\n FROM INFORMATION_SCHEMA.COLUMNS\n WHERE TABLE_NAME = %(table)s\n ', {'table': table})
columns = [item[0] for item in raw_columns]
types = [item[1] for item in raw_columns]
return (columns, types) |
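A hedged usage sketch: the hostname is a placeholder, and `pymedphys.mosaiq.connect` is assumed to be the companion connection helper. The function returns parallel lists of column names and SQL data types pulled from `INFORMATION_SCHEMA`.

```python
import pymedphys

connection = pymedphys.mosaiq.connect("msq-server")  # hostname is hypothetical
columns, types = _get_all_columns(connection, table="Ident")
for name, sql_type in zip(columns, types):
    print(f"{name}: {sql_type}")
```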
class Config():
def __init__(self) -> None:
self.root_path: str = '.'
self.data_set: str = 'sample'
self.batch_size: int = 32
self.if_shuffle: bool = True
self.label_size: Optional[int] = None
self.char_embed: int = 128
self.num_filters: int = 128
self.word_dropout: float = 0.05
self.hidden_size: int = 256
self.layers: int = 2
self.lstm_dropout: float = 0.5
self.embed_path: str = (self.root_path + '/data/word_vec_{}.bin'.format(self.data_set))
self.epoch: int = 500
self.if_gpu: bool = True
self.opt: Optimizer = Optimizer.AdaBound
self.lr: float = (0.001 if (self.opt != Optimizer.SGD) else 0.1)
self.final_lr: Optional[float] = (0.1 if (self.opt == Optimizer.AdaBound) else None)
self.l2: float = 0.0
self.check_every: int = 1
self.clip_norm: int = 5
self.lr_patience: int = (3 if (self.opt != Optimizer.SGD) else 5)
self.data_path: str = (self.root_path + '/data/{}'.format(self.data_set))
self.train_data_path: str = (self.data_path + '_train.pkl')
self.dev_data_path: str = (self.data_path + '_dev.pkl')
self.test_data_path: str = (self.data_path + '_test.pkl')
self.config_data_path: str = (self.data_path + '_config.pkl')
self.model_root_path: str = (self.root_path + '/dumps')
self.model_path: str = (self.model_root_path + '/{}_model'.format(self.data_set))
def __repr__(self) -> str:
return str(vars(self)) |
@uses(web_fixture=WebFixture)
class NavbarToggleFixture(Fixture):
def is_expanded(self, locator):
return (self.web_fixture.driver_browser.is_visible(locator) and self.web_fixture.driver_browser.does_element_have_attribute(locator, 'class', value='collapse show'))
def panel_is_visible(self):
return self.web_fixture.driver_browser.is_visible(XPath.paragraph().including_text('Peek-A-Boo'))
def panel_is_expanded(self):
return self.is_expanded(XPath.paragraph().including_text('Peek-A-Boo'))
def xpath_to_locate_toggle(self):
return XPath('//span[contains(@class,"navbar-toggler-icon")]')
def new_MainWidget(self):
fixture = self
class MainWidget(Div):
def __init__(self, view):
super().__init__(view)
navbar = Navbar(view)
navbar.use_layout(NavbarLayout(colour_theme='dark', bg_scheme='dark'))
fixture.element_to_collapse = P(view, text='Peek-A-Boo', css_id='my_id')
navbar.layout.add_toggle(fixture.element_to_collapse)
self.add_child(fixture.element_to_collapse)
self.add_child(navbar)
return MainWidget |
def test_excluded_subpackage() -> None:
poetry = Factory().create_poetry(project('excluded_subpackage'))
builder = SdistBuilder(poetry)
setup = builder.build_setup()
setup_ast = ast.parse(setup)
setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)]
ns: dict[(str, Any)] = {}
exec(compile(setup_ast, filename='setup.py', mode='exec'), ns)
assert (ns['packages'] == ['example']) |
def getTrainingData(batch_size=64):
__imagenet_pca = {'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]), 'eigvec': torch.Tensor([[(- 0.5675), 0.7192, 0.4009], [(- 0.5808), (- 0.0045), (- 0.814)], [(- 0.5836), (- 0.6948), 0.4203]])}
__imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
transformed_training = depthDataset(csv_file='./data/nyu2_train.csv', transform=transforms.Compose([Scale(240), RandomHorizontalFlip(), RandomRotate(5), CenterCrop([304, 228], [152, 114]), ToTensor(), Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']), ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), Normalize(__imagenet_stats['mean'], __imagenet_stats['std'])]))
dataloader_training = DataLoader(transformed_training, batch_size, shuffle=True, num_workers=16, pin_memory=True)
return dataloader_training |
def test_towgs84_transformation__defaults():
transformation = ToWGS84Transformation(GeographicCRS())
assert (transformation.towgs84 == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
assert (_to_dict(transformation) == {'Scale difference': 0.0, 'X-axis rotation': 0.0, 'X-axis translation': 0.0, 'Y-axis rotation': 0.0, 'Y-axis translation': 0.0, 'Z-axis rotation': 0.0, 'Z-axis translation': 0.0}) |
def test_cannot_order_room_with_random_room_id(graphql_client, hotel_room_factory, user, conference_factory, mocker, bed_layout_factory):
graphql_client.force_login(user)
conference = conference_factory(start=timezone.make_aware(timezone.datetime(2020, 1, 1)), end=timezone.make_aware(timezone.datetime(2020, 1, 10)))
create_order_mock = mocker.patch('api.orders.mutations.create_order')
room = hotel_room_factory(conference=conference)
bed_layout = bed_layout_factory()
room.available_bed_layouts.add(bed_layout)
response = graphql_client.query('mutation CreateOrder($code: String!, $input: CreateOrderInput!) {\n createOrder(conference: $code, input: $input) {\n __typename\n\n ... on CreateOrderResult {\n paymentUrl\n }\n\n ... on Error {\n message\n }\n }\n }', variables={'code': conference.code, 'input': {'tickets': [], 'paymentProvider': 'stripe', 'email': 'patrick.', 'hotelRooms': [{'roomId': '', 'checkin': '2020-01-05', 'checkout': '2020-01-03', 'bedLayoutId': str(bed_layout.id)}], 'invoiceInformation': {'isBusiness': False, 'company': '', 'name': 'Patrick', 'street': 'street', 'zipcode': '92100', 'city': 'Avellino', 'country': 'IT', 'vatId': '', 'fiscalCode': 'GNLNCH22T27L523A'}, 'locale': 'en'}})
assert (not response.get('errors'))
assert (response['data']['createOrder']['__typename'] == 'Error')
assert (response['data']['createOrder']['message'] == 'Room not found')
create_order_mock.assert_not_called() |
class transformer_decoder(nn.Module):
def __init__(self, num_layers):
super(transformer_decoder, self).__init__()
base = MultiHeadAttention(1, 192, 64, 192)
self.q = nn.Parameter(torch.rand(1, 64, 192).cuda())  # move the tensor before wrapping it, so the Parameter stays a registered leaf
self.layer0 = MultiHeadAttention_d0(1, 192, 64, 192)
self.layer1 = MultiHeadAttention(1, 192, 64, 192)
self.layers = clones(base, (num_layers - 1))
def forward(self, x, mask):
q = self.layer0(self.q, self.q, self.q)
n = x.size(0)
q = q.expand(n, 64, 192)
x = self.layer1(q, x, x, mask=mask.cuda())
for layer in self.layers:
x = layer(x, x, x)
return x |
def eth_abi_encode(func: dict, args: list) -> str:
if (not func):
return '00'
types = list([inp['type'] for inp in func.get('inputs', [])])
if func.get('name'):
result = (function_abi_to_4byte_selector(func) + encode_abi(types, args))
else:
result = encode_abi(types, args)
return result.hex() |
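An illustrative call with a hand-written ABI fragment (function and values are made up). Because the fragment has a `name`, the 4-byte selector is prepended, so the hex string is ready to use as transaction calldata; the first 8 hex characters are the selector.

```python
transfer_abi = {
    "name": "transfer",
    "type": "function",
    "inputs": [
        {"name": "to", "type": "address"},
        {"name": "value", "type": "uint256"},
    ],
}

calldata = eth_abi_encode(transfer_abi, ["0x" + "11" * 20, 10 ** 18])
print(calldata[:8])   # 4-byte function selector, hex-encoded
print(len(calldata))  # 8 + two 32-byte words = 136 hex chars
```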
class TestNonce(TestCase):
def test_use(self):
self.assertEqual(Nonce.objects.count(), 0)
self.assertTrue(Nonce.use(server_url='/', timestamp=1, salt='1'))
self.assertFalse(Nonce.use(server_url='/', timestamp=1, salt='1'))
self.assertEqual(Nonce.objects.count(), 1) |
class OpencorporaTag(object):
PARTS_OF_SPEECH = frozenset(['NOUN', 'ADJF', 'ADJS', 'COMP', 'VERB', 'INFN', 'PRTF', 'PRTS', 'GRND', 'NUMR', 'ADVB', 'NPRO', 'PRED', 'PREP', 'CONJ', 'PRCL', 'INTJ'])
ANIMACY = frozenset(['anim', 'inan'])
GENDERS = frozenset(['masc', 'femn', 'neut'])
NUMBERS = frozenset(['sing', 'plur'])
CASES = frozenset(['nomn', 'gent', 'datv', 'accs', 'ablt', 'loct', 'voct', 'gen1', 'gen2', 'acc2', 'loc1', 'loc2'])
ASPECTS = frozenset(['perf', 'impf'])
TRANSITIVITY = frozenset(['tran', 'intr'])
PERSONS = frozenset(['1per', '2per', '3per'])
TENSES = frozenset(['pres', 'past', 'futr'])
MOODS = frozenset(['indc', 'impr'])
VOICES = frozenset(['actv', 'pssv'])
INVOLVEMENT = frozenset(['incl', 'excl'])
typed_grammemes = True
FORMAT = 'opencorpora-int'
_NON_PRODUCTIVE_GRAMMEMES = set(['NUMR', 'NPRO', 'PRED', 'PREP', 'CONJ', 'PRCL', 'INTJ', 'Apro'])
_EXTRA_INCOMPATIBLE = {'plur': set(['GNdr'])}
_GRAMMEME_INDICES = collections.defaultdict(int)
_GRAMMEME_INCOMPATIBLE = collections.defaultdict(set)
_LAT2CYR = None
_CYR2LAT = None
KNOWN_GRAMMEMES = set()
_NUMERAL_AGREEMENT_GRAMMEMES = (set(['sing', 'nomn']), set(['sing', 'accs']), set(['sing', 'gent']), set(['plur', 'nomn']), set(['plur', 'gent']))
RARE_CASES = {'gen1': 'gent', 'gen2': 'gent', 'acc1': 'accs', 'acc2': 'accs', 'loc1': 'loct', 'loc2': 'loct', 'voct': 'nomn'}
__slots__ = ['_grammemes_tuple', '_grammemes_cache', '_str', '_POS', '_cyr', '_cyr_grammemes_cache']
def __init__(self, tag):
self._str = tag
grammemes = tag.replace(' ', ',', 1).split(',')
grammemes_tuple = tuple([intern(str(g)) for g in grammemes])
self._assert_grammemes_are_known(set(grammemes_tuple))
self._grammemes_tuple = grammemes_tuple
self._POS = self._grammemes_tuple[0]
self._grammemes_cache = None
self._cyr_grammemes_cache = None
self._cyr = None
POS = _select_grammeme_from(PARTS_OF_SPEECH)
animacy = _select_grammeme_from(ANIMACY)
aspect = _select_grammeme_from(ASPECTS)
case = _select_grammeme_from(CASES)
gender = _select_grammeme_from(GENDERS)
involvement = _select_grammeme_from(INVOLVEMENT)
mood = _select_grammeme_from(MOODS)
number = _select_grammeme_from(NUMBERS)
person = _select_grammeme_from(PERSONS)
tense = _select_grammeme_from(TENSES)
transitivity = _select_grammeme_from(TRANSITIVITY)
voice = _select_grammeme_from(VOICES)
@property
def grammemes(self):
if (self._grammemes_cache is None):
self._grammemes_cache = frozenset(self._grammemes_tuple)
return self._grammemes_cache
@property
def grammemes_cyr(self):
if (self._cyr_grammemes_cache is None):
cyr_grammemes = [self._LAT2CYR[g] for g in self._grammemes_tuple]
self._cyr_grammemes_cache = frozenset(cyr_grammemes)
return self._cyr_grammemes_cache
@property
def cyr_repr(self):
if (self._cyr is None):
self._cyr = self.lat2cyr(self)
return self._cyr
@classmethod
def cyr2lat(cls, tag_or_grammeme):
return _translate_tag(tag_or_grammeme, cls._CYR2LAT)
@classmethod
def lat2cyr(cls, tag_or_grammeme):
return _translate_tag(tag_or_grammeme, cls._LAT2CYR)
def __contains__(self, grammeme):
if isinstance(grammeme, (set, frozenset)):
if (grammeme <= self.grammemes):
return True
self._assert_grammemes_are_known(grammeme)
return False
if (grammeme in self.grammemes):
return True
else:
if (not self.grammeme_is_known(grammeme)):
raise ValueError(('Grammeme is unknown: %s' % grammeme))
return False
def __str__(self):
return self._str
def __repr__(self):
return ("OpencorporaTag('%s')" % self)
def __eq__(self, other):
return (self._grammemes_tuple == other._grammemes_tuple)
def __ne__(self, other):
return (self._grammemes_tuple != other._grammemes_tuple)
def __lt__(self, other):
return (self._grammemes_tuple < other._grammemes_tuple)
def __gt__(self, other):
return (self._grammemes_tuple > other._grammemes_tuple)
def __hash__(self):
return hash(self._grammemes_tuple)
def __len__(self):
return len(self._grammemes_tuple)
def __reduce__(self):
return (self.__class__, (self._str,), None)
def is_productive(self):
return (not (self.grammemes & self._NON_PRODUCTIVE_GRAMMEMES))
def _is_unknown(self):
return (self._POS not in self.PARTS_OF_SPEECH)
@classmethod
def grammeme_is_known(cls, grammeme):
cls._assert_grammemes_initialized()
return (grammeme in cls.KNOWN_GRAMMEMES)
@classmethod
def _assert_grammemes_are_known(cls, grammemes):
if (not (grammemes <= cls.KNOWN_GRAMMEMES)):
cls._assert_grammemes_initialized()
unknown = (grammemes - cls.KNOWN_GRAMMEMES)
unknown_repr = ', '.join([("'%s'" % g) for g in sorted(unknown)])
raise ValueError(('Grammemes are unknown: {%s}' % unknown_repr))
@classmethod
def _assert_grammemes_initialized(cls):
if (not cls.KNOWN_GRAMMEMES):
msg = 'The class was not properly initialized.'
raise RuntimeError(msg)
def updated_grammemes(self, required):
new_grammemes = (self.grammemes | required)
for grammeme in required:
if (not self.grammeme_is_known(grammeme)):
raise ValueError(('Unknown grammeme: %s' % grammeme))
new_grammemes -= self._GRAMMEME_INCOMPATIBLE[grammeme]
return new_grammemes
@classmethod
def fix_rare_cases(cls, grammemes):
return frozenset((cls.RARE_CASES.get(g, g) for g in grammemes))
@classmethod
def add_grammemes_to_known(cls, lat, cyr, overwrite=True):
if ((not overwrite) and (lat in cls.KNOWN_GRAMMEMES)):
return
cls.KNOWN_GRAMMEMES.add(lat)
cls._LAT2CYR[lat] = cyr
cls._CYR2LAT[cyr] = lat
@classmethod
def _init_grammemes(cls, dict_grammemes):
cls.KNOWN_GRAMMEMES = set()
cls._CYR2LAT = {}
cls._LAT2CYR = {}
for (name, parent, alias, description) in dict_grammemes:
cls.add_grammemes_to_known(name, alias)
gr = dict(((name, parent) for (name, parent, alias, description) in dict_grammemes))
children = collections.defaultdict(set)
for (index, (name, parent, alias, description)) in enumerate(dict_grammemes):
if parent:
children[parent].add(name)
if gr.get(parent, None):
children[gr[parent]].add(name)
for (grammeme, g_set) in cls._EXTRA_INCOMPATIBLE.items():
for g in g_set.copy():
g_set.update(children[g])
for (index, (name, parent, alias, description)) in enumerate(dict_grammemes):
cls._GRAMMEME_INDICES[name] = index
incompatible = cls._EXTRA_INCOMPATIBLE.get(name, set())
incompatible = ((incompatible | children[parent]) - set([name]))
cls._GRAMMEME_INCOMPATIBLE[name] = frozenset(incompatible)
@classmethod
def _from_internal_tag(cls, tag):
return tag
@classmethod
def _from_internal_grammeme(cls, grammeme):
return grammeme
def numeral_agreement_grammemes(self, num):
if (((num % 10) == 1) and ((num % 100) != 11)):
index = 0
elif (((num % 10) >= 2) and ((num % 10) <= 4) and (((num % 100) < 10) or ((num % 100) >= 20))):
index = 1
else:
index = 2
if (self.POS not in ('NOUN', 'ADJF', 'PRTF')):
return set([])
if ((self.POS == 'NOUN') and (self.case not in ('nomn', 'accs'))):
if (index == 0):
grammemes = set(['sing', self.case])
else:
grammemes = set(['plur', self.case])
elif (index == 0):
if (self.case == 'nomn'):
grammemes = self._NUMERAL_AGREEMENT_GRAMMEMES[0]
else:
grammemes = self._NUMERAL_AGREEMENT_GRAMMEMES[1]
elif ((self.POS == 'NOUN') and (index == 1)):
grammemes = self._NUMERAL_AGREEMENT_GRAMMEMES[2]
elif ((self.POS in ('ADJF', 'PRTF')) and (self.gender == 'femn') and (index == 1)):
grammemes = self._NUMERAL_AGREEMENT_GRAMMEMES[3]
else:
grammemes = self._NUMERAL_AGREEMENT_GRAMMEMES[4]
return grammemes |
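A membership sketch matching `__contains__` above: a single grammeme is a membership test, a set is a subset test, and unknown grammemes raise `ValueError` instead of returning `False`. The tag string is illustrative and assumes the grammeme registry was loaded via `_init_grammemes` (normally done by the dictionary loader).

```python
tag = OpencorporaTag('NOUN,anim,masc sing,nomn')

assert 'NOUN' in tag              # single grammeme: membership
assert {'sing', 'nomn'} in tag    # set of grammemes: subset test
assert tag.POS == 'NOUN'          # descriptor attributes select one grammeme
assert tag.case == 'nomn'

try:
    'BOGUS' in tag                # unknown grammemes raise, by design
except ValueError as exc:
    print(exc)
```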
def gat_graph_conv(x, adj, eps, kernel):
v = (eps * tf.diag_part(adj))
mask = tf.diag(tf.ones_like(v))
adj = ((mask * tf.diag(v)) + ((1.0 - mask) * adj))
y1 = K.dot(adj, x)
conv_op_y1 = tf.split(y1, 1, axis=0)
conv_op_y1 = K.concatenate(conv_op_y1, axis=1)
conv_op_y1 = K.dot(conv_op_y1, kernel)
conv_out = conv_op_y1
return conv_out |
def lazify_imports(registry: dict[(str, str)], package: str, fallback: (Callable | None)=None) -> tuple[(tuple[(str, ...)], Callable, Callable)]:
__all__ = tuple(registry.keys())
def __dir__() -> tuple[(str, ...)]:
return __all__
def __getattr__(name: str) -> Any:
if (name not in registry):
raise AttributeError
module_path = '{}.{}'.format(package, registry[name])
return import_class(module_path, name, fallback=fallback)
return (__all__, __dir__, __getattr__) |
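How a package `__init__.py` might wire this up. The package name and registry are hypothetical, and `import_class` (not shown in the snippet) is assumed to import the module path and return the named attribute.

```python
# mypkg/__init__.py (hypothetical layout)
_registry = {
    "Client": "client",        # resolves to mypkg.client.Client
    "AsyncClient": "aclient",  # resolves to mypkg.aclient.AsyncClient
}

__all__, __dir__, __getattr__ = lazify_imports(_registry, package="mypkg")

# `import mypkg; mypkg.Client` now imports mypkg.client lazily on first access,
# and unknown attributes still raise AttributeError.
```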
class YOLOv5Darknet(nn.Module):
def __init__(self, depth_multiple, width_multiple, focus, in_channels=3, bottle_depths=[3, 9, 9, 3], out_channels=[128, 256, 512, 1024], spp=[5, 9, 13], shortcut=[True, True, True, False], out_indices=(2, 3, 4), norm_type='BN', num_groups=None):
super(YOLOv5Darknet, self).__init__()
self.depth_multiple = depth_multiple
self.width_multiple = width_multiple
self.bottle_depths = bottle_depths
self.out_channels = out_channels
assert (len(self.bottle_depths) == len(self.out_channels)), 'len(bottle_depths) must equal len(out_channels)'
self.out_indices = out_indices
assert (norm_type in ('BN', 'GN')), 'norm_type must BN or GN'
_in_channels = in_channels
if (focus is not None):
c2 = focus[0]
c2 = make_divisible((c2 * self.width_multiple), 8)
self.focus = Focus(in_channels, c2, *focus[1:], activation='leaky', norm_type=norm_type, num_groups=num_groups)
_in_channels = c2
self.out_indices = []
self.m = nn.ModuleList()
for (i, depth) in enumerate(self.bottle_depths):
c2 = self.out_channels[i]
c2 = make_divisible((c2 * self.width_multiple), 8)
n = (max(round((depth * self.depth_multiple)), 1) if (depth > 1) else depth)
con3x3 = DarknetConv2D_Norm_Activation(_in_channels, c2, kernel_size=3, stride=2, activation='leaky', norm_type=norm_type, num_groups=num_groups)
self.m.append(con3x3)
_in_channels = c2
if ((i == (len(self.bottle_depths) - 1)) and (spp is not None)):
spp = SPP(_in_channels, c2, k=spp)
self.m.append(spp)
_in_channels = c2
bottlenect = BottleneckCSP(_in_channels, c2, n=n, shortcut=shortcut[i])
_in_channels = c2
self.m.append(bottlenect)
if ((i + 1) in out_indices):
self.out_indices.append((len(self.m) - 1))
self.init_weights()
def forward(self, x):
out = self.focus(x)
outs = []
for (idx, _m) in enumerate(self.m):
out = _m(out)
if (idx in self.out_indices):
outs.append(out)
if (len(outs) == 0):
outs.append(out)
return outs
def init_weights(self, pretrained=None):
initialize_weights(self) |
def test_get_vol_files_list():
train_files = get_dataset_files_list(mode='train')
assert np.all([('train' in tf) for tf in train_files])
assert np.all([('valid' not in tf) for tf in train_files])
assert np.all([('test' not in tf) for tf in train_files])
valid_files = get_dataset_files_list(mode='validate')
assert np.all([('train' not in tf) for tf in valid_files])
assert np.all([('valid' in tf) for tf in valid_files])
assert np.all([('test' not in tf) for tf in valid_files])
print('_get_vol_files_list unit tests PASSED') |
def apply_specifiers(specifiers, declaration):
for s in specifiers:
if (type(s) == StorageClassSpecifier):
if declaration.storage:
p.parser.cparser.handle_error('Declaration has more than one storage class', '???', p.lineno(1))
return
declaration.storage = s
elif (type(s) in (TypeSpecifier, StructTypeSpecifier, EnumSpecifier)):
declaration.type.specifiers.append(s)
elif (type(s) == TypeQualifier):
declaration.type.qualifiers.append(s) |
def Gtest(test_loader, model, criterion=nn.L1Loss(reduction='mean')):
model.eval()
error = 0
correct = 0
with torch.no_grad():
for data in test_loader:
data = data.to(device)
output = model(data.x, data.edge_index, data.edge_attr, data.batch)
error += (criterion(output, data.y) * data.num_graphs)
correct += float(output.argmax(dim=1).eq(data.y).sum().item())
return ((error / len(test_loader.dataset)), (correct / len(test_loader.dataset))) |
class TrainerIntegrationCommon():
def check_saved_checkpoints(self, output_dir, freq, total, is_pretrained=True):
file_list = [WEIGHTS_NAME, 'training_args.bin', 'optimizer.pt', 'scheduler.pt', 'trainer_state.json']
if is_pretrained:
file_list.append('config.json')
for step in range(freq, total, freq):
checkpoint = os.path.join(output_dir, f'checkpoint-{step}')
self.assertTrue(os.path.isdir(checkpoint))
for filename in file_list:
self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename)))
def check_best_model_has_been_loaded(self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True):
checkpoint = os.path.join(output_dir, f'checkpoint-{((total // freq) * freq)}')
log_history = TrainerState.load_from_json(os.path.join(checkpoint, 'trainer_state.json')).log_history
values = [d[metric] for d in log_history]
best_value = (max(values) if greater_is_better else min(values))
best_checkpoint = ((values.index(best_value) + 1) * freq)
checkpoint = os.path.join(output_dir, f'checkpoint-{best_checkpoint}')
if is_pretrained:
best_model = RegressionPreTrainedModel.from_pretrained(checkpoint)
best_model.to(trainer.args.device)
else:
best_model = RegressionModel()
state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME))
best_model.load_state_dict(state_dict)
best_model.to(trainer.args.device)
self.assertTrue(torch.allclose(best_model.a, trainer.model.a))
self.assertTrue(torch.allclose(best_model.b, trainer.model.b))
metrics = trainer.evaluate()
self.assertEqual(metrics[metric], best_value)
def check_trainer_state_are_the_same(self, trainer_state, trainer_state1):
state = trainer_state.copy()
state1 = trainer_state1.copy()
log_history = state.pop('log_history', None)
log_history1 = state1.pop('log_history', None)
self.assertEqual(state, state1)
skip_log_keys = ['train_runtime', 'train_samples_per_second', 'train_steps_per_second', 'train_loss']
for (log, log1) in zip(log_history, log_history1):
for key in skip_log_keys:
_ = log.pop(key, None)
_ = log1.pop(key, None)
self.assertEqual(log, log1) |
class MaxPositionSize(TradingControl):
def __init__(self, on_error, asset=None, max_shares=None, max_notional=None):
super(MaxPositionSize, self).__init__(on_error, asset=asset, max_shares=max_shares, max_notional=max_notional)
self.asset = asset
self.max_shares = max_shares
self.max_notional = max_notional
if ((max_shares is None) and (max_notional is None)):
raise ValueError('Must supply at least one of max_shares and max_notional')
if (max_shares and (max_shares < 0)):
raise ValueError('max_shares cannot be negative.')
if (max_notional and (max_notional < 0)):
raise ValueError('max_notional must be positive.')
def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
if ((self.asset is not None) and (self.asset != asset)):
return
current_share_count = portfolio.positions[asset].amount
shares_post_order = (current_share_count + amount)
too_many_shares = ((self.max_shares is not None) and (abs(shares_post_order) > self.max_shares))
if too_many_shares:
self.handle_violation(asset, amount, algo_datetime)
current_price = algo_current_data.current(asset, 'price')
value_post_order = (shares_post_order * current_price)
too_much_value = ((self.max_notional is not None) and (abs(value_post_order) > self.max_notional))
if too_much_value:
self.handle_violation(asset, amount, algo_datetime) |
class ELAN(nn.Module):
def __init__(self, c1, c2):
c_ = (c2 // 4)
super(ELAN, self).__init__()
self.conv1 = Conv(c1, c_, 1, 1)
self.conv2 = Conv(c1, c_, 1, 1)
self.conv3 = Conv(c_, c_, 3, 1)
self.conv4 = Conv(c_, c_, 3, 1)
self.conv5 = Conv(c_, c_, 3, 1)
self.conv6 = Conv(c_, c_, 3, 1)
self.conv7 = Conv(c2, c2, 1, 1)
def forward(self, x):
out1 = self.conv1(x)
x = self.conv2(x)
out2 = x
x = self.conv3(x)
x = self.conv4(x)
out3 = x
x = self.conv5(x)
x = self.conv6(x)
out4 = x
x = torch.cat([out1, out2, out3, out4], dim=1)
out = self.conv7(x)
return out |
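A quick shape check under a stated assumption: `Conv` is taken to be the usual conv-BN-activation block (a minimal stand-in is defined below). Since `c_ = c2 // 4` and four branches are concatenated, `conv7` receives exactly `c2` channels.

```python
import torch
import torch.nn as nn

class Conv(nn.Module):
    # Minimal stand-in for the Conv block ELAN assumes: conv + BN + SiLU.
    def __init__(self, c1, c2, k, s):
        super().__init__()
        self.block = nn.Sequential(
            nn.Conv2d(c1, c2, k, s, padding=k // 2, bias=False),
            nn.BatchNorm2d(c2),
            nn.SiLU(),
        )

    def forward(self, x):
        return self.block(x)

elan = ELAN(c1=64, c2=128)
out = elan(torch.randn(1, 64, 32, 32))
print(out.shape)  # torch.Size([1, 128, 32, 32]): spatial size kept, channels = c2
```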
class SOAPHandler(BaseHTTPRequestHandler):
def do_GET(self):
args = self.path[1:].split('?')
if ((self.path != '/') and (args[0] not in self.server.dispatcher.methods.keys())):
self.send_error(404, ('Method not found: %s' % args[0]))
else:
if (self.path == '/'):
response = self.server.dispatcher.wsdl()
else:
(req, res, doc) = self.server.dispatcher.help(args[0])
if ((len(args) == 1) or (args[1] == 'request')):
response = req
else:
response = res
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
self.wfile.write(response)
def do_POST(self):
request = self.rfile.read(int(self.headers.get('content-length')))
if (sys.version < '3'):
encoding = self.headers.getparam('charset')
else:
encoding = self.headers.get_param('charset')
request = request.decode(encoding)
fault = {}
response = self.server.dispatcher.dispatch(request, fault=fault)
if fault:
self.send_response(500)
else:
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
self.wfile.write(response) |
def train_net(args, model, train_dl, valid_dl, output_model_path, comment):
if args.viz:
writer = SummaryWriter(log_dir=os.path.join(output_model_path, 'log'), comment=comment)
global_step = 0
optimizer = optim.Adam(model.parameters(), lr=args.lr)
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_milestones, gamma=0.1)
classifier_criterion = nn.CrossEntropyLoss()
best_acc = 0.0
for epoch in range(args.epochs):
model.train()
epoch_loss = 0
correct = 0.0
for (i, (images, labels, idx, y_hm, gaze_img, attributes)) in enumerate(train_dl):
images = images.cuda()
labels = labels.long()
labels = labels.cuda()
y_hm = y_hm.cuda()
if (args.model_type == 'gat'):
(y_pred, _, proposalN_windows_logits, _, _, coordinates) = model(images, y_hm, status='train')
elif (args.model_type == 'kfn'):
gaze_img = gaze_img.cuda()
y_pred = model(images, gaze_img)
loss_classifier = classifier_criterion(y_pred, labels)
if (args.model_type == 'gat'):
windowscls_loss = classifier_criterion(proposalN_windows_logits, labels.unsqueeze(1).repeat(1, proposalN).view((- 1)))
if (epoch < 2):
total_loss = loss_classifier
else:
total_loss = (loss_classifier + windowscls_loss)
if ((i == 1) and args.viz):
cat_imgs = []
for (j, coordinate_ndarray) in enumerate(coordinates):
img = image_with_boxes(images[j], coordinate_ndarray)
cat_imgs.append(img)
cat_imgs = np.concatenate(cat_imgs, axis=1)
writer.add_images((('train' + '/') + 'cut_image_with_windows'), cat_imgs, epoch, dataformats='HWC')
else:
total_loss = loss_classifier
epoch_loss += total_loss.item()
pred = y_pred.max(1, keepdim=True)[1]
correct += pred.eq(labels.view_as(pred)).sum().item()
if args.viz:
writer.add_scalar('Classifier_Loss', loss_classifier.item(), global_step)
writer.add_scalar('Loss/Train', total_loss.item(), global_step)
writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], global_step)
optimizer.zero_grad()
total_loss.backward()
optimizer.step()
global_step += 1
logging.info(f'Epoch: {(epoch + 1)} Training_Loss: {(epoch_loss / len(train_dl.dataset))}')
logging.info(f'Epoch: {(epoch + 1)} Accuracy: {(correct / len(train_dl.dataset))}')
with torch.no_grad():
(val_loss, val_acc) = eval_net(model, valid_dl, classifier_criterion, args.model_type)
if args.viz:
writer.add_scalar('Validation_Loss', val_loss, global_step)
writer.add_scalar('Validation_ACC', val_acc, global_step)
model.train()
if args.scheduler:
scheduler.step()
try:
os.makedirs(output_model_path)
logger.info('Created Checkpoint directory')
except OSError:
pass
if (best_acc < val_acc):
best_acc = val_acc
torch.save(model.state_dict(), (output_model_path + f'/best.pth'))
logger.info(f'Checkpoint saved !')
logging.info(f'Best Validation_Acc_scores: {best_acc}')
if args.viz:
writer.close()
return best_acc |
@register.simple_tag
def metabase_question_embed(question_id, **kwargs):
if (not question_id):
return None
if (not settings.METABASE_SECRET_KEY):
log.warning("Metabase Secret Key is not set - Graphs won't render")
return None
payload = {'resource': {'question': question_id}, 'params': serialize_params(kwargs), 'exp': (round(time.time()) + (60 * 10))}
token = jwt.encode(payload, settings.METABASE_SECRET_KEY, algorithm='HS256')
iframe_url = (((settings.METABASE_SITE_URL + '/embed/question/') + token) + '#bordered=true&titled=false')
return render_to_string('adserver/metabase/question-iframe.html', {'iframe_url': iframe_url}) |
class MKS937B(Instrument):
ch_1 = Instrument.ChannelCreator(IonGaugeAndPressureChannel, 1)
ch_2 = Instrument.ChannelCreator(PressureChannel, 2)
ch_3 = Instrument.ChannelCreator(IonGaugeAndPressureChannel, 3)
ch_4 = Instrument.ChannelCreator(PressureChannel, 4)
ch_5 = Instrument.ChannelCreator(IonGaugeAndPressureChannel, 5)
ch_6 = Instrument.ChannelCreator(PressureChannel, 6)
def __init__(self, adapter, name='MKS 937B vacuum gauge controller', address=253, **kwargs):
super().__init__(adapter, name, includeSCPI=False, read_termination=';', write_termination=';FF', **kwargs)
self.address = address
self._re_response = re.compile(f'{self.address:03d}(?P<ack>ACK)?(?P<msg>.*)')
def _extract_reply(self, reply):
rvalue = self._re_response.search(reply)
if rvalue:
return rvalue.group('msg')
return reply
def _prepend_address(self, cmd):
return f'{self.address:03d}{cmd}'
def _check_extra_termination(self):
t = super().read_bytes(2)
if (t != b'FF'):
raise ValueError(f'unexpected termination string received {t}')
def read(self):
ret = super().read()
self._check_extra_termination()
return self._extract_reply(ret)
def write(self, command):
super().write(self._prepend_address(command))
def check_set_errors(self):
ret = super().read()
reply = self._re_response.search(ret)
if reply:
if (reply.group('ack') == 'ACK'):
self._check_extra_termination()
return []
raise ValueError(f"invalid reply '{ret}' found in check_errors")
serial = Instrument.measurement('SN?', ' Serial number of the instrument ', cast=str)
all_pressures = Instrument.measurement('PRZ?', ' Read pressures on all channels in selected units ')
combined_pressure1 = Instrument.measurement('PC1?', ' Read pressure on channel 1 and its combination sensor ')
combined_pressure2 = Instrument.measurement('PC2?', ' Read pressure on channel 2 and its combination sensor ')
unit = Instrument.control('U?', 'U!%s', 'Pressure unit used for all pressure readings from the instrument', validator=strict_discrete_set, map_values=True, values={'Torr': 'TORR', 'mBar': 'mBAR', 'Pascal': 'PASCAL', 'Micron': 'MICRON'}, check_set_errors=True) |
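A connection sketch; the resource and readings are illustrative. The controller speaks an addressed serial protocol, so `address` must match the unit's configured address, and `SerialAdapter` is the standard pymeasure serial wrapper.

```python
from pymeasure.adapters import SerialAdapter

gauge = MKS937B(SerialAdapter("/dev/ttyUSB0", baudrate=9600), address=253)

print(gauge.serial)         # instrument serial number
gauge.unit = "Torr"         # validated and mapped to the instrument's 'TORR'
print(gauge.all_pressures)  # readings from all six channels
print(gauge.ch_1.pressure)  # assuming the channel classes expose a pressure property
```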
def pip_install(path: Path, environment: Env, editable: bool=False, deps: bool=False, upgrade: bool=False) -> str:
is_wheel = (path.suffix == '.whl')
args = ['install', '--disable-pip-version-check', '--isolated', '--no-input', '--prefix', str(environment.path)]
if ((not is_wheel) and (not editable)):
args.insert(1, '--use-pep517')
if upgrade:
args.append('--upgrade')
if (not deps):
args.append('--no-deps')
if editable:
if (not path.is_dir()):
raise PoetryException('Cannot install non directory dependencies in editable mode')
args.append('-e')
args.append(str(path))
try:
return environment.run_pip(*args)
except EnvCommandError as e:
raise PoetryException(f'Failed to install {path}') from e |
class KnownValues(unittest.TestCase):
def test_kuhf_kernel(self):
self.assertAlmostEqual(kmf.e_tot, (- 4.), 8)
kmf.analyze()
def test_uhf_kernel(self):
self.assertAlmostEqual(mf.e_tot, (- 3.), 8)
mf.analyze()
def test_kuhf_vs_uhf(self):
np.random.seed(1)
k = np.random.random(3)
mf = pscf.UHF(cell, k, exxdiv='vcut_sph')
dm = mf.get_init_guess(key='1e')
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel(dm)
nao = cell.nao
kmf = pscf.KUHF(cell, [k], exxdiv='vcut_sph')
kmf.max_cycle = 1
kmf.diis = None
e2 = kmf.kernel(dm.reshape(2, 1, nao, nao))
self.assertAlmostEqual(e1, e2, 9)
self.assertAlmostEqual(e1, (- 3.), 8)
def test_init_guess_by_chkfile(self):
np.random.seed(1)
k = np.random.random(3)
mf = pscf.KUHF(cell, [k], exxdiv='vcut_sph')
mf.chkfile = tempfile.NamedTemporaryFile().name
mf.max_cycle = 1
mf.diis = None
e1 = mf.kernel()
self.assertAlmostEqual(e1, (- 3.), 7)
mf1 = pscf.UHF(cell, exxdiv='vcut_sph')
mf1.chkfile = mf.chkfile
mf1.init_guess = 'chkfile'
mf1.diis = None
mf1.max_cycle = 1
e1 = mf1.kernel()
self.assertAlmostEqual(e1, (- 3.), 7)
self.assertTrue((mf1.mo_coeff[0].dtype == np.double))
@unittest.skip('mesh not enough for density')
def test_dipole_moment(self):
dip = mf.dip_moment()
self.assertAlmostEqual(abs(dip).max(), 0, 2)
dip = kmf.dip_moment()
self.assertAlmostEqual(abs(dip).max(), 0, 2)
def test_spin_square(self):
ss = kmf.spin_square()[0]
self.assertAlmostEqual(ss, 2., 4)
def test_bands(self):
np.random.seed(1)
kpts_bands = np.random.random((1, 3))
e = mf.get_bands(kpts_bands)[0]
self.assertAlmostEqual(lib.fp(e), 0., 6)
e = kmf.get_bands(kpts_bands)[0]
self.assertAlmostEqual(lib.fp(e), (- 0.3020614), 5)
def test_small_system(self):
mol = pgto.Cell(atom='H 0 0 0;', a=[[3, 0, 0], [0, 3, 0], [0, 0, 3]], basis=[[0, [1, 1]]], spin=1, verbose=7, output='/dev/null')
mf = pscf.KUHF(mol, kpts=[[0.0, 0.0, 0.0]]).run()
self.assertAlmostEqual(mf.e_tot, (- 0.), 8)
mol = pgto.Cell(atom='He 0 0 0;', a=[[3, 0, 0], [0, 3, 0], [0, 0, 3]], basis=[[0, [1, 1]]], verbose=7, output='/dev/null')
mf = pscf.KUHF(mol, kpts=[[0.0, 0.0, 0.0]]).run()
self.assertAlmostEqual(mf.e_tot, (- 2.), 8) |
class BashhubSetupTest(unittest.TestCase):
@pytest.mark.skipif(CI_UNSUPPORTED, reason='uuid for mac address not supported on github actions')
def test_get_mac_addresss(self):
test_mac = bashhub_setup.get_mac_address()
assert (str(uuid.getnode()) == test_mac)
def test_get_mac_addresss_where_uuid_is_random(self):
uuid.getnode = randomnode
hostname_mac = bashhub_setup.get_mac_address()
assert (socket.gethostname() == hostname_mac)
@patch('bashhub.bashhub_setup.rest_client')
@patch('bashhub.bashhub_setup.input', side_effect=['sys1', 'sys2', 'sys3', 'sys4'])
def test_handle_system_information_failure(self, mock_input, mock_rest_client):
mock_rest_client.get_system_information = MagicMock()
mock_rest_client.get_system_information.return_value = None
mock_rest_client.register_system = MagicMock()
mock_rest_client.register_system.return_value = None
result = handle_system_information('some-user', 'some-password')
assert (result == (None, None))
assert (mock_rest_client.register_system.call_count == 4)
@patch('bashhub.bashhub_setup.rest_client')
@patch('bashhub.bashhub_setup.input', side_effect=['sys1', 'sys2', 'sys3', 'sys4'])
def test_handle_system_information_succeed_second(self, mock_input, mock_rest_client):
mock_rest_client.get_system_information = MagicMock()
mock_rest_client.get_system_information.return_value = None
mock_rest_client.register_system = MagicMock()
mock_rest_client.register_system.side_effect = [None, 'sys2', None, None]
mock_rest_client.login_user = MagicMock()
mock_rest_client.login_user.return_value = 'token-string'
result = handle_system_information('some-user', 'some-password')
assert (result == ('token-string', 'sys2'))
assert (mock_rest_client.register_system.call_count == 2) |
def get_tb_stats(logdir, desired_tag):
try:
logpath = newest(logdir)
with open(logpath, 'rb') as f:
data = f.read()
except FileNotFoundError:
print('Unable to find log file in ', logdir)
return
(steps, tag_values) = ([], [])
while data:
(data, event_str) = read(data)
event = event_pb2.Event()
event.ParseFromString(event_str)
if event.HasField('summary'):
for value in event.summary.value:
if value.HasField('simple_value'):
if (value.tag == desired_tag):
steps.append(event.step)
tag_values.append(value.simple_value)
return {'ts': steps, desired_tag: tag_values} |
class TestTwoCNOT(Bloq):
@cached_property
def signature(self) -> Signature:
return Signature.build(q1=1, q2=1)
def build_composite_bloq(self, bb: 'BloqBuilder', q1: 'Soquet', q2: 'Soquet') -> Dict[(str, SoquetT)]:
(q1, q2) = bb.add(CNOT(), ctrl=q1, target=q2)
(q1, q2) = bb.add(CNOT(), ctrl=q2, target=q1)
return {'q1': q1, 'q2': q2} |
def parse_args():
parser = argparse.ArgumentParser(description='Run KGAT.')
parser.add_argument('--weights_path', nargs='?', default='', help='Store model path.')
parser.add_argument('--data_path', nargs='?', default='../Data/', help='Input data path.')
parser.add_argument('--proj_path', nargs='?', default='', help='Project path.')
parser.add_argument('--dataset', nargs='?', default='yelp2018', help='Choose a dataset from {yelp2018, last-fm, amazon-book}')
parser.add_argument('--pretrain', type=int, default=0, help='0: No pretrain, -1: Pretrain with the learned embeddings, 1:Pretrain with stored models.')
parser.add_argument('--verbose', type=int, default=1, help='Interval of evaluation.')
parser.add_argument('--epoch', type=int, default=100, help='Number of epoch.')
parser.add_argument('--embed_size', type=int, default=64, help='CF Embedding size.')
parser.add_argument('--kge_size', type=int, default=64, help='KG Embedding size.')
parser.add_argument('--layer_size', nargs='?', default='[64]', help='Output sizes of every layer')
parser.add_argument('--batch_size', type=int, default=1024, help='CF batch size.')
parser.add_argument('--batch_size_kg', type=int, default=2048, help='KG batch size.')
parser.add_argument('--regs', nargs='?', default='[1e-5,1e-5,1e-2]', help='Regularization for user and item embeddings.')
parser.add_argument('--lr', type=float, default=0.0001, help='Learning rate.')
parser.add_argument('--model_type', nargs='?', default='kgat', help='Specify a loss type from {kgat, bprmf, fm, nfm, cke, cfkg}.')
parser.add_argument('--adj_type', nargs='?', default='si', help='Specify the type of the adjacency (laplacian) matrix from {bi, si}.')
parser.add_argument('--alg_type', nargs='?', default='ngcf', help='Specify the type of the graph convolutional layer from {bi, gcn, graphsage}.')
parser.add_argument('--adj_uni_type', nargs='?', default='sum', help='Specify a loss type (uni, sum).')
parser.add_argument('--gpu_id', type=int, default=0, help='0 for NAIS_prod, 1 for NAIS_concat')
parser.add_argument('--node_dropout', nargs='?', default='[0.1]', help='Keep probability w.r.t. node dropout (i.e., 1-dropout_ratio) for each deep layer. 1: no dropout.')
parser.add_argument('--mess_dropout', nargs='?', default='[0.1]', help='Keep probability w.r.t. message dropout (i.e., 1-dropout_ratio) for each deep layer. 1: no dropout.')
parser.add_argument('--Ks', nargs='?', default='[20, 40, 60, 80, 100]', help='Output sizes of every layer')
parser.add_argument('--save_flag', type=int, default=0, help='0: Disable model saver, 1: Activate model saver')
parser.add_argument('--test_flag', nargs='?', default='part', help='Specify the test type from {part, full}, indicating whether the reference is done in mini-batch')
parser.add_argument('--report', type=int, default=0, help='0: Disable performance report w.r.t. sparsity levels, 1: Show performance report w.r.t. sparsity levels')
parser.add_argument('--use_att', type=bool, default=True, help='whether using attention mechanism')
parser.add_argument('--use_kge', type=bool, default=True, help='whether using knowledge graph embedding')
parser.add_argument('--l1_flag', type=bool, default=True, help='Flase: using the L2 norm, True: using the L1 norm.')
return parser.parse_args() |
def test_fetchyaml_empty_path_raises():
context = Context({'fetchYaml': {'path': None}})
with pytest.raises(KeyInContextHasNoValueError) as err_info:
filefetcher.run_step(context)
assert (str(err_info.value) == "context['fetchYaml']['path'] must have a value for pypyr.steps.fetchyaml.") |
class FontConfigSearchResult(FontConfigPattern):
def __init__(self, fontconfig, result_pattern):
super(FontConfigSearchResult, self).__init__(fontconfig, result_pattern)
@property
def name(self):
return self._get_string(FC_FAMILY)
@property
def size(self):
return self._get_double(FC_SIZE)
@property
def bold(self):
return (self._get_integer(FC_WEIGHT) == FC_WEIGHT_BOLD)
@property
def italic(self):
return (self._get_integer(FC_SLANT) == FC_SLANT_ITALIC)
@property
def face(self):
return self._get_face(FC_FT_FACE)
@property
def file(self):
return self._get_string(FC_FILE)
def dispose(self):
self._destroy() |
def assert_table_lineage_equal(sql: str, source_tables=None, target_tables=None, dialect: str='ansi', test_sqlfluff: bool=True, test_sqlparse: bool=True):
lr = LineageRunner(sql, dialect=SQLPARSE_DIALECT)
lr_sqlfluff = LineageRunner(sql, dialect=dialect)
if test_sqlparse:
_assert_table_lineage(lr, source_tables, target_tables)
if test_sqlfluff:
_assert_table_lineage(lr_sqlfluff, source_tables, target_tables) |
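Typical use in a test, in the sqllineage style: pass the SQL plus the expected source and target table sets, and the helper checks both the sqlparse-based and the sqlfluff-based parser.

```python
def test_insert_into_select():
    assert_table_lineage_equal(
        "INSERT INTO tab1 SELECT * FROM tab2",
        source_tables={"tab2"},
        target_tables={"tab1"},
    )
```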
@pytest.mark.parametrize('username,password', users)
def test_project_create_import_post_upload_file_empty(db, client, username, password):
client.login(username=username, password=password)
url = reverse('project_create_import')
response = client.post(url, {'method': 'upload_file'})
if password:
assert (response.status_code == 400)
else:
assert (response.status_code == 302)
assert response.url.startswith('/account/login/') |
class ValidMD():
def __init__(self, filename):
self.filename = filename
self.required_user_fields = ['title', 'summary', 'image', 'author', 'tags', 'github-link', 'category']
self.optional_image_fields = ['featured_image_1', 'featured_image_2']
self.valid_tags = valid_tags
self.valid_categories = ['researchers', 'developers']
self.required_sections = ['Model Description']
self.optional_demo_link = ['demo-model-link']
def validate_tags(self, tags):
for t in tags:
if (t not in self.valid_tags):
raise ValueError('Tag {} is not valid in {}. Valid tag set is {}'.format(t, self.filename, self.valid_tags))
def validate_category(self, category):
if (category not in self.valid_categories):
raise ValueError('Category {} is not valid in {}. Choose from {}'.format(category, self.filename, self.valid_categories))
def validate_link(self, link):
try:
urlopen(link)
except HTTPError:
raise ValueError('{} is not valid url in {}'.format(link, self.filename))
def validate_image(self, image_name):
images = ([os.path.basename(i) for i in glob.glob('images/*')] + ['pytorch-logo.png', 'no-image'])
if (image_name not in images):
raise ValueError('Image {} referenced in {} not found in images/'.format(image_name, self.filename))
def validate_header(self, header):
assert (header['layout'] == 'hub_detail')
assert (header['background-class'] == 'hub-background')
assert (header['body-class'] == 'hub')
for field in self.required_user_fields:
header[field]
self.validate_tags(header['tags'])
self.validate_link(header['github-link'])
self.validate_image(header['image'])
self.validate_category(header['category'])
for field in self.optional_demo_link:
if (field in header.keys()):
self.validate_link(header[field])
for field in self.optional_image_fields:
if (field in header.keys()):
self.validate_image(header[field])
for k in header.keys():
if (not k.endswith('-link')):
self.no_extra_colon(k, header[k])
def no_extra_colon(self, field, value):
if (':' in str(value)):
raise ValueError("Remove extra ':' in field {} with value {} in file {}".format(field, value, self.filename))
def validate_markdown(self, markdown):
m = mistune.create_markdown(renderer=mistune.AstRenderer())
for block in m(markdown):
if (block['type'] == 'heading'):
text_children = [c for c in block['children'] if (c['type'] == 'text')]
for c in text_children:
assert (not c['text'].endswith(':'))
if (c['text'] in self.required_sections):
self.required_sections.remove(c['text'])
try:
assert (len(self.required_sections) == 0)
except AssertionError as e:
print('Missing required sections: {}'.format(self.required_sections))
raise e
def check_markdown_file(self):
print('Checking {}...'.format(self.filename))
header = []
markdown = []
header_read = False
with open(self.filename, 'r') as f:
for line in f:
if line.startswith('---'):
header_read = (not header_read)
continue
if (header_read == True):
header += [line]
else:
markdown += [line]
header = yaml.safe_load(''.join(header))
assert header, 'Failed to parse a valid yaml header'
self.validate_header(header)
markdown = ''.join(markdown)
self.validate_markdown(markdown) |
class ExploreModule():
def __init__(self, params, num_envs):
self.params = params
actions = ['STOP', 'MOVE_FORWARD', 'TURN_LEFT', 'TURN_RIGHT', 'LOOK_UP', 'LOOK_DOWN', 'GRAB_RELEASE']
self.action_mapping = {action: idx for (idx, action) in enumerate(actions)}
self.num_envs = num_envs
large_map_range = 100.0
self.occ_map_scale = ((0.1 * ((2 * large_map_range) + 1)) / params.highres_occ_map_size)
self.frontier_agent = FrontierAgent({'forward': 1, 'left': 2, 'right': 3, 'stop': 0}, 'habitat', self.occ_map_scale, show_animation=False, dilate_occupancy=True, max_time_per_target=30)
self.obs_odometer = torch.zeros(num_envs, 4)
self.delta_ego = torch.zeros(num_envs, 4)
self.seen_area = torch.zeros(num_envs)
self._steps_since_new_area = torch.zeros(num_envs)
def to(self, device):
self.obs_odometer = self.obs_odometer.to(device)
self.delta_ego = self.delta_ego.to(device)
self.seen_area = self.seen_area.to(device)
self._steps_since_new_area = self._steps_since_new_area.to(device)
def reset(self):
self.obs_odometer.fill_(0)
self.delta_ego.fill_(0)
self.seen_area.fill_(0)
self._steps_since_new_area.fill_(0)
def steps_since_new_area(self):
return self._steps_since_new_area[0].item()
def reset_steps_since_new_area(self):
self._steps_since_new_area.fill_(0)
def update(self, obs):
batch_size = self.delta_ego.shape[0]
for i in range(batch_size):
seen_area = obs['seen_area'][i][0]
if math.isclose(self.seen_area[i], seen_area):
self._steps_since_new_area[i] += 1
else:
self.seen_area[i] = seen_area
self._steps_since_new_area[i] = 0
if (self.params.name == 'frontier'):
obs_odometer_curr = process_odometer(obs['delta'])
self.delta_ego = compute_egocentric_coors(obs_odometer_curr, self.obs_odometer, self.occ_map_scale)
for i in range(batch_size):
if (obs['new_episode'][i] == 1):
self.obs_odometer[i] = obs_odometer_curr[i]
else:
self.obs_odometer[i] += obs_odometer_curr[i]
def _act_forward_right(self, obs):
if obs['cos_eor'][0]['is_collided']:
action = self.action_mapping['TURN_RIGHT']
else:
action = self.action_mapping['MOVE_FORWARD']
return action
def _act_frontier(self, obs):
occ_map = asnumpy(obs['coarse_occupancy'].cpu()).astype(np.uint8)
collision = asnumpy(obs['collision'].cpu())
delta_ego = asnumpy(self.delta_ego.cpu())
batch_size = occ_map.shape[0]
action = np.zeros(batch_size, dtype=int)
for i in range(batch_size):
action[i] = self.frontier_agent.act(occ_map[i], delta_ego[i], collision[i][0])
return action[0]
def act(self, obs):
if (self.params.name == 'random'):
action = random.choice(list(self.action_mapping.values())[1:4])
elif (self.params.name == 'forward_right'):
action = self._act_forward_right(obs)
elif (self.params.name == 'frontier'):
action = self._act_frontier(obs)
else:
raise ValueError
return {'action': action} |
def test_array_array():
from sys import byteorder
e = ('<' if (byteorder == 'little') else '>')
arr = m.create_array_array(3)
assert (str(arr.dtype) == ("{{'names':['a','b','c','d'], " + "'formats':[('S4', (3,)),('{e}i4', (2,)),('u1', (3,)),('{e}f4', (4, 2))], " + "'offsets':[0,12,20,24], 'itemsize':56}}").format(e=e))
assert (m.print_array_array(arr) == [('a={{A,B,C,D},{K,L,M,N},{U,V,W,X}},b={0,1},' + 'c={0,1,2},d={{0,1},{10,11},{20,21},{30,31}}'), ('a={{W,X,Y,Z},{G,H,I,J},{Q,R,S,T}},b={1000,1001},' + 'c={10,11,12},d={{100,101},{110,111},{120,121},{130,131}}'), ('a={{S,T,U,V},{C,D,E,F},{M,N,O,P}},b={2000,2001},' + 'c={20,21,22},d={{200,201},{210,211},{220,221},{230,231}}')])
assert (arr['a'].tolist() == [[b'ABCD', b'KLMN', b'UVWX'], [b'WXYZ', b'GHIJ', b'QRST'], [b'STUV', b'CDEF', b'MNOP']])
assert (arr['b'].tolist() == [[0, 1], [1000, 1001], [2000, 2001]])
assert (m.create_array_array(0).dtype == arr.dtype) |
def logDictionary(f, d, levels, indent=0):
for (key, value) in d.items():
f.write(((((('\t' * indent) + str(levels[indent])) + ': ') + str(key)) + '\n'))
if isinstance(value, dict):
logDictionary(f, value, levels, (indent + 1))
else:
f.write(((((('\t' * (indent + 1)) + str(levels[(indent + 1)])) + ': ') + str(value)) + '\n')) |
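# A minimal usage sketch for logDictionary; `levels` supplies one label per
# nesting depth, and the buffer and data below are hypothetical.
import io

levels = ['section', 'key', 'value']
buf = io.StringIO()
logDictionary(buf, {'net': {'lr': 0.01}}, levels)
print(buf.getvalue())  # section: net / \tkey: lr / \t\tvalue: 0.01 |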
class EmbeddingCollectionTest(unittest.TestCase):
def _test_ec(self, tables: List[EmbeddingConfig], features: KeyedJaggedTensor, quant_type: torch.dtype=torch.qint8, output_type: torch.dtype=torch.float, quant_state_dict_split_scale_bias: bool=False) -> None:
ec = EmbeddingCollection(tables=tables)
if quant_state_dict_split_scale_bias:
quant_prep_enable_quant_state_dict_split_scale_bias(ec)
embeddings = ec(features)
ec.qconfig = torch.quantization.QConfig(activation=torch.quantization.PlaceholderObserver.with_args(dtype=output_type), weight=torch.quantization.PlaceholderObserver.with_args(dtype=quant_type))
qec = QuantEmbeddingCollection.from_float(ec)
quantized_embeddings = qec(features)
self.assertEqual(list(quantized_embeddings.values())[0].values().dtype, output_type)
self.assertEqual(embeddings.keys(), quantized_embeddings.keys())
for key in embeddings.keys():
self.assertEqual(embeddings[key].values().size(), quantized_embeddings[key].values().size())
self.assertTrue(torch.allclose(embeddings[key].values().cpu().float(), quantized_embeddings[key].values().cpu().float(), atol=1))
state_dict = ec.state_dict()
quantized_state_dict = qec.state_dict()
self.assertEqual(state_dict.keys(), quantized_state_dict.keys())
@given(data_type=st.sampled_from([DataType.FP32, DataType.INT8]), quant_type=st.sampled_from([torch.half, torch.qint8]), output_type=st.sampled_from([torch.half, torch.float]), quant_state_dict_split_scale_bias=st.booleans())
@settings(verbosity=Verbosity.verbose, max_examples=2, deadline=None)
def test_ec(self, data_type: DataType, quant_type: torch.dtype, output_type: torch.dtype, quant_state_dict_split_scale_bias: bool) -> None:
eb1_config = EmbeddingConfig(name='t1', embedding_dim=16, num_embeddings=10, feature_names=['f1'], data_type=data_type)
eb2_config = EmbeddingConfig(name='t2', embedding_dim=16, num_embeddings=10, feature_names=['f2'], data_type=data_type)
features = KeyedJaggedTensor(keys=['f1', 'f2'], values=torch.as_tensor([0, 1]), lengths=torch.as_tensor([1, 1]))
self._test_ec(tables=[eb1_config, eb2_config], features=features, quant_state_dict_split_scale_bias=quant_state_dict_split_scale_bias)
def test_shared_tables(self) -> None:
eb_config = EmbeddingConfig(name='t1', embedding_dim=16, num_embeddings=10, feature_names=['f1', 'f2'])
features = KeyedJaggedTensor(keys=['f1', 'f2'], values=torch.as_tensor([0, 1]), lengths=torch.as_tensor([1, 1]))
self._test_ec([eb_config], features)
def test_shared_features(self) -> None:
eb1_config = EmbeddingConfig(name='t1', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
eb2_config = EmbeddingConfig(name='t2', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
features = KeyedJaggedTensor(keys=['f1'], values=torch.as_tensor([0, 1]), lengths=torch.as_tensor([1, 1]))
self._test_ec([eb1_config, eb2_config], features)
def test_different_quantization_dtype_per_ec_table(self) -> None:
class TestModule(torch.nn.Module):
def __init__(self, m: torch.nn.Module) -> None:
super().__init__()
self.m = m
eb1_config = EmbeddingConfig(name='t1', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
eb2_config = EmbeddingConfig(name='t2', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
ec = EmbeddingCollection(tables=[eb1_config, eb2_config])
model = TestModule(ec)
qconfig_spec_keys: List[Type[torch.nn.Module]] = [EmbeddingCollection]
quant_mapping: Dict[(Type[torch.nn.Module], Type[torch.nn.Module])] = {EmbeddingCollection: QuantEmbeddingCollection}
trec_infer.modules.quantize_embeddings(model, dtype=torch.int8, additional_qconfig_spec_keys=qconfig_spec_keys, additional_mapping=quant_mapping, inplace=True, per_table_weight_dtype={'t1': torch.float16})
configs = model.m.embedding_configs()
self.assertEqual(len(configs), 2)
self.assertNotEqual(configs[0].name, configs[1].name)
for config in configs:
if (config.name == 't1'):
self.assertEqual(config.data_type, DataType.FP16)
else:
self.assertEqual(config.name, 't2')
self.assertEqual(config.data_type, DataType.INT8)
def test_different_quantization_dtype_per_ebc_table(self) -> None:
class TestModule(torch.nn.Module):
def __init__(self, m: torch.nn.Module) -> None:
super().__init__()
self.m = m
eb1_config = EmbeddingBagConfig(name='t1', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
eb2_config = EmbeddingBagConfig(name='t2', embedding_dim=16, num_embeddings=10, feature_names=['f1'])
ebc = EmbeddingBagCollection(tables=[eb1_config, eb2_config])
model = TestModule(ebc)
trec_infer.modules.quantize_embeddings(model, dtype=torch.int8, inplace=True, per_table_weight_dtype={'t1': torch.float16})
configs = model.m.embedding_bag_configs()
self.assertEqual(len(configs), 2)
self.assertNotEqual(configs[0].name, configs[1].name)
for config in configs:
if (config.name == 't1'):
self.assertEqual(config.data_type, DataType.FP16)
else:
self.assertEqual(config.name, 't2')
self.assertEqual(config.data_type, DataType.INT8) |
@pytest.mark.parametrize('M, p, size', [(np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), None), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), []), (np.array(10, dtype=np.int64), np.array(0.5, dtype=config.floatX), [2, 3]), (np.full((1, 2), 10, dtype=np.int64), np.array(0.5, dtype=config.floatX), None)])
def test_nbinom_samples(M, p, size):
compare_sample_values(nbinom, M, p, size=size, test_fn=(lambda *args, size=None, random_state=None, **kwargs: nbinom.rng_fn(random_state, *(args + (size,))))) |
def _extraPayloadCheck(params, dir):
if ('utilityburst' in params):
try:
f = open(('%s/config.xml' % dir), 'r')
try:
for line in f.readlines():
if (re.search('PCHEAP_CONFIG_LOADED_WITH_UTILITY_BURST', line) is not None):
return True
finally:
f.close()
except (IOError, OSError):
pass
return False
return True
class FC3_Url(KickstartCommand):
removedKeywords = KickstartCommand.removedKeywords
removedAttrs = KickstartCommand.removedAttrs
def __init__(self, writePriority=0, *args, **kwargs):
KickstartCommand.__init__(self, writePriority, *args, **kwargs)
self.url = kwargs.get('url', None)
self.op = self._getParser()
def __eq__(self, other):
if (not other):
return False
return (self.url == other.url)
def __ne__(self, other):
return (not (self == other))
def __str__(self):
retval = KickstartCommand.__str__(self)
if self.seen:
retval += ('# Use network installation\nurl --url="%s"\n' % self.url)
return retval
def _getParser(self):
op = KSOptionParser(prog='url', description='\n Install from an installation tree on a remote server\n via FTP or HTTP.', version=FC3)
op.add_argument('--url', required=True, version=FC3, help='\n The URL to install from. Variable substitution is done\n for $releasever and $basearch in the url.')
return op
def parse(self, args):
ns = self.op.parse_args(args=args, lineno=self.lineno)
self.set_to_self(ns)
return self |
def get_annot(t) -> dict:
if is_generic(t):
origin = getattr(t, '__origin__', None)
if (origin is not None):
origin_annotations = get_annots(origin)
args = t.__args__
params = origin.__parameters__
param_to_args = dict(zip(params, args))
return {k: (param_to_args[v] if (v in param_to_args) else v) for (k, v) in origin_annotations.items()}
mapping = generate_mapping(t)
return {k: (mapping[v.__name__] if (v.__name__ in mapping) else v) for (k, v) in get_annots(t).items()}
return get_annots(t) |
def performance_to_table(role_id_prec, role_id_rec, role_id_f, role_prec, role_rec, role_f, role_ner_prec, role_ner_rec, role_ner_f, role_cls_ner_prec, role_cls_ner_rec, role_cls_ner_f):
return pd.DataFrame({'Role Identification': [(role_id_prec * 100.0), (role_id_rec * 100.0), (role_id_f * 100.0)], 'Role Classification': [(role_prec * 100.0), (role_rec * 100.0), (role_f * 100.0)], 'Role Identification + NER': [(role_ner_prec * 100.0), (role_ner_rec * 100.0), (role_ner_f * 100.0)], 'Role Classification + NER': [(role_cls_ner_prec * 100.0), (role_cls_ner_rec * 100.0), (role_cls_ner_f * 100.0)]}, index=['Precision', 'Recall', 'F1']) |
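# Hypothetical call illustrating the resulting table layout; the metric values
# are made up and pandas (`pd`) is assumed to be imported alongside the function.
table = performance_to_table(0.81, 0.77, 0.79, 0.74, 0.7, 0.72, 0.78, 0.75, 0.76, 0.71, 0.68, 0.69)
print(table.round(1))  # rows Precision/Recall/F1, one column per role metric |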
def gen_w(params):
worker_script = f'''#!/bin/bash -ex
exec > >(tee /var/log/user-command.log|logger -t user-data -s 2>/dev/console) 2>&1
sudo yum install tc -y
git clone
cd wondershaper
sudo ./wondershaper -a eth0 -u {(params['up'] * 1024)} -d {(params['down'] * 1024)}
cd ..
git clone {repo_path}
cd DeDLOC
pip install -e ./src
cd albert
mkdir -p ~/data
wget -qO- {data_path} | tar xzf -
sh -c 'cat <<"EOF" >> ~/.netrc
machine api.wandb.ai
login user
password {WandB_API_key}
EOF'
ulimit -n 4096
WANDB_PROJECT={experiment_name} HIVEMIND_THREADS=128 python run_trainer.py --output_dir ./outputs --overwrite_output_dir --logging_dir ./logs --logging_first_step --logging_steps 100 --initial_peers {coordinator_endpoint} --run_name aws_worker --experiment_prefix {experiment_name} --seed 42 --client_mode False --averaging_timeout 120 --bandwidth {params['up']}
'''
return worker_script |
def test_TrafficSignalState():
tss = _TrafficSignalState('ID_1', 'Signal_State')
tss2 = _TrafficSignalState('ID_1', 'Signal_State')
tss3 = _TrafficSignalState('ID_2', 'Signal_State')
prettyprint(tss.get_element())
assert (tss == tss2)
assert (tss != tss3)
tss4 = _TrafficSignalState.parse(tss.get_element())
assert (tss4 == tss)
assert (version_validation('TrafficSignalState', tss, 0) == ValidationResponse.OK)
assert (version_validation('TrafficSignalState', tss, 1) == ValidationResponse.OK)
assert (version_validation('TrafficSignalState', tss, 2) == ValidationResponse.OK) |
def test_show_benchmark(benchmark, tabbed_browser, qtbot, mode_manager):
tab = tabbed_browser.widget.tabs[0]
with qtbot.wait_signal(tab.load_finished):
tab.load_url(QUrl('qute://testdata/data/hints/benchmark.html'))
manager = qutebrowser.browser.hints.HintManager(win_id=0)
def bench():
with qtbot.wait_signal(mode_manager.entered):
manager.start()
with qtbot.wait_signal(mode_manager.left):
mode_manager.leave(usertypes.KeyMode.hint)
benchmark(bench) |
class ContractNotificationToPSFTests(TestCase):
def setUp(self):
self.notification = notifications.ContractNotificationToPSF()
self.contract = baker.make_recipe('sponsors.tests.awaiting_signature_contract', _fill_optional=['document'], _create_files=True)
self.subject_template = 'sponsors/email/psf_contract_subject.txt'
self.content_template = 'sponsors/email/psf_contract.txt'
def test_send_email_using_correct_templates(self):
context = {'contract': self.contract}
expected_subject = render_to_string(self.subject_template, context).strip()
expected_content = render_to_string(self.content_template, context).strip()
self.notification.notify(contract=self.contract)
self.assertTrue(mail.outbox)
email = mail.outbox[0]
self.assertEqual(expected_subject, email.subject)
self.assertEqual(expected_content, email.body)
self.assertEqual(settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL, email.from_email)
self.assertEqual([settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL], email.to)
def test_attach_contract_pdf(self):
self.assertTrue(self.contract.document.name)
with self.contract.document.open('rb') as fd:
expected_content = fd.read()
self.assertTrue(expected_content)
self.contract.refresh_from_db()
self.notification.notify(contract=self.contract)
email = mail.outbox[0]
self.assertEqual(len(email.attachments), 1)
(name, content, mime) = email.attachments[0]
self.assertEqual(name, 'Contract.pdf')
self.assertEqual(mime, 'application/pdf')
self.assertEqual(content, expected_content) |
class MutationExportData():
def __init__(self):
self.reference = 1
self.mutants = {}
def formatMutants(self):
mutationLines = []
if self.mutants:
for mutantReference in sorted(self.mutants):
mutant = self.mutants[mutantReference]
mutationLines.append(renderMutant(mutant, firstPrefix='[{}] '.format(mutantReference), prefix=' '))
return '\n'.join(mutationLines) |
class PatternLexer(Scanner):
def __init__(self, s):
self.string = s
Scanner.__init__(self, [('([^<>|\\\\]|\\\\.)+', self.text), ('\\|\\||[<>|]', self.table)])
def text(self, scanner, string):
return PatternLexeme(TEXT, re.sub('\\\\([|<>\\\\])', '\\1', string))
def table(self, scanner, string):
return PatternLexeme({'||': DISJ, '|': COND, '<': OPEN, '>': CLOSE}[string], string)
def __iter__(self):
s = self.scan(self.string)
if (s[1] != ''):
raise LexerError('characters left over in string')
else:
return iter((s[0] + [PatternLexeme(EOF, '')])) |
class TabletCanvas(EventDispatcher):
def __init__(self, window):
self.window = window
def close(self):
raise NotImplementedError('abstract')
if _is_pyglet_doc_run:
def on_enter(self, cursor):
"""A cursor entered the proximity of the window."""
def on_leave(self, cursor):
"""A cursor left the proximity of the window."""
def on_motion(self, cursor, x, y, pressure, tilt_x, tilt_y, buttons):
"""The cursor moved on the tablet surface, with the given pressure and tilt.""" |
class QKTCallback():
def __init__(self) -> None:
self._data = [[] for i in range(5)]
def callback(self, x0, x1=None, x2=None, x3=None, x4=None):
self._data[0].append(x0)
self._data[1].append(x1)
self._data[2].append(x2)
self._data[3].append(x3)
self._data[4].append(x4)
def get_callback_data(self):
return self._data
def clear_callback_data(self):
self._data = [[] for i in range(5)] |
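# Usage sketch: QKTCallback buffers up to five positional values per optimizer
# step (the hook signature used e.g. by qiskit-machine-learning's
# QuantumKernelTrainer); the numbers below are hypothetical.
cb = QKTCallback()
cb.callback(0, [0.1, 0.2], 1.5)  # step, parameters, loss; unused slots stay None
cb.callback(1, [0.3, 0.1], 1.2)
losses = cb.get_callback_data()[2]  # [1.5, 1.2]
cb.clear_callback_data() |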
def _retrieval_precision_update_input_check(input: torch.Tensor, target: torch.Tensor, num_tasks: int=1, indexes: Optional[torch.Tensor]=None, num_queries: int=1) -> None:
if (input.shape != target.shape):
raise ValueError(f'input and target must be of the same shape, got input.shape={input.shape} and target.shape={target.shape}.')
if (num_tasks == 1):
if (input.dim() != 1):
raise ValueError(f'input and target should be one dimensional tensors, got input and target dimensions={input.dim()}.')
elif ((input.dim() != 2) or (input.shape[0] != num_tasks)):
raise ValueError(f'input and target should be two dimensional tensors with {num_tasks} rows, got input and target shape={input.shape}.') |
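# Behaviour sketch with hypothetical tensors: a matching 1-D pair passes the
# single-task checks, while a shape mismatch raises.
import torch

_retrieval_precision_update_input_check(torch.rand(4), torch.randint(2, (4,)))  # passes
try:
    _retrieval_precision_update_input_check(torch.rand(4), torch.randint(2, (3,)))
except ValueError as err:
    print(err)  # input and target must be of the same shape, ... |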
class AxialAttention(nn.Module):
def __init__(self, dim, num_dimensions=2, heads=8, dim_heads=None, dim_index=(- 1), sum_axial_out=True):
assert ((dim % heads) == 0), 'hidden dimension must be divisible by number of heads'
super().__init__()
self.dim = dim
self.total_dimensions = (num_dimensions + 2)
self.dim_index = (dim_index if (dim_index > 0) else (dim_index + self.total_dimensions))
attentions = []
for permutation in calculate_permutations(num_dimensions, dim_index):
attentions.append(PermuteToFrom(permutation, SelfAttention(dim, heads, dim_heads)))
self.axial_attentions = nn.ModuleList(attentions)
self.sum_axial_out = sum_axial_out
def forward(self, x):
assert (len(x.shape) == self.total_dimensions), 'input tensor does not have the correct number of dimensions'
assert (x.shape[self.dim_index] == self.dim), 'input tensor does not have the correct input dimension'
if self.sum_axial_out:
return sum(map((lambda axial_attn: axial_attn(x)), self.axial_attentions))
out = x
for axial_attn in self.axial_attentions:
out = axial_attn(out)
return out |
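# Usage sketch, assuming the helpers (calculate_permutations, PermuteToFrom,
# SelfAttention) behave as in lucidrains' axial_attention package:
import torch

attn = AxialAttention(dim=64, num_dimensions=2, heads=8, dim_index=1)
img = torch.randn(1, 64, 32, 32)  # (batch, channels, height, width)
out = attn(img)                   # shape is preserved: (1, 64, 32, 32) |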
class SideBlock(nn.Module):
def __init__(self, in_c, out_c, conv2d=None, norm_layer=None, kernel_size=3, padding=1, stride=1, non_linear=nn.ReLU):
super(SideBlock, self).__init__()
if (conv2d is None):
conv2d = nn.Conv2d
if (norm_layer is None):
norm_layer = Identity
if (non_linear is None):
non_linear = Identity
self.main_path = nn.Sequential(conv2d(in_c, out_c, kernel_size=kernel_size, padding=padding, stride=stride), norm_layer(out_c))
self.non_linear = non_linear()
def forward(self, input_var):
output_var = self.main_path(input_var)
return (self.non_linear(output_var), output_var) |
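# Usage sketch: SideBlock returns both the activated output and the raw
# pre-activation feature map; nn.Identity stands in for the (assumed)
# Identity helper the defaults refer to.
import torch
import torch.nn as nn

block = SideBlock(3, 16, norm_layer=nn.Identity)
y, pre_act = block(torch.randn(1, 3, 32, 32))  # both (1, 16, 32, 32) |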
class Handle():
tip = ''
def __init__(self, window, player):
self.win = window
self.player = player
def hit_test(self, x, y, z):
(dx, dy, dz) = [(a - b) for (a, b) in zip(self.pos(), (x, y, z))]
if ((((dx * dx) + (dy * dy)) + (dz * dz)) < (self.radius * self.radius)):
return ((- dx), (- dy), (- dz))
def pos(self):
raise NotImplementedError()
def delete(self):
pass
def update_shapes(self):
pass
def begin_drag(self, offset):
self.offset = offset
return self
def on_mouse_press(self, x, y, button, modifiers):
self.win.remove_handlers(self)
def on_mouse_release(self, x, y, button, modifiers):
self.win.remove_handlers(self) |
class CarliniLID():
def __init__(self, sess, model, image_size, num_channels, num_labels, batch_size=100, confidence=L2_CONFIDENCE, targeted=L2_TARGETED, learning_rate=L2_LEARNING_RATE, binary_search_steps=L2_BINARY_SEARCH_STEPS, max_iterations=L2_MAX_ITERATIONS, abort_early=L2_ABORT_EARLY, initial_const=L2_INITIAL_CONST):
self.model = model
self.sess = sess
self.image_size = image_size
self.num_channels = num_channels
self.num_labels = num_labels
self.TARGETED = targeted
self.LEARNING_RATE = learning_rate
self.MAX_ITERATIONS = max_iterations
self.BINARY_SEARCH_STEPS = binary_search_steps
self.ABORT_EARLY = abort_early
self.CONFIDENCE = confidence
self.initial_const = initial_const
self.batch_size = batch_size
self.repeat = (binary_search_steps >= 10)
shape = (self.batch_size, self.image_size, self.image_size, self.num_channels)
modifier = tf.Variable(np.zeros(shape, dtype=np.float32))
self.max_mod = tf.reduce_max(modifier)
self.timg = tf.Variable(np.zeros(shape), dtype=tf.float32)
self.tlab = tf.Variable(np.zeros((self.batch_size, self.num_labels)), dtype=tf.float32)
self.const = tf.Variable(np.zeros(self.batch_size), dtype=tf.float32)
self.assign_timg = tf.placeholder(tf.float32, shape)
self.assign_tlab = tf.placeholder(tf.float32, (self.batch_size, self.num_labels))
self.assign_const = tf.placeholder(tf.float32, [self.batch_size])
self.newimg = (tf.tanh((modifier + self.timg)) / 2)
self.output = self.model(self.newimg)
self.l2dist = tf.reduce_sum(tf.square((self.newimg - (tf.tanh(self.timg) / 2))), [1, 2, 3])
real = tf.reduce_sum((self.tlab * self.output), 1)
other = tf.reduce_max((((1 - self.tlab) * self.output) - (self.tlab * 10000)), 1)
if self.TARGETED:
loss1 = tf.maximum(0.0, ((other - real) + self.CONFIDENCE))
else:
loss1 = tf.maximum(0.0, ((real - other) + self.CONFIDENCE))
self.clean_logits = tf.placeholder(tf.float32, (1, self.batch_size, None))
loss_lid = lid_adv_term(self.clean_logits, self.output, self.batch_size)
self.loss2 = tf.reduce_sum(self.l2dist)
self.loss1 = tf.reduce_sum((self.const * (loss1 + loss_lid)))
self.loss = (self.loss1 + self.loss2)
self.grads = tf.reduce_max(tf.gradients(self.loss, [modifier]))
start_vars = set((x.name for x in tf.global_variables()))
optimizer = tf.train.AdamOptimizer(self.LEARNING_RATE)
self.train = optimizer.minimize(self.loss, var_list=[modifier])
end_vars = tf.global_variables()
new_vars = [x for x in end_vars if (x.name not in start_vars)]
self.setup = []
self.setup.append(self.timg.assign(self.assign_timg))
self.setup.append(self.tlab.assign(self.assign_tlab))
self.setup.append(self.const.assign(self.assign_const))
self.init = tf.variables_initializer(var_list=([modifier] + new_vars))
def attack(self, X, Y):
nb_classes = Y.shape[1]
y_target = np.copy(Y)
if self.TARGETED:
for i in range(Y.shape[0]):
current = int(np.argmax(Y[i]))
target = np.random.choice(other_classes(nb_classes, current))
y_target[i] = np.eye(nb_classes)[target]
X_adv = np.zeros_like(X)
for i in tqdm(range(0, X.shape[0], self.batch_size)):
start = i
end = (i + self.batch_size)
end = np.minimum(end, X.shape[0])
X_adv[start:end] = self.attack_batch(X[start:end], y_target[start:end])
return X_adv
def attack_batch(self, imgs, labs):
def compare(x, y):
if (not isinstance(x, (float, int, np.int64))):
x = np.copy(x)
x[y] -= self.CONFIDENCE
x = np.argmax(x)
if self.TARGETED:
return (x == y)
else:
return (x != y)
batch_size = imgs.shape[0]
imgs = np.arctanh((imgs * 1.999999))
lower_bound = np.zeros(batch_size)
CONST = (np.ones(batch_size) * self.initial_const)
upper_bound = (np.ones(batch_size) * 1e10)
o_bestl2 = ([1e10] * batch_size)
o_bestscore = ([(- 1)] * batch_size)
o_bestattack = ([np.zeros(imgs[0].shape)] * batch_size)
for outer_step in range(self.BINARY_SEARCH_STEPS):
self.sess.run(self.init)
batch = imgs[:batch_size]
batchlab = labs[:batch_size]
bestl2 = ([1e10] * batch_size)
bestscore = ([(- 1)] * batch_size)
if ((self.repeat == True) and (outer_step == (self.BINARY_SEARCH_STEPS - 1))):
CONST = upper_bound
self.sess.run(self.setup, {self.assign_timg: batch, self.assign_tlab: batchlab, self.assign_const: CONST})
c_logits = self.sess.run([self.output], feed_dict={K.learning_phase(): 0})
prev = 1000000.0
for iteration in range(self.MAX_ITERATIONS):
(_, l, l2s, scores, nimg) = self.sess.run([self.train, self.loss, self.l2dist, self.output, self.newimg], feed_dict={K.learning_phase(): 0, self.clean_logits: c_logits})
if (self.ABORT_EARLY and ((iteration % (self.MAX_ITERATIONS // 10)) == 0)):
if (l > (prev * 0.9999)):
break
prev = l
for (e, (l2, sc, ii)) in enumerate(zip(l2s, scores, nimg)):
if ((l2 < bestl2[e]) and compare(sc, np.argmax(batchlab[e]))):
bestl2[e] = l2
bestscore[e] = np.argmax(sc)
if ((l2 < o_bestl2[e]) and compare(sc, np.argmax(batchlab[e]))):
o_bestl2[e] = l2
o_bestscore[e] = np.argmax(sc)
o_bestattack[e] = ii
for e in range(batch_size):
if (compare(bestscore[e], np.argmax(batchlab[e])) and (bestscore[e] != (- 1))):
upper_bound[e] = min(upper_bound[e], CONST[e])
if (upper_bound[e] < 1e9):
CONST[e] = ((lower_bound[e] + upper_bound[e]) / 2)
else:
lower_bound[e] = max(lower_bound[e], CONST[e])
if (upper_bound[e] < 1e9):
CONST[e] = ((lower_bound[e] + upper_bound[e]) / 2)
else:
CONST[e] *= 10
o_bestl2 = np.array(o_bestl2)
print(('success rate: %.4f' % (1 - (np.sum((o_bestl2 == 1e10)) / self.batch_size))))
return o_bestattack |
def main(args):
checkpoint = args.checkpoint
collection = args.collection
experiment_dir = args.expdir
k = 5
(searcher, index_path) = build_index_and_init_searcher(checkpoint, collection, experiment_dir)
squad = load_dataset('squad')
squad_dev = get_squad_split(squad)
question = squad_dev[10].question
all_tests_passed = True
results = searcher.search(question, k=k)
top_k_ids = []
for (passage_id, passage_rank, passage_score) in zip(*results):
top_k_ids.append(passage_id)
print(f'Initial search top results: {top_k_ids}')
config = ColBERTConfig(doc_maxlen=doc_maxlen, nbits=nbits, root=experiment_dir, experiment=experiment)
index_updater = IndexUpdater(config, searcher, checkpoint)
n = 1
index_updater.remove(top_k_ids[:n])
results = searcher.search(question, k=k)
top_k_ids_after = []
for (passage_id, passage_rank, passage_score) in zip(*results):
top_k_ids_after.append(passage_id)
print(f'Top results after removing the top passage: {top_k_ids_after}')
if ((top_k_ids[n:] == top_k_ids_after[:(- n)]) and (top_k_ids_after[(- n):] != top_k_ids[:n])):
print('REMOVAL SUCCEEDED')
else:
print(top_k_ids)
print(top_k_ids_after)
print('REMOVAL FAILED!!!')
all_tests_passed = False
passage_removed = 'Diego on May 24, 1984 during their May 23-25, 1984 meetings in Washington, D.C. This was the first Super Bowl to be played at Jack Murphy Stadium (now currently known as Qualcomm Stadium) in San Diego, California. Fourteen cities were part of the bidding process, which was scheduled to award four Super Bowls (XXI, XXII, XXIII, and XXIV). The bidding cities included: Anaheim, Detroit, Houston, Jacksonville, Miami, Minneapolis, New Orleans, Pasadena, Philadelphia, San Francisco, San Diego, Seattle, Tampa, and Tempe. The Philadelphia host committee assembled what was considered a strong, but long-shot bid, hoping to win the first outdoor Super'
new_pids = index_updater.add([passage_removed])
results = searcher.search(question, k=k)
top_k_ids_after_append = []
for (passage_id, passage_rank, passage_score) in zip(*results):
top_k_ids_after_append.append(passage_id)
print(f'Top results after appending back the top passage: {top_k_ids_after_append}')
if ((top_k_ids[1:] == top_k_ids_after_append[1:]) and (top_k_ids_after_append[0] == new_pids[0])):
print('REAPPEND SUCCEEDED')
else:
print('REAPPEND FAILED!!!')
all_tests_passed = False
new_pids.extend(index_updater.add([passage_removed, passage_removed, passage_removed, passage_removed]))
results = searcher.search(question, k=k)
top_k_ids_after_append = []
for (passage_id, passage_rank, passage_score) in zip(*results):
top_k_ids_after_append.append(passage_id)
print(f'Top results after appending 4 more copies of the top passage: {top_k_ids_after_append}')
if (set(top_k_ids_after_append) == set(new_pids)):
print('REAPPEND 4 more SUCCEEDED')
else:
print('REAPPEND 4 more FAILED!!!')
all_tests_passed = False
index_updater.persist_to_disk()
config = ColBERTConfig(root=experiment_dir, experiment=experiment)
searcher = Searcher(index=experiment, config=config)
results = searcher.search(question, k=k)
top_k_ids_reload = []
for (passage_id, passage_rank, passage_score) in zip(*results):
top_k_ids_reload.append(passage_id)
print(f'Top results after appending 4 more copies of the top passage and reloading: {top_k_ids_reload}')
if (set(top_k_ids_reload) == set(new_pids)):
print('RELOAD SUCCEEDED')
else:
print('RELOAD FAILED!!!')
all_tests_passed = False
if all_tests_passed:
print('ALL TESTS PASSED')
else:
print('SOME TESTS FAILED!!!') |
class ZeroconfDevice(object):
def __init__(self, name: str, ip: str, port: int, model: str, id: str) -> None:
self.name = name
self.ip = ip
self.port = port
self.model = model
self.id = id
def __repr__(self) -> str:
return f'{type(self).__name__}({self.__dict__})'
def __eq__(self, other) -> bool:
return ((self is other) or (self.__dict__ == other.__dict__)) |
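# Usage sketch with made-up discovery data: equality is attribute-wise.
a = ZeroconfDevice('Living Room TV', '192.168.1.20', 8001, 'QE55Q80', 'uuid:1234')
b = ZeroconfDevice('Living Room TV', '192.168.1.20', 8001, 'QE55Q80', 'uuid:1234')
print(a)       # ZeroconfDevice({'name': 'Living Room TV', ...})
print(a == b)  # True: identical __dict__ contents |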
def read_traj_images(json_path, image_folder):
root_path = json_path.parents[0]
with open(json_path) as json_file:
json_dict = json.load(json_file)
image_names = ([None] * len(json_dict['plan']['low_actions']))
for (im_idx, im_dict) in enumerate(json_dict['images']):
if (image_names[im_dict['low_idx']] is None):
image_names[im_dict['low_idx']] = im_dict['image_name']
before_last_image = json_dict['images'][(- 1)]['image_name']
last_image = '{:09d}.png'.format((int(before_last_image.split('.')[0]) + 1))
image_names.append(last_image)
fimages = [((root_path / image_folder) / im) for im in image_names]
if (not any([os.path.exists(path) for path in fimages])):
fimages = [Path(str(path).replace('.png', '.jpg')) for path in fimages]
if (not all([os.path.exists(path) for path in fimages])):
return None
assert (len(fimages) > 0)
try:
images = read_images(fimages)
except Exception:
return None
return images |
def get_quantized_dequantized_weight(layer: torch.nn.Module) -> torch.Tensor:
weight_tensor = layer._module_to_wrap.weight
weight_quantizer = layer.param_quantizers['weight']
quant_dequant_weights = weight_quantizer.quantize_dequantize(weight_tensor, weight_quantizer.round_mode)
return quant_dequant_weights |
def search_plan(sym, ntrial=6, type_dict=None, **kwargs):
history = []
threshold = 0
min_threshold = None
min_cost = None
nbegin = 3
for k in range(nbegin):
info = {}
sym = make_mirror_plan(sym, threshold=threshold, plan_info=info, **kwargs)
cost = get_cost(sym, type_dict, **kwargs)
save_size = (info['save_size'] >> 20)
local_size = (info['max_size'] >> 20)
guess = int(math.sqrt(((save_size * local_size) / 2)))
if ((min_cost is None) or (min_cost > cost)):
min_cost = cost
if ((min_threshold is None) or (local_size < min_threshold)):
min_threshold = local_size
print(('Search threshold=%d MB, cost=%d MB' % (threshold, cost)))
history.append((cost, threshold, sym))
threshold = guess
max_threshold = (threshold * math.sqrt(2))
step = int(((max_threshold - min_threshold) / ntrial))
threshold = (min_threshold + step)
if (step > 0):
for k in range(ntrial):
sym = make_mirror_plan(sym, threshold=threshold, plan_info=info, **kwargs)
cost = get_cost(sym, type_dict, **kwargs)
print(('Search threshold=%d MB, cost=%d MB' % (threshold, cost)))
history.append((cost, threshold, sym))
threshold += step
history.sort(key=(lambda x: x[0]))
(cost, threshold, sym) = history[0]
print(('Found best plan with threshold=%d MB, cost=%d MB' % (threshold, cost)))
return sym |
class TestPytestPluginManagerBootstrapping():
def test_preparse_args(self, pytestpm: PytestPluginManager) -> None:
pytest.raises(ImportError, (lambda : pytestpm.consider_preparse(['xyz', '-p', 'hello123'])))
with pytest.raises(ImportError) as excinfo:
pytestpm.consider_preparse(['-phello123'])
assert ('"hello123"' in excinfo.value.args[0])
pytestpm.consider_preparse(['-pno:hello123'])
pytestpm.consider_preparse(['-p'])
with pytest.raises(UsageError, match='^plugin main cannot be disabled$'):
pytestpm.consider_preparse(['-p', 'no:main'])
def test_plugin_prevent_register(self, pytestpm: PytestPluginManager) -> None:
pytestpm.consider_preparse(['xyz', '-p', 'no:abc'])
l1 = pytestpm.get_plugins()
pytestpm.register(42, name='abc')
l2 = pytestpm.get_plugins()
assert (len(l2) == len(l1))
assert (42 not in l2)
def test_plugin_prevent_register_unregistered_already_registered(self, pytestpm: PytestPluginManager) -> None:
pytestpm.register(42, name='abc')
l1 = pytestpm.get_plugins()
assert (42 in l1)
pytestpm.consider_preparse(['xyz', '-p', 'no:abc'])
l2 = pytestpm.get_plugins()
assert (42 not in l2)
def test_plugin_prevent_register_stepwise_on_cacheprovider_unregister(self, pytestpm: PytestPluginManager) -> None:
pytestpm.register(42, name='cacheprovider')
pytestpm.register(43, name='stepwise')
l1 = pytestpm.get_plugins()
assert (42 in l1)
assert (43 in l1)
pytestpm.consider_preparse(['xyz', '-p', 'no:cacheprovider'])
l2 = pytestpm.get_plugins()
assert (42 not in l2)
assert (43 not in l2)
def test_blocked_plugin_can_be_used(self, pytestpm: PytestPluginManager) -> None:
pytestpm.consider_preparse(['xyz', '-p', 'no:abc', '-p', 'abc'])
assert pytestpm.has_plugin('abc')
assert (not pytestpm.is_blocked('abc'))
assert (not pytestpm.is_blocked('pytest_abc')) |
def _simple_conv_model(model_type='functional'):
if (model_type == 'functional'):
inp = layers.Input((32, 32, 3))
x = layers.Conv2D(filters=32, kernel_size=2)(inp)
x = layers.ReLU(max_value=6.0)(x)
x = layers.Conv2D(filters=32, kernel_size=2, activation=tf.nn.relu6)(x)
x = layers.Activation(tf.nn.relu6)(x)
x = layers.Flatten()(x)
x = layers.Dense(units=10, activation=tf.nn.relu6)(x)
out = layers.Dense(units=2, activation=tf.nn.softmax)(x)
return tf.keras.Model(inp, out)
elif (model_type == 'sequential'):
return tf.keras.Sequential([layers.Conv2D(filters=32, kernel_size=2, input_shape=(32, 32, 3)), layers.ReLU(max_value=6.0), layers.Conv2D(filters=32, kernel_size=2, activation=tf.nn.relu6), layers.Activation(tf.nn.relu6), layers.Flatten(), layers.Dense(units=10, activation=tf.nn.relu6), layers.Dense(units=2, activation=tf.nn.softmax)]) |
class TestReplaceNodeTransformer(object):
def test_found(self):
node = ast.parse('a.b(c)')
replacement_node = ast.Name(id='d')
new_node = ReplaceNodeTransformer(node.body[0].value.func, replacement_node).visit(node)
assert (new_node is not node)
assert_code_equal('d(c)\n', decompile(new_node))
def test_not_found(self):
node = ast.parse('a.b(c)')
random_node = ast.Name(id='d')
new_node = ReplaceNodeTransformer(random_node, node.body[0].value.func).visit(node)
assert (new_node is not node)
assert_code_equal('a.b(c)\n', decompile(new_node)) |
class GeoJsonTooltip(GeoJsonDetail):
_template = Template((('\n {% macro script(this, kwargs) %}\n {{ this._parent.get_name() }}.bindTooltip(' + GeoJsonDetail.base_template) + ',{{ this.tooltip_options | tojson | safe }});\n {% endmacro %}\n '))
def __init__(self, fields: Sequence[str], aliases: Optional[Sequence[str]]=None, labels: bool=True, localize: bool=False, style: Optional[str]=None, class_name: str='foliumtooltip', sticky: bool=True, **kwargs: TypeJsonValue):
super().__init__(fields=fields, aliases=aliases, labels=labels, localize=localize, style=style, class_name=class_name)
self._name = 'GeoJsonTooltip'
kwargs.update({'sticky': sticky, 'class_name': class_name})
self.tooltip_options = {camelize(key): kwargs[key] for key in kwargs.keys()} |
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0):
super().__init__()
self.num_heads = num_heads
head_dim = (dim // num_heads)
self.scale = (qk_scale or (head_dim ** (- 0.5)))
self.qkv = nn.Linear(dim, (dim * 3), bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, f):
(B, N, C) = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, (C // self.num_heads)).permute(2, 0, 3, 1, 4)
(q, k, v) = (qkv[0], qkv[1], qkv[2])
attn = ((q @ k.transpose((- 2), (- 1))) * self.scale)
attn = attn.softmax(dim=(- 1))
attn = self.attn_drop(attn)
f = f.reshape(B, N, self.num_heads, (C // self.num_heads)).permute(0, 2, 1, 3).contiguous()
x = (attn @ v)
attn2gcn = x.clone().permute(0, 2, 1, 3).contiguous().reshape(B, N, C).contiguous()
x = (x + f)
x = x.transpose(1, 2).reshape(B, N, C).contiguous()
x = self.proj(x)
x = self.proj_drop(x)
return (x, attn2gcn) |
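# Shape sketch with hypothetical sizes: x carries the token stream, f a
# parallel feature stream fused after attention; both are (batch, tokens, dim).
import torch

attn_layer = Attention(dim=64, num_heads=8)
x, f = torch.randn(2, 16, 64), torch.randn(2, 16, 64)
out, attn2gcn = attn_layer(x, f)  # both outputs are (2, 16, 64) |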
def test_gitlab_reference_handling_on_bad_data(run_line, tmp_path):
doc = (tmp_path / 'data.yml')
doc.write_text('include:\n - local: setup.yml\n\ntest:\n script:\n # !reference not a list, error\n - !reference .setup\n - echo running my own command\n')
res = run_line(['check-jsonschema', '--builtin-schema', 'gitlab-ci', '--data-transform', 'gitlab-ci', str(doc)], catch_exceptions=True)
assert (res.exit_code == 1) |
class FinTS3Segment(Container, SubclassesMixin, metaclass=FinTS3SegmentMeta):
header = DataElementGroupField(type=SegmentHeader, _d='Segmentkopf')
@classproperty
def TYPE(cls):
match = TYPE_VERSION_RE.match(cls.__name__)
if match:
return match.group(1)
@classproperty
def VERSION(cls):
match = TYPE_VERSION_RE.match(cls.__name__)
if match:
return int(match.group(2))
def __init__(self, *args, **kwargs):
if ('header' not in kwargs):
kwargs['header'] = SegmentHeader(self.TYPE, None, self.VERSION)
args = ((kwargs.pop('header'),) + args)
super().__init__(*args, **kwargs)
@classmethod
def find_subclass(cls, segment):
h = SegmentHeader.naive_parse(segment[0])
target_cls = None
for possible_cls in cls._all_subclasses():
if ((getattr(possible_cls, 'TYPE', None) == h.type) and (getattr(possible_cls, 'VERSION', None) == h.version)):
target_cls = possible_cls
if (not target_cls):
target_cls = cls
return target_cls |
class BasicBlock(nn.Sequential):
def __init__(self, conv, in_channels, out_channels, kernel_size, stride=1, bias=False, bn=True, act=nn.ReLU(True)):
m = [conv(in_channels, out_channels, kernel_size, bias=bias)]
if bn:
m.append(nn.BatchNorm2d(out_channels))
if (act is not None):
m.append(act)
super(BasicBlock, self).__init__(*m) |
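# Usage sketch: the conv constructor is injected, so plain nn.Conv2d works;
# built without padding here, spatial dims shrink by kernel_size - 1.
import torch
import torch.nn as nn

block = BasicBlock(nn.Conv2d, 3, 16, kernel_size=3)
out = block(torch.randn(1, 3, 32, 32))  # (1, 16, 30, 30): conv + BN + ReLU |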
def retrieve_available_artifacts():
class Artifact():
def __init__(self, name: str, single_gpu: bool=False, multi_gpu: bool=False):
self.name = name
self.single_gpu = single_gpu
self.multi_gpu = multi_gpu
self.paths = []
def __str__(self):
return self.name
def add_path(self, path: str, gpu: str=None):
self.paths.append({'name': self.name, 'path': path, 'gpu': gpu})
_available_artifacts: Dict[(str, Artifact)] = {}
directories = filter(os.path.isdir, os.listdir())
for directory in directories:
if directory.startswith('single-gpu-docker'):
artifact_name = directory[(len('single-gpu-docker') + 1):]
if (artifact_name in _available_artifacts):
_available_artifacts[artifact_name].single_gpu = True
else:
_available_artifacts[artifact_name] = Artifact(artifact_name, single_gpu=True)
_available_artifacts[artifact_name].add_path(directory, gpu='single')
elif directory.startswith('multi-gpu-docker'):
artifact_name = directory[(len('multi-gpu-docker') + 1):]
if (artifact_name in _available_artifacts):
_available_artifacts[artifact_name].multi_gpu = True
else:
_available_artifacts[artifact_name] = Artifact(artifact_name, multi_gpu=True)
_available_artifacts[artifact_name].add_path(directory, gpu='multi')
else:
artifact_name = directory
if (artifact_name not in _available_artifacts):
_available_artifacts[artifact_name] = Artifact(artifact_name)
_available_artifacts[artifact_name].add_path(directory)
return _available_artifacts |
class TestRegistryProxyModelGetRepoTag():
upstream_registry = 'quay.io'
upstream_repository = 'app-sre/ubi8-ubi'
orgname = 'quayio-cache'
repository = f'{orgname}/{upstream_repository}'
tag = 'latest'
@pytest.fixture(autouse=True)
def setup(self, app):
self.user = get_user('devtable')
self.org = create_organization(self.orgname, '{self.orgname}', self.user)
self.org.save()
self.config = create_proxy_cache_config(org_name=self.orgname, upstream_registry=self.upstream_registry, expiration_s=3600)
def test_caches_manifest_on_first_pull(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_4_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.manifest is not None)
assert (tag.manifest.internal_manifest_bytes.as_unicode() == UBI8_8_4_MANIFEST_SCHEMA2)
def test_updates_manifest_and_bumps_tag_expiration_when_upstream_manifest_changed(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_4_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.name == self.tag)
first_manifest = tag.manifest
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.name == self.tag)
assert (tag.manifest.id != first_manifest.id)
assert (tag.manifest.digest == UBI8_8_5_DIGEST)
def test_renews_expired_tag_when_manifest_is_up_to_date_with_upstream(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.name == self.tag)
before_ms = (get_epoch_timestamp_ms() - (timedelta(hours=24).total_seconds() * 1000))
Tag.update(lifetime_start_ms=before_ms, lifetime_end_ms=(before_ms + 5)).where((Tag.id == tag.id)).execute()
expired_tag = tag
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (expired_tag.id == tag.id)
assert (expired_tag.manifest.id == tag.manifest.id)
assert (not tag.expired)
new_expiration_ms = (get_epoch_timestamp_ms() + (self.config.expiration_s * 1000))
assert (tag.lifetime_end_ms >= (new_expiration_ms - 500))
def test_passes_through_upstream_error_when_image_isnt_cached(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response('not-existing-ref', '', '')
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
with pytest.raises(TagDoesNotExist):
proxy_model.get_repo_tag(repo_ref, self.tag)
def test_passes_through_upstream_error_when_local_cache_is_expired(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
before_ms = (get_epoch_timestamp_ms() - (timedelta(hours=24).total_seconds() * 1000))
Tag.update(lifetime_start_ms=before_ms, lifetime_end_ms=(before_ms + 5)).where((Tag.id == tag.id)).execute()
proxy_mock = proxy_manifest_response('not-existing-ref', '', '')
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is None)
def test_returns_None_when_manifest_no_longer_exists_upstream_and_local_cache_is_expired(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
before_ms = (get_epoch_timestamp_ms() - (timedelta(hours=24).total_seconds() * 1000))
Tag.update(lifetime_start_ms=before_ms, lifetime_end_ms=(before_ms + 5)).where((Tag.id == tag.id)).execute()
proxy_mock = proxy_manifest_response('not-existing-ref', '', '')
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is None)
def test_bumps_tag_expiration_when_upstream_is_alive_and_cache_is_up_to_date(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.name == self.tag)
first_tag = tag
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.lifetime_end_ms > first_tag.lifetime_end_ms)
def test_doesnt_bump_tag_expiration_when_upstream_is_dead(self, create_repo, proxy_manifest_response):
repo_ref = create_repo(self.orgname, self.upstream_repository, self.user)
proxy_mock = proxy_manifest_response(self.tag, UBI8_8_5_MANIFEST_SCHEMA2, DOCKER_SCHEMA2_MANIFEST_CONTENT_TYPE)
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
first_tag = tag
proxy_mock = proxy_manifest_response('not-existing-ref', '', '')
with patch('data.registry_model.registry_proxy_model.Proxy', MagicMock(return_value=proxy_mock)):
proxy_model = ProxyModel(self.orgname, self.upstream_repository, self.user)
tag = proxy_model.get_repo_tag(repo_ref, self.tag)
assert (tag is not None)
assert (tag.lifetime_end_ms == first_tag.lifetime_end_ms) |
class TestUserVersion():
@pytest.mark.parametrize('val, major, minor', [(524289, 8, 1), (2147483647, 32767, 65535)])
def test_from_int(self, val, major, minor):
version = sql.UserVersion.from_int(val)
assert (version.major == major)
assert (version.minor == minor)
@pytest.mark.parametrize('major, minor, val', [(8, 1, 524289), (32767, 65535, 2147483647)])
def test_to_int(self, major, minor, val):
version = sql.UserVersion(major, minor)
assert (version.to_int() == val)
@pytest.mark.parametrize('val', [2147483648, (- 1)])
def test_from_int_invalid(self, val):
with pytest.raises(AssertionError):
sql.UserVersion.from_int(val)
@pytest.mark.parametrize('major, minor', [((- 1), 0), (0, (- 1)), (0, 65536), (32768, 0)])
def test_to_int_invalid(self, major, minor):
version = sql.UserVersion(major, minor)
with pytest.raises(AssertionError):
version.to_int()
@hypothesis.given(val=strategies.integers(min_value=0, max_value=2147483647))
def test_from_int_hypothesis(self, val):
version = sql.UserVersion.from_int(val)
assert (version.to_int() == val)
@hypothesis.given(major=strategies.integers(min_value=0, max_value=32767), minor=strategies.integers(min_value=0, max_value=65535))
def test_to_int_hypothesis(self, major, minor):
version = sql.UserVersion(major, minor)
assert (version.from_int(version.to_int()) == version) |
def prepare_sccs(sccs: list[set[T]], edges: dict[(T, list[T])]) -> dict[(AbstractSet[T], set[AbstractSet[T]])]:
sccsmap = {v: frozenset(scc) for scc in sccs for v in scc}
data: dict[(AbstractSet[T], set[AbstractSet[T]])] = {}
for scc in sccs:
deps: set[AbstractSet[T]] = set()
for v in scc:
deps.update((sccsmap[x] for x in edges[v]))
data[frozenset(scc)] = deps
return data |
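# Usage sketch on a toy condensation: the SCC {'a', 'b'} depends on {'c'}
# (via the edge a -> c) and on itself, while {'c'} has no dependencies.
sccs = [{'a', 'b'}, {'c'}]
edges = {'a': ['b', 'c'], 'b': ['a'], 'c': []}
print(prepare_sccs(sccs, edges))
# {frozenset({'a', 'b'}): {frozenset({'a', 'b'}), frozenset({'c'})}, frozenset({'c'}): set()}
# (set/frozenset element order in the printout may vary) |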
class Packer(object):
def __init__(self, obj: Any):
tensor_lists = _extract_tensors(obj)
memo = {id(t): t for t in tensor_lists}
self._tensor_memo = copy(memo)
self._obj = deepcopy(obj, memo)
self._params_tensor_list: Optional[List[torch.Tensor]] = tensor_lists
self._unique_params_idxs: Optional[List[int]] = None
self._unique_inverse_idxs: Optional[List[int]] = None
self._unique_tensor_shapes: Optional[List[torch.Size]] = None
self._tensor_shapes: Optional[List[torch.Size]] = None
self._unique_tensor_numels: Optional[List[int]] = None
self._unique_tensor_numel_tot: Optional[int] = None
self._tensor_numels: Optional[List[int]] = None
self._tensor_numel_tot: Optional[int] = None
def get_param_tensor_list(self, unique: bool=True) -> List[torch.Tensor]:
if (self._params_tensor_list is not None):
params_tensors = self._params_tensor_list
else:
params_tensors = _extract_tensors(self._obj)
self._params_tensor_list = params_tensors
if unique:
if (self._unique_params_idxs is not None):
unique_idxs = self._unique_params_idxs
unique_inverse = self._unique_inverse_idxs
else:
(unique_idxs, unique_inverse) = _get_unique_idxs(params_tensors)
self._unique_params_idxs = unique_idxs
self._unique_inverse_idxs = unique_inverse
params_tensors = [params_tensors[i] for i in unique_idxs]
if unique:
self._unique_tensor_shapes = [p.shape for p in params_tensors]
else:
self._tensor_shapes = [p.shape for p in params_tensors]
return params_tensors
def get_param_tensor(self, unique: bool=True) -> Optional[torch.Tensor]:
params = self.get_param_tensor_list(unique=unique)
if (len(params) == 0):
return None
else:
if unique:
self._unique_tensor_numels = [p.numel() for p in params]
self._unique_tensor_numel_tot = sum(self._unique_tensor_numels)
else:
self._tensor_numels = [p.numel() for p in params]
self._tensor_numel_tot = sum(self._tensor_numels)
if (len(params) == 1):
return params[0]
else:
tparam = torch.cat([p.reshape((- 1)) for p in params])
return tparam
def construct_from_tensor_list(self, tensors: List[torch.Tensor], unique: bool=True) -> Any:
if unique:
tensor_shapes = self._unique_tensor_shapes
else:
tensor_shapes = self._tensor_shapes
if (tensor_shapes is None):
raise RuntimeError(('Please execute self.get_param_tensor_list(%s) first' % str(unique)))
else:
if (len(tensor_shapes) != len(tensors)):
raise RuntimeError('Mismatch length of the tensors')
if (len(tensor_shapes) == 0):
return self._obj
for (i, (tens, shape)) in enumerate(zip(tensors, tensor_shapes)):
if (tens.shape != shape):
msg = ('The tensors[%d] has mismatch shape from the original. Expected: %s, got: %s' % (i, tens.shape, shape))
raise RuntimeError(msg)
if unique:
assert self._unique_inverse_idxs, 'Please report to Github'
tensors = [tensors[self._unique_inverse_idxs[i]] for i in range(len(self._unique_inverse_idxs))]
else:
tensors = copy(tensors)
memo = copy(self._tensor_memo)
new_obj = deepcopy(self._obj, memo)
new_obj = _put_tensors(new_obj, tensors)
return new_obj
def construct_from_tensor(self, a: torch.Tensor, unique: bool=True) -> Any:
if unique:
tensor_shapes = self._unique_tensor_shapes
tensor_numel_tot = self._unique_tensor_numel_tot
tensor_numels = self._unique_tensor_numels
else:
tensor_shapes = self._tensor_shapes
tensor_numel_tot = self._tensor_numel_tot
tensor_numels = self._tensor_numels
if (tensor_shapes is None):
raise RuntimeError(('Please execute self.get_param_tensor(%s) first' % str(unique)))
elif (len(tensor_shapes) == 0):
return self._obj
else:
assert (tensor_numel_tot is not None), 'Please report to Github'
assert (tensor_numels is not None), 'Please report to Github'
if (a.numel() != tensor_numel_tot):
msg = ('The number of element does not match. Expected: %d, got: %d' % (tensor_numel_tot, a.numel()))
raise RuntimeError(msg)
if (len(tensor_numels) == 1):
params: List[torch.Tensor] = [a]
else:
ioffset = 0
params = []
for i in range(len(tensor_numels)):
p = a[ioffset:(ioffset + tensor_numels[i])].reshape(tensor_shapes[i])
ioffset += tensor_numels[i]
params.append(p)
return self.construct_from_tensor_list(params, unique=unique) |
class GeneratorFunieGAN(nn.Module):
def __init__(self, in_channels=3, out_channels=3):
super(GeneratorFunieGAN, self).__init__()
self.down1 = UNetDown(in_channels, 32, bn=False)
self.down2 = UNetDown(32, 128)
self.down3 = UNetDown(128, 256)
self.down4 = UNetDown(256, 256)
self.down5 = UNetDown(256, 256, bn=False)
self.up1 = UNetUp(256, 256)
self.up2 = UNetUp(512, 256)
self.up3 = UNetUp(512, 128)
self.up4 = UNetUp(256, 32)
self.final = nn.Sequential(nn.Upsample(scale_factor=2), nn.ZeroPad2d((1, 0, 1, 0)), nn.Conv2d(64, out_channels, 4, padding=1), nn.Tanh())
def forward(self, x):
d1 = self.down1(x)
d2 = self.down2(d1)
d3 = self.down3(d2)
d4 = self.down4(d3)
d5 = self.down5(d4)
u1 = self.up1(d5, d4)
u2 = self.up2(u1, d3)
u3 = self.up3(u2, d2)
u45 = self.up4(u3, d1)
return self.final(u45) |
class KnownValues(unittest.TestCase):
def test_parse_pople(self):
self.assertEqual(gto.basis._parse_pople_basis('631g(d)', 'C'), ('pople-basis/6-31G.dat', 'pople-basis/6-31G-polarization-d.dat'))
self.assertEqual(gto.basis._parse_pople_basis('631g**', 'C'), ('pople-basis/6-31Gss.dat',))
self.assertEqual(gto.basis._parse_pople_basis('631++g**', 'C'), ('pople-basis/6-31++Gss.dat',))
self.assertEqual(gto.basis._parse_pople_basis('6311+g(d,p)', 'C'), ('pople-basis/6-311+G.dat', 'pople-basis/6-311G-polarization-d.dat'))
self.assertRaises(KeyError, gto.basis._parse_pople_basis, '631g++', 'C')
def test_basis_load(self):
self.assertRaises(BasisNotFoundError, gto.basis.load, __file__, 'H')
self.assertRaises(BasisNotFoundError, gto.basis.load, 'abas', 'H')
self.assertEqual(len(gto.basis.load('631++g**', 'C')), 8)
self.assertEqual(len(gto.basis.load('ccpcvdz', 'C')), 7)
basdat = (gto.basis.load('minao', 'C') + gto.basis.load('sto3g', 'C'))
basdat1 = gto.basis.parse_nwchem.parse(gto.basis.parse_nwchem.convert_basis_to_nwchem('C', basdat), 'C')
bas = []
for b in sorted(basdat, reverse=True):
b1 = b[:1]
for x in b[1:]:
b1.append(list(x))
bas.append(b1)
bas = ([b for b in bas if (b[0] == 0)] + [b for b in bas if (b[0] == 1)])
self.assertEqual(bas, basdat1)
self.assertEqual(len(gto.basis.load('def2-svp', 'Rn')), 16)
def test_basis_load_from_file(self):
ftmp = tempfile.NamedTemporaryFile()
ftmp.write('\nLi S\n 16.1195750 0.\n 2.9362007 0.\n 0.7946505 0.\nLi S\n 0.6362897 -0.\n 0.1478601 0.\n 0.0480887 0.\n '.encode())
ftmp.flush()
b = gto.basis.load(ftmp.name, 'Li')
self.assertEqual(len(b), 2)
self.assertEqual(len(b[0][1:]), 3)
self.assertEqual(len(b[1][1:]), 3)
def test_basis_load_ecp(self):
self.assertEqual(gto.basis.load_ecp(__file__, 'H'), [])
def test_parse_basis(self):
basis_str = '\n#BASIS SET: (6s,3p) -> [2s,1p]\nC S\n 71.6168370 0. \n 13.0450960 0. \n#\n 3.5305122 0. \nC SP\n 2.9412494 -0. 0. \n 0.6834831 0. 0. \n 0.2222899 0. 0. '
self.assertRaises(BasisNotFoundError, gto.basis.parse_nwchem.parse, basis_str, 'O')
basis_dat = gto.basis.parse_nwchem.parse(basis_str)
self.assertEqual(len(basis_dat), 3)
def test_parse_ecp(self):
ecp_str = '\n#\nNa nelec 10\nNa ul\n1 -10.0000000 \n2 35.0516791 -47.4902024 \n#\n2 7.9060270 -17.2283007 \n\nNa S\n0 3.0000000*np.exp(0)\n1 41.5764759 36.2847626*np.exp(0)\n2 13.2649167 72.9304880*np.exp(0)\nNa P\n0 1257.2650682 5.0000000 \n1 \n2 54.5247759 \n'
ecpdat = gto.basis.parse_nwchem.parse_ecp(ecp_str, 'Na')
self.assertEqual(ecpdat[0], 10)
self.assertEqual(len(ecpdat[1]), 3)
ecpdat1 = gto.basis.parse_nwchem.parse_ecp(ecp_str)
self.assertEqual(ecpdat, ecpdat1)
ecpdat1 = gto.basis.parse_nwchem.parse_ecp(gto.basis.parse_nwchem.convert_ecp_to_nwchem('Na', ecpdat), 'Na')
self.assertEqual(ecpdat, ecpdat1)
def test_optimize_contraction(self):
bas = gto.parse('\n#BASIS SET: (6s,3p) -> [2s,1p]\n C S\n 2.9412494 -0.\n 0.6834831 0.\n 0.2222899 0.\n C S\n 2.9412494 0.\n 0.6834831 0.\n 0.2222899 0.\n ', optimize=True)
self.assertEqual(len(bas), 1)
bas = [[1, 0, [2.9412494, (- 0.)], [0.6834831, 0.], [0.2222899, 0.]], [1, 1, [2.9412494, (- 0.)], [0.6834831, 0.], [0.2222899, 0.]], [1, 1, [2.9412494, 0.], [0.6834831, 0.], [0.2222899, 0.]]]
bas = gto.basis.parse_nwchem.optimize_contraction(bas)
self.assertEqual(len(bas), 2)
def test_remove_zero(self):
bas = gto.parse('\n C S\n 7. 0. 0.\n 2. 0. 0.\n 0. 1. 0.\n 0. 0. 1.\n 0. 0. 0.\n ')
self.assertEqual(len(bas[0]), 3)
bas = [[0, 0, [7., 0.0, 0.0], [2., 0.0, 0.0], [0., 1.0, 0.0], [0., 0.0, 1.0], [0., 0.0, 0.0]]]
bas = gto.basis.parse_nwchem.remove_zero(bas)
self.assertEqual(len(bas[0]), 4)
def test_parse_molpro_basis(self):
basis_str = '\nC s aug-cc-pVTZ AVTZ : 11 5 1.10 1.10 8.8 10.10 11.11\naug-cc-pVTZ\n8236 1235 280.8 79.27 25.59 8.997 3.319 0.9059 0.3643 0.1285 0.04402\n0.000531 0.004108 0.021087 0.081853 0.234817 0.434401 0.346129 0.039378\n-0.008983 0.002385 -0.000113 -0.000878 -0.00454 -0.018133 -0.05576\n-0.126895 -0.170352 0.140382 0.598684 0.395389 1 1 1\nC p aug-cc-pVTZ AVTZ : 6 4 1.5 4.4 5.5 6.6\naug-cc-pVTZ\n18.71 4.133 1.2 0.3827 0.1209 0.03569 0.014031 0.086866 0.290216\n0.501008 0.343406 1 1 1\nC d aug-cc-pVTZ AVTZ : 3 0\naug-cc-pVTZ\n1.097 0.318 0.1\nC f aug-cc-pVTZ AVTZ : 2 0\naug-cc-pVTZ\n0.761 0.268\n'
basis1 = parse_molpro.parse(basis_str)
ref = gto.basis.parse('\n#BASIS SET: (11s,6p,3d,2f) -> [5s,4p,3d,2f]\nC S\n 8236.0000000 0.0005310 -0.0001130 0.0000000 0.0000000 0\n 1235.0000000 0.0041080 -0.0008780 0.0000000 0.0000000 0\n 0.0210870 -0.0045400 0.0000000 0.0000000 0\n 79.2700000 0.0818530 -0.0181330 0.0000000 0.0000000 0\n 25.5900000 0.2348170 -0.0557600 0.0000000 0.0000000 0\n 8.9970000 0.4344010 -0.1268950 0.0000000 0.0000000 0\n 3.3190000 0.3461290 -0.1703520 0.0000000 0.0000000 0\n 0.9059000 0.0393780 0.1403820 1.0000000 0.0000000 0\n 0.3643000 -0.0089830 0.5986840 0.0000000 0.0000000 0\n 0.1285000 0.0023850 0.3953890 0.0000000 1.0000000 0\n 0.0440200 0.0000000 0.0000000 0.0000000 0.0000000 1.0000000 \nC P\n 18.7100000 0.0140310 0.0000000 0.0000000 0\n 4.1330000 0.0868660 0.0000000 0.0000000 0\n 1.2000000 0.2902160 0.0000000 0.0000000 0\n 0.3827000 0.5010080 1.0000000 0.0000000 0\n 0.1209000 0.3434060 0.0000000 1.0000000 0\n 0.0356900 0.0000000 0.0000000 0.0000000 1.0000000 \nC D\n 1.0970000 1.0000000\nC D\n 0.3180000 1.0000000 \nC D\n 0.1000000 1.0000000 \nC F\n 0.7610000 1.0000000 \nC F\n 0.2680000 1.0000000 \nEND')
self.assertEqual(ref, basis1)
basis_str = '\nc s 631g sv : 10 3 1.6 7.9 10.10\n 3047.52500d+00 457.369500d+00 103.948700d+00 29.2101600d+00 9.d+00\n 3.d+00 7.d+00 1.d+00 0.d+00 0.d+00\n 1.d-03 1.d-02 0.d+00 0.d+00 0.d+00\n 0.d+00 -0.d+00 -0.d+00 1.d+00 1.d+00\nc p 631g sv : 4 2 1.3 4.4\n 7.d+00 1.d+00 0.d+00 0.d+00 0.d+00\n 0.d+00 0.d+00 1.d+00\n'
basis1 = parse_molpro.parse(basis_str)
ref = gto.basis.parse('\n#BASIS SET: (10s,4p) -> [3s,2p]\nC S\n 3047.5250000 0. 0 0\n 0. 0 0\n 0. 0 0\n 29.2101600 0. 0 0\n 9.2866630 0.4679413 0 0\n 3.1639270 0.3623120 0 0\n 7.8682720 0 -0.1193324 0\n 1.8812890 0 -0.1608542 0\n 0.5442493 0 1.1434560 0\n 0.1687144 0 0.0000000 1\nC P\n 7.8682720 0. 0\n 1.8812890 0.3164234 0\n 0.5442493 0.7443083 0\n 0.1687144 0 1\nEND ')
self.assertEqual(ref, basis1)
def test_parse_gaussian_basis(self):
basis_str = '\n****\nC 0\nS 8 1.00\n 8236.0000000 0.0005310 \n 1235.0000000 0.0041080 \n 0.0210870 \n 79.2700000 0.0818530 \n 25.5900000 0.2348170 \n 8.9970000 0.4344010 \n 3.3190000 0.3461290 \n 0.3643000 -0.0089830 \nS 8 1.00\n 8236.0000000 -0.0001130 \n 1235.0000000 -0.0008780 \n -0.0045400 \n 79.2700000 -0.0181330 \n 25.5900000 -0.0557600 \n 8.9970000 -0.1268950 \n 3.3190000 -0.1703520 \n 0.3643000 0.5986840 \nS 1 1.00\n 0.9059000 1.0000000 \nS 1 1.00\n 0.1285000 1.0000000 \nS 1 1.00\n 0.0440200 1.0000000 \nP 3 1.00\n 18.7100000 0.0140310 \n 4.1330000 0.0868660 \n 1.2000000 0.2902160 \nP 1 1.00\n 0.3827000 1.0000000 \nP 1 1.00\n 0.1209000 1.0000000 \nP 1 1.00\n 0.0356900 1.0000000 \nD 1 1.00\n 1.0970000 1.0000000 \nD 1 1.00\n 0.3180000 1.0000000 \nD 1 1.00\n 0.1000000 1.0000000 \nF 1 1.00\n 0.7610000 1.0000000 \nF 1 1.00\n 0.2680000 1.0000000 \n****\n '
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('augccpvtz', 'C')
self.assertEqual(ref, basis1)
basis_str = '\n****\nC 0\nS 6 1.00\n 4563.2400000 0. \n 0.0152306 \n 0.0761269 \n 44.4553000 0.2608010 \n 13.0290000 0.6164620 \n 1.8277300 0.2210060 \nSP 3 1.00\n 20.9642000 0.1146600 0.0402487 \n 4.8033100 0.9199990 0.2375940 \n 1.4593300 -0. 0.8158540 \nSP 1 1.00\n 0.4834560 1.0000000 1.0000000 \nSP 1 1.00\n 0.1455850 1.0000000 1.0000000 \nSP 1 1.00\n 0.0438000 1.0000000 1.0000000 \nD 1 1.00\n 0.6260000 1.0000000 \n****\n'
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('6311++g*', 'C')
self.assertEqual(ref, basis1)
basis_str = '\n****\nC 0 \nS 6 1.00\n 3047.5249000 0.0018347 \n 0.0140373 \n 0.0688426 \n 29.2101550 0.2321844 \n 9.2866630 0.4679413 \n 3.1639270 0.3623120 \nSP 3 1.00\n 7.8682724 -0.1193324 0.0689991 \n 1.8812885 -0.1608542 0.3164240 \n 0.5442493 1.1434564 0.7443083 \nSP 1 1.00\n 0.1687144 1.0000000 1.0000000 \nD 1 1.00\n 2.5040000 1.0000000 \nD 1 1.00\n 0.6260000 1.0000000 \nD 1 1.00\n 0.1565000 1.0000000 \nF 1 1.00\n 0.8000000 1.0000000 \n****\n'
basis1 = parse_gaussian.parse(basis_str)
ref = gto.basis.load('631g(3df,3pd)', 'C')
self.assertEqual(ref, basis1)
def test_parse_gaussian_load_basis(self):
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('\n****\nH 0\nS 1 1.0\n1.0 1.0\n****\n')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1.0, 1.0]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('\nH 0\nS 1 1.0\n1.0 1.0\n****\n')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1.0, 1.0]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('\n****\nH 0\nS 1 1.0\n1.0 1.0\n')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1.0, 1.0]]])
with tempfile.NamedTemporaryFile(mode='w+') as f:
f.write('\nH 0\nS 1 1.0\n1.0 1.0\n')
f.flush()
self.assertEqual(parse_gaussian.load(f.name, 'H'), [[0, [1.0, 1.0]]])
def test_basis_truncation(self):
b = gto.basis.load('', 'C')
self.assertEqual(len(b), 3)
self.assertEqual(len(b[0][1]), 4)
self.assertEqual(len(b[1][1]), 2)
self.assertEqual(b[2][0], 3)
self.assertEqual(len(b[2][1]), 2)
b = gto.basis.load('631g(3df,3pd)', 'C')
self.assertEqual(len(b), 6)
self.assertEqual(len(b[0][1]), 2)
self.assertEqual(len(b[1][1]), 2)
self.assertEqual(len(b[2][1]), 2)
self.assertEqual(len(b[3][1]), 2)
self.assertEqual(len(b[4][1]), 2)
self.assertEqual(b[5][0], 3)
self.assertEqual(len(b[5][1]), 2)
b = gto.basis.load('aug-', 'C')
self.assertEqual(len(b), 6)
self.assertEqual(b[3][0], 1)
self.assertRaises(AssertionError, gto.basis.load, 'aug-', 'C')
def test_to_general_contraction(self):
b = gto.basis.to_general_contraction(gto.load('cc-pvtz', 'H'))
self.assertEqual(len(b), 3)
self.assertEqual(len(b[0]), 6)
self.assertEqual(len(b[1]), 3)
self.assertEqual(len(b[2]), 2)
def test_parse_molpro_ecp_soc(self):
ecp_data = parse_molpro.parse_ecp('\n! Q=7., MEFIT, MCDHF+Breit, Ref 32; CPP: alpha=1.028;delta=1.247;ncut=2.\nECP,I,46,4,3;\n1; 2,1.000000,0.000000;\n2; 2,3.380230,83.107547; 2,1.973454,5.099343;\n4; 2,2.925323,27.299020; 2,3.073557,55.607847; 2,1.903188,0.778322; 2,1.119689,1.751128;\n4; 2,1.999036,8.234552; 2,1.967767,12.488097; 2,0.998982,2.177334; 2,0.972272,3.167401;\n4; 2,2.928812,-11.777154; 2,2.904069,-15.525522; 2,0.287352,-0.148550; 2,0.489380,-0.273682;\n4; 2,2.925323,-54.598040; 2,3.073557,55.607847; 2,1.903188,-1.556643; 2,1.119689,1.751128;\n4; 2,1.999036,-8.234552; 2,1.967767,8.325398; 2,0.998982,-2.177334; 2,0.972272,2.111601;\n4; 2,2.928812,7.851436; 2,2.904069,-7.762761; 2,0.287352,0.099033; 2,0.489380,-0.136841;\n')
ref = [46, [[(- 1), [[], [], [[1.0, 0.0]], [], [], [], []]], [0, [[], [], [[3.38023, 83.107547], [1.973454, 5.099343]], [], [], [], []]], [1, [[], [], [[2.925323, 27.29902, (- 54.59804)], [3.073557, 55.607847, 55.607847], [1.903188, 0.778322, (- 1.556643)], [1.119689, 1.751128, 1.751128]], [], [], [], []]], [2, [[], [], [[1.999036, 8.234552, (- 8.234552)], [1.967767, 12.488097, 8.325398], [0.998982, 2.177334, (- 2.177334)], [0.972272, 3.167401, 2.111601]], [], [], [], []]], [3, [[], [], [[2.928812, (- 11.777154), 7.851436], [2.904069, (- 15.525522), (- 7.762761)], [0.287352, (- 0.14855), 0.099033], [0.48938, (- 0.273682), (- 0.136841)]], [], [], [], []]]]]
self.assertEqual(ecp_data, ref) |
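These tests exercise pyscf's NWChem, Molpro, and Gaussian basis-set parsers. A minimal usage sketch, assuming pyscf is installed (the STO-3G hydrogen exponents below are standard published values):

# Parse a small NWChem-format basis string and use it to build a molecule.
from pyscf import gto

h_basis = gto.basis.parse('''
H    S
      3.42525091             0.15432897
      0.62391373             0.53532814
      0.16885540             0.44463454
''')
mol = gto.M(atom='H 0 0 0; H 0 0 0.74', basis={'H': h_basis})
print(mol.nao)  # number of atomic orbitals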
class CallTipObject():
def __init__(self, textCtrl, name, offset):
self.textCtrl = textCtrl
self.name = name
self.bufferName = name
self.offset = offset
def tryUsingBuffer(self):
bufferName = self.textCtrl._callTipBuffer_name
# the cached result is still valid while the current time is before the stored expiry
t = (time.time() - self.textCtrl._callTipBuffer_time)
if ((self.bufferName == bufferName) and (t < 0)):
self._finish(self.textCtrl._callTipBuffer_result)
return True
else:
return False
def finish(self, callTipText):
self.setBuffer(callTipText)
self._finish(callTipText)
def setBuffer(self, callTipText, timeout=4):
self.textCtrl._callTipBuffer_name = self.bufferName
self.textCtrl._callTipBuffer_time = (time.time() + timeout)
self.textCtrl._callTipBuffer_result = callTipText
def _finish(self, callTipText):
self.textCtrl.calltipShow(self.offset, callTipText, True) |
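tryUsingBuffer/setBuffer implement a small time-to-live cache: a stored call tip is reused only while the current time is still before the saved expiry. A standalone sketch of the same pattern (class and method names here are illustrative, not from the original source):

import time

class TTLCache:
    def __init__(self, timeout=4):
        self.timeout = timeout
        self._store = {}  # key -> (expiry_time, value)

    def set(self, key, value):
        # Store the value together with its absolute expiry time.
        self._store[key] = (time.time() + self.timeout, value)

    def get(self, key):
        expiry, value = self._store.get(key, (0, None))
        if time.time() < expiry:
            return value
        return None  # missing or expired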
def run(train_batch_size, epochs, lr, weight_decay, config, exp_id, log_dir, disable_gpu=False):
if (config['test_ratio'] is not None):
(train_loader, val_loader, test_loader) = get_data_loaders(config, train_batch_size, exp_id)
else:
(train_loader, val_loader) = get_data_loaders(config, train_batch_size, exp_id)
module = import_module('model.MWCNN')
mw_model = module.make_model(args).to('cuda')
model = Model(args).to('cuda')
writer = SummaryWriter(log_dir=log_dir)
if os.path.exists(os.path.join(args.log_dir_MW, 'state.pkl.epoch444')):
mw_model.load_state_dict(torch.load(os.path.join(args.log_dir_MW, 'state.pkl.epoch444')), strict=False)
logger.info('Successfully loaded pretrained Epoch_MW_model.')
else:
mw_model.load_state_dict(torch.load(os.path.join(args.log_dir_MW, 'state.pkl.epoch418')), strict=False)
logger.info('Successfully loaded pretrained newly saved MW_model.')
if os.path.exists(os.path.join(args.log_dir_IQA3, 'state.pkl')):
model.load_state_dict(torch.load(os.path.join(args.log_dir_IQA3, 'state.pkl')), strict=False)
logger.info('Successfully loaded pretrained IQA_model.')
optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
if os.path.exists(os.path.join(args.log_dir_IQA3, 'optimizer_state.pkl')):
optimizer.load_state_dict(torch.load(os.path.join(args.log_dir_IQA3, 'optimizer_state.pkl')))
logger.info('Successfully loaded optimizer IQA_parameters.')
loss_avg = Loss(args)
iter = 0
for epoch in range(1, epochs):
epoch_loss = []
for (batch_num, (im_mw, imp_iwt, gt_iwt, im_dmos)) in enumerate(train_loader):
iter += 1
mw_model.eval()
model.train()
optimizer.zero_grad()
pre_iwt = mw_model(im_mw)
pre_iwt = [LocalNormalization(pre_iwt[i][0].detach().cpu().numpy()) for i in range(train_batch_size)]
pre_iwt = torch.stack(pre_iwt).cuda()
error_map = (pre_iwt - imp_iwt)
pre_score = model(imp_iwt, error_map)
loss_batch = loss_avg(pre_score, im_dmos)
plt.imsave(os.path.join(args.log_dir_IQA3, 'hr.jpg'), gt_iwt.detach().cpu().numpy()[0][0])
plt.imsave(os.path.join(args.log_dir_IQA3, 'sr.jpg'), pre_iwt.detach().cpu().numpy()[0][0])
plt.imsave(os.path.join(args.log_dir_IQA3, 'lr.jpg'), imp_iwt.detach().cpu().numpy()[0][0])
loss_batch.backward()
optimizer.step()
torch.save(model.state_dict(), os.path.join(args.log_dir_IQA3, 'state.pkl'))
torch.save(optimizer.state_dict(), os.path.join(args.log_dir_IQA3, 'optimizer_state.pkl'))
logger.info('[EPOCH{}:ITER{}] <LOSS>={:.4}'.format(epoch, iter, loss_batch.item()))
writer.add_scalar('Train/Iter/Loss', loss_batch.item(), iter)
epoch_loss.append(loss_batch.item())
epoch_loss_log = np.mean(epoch_loss)
writer.add_scalar('Train/Epoch/Loss', epoch_loss_log, epoch)
with torch.no_grad():
mw_model.eval()
model.eval()
(srocc, krocc, plcc, rmse, mae) = validate(mw_model, model, val_loader)
logger.info('Validation Results - Epoch: {} <PLCC>: {:.4f} <SROCC>: {:.4f} <KROCC>: {:.4f} <RMSE>: {:.6f} <MAE>: {:.6f}'.format(epoch, plcc, srocc, krocc, rmse, mae))
writer.add_scalar('validation/SROCC', srocc, epoch)
writer.add_scalar('validation/KROCC', krocc, epoch)
writer.add_scalar('validation/PLCC', plcc, epoch)
writer.add_scalar('validation/RMSE', rmse, epoch)
writer.add_scalar('validation/MAE', mae, epoch)
if ((epoch % 1) == 0):
torch.save(model.state_dict(), os.path.join(args.log_dir_IQA3, 'state.pkl.epoch{}'.format(epoch)))
print('Successfully saved model of EPOCH{}'.format(epoch))
writer.close() |
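The loop above resumes training by loading the model and optimizer state dicts separately and saving both after each step. A minimal, self-contained sketch of that checkpointing pattern (file names are illustrative):

import os
import torch
from torch import nn
from torch.optim import Adam

model = nn.Linear(4, 1)
optimizer = Adam(model.parameters(), lr=1e-3)

ckpt_dir = '.'
# Resume if checkpoints exist.
if os.path.exists(os.path.join(ckpt_dir, 'state.pkl')):
    model.load_state_dict(torch.load(os.path.join(ckpt_dir, 'state.pkl')))
if os.path.exists(os.path.join(ckpt_dir, 'optimizer_state.pkl')):
    optimizer.load_state_dict(torch.load(os.path.join(ckpt_dir, 'optimizer_state.pkl')))

# ... train ...

# Persist both model and optimizer state so training can resume exactly.
torch.save(model.state_dict(), os.path.join(ckpt_dir, 'state.pkl'))
torch.save(optimizer.state_dict(), os.path.join(ckpt_dir, 'optimizer_state.pkl'))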
from constraint import Problem, AllDifferentConstraint

def solve():
problem = Problem()
problem.addVariables('abcdxefgh', range(1, 10))
problem.addConstraint((lambda a, b, c, d, x: ((a < b < c < d) and (((((a + b) + c) + d) + x) == 27))), 'abcdx')
problem.addConstraint((lambda e, f, g, h, x: ((e < f < g < h) and (((((e + f) + g) + h) + x) == 27))), 'efghx')
problem.addConstraint(AllDifferentConstraint())
solutions = problem.getSolutions()
return solutions |
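A usage sketch: enumerate the solutions and inspect one assignment (the puzzle asks for two increasing digit quadruples sharing a center cell x, each summing with x to 27):

if __name__ == '__main__':
    solutions = solve()
    print(f'{len(solutions)} solutions')
    print(solutions[0])  # e.g. a mapping of 'a'..'h' and 'x' to digits 1-9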
class Maximum(BinaryOperator):
def __init__(self, left, right):
super().__init__('maximum', left, right)
def __str__(self):
return f'maximum({self.left!s}, {self.right!s})'
def _diff(self, variable):
(left, right) = self.orphans
return (((left >= right) * left.diff(variable)) + ((left < right) * right.diff(variable)))
def _binary_jac(self, left_jac, right_jac):
(left, right) = self.orphans
return (((left >= right) * left_jac) + ((left < right) * right_jac))
def _binary_evaluate(self, left, right):
return np.maximum(left, right)
def _binary_new_copy(self, left, right):
return pybamm.maximum(left, right)
def _sympy_operator(self, left, right):
sympy = have_optional_dependency('sympy')
return sympy.Max(left, right) |
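_diff and _binary_jac select the active branch with indicator terms, i.e. d/dx max(f, g) = (f >= g)·f' + (f < g)·g'. A standalone numpy check of that rule:

import numpy as np

x = np.linspace(-2.0, 2.0, 5)
f, g = x**2, x          # f' = 2x, g' = 1
dmax = (f >= g) * 2 * x + (f < g) * 1.0
print(np.maximum(f, g))  # elementwise maximum
print(dmax)              # derivative of the maximum, branch chosen pointwise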
@pytest.mark.skipif((sys.version_info < (3,)), reason='Cannot catch warnings in python 2')
@keras_test
def test_warnings():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1.0, 0.5]
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights, sample_weight_mode=None)
def gen_data(batch_sz):
while True:
(yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))], [np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))]))
with pytest.warns(Warning) as w:
out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
warning_raised = any([('Sequence' in str(w_.message)) for w_ in w])
assert warning_raised, 'No warning raised when using generator with processes.'
with pytest.warns(None) as w:
out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
assert all([('Sequence' not in str(w_.message)) for w_ in w]), 'A warning was raised for Sequence.' |
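The test expects a 'Sequence' warning for plain generators under multiprocessing but none for keras.utils.Sequence inputs, which are index-based and safe to shard across workers. A minimal Sequence with shapes matching the model above (RandomSequence itself is defined elsewhere in the test suite; this sketch is illustrative):

import numpy as np
from keras.utils import Sequence

class RandomBatches(Sequence):
    def __init__(self, batch_size=3, batches=4):
        self.batch_size = batch_size
        self.batches = batches

    def __len__(self):
        # Number of batches per epoch.
        return self.batches

    def __getitem__(self, idx):
        # Two inputs of width 3, two outputs of widths 4 and 3.
        x = [np.random.random((self.batch_size, 3)) for _ in range(2)]
        y = [np.random.random((self.batch_size, 4)), np.random.random((self.batch_size, 3))]
        return x, y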
class TRCMTreeView(TestCase):
def setUp(self):
self.c = RCMTreeView()
_fill_view(self.c)
def test_right_click(self):
with visible(self.c):
send_button_click(self.c, Gdk.BUTTON_SECONDARY)
send_button_click(self.c, Gdk.BUTTON_SECONDARY, primary=True)
def test_popup(self):
menu = Gtk.Menu()
selection = self.c.get_selection()
selection.set_mode(Gtk.SelectionMode.MULTIPLE)
with visible(self.c):
selection.select_all()
self.assertTrue(self.c.popup_menu(menu, Gdk.BUTTON_SECONDARY, 0)) |
def _git_str_subprocess(gitpath: str) -> Optional[str]:
if (not os.path.isdir(os.path.join(gitpath, '.git'))):
return None
try:
commit_hash = _call_git(gitpath, 'describe', '--match=NeVeRmAtCh', '--always', '--dirty')
date = _call_git(gitpath, 'show', '-s', '--format=%ci', 'HEAD')
branch = _call_git(gitpath, 'rev-parse', '--abbrev-ref', 'HEAD')
return '{} on {} ({})'.format(commit_hash, branch, date)
except (subprocess.CalledProcessError, OSError):
return None |
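_call_git is defined elsewhere; a plausible standard-library sketch of such a helper (its name and exact signature here are assumptions):

import subprocess

def _call_git(gitpath, *args):
    # Run a git command inside `gitpath` and return its stripped stdout.
    out = subprocess.check_output(('git', '-C', gitpath) + args,
                                  stderr=subprocess.DEVNULL)
    return out.decode('utf-8', 'replace').strip()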
class FICScorer(object):
def __init__(self):
self.imgToEval = {}
self.eval = {}
print('init COCO-EVAL scorer')
def score(self, GT, RES, IDs):
gts = {}
res = {}
for ID in IDs:
gts[ID] = GT[ID]
res[ID] = RES[ID]
print('tokenization...')
tokenizer = PTBTokenizer()
gts = tokenizer.tokenize(gts)
res = tokenizer.tokenize(res)
print('setting up scorers...')
scorers = [(Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), (Meteor(), 'METEOR'), (Rouge(), 'ROUGE_L'), (Cider(), 'CIDEr')]
for (scorer, method) in scorers:
print(('computing %s score...' % scorer.method()))
(score, scores) = scorer.compute_score(gts, res)
if (type(method) == list):
for (sc, scs, m) in zip(score, scores, method):
self.setEval(sc, m)
self.setImgToEvalImgs(scs, IDs, m)
print(('%s: %0.3f' % (m, sc)))
else:
self.setEval(score, method)
self.setImgToEvalImgs(scores, IDs, method)
print(('%s: %0.3f' % (method, score)))
return self.eval
def setEval(self, score, method):
self.eval[method] = score
def setImgToEvalImgs(self, scores, imgIds, method):
for (imgId, score) in zip(imgIds, scores):
if (imgId not in self.imgToEval):
self.imgToEval[imgId] = {}
self.imgToEval[imgId]['image_id'] = imgId
self.imgToEval[imgId][method] = score |
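A usage sketch: the scorer expects COCO-caption-style dicts mapping each id to a list of {'caption': ...} entries, which is the structure the PTBTokenizer it wraps consumes (the captions below are illustrative):

GT = {1: [{'caption': 'a dog runs on the beach'}]}
RES = {1: [{'caption': 'a dog running along a beach'}]}
scorer = FICScorer()
scores = scorer.score(GT, RES, IDs=[1])
print(scores)  # {'Bleu_1': ..., 'METEOR': ..., 'ROUGE_L': ..., 'CIDEr': ...}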
@resource('/v1/user/robots/<robot_shortname>')
@path_param('robot_shortname', 'The short name for the robot, without any user or organization prefix')
class UserRobot(ApiResource):
schemas = {'CreateRobot': CREATE_ROBOT_SCHEMA}
@require_user_admin()
@nickname('getUserRobot')
def get(self, robot_shortname):
parent = get_authenticated_user()
robot = model.get_user_robot(robot_shortname, parent)
return robot.to_dict(include_metadata=True, include_token=True)
@require_user_admin(disallow_for_restricted_users=True)
@nickname('createUserRobot')
@max_json_size(ROBOT_MAX_SIZE)
@validate_json_request('CreateRobot', optional=True)
def put(self, robot_shortname):
parent = get_authenticated_user()
create_data = (request.get_json(silent=True) or {})
try:
robot = model.create_user_robot(robot_shortname, parent, create_data.get('description'), create_data.get('unstructured_metadata'))
except InvalidRobotException as e:
raise request_error(message=str(e))
log_action('create_robot', parent.username, {'robot': robot_shortname, 'description': create_data.get('description'), 'unstructured_metadata': create_data.get('unstructured_metadata')})
return (robot.to_dict(include_metadata=True, include_token=True), 201)
@require_user_admin(disallow_for_restricted_users=True)
@nickname('deleteUserRobot')
def delete(self, robot_shortname):
parent = get_authenticated_user()
robot_username = format_robot_username(parent.username, robot_shortname)
if (not model.robot_has_mirror(robot_username)):
model.delete_robot(robot_username)
log_action('delete_robot', parent.username, {'robot': robot_shortname})
return ('', 204)
else:
raise request_error(message='Robot is being used by a mirror') |
def beams_to_bintable(beams):
c1 = Column(name='BMAJ', format='1E', array=[bm.major.to(u.arcsec).value for bm in beams], unit=u.arcsec.to_string('FITS'))
c2 = Column(name='BMIN', format='1E', array=[bm.minor.to(u.arcsec).value for bm in beams], unit=u.arcsec.to_string('FITS'))
c3 = Column(name='BPA', format='1E', array=[bm.pa.to(u.deg).value for bm in beams], unit=u.deg.to_string('FITS'))
c4 = Column(name='CHAN', format='1J', array=np.arange(len(beams)))
c5 = Column(name='POL', format='1J', array=[(bm.meta['POL'] if ('POL' in bm.meta) else 0) for bm in beams])
bmhdu = BinTableHDU.from_columns([c1, c2, c3, c4, c5])
bmhdu.header['EXTNAME'] = 'BEAMS'
bmhdu.header['EXTVER'] = 1
bmhdu.header['XTENSION'] = 'BINTABLE'
bmhdu.header['NCHAN'] = len(beams)
bmhdu.header['NPOL'] = len(set([bm.meta['POL'] for bm in beams if ('POL' in bm.meta)]))
return bmhdu |
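A usage sketch, assuming the radio-beam package provides the Beam objects with major/minor/pa quantities and a meta dict:

from astropy import units as u
from radio_beam import Beam

beams = [Beam(major=3 * u.arcsec, minor=2 * u.arcsec, pa=30 * u.deg),
         Beam(major=4 * u.arcsec, minor=2 * u.arcsec, pa=45 * u.deg)]
hdu = beams_to_bintable(beams)
print(hdu.header['NCHAN'], hdu.data['BMAJ'])  # 2, beam majors in arcsec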
def _parse_lookaround(source, info, behind, positive):
saved_flags = info.flags
saved_ignore = source.ignore_space
try:
subpattern = _parse_pattern(source, info)
finally:
source.ignore_space = saved_ignore
info.flags = saved_flags
source.expect(u')')
return LookAround(subpattern, behind=behind, positive=positive) |
def get_config(config_path):
with open(config_path) as config_file:
base_config = json.load(config_file)
if os.path.exists('job_parameters.json'):
with open('job_parameters.json') as param_config_file:
param_config = json.load(param_config_file)
else:
param_config = {}
config = base_config
for (section, d) in param_config.items():
for k in d:
assert (k in config[section])
config[section].update(d)
return config |
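A small demonstration of the override behaviour above: job_parameters.json may only patch keys that already exist in the matching section of the base config (paths here are illustrative):

import json
import os
import tempfile

old_cwd = os.getcwd()
with tempfile.TemporaryDirectory() as d:
    cfg_path = os.path.join(d, 'config.json')
    with open(cfg_path, 'w') as f:
        json.dump({'train': {'lr': 0.01, 'epochs': 10}}, f)
    os.chdir(d)  # get_config looks for job_parameters.json in the working directory
    with open('job_parameters.json', 'w') as f:
        json.dump({'train': {'lr': 0.001}}, f)
    print(get_config(cfg_path))  # {'train': {'lr': 0.001, 'epochs': 10}}
os.chdir(old_cwd)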
@pytest.mark.needs_connection
def test_calc_spectrum_multiple_molecules_otherinputs(verbose=True, plot=True, warnings=True, *args, **kwargs):
s = calc_spectrum(wavelength_min=4165, wavelength_max=5000, Tgas=1000, path_length=0.1, molecule=['CO2', 'CO'], mole_fraction=1, isotope={'CO2': '1,2', 'CO': '1,2,3'}, verbose=verbose)
assert (set(s.conditions['molecule']) == set(['CO2', 'CO']))
s = calc_spectrum(wavelength_min=4165, wavelength_max=5000, Tgas=1000, path_length=0.1, isotope={'CO2': '1,2', 'CO': '1,2,3'}, verbose=verbose)
assert (set(s.conditions['molecule']) == set(['CO2', 'CO']))
s = calc_spectrum(wavelength_min=4165, wavelength_max=5000, Tgas=1000, path_length=0.1, mole_fraction={'CO2': 0.2, 'CO': 0.8}, isotope='1,2', verbose=verbose)
assert (set(s.conditions['molecule']) == set(['CO2', 'CO']))
return True |
def get_espnetv2(width_scale, model_name=None, pretrained=False, root=os.path.join('~', '.torch', 'models'), **kwargs):
assert (width_scale <= 2.0)
branches = 4
layers = [1, 4, 8, 4]
max_dilation_list = [6, 5, 4, 3, 2]
max_dilations = [([max_dilation_list[i]] + ([max_dilation_list[(i + 1)]] * (li - 1))) for (i, li) in enumerate(layers)]
dilations = [[sorted([((k + 1) if (k < dij) else 1) for k in range(branches)]) for dij in di] for di in max_dilations]
base_channels = 32
weighed_base_channels = (math.ceil((float(math.floor((base_channels * width_scale))) / branches)) * branches)
channels_per_layers = [(weighed_base_channels * pow(2, (i + 1))) for i in range(len(layers))]
init_block_channels = (base_channels if (weighed_base_channels > base_channels) else weighed_base_channels)
final_block_channels = (1024 if (width_scale <= 1.5) else 1280)
channels = [([ci] * li) for (ci, li) in zip(channels_per_layers, layers)]
net = ESPNetv2(channels=channels, init_block_channels=init_block_channels, final_block_channels=final_block_channels, final_block_groups=branches, dilations=dilations, **kwargs)
if pretrained:
if ((model_name is None) or (not model_name)):
raise ValueError('Parameter `model_name` should be properly initialized for loading pretrained model.')
from .model_store import download_model
download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
return net |
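The dilation schedule can be inspected on its own; this reproduces the per-unit dilation lists computed above for the default configuration:

branches = 4
layers = [1, 4, 8, 4]
max_dilation_list = [6, 5, 4, 3, 2]
# First unit of each stage uses the stage's max dilation, later units the next one down.
max_dilations = [[max_dilation_list[i]] + [max_dilation_list[i + 1]] * (li - 1)
                 for i, li in enumerate(layers)]
# Each branch k gets dilation k+1, capped at the unit's maximum.
dilations = [[sorted([(k + 1) if k < dij else 1 for k in range(branches)])
              for dij in di] for di in max_dilations]
print(dilations[0])  # dilations of the four branches in the first unit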
def test_stream_write(stream, audio_source):
with stream.mainloop.lock:
stream.connect_playback()
assert stream.is_ready
with stream.mainloop.lock:
writable_size = stream.get_writable_size()
assert (writable_size > 0)
nbytes = min(1024, writable_size)
audio_data = audio_source.get_audio_data(nbytes)
with stream.mainloop.lock:
written = stream.write(audio_data.pointer, nbytes)
assert (written == nbytes)
assert stream.is_ready
with stream.mainloop.lock:
stream.delete()
assert stream.is_terminated |
class StringEnd(PositionToken):
def __init__(self):
super().__init__()
self.errmsg = 'Expected end of text'
def parseImpl(self, instring, loc, doActions=True):
if (loc < len(instring)):
raise ParseException(instring, loc, self.errmsg, self)
if (loc == len(instring)):
return ((loc + 1), [])
if (loc > len(instring)):
return (loc, [])
raise ParseException(instring, loc, self.errmsg, self) |
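A usage sketch with pyparsing: appending StringEnd() makes a grammar reject trailing input instead of silently matching a prefix:

from pyparsing import Word, alphas, StringEnd, ParseException

grammar = Word(alphas) + StringEnd()
print(grammar.parseString('hello'))      # ['hello']
try:
    grammar.parseString('hello world')   # trailing text -> ParseException
except ParseException as e:
    print('rejected:', e)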
class DHTID(int):
HASH_FUNC = hashlib.sha1
HASH_NBYTES = 20
RANGE = (MIN, MAX) = (0, (2 ** (HASH_NBYTES * 8)))
def __new__(cls, value: int):
assert (cls.MIN <= value < cls.MAX), f'DHTID must be in [{cls.MIN}, {cls.MAX}) but got {value}'
return super().__new__(cls, value)
@classmethod
def generate(cls, source: Optional[Any]=None, nbits: int=255):
source = (random.getrandbits(nbits).to_bytes(nbits, byteorder='big') if (source is None) else source)
source = (MSGPackSerializer.dumps(source) if (not isinstance(source, bytes)) else source)
raw_uid = cls.HASH_FUNC(source).digest()
return cls(int(raw_uid.hex(), 16))
def xor_distance(self, other: Union[(DHTID, Sequence[DHTID])]) -> Union[(int, List[int])]:
if isinstance(other, Iterable):
return list(map(self.xor_distance, other))
return (int(self) ^ int(other))
@classmethod
def longest_common_prefix_length(cls, *ids: DHTID) -> int:
ids_bits = [bin(uid)[2:].rjust((8 * cls.HASH_NBYTES), '0') for uid in ids]
return len(os.path.commonprefix(ids_bits))
def to_bytes(self, length=HASH_NBYTES, byteorder='big', *, signed=False) -> bytes:
return super().to_bytes(length, byteorder, signed=signed)
@classmethod
def from_bytes(cls, raw: bytes, byteorder='big', *, signed=False) -> DHTID:
return DHTID(super().from_bytes(raw, byteorder=byteorder, signed=signed))
def __repr__(self):
return f'{self.__class__.__name__}({hex(self)})'
def __bytes__(self):
return self.to_bytes() |
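A usage sketch for the class above: generate two ids, compute their XOR distance, and round-trip through the 20-byte encoding (assumes hashlib, random, and os are imported as the class requires):

id1, id2 = DHTID.generate(), DHTID.generate()
assert DHTID.from_bytes(bytes(id1)) == id1       # round-trip via __bytes__
print(hex(id1.xor_distance(id2)))                # Kademlia-style distance
print(DHTID.longest_common_prefix_length(id1, id2))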
def main():
args = create_argparser().parse_args()
dist.init()
logger.configure()
torch.set_num_threads(40)
logger.log('creating data loader...')
if (args.batch_size == (- 1)):
batch_size = (args.global_batch_size // dist.get_world_size())
if ((args.global_batch_size % dist.get_world_size()) != 0):
logger.log(f'warning, using smaller global_batch_size of {(dist.get_world_size() * batch_size)} instead of {args.global_batch_size}')
else:
batch_size = args.batch_size
if (args.dataset == 'coco'):
transform = transforms.Compose([transforms.Resize(512), transforms.RandomCrop(512), transforms.ToTensor(), (lambda x: ((2 * x) - 1))])
dataset = COCODataset(args.dataset_path, subset_name='train2014', transform=transform)
dataset_sampler = InfiniteSampler(dataset=dataset, rank=dist.get_rank(), num_replicas=dist.get_world_size(), seed=0)
data = iter(torch.utils.data.DataLoader(dataset=dataset, sampler=dataset_sampler, batch_size=batch_size))
else:
raise ValueError(f'Unsupported dataset {args.dataset}...')
logger.log('creating model and diffusion...')
ema_scale_fn = create_ema_and_scales_fn(target_ema_mode=args.target_ema_mode, start_ema=args.start_ema, scale_mode=args.scale_mode, start_scales=args.start_scales, end_scales=args.end_scales, total_steps=args.total_training_steps, distill_steps_per_iter=args.distill_steps_per_iter)
logger.log(f'loading the teacher model from {args.teacher_model_path}')
teacher_pipe = StableDiffusionPipeline.from_pretrained(args.teacher_model_path, torch_dtype=torch.float16, variant='fp16').to(dist.dev())
teacher_pipe.scheduler = DDIMScheduler.from_config(teacher_pipe.scheduler.config)
teacher_pipe.scheduler.final_alpha_cumprod = torch.tensor(1.0)
teacher_model = teacher_pipe.unet
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(args.teacher_model_path, torch_dtype=torch.float32)
pipe.vae = teacher_pipe.vae
pipe.text_encoder = teacher_pipe.text_encoder
pipe.scheduler = teacher_pipe.scheduler
pipe.to(dist.dev())
model = pipe.unet.train()
logger.log('creating the target model')
target_model = deepcopy(model).to(dist.dev())
for (dst, src) in zip(target_model.parameters(), model.parameters()):
assert (dst.data == src.data).all()
assert (len(list(target_model.buffers())) == len(list(model.buffers())) == len(list(teacher_model.buffers())) == 0)
diffusion = DenoiserSD(pipe, sigma_data=0.5, loss_norm=args.loss_norm, num_timesteps=args.start_scales, weight_schedule=args.weight_schedule, use_fp16=args.use_fp16)
logger.log('training...')
CMTrainLoop(model=model, diffusion=diffusion, target_model=target_model, teacher_model=teacher_model, teacher_pipe=teacher_pipe, ema_scale_fn=ema_scale_fn, total_training_steps=args.total_training_steps, data=data, batch_size=batch_size, microbatch=args.microbatch, lr=args.lr, ema_rate=args.ema_rate, log_interval=args.log_interval, save_interval=args.save_interval, resume_checkpoint=args.resume_checkpoint, use_fp16=args.use_fp16, fp16_scale_growth=args.fp16_scale_growth, weight_decay=args.weight_decay, guidance_scale=args.guidance_scale, use_random_guidance_scales=args.use_random_guidance_scales, coco_ref_stats_path=args.coco_ref_stats_path, inception_path=args.inception_path, coco_max_cnt=args.coco_max_cnt, coco_prompt_path=args.coco_prompt_path).run_loop() |