text stringlengths 957 885k |
|---|
# Repository: bennuttall/piwheels
# The piwheels project
# Copyright (c) 2017 <NAME> <https://github.com/bennuttall>
# Copyright (c) 2017 <NAME> <<EMAIL>>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Defines the :class:`SlaveDriver` task; see class for more details.
.. autoclass:: SlaveDriver
:members:
"""
from datetime import datetime, timedelta, timezone
from .. import const, protocols, tasks, transport
from ..states import SlaveState, FileState
from .the_oracle import DbClient
from .file_juggler import FsClient
UTC = timezone.utc
class SlaveDriver(tasks.PausingTask):
    """
    This task handles interaction with the build slaves using the slave
    protocol. Interaction is driven by the slaves (i.e. the master doesn't
    *push* jobs, rather the slaves *request* a job and the master replies with
    the next (package, version) tuple from the internal "builds" queue).

    The task also incidentally interacts with several other queues: the
    internal "status" queue is sent details of every reply sent to a build
    slave (the :meth:`~.PiWheelsMaster.main_loop` method passes this
    information on to any listening monitors). Also, the internal "indexes"
    queue is informed of any packages that need web page indexes re-building
    (as a result of a successful build).
    """
    # pylint: disable=too-many-instance-attributes

    name = 'master.slave_driver'

    def __init__(self, config):
        super().__init__(config, control_protocol=protocols.slave_driver_control)
        # Per-ABI build queues: {abi: [(package, version), ...]}; replaced
        # wholesale each time TheArchitect sends a new queue (handle_build)
        self.abi_queues = {}
        # Per-ABI exclusion buffer: {abi: {(package, version): expiry}};
        # entries are builds recently handed to a slave or deleted upstream,
        # which must not be handed out again until their expiry passes
        self.excluded_builds = {}
        # ROUTER socket over which all build slaves talk to the master
        slave_queue = self.socket(
            transport.ROUTER, protocol=protocols.slave_driver)
        slave_queue.bind(config.slave_queue)
        self.register(slave_queue, self.handle_slave)
        # PULL socket on which TheArchitect delivers refreshed build queues
        builds_queue = self.socket(
            transport.PULL, protocol=reversed(protocols.the_architect))
        builds_queue.hwm = 10
        builds_queue.bind(config.builds_queue)
        self.register(builds_queue, self.handle_build)
        # PUSH socket reporting slave state changes to listening monitors;
        # SlaveState publishes to it directly via the class attribute below
        self.status_queue = self.socket(
            transport.PUSH, protocol=protocols.monitor_stats)
        self.status_queue.hwm = 10
        self.status_queue.connect(const.INT_STATUS_QUEUE)
        SlaveState.status_queue = self.status_queue
        # REQ socket used to ask TheScribe to (re-)generate web pages
        self.web_queue = self.socket(
            transport.REQ, protocol=reversed(protocols.the_scribe))
        self.web_queue.connect(config.web_queue)
        # PUSH socket feeding build-queue statistics to BigBrother
        self.stats_queue = self.socket(
            transport.PUSH, protocol=reversed(protocols.big_brother))
        self.stats_queue.connect(config.stats_queue)
        # REP socket on which CloudGazer requests package/version deletions
        delete_queue = self.socket(
            transport.REP, protocol=reversed(protocols.cloud_gazer))
        delete_queue.bind(const.SKIP_QUEUE)
        self.register(delete_queue, self.handle_delete)
        self.db = DbClient(config, self.logger)
        self.fs = FsClient(config, self.logger)
        # Mapping of ZMQ address -> SlaveState for every known build slave
        self.slaves = {}
        self.pypi_simple = config.pypi_simple
        # Periodically cull slaves that have stopped responding
        self.every(timedelta(seconds=10), self.remove_expired)

    def close(self):
        # Release RPC clients and detach the shared status queue before the
        # base class tears down the sockets
        self.fs.close()
        self.db.close()
        SlaveState.status_queue = None
        super().close()

    def list_slaves(self):
        """
        Additional task control method to trigger a "HELLO" message to the
        internal control queue. See :meth:`~.tasks.Task.quit` for more
        information.
        """
        self._ctrl('HELLO')

    def kill_slave(self, slave_id):
        """
        Additional task control method to trigger a "KILL" message to the
        internal control queue. See :meth:`handle_control` for more
        information.
        """
        self._ctrl('KILL', slave_id)

    def sleep_slave(self, slave_id):
        """
        Additional task control method to trigger a "SLEEP" message to the
        internal control queue. See :meth:`handle_control` for more
        information.
        """
        self._ctrl('SLEEP', slave_id)

    def skip_slave(self, slave_id):
        """
        Additional task control method to trigger a "SKIP" message to the
        internal control queue. See :meth:`handle_control` for more
        information.
        """
        self._ctrl('SKIP', slave_id)

    def wake_slave(self, slave_id):
        """
        Additional task control method to trigger a "WAKE" message to the
        internal control queue. See :meth:`handle_control` for more
        information.
        """
        self._ctrl('WAKE', slave_id)

    def remove_expired(self):
        """
        Remove slaves which have exceeded their timeout.
        """
        # Snapshot the expired entries first; we must not mutate self.slaves
        # while iterating over it
        expired = {
            address: slave
            for address, slave in self.slaves.items()
            if slave.expired
        }
        for address, slave in expired.items():
            if slave.reply[0] == 'BUILD':
                package, version = slave.reply[1]
                self.logger.warning(
                    'slave %d (%s): timed out while building %s %s for %s',
                    slave.slave_id, slave.label, package, version,
                    slave.native_abi)
            else:
                self.logger.warning(
                    'slave %d (%s): timed out during %s',
                    slave.slave_id, slave.label, slave.reply[0])
            # Send a fake DIE message to the status queue so that listening
            # monitors know to remove the entry
            # NOTE(review): do_bye uses protocols.NoData as the DIE payload;
            # here it is None — confirm both encode identically on the wire
            slave.reply = ('DIE', None)
            del self.slaves[address]

    def handle_control(self, queue):
        """
        Handle incoming requests to the internal control queue.

        This class understands a couple of extra control messages unique to it,
        specifically "KILL" to tell a build slave to terminate, "SKIP" to tell
        a build slave to terminate its current build immediately, and "HELLO"
        to cause all "HELLO" messages from build slaves to be replayed (for the
        benefit of a newly attached monitor process).
        """
        try:
            super().handle_control(queue)
        except tasks.TaskControl as ctrl:
            if ctrl.msg in ('KILL', 'SLEEP', 'SKIP', 'WAKE'):
                # data of None means "apply to every slave"; otherwise only
                # the slave with the matching id is affected
                for slave in self.slaves.values():
                    if ctrl.data is None or slave.slave_id == ctrl.data:
                        {
                            'KILL': slave.kill,
                            'SLEEP': slave.sleep,
                            'SKIP': slave.skip,
                            'WAKE': slave.wake,
                        }[ctrl.msg]()
            elif ctrl.msg == 'HELLO':
                # Replay every slave's HELLO for newly attached monitors
                for slave in self.slaves.values():
                    slave.hello()
            else:
                raise  # pragma: no cover

    def handle_build(self, queue):
        """
        Refresh the ABI-specific queues of package versions waiting to be
        built. The queues are limited to 1000 packages per ABI, and are kept as
        lists ordered by release date. When a message arrives from
        :class:`TheArchitect` it refreshes (replaces) all current queues. There
        is, however, still a duplication possibility as :class:`TheArchitect`
        doesn't know what packages are actively being built; this method
        handles filtering out such packages.

        Even if the active builds fail (because build slaves crash, or the
        network dies) this doesn't matter as a future re-run of the build
        queue query will return these packages again, and if no build slaves
        are actively working on them at that time they will then be retried.
        """
        try:
            msg, new_queues = queue.recv_msg()
        except IOError as e:
            self.logger.error(str(e))
        else:
            self.logger.info('refreshing build-queue')
            now = datetime.now(tz=UTC)
            build_abis = self.db.get_build_abis()
            # Prune expired entries from the excluded_builds buffer and add
            # empty dicts for new ABIs
            for abi in build_abis:
                try:
                    excluded = self.excluded_builds[abi]
                except KeyError:
                    excluded = {}
                self.excluded_builds[abi] = {
                    key: expires
                    for key, expires in excluded.items()
                    if expires > now
                }
            # Set up the new queues without recent builds (and converting
            # list-pairs into tuples); a (package, None) exclusion entry
            # means "all versions of package" (see handle_delete)
            self.abi_queues = {
                abi: [
                    (package, version) for package, version in new_queue
                    if (package, version) not in self.excluded_builds[abi]
                    and (package, None) not in self.excluded_builds[abi]
                ]
                for abi, new_queue in new_queues.items()
            }
            # Guarantee every known ABI has a (possibly empty) queue
            for abi in build_abis:
                self.abi_queues.setdefault(abi, [])
            self.stats_queue.send_msg('STATBQ', {
                abi: len(queue)
                for (abi, queue) in self.abi_queues.items()
            })

    def handle_delete(self, queue):
        """
        Handle package or version deletion requests.

        When the PyPI upstream deletes a version or package, the
        :class:`CloudGazer` task requests that other tasks perform the deletion
        on its behalf. In the case of this task, this involves cancelling any
        pending builds of that package (version), and ignoring any builds
        involving that package (version) in the next queue update from
        :class:`TheArchitect`.
        """
        msg, data = queue.recv_msg()
        # DELVER carries (package, version); DELPKG carries just the package
        # name and a version of None meaning "all versions"
        if msg == 'DELVER':
            del_pkg, del_ver = data
        elif msg == 'DELPKG':
            del_pkg, del_ver = data, None
        self.logger.info('marking package %s %s as excluded', del_pkg, del_ver)
        for abi in self.db.get_build_abis():
            try:
                excluded = self.excluded_builds[abi]
            except KeyError:
                excluded = {}
                self.excluded_builds[abi] = excluded
            # Exclude the deleted package/version for the next hour so it
            # survives the next few queue refreshes from TheArchitect
            excluded[(del_pkg, del_ver)] = (
                datetime.now(tz=UTC) + timedelta(hours=1))
            try:
                build_queue = self.abi_queues[abi]
            except KeyError:
                build_queue = []
            # Strip the deleted package (version) from the pending queue;
            # (pkg, None) matches every version of pkg
            self.abi_queues[abi] = [
                (pkg, ver)
                for pkg, ver in build_queue
                if (del_pkg, del_ver) not in ((pkg, ver), (pkg, None))
            ]
        # Abort any build of the deleted package currently in progress
        for slave in self.slaves.values():
            if slave.reply[0] == 'BUILD':
                build_pkg, build_ver = slave.reply[1]
                if build_pkg == del_pkg and del_ver in (None, build_ver):
                    self.logger.info('skipping deleted package %s %s',
                                     del_pkg, del_ver)
                    slave.skip()
        queue.send_msg('OK')

    def handle_slave(self, queue):
        """
        Handle requests from build slaves.

        See the :doc:`slaves` chapter for an overview of the protocol for
        messages between build slaves and :class:`SlaveDriver`. This method
        retrieves the message from the build slave, finds the associated
        :class:`~.states.SlaveState` and updates it with the message, then
        calls the appropriate message handler. The handler will be expected to
        return a reply (in the usual form of a list of strings) or ``None`` if
        no reply should be sent (e.g. for a final "BYE" message).
        """
        try:
            address, msg, data = queue.recv_addr_msg()
        except IOError as e:
            self.logger.error(str(e))
            return
        try:
            slave = self.slaves[address]
        except KeyError:
            # Unknown address: only HELLO may start a conversation; anything
            # else is a protocol violation and is dropped
            if msg == 'HELLO':
                slave = SlaveState(address, *data)
            else:
                self.logger.error('invalid first message from slave: %s', msg)
                return
        slave.request = msg, data
        # Dispatch to the per-message handler; each returns (reply, data) or
        # (None, None) when no reply should be sent
        handler = {
            'HELLO': self.do_hello,
            'BYE': self.do_bye,
            'IDLE': self.do_idle,
            'BUSY': self.do_busy,
            'BUILT': self.do_built,
            'SENT': self.do_sent,
        }[msg]
        msg, data = handler(slave)
        if msg is not None:
            # Recording the reply on the state also publishes it to the
            # status queue (see SlaveState.status_queue in __init__)
            slave.reply = msg, data
            queue.send_addr_msg(address, msg, data)

    def do_hello(self, slave):
        """
        Handler for the build slave's initial "HELLO" message. This associates
        the specified *slave* state with the slave's address and returns
        "ACK" with the master's id for the slave (the id communicated back
        simply for consistency of logging; administrators can correlate master
        log messages with slave log messages when both have the same id
        number; we can't use IP address for this as multiple slaves can run on
        one machine).

        :param SlaveState slave:
            The object representing the current status of the build slave.
        """
        self.logger.warning(
            'slave %d (%s): hello (build_timeout=%s, busy_timeout=%s, abi=%s, '
            'platform=%s, os_name=%s, os_version=%s, board_revision=%s, '
            'board_serial=%s)',
            slave.slave_id, slave.label, slave.build_timeout,
            slave.busy_timeout, slave.native_abi, slave.native_platform,
            slave.os_name, slave.os_version, slave.board_revision,
            slave.board_serial)
        self.slaves[slave.address] = slave
        return 'ACK', [slave.slave_id, self.pypi_simple]

    def do_bye(self, slave):
        """
        Handler for the build slave's final "BYE" message upon shutdown. This
        removes the associated state from the internal ``slaves`` dict.

        :param SlaveState slave:
            The object representing the current status of the build slave.
        """
        self.logger.warning('slave %d (%s): shutdown',
                            slave.slave_id, slave.label)
        # Send a fake DIE message to the status queue so that listening
        # monitors know to remove the entry
        slave.reply = 'DIE', protocols.NoData
        del self.slaves[slave.address]
        # (None, None) tells handle_slave not to send any reply
        return None, None

    def do_idle(self, slave):
        """
        Handler for the build slave's "IDLE" message (which is effectively the
        slave requesting work). If the master wants to terminate the slave,
        it sends back "BYE". If the build queue (for the slave's ABI) is empty
        or the task is currently paused, "SLEEP" is returned indicating the
        slave should wait a while and then try again.

        If a job can be retrieved from the (ABI specific) build queue, then
        a "BUILD" message is sent back with the required package and version.

        :param SlaveState slave:
            The object representing the current status of the build slave.
        """
        # IDLE is only legal after ACK (fresh slave), SLEEP, or DONE
        if slave.reply[0] not in ('ACK', 'SLEEP', 'DONE'):
            self.logger.error(
                'slave %d (%s): protocol error (IDLE after %s)',
                slave.slave_id, slave.label, slave.reply[0])
            return 'DIE', protocols.NoData
        elif slave.killed:
            return 'DIE', protocols.NoData
        elif self.paused:
            self.logger.info(
                'slave %d (%s): sleeping because master is paused',
                slave.slave_id, slave.label)
            return 'SLEEP', True
        else:
            try:
                abi_queue = self.abi_queues[slave.native_abi]
                excluded_builds = self.excluded_builds[slave.native_abi]
            except KeyError:
                # NOTE(review): if only the second lookup raises, the ABI's
                # queue is discarded for this call and excluded_builds stays
                # unbound — harmless while handle_build populates both dicts
                # for every ABI, but worth confirming
                abi_queue = []
            try:
                while abi_queue:
                    # Queue is ordered; pop from the front
                    package, version = abi_queue.pop(0)
                    if (package, version) not in excluded_builds:
                        self.logger.info(
                            'slave %d (%s): build %s %s',
                            slave.slave_id, slave.label, package, version)
                        # Exclude the job until the slave's build timeout so
                        # no other slave picks it up concurrently
                        excluded_builds[(package, version)] = (
                            datetime.now(tz=UTC) + slave.build_timeout)
                        return 'BUILD', [package, version]
                self.logger.info(
                    'slave %d (%s): sleeping because no builds',
                    slave.slave_id, slave.label)
                return 'SLEEP', False
            finally:
                # Only push queue stats if there's space in the stats_queue
                # (it's not essential; just a nice-to-have)
                if self.stats_queue.poll(0, transport.POLLOUT):
                    self.stats_queue.send_msg('STATBQ', {
                        abi: len(queue)
                        for (abi, queue) in self.abi_queues.items()
                    })

    def do_busy(self, slave):
        """
        Handler for the build slave's "BUSY" message, which is sent
        periodically during package builds. If the slave fails to respond with
        a BUSY ping for a duration longer than :attr:`SlaveState.busy_timeout`
        then the master will assume the slave has died and remove it from the
        internal state mapping (if the slave happens to resurrect itself later
        the master will simply treat it as a new build slave).

        In response to "BUSY" the master can respond "CONT" to indicate the
        build should continue processing, or "DONE" to indicate that the build
        slave should immediately terminate and discard the build and return to
        "IDLE" state.
        """
        if slave.skipped:
            self.logger.info('slave %d (%s): build skipped',
                             slave.slave_id, slave.label)
            return 'DONE', protocols.NoData
        else:
            return 'CONT', protocols.NoData

    def do_built(self, slave):
        """
        Handler for the build slave's "BUILT" message, which is sent after an
        attempted package build succeeds or fails. The handler logs the result
        in the database and, if files have been generated by the build, informs
        the :class:`~.file_juggler.FileJuggler` task to expect a file transfer
        before sending "SEND" back to the build slave with the required
        filename.

        If no files were generated (e.g. in the case of a failed build, or a
        degenerate success), "DONE" is returned indicating that the build slave
        is free to discard all resources generated during the build and return
        to its idle state.
        """
        # BUILT is only legal directly after a BUILD reply
        if slave.reply[0] != 'BUILD':
            self.logger.error(
                'slave %d (%s): protocol error (BUILT after %s)',
                slave.slave_id, slave.label, slave.reply[0])
            return 'DIE', protocols.NoData
        elif slave.skipped:
            # If the build was skipped, throw away the result without recording
            # success or failure (it may have been skipped because we know
            # there's something wrong with the slave)
            self.logger.info('slave %d (%s): build skipped',
                             slave.slave_id, slave.label)
            return 'DONE', protocols.NoData
        else:
            # Synthesize armv6l wheel entries from any armv7l ones (see the
            # module-level build_armv6l_hack function)
            build_armv6l_hack(slave.build)
            if slave.build.status and not slave.build.transfers_done:
                # Successful build with files still to transfer: prime the
                # FileJuggler and ask the slave to send the next file
                self.logger.info('slave %d (%s): build succeeded',
                                 slave.slave_id, slave.label)
                self.fs.expect(slave.slave_id,
                               slave.build.files[slave.build.next_file])
                self.logger.info('slave %d (%s): send %s',
                                 slave.slave_id, slave.label,
                                 slave.build.next_file)
                return 'SEND', slave.build.next_file
            else:
                # Failed build (or nothing to transfer): record it and have
                # TheScribe refresh the log and project pages
                self.logger.info('slave %d (%s): build failed',
                                 slave.slave_id, slave.label)
                self.db.log_build(slave.build)
                self.web_queue.send_msg('LOG', (
                    slave.build.build_id, slave.build.output))
                self.web_queue.recv_msg()
                self.web_queue.send_msg('PROJECT', slave.build.package)
                self.web_queue.recv_msg()
                return 'DONE', protocols.NoData

    def do_sent(self, slave):
        """
        Handler for the build slave's "SENT" message indicating that it's
        finished sending the requested file to :class:`FileJuggler`. The
        :class:`FsClient` RPC mechanism is used to ask :class:`FileJuggler` to
        verify the transfer against the stored hash and, if this is successful,
        a message is sent to :class:`TheScribe` to regenerate the package's
        index.

        If further files remain to be transferred, another "SEND" message is
        returned to the build slave. Otherwise, "DONE" is sent to free all
        build resources.

        If a transfer fails to verify, another "SEND" message with the same
        filename is returned to the build slave.
        """
        # SENT is only legal directly after a SEND reply
        if slave.reply[0] != 'SEND':
            self.logger.error(
                'slave %d (%s): protocol error (SENT after %s)',
                slave.slave_id, slave.label, slave.reply[0])
            return 'DIE', protocols.NoData
        elif self.fs.verify(slave.slave_id, slave.build.package):
            slave.build.files[slave.build.next_file].verified()
            self.logger.info(
                'slave %d (%s): verified transfer of %s',
                slave.slave_id, slave.label, slave.reply[1])
            if slave.build.transfers_done:
                # All files received: record the build and have TheScribe
                # rebuild both the package index and project page
                self.db.log_build(slave.build)
                self.web_queue.send_msg('LOG', (
                    slave.build.build_id, slave.build.output))
                self.web_queue.recv_msg()
                self.web_queue.send_msg('BOTH', slave.build.package)
                self.web_queue.recv_msg()
                return 'DONE', protocols.NoData
            else:
                # More files to go: prime FileJuggler for the next one
                self.fs.expect(slave.slave_id,
                               slave.build.files[slave.build.next_file])
                self.logger.info('slave %d (%s): send %s',
                                 slave.slave_id, slave.label,
                                 slave.build.next_file)
                return 'SEND', slave.build.next_file
        else:
            # Hash mismatch: ask the slave to re-send the same file
            self.logger.info('slave %d (%s): re-send %s',
                             slave.slave_id, slave.label,
                             slave.build.next_file)
            return 'SEND', slave.build.next_file
def build_armv6l_hack(build):
    """
    A dirty hack for armv6l wheels; if the build contains any arch-specific
    wheels for armv7l, generate equivalent armv6l entries from them (with
    the transferred flag set to True as nothing actually needs transferring).
    """
    # Snapshot the values: build.files is mutated inside the loop
    for state in list(build.files.values()):
        if state.platform_tag != 'linux_armv7l':
            continue
        # Swap the platform suffix; 'linux_armv7l.whl' is 16 characters
        v6_name = state.filename[:-16] + 'linux_armv6l.whl'
        if v6_name in build.files:
            continue
        build.files[v6_name] = FileState(
            v6_name, state.filesize, state.filehash, state.package_tag,
            state.package_version_tag, state.py_version_tag,
            state.abi_tag, 'linux_armv6l', state.requires_python,
            state.dependencies, True)
|
from django.test import TestCase
import sys
import pytest
class Test_MixStyle(TestCase):
    """
    Demonstrates mixing unittest, classic-xunit and pytest-fixture
    setup/teardown styles on a single Django TestCase, printing the name of
    each hook as it runs so the invocation order can be observed.
    """

    # unittest style
    @classmethod
    def setUpClass(cls):
        # unittest hook: runs once before any test in the class
        print('mix style (unittest) - setup > {}'.format(sys._getframe().f_code.co_name))

    @classmethod
    def tearDownClass(cls):
        # unittest hook: runs once after all tests in the class
        print('mix style (unittest) - teardown > {}'.format(sys._getframe().f_code.co_name))

    def setUp(self):
        # unittest hook: runs before each test method
        print('mix style (unittest) - setup > {}'.format(sys._getframe().f_code.co_name))

    def tearDown(self):
        # unittest hook: runs after each test method
        print('mix style (unittest) - teardown > {}'.format(sys._getframe().f_code.co_name))

    # classic xunit style
    # NOTE(review): pytest's classic setup_class/setup_method hooks are
    # normally not invoked on unittest.TestCase subclasses — presumably these
    # exist to demonstrate exactly which hooks fire; confirm by running
    @classmethod
    def setup_class(cls):
        print('mix style (classic xunit) - setup > {}'.format(sys._getframe().f_code.co_name))

    @classmethod
    def teardown_class(cls):
        print('mix style (classic xunit) - teardown > {}'.format(sys._getframe().f_code.co_name))

    def setup_method(self, method):
        print('mix style (classic xunit) - setup > {}'.format(sys._getframe().f_code.co_name))

    def teardown_method(self, method):
        print('mix style (classic xunit) - teardown > {}'.format(sys._getframe().f_code.co_name))

    # fixture style: one autouse fixture per scope; each registers a
    # finalizer so the teardown side is printed as well (the nested
    # finalizer's own name is what co_name reports inside it)
    @pytest.fixture(autouse=True)
    def scope_default(self, request):
        # default scope is 'function'
        print('mix style (fixture) - setup > {}'.format(sys._getframe().f_code.co_name))

        def fin_scope_default():
            print('mix style (fixture) - teardown > {}'.format(sys._getframe().f_code.co_name))
        request.addfinalizer(fin_scope_default)

    @pytest.fixture(autouse=True, scope='function')
    def scope_function(self, request):
        print('mix style (fixture) - setup > {}'.format(sys._getframe().f_code.co_name))

        def fin_scope_function():
            print('mix style (fixture) - teardown > {}'.format(sys._getframe().f_code.co_name))
        request.addfinalizer(fin_scope_function)

    @pytest.fixture(autouse=True, scope='class')
    def scope_class(self, request):
        print('mix style (fixture) - setup > {}'.format(sys._getframe().f_code.co_name))

        def fin_scope_class():
            print('mix style (fixture) - teardown > {}'.format(sys._getframe().f_code.co_name))
        request.addfinalizer(fin_scope_class)

    @pytest.fixture(autouse=True, scope='module')
    def scope_module(self, request):
        print('mix style (fixture) - setup > {}'.format(sys._getframe().f_code.co_name))

        def fin_scope_module():
            print('mix style (fixture) - teardown > {}'.format(sys._getframe().f_code.co_name))
        request.addfinalizer(fin_scope_module)

    @pytest.fixture(autouse=True, scope='session')
    def scope_session(self, request):
        print('mix style (fixture) - setup > {}'.format(sys._getframe().f_code.co_name))

        def fin_scope_session():
            print('mix style (fixture) - teardown > {}'.format(sys._getframe().f_code.co_name))
        request.addfinalizer(fin_scope_session)

    # test method
    def testSpam(self):
        print('mix style > [{}]'.format(sys._getframe().f_code.co_name))
        assert True

    def testHam(self):
        print('mix style > [{}]'.format(sys._getframe().f_code.co_name))
        assert True
"""xiRT main module to run the training and prediction."""
import argparse
import logging
import os
import pickle
import sys
import time
from datetime import datetime
import numpy as np
import pandas as pd
import yaml
from xirt import __version__ as xv
from xirt import features as xf
from xirt import predictor as xr
from xirt import xirtnet, qc
import matplotlib
matplotlib.use('Agg')
logger = logging.getLogger(__name__)
def arg_parser():  # pragma: no cover
    """
    Construct the command line interface for xiRT.

    Returns:
        argparse.ArgumentParser: configured parser; call parse_args() on it
        to obtain the runtime options.
    """
    description = """
    xiRT is a machine learning tool for the (multidimensional) RT prediction of linear and
    crosslinked peptides. Use --help to see the command line arguments.
    Visit the documentation to get more information:
    https://xirt.readthedocs.io/en/latest/
    Current Version: {}
    """.format(xv.__version__)
    parser = argparse.ArgumentParser(description=description)
    # Required options: (short flag, long flag, dest, help text)
    mandatory = [
        ("-i", "--in_peptides", "in_peptides",
         "Input peptide table to learn (and predict) the retention times."),
        ("-o", "--out_dir", "out_dir",
         "Directory to store the results"),
        ("-x", "--xirt_params", "xirt_params",
         "YAML parameter file to control xiRT's deep learning architecture."),
        ("-l", "--learning_params", "learning_params",
         "YAML parameter file to control training and testing splits and data."),
    ]
    for short_flag, long_flag, dest, help_text in mandatory:
        parser.add_argument(short_flag, long_flag, help=help_text,
                            required=True, action="store", dest=dest)
    # --write / --no-write toggle the writing of full prediction files
    write_help = ("Flag for writing result prediction files. If false only summaries"
                  "are written (default: --write).")
    parser.add_argument('--write', dest='write', action='store_true',
                        help=write_help)
    parser.add_argument('--no-write', dest='write', action='store_false',
                        help=write_help)
    parser.set_defaults(write=True)
    return parser
def xirt_runner(peptides_file, out_dir, xirt_loc, setup_loc, nrows=None, perform_qc=True,
write=True, write_dummy=True):
"""
Execute xiRT, train a model or generate predictions for RT across multiple RT domains.
Args:
peptides_file: str, location of the input psm/csm file
out_dir: str, folder to store the results to
xirt_loc: str, location of the yaml file for xirt
setup_loc: str, location of the setup yaml
single_pep_predictions:
nrows: int, number of rows to sample (for quicker testing purposes only)
perform_qc: bool, indicates if qc plots should be done.
write: bool, indicates result predictions should be stored
write_dummy: bool if true dummy txt file is written after execution (for snakemake usag)
Returns:
None
"""
start_time = time.time()
xirt_params = yaml.load(open(xirt_loc), Loader=yaml.FullLoader)
learning_params = yaml.load(open(setup_loc), Loader=yaml.FullLoader)
matches_df = pd.read_csv(peptides_file, nrows=nrows)
logger.info("xi params: {}".format(xirt_loc))
logger.info("learning_params: {}".format(setup_loc))
logger.info("peptides: {}".format(peptides_file))
# convenience short cuts
if learning_params["train"]["mode"] == "train":
n_splits = 1
elif learning_params["train"]["mode"] == "predict":
n_splits = 0
else:
n_splits = learning_params["train"]["ncv"]
test_size = learning_params["train"]["test_frac"]
outpath = os.path.abspath(out_dir)
xirt_params["callbacks"]["callback_path"] = os.path.join(outpath, "callbacks")
# preprocess training data
training_data = xr.preprocess(matches_df,
sequence_type=learning_params["train"]["sequence_type"],
max_length=learning_params["preprocessing"]["max_length"],
cl_residue=learning_params["preprocessing"]["cl_residue"],
fraction_cols=xirt_params["predictions"]["fractions"])
# set training index by FDR and duplicates
training_data.set_fdr_mask(fdr_cutoff=learning_params["train"]["fdr"],
str_filter=learning_params["preprocessing"]["filter"])
training_data.set_unique_shuffled_sampled_training_idx(
sample_frac=learning_params["train"]["sample_frac"],
random_state=learning_params["train"]["sample_state"])
# adjust RT if necessary to guarantee smooth learning
# gradient length > 30 minutes (1500 seconds)
for cont_col in xirt_params["predictions"]["continues"]:
if training_data.psms[cont_col].max() > 1500:
training_data.psms[cont_col] = training_data.psms[cont_col] / 60.0
# init neural network structure
xirtnetwork = xirtnet.xiRTNET(xirt_params, input_dim=training_data.features1.shape[1])
# get the columns where the RT information is stored
frac_cols = sorted([xirtnetwork.output_p[tt.lower() + "-column"] for tt in
xirt_params["predictions"]["fractions"]])
cont_cols = sorted([xirtnetwork.output_p[tt.lower() + "-column"] for tt in
xirt_params["predictions"]["continues"]])
# init data structures for results
histories = []
model_summary = []
# manual accuracy for ordinal data
accuracies_all = []
if "ordinal" in ";".join([xirtnetwork.output_p[i + "-column"] for i in xirtnetwork.tasks]):
has_ordinal = True
else:
has_ordinal = False
cv_counter = 1
# perform crossvalidation
# train on n-1 fold, use test_size from n-1 folds for validation and test/predict RT
# on the remaining fold
logger.info("Starting crossvalidation (nfolds={})".format(n_splits))
start_timecv = time.time()
for train_idx, val_idx, pred_idx in training_data.iter_splits(n_splits=n_splits,
test_size=test_size):
logger.info("---------------------------------------------------------")
logger.info("Starting crossvalidation iteration: {}".format(cv_counter))
logger.info("# Train peptides: {}".format(len(train_idx)))
logger.info("# Validation peptides: {}".format(len(val_idx)))
logger.info("# Prediction peptides: {}".format(len(pred_idx)))
logger.info("# Rescoring Candidates peptides: {}".format(len(training_data.predict_idx)))
logger.info("Train indices: {}".format(train_idx[0:10]))
logger.info("Validation indices: {}".format(val_idx[0:10]))
logger.info("Prediction indices: {}".format(pred_idx[0:10]))
logger.info("Rescoring Candidates indices: {}".format(training_data.predict_idx[0:10]))
# init the network model
# either load from existing or create from config
if learning_params["train"]["pretrained_model"].lower() != "none":
logger.info("Loading existing model from reference.")
xirtnetwork.load_model(learning_params["train"]["pretrained_model"])
else:
logger.info("Building new model from config.")
xirtnetwork.build_model(siamese=xirt_params["siamese"]["use"])
# once model architecture is there, check if weights should be loaded
# loading predefined weights
if learning_params["train"]["pretrained_weights"].lower() != "none":
logger.info("Loading pre-trained weights into model.")
xirtnetwork.model.load_weights(learning_params["train"]["pretrained_weights"])
# after loading the weights, we need to pop / re-design the layers for transfer learning
if learning_params["train"]["pretrained_model"].lower() != "none":
logger.info("Adjusting model architecture.")
xirtnetwork.adjust_model()
# finally compile
xirtnetwork.compile()
callbacks = xirtnetwork.get_callbacks(suffix=str(cv_counter).zfill(2))
# assemble training data
xt_cv = training_data.get_features(train_idx)
yt_cv = training_data.get_classes(train_idx, frac_cols=frac_cols, cont_cols=cont_cols)
# validation data
xv_cv = training_data.get_features(val_idx)
yv_cv = training_data.get_classes(val_idx, frac_cols=frac_cols, cont_cols=cont_cols)
# prediction data
xp_cv = training_data.get_features(pred_idx)
yp_cv = training_data.get_classes(pred_idx, frac_cols=frac_cols, cont_cols=cont_cols)
# fit the mode, use the validation split to determine the best
# epoch for selecting the best weights
logger.info("Fitting model.")
history = xirtnetwork.model.fit(xt_cv, yt_cv, validation_data=(xv_cv, yv_cv),
epochs=xirt_params["learning"]["epochs"],
batch_size=xirt_params["learning"]["batch_size"],
verbose=xirt_params["learning"]["verbose"],
callbacks=callbacks)
metric_columns = xirtnetwork.model.metrics_names
# model evaluation training (t), validation (v), prediction (p)
model_summary.append(xirtnetwork.model.evaluate(xt_cv, yt_cv, batch_size=512))
model_summary.append(xirtnetwork.model.evaluate(xv_cv, yv_cv, batch_size=512))
model_summary.append(xirtnetwork.model.evaluate(xp_cv, yp_cv, batch_size=512))
# use the training for predicting unseen RTs
training_data.predict_and_store(xirtnetwork, xp_cv, pred_idx, cv=cv_counter)
if has_ordinal:
train_preds = xirtnetwork.model.predict(xt_cv)
val_preds = xirtnetwork.model.predict(xv_cv)
pred_preds = xirtnetwork.model.predict(xp_cv)
accuracies_all.extend(xr.compute_accuracy(train_preds,
training_data.psms.loc[train_idx],
xirtnetwork.tasks, xirtnetwork.output_p))
accuracies_all.extend(xr.compute_accuracy(val_preds,
training_data.psms.loc[val_idx],
xirtnetwork.tasks, xirtnetwork.output_p))
accuracies_all.extend(xr.compute_accuracy(pred_preds,
training_data.psms.loc[pred_idx],
xirtnetwork.tasks, xirtnetwork.output_p))
# store metrics
# store model? / callback
df_history = pd.DataFrame(history.history)
df_history["CV"] = cv_counter
df_history["epoch"] = np.arange(1, len(df_history) + 1)
cv_counter += 1
histories.append(df_history)
logger.info("Finished CV training model.")
# CV training done, now deal with the data not used for training
if learning_params["train"]["mode"] != "predict":
# store model summary data
model_summary_df = pd.DataFrame(model_summary, columns=metric_columns)
model_summary_df["CV"] = np.repeat(np.arange(1, n_splits + 1), 3)
model_summary_df["Split"] = np.tile(["Train", "Validation", "Prediction"], n_splits)
# store manual accuray for ordinal data
if has_ordinal:
for count, task_i in enumerate(frac_cols):
taski_short = task_i.split("_")[0]
if "ordinal" in xirtnetwork.output_p[taski_short + "-column"]:
# accuracies_all contains accuracies for train, validation, pred
# problem is that accuracies are computed after the loop and stored in a 1d-ar
# retrieve information again
if len(frac_cols) == 1:
model_summary_df[taski_short + "_ordinal-accuracy"] = accuracies_all
else:
model_summary_df[taski_short + "_ordinal-accuracy"] = \
accuracies_all[count::len(frac_cols)]
if learning_params["train"]["refit"]:
logger.info("Refitting model on entire data to predict unseen data.")
callbacks = xirtnetwork.get_callbacks(suffix="full")
xrefit = training_data.get_features(training_data.train_idx)
yrefit = training_data.get_classes(training_data.train_idx, frac_cols=frac_cols,
cont_cols=cont_cols)
xirtnetwork.build_model(siamese=xirt_params["siamese"]["use"])
xirtnetwork.compile()
if learning_params["train"]["pretrained_weights"].lower() != "none":
logger.info("Loading pretrained weights into model for refit option.")
xirtnetwork.model.load_weights(learning_params["train"]["pretrained_weights"])
_ = xirtnetwork.model.fit(xrefit, yrefit, validation_split=test_size,
epochs=xirt_params["learning"]["epochs"],
batch_size=xirt_params["learning"]["batch_size"],
verbose=xirt_params["learning"]["verbose"],
callbacks=callbacks)
else:
logger.info("Selecting best performing CV model to predict unseen data.")
# load the best performing model across cv from the validation split
best_model_idx = np.argmin(
model_summary_df[model_summary_df["Split"] == "Validation"]["loss"].values)
logger.info("Best Model: {}".format(best_model_idx))
logger.info("Loading weights.")
xirtnetwork.model.load_weights(os.path.join(xirtnetwork.callback_p["callback_path"],
"xirt_weights_{}.h5".format(
str(best_model_idx + 1).zfill(2))))
logger.info("Model Summary:")
logger.info(model_summary_df.groupby("Split").agg([np.mean, np.std]).to_string())
else:
logger.info("Loading model weights.")
xirtnetwork.build_model(siamese=xirt_params["siamese"]["use"])
xirtnetwork.compile()
xirtnetwork.model.load_weights(learning_params["train"]["pretrained_weights"])
logger.info("Reassigning prediction index for prediction mode.")
training_data.predict_idx = training_data.psms.index
model_summary_df = pd.DataFrame()
df_history_all = pd.DataFrame()
# get the 'unvalidation data', e.g. data that was not used during training because
# here the CSMs are that we want to save / rescore later!
# assign prediction fold to entire data
xu = training_data.get_features(training_data.predict_idx)
yu = training_data.get_classes(training_data.predict_idx, frac_cols=frac_cols,
cont_cols=cont_cols)
training_data.predict_and_store(xirtnetwork, xu, training_data.predict_idx, cv=-1)
eval_unvalidation = xirtnetwork.model.evaluate(xu, yu, batch_size=512)
if has_ordinal:
accs_tmp = xr.compute_accuracy(xirtnetwork.model.predict(xu),
training_data.psms.loc[training_data.predict_idx],
xirtnetwork.tasks, xirtnetwork.output_p)
eval_unvalidation.extend(np.hstack([-1, "Unvalidation", accs_tmp]))
else:
eval_unvalidation.extend([-1, "Unvalidation"])
# prediction modes dont have any training information
if learning_params["train"]["mode"] != "predict":
# collect epoch training data
model_summary_df.loc[len(model_summary_df)] = eval_unvalidation
df_history_all = pd.concat(histories)
df_history_all = df_history_all.reset_index(drop=False).rename(columns={"index": "epoch"})
df_history_all["epoch"] += 1
# compute features for rescoring
xf.compute_prediction_errors(training_data.psms, training_data.prediction_df,
xirtnetwork.tasks, frac_cols,
(xirtnetwork.siamese_p["single_predictions"]
& xirtnetwork.siamese_p["use"]))
# store results
features_exhaustive = xf.add_interactions(training_data.prediction_df.filter(regex="error"),
degree=len(xirtnetwork.tasks))
# qc
# only training procedure includes qc
if perform_qc and (learning_params["train"]["mode"] != "predict"):
logger.info("Generating qc plots.")
qc.plot_epoch_cv(callback_path=xirtnetwork.callback_p["callback_path"],
tasks=xirtnetwork.tasks, xirt_params=xirt_params, outpath=outpath)
qc.plot_summary_strip(model_summary_df, tasks=xirtnetwork.tasks, xirt_params=xirt_params,
outpath=outpath)
qc.plot_cv_predictions(training_data.prediction_df, training_data.psms,
xirt_params=xirt_params, outpath=outpath)
qc.plot_error_characteristics(training_data.prediction_df, training_data.psms,
xirtnetwork.tasks, xirt_params, outpath, max_fdr=0.01)
logger.info("Writing output tables.")
# store setup in summary
model_summary_df["xirt_params_loc"] = xirt_loc
model_summary_df["xirt_params_base"] = os.path.basename(xirt_loc).split(".")[0]
model_summary_df["learning_params"] = setup_loc
model_summary_df["peptides"] = peptides_file
df_history_all.to_csv(os.path.join(outpath, "epoch_history.csv"))
model_summary_df.to_csv(os.path.join(outpath, "model_summary.csv"))
if write:
# store data
training_data.psms.to_csv(os.path.join(outpath, "processed_psms.csv"))
training_data_Xy = ((training_data.features1, training_data.features2),
training_data.get_classes(training_data.psms.index,
frac_cols=frac_cols, cont_cols=cont_cols))
with open(os.path.join(xirt_params["callbacks"]["callback_path"], "Xy_data.p"), 'wb') as po:
pickle.dump(training_data_Xy, po, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(xirt_params["callbacks"]["callback_path"], "encoder.p"), 'wb') as po:
pickle.dump(training_data.le, po, protocol=pickle.HIGHEST_PROTOCOL)
training_data.psms.to_csv(os.path.join(outpath, "processed_psms.csv"))
training_data.prediction_df.to_csv(os.path.join(outpath, "error_features.csv"))
features_exhaustive.to_csv(os.path.join(outpath, "error_features_interactions.csv"))
# write a text file to indicate xirt is done.
if write_dummy:
with open(xirt_loc.replace(".yaml", ".txt"), "w") as of:
of.write("done.")
logger.info("Completed xiRT run.")
logger.info("End Time: {}".format(datetime.now().strftime("%H:%M:%S")))
end_time = time.time()
logger.info("xiRT CV-training took: {:.2f} minutes".format((end_time - start_timecv) / 60.))
logger.info("xiRT took: {:.2f} minutes".format((end_time - start_time) / 60.))
def main():  # pragma: no cover
    """Run xiRT main function (command-line entry point).

    Parses CLI arguments, prepares the output directory and logging, then
    delegates to :func:`xirt_runner`.
    """
    parser = arg_parser()
    try:
        args = parser.parse_args(sys.argv[1:])
    except TypeError:
        parser.print_usage()
        # Bug fix: previously execution fell through with `args` unbound and
        # crashed with a NameError on the next line; abort cleanly instead.
        sys.exit(2)
    # create dir if not there
    if not os.path.exists(args.out_dir):
        os.makedirs(args.out_dir)
    # create logger that writes to a file in the output dir and to stdout
    logger = logging.getLogger('xirt')
    logger.setLevel(logging.DEBUG)
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch = logging.FileHandler(os.path.join(args.out_dir, "xirt_logger.log"), "w")
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(fmt)
    logger.addHandler(ch)
    sh = logging.StreamHandler(sys.stdout)
    sh.setLevel(logging.DEBUG)
    sh.setFormatter(fmt)
    logger.addHandler(sh)
    logger.info("command line call:")
    logger.info(
        "xirt -i {} -o {} -x {} -l {}".format(args.in_peptides, args.out_dir, args.xirt_params,
                                              args.learning_params))
    logger.info("Init logging file.")
    logger.info("Starting Time: {}".format(datetime.now().strftime("%H:%M:%S")))
    logger.info("Starting xiRT.")
    logger.info("Using xiRT version: {}".format(xv.__version__))
    # call function
    xirt_runner(args.in_peptides, args.out_dir, args.xirt_params, args.learning_params,
                write=args.write)
|
#!/usr/bin/env python
#description:Linkedin employee search module#
from colorama import Fore,Back,Style
import os,sys
import urllib
import requests
import re,string
class module_element(object):
    """LinkedIn employee-search module (Python 2, operative-framework style).

    Scrapes a Google ``site:linkedin.com/in`` query for people associated
    with a company name and collects the extracted names for export.
    """
    def __init__(self):
        # Title written at the top of any exported file.
        self.title = "Linkedin gathering : \n"
        # Module options: name -> [{"value": ..., "required": "yes"/"no"}].
        self.require = {"enterprise":[{"value":"","required":"yes"}],"limit_search":[{"value":"","required":"yes"}]}
        self.export = []  # collected people names
        self.export_file = ""  # destination path once chosen
        self.export_status = False  # True once results have been written
    def show_options(self):
        # Pretty-print every option with its required flag and current value.
        #print Back.WHITE + Fore.WHITE + "Module parameters" + Style.RESET_ALL
        for line in self.require:
            if self.require[line][0]["value"] == "":
                value = "No value"
            else:
                value = self.require[line][0]["value"]
            if self.require[line][0]["required"] == "yes":
                print Fore.RED + Style.BRIGHT + "- "+Style.RESET_ALL + line + ":" + Fore.RED + "is_required" + Style.RESET_ALL + ":" + value
            else:
                print Fore.WHITE + Style.BRIGHT + "* "+Style.RESET_ALL + line + "(" + Fore.GREEN + "not_required" + Style.RESET_ALL + "):" + value
        #print Back.WHITE + Fore.WHITE + "End parameters" + Style.RESET_ALL
    def export_data(self, argv=False):
        # Write the collected results to a file, preferring the export/ dir.
        # `argv` may carry a file name; otherwise the user is prompted.
        if len(self.export) > 0:
            if self.export_file == "":
                if argv == False:
                    user_input = raw_input("operative (export file name ?) > ")
                else:
                    user_input = argv
                if os.path.exists("export/"+user_input):
                    self.export_file = "export/"+user_input
                elif os.path.exists(user_input):
                    self.export_file = user_input
                else:
                    print Fore.GREEN + "Writing " + user_input + " file" + Style.RESET_ALL
                    self.export_file = "export/"+user_input
                # Recurse once now that export_file is set; the elif branch
                # below then performs the actual write.
                self.export_data()
            elif self.export_status == False:
                # Append mode: repeated exports accumulate in the same file.
                file_open = open(self.export_file,"a+")
                file_open.write(self.title)
                for line in self.export:
                    file_open.write("- " + line +"\n")
                print Fore.GREEN + "File writed : " + self.export_file + Style.RESET_ALL
                file_open.close()
                self.export_status = True
        else:
            print Back.YELLOW + Fore.BLACK + "Module empty result" + Style.RESET_ALL
    def set_options(self,name,value):
        # Set one option's value; complain if the option name is unknown.
        if name in self.require:
            self.require[name][0]["value"] = value
        else:
            print Fore.RED + "Option not found" + Style.RESET_ALL
    def check_require(self):
        # Return False if any required option is still empty, True otherwise.
        for line in self.require:
            for option in self.require[line]:
                if option["required"] == "yes":
                    if option["value"] == "":
                        return False
        return True
    def get_options(self,name):
        # Return an option's value, or False for an unknown option name.
        if name in self.require:
            return self.require[name][0]["value"]
        else:
            return False
    def set_agv(self, argv):
        # Store raw framework argv for later use (name kept as-is: "agv").
        self.argv = argv
    def run_module(self):
        # Entry point called by the framework: validate options, then run.
        ret = self.check_require()
        if ret == False:
            print Back.YELLOW + Fore.BLACK + "Please set the required parameters" + Style.RESET_ALL
        else:
            self.main()
    def main(self):
        # Perform the Google dork search and extract LinkedIn profile names.
        userAgent = "(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
        quantity = "100"
        server = "www.google.com"
        word = self.get_options("enterprise")
        limit = int(self.get_options("limit_search"))
        counter = 0
        result = ""
        totalresults = ""
        # NOTE(review): userAgent, quantity, counter and totalresults are
        # never used below; the request is sent without a custom User-Agent.
        print Fore.GREEN + "Search Linkedin research" + Style.RESET_ALL
        url="http://"+ server + "/search?num=" + str(limit) + "&start=0&hl=en&meta=&q=site%3Alinkedin.com/in%20" + word
        r=requests.get(url)
        result = r.content
        if result != "":
            # Titles look like '">John Doe | LinkedIn' in Google's HTML.
            regex = re.compile('">[a-zA-Z0-9._ -]* \| LinkedIn')
            output = regex.findall(result)
            if len(output) > 0:
                for line in output:
                    if line.strip() != "":
                        if " | LinkedIn" in line and '">' in line:
                            people = line.strip().replace(' | LinkedIn','').replace('">','')
                            print Fore.BLUE + "* "+ Style.RESET_ALL + people
                            self.export.append(people)
            else:
                print Fore.RED + "Nothing on linkedin." + Style.RESET_ALL
        else:
            print Fore.RED + "Can't get response" + Style.RESET_ALL
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resnet."""
import types
from typing import Mapping, Optional, Sequence, Union
from haiku._src import basic
from haiku._src import batch_norm
from haiku._src import conv
from haiku._src import module
from haiku._src import pool
import jax
import jax.numpy as jnp
# If forking replace this block with `import haiku as hk`.
# Build a minimal stand-in `hk` namespace from haiku's private submodules so
# this file only exposes the handful of public-API pieces it actually uses.
hk = types.ModuleType("haiku")
hk.Module = module.Module
hk.BatchNorm = batch_norm.BatchNorm
hk.Conv2D = conv.Conv2D
hk.Linear = basic.Linear
hk.max_pool = pool.max_pool
del basic, batch_norm, conv, module, pool  # keep module-level names tidy
class BlockV1(hk.Module):
  """Post-activation (v1) ResNet residual block, optionally bottlenecked."""

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bn_config: Mapping[str, float],
      bottleneck: bool,
      name: Optional[str] = None,
  ):
    """Builds the convolution/batchnorm stack for one residual block.

    Args:
      channels: Number of output channels of the block.
      stride: Stride of the block (applied in the spatial convolution).
      use_projection: Whether the shortcut needs a 1x1 projection.
      bn_config: Keyword overrides for the BatchNorm layers.
      bottleneck: If True, use the 3-conv bottleneck layout.
      name: Optional module name.
    """
    super().__init__(name=name)
    self.use_projection = use_projection

    bn_config = dict(bn_config)
    for key, default in (("create_scale", True),
                         ("create_offset", True),
                         ("decay_rate", 0.999)):
      bn_config.setdefault(key, default)

    if self.use_projection:
      # 1x1 conv + BN on the shortcut path so its shape matches the residual.
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          with_bias=False,
          padding="SAME",
          name="shortcut_conv")
      self.proj_batchnorm = hk.BatchNorm(name="shortcut_batchnorm", **bn_config)

    width = channels // (4 if bottleneck else 1)
    pairs = [
        (hk.Conv2D(output_channels=width,
                   kernel_shape=1 if bottleneck else 3,
                   stride=1,
                   with_bias=False,
                   padding="SAME",
                   name="conv_0"),
         hk.BatchNorm(name="batchnorm_0", **bn_config)),
        (hk.Conv2D(output_channels=width,
                   kernel_shape=3,
                   stride=stride,
                   with_bias=False,
                   padding="SAME",
                   name="conv_1"),
         hk.BatchNorm(name="batchnorm_1", **bn_config)),
    ]
    if bottleneck:
      pairs.append(
          (hk.Conv2D(output_channels=channels,
                     kernel_shape=1,
                     stride=1,
                     with_bias=False,
                     padding="SAME",
                     name="conv_2"),
           # Zero-init the last BN scale so the block starts near identity.
           hk.BatchNorm(name="batchnorm_2", scale_init=jnp.zeros, **bn_config)))
    self.layers = tuple(pairs)

  def __call__(self, inputs, is_training, test_local_stats):
    shortcut = out = inputs
    if self.use_projection:
      shortcut = self.proj_batchnorm(self.proj_conv(shortcut),
                                     is_training, test_local_stats)

    last = len(self.layers) - 1
    for i, (conv_i, bn_i) in enumerate(self.layers):
      out = bn_i(conv_i(out), is_training, test_local_stats)
      if i != last:  # no relu on the final conv; it comes after the add
        out = jax.nn.relu(out)

    return jax.nn.relu(out + shortcut)
class BlockV2(hk.Module):
  """Pre-activation (v2) ResNet residual block, optionally bottlenecked."""

  def __init__(
      self,
      channels: int,
      stride: Union[int, Sequence[int]],
      use_projection: bool,
      bn_config: Mapping[str, float],
      bottleneck: bool,
      name: Optional[str] = None,
  ):
    """Builds the convolution/batchnorm stack for one v2 residual block.

    Args:
      channels: Number of output channels of the block.
      stride: Stride of the block (applied in the spatial convolution).
      use_projection: Whether the shortcut needs a 1x1 projection.
      bn_config: Keyword overrides for the BatchNorm layers.
      bottleneck: If True, use the 3-conv bottleneck layout.
      name: Optional module name.
    """
    super().__init__(name=name)
    self.use_projection = use_projection

    bn_config = dict(bn_config)
    for key, default in (("create_scale", True), ("create_offset", True)):
      bn_config.setdefault(key, default)

    if self.use_projection:
      # 1x1 conv on the shortcut path so its shape matches the residual.
      self.proj_conv = hk.Conv2D(
          output_channels=channels,
          kernel_shape=1,
          stride=stride,
          with_bias=False,
          padding="SAME",
          name="shortcut_conv")

    width = channels // (4 if bottleneck else 1)
    pairs = [
        (hk.Conv2D(output_channels=width,
                   kernel_shape=1 if bottleneck else 3,
                   stride=1,
                   with_bias=False,
                   padding="SAME",
                   name="conv_0"),
         hk.BatchNorm(name="batchnorm_0", **bn_config)),
        (hk.Conv2D(output_channels=width,
                   kernel_shape=3,
                   stride=stride,
                   with_bias=False,
                   padding="SAME",
                   name="conv_1"),
         hk.BatchNorm(name="batchnorm_1", **bn_config)),
    ]
    if bottleneck:
      # NOTE: Some implementations of ResNet50 v2 suggest initializing
      # gamma/scale here to zeros.
      pairs.append(
          (hk.Conv2D(output_channels=channels,
                     kernel_shape=1,
                     stride=1,
                     with_bias=False,
                     padding="SAME",
                     name="conv_2"),
           hk.BatchNorm(name="batchnorm_2", **bn_config)))
    self.layers = tuple(pairs)

  def __call__(self, inputs, is_training, test_local_stats):
    x = shortcut = inputs
    for i, (conv_i, bn_i) in enumerate(self.layers):
      x = jax.nn.relu(bn_i(x, is_training, test_local_stats))
      if i == 0 and self.use_projection:
        # v2 projects the *pre-activated* input, not the raw input.
        shortcut = self.proj_conv(x)
      x = conv_i(x)
    return x + shortcut
class BlockGroup(hk.Module):
  """A sequence of residual blocks forming one ResNet stage."""

  def __init__(
      self,
      channels: int,
      num_blocks: int,
      stride: Union[int, Sequence[int]],
      bn_config: Mapping[str, float],
      resnet_v2: bool,
      bottleneck: bool,
      use_projection: bool,
      name: Optional[str] = None,
  ):
    """Creates ``num_blocks`` blocks; only the first one strides/projects."""
    super().__init__(name=name)

    block_cls = BlockV2 if resnet_v2 else BlockV1
    self.blocks = [
        block_cls(channels=channels,
                  stride=stride if i == 0 else 1,
                  use_projection=use_projection and i == 0,
                  bottleneck=bottleneck,
                  bn_config=bn_config,
                  name="block_%d" % (i))
        for i in range(num_blocks)
    ]

  def __call__(self, inputs, is_training, test_local_stats):
    out = inputs
    for blk in self.blocks:
      out = blk(out, is_training, test_local_stats)
    return out
def check_length(length, value, name):
  """Raise ``ValueError`` unless ``value`` has exactly ``length`` elements.

  Args:
    length: Required number of elements.
    value: Sized sequence to validate.
    name: Parameter name used in the error message.

  Raises:
    ValueError: If ``len(value) != length``.
  """
  if len(value) != length:
    # Bug fix: the message previously hard-coded "length 4" even though
    # `length` is a parameter; report the requested length instead.
    raise ValueError(f"`{name}` must be of length {length} not {len(value)}")
class ResNet(hk.Module):
  """ResNet model."""

  BlockGroup = BlockGroup  # pylint: disable=invalid-name
  BlockV1 = BlockV1  # pylint: disable=invalid-name
  BlockV2 = BlockV2  # pylint: disable=invalid-name

  def __init__(
      self,
      blocks_per_group: Sequence[int],
      num_classes: int,
      bn_config: Optional[Mapping[str, float]] = None,
      resnet_v2: bool = False,
      bottleneck: bool = True,
      channels_per_group: Sequence[int] = (256, 512, 1024, 2048),
      use_projection: Sequence[bool] = (True, True, True, True),
      name: Optional[str] = None,
  ):
    """Constructs a ResNet model.

    Args:
      blocks_per_group: Length-4 sequence with the number of residual blocks
        in each stage.
      num_classes: Number of output classes.
      bn_config: Overrides for the :class:`~haiku.BatchNorm` layers; by
        default ``decay_rate`` is ``0.9`` and ``eps`` is ``1e-5``.
      resnet_v2: If ``True`` build the pre-activation (v2) variant, else v1.
        Defaults to ``False``.
      bottleneck: Whether blocks use the bottleneck layout. Defaults to
        ``True``.
      channels_per_group: Length-4 sequence with the output channels of each
        stage.
      use_projection: Length-4 sequence saying whether each stage's first
        block projects its shortcut.
      name: Name of the module.
    """
    super().__init__(name=name)
    self.resnet_v2 = resnet_v2

    bn_config = dict(bn_config or {})
    for key, default in (("decay_rate", 0.9), ("eps", 1e-5),
                         ("create_scale", True), ("create_offset", True)):
      bn_config.setdefault(key, default)

    # Every per-stage setting must describe exactly 4 stages.
    check_length(4, blocks_per_group, "blocks_per_group")
    check_length(4, channels_per_group, "channels_per_group")

    self.initial_conv = hk.Conv2D(
        output_channels=64,
        kernel_shape=7,
        stride=2,
        with_bias=False,
        padding="SAME",
        name="initial_conv")

    if not self.resnet_v2:
      self.initial_batchnorm = hk.BatchNorm(name="initial_batchnorm",
                                            **bn_config)

    group_args = zip(channels_per_group, blocks_per_group, (1, 2, 2, 2),
                     use_projection)
    self.block_groups = [
        BlockGroup(channels=channels,
                   num_blocks=num_blocks,
                   stride=stride,
                   bn_config=bn_config,
                   resnet_v2=resnet_v2,
                   bottleneck=bottleneck,
                   use_projection=projection,
                   name="block_group_%d" % (i))
        for i, (channels, num_blocks, stride, projection)
        in enumerate(group_args)
    ]

    if self.resnet_v2:
      self.final_batchnorm = hk.BatchNorm(name="final_batchnorm", **bn_config)

    # Classifier weights start at zero.
    self.logits = hk.Linear(num_classes, w_init=jnp.zeros, name="logits")

  def __call__(self, inputs, is_training, test_local_stats=False):
    out = self.initial_conv(inputs)
    if not self.resnet_v2:
      out = self.initial_batchnorm(out, is_training, test_local_stats)
      out = jax.nn.relu(out)

    out = hk.max_pool(out,
                      window_shape=(1, 3, 3, 1),
                      strides=(1, 2, 2, 1),
                      padding="SAME")

    for block_group in self.block_groups:
      out = block_group(out, is_training, test_local_stats)

    if self.resnet_v2:
      out = self.final_batchnorm(out, is_training, test_local_stats)
      out = jax.nn.relu(out)

    # Global average pool over the spatial dimensions, then classify.
    out = jnp.mean(out, axis=(1, 2))
    return self.logits(out)
class ResNet18(ResNet):
  """ResNet18."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # Architecture constants specific to the 18-layer variant.
    arch = dict(blocks_per_group=(2, 2, 2, 2),
                bottleneck=False,
                channels_per_group=(64, 128, 256, 512),
                use_projection=(False, True, True, True))
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
class ResNet34(ResNet):
  """ResNet34."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # Architecture constants specific to the 34-layer variant.
    arch = dict(blocks_per_group=(3, 4, 6, 3),
                bottleneck=False,
                channels_per_group=(64, 128, 256, 512),
                use_projection=(False, True, True, True))
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
class ResNet50(ResNet):
  """ResNet50."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # 50-layer variant: bottleneck blocks with the default channel widths.
    arch = dict(blocks_per_group=(3, 4, 6, 3), bottleneck=True)
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
class ResNet101(ResNet):
  """ResNet101."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # 101-layer variant: bottleneck blocks with the default channel widths.
    arch = dict(blocks_per_group=(3, 4, 23, 3), bottleneck=True)
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
class ResNet152(ResNet):
  """ResNet152."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # 152-layer variant: bottleneck blocks with the default channel widths.
    arch = dict(blocks_per_group=(3, 8, 36, 3), bottleneck=True)
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
class ResNet200(ResNet):
  """ResNet200."""

  def __init__(self,
               num_classes: int,
               bn_config: Optional[Mapping[str, float]] = None,
               resnet_v2: bool = False,
               name: Optional[str] = None):
    """Constructs a ResNet model.

    Args:
      num_classes: Number of output classes.
      bn_config: Optional ``decay_rate``/``eps`` overrides passed on to the
        :class:`~haiku.BatchNorm` layers.
      resnet_v2: Use the pre-activation (v2) variant when ``True``. Defaults
        to ``False``.
      name: Name of the module.
    """
    # 200-layer variant: bottleneck blocks with the default channel widths.
    arch = dict(blocks_per_group=(3, 24, 36, 3), bottleneck=True)
    super().__init__(num_classes=num_classes,
                     bn_config=bn_config,
                     resnet_v2=resnet_v2,
                     name=name,
                     **arch)
|
# -*- coding: utf-8 -*-
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
from pathlib import Path
from shutil import copyfile
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import mne
from mne.channels import make_standard_montage
from mne.channels.montage import transform_to_head
from mne.datasets.testing import data_path, requires_testing_data
from mne.io import read_raw_nirx, read_fiducials
from mne.utils import check_version
from mne_nirs.io.fold._fold import _generate_montage_locations,\
_find_closest_standard_location, _read_fold_xls
from mne_nirs.io import fold_landmark_specificity
from mne_nirs.io.fold import fold_channel_specificity
# Directory of this test module; the example fOLD table lives next to it.
thisfile = Path(__file__).parent.resolve()
foldfile = thisfile / "data" / "example.xls"

# Short NIRx recording from the mne-testing-data repository.
# https://github.com/mne-tools/mne-testing-data/pull/72
fname_nirx_15_3_short = Path(data_path(download=False)) / \
    'NIRx' / 'nirscout' / 'nirx_15_3_recording'

# Skip marker: reading the legacy .xls fOLD tables requires xlrd.
requires_xlrd = pytest.mark.skipif(
    not check_version('xlrd', '1.0'), reason='Requires xlrd >= 1.0')
@requires_xlrd
@pytest.mark.parametrize('fold_files', (str, None, list))
def test_channel_specificity(monkeypatch, tmp_path, fold_files):
    """Check fold_channel_specificity with list, directory and env-var input.

    The parametrization covers three ways of locating the fOLD tables:
    an explicit list of files, a directory path, and the MNE_NIRS_FOLD_PATH
    environment variable.
    """
    raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)
    raw.pick(range(2))
    kwargs = dict()
    n_want = 6
    if fold_files is list:
        kwargs = dict(fold_files=[foldfile])
    elif fold_files is str:
        # Directory mode reads both copied tables -> twice the rows.
        kwargs = dict(fold_files=tmp_path)
        n_want *= 2
    else:
        assert fold_files is None
        monkeypatch.setenv('MNE_NIRS_FOLD_PATH', str(tmp_path))
        assert len(kwargs) == 0
        # The env-var directory is still empty here -> explicit error.
        with pytest.raises(FileNotFoundError, match=r'fold_files\[0\] does.*'):
            fold_channel_specificity(raw)
        n_want *= 2
    copyfile(foldfile, tmp_path / '10-10.xls')
    copyfile(foldfile, tmp_path / '10-5.xls')
    res = fold_channel_specificity(raw, **kwargs)
    assert len(res) == 2
    assert res[0].shape == (n_want, 14)
    # Build a montage whose S1/D2 positions are known standard locations.
    montage = make_standard_montage(
        'standard_1005', head_size=0.09700884729534559)
    fids = read_fiducials(
        Path(mne.__file__).parent / 'data' / 'fsaverage' /
        'fsaverage-fiducials.fif')[0]
    for f in fids:
        f['coord_frame'] = montage.dig[0]['coord_frame']
    montage.dig[:3] = fids
    S, D = raw.ch_names[0].split()[0].split('_')
    assert S == 'S1' and D == 'D2'
    montage.rename_channels({'PO8': S, 'P6': D})  # not in the tables!
    # taken from standard_1020.elc
    s_mri = np.array([55.6666, -97.6251, 2.7300]) / 1000.
    d_mri = np.array([67.8877, -75.9043, 28.0910]) / 1000.
    trans = mne.transforms._get_trans('fsaverage', 'mri', 'head')[0]
    ch_pos = montage.get_positions()['ch_pos']
    assert_allclose(ch_pos[S], s_mri, atol=1e-6)
    assert_allclose(ch_pos[D], d_mri, atol=1e-6)
    raw.set_montage(montage)
    montage = transform_to_head(montage)
    s_head = mne.transforms.apply_trans(trans, s_mri)
    d_head = mne.transforms.apply_trans(trans, d_mri)
    assert_allclose(montage._get_ch_pos()['S1'], s_head, atol=1e-6)
    assert_allclose(montage._get_ch_pos()['D2'], d_head, atol=1e-6)
    for ch in raw.info['chs']:
        assert_allclose(ch['loc'][3:6], s_head, atol=1e-6)
        assert_allclose(ch['loc'][6:9], d_head, atol=1e-6)
    # Positions are now off-table, so without interpolation nothing matches.
    res_1 = fold_channel_specificity(raw, **kwargs)[0]
    assert res_1.shape == (0, 14)
    # TODO: This is wrong, should be P08 not P08h, and distance should be 0 mm!
    with pytest.warns(RuntimeWarning, match='.*PO8h?/P6.*TP8/T8.*'):
        res_1 = fold_channel_specificity(raw, interpolate=True, **kwargs)[0]
    # Swapping source and detector labels must not change specificity.
    montage.rename_channels({S: D, D: S})  # reversed
    with pytest.warns(RuntimeWarning, match='.*PO8h?/P6.*TP8/T8.*'):
        res_2 = fold_channel_specificity(raw, interpolate=True, **kwargs)[0]
    # We should check the whole thing, but this is probably good enough
    assert (res_1['Specificity'] == res_2['Specificity']).all()
@requires_xlrd
def test_landmark_specificity():
    """Landmark specificity returns one percentage per channel."""
    raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)
    with pytest.warns(RuntimeWarning, match='No fOLD table entry'):
        specificity = fold_landmark_specificity(
            raw, "L Superior Frontal Gyrus", [foldfile], interpolate=True)
    assert len(specificity) == len(raw.ch_names)
    # All values are percentages in [0, 100].
    assert np.min(specificity) >= 0
    assert np.max(specificity) <= 100
@requires_xlrd
def test_fold_workflow():
    """End-to-end fOLD lookup: channel -> standard labels -> specificity."""
    raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)
    reference_locations = _generate_montage_locations()
    channel = raw.copy().pick(1)
    loc = channel.info['chs'][0]['loc']

    # Map the optode coordinates onto the nearest standard montage labels.
    source_label = _find_closest_standard_location(loc[3:6],
                                                   reference_locations)
    detector_label = _find_closest_standard_location(loc[6:9],
                                                     reference_locations)
    assert source_label == "T7"
    assert detector_label == "TP7"

    # Select the matching fOLD rows and read out one landmark's specificity.
    tbl = _read_fold_xls(foldfile, atlas="Juelich")
    tbl = tbl.query("Source == @source_label").\
        query("Detector == @detector_label")
    specificity = tbl.query("Landmark == 'L Mid Orbital Gyrus'")["Specificity"]
    assert specificity.values == 12.34
@requires_xlrd
def test_fold_reader():
    """The example .xls parses into a DataFrame with the expected shape."""
    table = _read_fold_xls(foldfile, atlas="Juelich")
    assert isinstance(table, pd.DataFrame)
    assert table.shape == (11, 10)
    landmarks = list(table["Landmark"])
    assert "L Superior Frontal Gyrus" in landmarks
@requires_testing_data
def test_label_finder():
    """Test locating labels."""
    raw = read_raw_nirx(fname_nirx_15_3_short, preload=True)
    reference_locations = _generate_montage_locations()

    # (channel pick, loc slice selecting source/detector, expected label)
    cases = [
        (25, slice(3, 6), "Cz"),   # central head position, source
        (4, slice(6, 9), "T8"),    # right auditory position, detector
        (4, slice(3, 6), "TP8"),   # right auditory position, source
        (1, slice(3, 6), "T7"),    # left auditory position, source
        (1, slice(6, 9), "TP7"),   # left auditory position, detector
        (9, slice(6, 9), "PO2"),   # rear position, detector
    ]
    for pick, loc_slice, expected in cases:
        sub = raw.copy().pick(pick)
        assert _find_closest_standard_location(
            sub.info['chs'][0]['loc'][loc_slice],
            reference_locations) == expected
|
<reponame>WongLynn/vnpy_Amerlin-1.1.20<filename>vnpy/trader/gateway/ctpGateway/ctpGateway.py
# encoding: UTF-8
'''
vn.ctp的gateway接入
考虑到现阶段大部分CTP中的ExchangeID字段返回的都是空值
vtSymbol直接使用symbol
'''
import os
import json
from copy import copy
from datetime import datetime, timedelta
import pandas as pd
from vnpy.api.ctp import MdApi, TdApi, defineDict
from vnpy.trader.vtGateway import *
from vnpy.trader.vtFunction import getJsonPath, getTempPath
from vnpy.trader.vtConstant import GATEWAYTYPE_FUTURES, VN_SEPARATOR
from .language import text
import re
import pymongo
from vnpy.trader.vtGlobal import globalSetting
# Mappings between VT constants and the corresponding CTP (THOST) codes.

# Price type mapping.
priceTypeMap = {
    PRICETYPE_LIMITPRICE: defineDict["THOST_FTDC_OPT_LimitPrice"],
    PRICETYPE_MARKETPRICE: defineDict["THOST_FTDC_OPT_AnyPrice"],
}
priceTypeMapReverse = {v: k for k, v in priceTypeMap.items()}

# Direction mapping.
directionMap = {
    DIRECTION_LONG: defineDict['THOST_FTDC_D_Buy'],
    DIRECTION_SHORT: defineDict['THOST_FTDC_D_Sell'],
}
directionMapReverse = {v: k for k, v in directionMap.items()}

# Offset (open/close) mapping.
offsetMap = {
    OFFSET_OPEN: defineDict['THOST_FTDC_OF_Open'],
    OFFSET_CLOSE: defineDict['THOST_FTDC_OF_Close'],
    OFFSET_CLOSETODAY: defineDict['THOST_FTDC_OF_CloseToday'],
    OFFSET_CLOSEYESTERDAY: defineDict['THOST_FTDC_OF_CloseYesterday'],
}
offsetMapReverse = {v: k for k, v in offsetMap.items()}

# Exchange mapping.
exchangeMap = {
    EXCHANGE_CFFEX: 'CFFEX',
    EXCHANGE_SHFE: 'SHFE',
    EXCHANGE_CZCE: 'CZCE',
    EXCHANGE_DCE: 'DCE',
    EXCHANGE_SSE: 'SSE',
    EXCHANGE_INE: 'INE',
    EXCHANGE_UNKNOWN: '',
}
exchangeMapReverse = {v: k for k, v in exchangeMap.items()}

# Position direction mapping.
posiDirectionMap = {
    DIRECTION_NET: defineDict["THOST_FTDC_PD_Net"],
    DIRECTION_LONG: defineDict["THOST_FTDC_PD_Long"],
    DIRECTION_SHORT: defineDict["THOST_FTDC_PD_Short"],
}
posiDirectionMapReverse = {v: k for k, v in posiDirectionMap.items()}

# Product class mapping.
productClassMap = {
    PRODUCT_FUTURES: defineDict["THOST_FTDC_PC_Futures"],
    PRODUCT_OPTION: defineDict["THOST_FTDC_PC_Options"],
    PRODUCT_COMBINATION: defineDict["THOST_FTDC_PC_Combination"],
}
productClassMapReverse = {v: k for k, v in productClassMap.items()}

# Order status mapping.
statusMap = {
    STATUS_ALLTRADED: defineDict["THOST_FTDC_OST_AllTraded"],
    STATUS_PARTTRADED: defineDict["THOST_FTDC_OST_PartTradedQueueing"],
    STATUS_NOTTRADED: defineDict["THOST_FTDC_OST_NoTradeQueueing"],
    STATUS_CANCELLED: defineDict["THOST_FTDC_OST_Canceled"],
    STATUS_SUBMITTED: defineDict["THOST_FTDC_OAS_Submitted"],
}
statusMapReverse = {v: k for k, v in statusMap.items()}

# Global registry, key: symbol, value: exchange.
symbolExchangeDict = {}

# Cut-off time used to recognise night-session timestamps.
NIGHT_TRADING = datetime(1900, 1, 1, 20).time()
########################################################################
class CtpGateway(VtGateway):
    """CTP gateway.

    Owns one market-data API and one trading API instance, loads its
    connection settings from ``<gatewayName>_connect.json`` and can serve
    history bars from MongoDB or the jaqs data service.
    """

    # column order of the history-bar DataFrames returned by loadHistoryBar
    BARCOLUMN = ["datetime", "open", "high", "low", "close", "volume"]

    #----------------------------------------------------------------------
    def __init__(self, eventEngine, gatewayName='CTP'):
        """Constructor"""
        super(CtpGateway, self).__init__(eventEngine, gatewayName)

        self.mdApi = CtpMdApi(self)     # market-data API
        self.tdApi = CtpTdApi(self)     # trading API

        self.mdConnected = False        # md API login state (True after login)
        self.tdConnected = False        # td API login state

        self.qryEnabled = False         # periodic account/position query switch

        self.fileName = self.gatewayName + '_connect.json'
        self.filePath = getJsonPath(self.fileName, __file__)

        self.trade_days = None          # jaqs trading calendar (set in connect)
        self.current_datetime = None    # most recent tick timestamp seen
        self.dbURI = None               # MongoDB URI (history-bar source)
        self.dbName = None              # MongoDB database name
        self.jaqsUser = None            # jaqs data-service credentials
        self.jaqsPass = None
        self.ds = None                  # jaqs RemoteDataService instance

    #----------------------------------------------------------------------
    def connect(self):
        """Read the JSON settings file and log in to both APIs."""
        try:
            # context manager guarantees the config file handle is closed
            # (the original opened it without ever closing it)
            with open(self.filePath, 'r', encoding="utf-8") as f:
                setting = json.load(f)
        except IOError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = text.LOADING_ERROR
            self.onLog(log)
            return

        try:
            userID = str(setting['userID'])
            password = str(setting['password'])
            brokerID = str(setting['brokerID'])
            tdAddress = str(setting['tdAddress'])
            mdAddress = str(setting['mdAddress'])

            # optional client authentication
            if 'authCode' in setting:
                authCode = str(setting['authCode'])
                userProductInfo = str(setting['userProductInfo'])
                self.tdApi.requireAuthentication = True
            else:
                authCode = None
                userProductInfo = None
        except KeyError:
            log = VtLogData()
            log.gatewayName = self.gatewayName
            log.logContent = text.CONFIG_KEY_MISSING
            self.onLog(log)
            return

        # connect the market-data and trading interfaces
        self.mdApi.connect(userID, password, brokerID, mdAddress)
        self.tdApi.connect(userID, password, brokerID, tdAddress, authCode, userProductInfo)

        # initialise and start the periodic query loop
        setQryEnabled = setting.get('setQryEnabled', False)
        self.setQryEnabled(setQryEnabled)
        setQryFreq = setting.get('setQryFreq', 60)
        self.initQuery(setQryFreq)

        # optional history-data sources
        self.dbURI = setting.get('mongoDbURI', None)
        self.dbName = setting.get('mongoDbName', None)
        self.jaqsUser = setting.get('jaqsUser', None)
        self.jaqsPass = setting.get('jaqsPass', None)

        if not self.dbURI and self.jaqsUser:
            from jaqs.data import RemoteDataService
            data_config = {
                "remote.data.address": "tcp://data.quantos.org:8910",
                "remote.data.username": self.jaqsUser,
                "remote.data.password": self.jaqsPass
            }
            self.ds = RemoteDataService()
            self.ds.init_from_config(data_config)
            # cache the full trading calendar for _select_trade_days
            self.trade_days = self.ds.query_trade_dates(19910101, 20291231)

    #----------------------------------------------------------------------
    def update_current_datetime(self, dt):
        """Keep track of the latest market timestamp seen on any tick."""
        if self.current_datetime is None or dt > self.current_datetime:
            self.current_datetime = dt

    #----------------------------------------------------------------------
    def onTick(self, tick):
        """Forward a tick event and update the latest market timestamp."""
        super(CtpGateway, self).onTick(tick)
        if tick.datetime is None:
            tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
        self.update_current_datetime(tick.datetime)

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to market data for a contract."""
        self.mdApi.subscribe(subscribeReq)

    #----------------------------------------------------------------------
    def sendOrder(self, orderReq):
        """Send an order; returns the vtOrderID produced by the td API."""
        return self.tdApi.sendOrder(orderReq)

    #----------------------------------------------------------------------
    def cancelOrder(self, cancelOrderReq):
        """Cancel a working order."""
        self.tdApi.cancelOrder(cancelOrderReq)

    #----------------------------------------------------------------------
    def qryAccount(self):
        """Query account funds."""
        self.tdApi.qryAccount()

    #----------------------------------------------------------------------
    def qryPosition(self):
        """Query positions."""
        self.tdApi.qryPosition()

    #----------------------------------------------------------------------
    def close(self):
        """Close whichever API connections are currently up."""
        if self.mdConnected:
            self.mdApi.close()
        if self.tdConnected:
            self.tdApi.close()

    #----------------------------------------------------------------------
    def initQuery(self, freq=60):
        """Initialise the periodic query loop (runs every `freq` timer events)."""
        if self.qryEnabled:
            # query functions executed round-robin
            self.qryFunctionList = [self.qryAccount, self.qryPosition]
            self.qryCount = 0           # timer-event countdown
            self.qryTrigger = freq      # trigger threshold
            self.qryNextFunction = 0    # index of the next query function
            self.startQuery()

    #----------------------------------------------------------------------
    def query(self, event):
        """Timer-event handler: run one query function every qryTrigger ticks."""
        self.qryCount += 1
        if self.qryCount > self.qryTrigger:
            self.qryCount = 0
            # execute the next query function, round-robin
            function = self.qryFunctionList[self.qryNextFunction]
            function()
            self.qryNextFunction += 1
            if self.qryNextFunction == len(self.qryFunctionList):
                self.qryNextFunction = 0

    #----------------------------------------------------------------------
    def startQuery(self):
        """Register the periodic query handler on the event engine."""
        self.eventEngine.register(EVENT_TIMER, self.query)

    #----------------------------------------------------------------------
    def setQryEnabled(self, qryEnabled):
        """Enable/disable the periodic query loop."""
        self.qryEnabled = qryEnabled

    #----------------------------------------------------------------------
    def _select_trade_days(self, start, end):
        """Slice the cached jaqs trading calendar to [start, end]."""
        s = self.trade_days.searchsorted(start) if start else 0
        e = self.trade_days.searchsorted(end, "right")
        return self.trade_days[s:e]

    #----------------------------------------------------------------------
    def make_dt(self, date, time):
        """Combine jaqs integer date (yyyymmdd) and time (HHMMSS) into a datetime."""
        day, month, year = list(self.split_time(date))
        second, minute, hour = list(self.split_time(time))
        return datetime(year, month, day, hour, minute, second)

    @staticmethod
    def split_time(time):
        """Yield the two-digit groups of an integer, least significant first."""
        for _ in range(2):
            yield time % 100
            time = int(time / 100)
        yield time

    #----------------------------------------------------------------------
    def loadHistoryBar(self, vtSymbol, type_, size=None, since=None):
        """Load history bars from MongoDB or the jaqs data service.

        :param vtSymbol: contract symbol, e.g. 'rb1901:XXX'
        :param type_: bar frequency string ('1min', '5min', '15min', ...)
        :param size: maximum number of bars to return
        :param since: start date as yyyymmdd int/str
        :return: pandas DataFrame of bars, or None when no source is configured
        """
        if self.dbURI and not self.jaqsUser:
            symbol = vtSymbol.split(':')[0]
            maincontract = re.split(r'(\d)', symbol)[0]
            query_symbol = '_'.join([maincontract, type_])
            self.dbClient = pymongo.MongoClient(self.dbURI)
            if self.dbName not in self.dbClient.list_database_names():
                return self.tdApi.writeLog('MongoDB not found')
            if query_symbol in self.dbClient[self.dbName].collection_names():
                collection = self.dbClient[self.dbName][query_symbol]
                # default query: full collection (the original raised
                # NameError when neither `since` nor `size` was given)
                Cursor = collection.find({})
                if since:
                    since = datetime.strptime(str(since), "%Y%m%d")
                    Cursor = collection.find({"datetime": {"$gt": since}})
                if size:
                    # note: when both are given, `size` takes precedence
                    Cursor = collection.find({}).sort([("datetime", -1)]).limit(size)
                data_df = pd.DataFrame(list(Cursor))
                data_df.sort_values(by=['datetime'], inplace=True)
            else:
                self.tdApi.writeLog('History Data of %s not found in DB' % query_symbol)
                data_df = pd.DataFrame([])
            return data_df
        elif self.jaqsUser and not self.dbURI:
            if type_ not in ['1min', '5min', '15min']:
                log = VtLogData()
                log.gatewayName = self.gatewayName
                log.logContent = u'CTP初始化数据只接受1分钟,5分钟,15分钟bar'
                self.onLog(log)
                return
            # map vn.py frequency strings to jaqs frequency codes
            typeMap = {'1min': '1M', '5min': '5M', '15min': '15M'}
            freq_delta = {
                "1M": timedelta(minutes=1),
                "5M": timedelta(minutes=5),
                "15M": timedelta(minutes=15),
            }
            symbol = vtSymbol.split(':')[0]
            exchange = symbolExchangeDict.get(symbol, EXCHANGE_UNKNOWN)
            # jaqs uses its own exchange suffixes
            if exchangeMap[EXCHANGE_SHFE] in exchange:
                exchange = 'SHF'
            elif exchangeMap[EXCHANGE_CFFEX] in exchange:
                exchange = 'CFE'
            elif exchangeMap[EXCHANGE_CZCE] in exchange:
                exchange = 'CZC'
            symbol = symbol + '.' + exchange
            freq = typeMap[type_]
            delta = freq_delta[freq]
            start = int(since) if since else None
            end = self.current_datetime or datetime.now()
            end = end.year * 10000 + end.month * 100 + end.day
            days = self._select_trade_days(start, end)
            results = {}
            if start is None:
                # no start date: walk backwards until `size` bars collected
                days = reversed(days)
            length = 0
            for date in days:
                bar, msg = self.ds.bar(symbol, trade_date=date, freq=freq)
                if msg != "0,":
                    raise Exception(msg)
                bar["datetime"] = list(map(self.make_dt, bar.date, bar.time))
                # shift each bar's stamp back by one bar interval
                bar["datetime"] -= delta
                results[date] = bar[self.BARCOLUMN]
                length += len(bar)
                if size and (length >= size):
                    break
            data = pd.concat([results[date] for date in sorted(results.keys())], ignore_index=True)
            if size:
                if since:
                    data = data.iloc[:size]
                else:
                    data = data.iloc[-size:]
            return data
        elif not (self.jaqsUser and self.dbURI):
            self.tdApi.writeLog('Please fill History Data source in CTP_setting.json')

    #----------------------------------------------------------------------
    def qryAllOrders(self, vtSymbol, order_id, status=None):
        """Not supported for CTP; present for interface compatibility."""
        pass

    def initPosition(self, vtSymbol):
        """Trigger position and account queries when a strategy starts."""
        self.qryPosition()
        self.qryAccount()

    def qryInstrument(self):
        """Re-query the full instrument list."""
        self.tdApi.restQryInstrument()
########################################################################
class CtpMdApi(MdApi):
    """CTP market-data API implementation.

    Receives depth market data from the CTP front, converts it into
    VtTickData events and forwards them to the owning gateway.
    """

    #----------------------------------------------------------------------
    def __init__(self, gateway):
        """Constructor"""
        super(CtpMdApi, self).__init__()

        self.gateway = gateway                  # owning gateway
        self.gatewayName = gateway.gatewayName  # gateway name

        self.reqID = EMPTY_INT                  # request id counter

        self.connectionStatus = False           # connected to the md front
        self.loginStatus = False                # logged in

        self.subscribedSymbols = set()          # subscribe requests (replayed after re-login)

        self.userID = EMPTY_STRING              # account id
        self.password = EMPTY_STRING            # password
        self.brokerID = EMPTY_STRING            # broker id
        self.address = EMPTY_STRING             # md front address

        self.tradingDt = None                   # trading day as a datetime
        self.tradingDate = EMPTY_STRING         # trading day as 'yyyymmdd' string
        self.tickTime = None                    # time of the previous tick
        self.lastTickDict = {}                  # symbol -> previous tick (volume deltas)

    #----------------------------------------------------------------------
    def onFrontConnected(self):
        """Callback: md front connected; log in immediately."""
        self.connectionStatus = True
        self.writeLog(text.DATA_SERVER_CONNECTED)
        self.login()

    #----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Callback: md front disconnected; clear all state flags."""
        self.connectionStatus = False
        self.loginStatus = False
        self.gateway.mdConnected = False
        self.writeLog(text.DATA_SERVER_DISCONNECTED)

    #----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Callback: heartbeat warning -- fires often and carries no useful
        information, so it is deliberately ignored."""
        pass

    #----------------------------------------------------------------------
    def onRspError(self, error, n, last):
        """Callback: generic error; forward it as an error event."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['ErrorID']
        err.errorMsg = error['ErrorMsg']
        self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogin(self, data, error, n, last):
        """Callback: login response.

        On success, re-subscribes every cached request and records the
        trading day; on failure pushes an error event.
        """
        print(data,error,'登陆回报登陆回报登陆回报登陆回报')
        if error['ErrorID'] == 0:
            self.loginStatus = True
            self.gateway.mdConnected = True
            self.writeLog(text.DATA_SERVER_LOGIN)
            # replay subscriptions requested before login completed
            for subscribeReq in self.subscribedSymbols:
                self.subscribe(subscribeReq)
            # remember the trading day (used by the DCE date fix-up)
            self.tradingDate = data['TradingDay']
            self.tradingDt = datetime.strptime(self.tradingDate, '%Y%m%d')
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg']
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspUserLogout(self, data, error, n, last):
        """Callback: logout response."""
        if error['ErrorID'] == 0:
            self.loginStatus = False
            self.gateway.mdConnected = False
            self.writeLog(text.DATA_SERVER_LOGOUT)
        else:
            err = VtErrorData()
            err.gatewayName = self.gatewayName
            err.errorID = error['ErrorID']
            err.errorMsg = error['ErrorMsg']
            self.gateway.onError(err)

    #----------------------------------------------------------------------
    def onRspSubMarketData(self, data, error, n, last):
        """Callback: subscribe response (errors deliberately ignored)."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubMarketData(self, data, error, n, last):
        """Callback: unsubscribe response (ignored, as above)."""
        pass

    #----------------------------------------------------------------------
    def onRtnDepthMarketData(self, data):
        """Callback: depth market data push; build and emit a VtTickData."""
        tick = VtTickData()
        tick.gatewayName = self.gatewayName
        tick.symbol = data['InstrumentID']
        tick.exchange = symbolExchangeDict.get(tick.symbol, EXCHANGE_UNKNOWN)
        tick.vtSymbol = VN_SEPARATOR.join([tick.symbol, tick.gatewayName])
        tick.lastPrice = data['LastPrice']
        tick.volume = data['Volume']
        tick.openInterest = data['OpenInterest']
        tick.time = '.'.join([data['UpdateTime'], str(data['UpdateMillisec'])])
        # SHFE/CZCE dates can be used directly; DCE needs fixing up below
        tick.date = data['ActionDay']
        tick.openPrice = data['OpenPrice']
        tick.highPrice = data['HighestPrice']
        tick.lowPrice = data['LowestPrice']
        tick.preClosePrice = data['PreClosePrice']
        tick.upperLimit = data['UpperLimitPrice']
        tick.lowerLimit = data['LowerLimitPrice']
        # CTP provides level-1 depth only
        tick.bidPrice1 = data['BidPrice1']
        tick.bidVolume1 = data['BidVolume1']
        tick.askPrice1 = data['AskPrice1']
        tick.askVolume1 = data['AskVolume1']
        # DCE date fix-up: maintain the trading date locally across midnight
        # ('==' instead of the original fragile 'is' identity comparison)
        if tick.exchange == EXCHANGE_DCE:
            newTime = datetime.strptime(tick.time, '%H:%M:%S.%f').time()
            # midnight crossed: new tick before the night cut-off while the
            # previous tick was after it
            if (self.tickTime and
                newTime < NIGHT_TRADING and
                self.tickTime > NIGHT_TRADING):
                self.tradingDt += timedelta(1)
                self.tradingDate = self.tradingDt.strftime('%Y%m%d')
            tick.date = self.tradingDate
            self.tickTime = newTime
        # per-tick traded volume derived from the previous cumulative volume
        get_tick = self.lastTickDict.get(str(tick.symbol), None)
        if get_tick:
            tick.lastVolume = tick.volume - get_tick.volume
        else:
            tick.lastVolume = 0
        if tick.lastVolume == 0:
            tick.volumeChange = 0
        else:
            tick.volumeChange = 1
        self.gateway.onTick(tick)
        self.lastTickDict[str(tick.symbol)] = tick

    #----------------------------------------------------------------------
    def onRspSubForQuoteRsp(self, data, error, n, last):
        """Callback: option quote-request subscribe response (unused)."""
        pass

    #----------------------------------------------------------------------
    def onRspUnSubForQuoteRsp(self, data, error, n, last):
        """Callback: option quote-request unsubscribe response (unused)."""
        pass

    #----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """Callback: option quote-request push (unused)."""
        pass

    #----------------------------------------------------------------------
    def connect(self, userID, password, brokerID, address):
        """Store credentials and connect (or log in if already connected).

        :param userID: account id
        :param password: password
        :param brokerID: broker id
        :param address: md front address
        """
        self.userID = userID
        self.password = password
        self.brokerID = brokerID
        self.address = address
        if not self.connectionStatus:
            # create the C++ API instance; the argument is the folder used
            # for the API's .con flow files
            path = getTempPath(self.gatewayName + '_')
            self.createFtdcMdApi(path)
            # register the front address and connect; triggers
            # onFrontConnected on success
            self.registerFront(self.address)
            self.init()
        else:
            if not self.loginStatus:
                self.login()

    #----------------------------------------------------------------------
    def subscribe(self, subscribeReq):
        """Subscribe to a contract.

        Requests made before login completes are cached and replayed by
        onRspUserLogin once logged in.
        """
        if self.loginStatus:
            self.subscribeMarketData(str(subscribeReq.symbol))
        self.subscribedSymbols.add(subscribeReq)

    #----------------------------------------------------------------------
    def login(self):
        """Send the login request (only when credentials are present)."""
        if self.userID and self.password and self.brokerID:
            req = {}
            req['UserID'] = self.userID
            # fixed: the source contained a scrubbed '<PASSWORD>' placeholder
            # here, which is a SyntaxError
            req['Password'] = self.password
            req['BrokerID'] = self.brokerID
            self.reqID += 1
            self.reqUserLogin(req, self.reqID)

    #----------------------------------------------------------------------
    def close(self):
        """Release the underlying API instance."""
        self.exit()

    #----------------------------------------------------------------------
    def writeLog(self, content):
        """Emit a log event through the gateway."""
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = content
        self.gateway.onLog(log)
########################################################################
class CtpTdApi(TdApi):
"""CTP交易API实现"""
#----------------------------------------------------------------------
    def __init__(self, gateway):
        """Initialise the trading-API wrapper and all of its state."""
        super(CtpTdApi, self).__init__()
        self.gateway = gateway                  # owning gateway object
        self.gatewayName = gateway.gatewayName  # gateway name (event routing key)
        self.reqID = EMPTY_INT              # monotonically increasing request id
        self.orderRef = EMPTY_INT           # local order reference counter
        self.connectionStatus = False       # connected to the trading front
        self.loginStatus = False            # logged in
        self.authStatus = False             # client authentication done
        self.loginFailed = False            # login failed (bad credentials); prevents retry loops
        self.userID = EMPTY_STRING          # account id
        self.password = EMPTY_STRING        # password
        self.brokerID = EMPTY_STRING        # broker id
        self.address = EMPTY_STRING         # trading front address
        self.frontID = EMPTY_INT            # front machine id (set on login)
        self.sessionID = EMPTY_INT          # session id (set on login)
        # position cache keyed by symbol+direction, flushed on the last record
        self.posDict = {}
        self.symbolExchangeDict = {}        # symbol -> exchange mapping
        self.symbolSizeDict = {}            # symbol -> contract multiplier mapping
        self.contractsList = []             # accumulated contract data
        self.requireAuthentication = False  # broker requires auth before login
#----------------------------------------------------------------------
def onFrontConnected(self):
"""服务器连接"""
self.connectionStatus = True
self.writeLog(text.TRADING_SERVER_CONNECTED)
if self.requireAuthentication:
self.authenticate()
else:
self.login()
#----------------------------------------------------------------------
    def onFrontDisconnected(self, n):
        """Callback: trading front connection lost."""
        # clear every connection/login flag so a reconnect starts clean
        self.connectionStatus = False
        self.loginStatus = False
        self.gateway.tdConnected = False
        self.writeLog(text.TRADING_SERVER_DISCONNECTED)
    #----------------------------------------------------------------------
    def onHeartBeatWarning(self, n):
        """Callback: heartbeat warning (deliberately ignored)."""
        pass
#----------------------------------------------------------------------
def onRspAuthenticate(self, data, error, n, last):
"""验证客户端回报"""
print(data,error,'验证客户端回报验证客户端回报验证客户端回报验证客户端回报')
if error['ErrorID'] == 0:
self.authStatus = True
self.writeLog(text.TRADING_SERVER_AUTHENTICATED)
self.login()
#----------------------------------------------------------------------
def onRspUserLogin(self, data, error, n, last):
"""登陆回报"""
"""{'FFEXTime': '18:26:27', 'UserID': '119247', 'TradingDay': '20181101', 'CZCETime': '18:26:27', 'BrokerID': '9999', 'SHFETime': '18:26:27', 'INETime': '--:--:--', 'DCETime': '18:26:27',
'LoginTime': '16:39:33', 'MaxOrderRef': '1', 'FrontID': 1, 'SystemName': 'TradingHosting', 'SessionID': 221906687} {'ErrorID': 0, 'ErrorMsg': 'CTP:正确'}
"""
# 如果登录成功,推送日志信息
if error['ErrorID'] == 0:
self.frontID = str(data['FrontID'])
self.sessionID = str(data['SessionID'])
self.loginStatus = True
self.gateway.tdConnected = True
self.writeLog(text.TRADING_SERVER_LOGIN)
# 确认结算信息
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqID += 1
self.reqSettlementInfoConfirm(req, self.reqID)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
# 标识登录失败,防止用错误信息连续重复登录
self.loginFailed = True
#----------------------------------------------------------------------
def onRspUserLogout(self, data, error, n, last):
"""登出回报"""
print(data,error,'登出回报登出回报登出回报v')
# 如果登出成功,推送日志信息
if error['ErrorID'] == 0:
self.loginStatus = False
self.gateway.tdConnected = False
self.writeLog(text.TRADING_SERVER_LOGOUT)
# 否则,推送错误信息
else:
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspUserPasswordUpdate(self, data, error, n, last):
        """Callback: user password update response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspTradingAccountPasswordUpdate(self, data, error, n, last):
        """Callback: trading account password update response (not used)."""
        pass
#----------------------------------------------------------------------
    def onRspOrderInsert(self, data, error, n, last):
        """Callback: order insert rejected at the broker counter.

        Emits a REJECTED order event followed by an error event.
        Example error payload: {'ErrorID': 16, 'ErrorMsg': 'CTP:找不到合约'}.
        """
        # push the rejected order so strategies see the failure
        order = VtOrderData()
        order.gatewayName = self.gatewayName
        order.symbol = data['InstrumentID']
        order.exchange = exchangeMapReverse[data['ExchangeID']]
        order.vtSymbol = VN_SEPARATOR.join([order.symbol, self.gatewayName])
        order.orderID = data['OrderRef']
        order.vtOrderID = VN_SEPARATOR.join([self.gatewayName, order.orderID])
        order.direction = directionMapReverse.get(data['Direction'], DIRECTION_UNKNOWN)
        order.offset = offsetMapReverse.get(data['CombOffsetFlag'], OFFSET_UNKNOWN)
        order.status = STATUS_REJECTED
        order.price = data['LimitPrice']
        order.totalVolume = data['VolumeTotalOriginal']
        order.orderDatetime = datetime.now()
        self.gateway.onOrder(order)
        # push the error details
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['ErrorID']
        err.errorMsg = error['ErrorMsg']
        self.gateway.onError(err)
#----------------------------------------------------------------------
    def onRspParkedOrderInsert(self, data, error, n, last):
        """Callback: parked order insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspParkedOrderAction(self, data, error, n, last):
        """Callback: parked order action response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspOrderAction(self, data, error, n, last):
        """Callback: order cancel rejected at the broker counter; push the error."""
        err = VtErrorData()
        err.gatewayName = self.gatewayName
        err.errorID = error['ErrorID']
        err.errorMsg = error['ErrorMsg']
        self.gateway.onError(err)
    #----------------------------------------------------------------------
    def onRspQueryMaxOrderVolume(self, data, error, n, last):
        """Callback: max order volume query response (not used)."""
        pass
    def restQryInstrument(self):
        """Re-issue the query for the full instrument list."""
        self.reqID += 1
        self.reqQryInstrument({}, self.reqID)
#----------------------------------------------------------------------
    def onRspSettlementInfoConfirm(self, data, error, n, last):
        """Callback: settlement info confirmed; then query instruments and
        margin rates.
        """
        log = VtLogData()
        log.gatewayName = self.gatewayName
        log.logContent = '结算信息确认完成'
        self.gateway.onLog(log)
        # query the full instrument list
        self.reqID += 1
        self.reqQryInstrument({}, self.reqID)
        # query instrument margin rates
        self.reqID += 1
        self.reqQryInstrumentMarginRate({}, self.reqID)
#----------------------------------------------------------------------
    def onRspRemoveParkedOrder(self, data, error, n, last):
        """Callback: remove parked order response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspRemoveParkedOrderAction(self, data, error, n, last):
        """Callback: remove parked order action response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspExecOrderInsert(self, data, error, n, last):
        """Callback: exercise order insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspExecOrderAction(self, data, error, n, last):
        """Callback: exercise order action response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspForQuoteInsert(self, data, error, n, last):
        """Callback: quote request insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQuoteInsert(self, data, error, n, last):
        """Callback: quote insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQuoteAction(self, data, error, n, last):
        """Callback: quote action response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspLockInsert(self, data, error, n, last):
        """Callback: lock insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspCombActionInsert(self, data, error, n, last):
        """Callback: combination action insert response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryOrder(self, data, error, n, last):
        """Callback: order query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTrade(self, data, error, n, last):
        """Callback: trade query response (not used)."""
        pass
#----------------------------------------------------------------------
def onRspQryInvestorPosition(self, data, error, n, last):
"""持仓查询回报"""
"""{'ShortFrozen': 0, 'FrozenMargin': 0.0, 'BrokerID': '9999', 'CashIn': 0.0, 'FrozenCommission': 0.0, 'UseMargin': 0.0, 'MarginRateByVolume': 0.0, 'CloseProfitByDate': 720.0,
'InstrumentID': 'rb1901', 'StrikeFrozen': 0, 'CombLongFrozen': 0, 'CloseProfitByTrade': 600.0, 'TodayPosition': 0, 'TradingDay': '20181106', 'CombShortFrozen': 0, 'YdStrikeFrozen': 0,
'PreSettlementPrice': 4037.0, 'OpenVolume': 0, 'CloseVolume': 1, 'SettlementPrice': 3965.0, 'OpenCost': 0.0, 'HedgeFlag': '1', 'OpenAmount': 0.0, 'StrikeFrozenAmount': 0.0, 'InvestorID': '119247',
'PositionCost': 0.0, 'LongFrozenAmount': 0.0, 'ExchangeID': '', 'PreMargin': 0.0, 'CloseProfit': 720.0, 'CloseAmount': 39650.0, 'LongFrozen': 0, 'PosiDirection': '3', 'CombPosition': 0, 'YdPosition': 1,
'PositionDate': '2', 'AbandonFrozen': 0, 'ShortFrozenAmount': 0.0, 'FrozenCash': 0.0, 'SettlementID': 1, 'Position': 0, 'ExchangeMargin': 0.0, 'MarginRateByMoney': 0.1, 'PositionProfit': 0.0,
'Commission': 3.9650000000000003} {'ErrorID': 0, 'ErrorMsg': ''}"""
# print(data,error,'持仓查询回报持仓查询回报持仓查询回报持仓查询回报')
if not data['InstrumentID']:
return
# 获取持仓缓存对象
posName = VN_SEPARATOR.join([data['InstrumentID'], data['PosiDirection']])
if posName in self.posDict:
pos = self.posDict[posName]
else:
pos = VtPositionData()
self.posDict[posName] = pos
pos.gatewayName = self.gatewayName
pos.symbol = data['InstrumentID']
pos.vtSymbol = VN_SEPARATOR.join([pos.symbol, pos.gatewayName])
pos.direction = posiDirectionMapReverse.get(data['PosiDirection'], '')
pos.vtPositionName = VN_SEPARATOR.join([pos.symbol, pos.direction])
# 针对上期所持仓的今昨分条返回(有昨仓、无今仓),读取昨仓数据
pos.ydPosition = 0
exchange = self.symbolExchangeDict.get(pos.symbol, EXCHANGE_UNKNOWN)
if exchange == EXCHANGE_SHFE:
if data['YdPosition'] and not data['TodayPosition']:
pos.ydPosition = data['Position']
# 否则基于总持仓和今持仓来计算昨仓数据
else:
pos.ydPosition = data['Position'] - data['TodayPosition']
# 计算成本
size = self.symbolSizeDict[pos.symbol]
cost = pos.price * pos.position * size
# 汇总总仓
pos.position += data['Position']
pos.positionProfit += data['PositionProfit']
# 计算持仓均价
if pos.position and pos.symbol in self.symbolSizeDict:
pos.price = (cost + data['PositionCost']) / (pos.position * size)
# 读取冻结
if pos.direction is DIRECTION_LONG:
pos.frozen += data['LongFrozen']
else:
pos.frozen += data['ShortFrozen']
# 查询回报结束
if last:
# 遍历推送
for pos in list(self.posDict.values()):
self.gateway.onPosition(pos)
# 清空缓存
self.posDict.clear()
#----------------------------------------------------------------------
    def onRspQryTradingAccount(self, data, error, n, last):
        """Callback: trading account (funds) query response.

        Maps CTP account fields onto a VtAccountData event and logs a short
        balance summary.
        """
        account = VtAccountData()
        account.gatewayName = self.gatewayName
        # account identifiers
        account.accountID = data['AccountID']
        account.vtAccountID = VN_SEPARATOR.join([self.gatewayName, account.accountID])
        # numeric fields
        account.preBalance = data['PreBalance']
        account.available = data['Available']
        account.commission = data['Commission']
        account.margin = data['CurrMargin']
        account.closeProfit = data['CloseProfit']
        account.positionProfit = data['PositionProfit']
        # dynamic balance recomputed from its components; NOTE(review):
        # whether this matches the broker terminal's own balance is untested
        account.balance = (data['PreBalance'] - data['PreCredit'] - data['PreMortgage'] +
                           data['Mortgage'] - data['Withdraw'] + data['Deposit'] +
                           data['CloseProfit'] + data['PositionProfit'] + data['CashIn'] -
                           data['Commission'])
        self.gateway.onAccount(account)
        self.writeLog(f"A/C-{account.accountID}: balance:{account.balance}, pre:{account.preBalance}")
        self.writeLog(f"unsettled_pnl:{account.positionProfit}, closed_pnl:{account.closeProfit}, commission:{account.commission}")
#----------------------------------------------------------------------
    def onRspQryInvestor(self, data, error, n, last):
        """Callback: investor info query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTradingCode(self, data, error, n, last):
        """Callback: trading code query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInstrumentMarginRate(self, data, error, n, last):
        """Callback: instrument margin rate query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInstrumentCommissionRate(self, data, error, n, last):
        """Callback: instrument commission rate query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchange(self, data, error, n, last):
        """Callback: exchange query response (not used)."""
        pass
    #----------------------------------------------------------------------
    def onRspQryProduct(self, data, error, n, last):
        """Callback: product query response (not used)."""
        pass
#----------------------------------------------------------------------
def onRspQryInstrument(self, data, error, n, last):
"""合约回报查询"""
"""
{'ShortMarginRatio': 0.08, 'EndDelivDate': '20190422', 'ProductID': 'au', 'PriceTick': 0.05, 'PositionType': '2', 'MinLimitOrderVolume': 1, 'ExchangeID': 'SHFE', 'DeliveryYear': 2019,
'MaxLimitOrderVolume': 500, 'MinSellVolume': 0, 'MinMarketOrderVolume': 1, 'InstrumentName': '黄金1904', 'InstrumentCode': '', 'IsTrading': 1, 'InstrumentID': 'au1904', 'LongMarginRatio': 0.08,
'UnderlyingMultiple': 0.0, 'OptionsType': '\x00', 'CreateDate': '20180206', 'ProductClass': '1', 'CombinationType': '0', 'OpenDate': '20180316', 'MinBuyVolume': 0, 'VolumeMultiple': 1000,
'UnderlyingInstrID': '', 'PositionDateType': '1', 'ExpireDate': '20190415', 'ExchangeInstID': 'au1904', 'DeliveryMonth': 4, 'MaxMarketOrderVolume': 30, 'InstLifePhase': '1', 'MaxMarginSideAlgorithm': '1',
'StartDelivDate': '20190416', 'StrikePrice': 0.0} {'ErrorID': 0, 'ErrorMsg': ''}
{'ShortMarginRatio': 0.2, 'EndDelivDate': '20190315', 'ProductID': 'IF', 'PriceTick': 0.2, 'PositionType': '2', 'MinLimitOrderVolume': 1, 'ExchangeID': 'CFFEX', 'DeliveryYear': 2019,
'MaxLimitOrderVolume': 20, 'MinSellVolume': 0, 'MinMarketOrderVolume': 1, 'InstrumentName': '沪深300股指1903', 'InstrumentCode': '', 'IsTrading': 1, 'InstrumentID': 'IF1903', 'LongMarginRatio': 0.2,
'UnderlyingMultiple': 0.0, 'OptionsType': '\x00', 'CreateDate': '20180613', 'ProductClass': '1', 'CombinationType': '0', 'OpenDate': '20180723', 'MinBuyVolume': 0, 'VolumeMultiple': 300,
'UnderlyingInstrID': '', 'PositionDateType': '2', 'ExpireDate': '20190315', 'ExchangeInstID': 'IF1903', 'DeliveryMonth': 3, 'MaxMarketOrderVolume': 10, 'InstLifePhase': '1', 'MaxMarginSideAlgorithm': '1',
'StartDelivDate': '20190315', 'StrikePrice': 0.0} {'ErrorID': 0, 'ErrorMsg': ''}
"""
contract = VtContractData()
contract.gatewayName = self.gatewayName
contract.symbol = data['InstrumentID']
contract.exchange = exchangeMapReverse[data['ExchangeID']]
contract.vtSymbol = VN_SEPARATOR.join([contract.symbol, contract.gatewayName])
contract.name = data['InstrumentName']
#contract.name = data['InstrumentName'].decode('GBK')
# 合约数值
contract.size = data['VolumeMultiple']
contract.priceTick = data['PriceTick']
contract.strikePrice = data['StrikePrice']
contract.underlyingSymbol = data['UnderlyingInstrID']
contract.productClass = productClassMapReverse.get(data['ProductClass'], PRODUCT_UNKNOWN)
# 期权类型
if contract.productClass is PRODUCT_OPTION:
if data['OptionsType'] == '1':
contract.optionType = OPTION_CALL
elif data['OptionsType'] == '2':
contract.optionType = OPTION_PUT
# 缓存代码和交易所的印射关系
self.symbolExchangeDict[contract.symbol] = contract.exchange
self.symbolSizeDict[contract.symbol] = contract.size
# 推送
self.gateway.onContract(contract)
self.contractsList.append(contract.symbol + VN_SEPARATOR + contract.exchange)
a = {"contracts":self.contractsList}
with open(getTempPath('contractList.json'),'w') as f:
json.dump(a,f,indent=4, ensure_ascii=False)
# 缓存合约代码和交易所映射
symbolExchangeDict[contract.symbol] = contract.exchange
if last:
self.writeLog(text.CONTRACT_DATA_RECEIVED)
    #----------------------------------------------------------------------
    def onRspQryDepthMarketData(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQrySettlementInfo(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTransferBank(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorPositionDetail(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryNotice(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQrySettlementInfoConfirm(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorPositionCombineDetail(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryCFMMCTradingAccountKey(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryEWarrantOffset(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorProductGroupMargin(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeMarginRate(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeMarginRateAdjust(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExchangeRate(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQrySecAgentACIDMap(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryProductExchRate(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryProductGroup(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryOptionInstrTradeCost(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryOptionInstrCommRate(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExecOrder(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryForQuote(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryQuote(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryLock(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryLockPosition(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryInvestorLevel(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryExecFreeze(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryCombInstrumentGuard(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryCombAction(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTransferSerial(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryAccountregister(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
def onRspError(self, error, n, last):
"""错误回报"""
print(error,'错误回报错误回报错误回报错误回报错误回报')
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onRtnOrder(self, data):
# self.writeLog('报单回报%s'%data)
"""报单回报"""
"""{'BusinessUnit': '9999cad', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999cad', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '',
'OrderPriceType': '2', 'SequenceNo': 0, 'ActiveTraderID': '', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181102', 'InstrumentID': 'IF1811', 'ZCETotalTradedVolume': 0,
'ForceCloseReason': '0', 'ClearingPartID': '', 'TradingDay': '20181101', 'CancelTime': '', 'OrderSource': '0', 'ActiveUserID': '', 'MinVolume': 1, 'LimitPrice': 3157.8, 'BrokerOrderSeq': 15467,
'NotifySequence': 0, 'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'CFFEX', 'ClientID': '9999119227', 'OrderRef': '1', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '16:34:06',
'UserProductInfo': '', 'InvestorID': '119247', 'OrderSysID': '', 'GTDDate': '', 'StatusMsg': '报单已提交', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0, 'CombOffsetFlag': '0', 'VolumeTraded': 0,
'OrderLocalID': ' 132', 'ParticipantID': '9999', 'OrderType': '0', 'SuspendTime': '', 'SessionID': 200083135, 'VolumeTotal': 1, 'OrderSubmitStatus': '0', 'VolumeCondition': '1', 'SettlementID': 1,
'IsSwapOrder': 0, 'ExchangeInstID': 'IF1811', 'OrderStatus': 'a', 'InstallID': 1}
{'BusinessUnit': '9999cad', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999cad', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '',
'OrderPriceType': '2', 'SequenceNo': 181, 'ActiveTraderID': '9999cad', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181031', 'InstrumentID': 'IF1811', 'ZCETotalTradedVolume': 0,
'ForceCloseReason': '0', 'ClearingPartID': '', 'TradingDay': '20181101', 'CancelTime': '', 'OrderSource': '\x00', 'ActiveUserID': '', 'MinVolume': 1, 'LimitPrice': 3157.8, 'BrokerOrderSeq': 15467,
'NotifySequence': 1, 'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'CFFEX', 'ClientID': '9999119227', 'OrderRef': '1', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '18:21:00',
'UserProductInfo': '', 'InvestorID': '119247', 'OrderSysID': ' 15215', 'GTDDate': '', 'StatusMsg': '未成交', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0, 'CombOffsetFlag': '0',
'VolumeTraded': 0, 'OrderLocalID': ' 132', 'ParticipantID': '9999', 'OrderType': '\x00', 'SuspendTime': '', 'SessionID': 200083135, 'VolumeTotal': 1, 'OrderSubmitStatus': '3',
'VolumeCondition': '1', 'SettlementID': 1, 'IsSwapOrder': 0, 'ExchangeInstID': 'IF1811', 'OrderStatus': '3', 'InstallID': 1}
{'BusinessUnit': '9999cad', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999cad', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '',
'OrderPriceType': '1', 'SequenceNo': 0, 'ActiveTraderID': '', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181102', 'InstrumentID': 'IF1811', 'ZCETotalTradedVolume': 0,
'ForceCloseReason': '0', 'ClearingPartID': '', 'TradingDay': '20181101', 'CancelTime': '', 'OrderSource': '0', 'ActiveUserID': '', 'MinVolume': 1, 'LimitPrice': 3157.8, 'BrokerOrderSeq': 15676,
'NotifySequence': 1, 'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'CFFEX', 'ClientID': '9999119227', 'OrderRef': '4', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '16:40:06',
'UserProductInfo': '', 'InvestorID': '119247', 'OrderSysID': '', 'GTDDate': '', 'StatusMsg': '已撤单报单被拒绝CFFEX:不被支持的报单类型', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0,
'CombOffsetFlag': '0', 'VolumeTraded': 0, 'OrderLocalID': ' 176', 'ParticipantID': '9999', 'OrderType': '0', 'SuspendTime': '', 'SessionID': 221906687, 'VolumeTotal': 1, 'OrderSubmitStatus': '4',
'VolumeCondition': '1', 'SettlementID': 1, 'IsSwapOrder': 0, 'ExchangeInstID': 'IF1811', 'OrderStatus': '5', 'InstallID': 1}
!!!!!!!!!!!SHFE不支持市价单!!!!!!!!!!!!!!!!!
{'BusinessUnit': '9999cad', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999cad', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '', 'OrderPriceType': '2',
'SequenceNo': 205, 'ActiveTraderID': '9999cad', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181031', 'InstrumentID': 'IF1811', 'ZCETotalTradedVolume': 0, 'ForceCloseReason': '0',
'ClearingPartID': '', 'TradingDay': '20181101', 'CancelTime': '', 'OrderSource': '\x00', 'ActiveUserID': '119247', 'MinVolume': 1, 'LimitPrice': 3157.8, 'BrokerOrderSeq': 15467, 'NotifySequence': 1,
'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'CFFEX', 'ClientID': '9999119227', 'OrderRef': '1', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '18:21:00', 'UserProductInfo': '',
'InvestorID': '119247', 'OrderSysID': ' 15215', 'GTDDate': '', 'StatusMsg': '已撤单', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0, 'CombOffsetFlag': '0', 'VolumeTraded': 0,
'OrderLocalID': ' 132', 'ParticipantID': '9999', 'OrderType': '\x00', 'SuspendTime': '', 'SessionID': 200083135, 'VolumeTotal': 1, 'OrderSubmitStatus': '3', 'VolumeCondition': '1',
'SettlementID': 1, 'IsSwapOrder': 0, 'ExchangeInstID': 'IF1811', 'OrderStatus': '5', 'InstallID': 1}
{'BusinessUnit': '9999cad', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999cad', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '', 'OrderPriceType': '2',
'SequenceNo': 321, 'ActiveTraderID': '9999cad', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181031', 'InstrumentID': 'IF1811', 'ZCETotalTradedVolume': 0, 'ForceCloseReason': '0',
'ClearingPartID': '', 'TradingDay': '20181101', 'CancelTime': '', 'OrderSource': '\x00', 'ActiveUserID': '', 'MinVolume': 1, 'LimitPrice': 3180.0, 'BrokerOrderSeq': 15852, 'NotifySequence': 1,
'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'CFFEX', 'ClientID': '9999119227', 'OrderRef': '5', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '18:34:00', 'UserProductInfo': '',
'InvestorID': '119247', 'OrderSysID': ' 15591', 'GTDDate': '', 'StatusMsg': '全部成交', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0, 'CombOffsetFlag': '0', 'VolumeTraded': 1,
'OrderLocalID': ' 207', 'ParticipantID': '9999', 'OrderType': '\x00', 'SuspendTime': '', 'SessionID': 248121201, 'VolumeTotal': 0, 'OrderSubmitStatus': '3', 'VolumeCondition': '1',
'SettlementID': 1, 'IsSwapOrder': 0, 'ExchangeInstID': 'IF1811', 'OrderStatus': '0', 'InstallID': 1}
{'BusinessUnit': '', 'RelativeOrderSysID': '', 'UserID': '119247', 'ContingentCondition': '1', 'TraderID': '9999caf', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'UpdateTime': '', 'OrderPriceType': '2',
'SequenceNo': 949, 'ActiveTraderID': '9999caf', 'ActiveTime': '', 'FrontID': 1, 'RequestID': 0, 'InsertDate': '20181112', 'InstrumentID': 'j1901', 'ZCETotalTradedVolume': 0, 'ForceCloseReason': '0',
'ClearingPartID': '', 'TradingDay': '20181113', 'CancelTime': '', 'OrderSource': '\x00', 'ActiveUserID': '', 'MinVolume': 1, 'LimitPrice': 2302.0, 'BrokerOrderSeq': 1692, 'NotifySequence': 1,
'UserForceClose': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': 'DCE', 'ClientID': '9999119227', 'OrderRef': '6', 'Direction': '0', 'TimeCondition': '3', 'InsertTime': '18:13:49', 'UserProductInfo': '',
'InvestorID': '119247', 'OrderSysID': ' 1596', 'GTDDate': '', 'StatusMsg': '全部成交', 'BranchID': '', 'CombHedgeFlag': '1', 'StopPrice': 0.0, 'CombOffsetFlag': '1', 'VolumeTraded': 1,
'OrderLocalID': ' 506', 'ParticipantID': '9999', 'OrderType': '\x00', 'SuspendTime': '', 'SessionID': -624024875, 'VolumeTotal': 0, 'OrderSubmitStatus': '3', 'VolumeCondition': '1',
'SettlementID': 1, 'IsSwapOrder': 0, 'ExchangeInstID': 'j1901', 'OrderStatus': '0', 'InstallID': 1}
"""
# 更新最大报单编号
newref = data['OrderRef']
self.orderRef = max(self.orderRef, int(newref))
# 创建报单数据对象
order = VtOrderData()
order.gatewayName = self.gatewayName
# 保存代码和报单号
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse[data['ExchangeID']]
order.vtSymbol = VN_SEPARATOR.join([order.symbol, order.gatewayName])
order.orderID = data['OrderRef']
# CTP的报单号一致性维护需要基于frontID, sessionID, orderID三个字段
# 但在本接口设计中,已经考虑了CTP的OrderRef的自增性,避免重复
# 唯一可能出现OrderRef重复的情况是多处登录并在非常接近的时间内(几乎同时发单)
# 考虑到VtTrader的应用场景,认为以上情况不会构成问题
order.vtOrderID = VN_SEPARATOR.join([self.gatewayName, order.orderID])
order.direction = directionMapReverse.get(data['Direction'], DIRECTION_UNKNOWN)
order.offset = offsetMapReverse.get(data['CombOffsetFlag'], OFFSET_UNKNOWN)
order.status = statusMapReverse.get(data['OrderStatus'], STATUS_UNKNOWN)
# 价格、报单量等数值
order.price = data['LimitPrice']
order.totalVolume = data['VolumeTotalOriginal']
order.tradedVolume = data['VolumeTraded']
order.orderTime = data['InsertTime']
order.cancelTime = data['CancelTime']
order.frontID = data['FrontID']
order.sessionID = data['SessionID']
order.orderDatetime = datetime.strptime(' '.join([data['TradingDay'], order.orderTime]), '%Y%m%d %H:%M:%S')
if order.cancelTime:
order.cancelDatetime = datetime.strptime(' '.join([data['TradingDay'], order.cancelTime]), '%Y%m%d %H:%M:%S')
# 推送
self.gateway.onOrder(order)
#----------------------------------------------------------------------
def onRtnTrade(self, data):
"""成交回报TradeInfo"""
"""{'TradingRole': '\x00', 'BusinessUnit': '', 'TradeType': '\x00', 'UserID': '119247', 'OrderSysID': ' 15591', 'TraderID': '9999cad', 'ExchangeID': 'CFFEX', 'BrokerID': '9999', 'OrderRef': '5',
'SequenceNo': 322, 'TradeSource': '0', 'ParticipantID': '9999', 'OrderLocalID': ' 207', 'InvestorID': '119247', 'InstrumentID': 'IF1811', 'BrokerOrderSeq': 15852, 'OffsetFlag': '0',
'TradeID': ' 11418', 'PriceSource': '\x00', 'TradingDay': '20181101', 'ClearingPartID': '9999', 'SettlementID': 1, 'Volume': 1, 'Price': 3180.0, 'ExchangeInstID': 'IF1811', 'TradeTime': '18:34:33',
'TradeDate': '20181031', 'ClientID': '9999119227', 'HedgeFlag': '1', 'Direction': '0'}
"""
# 创建报单数据对象
trade = VtTradeData()
trade.gatewayName = self.gatewayName
# 保存代码和报单号
trade.symbol = data['InstrumentID']
trade.exchange = exchangeMapReverse[data['ExchangeID']]
trade.vtSymbol = VN_SEPARATOR.join([trade.symbol, trade.gatewayName])
trade.tradeID = data['TradeID']
trade.vtTradeID = VN_SEPARATOR.join([self.gatewayName, trade.tradeID])
trade.orderID = data['OrderRef']
trade.vtOrderID = VN_SEPARATOR.join([self.gatewayName, trade.orderID])
# 方向
trade.direction = directionMapReverse.get(data['Direction'], '')
# 开平
offset = offsetMapReverse.get(data['OffsetFlag'], '')
if offset in [OFFSET_CLOSEYESTERDAY,OFFSET_CLOSETODAY]:
trade.offset = OFFSET_CLOSE
else:
trade.offset = offset
# 价格、报单量等数值
trade.price = data['Price']
trade.volume = data['Volume']
trade.tradeTime = data['TradeTime']
trade.tradeDatetime =datetime.strptime(' '.join([data['TradeDate'], trade.tradeTime]), '%Y%m%d %H:%M:%S')
# 推送
self.gateway.onTrade(trade)
#----------------------------------------------------------------------
def onErrRtnOrderInsert(self, data, error):
print(data,error)
"""发单错误回报(交易所)"""
"""{'TimeCondition': '3', 'BusinessUnit': '', 'UserID': '119247', 'ContingentCondition': '1', 'CombHedgeFlag': '1', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'GTDDate': '', 'StopPrice': 0.0,
'CombOffsetFlag': '0', 'OrderPriceType': '2', 'InvestorID': '119247', 'RequestID': 0, 'InstrumentID': 'I', 'UserForceClose': 0, 'ForceCloseReason': '0', 'VolumeCondition': '1', 'MinVolume': 1,
'LimitPrice': 3178.6, 'IsSwapOrder': 0, 'VolumeTotalOriginal': 1, 'ExchangeID': '', 'OrderRef': '6', 'Direction': '0'} {'ErrorID': 16, 'ErrorMsg': 'CTP:找不到合约'}
{'TimeCondition': '3', 'BusinessUnit': '', 'UserID': '119247', 'ContingentCondition': '1', 'CombHedgeFlag': '1', 'IsAutoSuspend': 0, 'BrokerID': '9999', 'GTDDate': '', 'StopPrice': 0.0,
'CombOffsetFlag': '1', 'OrderPriceType': '2', 'InvestorID': '119247', 'RequestID': 0, 'InstrumentID': 'rb1901', 'UserForceClose': 0, 'ForceCloseReason': '0', 'VolumeCondition': '1', 'MinVolume': 1,
'LimitPrice': 3988.0, 'IsSwapOrder': 0, 'VolumeTotalOriginal': 10, 'ExchangeID': 'SHFE', 'OrderRef': '4', 'Direction': '0'} {'ErrorID': 51, 'ErrorMsg': 'CTP:平昨仓位不足'}"""
# 推送委托信息
order = VtOrderData()
order.gatewayName = self.gatewayName
order.symbol = data['InstrumentID']
order.exchange = exchangeMapReverse[data['ExchangeID']]
order.vtSymbol = VN_SEPARATOR.join([order.symbol,order.gatewayName])
order.orderID = data['OrderRef']
order.vtOrderID = VN_SEPARATOR.join([self.gatewayName, order.orderID])
order.direction = directionMapReverse.get(data['Direction'], DIRECTION_UNKNOWN)
order.offset = offsetMapReverse.get(data['CombOffsetFlag'], OFFSET_UNKNOWN)
order.status = STATUS_REJECTED
order.price = data['LimitPrice']
order.totalVolume = data['VolumeTotalOriginal']
order.orderDatetime = datetime.now()
self.gateway.onOrder(order)
# 推送错误信息
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
#----------------------------------------------------------------------
def onErrRtnOrderAction(self, data, error):
"""撤单错误回报(交易所)"""
print(data,error,'撤单错误回报(交易所)撤单错误回报(交易所)')
err = VtErrorData()
err.gatewayName = self.gatewayName
err.errorID = error['ErrorID']
err.errorMsg = error['ErrorMsg']
#err.errorMsg = error['ErrorMsg'].decode('gbk')
self.gateway.onError(err)
    #----------------------------------------------------------------------
    def onRtnInstrumentStatus(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnTradingNotice(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnErrorConditionalOrder(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnExecOrder(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnExecOrderInsert(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnExecOrderAction(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnForQuoteInsert(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnQuote(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQuoteInsert(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQuoteAction(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnForQuoteRsp(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnCFMMCTradingAccountToken(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnLock(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnLockInsert(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnCombAction(self, data):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnCombActionInsert(self, data, error):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryContractBank(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryParkedOrder(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryParkedOrderAction(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryTradingNotice(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryBrokerTradingParams(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQryBrokerTradingAlgos(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQueryCFMMCTradingAccountToken(self, data, error, n, last):
        """Unused CTP callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromBankToFutureByBank(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromFutureToBankByBank(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByBank(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByBank(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromBankToFutureByFuture(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnFromFutureToBankByFuture(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByFutureManual(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByFutureManual(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnQueryBankBalanceByFuture(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnBankToFutureByFuture(self, data, error):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnFutureToBankByFuture(self, data, error):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnRepealBankToFutureByFutureManual(self, data, error):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnRepealFutureToBankByFutureManual(self, data, error):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onErrRtnQueryBankBalanceByFuture(self, data, error):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromBankToFutureByFuture(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnRepealFromFutureToBankByFuture(self, data):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspFromBankToFutureByFuture(self, data, error, n, last):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspFromFutureToBankByFuture(self, data, error, n, last):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRspQueryBankAccountMoneyByFuture(self, data, error, n, last):
        """Unused bank-futures transfer callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnOpenAccountByBank(self, data):
        """Unused bank-account callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnCancelAccountByBank(self, data):
        """Unused bank-account callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
    def onRtnChangeAccountByBank(self, data):
        """Unused bank-account callback; required by the trader API interface."""
        pass
    #----------------------------------------------------------------------
def connect(self, userID, password, brokerID, address, authCode, userProductInfo = 'CTP'):
"""初始化连接"""
self.userID = userID # 账号
self.password = password # 密码
self.brokerID = brokerID # 经纪商代码
self.address = address # 服务器地址
self.authCode = authCode # 验证码
self.userProductInfo = userProductInfo # 产品信息
# 如果尚未建立服务器连接,则进行连接
if not self.connectionStatus:
# 创建C++环境中的API对象,这里传入的参数是需要用来保存.con文件的文件夹路径
path = getTempPath(self.gatewayName + '_')
self.createFtdcTraderApi(path)
# 设置数据同步模式为推送从今日开始所有数据
self.subscribePrivateTopic(0)
self.subscribePublicTopic(0)
# 注册服务器地址
self.registerFront(self.address)
# 初始化连接,成功会调用onFrontConnected
self.init()
# 若已经连接但尚未登录,则进行登录
else:
if self.requireAuthentication and not self.authStatus:
self.authenticate()
elif not self.loginStatus:
self.login()
#----------------------------------------------------------------------
def login(self):
"""连接服务器"""
# 如果之前有过登录失败,则不再进行尝试
if self.loginFailed:
return
# 如果填入了用户名密码等,则登录
if self.userID and self.password and self.brokerID:
req = {}
req['UserID'] = self.userID
req['Password'] = <PASSWORD>
req['BrokerID'] = self.brokerID
self.reqID += 1
self.reqUserLogin(req, self.reqID)
#----------------------------------------------------------------------
def authenticate(self):
"""申请验证"""
if self.userID and self.brokerID and self.authCode and self.userProductInfo:
req = {}
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
req['AuthCode'] = self.authCode
req['UserProductInfo'] = self.userProductInfo
self.reqID +=1
self.reqAuthenticate(req, self.reqID)
#----------------------------------------------------------------------
def qryAccount(self):
"""查询账户"""
self.reqID += 1
self.reqQryTradingAccount({}, self.reqID)
#----------------------------------------------------------------------
def qryPosition(self):
"""查询持仓"""
self.reqID += 1
req = {}
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqQryInvestorPosition(req, self.reqID)
#----------------------------------------------------------------------
def sendOrder(self, orderReq):
"""发单"""
"""
{'InstrumentID': 'IF1811', 'LimitPrice': 3157.8, 'VolumeTotalOriginal': 1, 'OrderPriceType': '2', 'Direction': '0', 'CombOffsetFlag': '0', 'OrderRef': '1', 'InvestorID': '119247', 'UserID': '119247',
'BrokerID': '9999', 'CombHedgeFlag': '1', 'ContingentCondition': '1', 'ForceCloseReason': '0', 'IsAutoSuspend': 0, 'TimeCondition': '3', 'VolumeCondition': '1', 'MinVolume': 1}
"""
self.reqID += 1
self.orderRef += 1
req = {}
req['InstrumentID'] = orderReq.symbol
req['LimitPrice'] = orderReq.price
req['VolumeTotalOriginal'] = int(orderReq.volume)
# 下面如果由于传入的类型本接口不支持,则会返回空字符串
req['OrderPriceType'] = priceTypeMap.get(orderReq.priceType, '')
req['Direction'] = directionMap.get(orderReq.direction, '')
req['CombOffsetFlag'] = offsetMap.get(orderReq.offset, '')
req['OrderRef'] = str(self.orderRef)
req['InvestorID'] = self.userID
req['UserID'] = self.userID
req['BrokerID'] = self.brokerID
req['CombHedgeFlag'] = defineDict['THOST_FTDC_HF_Speculation'] # 投机单
req['ContingentCondition'] = defineDict['THOST_FTDC_CC_Immediately'] # 立即发单
req['ForceCloseReason'] = defineDict['THOST_FTDC_FCC_NotForceClose'] # 非强平
req['IsAutoSuspend'] = 0 # 非自动挂起
req['TimeCondition'] = defineDict['THOST_FTDC_TC_GFD'] # 今日有效
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV'] # 任意成交量
req['MinVolume'] = 1 # 最小成交量为1
# if orderReq.offset == OFFSET_OPEN:
# req['StopPrice'] = orderReq.price + 15
# 判断FAK和FOK
if orderReq.priceType == PRICETYPE_FAK:
req['OrderPriceType'] = defineDict["THOST_FTDC_OPT_LimitPrice"]
req['TimeCondition'] = defineDict['THOST_FTDC_TC_IOC']
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_AV']
if orderReq.priceType == PRICETYPE_FOK:
req['OrderPriceType'] = defineDict["THOST_FTDC_OPT_LimitPrice"]
req['TimeCondition'] = defineDict['THOST_FTDC_TC_IOC']
req['VolumeCondition'] = defineDict['THOST_FTDC_VC_CV']
self.reqOrderInsert(req, self.reqID)
# 返回订单号(字符串),便于某些算法进行动态管理
self.writeLog('Gateway 发单:%s'%req)
vtOrderID = VN_SEPARATOR.join([self.gatewayName, str(self.orderRef)])
return vtOrderID
#----------------------------------------------------------------------
def cancelOrder(self, cancelOrderReq):
"""撤单"""
self.reqID += 1
req = {}
req['InstrumentID'] = cancelOrderReq.symbol
req['ExchangeID'] = cancelOrderReq.exchange
req['OrderRef'] = cancelOrderReq.orderID
req['FrontID'] = cancelOrderReq.frontID
req['SessionID'] = cancelOrderReq.sessionID
req['ActionFlag'] = defineDict['THOST_FTDC_AF_Delete']
req['BrokerID'] = self.brokerID
req['InvestorID'] = self.userID
self.reqOrderAction(req, self.reqID)
#----------------------------------------------------------------------
def close(self):
"""关闭"""
self.exit()
#----------------------------------------------------------------------
def writeLog(self, content):
"""发出日志"""
log = VtLogData()
log.gatewayName = self.gatewayName
log.logContent = content
self.gateway.onLog(log)
|
import torch
from torch import nn
from torch.autograd import Function
from .voxel_layer import (dynamic_point_to_voxel_backward,
dynamic_point_to_voxel_forward)
class _dynamic_scatter(Function):
    """Autograd wrapper around the dynamic point-to-voxel C++/CUDA ops."""

    @staticmethod
    def forward(ctx, feats, coors, reduce_type='max'):
        """convert kitti points(N, >=3) to voxels.
        Args:
            feats: [N, C] float tensor. points features to be reduced
                into voxels.
            coors: [N, ndim] int tensor. corresponding voxel coordinates
                (specifically multi-dim voxel index) of each points.
            reduce_type: str. reduce op. support 'max', 'sum' and 'mean'
        Returns:
            tuple
            voxel_feats: [M, C] float tensor. reduced features. input features
                that shares the same voxel coordinates are reduced to one row
            coordinates: [M, ndim] int tensor, voxel coordinates.
        """
        results = dynamic_point_to_voxel_forward(feats, coors, reduce_type)
        (voxel_feats, voxel_coors, point2voxel_map,
         voxel_points_count) = results
        # Keep everything backward() needs to scatter gradients back onto
        # the contributing points.
        ctx.reduce_type = reduce_type
        ctx.save_for_backward(feats, voxel_feats, point2voxel_map,
                              voxel_points_count)
        # Voxel coordinates are integer indices; no gradient flows there.
        ctx.mark_non_differentiable(voxel_coors)
        return voxel_feats, voxel_coors

    @staticmethod
    def backward(ctx, grad_voxel_feats, grad_voxel_coors=None):
        """Scatter per-voxel gradients back to the original points."""
        (feats, voxel_feats, point2voxel_map,
         voxel_points_count) = ctx.saved_tensors
        grad_feats = torch.zeros_like(feats)
        # TODO: whether to use index put or use cuda_backward
        # To use index put, need point to voxel index
        dynamic_point_to_voxel_backward(grad_feats,
                                        grad_voxel_feats.contiguous(), feats,
                                        voxel_feats, point2voxel_map,
                                        voxel_points_count, ctx.reduce_type)
        # Only feats gets a gradient; coors and reduce_type do not.
        return grad_feats, None, None
# Functional form: dynamic_scatter(feats, coors, reduce_type).
dynamic_scatter = _dynamic_scatter.apply
class DynamicScatter(nn.Module):
    """Scatters points into voxels; used in voxel encoders with dynamic
    voxelization.

    **Note**: The CPU and GPU implementation get the same output, but
    have numerical difference after summation and division (e.g., 5e-7).

    Args:
        voxel_size (list): list [x, y, z] size of three dimension.
        point_cloud_range (list):
            [x_min, y_min, z_min, x_max, y_max, z_max]
        average_points (bool): whether to use avg pooling to scatter
            points into voxel.
    """

    def __init__(self, voxel_size, point_cloud_range, average_points: bool):
        # BUG FIX: this documentation used to live in a bare string
        # statement *after* super().__init__(), where it was dead code
        # (not a docstring); it is now the class docstring above.
        super(DynamicScatter, self).__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range
        self.average_points = average_points

    def forward_single(self, points, coors):
        """Reduce features of points that share a voxel ('mean' or 'max')."""
        reduce = 'mean' if self.average_points else 'max'
        return dynamic_scatter(points.contiguous(), coors.contiguous(),
                               reduce)

    def forward(self, points, coors):
        """Scatter points into voxels.

        Args:
            points: [N, C] float tensor of point features.
            coors: [N, 3] voxel coordinates, or [N, 4] with a leading
                batch-index column when inputs are batched.
        """
        if coors.size(-1) == 3:
            return self.forward_single(points, coors)
        else:
            # coors are assumed sorted by batch index, so the last row
            # holds the highest batch id.
            batch_size = coors[-1, 0].int() + 1
            voxels, voxel_coors = [], []
            for i in range(batch_size):
                inds = torch.where(coors[:, 0] == i)
                voxel, voxel_coor = self.forward_single(
                    points[inds], coors[inds][:, 1:])
                # Re-attach the batch index as the first coordinate column.
                coor_pad = nn.functional.pad(
                    voxel_coor, (1, 0), mode='constant', value=i)
                voxel_coors.append(coor_pad)
                voxels.append(voxel)
            features = torch.cat(voxels, dim=0)
            feature_coors = torch.cat(voxel_coors, dim=0)
            return features, feature_coors

    def __repr__(self):
        tmpstr = self.__class__.__name__ + '('
        tmpstr += 'voxel_size=' + str(self.voxel_size)
        tmpstr += ', point_cloud_range=' + str(self.point_cloud_range)
        tmpstr += ', average_points=' + str(self.average_points)
        tmpstr += ')'
        return tmpstr
|
<gh_stars>10-100
#!/usr/bin/env python
#################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
This script is a python implementation of the "boot.go" script in "beam-sdks-python-container"
project of Apache Beam, see in:
https://github.com/apache/beam/blob/release-2.14.0/sdks/python/container/boot.go
It is implemented in golang and will introduce unnecessary dependencies if used in pure python
project. So we add a python implementation which will be used when the python worker runs in
process mode. It downloads and installs users' python artifacts, then launches the python SDK
harness of Apache Beam.
"""
import argparse
import os
from subprocess import call
import grpc
import logging
import sys
from apache_beam.portability.api.beam_provision_api_pb2_grpc import ProvisionServiceStub
from apache_beam.portability.api.beam_provision_api_pb2 import GetProvisionInfoRequest
from apache_beam.portability.api.endpoints_pb2 import ApiServiceDescriptor
from google.protobuf import json_format, text_format
def check_not_empty(check_str, error_message):
    """Log *error_message* and terminate the process if *check_str* is empty."""
    if check_str == "":
        logging.fatal(error_message)
        # logging.fatal() only logs; terminate explicitly. sys.exit is
        # used instead of the exit() builtin, which is injected by the
        # site module and is not guaranteed to exist in all interpreters.
        sys.exit(1)
# Interpreter used to spawn the Beam SDK harness subprocess below.
python_exec = sys.executable
if __name__ == "__main__":
    # print INFO and higher level messages
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    parser = argparse.ArgumentParser()
    parser.add_argument("--id", default="", help="Local identifier (required).")
    parser.add_argument("--logging_endpoint", default="",
                        help="Logging endpoint (required).")
    parser.add_argument("--provision_endpoint", default="",
                        help="Provision endpoint (required).")
    parser.add_argument("--control_endpoint", default="",
                        help="Control endpoint (required).")
    parser.add_argument("--semi_persist_dir", default="/tmp",
                        help="Local semi-persistent directory (optional).")
    args = parser.parse_known_args()[0]
    worker_id = args.id
    logging_endpoint = args.logging_endpoint
    provision_endpoint = args.provision_endpoint
    control_endpoint = args.control_endpoint
    semi_persist_dir = args.semi_persist_dir
    # All endpoints are mandatory; bail out early with a clear message.
    check_not_empty(worker_id, "No id provided.")
    check_not_empty(logging_endpoint, "No logging endpoint provided.")
    check_not_empty(provision_endpoint, "No provision endpoint provided.")
    check_not_empty(control_endpoint, "No control endpoint provided.")
    logging.info("Initializing python harness: %s" % " ".join(sys.argv))
    metadata = [("worker_id", worker_id)]
    # read job information from provision stub
    with grpc.insecure_channel(provision_endpoint) as channel:
        client = ProvisionServiceStub(channel=channel)
        info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
        options = json_format.MessageToJson(info.pipeline_options)
    # Hand everything to the SDK harness through the environment -- the
    # same contract the golang boot.go uses.
    os.environ["WORKER_ID"] = worker_id
    os.environ["PIPELINE_OPTIONS"] = options
    os.environ["SEMI_PERSISTENT_DIRECTORY"] = semi_persist_dir
    os.environ["LOGGING_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
        ApiServiceDescriptor(url=logging_endpoint))
    os.environ["CONTROL_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
        ApiServiceDescriptor(url=control_endpoint))
    env = dict(os.environ)
    # Test hook: skip launching the real SDK worker under test.
    if "FLINK_BOOT_TESTING" in os.environ and os.environ["FLINK_BOOT_TESTING"] == "1":
        exit(0)
    call([python_exec, "-m", "pyflink.fn_execution.beam.beam_sdk_worker_main"],
         stdout=sys.stdout, stderr=sys.stderr, env=env)
|
#importing libraries
import turtle
import random
import time
# Create the game window.
screen = turtle.Screen()
screen.title('SNAKE GAME')
screen.setup(width = 900, height = 750)
screen.tracer(0)  # disable auto-refresh; the main loop calls update()
turtle.bgcolor('#f0e4d7')
# Draw the rectangular playing-field border with the anonymous turtle.
turtle.speed(5)
turtle.pensize(4)
turtle.penup()
turtle.goto(-310,250)
turtle.pendown()
turtle.color('black')
turtle.forward(700)
turtle.right(90)
turtle.forward(600)
turtle.right(90)
turtle.forward(700)
turtle.right(90)
turtle.forward(600)
turtle.penup()
turtle.hideturtle()
# Score and per-frame delay (delay shrinks as the score grows).
score = 0
delay = 0.1
# The snake's head.
snake = turtle.Turtle()
snake.speed(0)
snake.shape('square')
snake.color("black")
snake.penup()
snake.goto(0,0)
snake.direction = 'stop'
# The fruit the snake eats.
fruit = turtle.Turtle()
fruit.speed(0)
fruit.shape('circle')
fruit.color('red')
fruit.penup()
fruit.goto(30,30)
old_fruit=[]  # body segments; one is appended per fruit eaten
# Score display at the top of the screen.
scoring = turtle.Turtle()
scoring.speed(0)
scoring.color("black")
scoring.penup()
scoring.hideturtle()
scoring.goto(0,300)
scoring.write("Score :",align="center",font=("Courier",24,"bold"))
#######define how to move
def snake_go_up():
    """Turn the snake upward unless it is moving down (no reversing)."""
    if snake.direction == "down":
        return
    snake.direction = "up"
def snake_go_down():
    """Turn the snake downward unless it is moving up (no reversing)."""
    if snake.direction == "up":
        return
    snake.direction = "down"
def snake_go_left():
    """Turn the snake left unless it is moving right (no reversing)."""
    if snake.direction == "right":
        return
    snake.direction = "left"
def snake_go_right():
    """Turn the snake right unless it is moving left (no reversing)."""
    if snake.direction == "left":
        return
    snake.direction = "right"
def snake_move():
    """Advance the snake one 20-pixel step in its current direction.

    Does nothing while the direction is 'stop'.
    """
    heading = snake.direction
    if heading == "up":
        snake.sety(snake.ycor() + 20)
    elif heading == "down":
        snake.sety(snake.ycor() - 20)
    elif heading == "left":
        snake.setx(snake.xcor() - 20)
    elif heading == "right":
        snake.setx(snake.xcor() + 20)
# Keyboard bindings: arrow keys steer the snake.
screen.listen()
screen.onkeypress(snake_go_up, "Up")
screen.onkeypress(snake_go_down, "Down")
screen.onkeypress(snake_go_left, "Left")
screen.onkeypress(snake_go_right, "Right")
# Main game loop: one iteration per frame.
while True:
    screen.update()
    # Snake/fruit collision: relocate the fruit, bump score and speed,
    # and create a new body segment.
    if snake.distance(fruit)< 20:
        x = random.randint(-290,270)
        y = random.randint(-240,240)
        fruit.goto(x,y)
        scoring.clear()
        score+=1
        scoring.write("Score:{}".format(score),align="center",font=("Courier",24,"bold"))
        delay-=0.001
        # New body segment; it is positioned on the next frame.
        new_fruit = turtle.Turtle()
        new_fruit.speed(0)
        new_fruit.shape('square')
        new_fruit.color('red')
        new_fruit.penup()
        old_fruit.append(new_fruit)
    # Move the body back-to-front: each segment steps into the place of
    # its predecessor, the first segment steps into the head's place.
    for index in range(len(old_fruit)-1,0,-1):
        a = old_fruit[index-1].xcor()
        b = old_fruit[index-1].ycor()
        old_fruit[index].goto(a,b)
    if len(old_fruit)>0:
        a= snake.xcor()
        b = snake.ycor()
        old_fruit[0].goto(a,b)
    snake_move()
    # Border collision ends the game.
    # NOTE(review): neither game-over branch breaks out of the loop, so
    # play continues against a cleared screen -- confirm whether a
    # break/exit was intended here.
    if snake.xcor()>380 or snake.xcor()< -300 or snake.ycor()>240 or snake.ycor()<-240:
        time.sleep(1)
        screen.clear()
        screen.bgcolor('#f0e4d7')
        scoring.goto(0,0)
        scoring.write(" GAME OVER \n Your Score is {}".format(score),align="center",font=("Courier",30,"bold"))
    # Collision of the head with any body segment also ends the game.
    for food in old_fruit:
        if food.distance(snake) < 20:
            time.sleep(1)
            screen.clear()
            screen.bgcolor('#f0e4d7')
            scoring.goto(0,0)
            scoring.write(" GAME OVER \n Your Score is {}".format(score),align="center",font=("Courier",30,"bold"))
    time.sleep(delay)
# NOTE(review): unreachable -- the while above never breaks.
turtle.Terminator()
|
<filename>avalanche/benchmarks/scenarios/online_scenario.py<gh_stars>0
################################################################################
# Copyright (c) 2022 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 11-04-2022 #
# Author(s): <NAME> #
# E-mail: <EMAIL> #
# Website: avalanche.continualai.org #
################################################################################
from copy import copy
from typing import Callable, Iterable, List
import torch
from avalanche.benchmarks.scenarios.generic_scenario import (
CLExperience,
EagerCLStream,
CLStream,
ExperienceAttribute,
CLScenario,
)
from avalanche.benchmarks.utils import AvalancheSubset
class OnlineCLExperience(CLExperience):
    """Online CL (OCL) Experience.

    OCL experiences are created by splitting a larger experience. Therefore,
    they keep track of the original experience for logging purposes.
    """

    def __init__(
        self,
        current_experience: int = None,
        origin_stream=None,
        origin_experience=None,
        is_first_subexp: bool = False,
        sub_stream_length: int = None,
    ):
        """Init.

        :param current_experience: experience identifier.
        :param origin_stream: origin stream.
        :param origin_experience: origin experience used to create self.
        :param is_first_subexp: whether self is the first in the sub-experiences
            stream.
        :param sub_stream_length: the sub-stream length.
        """
        super().__init__(current_experience, origin_stream)
        # Wrapped in ExperienceAttribute so the scenario machinery
        # controls their visibility, like other experience attributes.
        self.origin_experience = ExperienceAttribute(origin_experience)
        self.is_first_subexp = ExperienceAttribute(is_first_subexp)
        self.sub_stream_length = ExperienceAttribute(sub_stream_length)
def fixed_size_experience_split(
    experience: CLExperience,
    experience_size: int,
    shuffle: bool = True,
    drop_last: bool = False,
):
    """Lazily split one experience into mini-experiences of fixed size.

    :param experience: The experience to split.
    :param experience_size: Number of instances per mini-experience.
    :param shuffle: If True, instances are shuffled before splitting.
    :param drop_last: If True, a final mini-experience smaller than
        `experience_size` is dropped.
    :return: A generator of :class:`OnlineCLExperience` objects.
    """

    def _generator():
        dataset = experience.dataset
        indices = list(range(len(dataset)))
        if shuffle:
            perm = torch.randperm(len(indices))
            indices = torch.as_tensor(indices)[perm].tolist()
        start = 0
        first = True
        while start < len(indices):
            stop = start + experience_size  # exclusive
            if stop > len(indices):
                if drop_last:
                    break
                stop = len(indices)
            sub_exp = OnlineCLExperience(
                origin_experience=experience, is_first_subexp=first
            )
            sub_exp.dataset = AvalancheSubset(
                dataset, indices=indices[start:stop]
            )
            first = False
            yield sub_exp
            start = stop

    return _generator()
def split_online_stream(
    original_stream: EagerCLStream,
    experience_size: int,
    shuffle: bool = False,
    drop_last: bool = False,
    experience_split_strategy: Callable[
        [CLExperience], Iterable[CLExperience]
    ] = None,
):
    """Split a stream of large batches to create an online stream of small
    mini-batches.

    The resulting stream can be used for Online Continual Learning (OCL)
    scenarios (or data-incremental, or other online-based settings).
    For efficiency reasons, the resulting stream is an iterator, generating
    experience on-demand.

    :param original_stream: The stream with the original data.
    :param experience_size: The size of the experience, as an int. Ignored
        if `experience_split_strategy` is used.
    :param shuffle: If True, experiences will be split by first shuffling
        instances in each experience. Defaults to False. Ignored if
        `experience_split_strategy` is used.
    :param drop_last: If True, if the last experience doesn't contain
        `experience_size` instances, then the last experience will be
        dropped. Defaults to False. Ignored if `experience_split_strategy`
        is used.
    :param experience_split_strategy: A function that implements a custom
        splitting strategy. The function must accept an experience and
        return an experience's iterator. Defaults to None, which means the
        standard fixed-size splitting strategy will be used.
    :return: A lazy online stream with experiences of size `experience_size`.
    """
    if experience_split_strategy is None:
        def split_foo(exp: CLExperience):
            return fixed_size_experience_split(
                exp, experience_size, shuffle, drop_last)
    else:
        # BUG FIX: a custom strategy used to be accepted but never used,
        # leaving `split_foo` undefined (NameError) whenever it was passed.
        split_foo = experience_split_strategy

    def exps_iter():
        for exp in original_stream:
            # BUG FIX: previously this yielded `exp` (the full original
            # experience) instead of each sub-experience.
            for sub_exp in split_foo(exp):
                yield sub_exp

    return CLStream(
        name=original_stream.name, exps_iter=exps_iter(), set_stream_info=True
    )
class OnlineCLScenario(CLScenario):
    """Online CL scenario built by splitting a train stream into
    fixed-size mini-experiences; the untouched original streams are kept
    under an "original_" name prefix."""

    def __init__(
        self,
        original_streams: List[EagerCLStream],
        experience_size: int = 10,
        stream_split_strategy="fixed_size_split",
    ):
        if stream_split_strategy != "fixed_size_split":
            raise ValueError("Unknown experience split strategy")

        streams_dict = {stream.name: stream for stream in original_streams}
        if "train" not in streams_dict:
            raise ValueError("Missing train stream for `original_streams`.")
        streams = [
            split_online_stream(streams_dict["train"], experience_size)
        ]

        # Keep the originals available, renamed with an "original_" prefix.
        for stream in original_streams:
            renamed = copy(stream)
            renamed.name = "original_" + stream.name
            streams.append(renamed)
        super().__init__(streams)
|
<reponame>rdo-infra/ci-conf
#!/usr/bin/env python
import csv
import json
import os
import re
import sys
import time
from datetime import datetime
from io import StringIO
from tempfile import mkstemp
import click
import dlrnapi_client
import requests
import yaml
from dlrnapi_client.rest import ApiException
from jinja2 import Environment, FileSystemLoader
from rich import print as rich_print
from rich.console import Console
from rich.table import Table
from urllib3.exceptions import InsecureRequestWarning
# Shared Rich console used by all printing helpers below.
console = Console()
def date_diff_in_seconds(dt2, dt1):
    """Return dt2 - dt1 as whole seconds (microseconds are ignored)."""
    delta = dt2 - dt1
    return delta.days * 24 * 3600 + delta.seconds
def dhms_from_seconds(seconds):
    """Convert a duration in seconds to an (hours, minutes, seconds) tuple.

    Despite the name, days are not split out: hours may exceed 24.
    """
    hours, remainder = divmod(seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return (hours, minutes, seconds)
def strip_date_time_from_string(input_string):
    """Extract the leading 'YYYY-MM-DD HH:MM:SS' timestamp from a log line."""
    return re.search(r'[\d*-]*\d* [\d*:]*', input_string).group()
def convert_string_date_object(date_string):
    """Parse a 'YYYY-MM-DD HH:MM:SS' string into a naive datetime."""
    fmt = '%Y-%m-%d %H:%M:%S'
    return datetime.strptime(date_string, fmt)
def download_file(url):
    """Stream *url* into a new temporary file and return its path.

    The caller is responsible for removing the file (see delete_file).
    Raises requests.exceptions.RequestException on network/HTTP errors.
    """
    requests.packages.urllib3.disable_warnings(
        category=InsecureRequestWarning)
    response = requests.get(url, stream=True, verify=False)
    response.raise_for_status()
    file_descriptor, path = mkstemp(prefix="job-output-")
    # BUG FIX: wrap the descriptor instead of re-opening the path; the
    # old code opened the same file twice and left the mkstemp fd open
    # until the very end.
    with os.fdopen(file_descriptor, "wb") as file:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:
                file.write(chunk)
    return path
def delete_file(path):
    """Remove a temporary file created by download_file()."""
    os.unlink(path)
def find_job_run_time(url):
    """Return a job's wall-clock duration parsed from its job-output.txt.

    The duration is computed from the timestamps on the first and last
    log lines. Returns "N/A" when the log cannot be downloaded.
    """
    try:
        path = download_file(url + "/job-output.txt")
    except requests.exceptions.RequestException:
        return "N/A"
    with open(path, "r") as file:
        first_line = file.readline()
        # BUG FIX: default to the first line so a single-line log does
        # not leave last_line unbound (NameError).
        last_line = first_line
        for last_line in file:
            pass
    start_time = strip_date_time_from_string(first_line)
    start_time_ob = convert_string_date_object(start_time)
    end_time = strip_date_time_from_string(last_line)
    end_time_ob = convert_string_date_object(end_time)
    hours, minutes, seconds = dhms_from_seconds(
        date_diff_in_seconds(end_time_ob, start_time_ob))
    delete_file(path)
    return f"{hours} hr {minutes} mins {seconds} secs"
def find_failure_reason(url):
    """Return the first line of a job's failures_file, or "N/A" when the
    file cannot be downloaded."""
    try:
        path = download_file(url + "/logs/failures_file")
    except requests.exceptions.RequestException:
        return "N/A"
    with open(path, "r") as log:
        reason = log.readline()
    delete_file(path)
    return reason.rstrip()
def web_scrape(url):
    """GET *url* (TLS verification disabled) and return the body text.

    Terminates the program via SystemExit on any request failure.
    """
    try:
        requests.packages.urllib3.disable_warnings(
            category=InsecureRequestWarning)
        response = requests.get(url, verify=False)
        response.raise_for_status()
    # HTTPError is a subclass of RequestException, so one handler
    # covers both of the original except clauses identically.
    except requests.exceptions.RequestException as error:
        raise SystemExit(error)
    return response.text
def url_response_in_yaml(url):
    """Fetch *url* and parse the response body as YAML."""
    return yaml.safe_load(web_scrape(url))
def gather_basic_info_from_criteria(url):
    """Return (api_url, base_url) from a promotion-criteria YAML file."""
    criteria = url_response_in_yaml(url)
    return criteria['api_url'], criteria['base_url']
def find_jobs_in_integration_criteria(url, promotion_name='current-tripleo'):
    """Return the list of jobs required to promote *promotion_name*."""
    content = url_response_in_yaml(url)
    return content['promotions'][promotion_name]['criteria']
def find_jobs_in_component_criteria(url, component):
    """Return the promotion-criteria job list for one component."""
    content = url_response_in_yaml(url)
    return content['promoted-components'][component]
def fetch_hashes_from_commit_yaml(url):
    """
    Extract the commit, distro and extended hashes from a commit.yaml.

    :param url: URL of the commit.yaml file
    :returns: (commit_hash, distro_hash, extended_hash) strings
    """
    commit = url_response_in_yaml(url)['commits'][0]
    return (commit['commit_hash'],
            commit['distro_hash'],
            commit['extended_hash'])
def find_results_from_dlrn_agg(api_url, test_hash):
    """Query DLRN for the CI votes attached to an aggregate hash."""
    client = dlrnapi_client.ApiClient(host=api_url)
    api = dlrnapi_client.DefaultApi(client)
    query = dlrnapi_client.AggQuery(aggregate_hash=test_hash)
    return api.api_agg_status_get(params=query)
def format_ts_from_last_modified(ts, pattern='%a, %d %b %Y %H:%M:%S %Z'):
    """Convert an HTTP Last-Modified header string to a Unix timestamp.

    NOTE(review): time.mktime interprets the parsed struct as *local*
    time even though the header is GMT -- confirm callers only compare
    these values against each other.
    """
    parsed = datetime.strptime(ts, pattern)
    return int(time.mktime(parsed.timetuple()))
# get the date of the consistent link in dlrn
def get_consistent(url, component=None):
    """Return the epoch of DLRN's latest consistent build, or None.

    Uses the Last-Modified header of a delorean.repo file:
    promoted-components for non-centos7 integration URLs, consistent
    otherwise and for individual components.

    NOTE(review): the URL is trimmed by a fixed number of path segments,
    so this assumes the standard DLRN directory layout -- confirm
    against the criteria files that produce these URLs.
    """
    if "centos7" not in url:
        dlrn_tag = "/promoted-components"
        short_url = url.split("/")[:-5]
    else:
        dlrn_tag = "/consistent"
        short_url = url.split("/")[:-3]
    short_url = "/".join(short_url)
    if component is None:
        # integration build, use last promoted_components date
        response = requests.get(short_url + dlrn_tag + '/delorean.repo')
        if response.ok:
            cd = response.headers['Last-Modified']
            consistent_date = format_ts_from_last_modified(cd)
        else:
            return None
    else:
        # TO-DO normalize component and intergration config
        short_url = (short_url + '/component/'
                     + component + '/consistent/delorean.repo')
        response = requests.get(short_url)
        if response.ok:
            cd = response.headers['Last-Modified']
            consistent_date = format_ts_from_last_modified(cd)
        else:
            return None
    return consistent_date
def get_dlrn_versions_csv(base_url, component, tag):
    """Build the versions.csv URL for *tag*, component-scoped if given."""
    if component:
        return "{}/component/{}/{}/versions.csv".format(
            base_url, component, tag)
    return "{}/{}/versions.csv".format(base_url, tag)
def get_csv(url):
    """Download a versions.csv and return [raw_text, csv_reader].

    NOTE(review): implicitly returns None on a non-2xx response, which
    makes get_diff crash on file1[0] -- confirm whether callers rely on
    the None or whether this should raise instead.
    """
    response = requests.get(url)
    if response.ok:
        content = response.content.decode('utf-8')
        f = StringIO(content)
        reader = csv.reader(f, delimiter=',')
        return [content, reader]
def get_diff(component, control_tag, file1, test_tag, file2):
    """Return False when the two versions.csv payloads match, else a Rich
    table showing column 9 of each differing row.

    *file1*/*file2* are [raw_text, csv_reader] pairs as produced by
    get_csv(). (*component* is currently unused.)
    """
    # Cheap path: compare the raw text first.
    if file1[0] == file2[0]:
        return False
    table = Table(show_header=True, header_style="bold")
    table.add_column(control_tag, style="dim", width=85)
    table.add_column(test_tag, style="dim", width=85)
    for row_a, row_b in zip(file1[1], file2[1]):
        if row_a != row_b:
            table.add_row(str(row_a[9]), str(row_b[9]))
    return table
def get_dlrn_promotions(api_url,
                        promotion_name,
                        aggregate_hash=None,
                        commit_hash=None,
                        distro_hash=None,
                        component=None):
    """Fetch the most recent DLRN promotion plus its latest-build date.

    :returns: the promotion as a dict, with an extra 'lastest_build'
        epoch taken from the consistent/promoted-components repo.

    NOTE(review): aggregate_hash, commit_hash and distro_hash are
    accepted but never used in the query -- confirm they can be removed
    from callers.
    """
    api_client = dlrnapi_client.ApiClient(host=api_url)
    api_instance = dlrnapi_client.DefaultApi(api_client)
    query = dlrnapi_client.PromotionQuery(limit=1,
                                          promote_name=promotion_name)
    if component:
        query.component = component
    pr = api_instance.api_promotions_get_with_http_info(query)[0][0]
    consistent = get_consistent(pr.repo_url, component)
    promotion = {}
    promotion = pr.to_dict()
    promotion['lastest_build'] = consistent
    return promotion
def find_results_from_dlrn_repo_status(api_url, commit_hash,
                                       distro_hash, extended_hash):
    """ This function returns api_response from dlrn for a particular
    commit_hash, distro_hash, extended_hash.
    https://github.com/softwarefactory-project/dlrnapi_client/blob/master/
    docs/DefaultApi.md#api_repo_status_get

    :param api_url: the dlrn api endpoint for a particular release
    :param commit_hash: For a particular repo, commit.yaml contains this
        info.
    :param distro_hash: For a particular repo, commit.yaml contains this
        info.
    :param extended_hash: For a particular repo, commit.yaml contains this
        info.
    :return api_response: from dlrnapi server containing result of
        passing/failing jobs
    """
    # commit.yaml serializes a missing extended hash as the string "None".
    if extended_hash == "None":
        extended_hash = None
    api_client = dlrnapi_client.ApiClient(host=api_url)
    api_instance = dlrnapi_client.DefaultApi(api_client)
    params = dlrnapi_client.Params2(commit_hash=commit_hash,
                                    distro_hash=distro_hash,
                                    extended_hash=extended_hash)
    try:
        api_response = api_instance.api_repo_status_get(params=params)
    except ApiException as err:
        print("Exception when calling DefaultApi->api_repo_status_get:"
              " %s\n" % err)
        # BUG FIX: execution previously fell through to `return
        # api_response` with the name unbound (UnboundLocalError);
        # re-raise so callers see the real API error.
        raise
    return api_response
def conclude_results_from_dlrn(api_response):
    """Split DLRN job votes into (all, passed, failed) job-id sets."""
    all_jobs = set()
    passed_jobs = set()
    for vote in api_response:
        all_jobs.add(vote.job_id)
        if vote.success:
            passed_jobs.add(vote.job_id)
    failed_jobs = all_jobs - passed_jobs
    return all_jobs, passed_jobs, failed_jobs
def get_job_history(job_name, zuul, component=None):
    """Tally the last five Zuul builds of a job.

    Returns {job_name: {'SUCCESS': n, 'FAILURE': n, 'OTHER': n}}.
    """
    if 'rdo' in zuul or 'redhat' in zuul:
        query_name = job_name
    else:
        # Upstream job names drop the leading "periodic-" and the
        # trailing branch (and, for component jobs, the component too).
        parts = job_name.split("-")[1:-1]
        if component:
            parts = parts[:-1]
        query_name = '-'.join(parts)
    url = zuul + "?job_name={}".format(query_name)
    builds = json.loads(web_scrape(url))
    history = {job_name: {'SUCCESS': 0, 'FAILURE': 0, 'OTHER': 0}}
    for build in builds[:5]:
        result = build['result']
        if result not in ('SUCCESS', 'FAILURE'):
            result = 'OTHER'
        history[job_name][result] += 1
    return history
def latest_job_results_url(api_response, all_jobs):
    """Map each job id in *all_jobs* to the log URL of its newest vote."""
    logs_job = {}
    for job_id in all_jobs:
        runs = {vote.timestamp: vote.url
                for vote in api_response if vote.job_id == job_id}
        # NOTE(review): raises ValueError when a job has no votes in
        # api_response -- callers appear to pass only observed jobs.
        logs_job[job_id] = runs[max(runs)]
    return logs_job
def print_a_set_in_table(input_set, header="Job name"):
    """Render the given job names as a one-column Rich table."""
    table = Table(show_header=True, header_style="bold")
    table.add_column(header, style="dim", width=80)
    for name in input_set:
        table.add_row(name)
    console.print(table)
def print_failed_in_criteria(input_set,
                             config,
                             stream,
                             compare_upstream,
                             header="Job name",
                             component=None):
    """Print a Rich table of job histories for the given jobs.

    Shows the integration pass/fail counts and, when *compare_upstream*
    is set, the equivalent upstream counts.
    """
    table = Table(show_header=True, header_style="bold")
    table.add_column(header, width=80)
    table.add_column("Integration PASSED History", width=15)
    table.add_column("Integration FAILURE History", width=15)
    table.add_column("Integration Other History", width=15)
    if compare_upstream:
        table.add_column("Upstream PASSED History", width=10)
        table.add_column("Upstream FAILURE History", width=10)
        table.add_column("Upstream Other History", width=10)
    for job in input_set:
        int_history = get_job_history(job,
                                      config[stream]['periodic_builds_url'],
                                      component)
        if compare_upstream:
            # BUG FIX: ovb ("featureset") jobs have no upstream
            # counterpart; previously up_history was left unbound (or
            # stale from the previous iteration) and table.add_row
            # crashed with UnboundLocalError on the first such job.
            up_history = {job: {'SUCCESS': 0, 'FAILURE': 0, 'OTHER': 0}}
            # do not look for ovb jobs in upstream
            if "featureset" not in job:
                up_history = get_job_history(
                    job, config[stream]['upstream_builds_url'], component)
            table.add_row(job,
                          str(int_history[job]['SUCCESS']),
                          str(int_history[job]['FAILURE']),
                          str(int_history[job]['OTHER']),
                          str(up_history[job]['SUCCESS']),
                          str(up_history[job]['FAILURE']),
                          str(up_history[job]['OTHER']))
        else:
            table.add_row(job,
                          str(int_history[job]['SUCCESS']),
                          str(int_history[job]['FAILURE']),
                          str(int_history[job]['OTHER']))
    console.print(table)
def load_conf_file(config_file, key):
    """Load and return the YAML config file.

    (*key* is currently unused; kept for interface compatibility.)
    """
    with open(config_file, "r") as handle:
        return yaml.safe_load(handle)
def influxdb_jobs(jobs_result):
    """Format one job result as an InfluxDB line-protocol record.

    https://docs.influxdata.com/influxdb/v2.0/reference/syntax/line-protocol/
    Mutates *jobs_result*: 'status' is mapped to a numeric code
    (0=failed, 5=pending, 9=passed -- grafana threshold colors, since
    grafana can only color-code numbers) and 'job_type' is derived
    from 'component'.
    """
    status_codes = {"failed": 0, "pending": 5, "passed": 9}
    if jobs_result['status'] in status_codes:
        jobs_result['status'] = status_codes[jobs_result['status']]
    jobs_result['job_type'] = (
        "integration" if jobs_result['component'] is None else "component")
    # job_type is a tag (note the space before the first field);
    # the rest are fields in one row of data.
    line = ('jobs_result,'
            'job_type={job_type},'
            'job_name={job},'
            'release={release} '
            'name="{promote_name}",'
            'test_hash="{test_hash}",'
            'criteria="{criteria}",'
            'status="{status}",'
            'logs="{logs}",'
            'failure_reason="{failure_reason}",'
            'duration="{duration}",'
            'component="{component}",'
            'distro="{distro}"')
    return line.format(**jobs_result)
def influxdb_promo(promotion):
    """Format a DLRN promotion as an InfluxDB line-protocol record.

    Mutates *promotion*: adds 'grafana_timestamp' and
    'grafana_latest_build' (grafana renders epochs only when scaled).
    """
    promotion['grafana_timestamp'] = str(
        int(promotion['timestamp']) * 1000000000)
    promotion['grafana_latest_build'] = str(
        int(promotion['lastest_build']) * 1000)
    line = ('dlrn-promotion,'
            'release={release},distro={distro},'
            'promo_name={promote_name} '
            'commit_hash="{commit_hash}",'
            'distro_hash="{distro_hash}",'
            'aggregate_hash="{aggregate_hash}",'
            'repo_hash="{repo_hash}",'
            'repo_url="{repo_url}",'
            'latest_build_date={grafana_latest_build},'
            'component="{component}",'
            'promotion_details="{dlrn_details}",'
            'extended_hash="{extended_hash}" '
            '{grafana_timestamp}')
    return line.format(**promotion)
def render_testproject_yaml(jobs, hash, stream, config):
    """Print a testproject .zuul.yaml snippet that reruns *jobs* on *hash*.

    NOTE(review): the parameter `hash` shadows the builtin of the same
    name; it carries the DLRN hash under test.
    """
    jobs_list = jobs
    path = os.path.dirname(__file__)
    # Template lives next to this script in ./templates/.zuul.yaml.j2
    file_loader = FileSystemLoader(path + '/templates')
    env = Environment(loader=file_loader)
    template = env.get_template('.zuul.yaml.j2')
    output = template.render(jobs=jobs_list, hash=hash)
    print("\n\n###### testproject to rerun required jobs ######")
    print(config[stream]['testproject_url'] + "\n")
    print(output)
def track_integration_promotion(args, config):
    """Report the promotion state of the integration line.

    Prints either InfluxDB line-protocol records (args['influx']) or
    human-readable Rich tables plus a testproject snippet listing the
    completed jobs that still block promotion.
    """
    distro = args['distro']
    release = args['release']
    aggregate_hash = args['aggregate_hash']
    influx = args['influx']
    stream = args['stream']
    compare_upstream = args['compare_upstream']
    promotion_name = args['promotion_name']
    url = config[stream]['criteria'][distro][release]['int_url']
    dlrn_api_url, dlrn_trunk_url = gather_basic_info_from_criteria(url)
    promotions = get_dlrn_promotions(dlrn_api_url, promotion_name)
    # centos-7 DLRN has no aggregate hashes, so vote lookup differs.
    if distro != "centos-7":
        md5sum_url = dlrn_trunk_url + aggregate_hash + '/delorean.repo.md5'
        test_hash = web_scrape(md5sum_url)
        api_response = find_results_from_dlrn_agg(dlrn_api_url, test_hash)
    else:
        commit_url = dlrn_trunk_url + aggregate_hash + '/commit.yaml'
        commit_hash, distro_hash, extended_hash = fetch_hashes_from_commit_yaml(
            commit_url)
        api_response = find_results_from_dlrn_repo_status(dlrn_api_url,
                                                          commit_hash,
                                                          distro_hash,
                                                          extended_hash)
        test_hash = commit_hash
    (all_jobs_result_available,
     passed_jobs, failed_jobs) = conclude_results_from_dlrn(api_response)
    jobs_in_criteria = set(find_jobs_in_integration_criteria(
        url, promotion_name=promotion_name))
    jobs_which_need_pass_to_promote = jobs_in_criteria.difference(passed_jobs)
    jobs_with_no_result = jobs_in_criteria.difference(all_jobs_result_available)
    all_jobs = all_jobs_result_available.union(jobs_with_no_result)
    # get the dlrn details, hash under test ( hut ) and promoted hash ( ph )
    if distro != "centos-7":
        dlrn_api_suffix = "api/civotes_agg_detail.html?ref_hash="
        # hash under test
        hut = "{}/{}{}".format(dlrn_api_url,
                               dlrn_api_suffix,
                               test_hash)
        # promoted hash
        ph = "{}/{}{}".format(dlrn_api_url,
                              dlrn_api_suffix,
                              promotions['aggregate_hash'])
    else:
        dlrn_api_suffix = "api/civotes_detail.html?commit_hash="
        # hash under test
        hut = "{}/{}{}&distro_hash={}".format(dlrn_api_url,
                                              dlrn_api_suffix,
                                              commit_hash,
                                              distro_hash)
        # promoted hash
        ph = "{}/{}{}&distro_hash={}".format(dlrn_api_url,
                                             dlrn_api_suffix,
                                             promotions['commit_hash'],
                                             promotions['distro_hash'])
    if influx:
        # NOTE(dviroel): excluding jobs results from influx when promotion_name
        # is "current-tripleo-rdo" since we are not using this info anywhere.
        if promotion_name != 'current-tripleo-rdo':
            # print out jobs in influxdb format
            log_urls = latest_job_results_url(
                api_response, all_jobs_result_available)
            for job in all_jobs:
                log_url = log_urls.get(job, "N/A")
                if job in passed_jobs:
                    status = 'passed'
                elif job in failed_jobs:
                    status = 'failed'
                else:
                    status = 'pending'
                if status == 'failed':
                    failure_reason = find_failure_reason(log_url)
                else:
                    failure_reason = "N/A"
                jobs_result = {}
                jobs_result['release'] = release
                jobs_result['promote_name'] = promotion_name
                jobs_result['job'] = job
                jobs_result['test_hash'] = test_hash
                jobs_result['component'] = None
                jobs_result['criteria'] = job in jobs_in_criteria
                jobs_result['status'] = status
                jobs_result['logs'] = log_url
                jobs_result['failure_reason'] = failure_reason
                jobs_result['duration'] = find_job_run_time(
                    log_url)
                jobs_result['distro'] = distro
                print(influxdb_jobs(jobs_result))
        # print out last promotions in influxdb format
        promotions['release'] = release
        promotions['distro'] = distro
        promotions['dlrn_details'] = ph
        print(influxdb_promo(promotions))
    else:
        last_p = datetime.utcfromtimestamp(promotions['timestamp'])
        console.print(f"Hash under test: {hut}",
                      f"\nlast_promotion={last_p}")
        print_a_set_in_table(passed_jobs, "Jobs which passed:")
        print_a_set_in_table(failed_jobs, "Jobs which failed:")
        print_a_set_in_table(jobs_with_no_result,
                             "Pending running jobs")
        needed_txt = ("Jobs which are in promotion criteria and need "
                      "pass to promote the Hash:")
        print_failed_in_criteria(jobs_which_need_pass_to_promote,
                                 config,
                                 stream,
                                 compare_upstream,
                                 needed_txt)
        console.print("Logs of jobs which are failing:-")
        log_urls = latest_job_results_url(
            api_response, failed_jobs)
        for value in log_urls.values():
            console.print(value)
        # get package diff for the integration test
        # control_url
        c_url = get_dlrn_versions_csv(dlrn_trunk_url,
                                      None,
                                      promotion_name)
        # test_url, what is currently getting tested
        t_url = get_dlrn_versions_csv(dlrn_trunk_url,
                                      None,
                                      aggregate_hash)
        c_csv = get_csv(c_url)
        t_csv = get_csv(t_url)
        pkg_diff = get_diff(None,
                            promotion_name,
                            c_csv,
                            aggregate_hash,
                            t_csv)
        if pkg_diff:
            console.print("\n Packages Tested")
            rich_print(pkg_diff)
        # jobs_which_need_pass_to_promote are any job that hasn't registered
        # success w/ dlrn. jobs_with_no_result are any jobs in pending.
        # We only want test project config for jobs that have completed.
        tp_jobs = jobs_which_need_pass_to_promote - jobs_with_no_result
        if tp_jobs:
            render_testproject_yaml(tp_jobs, test_hash, stream, config)
def track_component_promotion(cargs, config):
    """Find the failing jobs which are blocking promotion of a component.

    :param cargs: dict of CLI arguments; this function reads 'distro',
        'release', 'influx', 'component', 'stream' and 'compare_upstream'
    :param config: parsed promotion-criteria configuration for the stream
    """
    distro = cargs['distro']
    release = cargs['release']
    influx = cargs['influx']
    test_component = cargs['component']
    stream = cargs['stream']
    compare_upstream = cargs['compare_upstream']
    url = config[stream]['criteria'][distro][release]['comp_url']
    dlrn_api_url, dlrn_trunk_url = gather_basic_info_from_criteria(url)
    if distro == "centos-7":
        # component pipelines never existed for centos-7
        raise Exception("centos-7 components do not exist")
    if test_component == "all":
        all_components = ["baremetal", "cinder", "clients", "cloudops",
                          "common", "compute", "glance", "manila",
                          "network", "octavia", "security", "swift",
                          "tempest", "tripleo", "ui", "validation"]
        # package diff is only computed when tracking a single component
        pkg_diff = None
    else:
        all_components = [test_component]
        # get package diff for the component
        # control_url
        c_url = get_dlrn_versions_csv(dlrn_trunk_url,
                                      all_components[0],
                                      "current-tripleo")
        # test_url, what is currently getting tested
        t_url = get_dlrn_versions_csv(dlrn_trunk_url,
                                      all_components[0],
                                      "component-ci-testing")
        c_csv = get_csv(c_url)
        t_csv = get_csv(t_url)
        pkg_diff = get_diff(all_components[0],
                            "current-tripleo",
                            c_csv,
                            "component-ci-testing",
                            t_csv)
    for component in all_components:
        commit_url = '{}component/{}/component-ci-testing/commit.yaml'.format(
            dlrn_trunk_url, component)
        commit_hash, distro_hash, extended_hash = fetch_hashes_from_commit_yaml(
            commit_url)
        api_response = find_results_from_dlrn_repo_status(dlrn_api_url,
                                                          commit_hash,
                                                          distro_hash,
                                                          extended_hash)
        # component promotion details
        promotions = get_dlrn_promotions(dlrn_api_url,
                                         "promoted-components",
                                         component=component)
        dlrn_api_suffix = "api/civotes_detail.html?commit_hash="
        # hash under test
        hut = "{}/{}{}&distro_hash={}".format(dlrn_api_url,
                                              dlrn_api_suffix,
                                              commit_hash,
                                              distro_hash)
        # DLRN detail page of the last promotion
        ph = "{}/{}{}&distro_hash={}".format(dlrn_api_url,
                                             dlrn_api_suffix,
                                             promotions['commit_hash'],
                                             promotions['distro_hash'])
        (all_jobs_result_available,
         passed_jobs, failed_jobs) = conclude_results_from_dlrn(api_response)
        # 'consistent' is a DLRN bookkeeping vote, not a CI job; drop it
        if 'consistent' in all_jobs_result_available:
            all_jobs_result_available.remove('consistent')
        if 'consistent' in passed_jobs:
            passed_jobs.remove('consistent')
        if 'consistent' in failed_jobs:
            failed_jobs.remove('consistent')
        jobs_in_criteria = set(find_jobs_in_component_criteria(url, component))
        jobs_which_need_pass_to_promote = jobs_in_criteria.difference(
            passed_jobs)
        jobs_with_no_result = jobs_in_criteria.difference(
            all_jobs_result_available)
        all_jobs = all_jobs_result_available.union(jobs_with_no_result)
        if influx:
            log_urls = latest_job_results_url(
                api_response, all_jobs_result_available)
            for job in all_jobs:
                log_url = log_urls.get(job, "N/A")
                if job in passed_jobs:
                    status = 'passed'
                elif job in failed_jobs:
                    status = 'failed'
                else:
                    status = 'pending'
                if status == 'failed':
                    failure_reason = find_failure_reason(log_url)
                else:
                    failure_reason = "N/A"
                jobs_result = {}
                jobs_result['release'] = release
                jobs_result['promote_name'] = "promoted-components"
                jobs_result['job'] = job
                jobs_result['test_hash'] = commit_hash + '_' + distro_hash[0:8]
                jobs_result['component'] = component
                jobs_result['criteria'] = job in jobs_in_criteria
                jobs_result['status'] = status
                jobs_result['logs'] = log_url
                jobs_result['failure_reason'] = failure_reason
                jobs_result['duration'] = find_job_run_time(
                    log_url)
                jobs_result['distro'] = distro
                # print out jobs in influxdb format
                print(influxdb_jobs(jobs_result))
            # print out promotions in influxdb format
            promotions['release'] = release
            promotions['distro'] = distro
            promotions['dlrn_details'] = ph
            print(influxdb_promo(promotions))
        else:
            log_urls = latest_job_results_url(
                api_response, failed_jobs)
            header = ("{} component jobs which need pass to promote "
                      "the hash: ").format(component)
            # Red = at least one failure, Green = nothing left to pass,
            # Yellow = no failures yet but criteria jobs still outstanding
            if failed_jobs:
                component_status = "Red"
            elif not jobs_which_need_pass_to_promote:
                component_status = "Green"
            else:
                component_status = "Yellow"
            last_p = datetime.utcfromtimestamp(promotions['timestamp'])
            console.print(f"{component} component",
                          f"status={component_status}",
                          f"last_promotion={last_p}",
                          f"\nHash_under_test={hut}")
            print_a_set_in_table(passed_jobs, "Jobs which passed:")
            if component_status != "Green":
                print_a_set_in_table(failed_jobs, "Jobs which failed:")
                print_a_set_in_table(jobs_with_no_result,
                                     "Pending running jobs")
                print_failed_in_criteria(jobs_which_need_pass_to_promote,
                                         config,
                                         stream,
                                         compare_upstream,
                                         header,
                                         component)
                if component_status == "Red":
                    console.print("Logs of failing jobs:")
                    for value in log_urls.values():
                        console.print(value)
            if pkg_diff:
                console.print("\nPackages Tested: {}".format(all_components[0]))
                rich_print(pkg_diff)
            print('\n')
        # jobs_which_need_pass_to_promote are any job that hasn't registered
        # success w/ dlrn. jobs_with_no_result are any jobs in pending.
        # We only want test project config for jobs that have completed.
        tp_jobs = jobs_which_need_pass_to_promote - jobs_with_no_result
        # execute if there are failing jobs in criteria and if
        # you are only looking at one component and not all components
        if tp_jobs and len(all_components) == 1:
            render_testproject_yaml(tp_jobs, commit_hash, stream, config)
@ click.command()
@ click.option("--release", default='master',
               type=click.Choice(['master', 'wallaby', 'victoria', 'ussuri',
                                  'train', 'stein', 'queens', 'osp17',
                                  'osp16-2']))
@ click.option("--distro", default='centos-8',
               type=click.Choice(['centos-8', 'centos-9', 'centos-7',
                                  'rhel-8', 'rhel-9']))
@ click.option("--component",
               type=click.Choice(["all", "baremetal", "cinder", "clients",
                                  "cloudops", "common", "compute",
                                  "glance", "manila", "network", "octavia",
                                  "security", "swift", "tempest", "tripleo",
                                  "ui", "validation"]))
@ click.option("--influx", is_flag=True, default=False)
@ click.option("--compare_upstream", is_flag=True, default=False)
@ click.option("--aggregate_hash",
               required=False,
               default="tripleo-ci-testing",
               # TO-DO w/ tripleo-get-hash
               help=("default:tripleo-ci-testing"
                     "\nexample:tripleo-ci-testing/e6/ad/e6ad..."))
@ click.option("--promotion_name", required=False, default="current-tripleo",
               type=click.Choice(["current-tripleo", "current-tripleo-rdo"]))
@ click.option("--config_file", default=os.path.dirname(__file__)
               + '/conf_ruck_rover.yaml')
def main(release,
         distro,
         config_file,
         influx=False,
         component=None,
         compare_upstream=False,
         aggregate_hash="tripleo-ci-testing",
         promotion_name="current-tripleo"):
    """CLI entry point.

    Loads the right criteria configuration for the stream and dispatches
    either to component tracking (--component given) or to integration
    promotion tracking.
    """
    if release in ('osp16-2', 'osp17'):
        stream = 'downstream'
        if config_file != os.path.dirname(__file__) + '/conf_ruck_rover.yaml':
            print('using custom config file: {}'.format(config_file))
        else:
            downstream_urls = 'https://url.corp.redhat.com/ruck-rover-0'
            config_file = download_file(downstream_urls)
        config = load_conf_file(config_file, "downstream")
    else:
        config = load_conf_file(config_file, "upstream")
        stream = 'upstream'
    # Build the argument dict explicitly. The previous implementation
    # iterated dir() and eval()'d every local name (it was even tagged
    # "fix-me eval"); that was fragile — it also swept in unrelated
    # locals like `config` — and a security smell. The explicit dict
    # provides every key the tracking functions read.
    c_args = {
        'release': release,
        'distro': distro,
        'config_file': config_file,
        'influx': influx,
        'component': component,
        'compare_upstream': compare_upstream,
        'aggregate_hash': aggregate_hash,
        'promotion_name': promotion_name,
        'stream': stream,
    }
    if release in ('stein', 'queens'):
        # NOTE(review): only `config` is updated here, not c_args, while
        # the tracking functions read cargs['distro'] — confirm intended.
        config['distro'] = "centos-7"
    if release == 'osp16-2':
        config['distro'] = "rhel-8"
        c_args['distro'] = "rhel-8"
    # osp-17 can be rhel-8 or rhel-9: force the user to pick explicitly
    if release == 'osp17' and distro == 'centos-8':
        print("\nOSP-17 can be either --distro rhel-8 or --distro rhel-9")
        print("ERROR: please set the distro option on the cli")
        sys.exit(1)
    if component:
        track_component_promotion(c_args, config)
    else:
        track_integration_promotion(c_args, config)


if __name__ == '__main__':
    main()  # pylint: disable=no-value-for-parameter
|
'''
Neural networks. Forward propagation in an already trained network in TensorFlow 2.0. Computing the regularised cost function.
TF 2.0:
sigmoid_step_option 0-4 all take 0.1-0.2 sec.
<NAME>
09-19/03/2018, 31/01-07/02, 04/03/2020
'''
import numpy as np
import scipy.io # to open Matlab's .mat files
import tensorflow as tf
from tensorflow.python.ops import math_ops
import matplotlib.pyplot as plt
import time
### User input ###
# Each option selects one of several equivalent implementations below.
data_pipeline_option = 0 # {0, 1}
sigmoid_step_option = 0 # {0, 1, 2, 3, 4}
loss_regularization_option = 0 # {0, 1, 2}
digit_selection_option = 0 # {0, 1} - use 0 because 1 makes it ~1.5 sec slower
batch_size = 5000 # try: 50 or 3900 or 4999 or 5000 (which is X.shape[0]) or 50000
### End of input ###
# The network parameters are here for info, they are not actually used.
input_layer_size = 400 # 20x20 Input Images of Digits
hidden_layer_size = 25 # 25 hidden units
num_labels = 10 # 10 labels, from 1 to 10
# (note that we have mapped "0" to label 10)
# =========== Part 1: Loading [and Visualizing] Data =============
data = scipy.io.loadmat('../machine-learning-ex4/ex4/ex4data1.mat')
X = data['X']
y = data['y']
y = y % 10 # Transforming 10 to 0, which is its original meaning.
# ================ Part 2: Loading Pameters ================
# In this part of the exercise, we load some pre-initialized
# neural network parameters.
params = scipy.io.loadmat('../machine-learning-ex4/ex4/ex4weights.mat')
Theta1 = params['Theta1'] # Theta1 has size 25 x 401
Theta2 = params['Theta2'] # Theta2 has size 10 x 26
# To narrow computation to a subset of data for quick testing:
#X, y = X[1990:2010,:], y[1990:2010,:]
# ================ Part 3: Compute Cost (Feedforward) ================
tf.keras.backend.clear_session() # not strictly necessary
start_time = time.time()
# Two equivalent input pipelines: X alone, or (X, y) pairs (y unused here).
if data_pipeline_option==0:
    dataset_X = tf.data.Dataset.from_tensor_slices(X).batch(batch_size)
    #dataset_y = tf.data.Dataset.from_tensor_slices(y).batch(batch_size) # not needed
else:
    dataset = tf.data.Dataset.from_tensor_slices((X,y)).batch(batch_size)
# Both options for l0 are good:
# Hidden layer: weights/bias come from the pre-trained Matlab Theta1
# (column 0 of Theta1 is the bias, the rest the transposed kernel).
l0 = tf.keras.layers.Dense(Theta1.shape[0], use_bias=True, activation='sigmoid', kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]))
#l0 = tf.keras.layers.Dense(Theta1.shape[0], use_bias=True, activation=tf.nn.sigmoid, kernel_initializer=tf.constant_initializer(Theta1[:,1:].T), bias_initializer=tf.constant_initializer(Theta1[:,0]))
# No activation is specified, meaning we get the identity function as activation.
l1 = tf.keras.layers.Dense(Theta2.shape[0], use_bias=True,
                           kernel_initializer=tf.constant_initializer(Theta2[:,1:].T),
                           bias_initializer=tf.constant_initializer(Theta2[:,0]))
# With tf.gather, I permute the 10 columns: digits [1,2,...,9,0] are mapped to digits [0,1,2,...,9], that is, column 9 (digit 0, encoded with position 10 of [1, 2, ... 10]) must come first, the rest must be shifted up by one.
if sigmoid_step_option in [0, 3]: # We apply sigmoid function.
    # Both options work:
    l1.activation=lambda x: tf.gather(tf.sigmoid(x), tf.concat([tf.constant(9, dtype=tf.int32, shape=[1]), tf.range(0,9, dtype=tf.int32)], 0), axis=1)
    #l1.activation=lambda x: tf.sigmoid(tf.gather(x, tf.concat([tf.constant(9, dtype=tf.int32, shape=[1]), tf.range(0,9, dtype=tf.int32)], 0), axis=1))
    # ...tf.gather(x, np.concatenate(([9], np.arange(0,9))), axis=1)... would do the same with np arrays.
else: # sigmoid_step_option in [1, 2, 4]; in these cases sigmoid function is not yet applied.
    # Here activation=tf.nn.sigmoid would be wrong because l2 (in sigmoid_step_option==1) or
    # the loss function (in sigmoid_step_option==2) turns the logits values into sigmoid(output),
    # so there must be no application of sigmoid in the output of this layer.
    l1.activation=lambda x: tf.gather(x, tf.concat([tf.constant(9, dtype=tf.int32, shape=[1]), tf.range(0,9, dtype=tf.int32)], 0), axis=1)
if sigmoid_step_option != 1:
    layers_model = [l0, l1]
else: # sigmoid_step_option=1
    # Without specifying kernel_initializer to be the identity matrix, it would multiply with a random matrix!
    l2 = tf.keras.layers.Dense(Theta2.shape[0], activation='sigmoid', kernel_initializer=tf.constant_initializer(np.eye(Theta2.shape[0])), use_bias=False)
    layers_model = [l0, l1, l2]
# For sigmoid_step_option in [0, 1, 3], pred has been fed through a sigmoid function, it's in [0; 1].
# For sigmoid_step_option in [2, 4], pred has not been fed through a sigmoid function, it's in ]-infty; infty[.
model = tf.keras.Sequential(layers_model)
if data_pipeline_option==0:
    pred = model.predict(dataset_X)
else:
    # drop the labels from the (X, y) pipeline before predicting
    dataset_X = dataset.map(lambda x, y: x)
    pred = model.predict(dataset_X)
# y needs to be turned into one hot encoding
'''
# Version for TensorFlow 1.x
y_idcc = tf.feature_column.categorical_column_with_identity(key='labels', num_buckets=10)
y_onehot = tf.feature_column.indicator_column(y_idcc)
y_layer = tf.feature_column.input_layer({'labels': y_temp}, y_onehot)
loss = tf.losses.sigmoid_cross_entropy(y_layer, logits) * 10
'''
'''
# This works but it is very clumsy and about 0.2 sec slower than tf.one_hot().
l10 = tf.feature_column.categorical_column_with_identity(key='labels', num_buckets=10)
l11 = tf.feature_column.indicator_column(l10)
l12 = tf.keras.layers.DenseFeatures(l11)
layers_y = [l12]
model_y = tf.keras.Sequential(layers_y)
y_onehot = model_y.predict({'labels': y}).astype(np.int16)
'''
y_onehot = tf.one_hot(y.reshape(-1), 10)
# Multiplying by 10 is needed only because the course material divides by number of samples but not by number of classes when taking the mean.
if sigmoid_step_option in [0, 1]:
    # pred is already in [0; 1] here, so from_logits=False.
    # These all work well:
    #loss = tf.math.reduce_mean(tf.math.add(tf.math.multiply(y_onehot, tf.math.negative(tf.math.log(pred))), tf.math.multiply(tf.math.subtract(1.0, y_onehot), tf.math.negative((tf.math.log(tf.math.subtract(1.0, pred))))))) * 10
    #loss = tf.math.reduce_mean(tf.math.multiply(y_onehot, -tf.math.log(pred)) + tf.math.multiply(1-y_onehot, -(tf.math.log(1-pred)))) * 10
    #loss = (-tf.reduce_sum(y_onehot * tf.math.log(pred))-tf.reduce_sum((1-y_onehot) * tf.math.log(1-pred)))/batch_size
    #loss = np.mean(np.multiply(y_onehot, -np.log(pred)) + np.multiply(1-y_onehot, -np.log(1-pred)), axis=None) * 10
    loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_onehot, pred, from_logits=False)) * 10
    #loss = np.mean(tf.keras.losses.binary_crossentropy(y_onehot, pred, from_logits=False).numpy()) * 10
elif sigmoid_step_option==2:
    # binary_crossentropy() computes -sum_j (t_j log(q_j) + (1-t_j)*log(1-q_j)).
    loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_onehot, pred, from_logits=True)) * 10
    #loss = np.mean(tf.keras.losses.binary_crossentropy(y_onehot, pred, from_logits=True).numpy()) * 10
    #loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(y_onehot, tf.nn.sigmoid(pred), from_logits=False)) * 10
elif sigmoid_step_option==3:
    '''
    https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/nn_impl.py#L112 shows
    tf.nn.sigmoid_cross_entropy_with_logits() works as if it applied a sigmoid function to its input.
    We compensate for this by applying logit function (log(p/(1-p))), which is the inverse of the
    sigmoid or logistic function.
    '''
    loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(y_onehot, dtype=tf.float32), logits=math_ops.log(pred/(1-pred))), axis=1))
else: # if sigmoid_step_option==4
    '''
    https://github.com/tensorflow/tensorflow/blob/r2.0/tensorflow/python/ops/nn_impl.py#L112 shows
    tf.nn.sigmoid_cross_entropy_with_logits() works as if it applied a sigmoid function on its input.
    '''
    loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(y_onehot, dtype=tf.float32), logits=pred), axis=1))
    #loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(y_onehot, tf.float32), logits=pred)) * 10
# These don't work because categorical_crossentropy() computes something else than what we seek!
# categorical_crossentropy() computes -sum_j t_j log(q_j).
#loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_onehot, pred, from_logits=False)) * 10
#loss = np.mean(tf.keras.losses.categorical_crossentropy(y_onehot, pred, from_logits=False).numpy())*10
#loss = np.mean(tf.keras.losses.categorical_crossentropy(tf.constant(y_onehot), tf.constant(pred), from_logits=False).numpy())*10
#loss = np.sum(tf.keras.losses.categorical_crossentropy(y_onehot, pred, from_logits=False).numpy())*10/batch_size
#loss = np.sum(tf.keras.losses.categorical_crossentropy(y_onehot.T, pred.T, from_logits=False).numpy())/(10*batch_size)
#loss = np.sum(tf.keras.losses.categorical_crossentropy(y_onehot, pred, from_logits=True).numpy())/batch_size
#loss = np.mean(tf.keras.losses.categorical_crossentropy(tf.constant(y_onehot), tf.constant(pred), from_logits=True).numpy())
# Selecting which digit
# If there were no rearrangement of columns by tf.gather, digits would be stored as [1,2,...,9,0]
##digit = (tf.argmax(pred, axis=1)+1) % 10
##digit = tf.map_fn(lambda x: (tf.argmax(x, axis=0, output_type=tf.int32)+1) % 10, pred, dtype=tf.int32)
# If columns are rearranged by tf.gather, then digits are stored as [0,1,2,...,9].
if digit_selection_option == 0:
    pred_digit = tf.argmax(pred, axis=1)
else:
    pred_digit = tf.map_fn(lambda x: tf.argmax(x, axis=0, output_type=tf.int32), pred, dtype=tf.int32)
pred_np = pred_digit.numpy().reshape(-1,1)
# =============== Part 4: Implement Regularization ===============
# Once your cost function implementation is correct, you should now
# continue to implement the regularization with the cost.
lambda0 = 1
# losswp = loss with penalty from regularisation
# Three equivalent ways of adding the L2 penalty on the non-bias weights.
if loss_regularization_option == 0:
    losswp = loss + lambda0 * 0.5 * (np.sum(Theta1[:,1:]*Theta1[:,1:], axis=None) + np.sum(Theta2[:,1:]*Theta2[:,1:], axis=None)) / len(y)
elif loss_regularization_option == 1:
    losswp = loss + lambda0 * 0.5 * tf.math.reduce_sum(tf.math.square(tf.constant(Theta1[:,1:], dtype=tf.float32))) / len(y)\
        + lambda0 * 0.5 * tf.math.reduce_sum(tf.math.square(tf.constant(Theta2[:,1:], dtype=tf.float32))) / len(y)
else:
    regularizer = tf.keras.regularizers.l2(lambda0 * 0.5)
    losswp = loss + tf.cast(regularizer(Theta1[:,1:]), dtype=tf.float32) / len(y)\
        + tf.cast(regularizer(Theta2[:,1:]), dtype=tf.float32) / len(y)
print('\nCost at parameters (loaded from ex4weights): {0:.6f}.'.format(loss))
print('Expected loss (approx.): 0.287629.')
print('\nRegularised cost at parameters (loaded from ex4weights): {0:.6f}.'.format(losswp))
print('Expected regularised loss (approx.): 0.383770.')
print('\nTraining Set Accuracy: {0:.2f}%.'.format(np.mean(pred_np == y) * 100))
print('Expected training accuracy on complete Training Set (approx.): 97.5%.')
print('\nTime elapsed: {:.2f} sec'.format(time.time() - start_time))
|
<filename>pyxllib/debug/specialist/tictoc.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author : 陈坤泽
# @Email : <EMAIL>
# @Date : 2020/09/20
import time
import timeit
from humanfriendly import format_timespan
from pyxllib.text.pupil import shorten, listalign
from pyxllib.algo.pupil import natural_sort, ValuesStat
__tictoc = """
基于 pytictoc 代码,做了些自定义扩展
原版备注:
Module with class TicToc to replicate the functionality of MATLAB's tic and toc.
Documentation: https://pypi.python.org/pypi/pytictoc
__author__ = '<NAME>'
__version__ = '1.4.0'
__version_date__ = '29 April 2017'
"""
class TicToc:
    """
    Replicate the functionality of MATLAB's tic and toc.

    #Methods
    TicToc.tic()       #start or re-start the timer
    TicToc.toc()       #print elapsed time since timer start
    TicToc.tocvalue()  #return floating point value of elapsed time since timer start

    #Attributes
    TicToc.start    #Time from timeit.default_timer() when t.tic() was last called
    TicToc.end      #Time from timeit.default_timer() when t.toc() or t.tocvalue() was last called
    TicToc.elapsed  #t.end - t.start; i.e., time elapsed from t.start when t.toc() or t.tocvalue() was last called
    """

    def __init__(self, title='', *, disable=False):
        """Create instance of TicToc class.

        :param title: label prefixed to every printed report
        :param disable: when True, suppress all printing
        """
        self.start = timeit.default_timer()
        self.end = float('nan')
        self.elapsed = float('nan')
        self.title = title
        self.disable = disable

    def tic(self):
        """Start the timer."""
        self.start = timeit.default_timer()

    def toc(self, msg='', restart=False):
        """
        Report time elapsed since last call to tic().

        Optional arguments:
            msg     - String to replace default message of 'Elapsed time is'
            restart - Boolean specifying whether to restart the timer
        """
        self.end = timeit.default_timer()
        self.elapsed = self.end - self.start
        if not self.disable:
            print(f'{self.title} {msg} elapsed {format_timespan(self.elapsed)}.')
        if restart:
            self.start = timeit.default_timer()

    def tocvalue(self, restart=False):
        """
        Return time elapsed since last call to tic().

        Optional argument:
            restart - Boolean specifying whether to restart the timer
        """
        self.end = timeit.default_timer()
        self.elapsed = self.end - self.start
        if restart:
            self.start = timeit.default_timer()
        return self.elapsed

    @staticmethod
    def process_time(msg='time.process_time():'):
        """Report total CPU time consumed since the Python process started."""
        print(f'{msg} {format_timespan(time.process_time())}.')

    def __enter__(self):
        """Start the timer when using TicToc in a context manager.

        BUG FIX: now returns self, so `with TicToc() as t:` binds the
        timer instead of None.
        """
        from pyxllib.debug.specialist import get_xllog
        if self.title == '__main__' and not self.disable:
            get_xllog().info(f'time.process_time(): {format_timespan(time.process_time())}.')
        self.start = timeit.default_timer()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """On exit, print time elapsed since entering context manager."""
        from pyxllib.debug.specialist import get_xllog
        elapsed = self.tocvalue()
        xllog = get_xllog()
        if exc_tb is None:
            if not self.disable:
                xllog.info(f'{self.title} finished in {format_timespan(elapsed)}.')
        else:
            xllog.info(f'{self.title} interrupt in {format_timespan(elapsed)},')
__timer = """
"""
class Timer:
    """Performance timer that supports the `with` statement.

    Every measured round must be explicitly bracketed by start() and
    stop(); each round's elapsed time is appended to self.data.
    """

    def __init__(self, title=''):
        """
        :param title: name of this timer, used as a prefix in reports
        """
        # timeit.default_timer picks the most precise clock per platform
        self.default_timer = timeit.default_timer
        self.title = title
        # elapsed seconds, one entry per start()/stop() round
        self.data = []
        self.start_clock = float('nan')

    def start(self):
        """Begin a measurement round."""
        self.start_clock = self.default_timer()

    def stop(self):
        """Finish the current round and record its elapsed time."""
        self.data.append(self.default_timer() - self.start_clock)

    def report(self, msg=''):
        """Print a summary of the rounds recorded so far."""
        msg = f'{self.title} {msg}'
        n = len(self.data)
        # BUG FIX: the original tested `n >= 1` first, which made the
        # single-measurement `elif n == 1` branch below unreachable.
        if n > 1:
            print(msg, '用时(秒) ' + ValuesStat(self.data).summary(valfmt='.3f'))
        elif n == 1:
            sum_ = sum(self.data)
            print(f'{msg} 用时: {sum_:.3f}s')
        else:  # no data recorded yet
            print(f'{msg} 暂无计时信息')

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.report()
def performit(title, stmt="pass", setup="pass", repeat=1, number=1, globals=None):
    """Thin wrapper over timeit.repeat that also prints a summary line.

    Simplified on 2020-09-20 (Sunday) 15:33 so the function no longer
    captures the statement's return value, avoiding duplicate execution.

    :param title: label describing what is being benchmarked
    :return: the list of per-repeat timings from timeit.repeat
    """
    timings = timeit.repeat(stmt=stmt, setup=setup,
                            repeat=repeat, number=number, globals=globals)
    summary = ValuesStat(timings).summary(valfmt='.3f')
    print(title, '用时(秒) ' + summary)
    return timings
def perftest(title, stmt="pass", repeat=1, number=1, globals=None, res_width=None, print_=True):
    """Unlike performit, loops manually so the expression's return value
    can be captured alongside the timing data.

    :param title: label describing what is being benchmarked
    :param res_width: character limit when displaying the run's result
    :param print_: whether to print the report
    :return: list of per-repeat elapsed times in seconds

    timeit.repeat is not used here precisely because we also want the
    value the expression evaluates to.
    """
    # 1 make sure stmt is a callable object
    if callable(stmt):
        func = stmt
    else:
        # compile once; eval-based by design so arbitrary expressions work
        code = compile(stmt, '', 'eval')

        def func():
            return eval(code, globals)  # pylint: disable=eval-used
    # 2 run the function, timing each repeat and keeping the last result
    data = []
    res = ''
    for i in range(repeat):
        start = time.time()
        for j in range(number):
            res = func()
        data.append(time.time() - start)
    # 3 report formatting
    if res_width is None:
        # with a single measurement the report is short, so allow a
        # longer result string by default
        res_width = 50 if len(data) > 1 else 200
    if res is None:
        res = ''
    else:
        res = '运行结果:' + shorten(str(res), res_width)
    if print_:
        print(title, '用时(秒) ' + ValuesStat(data).summary(valfmt='.3f'), res)
    return data
class PerfTest:
    """Small benchmark runner modelled after unittest's discovery.

    Since v0.0.38 the number/repeat parameters live on perf() rather than
    on the constructor, which makes subclassing simpler.
    """

    def perf(self, number=1, repeat=1, globals=None):
        """Run every `perf_*` method of this instance through perftest.

        :param number: how many calls form one measured unit (useful when
            a single call is too fast to time reliably)
        :param repeat: how many times each unit is repeated; mean and
            standard deviation are computed over the repeats

        Think of it as weighing `number` sheets of paper `repeat` times:
        if one "sheet" is already thick enough, leave number at 1.
        """
        # 1 discover callable attributes named perf_*
        names = [attr for attr in dir(self)
                 if attr.startswith('perf_') and callable(getattr(self, attr))]
        # 2 natural sort, then right-align the labels (prefix stripped)
        names = natural_sort(names)
        labels = listalign([name[5:] for name in names], 'r')
        for label, name in zip(labels, names):
            perftest(label, getattr(self, name),
                     number=number, repeat=repeat, globals=globals)
|
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: MG4Lidar Reading Driver testing.
# Author: <NAME> <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010, <NAME> <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
sys.path.append('../pymod')
import gdaltest
from osgeo import gdal
###############################################################################
# Test reading a MG4Lidar file
#
def mg4lidar_1():
    """Open a MG4Lidar dataset and check projection, geotransform and
    checksums against reference values.

    Returns 'skip' when the driver or the test data is unavailable,
    'fail' on any mismatch, 'success' otherwise.
    """
    drv = gdal.GetDriverByName('MG4Lidar')
    if drv is None:
        return 'skip'

    if not gdaltest.download_file('http://home.gdal.org/tmp/GDAL_MG4Lidar_Src.zip', 'GDAL_MG4Lidar_Src.zip'):
        return 'skip'

    # Unzip the test data on first use; skip if it cannot be prepared.
    try:
        os.stat('tmp/cache/GDAL_MG4Lidar_Src')
    except OSError:
        try:
            gdaltest.unzip('tmp/cache', 'tmp/cache/GDAL_MG4Lidar_Src.zip')
            try:
                os.stat('tmp/cache/GDAL_MG4Lidar_Src')
            except OSError:
                return 'skip'
        except OSError:
            return 'skip'

    ds = gdal.Open('tmp/cache/GDAL_MG4Lidar_Src/Tetons_200k.view')
    if ds is None:
        gdaltest.post_reason('could not open dataset')
        return 'fail'

    prj = ds.GetProjectionRef()
    if prj.find('NAD83 / UTM zone 12N') == -1:
        gdaltest.post_reason('did not get expected projection')
        print(prj)
        # BUG FIX: was `return 'success'`, which masked the failure
        return 'fail'

    gt = ds.GetGeoTransform()
    ref_gt = (504489.919999999983702, 3.078227571115974, 0, 4795848.389999999664724, 0, -3.078259860787739)
    for i in range(6):
        if abs(gt[i] - ref_gt[i]) > 1e-6:
            gdaltest.post_reason('did not get expected geotransform')
            print(gt)
            return 'fail'

    cs = ds.GetRasterBand(1).Checksum()
    if cs != 13216:
        gdaltest.post_reason('did not get expected checksum')
        print(cs)
        # BUG FIX: was `return 'success'`, which masked the failure
        return 'fail'

    cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
    if cs != 64099:
        gdaltest.post_reason('did not get expected overview checksum')
        print(cs)
        # BUG FIX: was `return 'success'`, which masked the failure
        return 'fail'

    ds = None

    return 'success'
# Registry of test functions run by the gdaltest framework.
gdaltest_list = [
    mg4lidar_1]

if __name__ == '__main__':
    gdaltest.setup_run('mg4lidar')
    gdaltest.run_tests(gdaltest_list)
    gdaltest.summarize()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 25 08:48:00 2019
Recreated from <NAME>
@author: nikkicreange
"""
#!/usr/bin/python
# See http://maggotroot.blogspot.ch/2013/11/constrained-linear-least-squares-in.html for more info
'''
A simple library to solve constrained linear least squares problems
with sparse and dense matrices. Uses cvxopt library for
optimization
'''
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '1.0'
__date__ = '22.11.2013'
__license__ = 'WTFPL'
import numpy as np
from cvxopt import solvers, matrix, spmatrix, mul
import itertools
from scipy import sparse
def scipy_sparse_to_spmatrix(A):
    """Convert a scipy sparse matrix to a cvxopt spmatrix."""
    coo_form = A.tocoo()
    return spmatrix(coo_form.data, coo_form.row.tolist(), coo_form.col.tolist())
def spmatrix_sparse_to_scipy(A):
    """Convert a cvxopt spmatrix (anything exposing V, I, J) to scipy COO."""
    values = np.array(A.V).squeeze()
    row_idx = np.array(A.I).squeeze()
    col_idx = np.array(A.J).squeeze()
    return sparse.coo_matrix((values, (row_idx, col_idx)))
def sparse_None_vstack(A1, A2):
    """Vertically stack two sparse matrices, treating a None first
    operand as an empty matrix (the second operand is returned as-is)."""
    return A2 if A1 is None else sparse.vstack([A1, A2])
def numpy_None_vstack(A1, A2):
    """Vertically stack two dense arrays, treating a None first operand
    as empty (the second operand is returned unchanged)."""
    return A2 if A1 is None else np.vstack([A1, A2])
def numpy_None_concatenate(A1, A2):
    """Concatenate two 1-D arrays, treating a None first operand as
    empty (the second operand is returned unchanged)."""
    return A2 if A1 is None else np.concatenate([A1, A2])
def get_shape(A):
    """Return the (rows, cols) shape of a cvxopt spmatrix (``.size``) or
    a numpy/scipy matrix (``.shape``).

    BUG FIX: the original body referenced an undefined global ``C``
    instead of the parameter ``A``, raising NameError on every call.
    """
    if isinstance(A, spmatrix):
        return A.size
    else:
        return A.shape
def numpy_to_cvxopt_matrix(A):
    """Convert numpy/scipy input into the matching cvxopt type.

    None and already-converted (or unknown) objects pass through
    unchanged; scipy sparse becomes cvxopt spmatrix; numpy arrays become
    dense cvxopt 'd' matrices (1-D arrays are promoted to column vectors).
    """
    if A is None:
        return A
    if sparse.issparse(A):
        if isinstance(A, sparse.spmatrix):
            return scipy_sparse_to_spmatrix(A)
        return A
    if isinstance(A, np.ndarray):
        shape = (A.shape[0], 1) if A.ndim == 1 else A.shape
        return matrix(A, shape, 'd')
    return A
def cvxopt_to_numpy_matrix(A):
    """Convert cvxopt input back into numpy/scipy form.

    None passes through; a cvxopt spmatrix becomes scipy COO; everything
    else (dense cvxopt matrix, list, scalar, ...) is squeezed into a
    numpy array.
    """
    if A is None:
        return A
    if isinstance(A, spmatrix):
        return spmatrix_sparse_to_scipy(A)
    # dense cvxopt matrices and any other input squeeze down identically
    return np.array(A).squeeze()
def lsqlin(C, d, reg=0, A=None, b=None, Aeq=None, beq=None, \
           lb=None, ub=None, x0=None, opts=None):
    '''
    Solve linear constrained l2-regularized least squares. Can
    handle both dense and sparse matrices. Matlab's lsqlin
    equivalent. It is actually wrapper around CVXOPT QP solver.

        min_x ||C*x - d||^2_2 + reg * ||x||^2_2
        s.t.  A * x <= b
              Aeq * x = beq
              lb <= x <= ub

    Input arguments:
        C   is m x n dense or sparse matrix
        d   is m x 1 dense matrix
        reg is regularization parameter
        A   is p x n dense or sparse matrix
        b   is p x 1 dense matrix
        Aeq is q x n dense or sparse matrix
        beq is q x 1 dense matrix
        lb  is n x 1 matrix or scalar
        ub  is n x 1 matrix or scalar

    Output arguments:
        Return dictionary, the output of CVXOPT QP.

    Dont pass matlab-like empty lists to avoid setting parameters,
    just use None:
        lsqlin(C, d, 0.05, None, None, Aeq, beq) #Correct
        lsqlin(C, d, 0.05, [], [], Aeq, beq) #Wrong!
    '''
    sparse_case = False
    if sparse.issparse(A): #detects both np and cxopt sparse
        sparse_case = True
        #We need A to be scipy sparse, as I couldn't find how
        #CVXOPT spmatrix can be vstacked
        if isinstance(A, spmatrix):
            A = spmatrix_sparse_to_scipy(A)

    C = numpy_to_cvxopt_matrix(C)
    d = numpy_to_cvxopt_matrix(d)
    # QP form of the least-squares objective: 0.5 x'Qx + q'x
    # with Q = C'C and q = -C'd (constant term dropped).
    Q = C.T * C
    q = - d.T * C
    nvars = C.size[1]

    if reg > 0:
        # l2 regularization adds reg * identity to Q
        if sparse_case:
            I = scipy_sparse_to_spmatrix(sparse.eye(nvars, nvars,\
                format='coo'))
        else:
            I = matrix(np.eye(nvars), (nvars, nvars), 'd')
        Q = Q + reg * I

    lb = cvxopt_to_numpy_matrix(lb)
    ub = cvxopt_to_numpy_matrix(ub)
    b = cvxopt_to_numpy_matrix(b)

    if lb is not None: #Modify 'A' and 'b' to add lb inequalities
        if lb.size == 1:
            lb = np.repeat(lb, nvars)
        # lb <= x is encoded as -I x <= -lb
        if sparse_case:
            lb_A = -sparse.eye(nvars, nvars, format='coo')
            A = sparse_None_vstack(A, lb_A)
        else:
            lb_A = -np.eye(nvars)
            A = numpy_None_vstack(A, lb_A)
        b = numpy_None_concatenate(b, -lb)
    if ub is not None: #Modify 'A' and 'b' to add ub inequalities
        if ub.size == 1:
            ub = np.repeat(ub, nvars)
        # x <= ub is encoded as I x <= ub
        if sparse_case:
            ub_A = sparse.eye(nvars, nvars, format='coo')
            A = sparse_None_vstack(A, ub_A)
        else:
            ub_A = np.eye(nvars)
            A = numpy_None_vstack(A, ub_A)
        b = numpy_None_concatenate(b, ub)

    #Convert data to CVXOPT format
    A = numpy_to_cvxopt_matrix(A)
    Aeq = numpy_to_cvxopt_matrix(Aeq)
    b = numpy_to_cvxopt_matrix(b)
    beq = numpy_to_cvxopt_matrix(beq)

    #Set up options
    if opts is not None:
        for k, v in opts.items():
            solvers.options[k] = v

    #Run CVXOPT.SQP solver
    sol = solvers.qp(Q, q.T, A, b, Aeq, beq, None, x0)
    return sol
def lsqnonneg(C, d, opts):
    '''
    Solves nonnegative linear least-squares problem:

    min_x ||C*x - d||_2^2,  where x >= 0

    Delegates to lsqlin with a zero lower bound; all other lsqlin
    parameters keep their defaults (no regularization, no constraints).
    '''
    return lsqlin(C, d, lb=0, opts=opts)
|
<gh_stars>0
from flask import render_template,redirect,url_for,abort,request,flash
from app.main import main
from .forms import UpdateProfile,CreateBlog
from flask_login import login_required,current_user
from ..email import mail_message
from app.models import User,Blog,Comment,Follower
from ..import db
from app.requests import get_quotes
@main.route('/')
@login_required
def index():
    """Landing page: show all blog posts together with fetched quotes."""
    quotes = get_quotes()
    blogs = Blog.query.all()
    # Fix: removed the previous `request.method == "POST"` branch -- the
    # route declares no methods, so only GET is accepted and that branch was
    # unreachable dead code (it also built a Follower it never saved).
    # Subscriptions are handled by the /subscribe route.
    return render_template("index.html",
                           blogs=blogs,
                           quotes=quotes)
def save_picture(form_picture):
    """Return the directory used for uploaded profile photos.

    Note: the uploaded file object itself is currently ignored -- only the
    static photos directory path is returned.
    """
    photos_directory = 'app/static/photos'
    return photos_directory
@main.route('/new_post', methods=['POST','GET'])
@login_required
def new_blog():
    """Create a new blog post and e-mail every subscriber about it."""
    followers = Follower.query.all()
    form = CreateBlog()
    if form.validate_on_submit():
        title = form.title.data
        content = form.content.data
        user_id = current_user._get_current_object().id
        blog = Blog(title=title, content=content, user_id=user_id)
        blog.save()
        # Notify every subscriber of the new post.
        for follower in followers:
            mail_message("New Blog Post", "email/new_blog", follower.email, blog=blog)
        # Fix: flash() was previously placed after the `return redirect(...)`
        # branch, so the message only appeared when validation FAILED.
        flash('You Posted a new Blog')
        return redirect(url_for('main.index'))
    return render_template('newblogs.html', form=form)
@main.route('/blog/<id>')
@login_required
def blog(id):
    """Display a single blog post together with its comments."""
    post = Blog.query.get(id)
    post_comments = Comment.query.filter_by(blog_id=id).all()
    return render_template('blog.html', blog=post, comments=post_comments)
@main.route('/blog/<blog_id>/update',methods = ['GET','POST'])
@login_required
def updatedblog(blog_id):
    """Let a post's author edit it; any other user gets a 403."""
    blog = Blog.query.get(blog_id)
    if blog.user != current_user:
        abort(403)
    form = CreateBlog()
    if form.validate_on_submit():
        # Persist the edited title and content.
        blog.title = form.title.data
        blog.content = form.content.data
        db.session.commit()
        flash("You have updated your Blog!")
        return redirect(url_for('main.blog', id=blog.id))
    if request.method == 'GET':
        # Pre-populate the edit form with the current values.
        form.title.data = blog.title
        form.content.data = blog.content
    return render_template('newblogs.html', form=form)
@main.route('/blog/<blog_id>/delete', methods=["DELETE"])
@login_required
def delete_post(blog_id):
    """Delete a blog post; only its author is allowed to."""
    # Fix: `Blog.query.filter_by(blog_id)` is invalid -- filter_by() only
    # accepts keyword arguments.  Look the row up by primary key instead.
    blog = Blog.query.get(blog_id)
    if blog is None:
        abort(404)
    # Fix: the ownership check previously ran AFTER the row had already been
    # deleted and committed; authorize first, then delete exactly once
    # (the old code deleted via the session AND called blog.delete()).
    if blog.user != current_user:
        abort(403)
    db.session.delete(blog)
    db.session.commit()
    flash("blog deleted")
    return redirect(url_for('main.index'))
@main.route('/user/<string:username>')
@login_required
def user_posts(username):
    """Show every post written by the named user."""
    author = User.query.filter_by(username=username).first()
    authored_blogs = Blog.query.filter_by(user=author)
    return render_template('post.html', blogs=authored_blogs, user=author)
@main.route('/subscribe',methods = ['POST','GET'])
@login_required
def subscribe():
    """Register a new e-mail subscriber and send a welcome message."""
    email = request.form.get('follower')
    new_follower = Follower(email=email)
    new_follower.save_follower()
    mail_message("Subscribed to Blog-1", "email/follower", new_follower.email,
                 new_follower=new_follower)
    # Fix: corrected the misspelled flash message ('Sucessfuly subscribed!').
    flash('Successfully subscribed!')
    return redirect(url_for('main.index'))
@main.route('/profile',methods = ['POST','GET'])
@login_required
def profile():
    """View and edit the current user's own profile."""
    form = UpdateProfile()
    if form.validate_on_submit():
        if form.profile_pic.data:
            # Store (the directory of) the uploaded picture.
            picture_file = save_picture(form.profile_pic.data)
            current_user.profile_pic_path = picture_file
        current_user.username = form.username.data
        current_user.email = form.email.data
        current_user.bio = form.bio.data
        db.session.commit()
        # Fix: corrected the misspelled flash message ('Succesfully ...').
        flash('Successfully updated your profile')
        return redirect(url_for('main.profile'))
    elif request.method == 'GET':
        # Pre-populate the form with the current profile values.
        form.username.data = current_user.username
        form.email.data = current_user.email
        form.bio.data = current_user.bio
    # NOTE(review): assumes profile_pic_path is always set; a None value would
    # raise on concatenation -- confirm the User model provides a default.
    profile_pic_path = url_for('static', filename='photos/' + current_user.profile_pic_path)
    return render_template('profile/profile.html', profile_pic_path=profile_pic_path, form=form)
@main.route('/user/<name>/updateprofile', methods = ['POST','GET'])
@login_required
def updateprofile(name):
    """Update the bio on the named user's profile; 404 if unknown user."""
    form = UpdateProfile()
    target = User.query.filter_by(username = name).first()
    if target == None:
        abort(404)
    if not form.validate_on_submit():
        return render_template('profile/updateprofile.html', form=form)
    target.bio = form.bio.data
    target.save()
    return redirect(url_for('.profile', name=name))
@main.route('/comment/<blog_id>', methods = ['Post','GET'])
@login_required
def comment(blog_id):
    """Attach a new comment from the current user to the given post."""
    # NOTE(review): 'Post' is mixed-case here; Werkzeug upper-cases route
    # methods, so POST requests still match.
    target_blog = Blog.query.get(blog_id)
    posted_text = request.form.get('newcomment')
    author_id = current_user._get_current_object().id
    new_comment = Comment(comment=posted_text, user_id=author_id, blog_id=blog_id)
    new_comment.save()
    return redirect(url_for('main.blog', id=target_blog.id))
<reponame>MrEliptik/PaperWithCodeScrapper
import urllib
import requests
from bs4 import BeautifulSoup as bs
from validator_collection import checkers
class Scraper:
    """Scraper for paperswithcode.com listing pages.

    Each ``scrap*`` method fetches one listing (trending / latest /
    greatest), extracts per-paper metadata from the listing page, and
    follows each paper's detail page to resolve its PDF and GitHub links.
    """

    def __init__(self):
        self.rootURL = 'https://paperswithcode.com'
        self.trendingPapersURL = self.rootURL
        self.latestURL = 'https://paperswithcode.com/latest'
        self.greatestURL = 'https://paperswithcode.com/greatest'
        self.linkToPaperPage = None
        # Caches of the most recent scrape of each listing.
        self.trendingPapers = None
        self.latestPapers = None
        self.greatestPapers = None

    def scrapTrending(self):
        """Scrape the trending listing; cache and return a list of paper dicts."""
        req = requests.get(self.trendingPapersURL)
        soup = bs(req.text, 'lxml')
        self.trendingPapers = self.scrapPage(soup).copy()
        return self.trendingPapers

    def scrapLatest(self):
        """Scrape the latest listing; cache and return a list of paper dicts."""
        req = requests.get(self.latestURL)
        soup = bs(req.text, 'lxml')
        self.latestPapers = self.scrapPage(soup).copy()
        return self.latestPapers

    def scrapGreatest(self):
        """Scrape the greatest listing; cache and return a list of paper dicts."""
        req = requests.get(self.greatestURL)
        soup = bs(req.text, 'lxml')
        self.greatestPapers = self.scrapPage(soup).copy()
        return self.greatestPapers

    def scrapPage(self, soup):
        """Extract paper metadata from a parsed listing page.

        Returns a list of dicts with (when found) the keys: 'image',
        'title', 'nb_stars', 'hourly_stars', 'pdf' and 'github'.
        """
        papers = []
        items_divs = soup.find_all('div', {'class':'row infinite-item item'})
        for item in items_divs:
            # Fix: use a fresh dict per item -- a single shared dict meant
            # fields from earlier papers leaked into later entries whenever a
            # lookup failed for the current item.
            papers_dict = {}
            # Fix: initialise before the loop -- this local was previously
            # only assigned inside a try block, so a parse failure on the
            # first item raised UnboundLocalError at the check below.
            linkToPaperPage = None
            for child in item.children:
                # Image
                try:
                    # Check if classes are in child attributes
                    if set(child.attrs['class']) <= set(['col-lg-3', 'item-image-col']):
                        # Image url
                        print(child.find('div', {'class':'item-image'})['style'])
                        papers_dict['image'] = self.rootURL + str(child.find('div', {'class':'item-image'})['style'].split("('", 1)[1].split("')")[0])
                        print(papers_dict['image'])
                except (AttributeError, KeyError, IndexError, TypeError):
                    # Fix: narrowed from a bare `except:` which also swallowed
                    # KeyboardInterrupt/SystemExit and hid real bugs.
                    pass
                # Content
                try:
                    if set(child.attrs['class']) <= set(['col-lg-9', 'item-col']):
                        # Title
                        print(child.find('h1').a.string)
                        papers_dict['title'] = child.find('h1').a.string
                        # Nb stars
                        print(child.find('span', {'class':'badge badge-secondary'}).text.strip())
                        papers_dict['nb_stars'] = child.find('span', {'class':'badge badge-secondary'}).text.strip()
                        # Star/hour
                        print(child.find('div', {'class':'stars-accumulated text-center'}).text.strip())
                        papers_dict['hourly_stars'] = child.find('div', {'class':'stars-accumulated text-center'}).text.strip()
                        # Paper page link
                        print(child.find('a', {'class':'badge badge-light'})['href'])
                        linkToPaperPage = child.find('a', {'class':'badge badge-light'})['href']
                except (AttributeError, KeyError, IndexError, TypeError):
                    pass
            if linkToPaperPage is not None:
                req = requests.get(self.rootURL + linkToPaperPage)
                # Fix: parse the paper page into its own soup instead of
                # clobbering the listing-page `soup` argument.
                paper_soup = bs(req.text, 'lxml')
                print(paper_soup.find('a', {'class':'badge badge-light'})['href'])
                pdf_link = paper_soup.find('a', {'class':'badge badge-light'})['href']
                # Check if the link found is the pdf or a search query
                if checkers.is_url(pdf_link):
                    r = requests.get(pdf_link)
                else:
                    r = requests.get(self.rootURL + pdf_link)
                content_type = r.headers.get('content-type')
                if 'application/pdf' in content_type:
                    papers_dict['pdf'] = pdf_link
                    # Github link
                    print(paper_soup.find('a', {'class':'code-table-link'})['href'])
                    papers_dict['github'] = paper_soup.find('a', {'class':'code-table-link'})['href']
                elif 'text/html' in content_type:
                    result_soup = bs(r.text, 'lxml')
                    # PDF link
                    print(result_soup.find('a', {'class':'badge badge-light'})['href'])
                    papers_dict['pdf'] = result_soup.find('a', {'class':'badge badge-light'})['href']
                    # Github link
                    print(result_soup.find('a', {'class':'code-table-link'})['href'])
                    papers_dict['github'] = result_soup.find('a', {'class':'code-table-link'})['href']
            papers.append(papers_dict)
        return papers
<reponame>davidlrobinson/sentdex-blob<filename>env.py
import gym
from gym import spaces
from gym.utils import seeding
import numpy as np
import cv2
import matplotlib.pyplot as plt
import time
from blob import Blob
SIZE = 10             # side length of the square grid world
N_EPISODES = 5        # number of demo episodes run under __main__
MOVE_REWARD = -1      # per-step penalty (encourages short paths)
ENEMY_REWARD = -300   # penalty for colliding with the enemy (episode ends)
FOOD_REWARD = 25      # reward for reaching the food (episode ends)
SHOW_EVERY = 1        # render every SHOW_EVERY-th episode in the demo loop
class BlobEnv(gym.Env):
    """Grid-world Gym environment: a player blob seeks food while avoiding a
    randomly moving enemy on a ``size`` x ``size`` board.

    Observations are the (x, y) coordinates of player, food and enemy
    stacked into a (3, 2) array; actions are the 8 compass moves plus
    "stay put".
    """

    def __init__(self, size):
        # Seed first: reset() draws initial positions from self.np_random.
        self.seed()
        self.size = size
        self.reset()
        self.viewer = None
        self.action_space = spaces.Discrete(9) # 8 principle winds and no movement
        self.observation_space = spaces.Box(low=0, high=size-1, shape=(3, 2), dtype=int) # x and y coords of player, food, and enemy

    def seed(self, seed=None):
        """Seed the environment's RNG (gym convention: returns [seed])."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    def step(self, action):
        """Apply one action and return ``(state, reward, done, info)``.

        Actions 0-8 map to no movement plus the 8 compass directions;
        the enemy simultaneously takes a uniform random -1/0/+1 step on
        each axis.
        """
        if action == 0:
            x, y = 0, 0 # no movement
        elif action == 1:
            x, y = 0, 1 # up
        elif action == 2:
            x, y = 1, 1 # up-right
        elif action == 3:
            x, y = 1, 0 # right
        elif action == 4:
            x, y = 1, -1 # down-right
        elif action == 5:
            x, y = 0, -1 # down
        elif action == 6:
            x, y = -1, -1 # down-left
        elif action == 7:
            x, y = -1, 0 # left
        elif action == 8:
            x, y = -1, 1 # up-left
        self.player.move(x, y, bound=self.size-1)
        self.enemy.move(*self.np_random.randint(-1, 2, (2,)), bound=self.size-1)
        self.state = np.array([self.player.pos, self.food.pos, self.enemy.pos])
        # NOTE(review): assumes Blob.pos compares by value (e.g. a tuple) --
        # confirm in blob.py.
        if self.player.pos == self.enemy.pos:
            reward = ENEMY_REWARD
            done = True
        elif self.player.pos == self.food.pos:
            reward = FOOD_REWARD
            done = True
        else:
            reward = MOVE_REWARD
            done = False
        return self.state, reward, done, {}

    def reset(self):
        """Place player, food and enemy uniformly at random; return the state."""
        self.player = Blob(*self.np_random.randint(0, self.size, (2,), dtype=int))
        self.food = Blob(*self.np_random.randint(0, self.size, (2,), dtype=int))
        self.enemy = Blob(*self.np_random.randint(0, self.size, (2,), dtype=int))
        self.state = np.array([self.player.pos, self.food.pos, self.enemy.pos])
        return self.state

    def render(self, mode='human'):
        """Draw the three blobs with gym's classic-control viewer."""
        screen_size = 300
        size = self.size
        scale = screen_size // size  # pixels per grid cell
        blob_vertices = [(0, 0), (0, scale), (scale, scale), (scale, 0)]
        if self.viewer is None:
            # Lazily create the viewer and one polygon + transform per blob.
            from gym.envs.classic_control import rendering
            self.viewer = rendering.Viewer(screen_size, screen_size)
            player = rendering.FilledPolygon(blob_vertices)
            player.set_color(0, 0, 255)
            self.playertrans = rendering.Transform()
            player.add_attr(self.playertrans)
            self.viewer.add_geom(player)
            food = rendering.FilledPolygon(blob_vertices)
            food.set_color(0, 255, 0)
            self.foodtrans = rendering.Transform()
            food.add_attr(self.foodtrans)
            self.viewer.add_geom(food)
            enemy = rendering.FilledPolygon(blob_vertices)
            enemy.set_color(255, 0, 0)
            self.enemytrans = rendering.Transform()
            enemy.add_attr(self.enemytrans)
            self.viewer.add_geom(enemy)
        # Move each geom to its blob's current grid cell.
        self.playertrans.set_translation(self.player.x*scale, self.player.y*scale)
        self.foodtrans.set_translation(self.food.x*scale, self.food.y*scale)
        self.enemytrans.set_translation(self.enemy.x*scale, self.enemy.y*scale)
        return self.viewer.render()

    def close(self):
        """Dispose of the viewer window, if one was created."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None
if __name__ == "__main__":
    # Demo: run a few episodes with random actions, rendering periodically.
    env = BlobEnv(SIZE)
    for episode in range(N_EPISODES):
        env.reset()
        for step in range(200):
            if episode % SHOW_EVERY == 0:
                env.render()
            _, _, finished, _ = env.step(env.action_space.sample())
            if finished:
                print("Episode {} finished after {} timesteps".format(episode + 1, step + 1))
                break
    env.close()
#!/usr/bin/python
import logging
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.core.mail import send_mail
from detention_notifier.models import Detention, DetentionMailer, Offense, Code, DetentionErrorNotification
from academics.models import Teacher, Student, AcademicYear, Term
from academics.utils import fmpxmlparser
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Import detention records from a FileMaker Pro XML export.

    Rows are upserted into ``Detention`` inside a single transaction;
    detentions absent from the export are deleted afterwards.  Unknown
    offenses trigger an error e-mail and the row is skipped.
    """
    help = "Import Detentions"

    def add_arguments(self, parser):
        # Single positional argument: path of the FMP XML export to import.
        parser.add_argument('filename', metavar='FILENAME', help='The filename to process the detentions from')

    def handle(self, *args, **kwargs):
        logger.info("Beginning detention import routine")
        data = fmpxmlparser.parse_from_file(kwargs['filename'])
        results = data['results']
        seen_ids = set()  # incident IDs present in this export
        with transaction.atomic():
            detention_mailer = DetentionMailer.objects.get()
            skip_processing_before = detention_mailer.skip_processing_before
            for row in results:
                fields = row['parsed_fields']
                incident_id = fields['IDINCIDENT']
                detention_date = fields['Det Date']
                raw_code = fields['Code'] or ""
                raw_offense = fields['Offense']
                comments = fields['Comments'] or ""
                student_id = fields['IDSTUDENT']
                teacher_id = fields['KSTeachers::IDTEACHER']
                raw_academic_year = fields['AcademicYear']
                raw_term = fields['Term']
                # Ignore rows older than the configured cutoff date.
                if skip_processing_before and detention_date < skip_processing_before:
                    continue
                # Get-or-create the code; codes flagged not-to-process are skipped.
                try:
                    code = Code.objects.get(code=raw_code)
                except Code.DoesNotExist:
                    code = Code(code=raw_code)
                    code.save()
                if not code.process:
                    continue
                # Get-or-create the academic year and term.
                try:
                    academic_year = AcademicYear.objects.get(year=raw_academic_year)
                except AcademicYear.DoesNotExist:
                    academic_year = AcademicYear(year=raw_academic_year)
                    academic_year.save()
                try:
                    term = Term.objects.get(academic_year=academic_year, term=raw_term)
                except Term.DoesNotExist:
                    term = Term(academic_year=academic_year, term=raw_term)
                    term.save()
                if raw_offense:
                    try:
                        offense = Offense.objects.get(offense__iexact=raw_offense)
                    except Offense.DoesNotExist:
                        # Unknown offense: mail the configured recipients and skip the row.
                        error_recipients = [o.address for o in DetentionErrorNotification.objects.filter(mailer=detention_mailer)]
                        if not error_recipients:
                            raise ValueError("No error recipients are defined")
                        send_mail("Error importing detention", "Error importing detention {id:}: offense '{offense:}' does not exist".format(
                            id=incident_id, offense=raw_offense), '<EMAIL>', error_recipients)
                        continue
                else:
                    offense = None
                if teacher_id:
                    teacher = Teacher.objects.get(teacher_id=teacher_id)
                else:
                    teacher = None
                if student_id:
                    student = Student.objects.get(student_id=student_id)
                else:
                    student = None
                if not incident_id:
                    logger.error("Blank incident ID")
                    continue
                seen_ids.add(incident_id)
                # Upsert: fetch the existing detention or create a new one.
                try:
                    incident = Detention.objects.get(incident_id=incident_id)
                    logger.info("Found detention {id:}".format(id=incident_id))
                    force_save = False
                except Detention.DoesNotExist:
                    logger.info("Creating detention {id:}".format(id=incident_id))
                    incident = Detention(
                        incident_id=incident_id,
                        code=code,
                        student=student,
                        teacher=teacher
                    )
                    force_save = True
                # Sync mutable fields, saving only when something actually changed.
                attr_map = {
                    'detention_date': detention_date,
                    'code': code,
                    'offense': offense,
                    'comments': comments,
                    'term': term,
                    'student': student,
                    'teacher': teacher,
                }
                for attr in attr_map:
                    db_value = getattr(incident, attr)
                    if db_value != attr_map[attr]:
                        setattr(incident, attr, attr_map[attr])
                        logger.info("Updating {attr:} on {incident_id:} from {old_value:} to {new_value:}".format(
                            attr=attr, incident_id=incident_id, old_value=db_value, new_value=attr_map[attr]))
                        force_save = True
                if force_save:
                    incident.save()
            # Remove detentions that no longer appear in the export.
            extra_detentions = Detention.objects.exclude(incident_id__in=seen_ids)
            if extra_detentions:
                # Fix: Logger.warn() has been deprecated since Python 3.3;
                # use warning() instead.
                logger.warning("Deleting {} detentions".format(extra_detentions.count()))
                extra_detentions.delete()
<reponame>noooway/exj
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.gridlayout import GridLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.textinput import TextInput
from ExerciseRunning import *
class ExerciseRunningInputWidget( GridLayout ):
    """Input widget for one running exercise.

    Lays out a title row (name, column headers, delete button), one row per
    (distance, time) interval, an 'Add interval' button row, and a free-text
    comment box.  Every text change notifies the owning training screen via
    ``update_training_from_user_input``.
    """

    def __init__( self, exercise_running,
                  current_training_screen, **kwargs ):
        """Build the widget from an ExerciseRunning instance.

        current_training_screen handles change notifications and removal
        of the whole exercise widget.
        """
        super( ExerciseRunningInputWidget, self ).__init__( **kwargs )
        # One column; fixed row height so the widget works inside a
        # scrollable parent (height tracks minimum_height).
        self.cols = 1
        self.spacing = 1
        self.row_default_height = 40
        self.row_force_default = True
        self.size_hint_y = None
        self.bind( minimum_height = self.setter('height') )
        self.exercise_name = exercise_running.description['name']
        # Title row: exercise name, column headers and a delete button.
        title_layout = BoxLayout( orientation = 'horizontal',
                                  spacing = 30 )
        excercise_label = Label( text = self.exercise_name )
        title_layout.add_widget( excercise_label )
        title_layout.add_widget( Label( text = "Distance (km)" ) )
        title_layout.add_widget( Label( text = "Time" ) )
        del_excercise_btn = Button( text = "Del Exc", size_hint_x = 0.3 )
        del_excercise_btn.on_press = \
            lambda: current_training_screen.remove_exercise( self )
        title_layout.add_widget( del_excercise_btn )
        self.add_widget( title_layout )
        # One input row per already-recorded (distance, time) interval.
        self.add_dist_time_intervals_from_exercise(
            exercise_running,
            current_training_screen )
        # Button row to append new interval rows.
        add_interval_btn_layout = BoxLayout( orientation = 'horizontal',
                                             spacing = 30 )
        add_interval_btn_layout.add_widget( Label(
            text='',
            size_hint = (0.30, 1.0) ) )
        add_interval_btn_layout.add_widget( Button(
            text = 'Add interval',
            size_hint = (0.775, 1.0),
            on_press = lambda x: self.add_dist_time_interval(
                current_training_screen, index_in_layout = 2 ) ) )
        self.add_widget( add_interval_btn_layout )
        # Free-text comment box, pre-filled with any stored comment.
        comment_text = exercise_running.description.get(
            'comment',
            'Comment Exercise')
        self.comment = TextInput( hint_text = comment_text )
        self.comment.bind(
            text =
            current_training_screen.update_training_from_user_input )
        self.add_widget( self.comment )

    def add_dist_time_intervals_from_exercise( self,
            exercise_running, current_training_screen ):
        """Add one interval row per (distance, time) pair of the exercise."""
        for (dist, time) in zip(
                exercise_running.description['distances'],
                exercise_running.description['times'] ):
            self.add_dist_time_interval( current_training_screen,
                                         dist, time )

    def add_dist_time_interval( self,
                                current_training_screen,
                                hint_dist = '1.0',
                                hint_time = '4:00',
                                index_in_layout = 0 ):
        """Append one (distance, time) input row with a per-row delete button.

        hint_dist / hint_time become the inputs' placeholder text;
        index_in_layout positions the row among this widget's children.
        """
        interval_layout = GridLayout( rows = 1, spacing = 30 )
        interval_layout.height = 30
        pos_shift = Label( text='' )  # spacer to align with the title row
        interval_layout.add_widget( pos_shift )
        distance = TextInput( hint_text = str( hint_dist ) )
        interval_layout.add_widget( distance )
        time = TextInput( hint_text = str( hint_time ) )
        interval_layout.add_widget( time )
        # Any edit propagates back to the training screen.
        distance.bind(
            text =
            current_training_screen.update_training_from_user_input )
        time.bind(
            text =
            current_training_screen.update_training_from_user_input )
        del_button = Button( text = "Del Int", size_hint_x = 0.3 )
        del_button.on_press = lambda: self.remove_interval_widget(
            current_training_screen, interval_layout )
        interval_layout.add_widget( del_button )
        self.add_widget( interval_layout, index = index_in_layout )

    def exercise_from_user_input( self ):
        """Build an ExerciseRunning from the current state of the inputs.

        children[2:-1] are the interval rows (Kivy children are in reverse
        insertion order, hence the insert(0, ...) to restore input order).
        """
        # todo: add input check
        intervals = len( self.children[2:-1] )
        distances = []
        times = []
        for dist_time_interval in self.children[2:-1]:
            dist_input = dist_time_interval.children[2].text
            time_input = dist_time_interval.children[1].text
            # NOTE(review): TextInput.text is normally '' (not None) when
            # empty, so these guards may never filter anything -- confirm.
            if dist_input is not None:
                distances.insert( 0, dist_input )
            if time_input is not None:
                times.insert( 0, time_input )
        comment = self.comment.text
        exc = ExerciseRunning( name = self.exercise_name,
                               intervals = intervals,
                               distances = distances,
                               times = times,
                               description_dict = { 'comment': comment } )
        return( exc )

    def remove_interval_widget( self,
                                current_training_screen,
                                interval_layout ):
        """Remove one interval row and re-sync the training screen."""
        self.remove_widget( interval_layout )
        current_training_screen.update_training_from_user_input()
        print( 'deleting' )
|
import warnings
import numpy as np
import networkx as nx
from scipy.stats import logistic
from mossspider.estimators.utils import fast_exp_map
def uniform_network(n, degree, pr_w=0.35, seed=None):
    """Generates a uniform random graph for a set number of nodes (n) and specified max and min degree (degree).
    Additionally, assigns a binary baseline covariate, W, to each observation.

    Parameters
    ----------
    n : int
        Number of nodes in the generated network
    degree : list, set, array
        An array of two elements. The first element is the minimum degree and the second element is the maximum degree.
    pr_w : float, optional
        Probability of W=1. W is a binary baseline covariate assigned to each unit.
    seed : int, None, optional
        Random seed to use. Default is None.

    Returns
    -------
    networkx.Graph

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import uniform_network

    Generating the uniform network

    >>> G = uniform_network(n=500, degree=[0, 2])
    """
    rng = np.random.default_rng(seed)
    # Processing degree data
    if len(degree) > 2:
        warnings.warn('It looks like your specified bounds is more than two floats. Only the first two '
                      'specified bounds are used by the bound statement. So only ' +
                      str(degree[0:2]) + ' will be used', UserWarning)
    if type(degree) is float or type(degree) is int or type(degree) is str:
        raise ValueError("degree must be a container of integers")
    elif degree[0] > degree[1]:
        raise ValueError('degree thresholds must be listed in ascending order')
    elif type(degree[0]) is str or type(degree[1]) is str:
        raise ValueError('degree must be integers')
    elif type(degree[0]) is float or type(degree[1]) is float:
        raise ValueError('degree must be integers')
    elif degree[0] < 0 or degree[1] < 0:
        raise ValueError('Both degree values must be positive values')
    else:
        # Completed all checks
        pass
    # Keep drawing degree sequences until their total is even, which the
    # configuration model requires.
    # Fix: the running total was previously named `sum`, shadowing the
    # built-in `sum()` within this function.
    degree_sum = 1
    while degree_sum % 2 != 0:                      # Degree distribution must be even
        degree_dist = list(rng.integers(degree[0],  # ... proposed degree distribution for min degree
                                        degree[1]+1,  # ... and max degree (+1 to be inclusive)
                                        size=n))    # ... for the n units
        degree_sum = np.sum(degree_dist)            # ... update the total to see if valid
    # Generate network with proposed degree distribution
    G = nx.configuration_model(degree_dist,         # Generate network
                               seed=seed)           # ... with seed for consistency
    # Removing multiple edges!
    G = nx.Graph(G)                                 # No multi-loops in networks we consider here
    # Removing self-loops
    G.remove_edges_from(nx.selfloop_edges(G))       # No self-loops in networks we consider here
    # Generating baseline covariate W
    w = rng.binomial(n=1, p=pr_w, size=n)           # Generate W
    for node in G.nodes():                          # Adding W to the network node attributes
        G.nodes[node]['W'] = w[node]                # ... via simple indexing
    # Returning the completed graph
    return G
def clustered_power_law_network(n_cluster, edges=3, pr_cluster=0.75, pr_between=0.0007, pr_w=0.35, seed=None):
    """Generate a graph with the following features: follows a power-law degree distribution, high(er) clustering
    coefficient, and an underlying community structure. This graph is created by generating a number of subgraphs with
    power-law distributions and clustering. The subgraphs are generated using
    ``networkx.powerlaw_cluster_graph(n=n_cluster[...], m=edges, p=p_cluster)``. This process is repeated for each
    element in the ``n_cluster`` argument. Then the subgraphs are then randomly connected by creating random edges
    between nodes of the subgraphs.

    Parameters
    ----------
    n_cluster : list, set, array, ndarray
        Specify the N for each subgraph in the clustered power-law network via a list. List should be positive integers
        that correspond to the N for each subgraph.
    edges : int, optional
        Number of edges to generate within each cluster. Equivalent to the ``m`` argument in
        ``networkx.powerlaw_cluster_graph``.
    pr_cluster : float, optional
        Probability of a new node forming a triad with neighbors of connected nodes
    pr_between : float, optional
        Probability of an edge between nodes of each cluster. Evaluated for all node pairs, so should be relatively
        low to keep a high community structure. Default is 0.0007.
    pr_w : float, optional
        Probability of the binary baseline covariate W for the network. Default is 0.35.
    seed : int, None, optional
        Random seed. Default is None.

    Returns
    -------
    networkx.Graph

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import clustered_power_law_network

    Generating the clustered power-law network

    >>> G = clustered_power_law_network(n_cluster=[50, 50, 50, 50])
    """
    # Prep environment
    rng = np.random.default_rng(seed)
    N = nx.Graph()
    for i in range(len(n_cluster)):
        # Generate the component / subgraph (each with its own derived seed)
        G = nx.powerlaw_cluster_graph(int(n_cluster[i]),
                                      m=edges,
                                      p=pr_cluster,
                                      seed=int(rng.integers(10000, 500000, size=1)[0]))
        # Re-label nodes so no corresponding overlaps between node labels
        if i == 0:
            start_label = 0
        else:
            start_label = np.sum(n_cluster[:i])
        mapping = {}
        for j in range(n_cluster[i]):
            mapping[j] = start_label + j
        H = nx.relabel_nodes(G, mapping)
        # Adding component / subgraph to overall network
        N.add_nodes_from(H.nodes)
        N.add_edges_from(H.edges)
    # Creating some random connections across groups
    for i in range(len(n_cluster)):
        # Gettings IDs for the subgraph
        first_id = int(np.sum(n_cluster[:i]))
        last_id = int(np.sum(n_cluster[:i + 1]))
        # Only adding edges to > last_id
        # NOTE(review): the j-range starts at first_id + 1 and ends at
        # last_id (inclusive via `+ 1`), so the first node of each cluster
        # never receives a cross-cluster edge and node `last_id` (first node
        # of the next cluster) does -- confirm this off-by-one is intended.
        for j in range(first_id + 1, last_id + 1):
            for n in list(N.nodes()):
                if n > last_id:
                    if rng.uniform(0, 1) < pr_between:
                        N.add_edge(j, n)
    # Generating baseline covariate W
    w = rng.binomial(n=1, p=pr_w, size=np.sum(n_cluster))  # Generate W
    for node in N.nodes():                                 # Adding W to the network node attributes
        N.nodes[node]['W'] = w[node]                       # ... via simple indexing
    # Returning the generated network
    return N
def generate_observed(graph, seed=None):
    r"""Simulate the exposure and outcome for the uniform random graph
    (mechanisms follow Sofrygin & van der Laan 2017).

    .. math::

        A = \text{Bernoulli}(\text{expit}(-1.2 + 1.5 W + 0.6 W^s)) \\
        Y = \text{Bernoulli}(\text{expit}(-2.5 + 0.5 A + 1.5 A^s + 1.5 W + 1.5 W^s))

    Parameters
    ----------
    graph : Graph
        Graph generated by the `uniform_network` function.
    seed : int, None, optional
        Random seed to use. Default is None.

    Returns
    -------
    Network object with 'A' and 'Y' node attributes added

    References
    ----------
    Sofrygin O, & van der Laan MJ. (2017). Semi-parametric estimation and
    inference for the mean outcome of the single time-point intervention in a
    causally connected population. *Journal of Causal Inference*, 5(1).
    """
    rng = np.random.default_rng(seed)
    node_count = len(graph.nodes())
    baseline = np.array([attrs['W'] for _, attrs in graph.nodes(data=True)])
    adjacency = nx.adjacency_matrix(graph)
    # Exposure A depends on each unit's W and the summed W of its neighbours.
    summed_w = fast_exp_map(adjacency,
                            baseline,
                            measure='sum')
    exposure = rng.binomial(n=1,
                            p=logistic.cdf(-1.2 + 1.5*baseline + 0.6*summed_w),
                            size=node_count)
    for vertex in graph.nodes():
        graph.nodes[vertex]['A'] = exposure[vertex]
    # Outcome Y depends on own W and A plus neighbour sums of both.
    summed_a = fast_exp_map(adjacency,
                            exposure,
                            measure='sum')
    outcome = rng.binomial(n=1,
                           p=logistic.cdf(-2.5 + 1.5*baseline + 0.5*exposure
                                          + 1.5*summed_a + 1.5*summed_w),
                           size=node_count)
    for vertex in graph.nodes():
        graph.nodes[vertex]['Y'] = outcome[vertex]
    return graph
def generate_truth(graph, p, seed=None):
    r"""Simulates the true conditional mean outcome for a given network,
    distribution of W, and policy.

    The true mean under the policy is simulated as

    .. math::

        A = Bernoulli(p) \\
        Y = Bernoulli(expit(-2.5 + 1.5*W + 0.5*A + 1.5*map(A) + 1.5*map(W)))

    Parameters
    ----------
    graph : Graph
        Graph with a binary 'W' node attribute (e.g. from `uniform_network`).
    p : float
        Probability of exposure under the stochastic policy.
    seed : int, None, optional
        Random seed. Default is None (the unseeded default preserves the old
        behaviour of drawing fresh randomness on each call).

    Returns
    -------
    float

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import uniform_network, generate_truth

    Generating the uniform network

    >>> G = uniform_network(n=500, degree=[0, 2])

    Calculating truth for a policy via a large number of replicates

    >>> true_p = []
    >>> for i in range(1000):
    >>>     y_mean = generate_truth(graph=G, p=0.5)
    >>>     true_p.append(y_mean)
    >>> np.mean(true_p)  # 'true' value for the stochastic policy

    To reduce random error, a large number of replicates should be used
    """
    # Consistency fix: use numpy's Generator API (as the other functions in
    # this module do) rather than the legacy np.random global state, and
    # accept an optional seed for reproducibility.
    rng = np.random.default_rng(seed)
    n = len(graph.nodes())
    # Fix: the comprehension loop variable previously reused the name `n`.
    w = np.array([d['W'] for _, d in graph.nodes(data=True)])
    # Draw exposure under the stochastic policy and attach it to the network
    a = rng.binomial(n=1, p=p, size=n)
    for node in graph.nodes():
        graph.nodes[node]['A'] = a[node]
    # Neighbour-summed W and A, then draw the outcome
    adj_mat = nx.adjacency_matrix(graph)
    w_s = fast_exp_map(adj_mat,
                       w,
                       measure='sum')
    a_s = fast_exp_map(adj_mat,
                       a,
                       measure='sum')
    y = rng.binomial(n=1, p=logistic.cdf(-2.5 + 1.5*w + 0.5*a + 1.5*a_s + 1.5*w_s), size=n)
    return np.mean(y)
|
<reponame>JIAQING-XIE/Google_NLP_DL
import re
import torch
import numpy as np
from sklearn.model_selection import train_test_split
from gensim.models import KeyedVectors
from gensim.scripts.glove2word2vec import glove2word2vec
from stanfordcorenlp import StanfordCoreNLP
# Local Stanford CoreNLP installation used by Data.tokenize(method="nlp").
path = r'C:\\Users\\11415\\Desktop\\stanford-corenlp-4.2.2\\'
# NOTE(review): module-level side effect -- the CoreNLP client is created at
# import time even when only the regex tokenizer is used; confirm intended.
nlp = StanfordCoreNLP(path)
class Data():
    """Loader/preprocessor for '####'-separated ABSA-style NER data.

    Each input line looks like ``<sentence>####<token>=<tag> <token>=<tag>``;
    per-token tags are converted in place to a BIOES scheme by
    :meth:`statistics`.
    """

    def __init__(self, data_path, method = "BIOES"):
        # data_path: path of the raw annotated file; method: tagging scheme.
        self.data_path = data_path
        self.method = method

    def transform(self, method = "re"):
        """Read the data file and split each line into tokens and tags.

        1. Tokenize the raw sentence (the part before '####').
        2. Transform the annotated part into parallel token/tag lists,
           where tags later become B, I, E, S according to position.

        Returns ``(train_word_lists, tag_lists)`` and asserts that the
        tokenizer output matches the annotated token sequence per line.
        """
        word_lists = []
        train_word_lists = []
        tag_lists = []
        with open(self.data_path, encoding='utf-8') as reader:
            for line in reader.readlines():
                count = 0
                tag_list = []
                train_word_list = []
                for part in line.strip().split('####'):  # take '####' as the split standard
                    if count == 0:
                        # First part: the raw sentence -- tokenize it.
                        sen_tk = self.tokenize(part)
                        word_lists.append(sen_tk)
                        count+=1
                    else:
                        # Remaining part: space-separated "token=tag" pairs.
                        for element in part.split(' '):
                            if element[0] == "=":
                                # The token itself is '='; its tag follows the second '='.
                                tag_list.append(element.split("=")[2])
                                train_word_list.append("=")
                            else:
                                tag_list.append(element.split("=")[1])
                                train_word_list.append(element.split("=")[0])
                        train_word_lists.append(train_word_list)
                        tag_lists.append(tag_list)
        # Sanity check: tokenizer output must match the annotated tokens.
        for i in range(len(train_word_lists)):
            assert train_word_lists[i] == word_lists[i], " line {}, \n {} \n {}".format(i, word_lists[i], train_word_lists[i])
        return train_word_lists, tag_lists

    def to_id(self, word_lists, tag_lists, make_vocab = False):
        """Optionally build word->id and tag->id maps alongside the lists."""
        if make_vocab:
            word2id = self.build_map(word_lists)
            tag2id = self.build_map(tag_lists)
            return word_lists, tag_lists, word2id, tag2id
        else:
            return word_lists, tag_lists

    def id2word(self, word2id):
        """Invert a word->id map into an id->word map."""
        id_to_word = {id: word for (word, id) in word2id.items()}
        return id_to_word

    def extend_maps(self, word2id, tag2id, for_crf=True):
        """Add <unk>/<pad> (and, for CRF models, <start>/<end>) to both maps."""
        word2id['<unk>'] = len(word2id)
        word2id['<pad>'] = len(word2id)
        tag2id['<unk>'] = len(tag2id)
        tag2id['<pad>'] = len(tag2id)
        # For a BiLSTM with a CRF layer, <start> and <end> tokens are also needed.
        if for_crf:
            word2id['<start>'] = len(word2id)
            word2id['<end>'] = len(word2id)
            tag2id['<start>'] = len(tag2id)
            tag2id['<end>'] = len(tag2id)
        return word2id, tag2id

    def check(self, group1, group2):
        """
        compare the array with the marked entities after '####' to check if they have the same length.
        If not, delete that word in the tokenized group.
        """
        # NOTE(review): indentation of this method was ambiguous in the raw
        # source; reconstructed per the visible control flow -- confirm the
        # outer loop terminates for all inputs.
        new_group = []
        i = 0
        j = 0
        while i < len(group1):
            while j < len(group2):
                if group1[i] == group2[j]:
                    new_group.append(group1[i])
                    i+=1  # move to next
                    j+=1  # move to next
                elif group1[i] in group2[j]:  # meet mark behind
                    current = i
                    # Grow a span of group1 tokens until it equals group2[j].
                    for current in range(i, len(group1) + 1):
                        if ''.join(group1[i:current]) in group2[j]:
                            if ''.join(group1[i:current]) == group2[j]:
                                new_group.append(group2[j])
                                i = current
                                j+=1
                                break
                        else:
                            break
        return new_group

    def substring(self, words):
        """
        check one string if its the substring of the other strings
        !!!! very slow and takes up a huge amount of CPU. Please do not use it.
        """
        for i in range(len(words)):
            for j in range(i, len(words)):
                if len(words) >= 4:
                    if words[i] in words[j]:
                        words[j] = words[i]
                    elif words[j] in words[i]:
                        words[i] = words[j]
        return words

    def statistics(self, tags, processed = False):
        """Convert raw T-tags to BIOES in place (unless already processed)
        and print/count the number of POS/NEU/NEG entities.

        Returns the (mutated) tags.
        """
        for tag in tags:
            count = 0
            for i in range(len(tag)-1):
                if tag[i] != 'O' and not processed:  # must be an entity tag
                    if tag[i+1] == 'O' and count == 0:
                        tag[i] = 'S' + tag[i][1:]        # single-token entity
                    elif tag[i+1] != 'O' and count==0:
                        tag[i] = 'B' + tag[i][1:]        # entity begins
                        tag[i+1] = 'I' + tag[i+1][1:]
                        count+=1
                    elif count!=0 and tag[i+1] != 'O':
                        tag[i] = 'I'+ tag[i][1:]         # entity continues
                        tag[i+1] = 'E' + tag[i+1][1:]
                        count+=1
                    elif count!=0 and tag[i+1] == 'O':
                        tag[i] = 'E' + tag[i][1:]        # entity ends
                        count = 0
                tmp = i + 1
                if tmp == len(tag) -1:
                    # Last position: a lone entity tag becomes S.
                    if tag[tmp] != 'O' and tag[i] == 'O':
                        tag[tmp] = 'S' + tag[tmp][1:]
                    break
        # count the number of entities:
        num_entities = {'POS':0, 'NEU':0, 'NEG':0}
        i = 0
        count = 0
        for ta in tags:
            while i < len(ta):
                # Each entity ends in exactly one S or E tag.
                if ta[i][:1] == 'S' or ta[i][:1] == 'E':
                    num_entities[ta[i][2:]]+=1
                i+=1
            i = 0
        print(num_entities)
        return tags

    def tokenize(self, sentence, method = "re"):
        """
        tokenize the sentence into an array of words with certain patterns
        """
        if method == "nlp":
            # CoreNLP path: strip HTML tags and punctuation first.
            pattern = re.compile(r'<[^>]+>', re.S)
            punc = '!@#¥%&*()—“:’;、。,?》《%'
            sentence = sentence.strip()
            sentence = pattern.sub('', sentence)
            sentence = re.sub(r"[%s]+" % punc, "", sentence)
            sen_tk = nlp.word_tokenize(sentence)
        elif method == "re":
            # Regex path: normalize punctuation/contractions, then split on spaces.
            sentence = re.sub(r'(?<=\d)[\.](?=\d)','',sentence)
            sentence = re.sub(r' - ', ",", sentence)
            sentence = re.sub(r'-- ', " ", sentence)
            sentence = re.sub(r'- ', " ", sentence)
            sentence = re.sub(r'--', " -- ", sentence)
            sentence = sentence.replace(",", " , ")
            sentence = sentence.replace(":", " , ")
            sentence = sentence.replace(".", " . ")
            sentence = sentence.replace("=)", " = ")
            sentence = sentence.replace("?", " ? ")
            sentence = sentence.replace("!", " ! ")
            sentence = sentence.replace(";", " ; ")
            sentence = sentence.replace("$", " $ ")
            sentence = sentence.replace("%", " % ")
            sentence = sentence.replace("#", " # ")
            sentence = sentence.replace(">", " > ")
            #sentence = sentence.replace("*", " " )
            sentence = sentence.replace("n't", " n't")
            sentence = sentence.replace("'few", " few")
            sentence = re.sub(r'N\'T', " N'T",sentence)
            sentence = re.sub(r'didnt', "did n't",sentence)
            sentence = re.sub(r'dont', "do n't",sentence)
            sentence = re.sub(r'cant', "ca n't",sentence)
            sentence = re.sub(r'wouldnt', "would n't",sentence)
            sentence = re.sub(r'\'ve', " 've", sentence)
            sentence = re.sub(r'\'have', " have", sentence)
            sentence = re.sub(r'\'re', " 're", sentence)
            sentence = re.sub(r'\'s', " 's", sentence)
            sentence = re.sub(r'\'m ', " 'm ", sentence)
            sentence = re.sub(r'\'M ', " 'M ", sentence)
            sentence = re.sub(r'\'d', " 'd", sentence)
            sentence = re.sub(r'\'ll', " 'll", sentence)
            sentence = re.sub(r'[()]', "", sentence)
            sentence = re.sub(r'[\"\"]', "", sentence)
            sentence = re.sub(r'\' ', " ", sentence)
            sentence = re.sub(r'@', " @ ", sentence)
            sentence = sentence.replace("cannot", "can not")
            sentence = sentence.replace("  ", " ")
            sen_tk = re.split(" ", sentence)
        while '' in sen_tk:
            sen_tk.remove('')  # drop empty strings
        i = 0
        while i < len(sen_tk) -1:  # remove redundant periods (mid- and end-of-sentence)
            if sen_tk[i] == "." and sen_tk[i+1] == ".":
                del sen_tk[i]
            else:
                i+=1
        return sen_tk

    def save(self):
        # Placeholder: persistence not implemented.
        pass

    def train_valid_split(self, words, tags):
        """
        split to train and vadiation datasets
        X : words list->list
        y: tags list->list
        """
        X_train, X_valid, y_train, y_valid = train_test_split(words, tags, test_size=0.1, random_state=30244)
        print("----- Training statistics -----")
        train_tags = self.statistics(y_train, processed=True)
        print("----- Validation statistics -----")
        valid_tags = self.statistics(y_valid, processed=True)
        return X_train, X_valid, y_train, y_valid

    def build_map(self,lists):
        """Build an item->index map over all elements of the nested lists."""
        maps = {}
        for list_ in lists:
            for e in list_:
                if e not in maps:
                    maps[e] = len(maps)
        return maps
class Glove():
    """Load pretrained GloVe/word2vec vectors into an embedding weight matrix."""
    def __init__(self, vocab_size, embed_size, word2id):
        # NOTE(review): the vocab_size argument is ignored and re-derived
        # from word2id; the parameter is kept for backward compatibility.
        self.vocab_size = len(word2id)
        self.embed_size = embed_size
        self.word2id = word2id
    def glove_word2vec(self, glove_inputfile, word2vec_output_file):
        """Convert a GloVe text file to word2vec format on disk."""
        glove2word2vec(glove_inputfile, word2vec_output_file)
    def get_weight(self, file, word2id, id2word):
        """Build a (vocab_size, embed_size) tensor of pretrained embeddings.

        Words absent from word2id keep their random uniform(-0.5, 0.5)
        initialisation. Returns a torch.Tensor.
        """
        wvmodel = KeyedVectors.load_word2vec_format(file \
                                , binary=False, encoding='utf-8')
        torch.manual_seed(131415)
        weight = torch.Tensor(self.vocab_size, self.embed_size).uniform_(-0.5, 0.5)
        for i in range(len(wvmodel.index2word)):
            word = wvmodel.index2word[i]
            try:
                # BUG FIX: was word2id[wvmodel.indexword[i]] -- the misspelt
                # attribute raised AttributeError, which the old bare
                # `except:` swallowed, so NO pretrained vector was ever
                # copied. Catch only the expected KeyError now.
                index = word2id[word]
            except KeyError:
                continue
            weight[index, :] = torch.from_numpy(wvmodel.get_vector(
                                    id2word[word2id[word]]))
        return weight
|
from enum import Enum
from typing import List, Union, Tuple, Optional, Iterator
from smbus2 import SMBus
class PinMode(Enum):
    # Values match the PCA9536 configuration-register bits:
    # 0 = pin driven as output, 1 = pin read as input (power-on default).
    output = 0
    input = 1
class PCA9536Pin:
    """One of the four pins of a PCA9536 GPIO expander."""
    def __init__(self, device: "PCA9536", index: int):
        """Initialise the pin.

        Args:
            device: The owning PCA9536 object.
            index: The pin number, 0 through 3."""
        self.device = device
        self.index = index
    @property
    def mode(self) -> PinMode:
        """Get or set this pin's input/output mode.

        The mode is a PinMode value (PinMode.input or PinMode.output).
        When assigning, the strings "input" and "output" may be used in
        place of the corresponding PinMode values."""
        return self.device.mode[self.index]
    @mode.setter
    def mode(self, value: Union[PinMode, str]) -> None:
        self.device.mode = self._value_to_list(value)  # type: ignore
    @property
    def polarity_inversion(self) -> bool:
        """Get or set the polarity inversion for this pin.

        When True, the bit read from the pin is inverted: a low logic level
        reads as True and a high level as False. Output is unaffected."""
        return self.device.polarity_inversion[self.index]
    @polarity_inversion.setter
    def polarity_inversion(self, value: bool) -> None:
        self.device.polarity_inversion = self._value_to_list(value)  # type: ignore
    def read(self) -> bool:
        """Return this pin's current logic level.

        With polarity inversion disabled, True means a high logic level;
        with it enabled, the result is inverted."""
        return self.device.read()[self.index]
    def write(self, value: bool) -> None:
        """Set this pin's output flip-flop: high for True, low for False.

        Only the output latch is written; while the pin mode is input this
        has no effect on the observed logic level."""
        self.device.write(*self._value_to_list(value))
    def _value_to_list(self, value) -> Tuple:
        """Build a 4-tuple with *value* at this pin's index, None elsewhere."""
        return tuple(value if pos == self.index else None for pos in range(4))
class PCA9536:
    """Driver for the PCA9536 4-bit I2C GPIO expander."""
    def __init__(self, bus: SMBus, address: int = 0x41):
        """Initialise the driver.

        Args:
            bus: An open SMBus object.
            address: The I2C address of the device. Defaults to 0x41;
                the address is fixed in hardware, so it should never need
                changing."""
        self.address = address
        self.bus = bus
        self._pins: List[PCA9536Pin] = [PCA9536Pin(self, pin) for pin in range(4)]
    def __getitem__(self, item: int) -> PCA9536Pin:
        return self._pins[item]
    def __iter__(self) -> Iterator[PCA9536Pin]:
        return iter(self._pins)
    @property
    def mode(self) -> Tuple[PinMode, PinMode, PinMode, PinMode]:
        """Get or set the input/output mode of the four pins.

        Reading returns a tuple of four PinMode values. Assignment accepts
        either a single PinMode (applied to every pin) or a 4-tuple, where
        None entries leave the corresponding pin unchanged. The strings
        "input"/"output" may be used in place of PinMode values, e.g.:
            device.mode = "output"
            device.mode = PinMode.input, "output", "input", None
        """
        data = _read_bits(
            bus=self.bus, address=self.address, register=0x03, bitmask=0x0F
        )
        return (
            PinMode(data & 0x01),
            PinMode((data >> 1) & 0x01),
            PinMode((data >> 2) & 0x01),
            PinMode((data >> 3) & 0x01),
        )
    @mode.setter
    def mode(
        self,
        value: Union[
            PinMode,
            str,
            Tuple[
                Optional[Union[PinMode, str]],
                Optional[Union[PinMode, str]],
                Optional[Union[PinMode, str]],
                Optional[Union[PinMode, str]],
            ],
        ],
    ) -> None:
        if not isinstance(value, tuple):
            value = (value,) * 4
        modes = [PinMode[entry] if isinstance(entry, str) else entry for entry in value]
        # Configuration register 0x03: bit set = input, bit clear = output.
        # Only bits whose entry is not None are touched.
        _write_bits(
            bus=self.bus,
            address=self.address,
            register=0x03,
            value=_bools_to_bits(*(m == PinMode.input for m in modes)),
            bitmask=_bools_to_bits(*(m is not None for m in modes)),
        )
    @property
    def polarity_inversion(self) -> Tuple[bool, bool, bool, bool]:
        """Get or set the polarity inversion of the four pins.

        Reading returns a tuple of four booleans; True means the bit read
        from that pin is inverted (a low logic level reads as True).
        Assignment accepts a single boolean for every pin or a 4-tuple,
        where None entries leave the corresponding pin unchanged, e.g.:
            device.polarity_inversion = False
            device.polarity_inversion = False, None, True, True"""
        data = _read_bits(
            bus=self.bus, address=self.address, register=0x02, bitmask=0x0F
        )
        return _bits_to_bools(data)
    @polarity_inversion.setter
    def polarity_inversion(
        self,
        value: Union[
            bool,
            Tuple[
                Optional[bool],
                Optional[bool],
                Optional[bool],
                Optional[bool],
            ],
        ],
    ):
        if not isinstance(value, tuple):
            value = (value,) * 4
        # Polarity register 0x02; only bits with a non-None entry change.
        _write_bits(
            bus=self.bus,
            address=self.address,
            register=0x02,
            value=_bools_to_bits(*(entry is True for entry in value)),
            bitmask=_bools_to_bits(*(entry is not None for entry in value)),
        )
    def read(self) -> Tuple[bool, bool, bool, bool]:
        """Return the current logic level of each pin as four booleans.

        Levels are reported after polarity inversion: with inversion
        disabled, True means a high logic level."""
        raw = _read_bits(
            bus=self.bus, address=self.address, register=0x00, bitmask=0x0F
        )
        return _bits_to_bools(raw)
    def write(
        self,
        pin_0: Optional[bool] = None,
        pin_1: Optional[bool] = None,
        pin_2: Optional[bool] = None,
        pin_3: Optional[bool] = None,
    ):
        """Set the output flip-flop of one or more pins.

        True drives a pin high, False low, None leaves it unchanged. Only
        the output latches are written; pins whose mode is input keep the
        value latched without driving the line."""
        levels = (pin_0, pin_1, pin_2, pin_3)
        _write_bits(
            bus=self.bus,
            address=self.address,
            register=0x01,
            value=_bools_to_bits(*(level is True for level in levels)),
            bitmask=_bools_to_bits(*(level is not None for level in levels)),
        )
def _bools_to_bits(bool_0: bool, bool_1: bool, bool_2: bool, bool_3: bool) -> int:
return (bool_0 * 0x01) | (bool_1 * 0x02) | (bool_2 * 0x04) | (bool_3 * 0x08)
def _bits_to_bools(bits: int) -> Tuple[bool, bool, bool, bool]:
return (
bool(bits & 0x01),
bool(bits & 0x02),
bool(bits & 0x04),
bool(bits & 0x08),
)
def _read_bits(bus: SMBus, address: int, register: int, bitmask: int) -> int:
    """Read one register byte from the device and mask out unwanted bits."""
    raw = bus.read_byte_data(address, register=register)
    return raw & bitmask
def _write_bits(
    bus: SMBus, address: int, register: int, value: int, bitmask: int
) -> None:
    """Read-modify-write a register, changing only the bits in bitmask."""
    preserved = _read_bits(
        bus=bus, address=address, register=register, bitmask=0xFF - bitmask
    )
    merged = preserved | (value & bitmask)
    bus.write_byte_data(address, register=register, value=merged)
|
<reponame>genmeblog/twixtbot
#! /usr/bin/env python
""" Shared Memory Message Passing Protocol """
# python
import argparse
import collections
import mmap
import multiprocessing
import os
import select
import socket
import struct
import sys
debug = False  # set True for verbose protocol tracing on stderr
# mine
import timestat
IntPacker = struct.Struct("<L")  # little-endian unsigned 32-bit integer
# Two-byte status markers written at the tail of each shared-memory slot.
QUERY_AVAILABLE = "QQ"  # client has written a query
REPLY_AVAILABLE = "RR"  # server has written a reply
ANSWER_SENT = "SS"      # socket process has notified the client
SUICIDE_CODE = 0xdead3149  # magic slot count that asks the server to shut down
def checksum(b):
    """Placeholder checksum: always 0xcc.

    The real XOR-fold implementation is kept below, commented out; the
    constant preserves the protocol framing without the per-byte cost.
    """
    return 0xcc
    # return reduce(lambda x,y:x^y, map(ord, b))
class Client:
    """Client end of the shared-memory message-passing protocol (Python 2).

    Connects to the server's unix socket, maps the server's shared-memory
    file and requests a number of query slots.  Queries are written straight
    into shared memory; a per-slot status word tells each side when data is
    ready.  Replies are picked up either by polling slot status bytes
    (handle_ready_replies) or via the server's socket notification
    (handle_read).
    """
    def __init__(self, location, slots_needed=1, quiet=False):
        self.quiet = quiet
        self._init_socket(location)
        self._init_shmem(location)
        self._request_slots(slots_needed)
        # Suggested polling back-off bounds for callers; unused internally.
        self.min_timeout = 0.001
        self.max_timeout = 3.0
    def _init_socket(self, location):
        # The rendezvous socket lives at "<location>.sock".
        sock_name = location + ".sock"
        self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket.connect(sock_name)
    def _init_shmem(self, location):
        # Map the entire shared file "<location>.shm" created by the server.
        shmem_name = location + ".shm"
        self.shm_file = open(shmem_name, "r+b")
        self.shmem = mmap.mmap(self.shm_file.fileno(), 0)
    def _request_slots(self, slots_needed):
        """Request slots_needed slots; record message sizes and slot indices."""
        outdata = struct.pack("<L", slots_needed)
        if debug:
            print >>sys.stderr, "Sent request for %d slot(s)" % (slots_needed)
        self.socket.send(outdata)
        if slots_needed == SUICIDE_CODE:
            # Shutdown request: the server exits; no allocation reply comes.
            return
        self.slots = []
        self.cb_by_slot = dict()  # slot -> callback awaiting its reply
        # receive a message telling me the query_size, reply_size, and
        # slot indices I may use
        if debug:
            print >>sys.stderr, "expecting %d bytes" % (4*(2+slots_needed))
        indata = self.socket.recv(4*(2+slots_needed), socket.MSG_WAITALL)
        if debug:
            print >>sys.stderr, "received %d bytes" % (len(indata))
        self.query_size = IntPacker.unpack_from(indata, 0)[0]
        self.reply_size = IntPacker.unpack_from(indata, 4)[0]
        for i in range(slots_needed):
            self.slots.append(IntPacker.unpack_from(indata, 8+i*4)[0])
        # Slot layout: payload area, then 1 checksum byte, then 2 status bytes.
        self.slot_size = 3 + max(self.query_size, self.reply_size)
        self.unused_slots = list(self.slots)
        if not self.quiet:
            print "query_size=",self.query_size,"reply_size=",self.reply_size,"my_slots=[",",".join(map(str,self.slots)),"]"
            sys.stdout.flush()
    # end _request_slots
    def slot_locations(self, slot):
        # (start, end) byte offsets of this slot in shared memory.
        return slot*self.slot_size, (slot+1)*self.slot_size
    def is_full(self):
        """True when every allocated slot has a query in flight."""
        return len(self.unused_slots) == 0
    def write_query(self, request, reply_callback):
        """Write request into a free slot; reply_callback(reply) fires later."""
        if not type(request) == str:
            raise ValueError('queries must be bytes (strings)')
        if len(request) > self.query_size:
            raise ValueError('query too long')
        if not self.unused_slots:
            raise Exception('Too many queries!')
        slot = self.unused_slots.pop()
        if debug:
            print "POP",slot
        x, y = self.slot_locations(slot)
        self.shmem[x:x+len(request)] = request
        cs = checksum(request)
        self.shmem[y-3] = chr(cs)
        # Writing the status word last publishes the query to the server.
        self.shmem[y-2:y] = QUERY_AVAILABLE
        if debug:
            print >>sys.stderr, "client sent %d bytes with checksum %d" % (len(request), cs)
        self.cb_by_slot[slot] = reply_callback
    def handle_ready_replies(self):
        """Poll in-flight slots and process any finished replies.

        Returns the number of replies processed.
        """
        ready_slots = []
        for slot in self.cb_by_slot.keys():
            x, y = self.slot_locations(slot)
            status = self.shmem[y-2:y]
            if status == REPLY_AVAILABLE:
                ready_slots.append(slot)
        for slot in ready_slots:
            self.process_reply(slot)
        return len(ready_slots)
    def process_reply(self, slot):
        """Free the slot, verify the checksum and invoke the stored callback."""
        assert not slot in self.unused_slots, (slot, self.unused_slots)
        cb = self.cb_by_slot[slot]
        del self.cb_by_slot[slot]
        self.unused_slots.append(slot)
        if debug:
            print "PUSH",slot
        x, y = self.slot_locations(slot)
        replydata = bytes(self.shmem[x:x+self.reply_size])
        cs = checksum(replydata)
        if debug:
            print >>sys.stderr, "client received %d bytes with checksum %d" % (len(replydata), cs)
        assert cs == ord(self.shmem[y-3])
        cb(replydata)
    def handle_read(self):
        """ Receive a message telling me which slots have finished replies """
        indata = self.socket.recv(4*len(self.slots))
        if debug:
            print >>sys.stderr, "Client just read %d bytes", (len(indata))
        if len(indata) % 4 != 0:
            raise Exception("odd number of bytes")
        if len(indata) == 0:
            raise Exception("Server went away")
        # Each 4-byte integer is a slot number with a reply ready.
        for i in range(0, len(indata), 4):
            slot = IntPacker.unpack_from(indata, i)[0]
            self.process_reply(slot)
########################
class ServerSocketProcess(multiprocessing.Process):
    """Socket-handling half of the server, run as a separate process.

    Accepts client connections on a unix socket, hands out shared-memory
    slots, and relays "replies ready" notifications from the GPU process
    (received over notify_pipe) back to the clients owning those slots.
    """
    def __init__(self, location, capacity, query_size, reply_size, shmem, notify_pipe):
        multiprocessing.Process.__init__(self)
        # self.daemon = True
        self.location = location
        self.capacity = capacity
        self.query_size = query_size
        self.reply_size = reply_size
        # Slot layout: payload area, then 1 checksum byte, then 2 status bytes.
        self.slot_size = 3 + max(self.query_size, self.reply_size)
        self.shmem = shmem
        self.notify_pipe = notify_pipe
        self._init_socket()
        self.unused_slots = list(range(self.capacity))
        self.slot_user = dict()  # slot index -> owning client socket
        self.unallocated_sockets = []  # connected, not yet granted slots
    def _init_socket(self):
        """Bind and listen on the unix socket, removing any stale socket file."""
        self.socket_name = self.location + ".sock"
        if os.path.exists(self.socket_name):
            os.remove(self.socket_name)
        self.main_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.main_socket.bind(self.socket_name)
        self.main_socket.listen(5)
        # The pipe is select()ed alongside the sockets in run().
        self.all_sockets = [self.main_socket, self.notify_pipe]
        print "Ready for connections on", self.socket_name
        sys.stdout.flush()
    def slot_locations(self, slot):
        # (start, end) byte offsets of this slot in shared memory.
        return slot*self.slot_size, (slot+1)*self.slot_size
    def read_main_socket(self):
        """Accept a new client connection."""
        new_sock, _ = self.main_socket.accept()
        self.all_sockets.append(new_sock)
        self.unallocated_sockets.append(new_sock)
        print "opened connection. cons=%d free_slots=%d" % (len(self.all_sockets)-1, len(self.unused_slots))
        sys.stdout.flush()
    def allocate_other_socket(self, sock):
        """Serve a client's slot-allocation request (or shutdown request)."""
        try:
            # Receive the message requesting how many slots the client wants
            indata = sock.recv(4, socket.MSG_WAITALL)
            if len(indata) == 0:
                # counterparty left the house
                self.free_socket(sock)
                return
            elif len(indata) != 4:
                print "error: want exactly 4 bytes for capacity request"
                sys.stdout.flush()
                self.free_socket(sock)
                return
            assert sock in self.unallocated_sockets
            self.unallocated_sockets.remove(sock)
            desire = IntPacker.unpack(indata)[0]
            if desire == SUICIDE_CODE:
                # Magic value: tell the GPU side to die, then exit ourselves.
                print "suicide requested"
                sys.stdout.flush()
                self.notify_pipe.send("DIE")
                self.notify_pipe.close()
                self.main_socket.close()
                sys.exit(0)
            if desire > len(self.unused_slots):
                print "error: more slots requested (%d) than available (%d)" % (desire, len(self.unused_slots))
                sys.stdout.flush()
                self.free_socket(sock)
                return
            alloc_slots = [self.unused_slots.pop() for _ in range(desire)]
            # Reply: query_size, reply_size, then the allocated slot indices.
            msg_ints = [self.query_size, self.reply_size] + alloc_slots
            assert len(alloc_slots) == desire
            assert len(msg_ints) == 2+desire
            outdata = ''.join([IntPacker.pack(n) for n in msg_ints])
            for slot in alloc_slots:
                self.slot_user[slot] = sock
            sock.send(outdata)
            print "%d slot%s allocated, %d unused, outdata len=%d" % (desire, "" if desire == 1 else "s", len(self.unused_slots), len(outdata))
            sys.stdout.flush()
        except socket.error as serr:
            print "error in sock.send() in allocate_other_socket():",serr
            self.free_socket(sock)
    #end allocate_other_socket()
    def read_other_socket(self, sock):
        # Client sockets only ever send the initial allocation request.
        self.allocate_other_socket(sock)
    def free_socket(self, sock):
        """Close a client socket and reclaim every slot it owned."""
        if not sock in self.all_sockets:
            print "attempt to re-close socket!"
            return
        self.all_sockets.remove(sock)
        my_slots = [slot for slot, user in self.slot_user.items() if user == sock]
        for slot in my_slots:
            self.unused_slots.append(slot)
            del self.slot_user[slot]
        print "closed connection fielno=%d. conns=%d free_slots=%d" % (sock.fileno(), len(self.all_sockets)-1, len(self.unused_slots))
        sock.close()
        sys.stdout.flush()
    def send_out_replies(self):
        """Forward finished-reply slot numbers to each owning client."""
        self.notify_pipe.recv_bytes()  # drain the GPU side's "GO" ping
        outs_by_socket = collections.defaultdict(list)
        for slot, sock in self.slot_user.items():
            x, y = self.slot_locations(slot)
            status = self.shmem[y-2:y]
            if status == REPLY_AVAILABLE:
                outs_by_socket[sock].append(slot)
                self.shmem[y-2:y] = ANSWER_SENT  # avoid double notification
        for sock, outs in outs_by_socket.items():
            outdata = ''.join([IntPacker.pack(n) for n in outs])
            assert len(outdata) == 4*len(outs)
            try:
                sock.send(outdata)
            except socket.error as serr:
                print "error sock.send() fileno:",sock.fileno(),"error:",serr
                self.free_socket(sock)
    # send_out_replies
    def run(self):
        """React forever to new connections, GPU pings and client requests."""
        while True:
            rd, _, _ = select.select(self.all_sockets, [], [])
            for sock in rd:
                if sock == self.main_socket:
                    self.read_main_socket()
                elif sock == self.notify_pipe:
                    self.send_out_replies()
                else:
                    self.read_other_socket(sock)
class Server:
    """GPU side of the shared-memory protocol (Python 2).

    Owns the shared-memory file, polls slots for queries, runs batches via
    run_jobs() (supplied by a subclass), writes replies back into shared
    memory and pings the socket process so it can notify clients.
    """
    def __init__(self, location, capacity, query_size, reply_size, milestone_step=0):
        self.location = location
        self.capacity = capacity
        self.query_size = query_size
        self.reply_size = reply_size
        # Slot layout: payload area, then 1 checksum byte, then 2 status bytes.
        self.slot_size = 3 + max(query_size, reply_size)
        # Print timing stats every milestone_step units of work (0 = never).
        self.next_milestone = milestone_step
        self.milestone_step = milestone_step
        # Exponential back-off bounds for the idle select() timeout.
        self.min_timeout = 0.001
        # self.max_timeout = 0.064
        self.max_timeout = 1.0
        self._init_shmem()
        self._init_timers()
        print "query_size",query_size,"reply_size",reply_size
    def slot_locations(self, slot):
        # (start, end) byte offsets of this slot in shared memory.
        return slot*self.slot_size, (slot+1)*self.slot_size
    def _init_shmem(self):
        """Create (and size) the shared-memory backing file, then map it."""
        self.shmem_name = self.location + ".shm"
        self.shmem_file = open(self.shmem_name, "w+b")
        # Seek to the last byte and write one NUL to extend the file.
        self.shmem_file.seek(self.slot_size*self.capacity - 1, 0)
        self.shmem_file.write(b'\0')
        self.shmem_file.seek(0, 0)
        self.shmem = mmap.mmap(self.shmem_file.fileno(), 0)
    def _init_timers(self):
        # Wall-clock and per-work-unit timers for profiling the pipeline.
        self.waiting_timer = timestat.TimeStat("waiting", ignore=15.0)
        self.preproc_timer = timestat.TimeStat("preprocessing")
        self.gpu_timer = timestat.WorkTimeStat("gpu")
        self.pp_shm_timer = timestat.WorkTimeStat("pp_shmem")
        self.pp_sock_timer = timestat.WorkTimeStat("pp_socket")
    def _look_for_jobs(self):
        """Collect all slots whose status word says a query is waiting."""
        for slot in range(self.capacity):
            x, y = self.slot_locations(slot)
            status = self.shmem[y-2:y]
            if status == QUERY_AVAILABLE:
                self.job_slots.append(slot)
    def run(self):
        """Start the socket process, then run the GPU loop in this process."""
        self.sock_conn, self.gpu_conn = multiprocessing.Pipe(True)
        socketeer = ServerSocketProcess(self.location, self.capacity,
            self.query_size, self.reply_size, self.shmem, self.sock_conn)
        socketeer.start()
        self.run_gpu_side()
    def run_gpu_side(self):
        """Main loop: poll for queries, batch them, back off while idle."""
        print >>sys.stderr, "gpu side going!"
        cur_timeout = self.min_timeout
        while True:
            self.waiting_timer.start()
            self.job_slots = []
            self._look_for_jobs()
            # Poll immediately while busy; otherwise wait with back-off.
            timeout = 0.0 if self.job_slots else cur_timeout
            if debug:
                print >>sys.stderr, "timeout=", timeout
            rd, _, _ = select.select([self.gpu_conn], [], [], timeout)
            for sock in rd:
                x = sock.recv()
                if x == "DIE" or len(x) == 0:
                    print "connecting socket closed; gpu side quit."
                    sys.stdout.flush()
                    sys.exit(0)
            self.waiting_timer.stop()
            if self.job_slots:
                self.prepare_run_and_reply()
                cur_timeout = self.min_timeout
            elif cur_timeout < self.max_timeout:
                cur_timeout *= 2  # exponential back-off while idle
            if self.milestone_step and self.gpu_timer.total_work() >= self.next_milestone:
                print "----"
                print str(self.waiting_timer)
                print str(self.preproc_timer)
                print str(self.gpu_timer)
                print str(self.pp_shm_timer)
                print str(self.pp_sock_timer)
                sys.stdout.flush()
                self.next_milestone += self.milestone_step
    # end run()
    def prepare_run_and_reply(self):
        """Read queued queries, run them as one batch, write replies back."""
        if debug:
            print >>sys.stderr, "jobs", ",".join(map(str,self.job_slots))
        self.preproc_timer.start()
        work = []
        for slot in self.job_slots:
            x, y = self.slot_locations(slot)
            wtd = bytes(self.shmem[x:x+self.query_size])
            cs = checksum(wtd)
            if debug:
                print >>sys.stderr, "server received %d bytes with checksum %d" % (len(wtd), cs)
            assert cs == ord(self.shmem[y-3]), (cs, ord(self.shmem[y-3]))
            work.append(wtd)
        self.preproc_timer.stop()
        self.gpu_timer.start(len(self.job_slots))
        if debug:
            print >>sys.stderr, "about to call self.run_jobs"
        replies = self.run_jobs(work)
        if debug:
            print >>sys.stderr, "finished with self.run_jobs"
        self.gpu_timer.stop()
        self.pp_shm_timer.start(len(self.job_slots))
        for reply, slot in zip(replies, self.job_slots):
            assert len(reply) <= self.reply_size
            x, y = self.slot_locations(slot)
            self.shmem[x:x+len(reply)] = reply
            cs = checksum(reply)
            self.shmem[y-3] = chr(cs)
            # Publishing the status word lets the socket process notify the client.
            self.shmem[y-2:y] = REPLY_AVAILABLE
            if debug:
                print >>sys.stderr, "server sent %d bytes with checksum %d" % (len(reply), cs)
        self.pp_shm_timer.stop()
        self.pp_sock_timer.start(len(self.job_slots))
        self.gpu_conn.send_bytes("GO")  # wake the socket process
        self.pp_sock_timer.stop()
        self.job_slots = []
    #end prepare_run_and_reply
class SillyServer(Server):
    """Test server: replies with each query string reversed."""
    def run_jobs(self, jobs):
        # Slice-reversal is equivalent to ''.join(reversed(job)) for strings.
        return [job[::-1] for job in jobs]
if __name__ == "__main__":
    # Smoke test: "prog location" runs a SillyServer at that location;
    # "prog location message" runs a client that sends the message and
    # prints the (reversed) reply.
    if len(sys.argv) == 2:
        print "running silly server"
        sys.stdout.flush()
        S = SillyServer(sys.argv[1], 10, 100, 100)
        S.run()
    elif len(sys.argv) == 3:
        print "running silly client"
        def cb(x):
            print "got back",x
        C = Client(sys.argv[1])
        C.write_query(sys.argv[2], cb)
        C.handle_read()
    else:
        print >>sys.stderr, "usage: %s location [send_message]" % (sys.argv[0])
|
<gh_stars>0
# -*- coding: utf-8 -*-
import csv
from datetime import datetime, timedelta
import string
try:
import cStringIO as StringIO
except ImportError:
import StringIO
import locale
# Use US conventions for digit grouping in area/population formatting;
# silently fall back when the en_US locale is not installed on the host.
try:
    locale.setlocale(locale.LC_ALL, 'en_US')
except locale.Error:
    pass
from gluon.dal import Field
from gluon.html import *
from gluon.http import HTTP
def get_id(args):
    """Return ID at first arg if defined and a number, else zero
    """
    record_id = 0
    if args:
        try:
            record_id = to_int(args[0])
        except ValueError:
            pass
    return record_id
def to_int(s):
    """Return the integer formed by the digits of *s* (0 when none)."""
    digits = [c for c in s if c.isdigit()]
    # Leading '0' guarantees int() succeeds even for an all-letter string.
    return int('0' + ''.join(digits))
def safe(*words):
    """Return a URL-safe slug built from *words*, joined by hyphens.

    Each word is stripped, spaces become hyphens, and any character that
    is not an ASCII letter, digit or hyphen is dropped.
    """
    # string.ascii_letters instead of the Python-2-only, locale-dependent
    # string.letters: slugs must be stable regardless of the active locale
    # (this module calls locale.setlocale at import time), and ascii_letters
    # exists on both Python 2 and 3.
    safe_chars = string.ascii_letters + string.digits + '-'
    result = []
    for word in words:
        cleaned = str(word).strip().replace(' ', '-')
        result.append(''.join(c for c in cleaned if c in safe_chars))
    return '-'.join(result)
def format_records(records, num_columns=2):
    """Render records as an HTML table with num_columns cells per row.

    Each cell holds the record's pretty_link. Fixes two defects in the
    original: the num_columns parameter was ignored (the row boundaries
    were hard-coded to 2), and a trailing partial row was silently dropped.
    """
    trs = []
    tds = []
    for i, record in enumerate(records):
        if i % num_columns == 0:
            tds = []
        tds.append(XML(record.pretty_link))
        if i % num_columns == num_columns - 1:
            trs.append(TR(tds))
    # Emit the final, partially-filled row (if any).
    if tds and len(tds) % num_columns != 0:
        trs.append(TR(tds))
    return TABLE(trs)
class Places:
    def __init__(self, db, expire_db_secs=3600):
        """Define the places table and (re)load it when empty or stale.

        db: gluon DAL database connection
        expire_db_secs: how long before refresh database (seconds)
        """
        self.db = db
        self.fields = 'iso', 'country', 'capital', 'area', 'population', 'continent', 'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours'
        # These two get explicit typed Field definitions below; the rest
        # default to plain string fields.
        self.numeric_fields = 'area', 'population'
        db.define_table('places',
            Field('created_on', 'datetime', writable=False, readable=False, default=datetime.now),
            Field('country_id', 'integer', writable=False, readable=False),
            Field('national_flag', writable=False, represent=lambda path: IMG(_src=path)),
            Field('area', 'double', represent=lambda area: locale.format("%d", area, grouping=True) + ' square kilometres'),
            Field('population', 'integer', represent=lambda pop: locale.format("%d", pop, grouping=True)),
            #Field('population', 'integer', represent=lambda pop: '{0:,d}'.format(pop)),
            *[Field(field, 'string') for field in self.fields if field not in self.numeric_fields]
            #format=lambda record: XML('<a href="%(pretty_url)s">%(country)s (%(iso)s)</a>' % record)
        )
        # define user friendly url for accessing a place
        db.places.pretty_url = Field.Virtual('pretty_url', lambda record: URL(c='default', f='view', extension=False, args=safe(record.places.country, record.places.country_id)))
        db.places.pretty_link = Field.Virtual('pretty_link', lambda record: str(\
            DIV(
                A(
                    IMG(_src=record.places.national_flag),
                    #db.places.national_flag.represent(record.national_flag),
                    ' %s' % (record.places.country),
                    _href=URL(c='default', f='view', extension=False, args=safe(record.places.country, record.places.country_id))
                )
            )
        ))
        db.places.continent.represent = lambda continent: A(continent, _href=URL(f='continent', args=continent))
        db.places.neighbours.represent = lambda neighbours: DIV(
            [A(neighbour + ' ', _href=URL(f='iso', args=neighbour)) for neighbour in neighbours.split(',')]
        )
        #db.places.pretty_url = Field.Virtual('pretty_url', lambda record: URL(c='default', f='view', extension=False, args=safe(record.places.country, record.places.country_id)))
        # Reload the country data when the table is empty or the cached rows
        # are older than expire_db_secs.
        if db(db.places).count() == 0:
            self.load()
        else:
            created_on = db(db.places).select().first().created_on
            if created_on is None or created_on + timedelta(seconds=expire_db_secs) < datetime.now():
                self.load()
def get(self, country_id=None, iso=None):
if country_id is not None:
logic = self.db.places.country_id == country_id
elif iso is not None:
logic = self.db.places.iso == iso
place = self.db(logic).select().first()
if place:
return place
else:
raise HTTP(404, 'Invalid record')
def search(self, logic=None, limitby=None):
return self.db(logic).select(limitby=limitby, orderby=self.db.places.country_id)
    def load(self):
        """Load the built-in country CSV into ``db.places``.

        Deletes any existing rows first, sorts the rows by country name,
        coerces numeric columns to int/float, and adds a synthetic
        sequential ``country_id`` (for ordering on GAE, which has no
        sequential IDs) plus a ``national_flag`` image URL to every record.
        """
        # Embedded GeoNames-style country dump, one CSV row per country.
        # Columns presumably follow self.fields order (iso, country,
        # capital, area, population, continent, tld, currency code/name,
        # phone, postal format/regex, languages, neighbours) -- TODO
        # confirm against the table definition.  Quoted fields may
        # contain commas.
        text = """\
AD,Andorra,Andorra la Vella,468,84000,EU,.ad,EUR,Euro,376,AD###,^(?:AD)*(\d{3})$,ca,"ES,FR"
AE,United Arab Emirates,Abu Dhabi,82880,4975593,AS,.ae,AED,Dirham,971,,,"ar-AE,fa,en,hi,ur","SA,OM"
AF,Afghanistan,Kabul,647500,29121286,AS,.af,AFN,Afghani,93,,,"fa-AF,ps,uz-AF,tk","TM,CN,IR,TJ,PK,UZ"
AG,Antigua and Barbuda,St. John's,443,86754,NA,.ag,XCD,Dollar,+1-268,,,en-AG,
AI,Anguilla,The Valley,102,13254,NA,.ai,XCD,Dollar,+1-264,,,en-AI,
AL,Albania,Tirana,28748,2986952,EU,.al,ALL,Lek,355,,,"sq,el","MK,GR,CS,ME,RS,XK"
AM,Armenia,Yerevan,29800,2968000,AS,.am,AMD,Dram,374,######,^(\d{6})$,hy,"GE,IR,AZ,TR"
AO,Angola,Luanda,1246700,13068161,AF,.ao,AOA,Kwanza,244,,,pt-AO,"CD,NA,ZM,CG"
AQ,Antarctica,,14000000,0,AN,.aq,,,,,,,
AR,Argentina,Buenos Aires,2766890,41343201,SA,.ar,ARS,Peso,54,@####@@@,^([A-Z]\d{4}[A-Z]{3})$,"es-AR,en,it,de,fr,gn","CL,BO,UY,PY,BR"
AS,American Samoa,Pago Pago,199,57881,OC,.as,USD,Dollar,+1-684,,,"en-AS,sm,to",
AT,Austria,Vienna,83858,8205000,EU,.at,EUR,Euro,43,####,^(\d{4})$,"de-AT,hr,hu,sl","CH,DE,HU,SK,CZ,IT,SI,LI"
AU,Australia,Canberra,7686850,21515754,OC,.au,AUD,Dollar,61,####,^(\d{4})$,en-AU,
AW,Aruba,Oranjestad,193,71566,NA,.aw,AWG,Guilder,297,,,"nl-AW,es,en",
AX,Aland Islands,Mariehamn,1580,26711,EU,.ax,EUR,Euro,+358-18,#####,^(?:FI)*(\d{5})$,sv-AX,
AZ,Azerbaijan,Baku,86600,8303512,AS,.az,AZN,Manat,994,AZ ####,^(?:AZ)*(\d{4})$,"az,ru,hy","GE,IR,AM,TR,RU"
BA,Bosnia and Herzegovina,Sarajevo,51129,4590000,EU,.ba,BAM,Marka,387,#####,^(\d{5})$,"bs,hr-BA,sr-BA","CS,HR,ME,RS"
BB,Barbados,Bridgetown,431,285653,NA,.bb,BBD,Dollar,+1-246,BB#####,^(?:BB)*(\d{5})$,en-BB,
BD,Bangladesh,Dhaka,144000,156118464,AS,.bd,BDT,Taka,880,####,^(\d{4})$,"bn-BD,en","MM,IN"
BE,Belgium,Brussels,30510,10403000,EU,.be,EUR,Euro,32,####,^(\d{4})$,"nl-BE,fr-BE,de-BE","DE,NL,LU,FR"
BF,Burkina Faso,Ouagadougou,274200,16241811,AF,.bf,XOF,Franc,226,,,fr-BF,"NE,BJ,GH,CI,TG,ML"
BG,Bulgaria,Sofia,110910,7148785,EU,.bg,BGN,Lev,359,####,^(\d{4})$,"bg,tr-BG","MK,GR,RO,CS,TR,RS"
BH,Bahrain,Manama,665,738004,AS,.bh,BHD,Dinar,973,####|###,^(\d{3}\d?)$,"ar-BH,en,fa,ur",
BI,Burundi,Bujumbura,27830,9863117,AF,.bi,BIF,Franc,257,,,"fr-BI,rn","TZ,CD,RW"
BJ,Benin,Porto-Novo,112620,9056010,AF,.bj,XOF,Franc,229,,,fr-BJ,"NE,TG,BF,NG"
BL,Saint Barthelemy,Gustavia,21,8450,NA,.gp,EUR,Euro,590,### ###,,fr,
BM,Bermuda,Hamilton,53,65365,NA,.bm,BMD,Dollar,+1-441,@@ ##,^([A-Z]{2}\d{2})$,"en-BM,pt",
BN,Brunei,Bandar Seri Begawan,5770,395027,AS,.bn,BND,Dollar,673,@@####,^([A-Z]{2}\d{4})$,"ms-BN,en-BN",MY
BO,Bolivia,Sucre,1098580,9947418,SA,.bo,BOB,Boliviano,591,,,"es-BO,qu,ay","PE,CL,PY,BR,AR"
BQ,"Bonaire, Saint Eustatius and Saba ",,328,18012,NA,.bq,USD,Dollar,599,,,"nl,pap,en",
BR,Brazil,Brasilia,8511965,201103330,SA,.br,BRL,Real,55,#####-###,^(\d{8})$,"pt-BR,es,en,fr","SR,PE,BO,UY,GY,PY,GF,VE,CO,AR"
BS,Bahamas,Nassau,13940,301790,NA,.bs,BSD,Dollar,+1-242,,,en-BS,
BT,Bhutan,Thimphu,47000,699847,AS,.bt,BTN,Ngultrum,975,,,dz,"CN,IN"
BV,Bouvet Island,,49,0,AN,.bv,NOK,Krone,,,,,
BW,Botswana,Gaborone,600370,2029307,AF,.bw,BWP,Pula,267,,,"en-BW,tn-BW","ZW,ZA,NA"
BY,Belarus,Minsk,207600,9685000,EU,.by,BYR,Ruble,375,######,^(\d{6})$,"be,ru","PL,LT,UA,RU,LV"
BZ,Belize,Belmopan,22966,314522,NA,.bz,BZD,Dollar,501,,,"en-BZ,es","GT,MX"
CA,Canada,Ottawa,9984670,33679000,NA,.ca,CAD,Dollar,1,@#@ #@#,^([ABCEGHJKLMNPRSTVXY]\d[ABCEGHJKLMNPRSTVWXYZ]) ?(\d[ABCEGHJKLMNPRSTVWXYZ]\d)$ ,"en-CA,fr-CA,iu",US
CC,Cocos Islands,West Island,14,628,AS,.cc,AUD,Dollar,61,,,"ms-CC,en",
CD,Democratic Republic of the Congo,Kinshasa,2345410,70916439,AF,.cd,CDF,Franc,243,,,"fr-CD,ln,kg","TZ,CF,SS,RW,ZM,BI,UG,CG,AO"
CF,Central African Republic,Bangui,622984,4844927,AF,.cf,XAF,Franc,236,,,"fr-CF,sg,ln,kg","TD,SD,CD,SS,CM,CG"
CG,Republic of the Congo,Brazzaville,342000,3039126,AF,.cg,XAF,Franc,242,,,"fr-CG,kg,ln-CG","CF,GA,CD,CM,AO"
CH,Switzerland,Berne,41290,7581000,EU,.ch,CHF,Franc,41,####,^(\d{4})$,"de-CH,fr-CH,it-CH,rm","DE,IT,LI,FR,AT"
CI,Ivory Coast,Yamoussoukro,322460,21058798,AF,.ci,XOF,Franc,225,,,fr-CI,"LR,GH,GN,BF,ML"
CK,Cook Islands,Avarua,240,21388,OC,.ck,NZD,Dollar,682,,,"en-CK,mi",
CL,Chile,Santiago,756950,16746491,SA,.cl,CLP,Peso,56,#######,^(\d{7})$,es-CL,"PE,BO,AR"
CM,Cameroon,Yaounde,475440,19294149,AF,.cm,XAF,Franc,237,,,"en-CM,fr-CM","TD,CF,GA,GQ,CG,NG"
CN,China,Beijing,9596960,1330044000,AS,.cn,CNY,Yuan Renminbi,86,######,^(\d{6})$,"zh-CN,yue,wuu,dta,ug,za","LA,BT,TJ,KZ,MN,AF,NP,MM,KG,PK,KP,RU,VN,IN"
CO,Colombia,Bogota,1138910,44205293,SA,.co,COP,Peso,57,,,es-CO,"EC,PE,PA,BR,VE"
CR,Costa Rica,San Jose,51100,4516220,NA,.cr,CRC,Colon,506,####,^(\d{4})$,"es-CR,en","PA,NI"
CU,Cuba,Havana,110860,11423000,NA,.cu,CUP,Peso,53,CP #####,^(?:CP)*(\d{5})$,es-CU,US
CV,Cape Verde,Praia,4033,508659,AF,.cv,CVE,Escudo,238,####,^(\d{4})$,pt-CV,
CW,Curacao, Willemstad,444,141766,NA,.cw,ANG,Guilder,599,,,"nl,pap",
CX,Christmas Island,Flying Fish Cove,135,1500,AS,.cx,AUD,Dollar,61,####,^(\d{4})$,"en,zh,ms-CC",
CY,Cyprus,Nicosia,9250,1102677,EU,.cy,EUR,Euro,357,####,^(\d{4})$,"el-CY,tr-CY,en",
CZ,Czech Republic,Prague,78866,10476000,EU,.cz,CZK,Koruna,420,### ##,^(\d{5})$,"cs,sk","PL,DE,SK,AT"
DE,Germany,Berlin,357021,81802257,EU,.de,EUR,Euro,49,#####,^(\d{5})$,de,"CH,PL,NL,DK,BE,CZ,LU,FR,AT"
DJ,Djibouti,Djibouti,23000,740528,AF,.dj,DJF,Franc,253,,,"fr-DJ,ar,so-DJ,aa","ER,ET,SO"
DK,Denmark,Copenhagen,43094,5484000,EU,.dk,DKK,Krone,45,####,^(\d{4})$,"da-DK,en,fo,de-DK",DE
DM,Dominica,Roseau,754,72813,NA,.dm,XCD,Dollar,+1-767,,,en-DM,
DO,Dominican Republic,Santo Domingo,48730,9823821,NA,.do,DOP,Peso,+1-809 and 1-829,#####,^(\d{5})$,es-DO,HT
DZ,Algeria,Algiers,2381740,34586184,AF,.dz,DZD,Dinar,213,#####,^(\d{5})$,ar-DZ,"NE,EH,LY,MR,TN,MA,ML"
EC,Ecuador,Quito,283560,14790608,SA,.ec,USD,Dollar,593,@####@,^([a-zA-Z]\d{4}[a-zA-Z])$,es-EC,"PE,CO"
EE,Estonia,Tallinn,45226,1291170,EU,.ee,EUR,Euro,372,#####,^(\d{5})$,"et,ru","RU,LV"
EG,Egypt,Cairo,1001450,80471869,AF,.eg,EGP,Pound,20,#####,^(\d{5})$,"ar-EG,en,fr","LY,SD,IL"
EH,Western Sahara,El-Aaiun,266000,273008,AF,.eh,MAD,Dirham,212,,,"ar,mey","DZ,MR,MA"
ER,Eritrea,Asmara,121320,5792984,AF,.er,ERN,Nakfa,291,,,"aa-ER,ar,tig,kun,ti-ER","ET,SD,DJ"
ES,Spain,Madrid,504782,46505963,EU,.es,EUR,Euro,34,#####,^(\d{5})$,"es-ES,ca,gl,eu,oc","AD,PT,GI,FR,MA"
ET,Ethiopia,Addis Ababa,1127127,88013491,AF,.et,ETB,Birr,251,####,^(\d{4})$,"am,en-ET,om-ET,ti-ET,so-ET,sid","ER,KE,SD,SS,SO,DJ"
FI,Finland,Helsinki,337030,5244000,EU,.fi,EUR,Euro,358,#####,^(?:FI)*(\d{5})$,"fi-FI,sv-FI,smn","NO,RU,SE"
FJ,Fiji,Suva,18270,875983,OC,.fj,FJD,Dollar,679,,,"en-FJ,fj",
FK,Falkland Islands,Stanley,12173,2638,SA,.fk,FKP,Pound,500,,,en-FK,
FM,Micronesia,Palikir,702,107708,OC,.fm,USD,Dollar,691,#####,^(\d{5})$,"en-FM,chk,pon,yap,kos,uli,woe,nkr,kpg",
FO,Faroe Islands,Torshavn,1399,48228,EU,.fo,DKK,Krone,298,FO-###,^(?:FO)*(\d{3})$,"fo,da-FO",
FR,France,Paris,547030,64768389,EU,.fr,EUR,Euro,33,#####,^(\d{5})$,"fr-FR,frp,br,co,ca,eu,oc","CH,DE,BE,LU,IT,AD,MC,ES"
GA,Gabon,Libreville,267667,1545255,AF,.ga,XAF,Franc,241,,,fr-GA,"CM,GQ,CG"
GB,United Kingdom,London,244820,62348447,EU,.uk,GBP,Pound,44,@# #@@|@## #@@|@@# #@@|@@## #@@|@#@ #@@|@@#@ #@@|GIR0AA,^(([A-Z]\d{2}[A-Z]{2})|([A-Z]\d{3}[A-Z]{2})|([A-Z]{2}\d{2}[A-Z]{2})|([A-Z]{2}\d{3}[A-Z]{2})|([A-Z]\d[A-Z]\d[A-Z]{2})|([A-Z]{2}\d[A-Z]\d[A-Z]{2})|(GIR0AA))$,"en-GB,cy-GB,gd",IE
GD,Grenada,St. George's,344,107818,NA,.gd,XCD,Dollar,+1-473,,,en-GD,
GE,Georgia,Tbilisi,69700,4630000,AS,.ge,GEL,Lari,995,####,^(\d{4})$,"ka,ru,hy,az","AM,AZ,TR,RU"
GF,French Guiana,Cayenne,91000,195506,SA,.gf,EUR,Euro,594,#####,^((97|98)3\d{2})$,fr-GF,"SR,BR"
GG,Guernsey,St Peter Port,78,65228,EU,.gg,GBP,Pound,+44-1481,@# #@@|@## #@@|@@# #@@|@@## #@@|@#@ #@@|@@#@ #@@|GIR0AA,^(([A-Z]\d{2}[A-Z]{2})|([A-Z]\d{3}[A-Z]{2})|([A-Z]{2}\d{2}[A-Z]{2})|([A-Z]{2}\d{3}[A-Z]{2})|([A-Z]\d[A-Z]\d[A-Z]{2})|([A-Z]{2}\d[A-Z]\d[A-Z]{2})|(GIR0AA))$,"en,fr",
GH,Ghana,Accra,239460,24339838,AF,.gh,GHS,Cedi,233,,,"en-GH,ak,ee,tw","CI,TG,BF"
GI,Gibraltar,Gibraltar,6.5,27884,EU,.gi,GIP,Pound,350,,,"en-GI,es,it,pt",ES
GL,Greenland,Nuuk,2166086,56375,NA,.gl,DKK,Krone,299,####,^(\d{4})$,"kl,da-GL,en",
GM,Gambia,Banjul,11300,1593256,AF,.gm,GMD,Dalasi,220,,,"en-GM,mnk,wof,wo,ff",SN
GN,Guinea,Conakry,245857,10324025,AF,.gn,GNF,Franc,224,,,fr-GN,"LR,SN,SL,CI,GW,ML"
GP,Guadeloupe,Basse-Terre,1780,443000,NA,.gp,EUR,Euro,590,#####,^((97|98)\d{3})$,fr-GP,AN
GQ,Equatorial Guinea,Malabo,28051,1014999,AF,.gq,XAF,Franc,240,,,"es-GQ,fr","GA,CM"
GR,Greece,Athens,131940,11000000,EU,.gr,EUR,Euro,30,### ##,^(\d{5})$,"el-GR,en,fr","AL,MK,TR,BG"
GS,South Georgia and the South Sandwich Islands,Grytviken,3903,30,AN,.gs,GBP,Pound,,,,en,
GT,Guatemala,Guatemala City,108890,13550440,NA,.gt,GTQ,Quetzal,502,#####,^(\d{5})$,es-GT,"MX,HN,BZ,SV"
GU,Guam,Hagatna,549,159358,OC,.gu,USD,Dollar,+1-671,969##,^(969\d{2})$,"en-GU,ch-GU",
GW,Guinea-Bissau,Bissau,36120,1565126,AF,.gw,XOF,Franc,245,####,^(\d{4})$,"pt-GW,pov","SN,GN"
GY,Guyana,Georgetown,214970,748486,SA,.gy,GYD,Dollar,592,,,en-GY,"SR,BR,VE"
HK,Hong Kong,Hong Kong,1092,6898686,AS,.hk,HKD,Dollar,852,,,"zh-HK,yue,zh,en",
HM,Heard Island and McDonald Islands,,412,0,AN,.hm,AUD,Dollar, ,,,,
HN,Honduras,Tegucigalpa,112090,7989415,NA,.hn,HNL,Lempira,504,@@####,^([A-Z]{2}\d{4})$,es-HN,"GT,NI,SV"
HR,Croatia,Zagreb,56542,4491000,EU,.hr,HRK,Kuna,385,#####,^(?:HR)*(\d{5})$,"hr-HR,sr","HU,SI,CS,BA,ME,RS"
HT,Haiti,Port-au-Prince,27750,9648924,NA,.ht,HTG,Gourde,509,HT####,^(?:HT)*(\d{4})$,"ht,fr-HT",DO
HU,Hungary,Budapest,93030,9982000,EU,.hu,HUF,Forint,36,####,^(\d{4})$,hu-HU,"SK,SI,RO,UA,CS,HR,AT,RS"
ID,Indonesia,Jakarta,1919440,242968342,AS,.id,IDR,Rupiah,62,#####,^(\d{5})$,"id,en,nl,jv","PG,TL,MY"
IE,Ireland,Dublin,70280,4622917,EU,.ie,EUR,Euro,353,,,"en-IE,ga-IE",GB
IL,Israel,Jerusalem,20770,7353985,AS,.il,ILS,Shekel,972,#####,^(\d{5})$,"he,ar-IL,en-IL,","SY,JO,LB,EG,PS"
IM,Isle of Man,"Douglas, Isle of Man",572,75049,EU,.im,GBP,Pound,+44-1624,@# #@@|@## #@@|@@# #@@|@@## #@@|@#@ #@@|@@#@ #@@|GIR0AA,^(([A-Z]\d{2}[A-Z]{2})|([A-Z]\d{3}[A-Z]{2})|([A-Z]{2}\d{2}[A-Z]{2})|([A-Z]{2}\d{3}[A-Z]{2})|([A-Z]\d[A-Z]\d[A-Z]{2})|([A-Z]{2}\d[A-Z]\d[A-Z]{2})|(GIR0AA))$,"en,gv",
IN,India,New Delhi,3287590,1173108018,AS,.in,INR,Rupee,91,######,^(\d{6})$,"en-IN,hi,bn,te,mr,ta,ur,gu,kn,ml,or,pa,as,bh,sat,ks,ne,sd,kok,doi,mni,sit,sa,fr,lus,inc","CN,NP,MM,BT,PK,BD"
IO,British Indian Ocean Territory,Diego Garcia,60,4000,AS,.io,USD,Dollar,246,,,en-IO,
IQ,Iraq,Baghdad,437072,29671605,AS,.iq,IQD,Dinar,964,#####,^(\d{5})$,"ar-IQ,ku,hy","SY,SA,IR,JO,TR,KW"
IR,Iran,Tehran,1648000,76923300,AS,.ir,IRR,Rial,98,##########,^(\d{10})$,"fa-IR,ku","TM,AF,IQ,AM,PK,AZ,TR"
IS,Iceland,Reykjavik,103000,308910,EU,.is,ISK,Krona,354,###,^(\d{3})$,"is,en,de,da,sv,no",
IT,Italy,Rome,301230,60340328,EU,.it,EUR,Euro,39,#####,^(\d{5})$,"it-IT,de-IT,fr-IT,sc,ca,co,sl","CH,VA,SI,SM,FR,AT"
JE,Jersey,Saint Helier,116,90812,EU,.je,GBP,Pound,+44-1534,@# #@@|@## #@@|@@# #@@|@@## #@@|@#@ #@@|@@#@ #@@|GIR0AA,^(([A-Z]\d{2}[A-Z]{2})|([A-Z]\d{3}[A-Z]{2})|([A-Z]{2}\d{2}[A-Z]{2})|([A-Z]{2}\d{3}[A-Z]{2})|([A-Z]\d[A-Z]\d[A-Z]{2})|([A-Z]{2}\d[A-Z]\d[A-Z]{2})|(GIR0AA))$,"en,pt",
JM,Jamaica,Kingston,10991,2847232,NA,.jm,JMD,Dollar,+1-876,,,en-JM,
JO,Jordan,Amman,92300,6407085,AS,.jo,JOD,Dinar,962,#####,^(\d{5})$,"ar-JO,en","SY,SA,IQ,IL,PS"
JP,Japan,Tokyo,377835,127288000,AS,.jp,JPY,Yen,81,###-####,^(\d{7})$,ja,
KE,Kenya,Nairobi,582650,40046566,AF,.ke,KES,Shilling,254,#####,^(\d{5})$,"en-KE,sw-KE","ET,TZ,SS,SO,UG"
KG,Kyrgyzstan,Bishkek,198500,5508626,AS,.kg,KGS,Som,996,######,^(\d{6})$,"ky,uz,ru","CN,TJ,UZ,KZ"
KH,Cambodia,Phnom Penh,181040,14453680,AS,.kh,KHR,Riels,855,#####,^(\d{5})$,"km,fr,en","LA,TH,VN"
KI,Kiribati,Tarawa,811,92533,OC,.ki,AUD,Dollar,686,,,"en-KI,gil",
KM,Comoros,Moroni,2170,773407,AF,.km,KMF,Franc,269,,,"ar,fr-KM",
KN,Saint Kitts and Nevis,Basseterre,261,51134,NA,.kn,XCD,Dollar,+1-869,,,en-KN,
KP,North Korea,Pyongyang,120540,22912177,AS,.kp,KPW,Won,850,###-###,^(\d{6})$,ko-KP,"CN,KR,RU"
KR,South Korea,Seoul,98480,48422644,AS,.kr,KRW,Won,82,SEOUL ###-###,^(?:SEOUL)*(\d{6})$,"ko-KR,en",KP
XK,Kosovo,Pristina,10908,1800000,EU,,EUR,Euro,,,,"sq,sr","RS,AL,MK,ME"
KW,Kuwait,Kuwait City,17820,2789132,AS,.kw,KWD,Dinar,965,#####,^(\d{5})$,"ar-KW,en","SA,IQ"
KY,Cayman Islands,George Town,262,44270,NA,.ky,KYD,Dollar,+1-345,,,en-KY,
KZ,Kazakhstan,Astana,2717300,15340000,AS,.kz,KZT,Tenge,7,######,^(\d{6})$,"kk,ru","TM,CN,KG,UZ,RU"
LA,Laos,Vientiane,236800,6368162,AS,.la,LAK,Kip,856,#####,^(\d{5})$,"lo,fr,en","CN,MM,KH,TH,VN"
LB,Lebanon,Beirut,10400,4125247,AS,.lb,LBP,Pound,961,#### ####|####,^(\d{4}(\d{4})?)$,"ar-LB,fr-LB,en,hy","SY,IL"
LC,Saint Lucia,Castries,616,160922,NA,.lc,XCD,Dollar,+1-758,,,en-LC,
LI,Liechtenstein,Vaduz,160,35000,EU,.li,CHF,Franc,423,####,^(\d{4})$,de-LI,"CH,AT"
LK,Sri Lanka,Colombo,65610,21513990,AS,.lk,LKR,Rupee,94,#####,^(\d{5})$,"si,ta,en",
LR,Liberia,Monrovia,111370,3685076,AF,.lr,LRD,Dollar,231,####,^(\d{4})$,en-LR,"SL,CI,GN"
LS,Lesotho,Maseru,30355,1919552,AF,.ls,LSL,Loti,266,###,^(\d{3})$,"en-LS,st,zu,xh",ZA
LT,Lithuania,Vilnius,65200,3565000,EU,.lt,LTL,Litas,370,LT-#####,^(?:LT)*(\d{5})$,"lt,ru,pl","PL,BY,RU,LV"
LU,Luxembourg,Luxembourg,2586,497538,EU,.lu,EUR,Euro,352,L-####,^(\d{4})$,"lb,de-LU,fr-LU","DE,BE,FR"
LV,Latvia,Riga,64589,2217969,EU,.lv,EUR,Euro,371,LV-####,^(?:LV)*(\d{4})$,"lv,ru,lt","LT,EE,BY,RU"
LY,Libya,Tripolis,1759540,6461454,AF,.ly,LYD,Dinar,218,,,"ar-LY,it,en","TD,NE,DZ,SD,TN,EG"
MA,Morocco,Rabat,446550,31627428,AF,.ma,MAD,Dirham,212,#####,^(\d{5})$,"ar-MA,fr","DZ,EH,ES"
MC,Monaco,Monaco,1.95,32965,EU,.mc,EUR,Euro,377,#####,^(\d{5})$,"fr-MC,en,it",FR
MD,Moldova,Chisinau,33843,4324000,EU,.md,MDL,Leu,373,MD-####,^(?:MD)*(\d{4})$,"ro,ru,gag,tr","RO,UA"
ME,Montenegro,Podgorica,14026,666730,EU,.me,EUR,Euro,382,#####,^(\d{5})$,"sr,hu,bs,sq,hr,rom","AL,HR,BA,RS,XK"
MF,Saint Martin,Marigot,53,35925,NA,.gp,EUR,Euro,590,### ###,,fr,SX
MG,Madagascar,Antananarivo,587040,21281844,AF,.mg,MGA,Ariary,261,###,^(\d{3})$,"fr-MG,mg",
MH,Marshall Islands,Majuro,181.3,65859,OC,.mh,USD,Dollar,692,,,"mh,en-MH",
MK,Macedonia,Skopje,25333,2062294,EU,.mk,MKD,Denar,389,####,^(\d{4})$,"mk,sq,tr,rmm,sr","AL,GR,CS,BG,RS,XK"
ML,Mali,Bamako,1240000,13796354,AF,.ml,XOF,Franc,223,,,"fr-ML,bm","SN,NE,DZ,CI,GN,MR,BF"
MM,Myanmar,<NAME>,678500,53414374,AS,.mm,MMK,Kyat,95,#####,^(\d{5})$,my,"CN,LA,TH,BD,IN"
MN,Mongolia,Ulan Bator,1565000,3086918,AS,.mn,MNT,Tugrik,976,######,^(\d{6})$,"mn,ru","CN,RU"
MO,Macao,Macao,254,449198,AS,.mo,MOP,Pataca,853,,,"zh,zh-MO,pt",
MP,Northern Mariana Islands,Saipan,477,53883,OC,.mp,USD,Dollar,+1-670,,,"fil,tl,zh,ch-MP,en-MP",
MQ,Martinique,Fort-de-France,1100,432900,NA,.mq,EUR,Euro,596,#####,^(\d{5})$,fr-MQ,
MR,Mauritania,Nouakchott,1030700,3205060,AF,.mr,MRO,Ouguiya,222,,,"ar-MR,fuc,snk,fr,mey,wo","SN,DZ,EH,ML"
MS,Montserrat,Plymouth,102,9341,NA,.ms,XCD,Dollar,+1-664,,,en-MS,
MT,Malta,Valletta,316,403000,EU,.mt,EUR,Euro,356,@@@ ###|@@@ ##,^([A-Z]{3}\d{2}\d?)$,"mt,en-MT",
MU,Mauritius,Port Louis,2040,1294104,AF,.mu,MUR,Rupee,230,,,"en-MU,bho,fr",
MV,Maldives,Male,300,395650,AS,.mv,MVR,Rufiyaa,960,#####,^(\d{5})$,"dv,en",
MW,Malawi,Lilongwe,118480,15447500,AF,.mw,MWK,Kwacha,265,,,"ny,yao,tum,swk","TZ,MZ,ZM"
MX,Mexico,Mexico City,1972550,112468855,NA,.mx,MXN,Peso,52,#####,^(\d{5})$,es-MX,"GT,US,BZ"
MY,Malaysia,Kuala Lumpur,329750,28274729,AS,.my,MYR,Ringgit,60,#####,^(\d{5})$,"ms-MY,en,zh,ta,te,ml,pa,th","BN,TH,ID"
MZ,Mozambique,Maputo,801590,22061451,AF,.mz,MZN,Metical,258,####,^(\d{4})$,"pt-MZ,vmw","ZW,TZ,SZ,ZA,ZM,MW"
NA,Namibia,Windhoek,825418,2128471,AF,.na,NAD,Dollar,264,,,"en-NA,af,de,hz,naq","ZA,BW,ZM,AO"
NC,New Caledonia,Noumea,19060,216494,OC,.nc,XPF,Franc,687,#####,^(\d{5})$,fr-NC,
NE,Niger,Niamey,1267000,15878271,AF,.ne,XOF,Franc,227,####,^(\d{4})$,"fr-NE,ha,kr,dje","TD,BJ,DZ,LY,BF,NG,ML"
NF,Norfolk Island,Kingston,34.6,1828,OC,.nf,AUD,Dollar,672,####,^(\d{4})$,en-NF,
NG,Nigeria,Abuja,923768,154000000,AF,.ng,NGN,Naira,234,######,^(\d{6})$,"en-NG,ha,yo,ig,ff","TD,NE,BJ,CM"
NI,Nicaragua,Managua,129494,5995928,NA,.ni,NIO,Cordoba,505,###-###-#,^(\d{7})$,"es-NI,en","CR,HN"
NL,Netherlands,Amsterdam,41526,16645000,EU,.nl,EUR,Euro,31,#### @@,^(\d{4}[A-Z]{2})$,"nl-NL,fy-NL","DE,BE"
NO,Norway,Oslo,324220,5009150,EU,.no,NOK,Krone,47,####,^(\d{4})$,"no,nb,nn,se,fi","FI,RU,SE"
NP,Nepal,Kathmandu,140800,28951852,AS,.np,NPR,Rupee,977,#####,^(\d{5})$,"ne,en","CN,IN"
NR,Nauru,Yaren,21,10065,OC,.nr,AUD,Dollar,674,,,"na,en-NR",
NU,Niue,Alofi,260,2166,OC,.nu,NZD,Dollar,683,,,"niu,en-NU",
NZ,New Zealand,Wellington,268680,4252277,OC,.nz,NZD,Dollar,64,####,^(\d{4})$,"en-NZ,mi",
OM,Oman,Muscat,212460,2967717,AS,.om,OMR,Rial,968,###,^(\d{3})$,"ar-OM,en,bal,ur","SA,YE,AE"
PA,Panama,Panama City,78200,3410676,NA,.pa,PAB,Balboa,507,,,"es-PA,en","CR,CO"
PE,Peru,Lima,1285220,29907003,SA,.pe,PEN,Sol,51,,,"es-PE,qu,ay","EC,CL,BO,BR,CO"
PF,French Polynesia,Papeete,4167,270485,OC,.pf,XPF,Franc,689,#####,^((97|98)7\d{2})$,"fr-PF,ty",
PG,Papua New Guinea,Port Moresby,462840,6064515,OC,.pg,PGK,Kina,675,###,^(\d{3})$,"en-PG,ho,meu,tpi",ID
PH,Philippines,Manila,300000,99900177,AS,.ph,PHP,Peso,63,####,^(\d{4})$,"tl,en-PH,fil",
PK,Pakistan,Islamabad,803940,184404791,AS,.pk,PKR,Rupee,92,#####,^(\d{5})$,"ur-PK,en-PK,pa,sd,ps,brh","CN,AF,IR,IN"
PL,Poland,Warsaw,312685,38500000,EU,.pl,PLN,Zloty,48,##-###,^(\d{5})$,pl,"DE,LT,SK,CZ,BY,UA,RU"
PM,Saint Pierre and Miquelon,Saint-Pierre,242,7012,NA,.pm,EUR,Euro,508,#####,^(97500)$,fr-PM,
PN,Pitcairn,Adamstown,47,46,OC,.pn,NZD,Dollar,870,,,en-PN,
PR,Puerto Rico,San Juan,9104,3916632,NA,.pr,USD,Dollar,+1-787 and 1-939,#####-####,^(\d{9})$,"en-PR,es-PR",
PS,Palestinian Territory,East Jerusalem,5970,3800000,AS,.ps,ILS,Shekel,970,,,ar-PS,"JO,IL"
PT,Portugal,Lisbon,92391,10676000,EU,.pt,EUR,Euro,351,####-###,^(\d{7})$,"pt-PT,mwl",ES
PW,Palau,Melekeok,458,19907,OC,.pw,USD,Dollar,680,96940,^(96940)$,"pau,sov,en-PW,tox,ja,fil,zh",
PY,Paraguay,Asuncion,406750,6375830,SA,.py,PYG,Guarani,595,####,^(\d{4})$,"es-PY,gn","BO,BR,AR"
QA,Qatar,Doha,11437,840926,AS,.qa,QAR,Rial,974,,,"ar-QA,es",SA
RE,Reunion,Saint-Denis,2517,776948,AF,.re,EUR,Euro,262,#####,^((97|98)(4|7|8)\d{2})$,fr-RE,
RO,Romania,Bucharest,237500,21959278,EU,.ro,RON,Leu,40,######,^(\d{6})$,"ro,hu,rom","MD,HU,UA,CS,BG,RS"
RS,Serbia,Belgrade,88361,7344847,EU,.rs,RSD,Dinar,381,######,^(\d{6})$,"sr,hu,bs,rom","AL,HU,MK,RO,HR,BA,BG,ME,XK"
RU,Russia,Moscow,17100000,140702000,EU,.ru,RUB,Ruble,7,######,^(\d{6})$,"ru,tt,xal,cau,ady,kv,ce,tyv,cv,udm,tut,mns,bua,myv,mdf,chm,ba,inh,tut,kbd,krc,ava,sah,nog","GE,CN,BY,UA,KZ,LV,PL,EE,LT,FI,MN,NO,AZ,KP"
RW,Rwanda,Kigali,26338,11055976,AF,.rw,RWF,Franc,250,,,"rw,en-RW,fr-RW,sw","TZ,CD,BI,UG"
SA,Saudi Arabia,Riyadh,1960582,25731776,AS,.sa,SAR,Rial,966,#####,^(\d{5})$,ar-SA,"QA,OM,IQ,YE,JO,AE,KW"
SB,Solomon Islands,Honiara,28450,559198,OC,.sb,SBD,Dollar,677,,,"en-SB,tpi",
SC,Seychelles,Victoria,455,88340,AF,.sc,SCR,Rupee,248,,,"en-SC,fr-SC",
SD,Sudan,Khartoum,1861484,35000000,AF,.sd,SDG,Pound,249,#####,^(\d{5})$,"ar-SD,en,fia","SS,TD,EG,ET,ER,LY,CF"
SS,South Sudan,Juba,644329,8260490,AF,,SSP,Pound,211,,,en,"CD,CF,ET,KE,SD,UG,"
SE,Sweden,Stockholm,449964,9555893,EU,.se,SEK,Krona,46,### ##,^(?:SE)*(\d{5})$,"sv-SE,se,sma,fi-SE","NO,FI"
SG,Singapore,Singapur,692.7,4701069,AS,.sg,SGD,Dollar,65,######,^(\d{6})$,"cmn,en-SG,ms-SG,ta-SG,zh-SG",
SH,Saint Helena,Jamestown,410,7460,AF,.sh,SHP,Pound,290,STHL 1ZZ,^(STHL1ZZ)$,en-SH,
SI,Slovenia,Ljubljana,20273,2007000,EU,.si,EUR,Euro,386,####,^(?:SI)*(\d{4})$,"sl,sh","HU,IT,HR,AT"
SJ,Svalbard and <NAME>,Longyearbyen,62049,2550,EU,.sj,NOK,Krone,47,,,"no,ru",
SK,Slovakia,Bratislava,48845,5455000,EU,.sk,EUR,Euro,421,### ##,^(\d{5})$,"sk,hu","PL,HU,CZ,UA,AT"
SL,Sierra Leone,Freetown,71740,5245695,AF,.sl,SLL,Leone,232,,,"en-SL,men,tem","LR,GN"
SM,San Marino,San Marino,61.2,31477,EU,.sm,EUR,Euro,378,4789#,^(4789\d)$,it-SM,IT
SN,Senegal,Dakar,196190,12323252,AF,.sn,XOF,Franc,221,#####,^(\d{5})$,"fr-SN,wo,fuc,mnk","GN,MR,GW,GM,ML"
SO,Somalia,Mogadishu,637657,10112453,AF,.so,SOS,Shilling,252,@@ #####,^([A-Z]{2}\d{5})$,"so-SO,ar-SO,it,en-SO","ET,KE,DJ"
SR,Suriname,Paramaribo,163270,492829,SA,.sr,SRD,Dollar,597,,,"nl-SR,en,srn,hns,jv","GY,BR,GF"
ST,Sao Tome and Principe,Sao Tome,1001,175808,AF,.st,STD,Dobra,239,,,pt-ST,
SV,El Salvador,San Salvador,21040,6052064,NA,.sv,USD,Dollar,503,CP ####,^(?:CP)*(\d{4})$,es-SV,"GT,HN"
SX,Sint Maarten,Philipsburg,34,37429,NA,.sx,ANG,Guilder,599,,,"nl,en",MF
SY,Syria,Damascus,185180,22198110,AS,.sy,SYP,Pound,963,,,"ar-SY,ku,hy,arc,fr,en","IQ,JO,IL,TR,LB"
SZ,Swaziland,Mbabane,17363,1354051,AF,.sz,SZL,Lilangeni,268,@###,^([A-Z]\d{3})$,"en-SZ,ss-SZ","ZA,MZ"
TC,Turks and Caicos Islands,Cockburn Town,430,20556,NA,.tc,USD,Dollar,+1-649,TKCA 1ZZ,^(TKCA 1ZZ)$,en-TC,
TD,Chad,N'Djamena,1284000,10543464,AF,.td,XAF,Franc,235,,,"fr-TD,ar-TD,sre","NE,LY,CF,SD,CM,NG"
TF,French Southern Territories,Port-aux-Francais,7829,140,AN,.tf,EUR,Euro ,,,,fr,
TG,Togo,Lome,56785,6587239,AF,.tg,XOF,Franc,228,,,"fr-TG,ee,hna,kbp,dag,ha","BJ,GH,BF"
TH,Thailand,Bangkok,514000,67089500,AS,.th,THB,Baht,66,#####,^(\d{5})$,"th,en","LA,MM,KH,MY"
TJ,Tajikistan,Dushanbe,143100,7487489,AS,.tj,TJS,Somoni,992,######,^(\d{6})$,"tg,ru","CN,AF,KG,UZ"
TK,Tokelau,,10,1466,OC,.tk,NZD,Dollar,690,,,"tkl,en-TK",
TL,East Timor,Dili,15007,1154625,OC,.tl,USD,Dollar,670,,,"tet,pt-TL,id,en",ID
TM,Turkmenistan,Ashgabat,488100,4940916,AS,.tm,TMT,Manat,993,######,^(\d{6})$,"tk,ru,uz","AF,IR,UZ,KZ"
TN,Tunisia,Tunis,163610,10589025,AF,.tn,TND,Dinar,216,####,^(\d{4})$,"ar-TN,fr","DZ,LY"
TO,Tonga,Nuku'alofa,748,122580,OC,.to,TOP,Pa'anga,676,,,"to,en-TO",
TR,Turkey,Ankara,780580,77804122,AS,.tr,TRY,Lira,90,#####,^(\d{5})$,"tr-TR,ku,diq,az,av","SY,GE,IQ,IR,GR,AM,AZ,BG"
TT,Trinidad and Tobago,Port of Spain,5128,1228691,NA,.tt,TTD,Dollar,+1-868,,,"en-TT,hns,fr,es,zh",
TV,Tuvalu,Funafuti,26,10472,OC,.tv,AUD,Dollar,688,,,"tvl,en,sm,gil",
TW,Taiwan,Taipei,35980,22894384,AS,.tw,TWD,Dollar,886,#####,^(\d{5})$,"zh-TW,zh,nan,hak",
TZ,Tanzania,Dodoma,945087,41892895,AF,.tz,TZS,Shilling,255,,,"sw-TZ,en,ar","MZ,KE,CD,RW,ZM,BI,UG,MW"
UA,Ukraine,Kiev,603700,45415596,EU,.ua,UAH,Hryvnia,380,#####,^(\d{5})$,"uk,ru-UA,rom,pl,hu","PL,MD,HU,SK,BY,RO,RU"
UG,Uganda,Kampala,236040,33398682,AF,.ug,UGX,Shilling,256,,,"en-UG,lg,sw,ar","TZ,KE,SS,CD,RW"
UM,United States Minor Outlying Islands,,0,0,OC,.um,USD,Dollar ,1,,,en-UM,
US,United States,Washington,9629091,310232863,NA,.us,USD,Dollar,1,#####-####,^\d{5}(-\d{4})?$,"en-US,es-US,haw,fr","CA,MX,CU"
UY,Uruguay,Montevideo,176220,3477000,SA,.uy,UYU,Peso,598,#####,^(\d{5})$,es-UY,"BR,AR"
UZ,Uzbekistan,Tashkent,447400,27865738,AS,.uz,UZS,Som,998,######,^(\d{6})$,"uz,ru,tg","TM,AF,KG,TJ,KZ"
VA,Vatican,Vatican City,0.44,921,EU,.va,EUR,Euro,379,#####,^(\d{5})$,"la,it,fr",IT
VC,Saint Vincent and the Grenadines,Kingstown,389,104217,NA,.vc,XCD,Dollar,+1-784,,,"en-VC,fr",
VE,Venezuela,Caracas,912050,27223228,SA,.ve,VEF,Bolivar,58,####,^(\d{4})$,es-VE,"GY,BR,CO"
VG,British Virgin Islands,Road Town,153,21730,NA,.vg,USD,Dollar,+1-284,,,en-VG,
VI,U.S. Virgin Islands,Charlotte Amalie,352,108708,NA,.vi,USD,Dollar,+1-340,#####-####,^\d{5}(-\d{4})?$,en-VI,
VN,Vietnam,Hanoi,329560,89571130,AS,.vn,VND,Dong,84,######,^(\d{6})$,"vi,en,fr,zh,km","CN,LA,KH"
VU,Vanuatu,Port Vila,12200,221552,OC,.vu,VUV,Vatu,678,,,"bi,en-VU,fr-VU",
WF,Wallis and Futuna,Mata Utu,274,16025,OC,.wf,XPF,Franc,681,#####,^(986\d{2})$,"wls,fud,fr-WF",
WS,Samoa,Apia,2944,192001,OC,.ws,WST,Tala,685,,,"sm,en-WS",
YE,Yemen,Sanaa,527970,23495361,AS,.ye,YER,Rial,967,,,ar-YE,"SA,OM"
YT,Mayotte,Mamoudzou,374,159042,AF,.yt,EUR,Euro,262,#####,^(\d{5})$,fr-YT,
ZA,South Africa,Pretoria,1219912,49000000,AF,.za,ZAR,Rand,27,####,^(\d{4})$,"zu,xh,af,nso,en-ZA,tn,st,ts,ss,ve,nr","ZW,SZ,MZ,BW,NA,LS"
ZM,Zambia,Lusaka,752614,13460305,AF,.zm,ZMW,Kwacha,260,#####,^(\d{5})$,"en-ZM,bem,loz,lun,lue,ny,toi","ZW,TZ,MZ,CD,NA,MW,AO"
ZW,Zimbabwe,Harare,390580,11651858,AF,.zw,ZWL,Dollar,263,,,"en-ZW,sn,nr,nd","ZA,MZ,BW,ZM"
CS,Serbia and Montenegro,Belgrade,102350,10829175,EU,.cs,RSD,Dinar,381,#####,^(\d{5})$,"cu,hu,sq,sr","AL,HU,MK,RO,HR,BA,BG"
AN,Netherlands Antilles,Willemstad,960,136197,NA,.an,ANG,Guilder,599,,,"nl-AN,en,es",GP"""
        # NOTE(review): StringIO.StringIO is the Python 2 module; under
        # Python 3 this would need to be io.StringIO -- confirm target
        # interpreter before porting.
        fp = StringIO.StringIO(text)
        rows = list(csv.reader(fp))
        # sort by country name
        rows.sort(key=lambda row: row[1])
        # delete existing places before inserting new ones
        self.db(self.db.places).delete()
        for i, row in enumerate(rows):
            # remove surrounding text
            row = [e.strip() for e in row]
            record = dict(zip(self.fields, row))
            # convert numeric fields to proper type
            for field in self.numeric_fields:
                try:
                    value = int(record[field])
                except ValueError:
                    # Some areas are fractional (e.g. Gibraltar's 6.5),
                    # so fall back to float when int() fails.
                    value = float(record[field])
                record[field] = value
            # add country_id for ordering on GAE, which does not use sequential ID's
            record['country_id'] = i + 1
            # URL() builds the flag image path from the lower-cased ISO code
            record['national_flag'] = URL(c='static', f='images/flags/%s.png' % record['iso'].lower())
            self.db.places.insert(**record)
|
<gh_stars>0
"""
validataclass
Copyright (c) 2021, binary butterfly GmbH and contributors
Use of this source code is governed by an MIT-style license that can be found in the LICENSE file.
"""
import re
from typing import Any, Optional, Union
from .string_validator import StringValidator
from validataclass.exceptions import RegexMatchError
__all__ = [
'RegexValidator',
]
class RegexValidator(StringValidator):
    """
    Validator that accepts only strings matching a given regular expression.

    Builds on `StringValidator`, which performs the type check and any optional
    length requirements first. Afterwards the whole input is matched with
    `re.fullmatch()` from the standard library, so a partial match is never
    sufficient.

    The pattern may be supplied either as a plain string (compiled internally
    with `re.compile()`) or as an already compiled `re.Pattern`. Flags such as
    `re.IGNORECASE` can only be applied by precompiling the pattern yourself.

    For details on regular expressions, see: https://docs.python.org/3/library/re.html

    On a failed match a `RegexMatchError` with the error code
    'invalid_string_format' is raised, unless a different code was supplied via
    the 'custom_error_code' parameter (useful for more explicit error messages).

    As with the base `StringValidator`, only "safe" singleline strings are
    accepted by default (no non-printable characters); see its 'unsafe' and
    'multiline' options.

    Examples:

    ```
    # Import Python standard library for regular expressions
    import re

    # Precompiled pattern matching lower-case hexadecimal numbers (e.g. '0', '123abc', '00ff00')
    RegexValidator(re.compile(r'[0-9a-f]+'))

    # Case-insensitive variant via the re.IGNORECASE flag (e.g. '123abc', '123ABC')
    RegexValidator(re.compile(r'[0-9a-f]+', re.IGNORECASE))

    # Plain string pattern instead (uppercase letters allowed explicitly in the character class)
    RegexValidator(r'[0-9a-fA-F]+')

    # Length requirements restricting input to 6-digit hex numbers (e.g. '123abc')
    RegexValidator(re.compile(r'[0-9a-f]+', re.IGNORECASE), min_length=6, max_length=6)

    # Custom error code (RegexMatchError will have dict representation {'code': 'invalid_hex_number'})
    RegexValidator(re.compile(r'[0-9a-f]+'), custom_error_code='invalid_hex_number')
    ```

    Valid input: Any `str` that matches the regex
    Output: `str` (unmodified input if valid)
    """

    # Compiled pattern every input string is matched against
    regex_pattern: re.Pattern

    # Overrides the default RegexMatchError code when set
    custom_error_code: Optional[str] = None

    def __init__(self, pattern: Union[re.Pattern, str], *, custom_error_code: Optional[str] = None, **kwargs):
        """
        Create a RegexValidator from a pattern string or a precompiled `re.Pattern`.

        Optionally accepts a custom error code. Remaining keyword arguments
        (e.g. 'min_length', 'max_length', 'multiline', 'unsafe') are forwarded
        to the underlying `StringValidator`.
        """
        # Base StringValidator handles length options from kwargs
        super().__init__(**kwargs)

        # Normalize to a compiled pattern, compiling plain strings on the fly
        self.regex_pattern = pattern if isinstance(pattern, re.Pattern) else re.compile(pattern)

        # Only shadow the class-level default when a custom code was supplied
        if custom_error_code is not None:
            self.custom_error_code = custom_error_code

    def validate(self, input_data: Any) -> str:
        """
        Validate input as a string, then require a full-string regex match.
        Returns the unmodified string.
        """
        # Type and length checks are delegated to the base StringValidator
        validated_string = super().validate(input_data)

        # fullmatch() is anchored at both ends: the entire string must match
        if self.regex_pattern.fullmatch(validated_string) is None:
            raise RegexMatchError(code=self.custom_error_code)

        return validated_string
|
<filename>tests/test_scanner.py
import pytest
from utils.scanner import StringScanner
@pytest.fixture
def scanner():
    """Provide a fresh StringScanner over the sample text 'Hello, world!'."""
    return StringScanner('Hello, world!')
class TestScanner:
    """Tests for utils.scanner.StringScanner.

    All tests except ``test_init`` receive the module-level ``scanner``
    fixture, which wraps the string 'Hello, world!'.

    Comparisons against None use ``is`` / ``is not`` (PEP 8 E711): the
    original ``== None`` checks could be fooled by a custom ``__eq__``.
    """

    def test_init(self):
        """A fresh scanner exposes its text, position 0 and no match."""
        src = 'Hello, world!'
        scanner = StringScanner(src)
        assert scanner.text == src
        assert scanner.pos == 0
        assert scanner.match is None

    def test_repr(self, scanner):
        """repr() shows the current position and the scanned text."""
        should_be = '<StringScanner: position=0 text="Hello, world!">'
        assert repr(scanner) == should_be

    def test_check_valid(self, scanner):
        """check() returns and records the match without advancing."""
        stuff = scanner.check(r'\w+')
        assert stuff == 'Hello'
        assert scanner.match == stuff
        assert scanner.pos == 0

    def test_check_invalid(self, scanner):
        """A failed check() yields None and leaves the position alone."""
        stuff = scanner.check(r'\s+')
        assert stuff is None
        assert scanner.match == stuff
        assert scanner.pos == 0

    def test_skip_valid(self, scanner):
        """skip() advances past a match and reports its length."""
        num_skipped = scanner.skip(r'\w+')
        assert num_skipped == 5
        assert scanner.pos == 5
        assert scanner.match == 'Hello'

    def test_skip_invalid(self, scanner):
        """A failed skip() reports 0 and does not move or record a match."""
        num_skipped = scanner.skip(r'\s+')
        assert num_skipped == 0
        assert scanner.pos == 0
        assert scanner.match is None

    def test_unscan(self, scanner):
        """unscan() fully undoes the previous scan()."""
        scanner.scan(r'\w+')
        assert scanner.pos == 5
        assert scanner.match == 'Hello'
        scanner.unscan()
        assert scanner.pos == 0
        assert scanner.match is None

    def test_getch(self, scanner):
        """getch() consumes and returns one character at a time."""
        first_char = scanner.getch()
        assert scanner.pos == 1
        assert first_char == 'H'
        assert scanner.match == 'H'
        second_char = scanner.getch()
        assert scanner.pos == 2
        assert second_char == 'e'
        assert scanner.match == 'e'

    def test_append(self, scanner):
        """append() extends the text without touching pos or match."""
        text = scanner.text
        old_pos = scanner.pos
        old_match = scanner.match
        scanner.append('blah')
        assert scanner.text == text + 'blah'
        assert scanner.pos == old_pos
        assert scanner.match == old_match

    def test_not_at_eos(self, scanner):
        """A fresh scanner over non-empty text is not at end-of-string."""
        assert scanner.pos == 0
        assert len(scanner.text) > 0
        assert not scanner.end_of_string

    def test_at_eos(self, scanner):
        """Consuming the whole text puts the scanner at end-of-string."""
        scanner.scan('Hello, world!')
        assert scanner.pos == len(scanner.text)
        assert scanner.end_of_string

    def test_simple_getitem(self, scanner):
        """Integer indexing (including negative indices) reads the text."""
        assert scanner[2] == 'l'
        assert scanner[-1] == '!'

    def test_getitem_slice(self, scanner):
        """Slice indexing (with step) reads the text."""
        assert scanner[:5] == 'Hello'
        assert scanner[1:5:2] == 'el'
        assert scanner[5:] == ', world!'

    def test_invalid_getitem(self, scanner):
        """Out-of-range and non-integer indices raise like str indexing."""
        with pytest.raises(IndexError):
            scanner[123]
        with pytest.raises(TypeError):
            # Because we shouldn't be trying to access a string using a
            # string index
            scanner['blah']

    def test_current_char(self, scanner):
        """current_char tracks the character at the current position."""
        assert scanner.current_char == 'H'
        scanner.scan(r'\w+,')
        assert scanner.current_char == ' '

    def test_current_char_at_eos(self, scanner):
        """current_char is None once the scanner is exhausted."""
        scanner.scan('Hello, world!')
        assert scanner.end_of_string
        assert scanner.current_char is None

    def test_peek(self, scanner):
        """
        Make sure that using peek() doesn't affect the state of the scanner
        """
        assert scanner.pos == 0
        assert scanner.current_char == 'H'
        assert scanner.match is None
        assert scanner.peek() == 'e'
        assert scanner.pos == 0
        assert scanner.current_char == 'H'
        assert scanner.match is None

    def test_peek_multiple_chars(self, scanner):
        """peek(n) looks ahead n characters past the current one."""
        assert scanner.peek(2) == 'el'
        assert scanner.peek(5) == 'ello,'

    def test_rest(self, scanner):
        """rest returns the unconsumed tail without advancing."""
        scanner.scan(r'\w+')
        old_pos = scanner.pos
        assert scanner.rest == ', world!'
        assert scanner.pos == old_pos
|
<gh_stars>1-10
import numpy as np
import nibabel
import pytest
from nilearn._utils.testing import write_tmp_imgs
from nilearn.decomposition.dict_learning import DictLearning
from nilearn.decomposition.tests.test_canica import _make_canica_test_data
from nilearn.image import iter_img, get_data
from nilearn.input_data import NiftiMasker
from nilearn.decomposition.tests.test_multi_pca import _tmp_dir
def test_dict_learning():
    """End-to-end check that DictLearning recovers ground-truth components,
    both with an explicit dict_init and with automatic initialization."""
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=8)
    masker = NiftiMasker(mask_img=mask_img).fit()
    mask = get_data(mask_img) != 0
    flat_mask = mask.ravel()
    # Seed the dictionary with the (masked) ground-truth components.
    dict_init = masker.inverse_transform(components[:, flat_mask])
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init,
                                 mask=mask_img,
                                 smoothing_fwhm=0., alpha=1)
    dict_learning_auto_init = DictLearning(n_components=4, random_state=0,
                                           mask=mask_img,
                                           smoothing_fwhm=0., n_epochs=10,
                                           alpha=1)
    maps = {}
    for estimator in [dict_learning,
                      dict_learning_auto_init]:
        estimator.fit(data)
        # Flatten each fitted 4D image to (n_components, n_voxels_in_mask).
        maps[estimator] = get_data(estimator.components_img_)
        maps[estimator] = np.reshape(
            np.rollaxis(maps[estimator], 3, 0)[:, mask],
            (4, flat_mask.sum()))
    masked_components = components[:, flat_mask]
    # NOTE(review): only the dict_init estimator is checked for recovery here.
    for this_dict_learning in [dict_learning]:
        these_maps = maps[this_dict_learning]
        # L2-normalize rows of both matrices (guarding zero rows) so the
        # dot product below is a cosine similarity.
        S = np.sqrt(np.sum(masked_components ** 2, axis=1))
        S[S == 0] = 1
        masked_components /= S[:, np.newaxis]
        S = np.sqrt(np.sum(these_maps ** 2, axis=1))
        S[S == 0] = 1
        these_maps /= S[:, np.newaxis]
        K = np.abs(masked_components.dot(these_maps.T))
        # Count component pairs with |cosine| > 0.9; expect at least 2 of 4.
        recovered_maps = np.sum(K > 0.9)
        assert(recovered_maps >= 2)
    # Smoke test n_epochs > 1
    dict_learning = DictLearning(n_components=4, random_state=0,
                                 dict_init=dict_init,
                                 mask=mask_img,
                                 smoothing_fwhm=0., n_epochs=2, alpha=1)
    dict_learning.fit(data)
def test_component_sign():
    """Regression test: DictLearning should orient each component so it
    carries more positive than negative mass (e.g. by making the largest
    absolute value positive)."""
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=2,
                                                             noisy=True)
    # The ground-truth components are themselves positively oriented.
    for component in components:
        assert -component.min() <= component.max()
    estimator = DictLearning(n_components=4, random_state=rng,
                             mask=mask_img,
                             smoothing_fwhm=0., alpha=1)
    estimator.fit(data)
    for img in iter_img(estimator.components_img_):
        values = get_data(img)
        assert np.sum(values[values <= 0]) <= np.sum(values[values > 0])
def test_masker_attributes_with_fit():
    """fit() must populate the masker attributes; unfitted transforms and
    empty input must raise."""
    data, mask_img, components, rng = _make_canica_test_data(n_subjects=3)
    # Fit with an explicit mask image.
    estimator = DictLearning(n_components=3, mask=mask_img, random_state=0)
    estimator.fit(data)
    assert estimator.mask_img_ == mask_img
    assert estimator.mask_img_ == estimator.masker_.mask_img_
    # Fit with a pre-built masker instead of a raw mask image.
    estimator = DictLearning(n_components=3,
                             mask=NiftiMasker(mask_img=mask_img),
                             random_state=0)
    estimator.fit(data)
    assert estimator.mask_img_ == estimator.masker_.mask_img_
    unfitted = DictLearning(mask=mask_img, n_components=3)
    with pytest.raises(ValueError,
                       match="Object has no components_ attribute. "
                             "This is probably because "
                             "fit has not been called"):
        unfitted.transform(data)
    # An empty image list must be rejected.
    with pytest.raises(ValueError,
                       match='Need one or more Niimg-like objects '
                             'as input, an empty list was given.'):
        unfitted.fit([])
    # Masker arguments can be forwarded through the estimator.
    DictLearning(n_components=3,
                 target_affine=np.eye(4),
                 target_shape=(6, 8, 10),
                 mask_strategy='background').fit(data)
def test_components_img():
    """components_img_ is a 4D Nifti image with one volume per component."""
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
    n_components = 3
    dict_learning = DictLearning(n_components=n_components, mask=mask_img)
    dict_learning.fit(data)
    components_img = dict_learning.components_img_
    assert isinstance(components_img, nibabel.Nifti1Image)
    # Spatial dims of the input plus one component axis (consistent with
    # the globbing tests below, which use shape[:3]).
    check_shape = data[0].shape[:3] + (n_components,)
    # Bug fix: `assert a, b` treats b as the assert *message* and always
    # passes on a non-empty tuple; compare the shapes explicitly.
    assert components_img.shape == check_shape
def test_with_globbing_patterns_with_single_subject():
    """Fitting from a single-subject glob pattern yields a valid 4D image."""
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=1)
    n_components = 3
    dictlearn = DictLearning(n_components=n_components, mask=mask_img)
    with write_tmp_imgs(data[0], create_files=True, use_wildcards=True) as img:
        input_image = _tmp_dir() + img
        dictlearn.fit(input_image)
        components_img = dictlearn.components_img_
        assert isinstance(components_img, nibabel.Nifti1Image)
        check_shape = data[0].shape[:3] + (n_components,)
        # Bug fix: `assert a, b` on a non-empty tuple always passed; the
        # intended check is shape equality.
        assert components_img.shape == check_shape
def test_with_globbing_patterns_with_multi_subjects():
    """Fitting from a multi-subject glob pattern yields a valid 4D image."""
    data, mask_img, _, _ = _make_canica_test_data(n_subjects=3)
    n_components = 3
    dictlearn = DictLearning(n_components=n_components, mask=mask_img)
    with write_tmp_imgs(data[0], data[1], data[2], create_files=True,
                        use_wildcards=True) as img:
        input_image = _tmp_dir() + img
        dictlearn.fit(input_image)
        components_img = dictlearn.components_img_
        assert isinstance(components_img, nibabel.Nifti1Image)
        check_shape = data[0].shape[:3] + (n_components,)
        # Bug fix: `assert a, b` on a non-empty tuple always passed; the
        # intended check is shape equality.
        assert components_img.shape == check_shape
|
<reponame>kirylkrauchuk/redistimeseries
import time
from RLTest import Env
import time
def assert_msg(env, msg, expected_type, expected_data):
    """Assert that pubsub message *msg* carries the expected type and payload."""
    for field, expected in (('type', expected_type), ('data', expected_data)):
        env.assertEqual(expected, msg[field])
def test_keyspace():
    """TS.ADD / TS.MADD / TS.INCRBY / TS.DECRBY must emit a keyspace
    notification for every key they touch."""
    # Fix: removed the unused local `sample_len`.
    env = Env()
    with env.getClusterConnectionIfNeeded() as r:
        # Enable all keyspace/keyevent notifications.
        r.execute_command('config', 'set', 'notify-keyspace-events', 'KEA')
        pubsub = r.pubsub()
        pubsub.psubscribe('__key*')
        time.sleep(1)  # give the subscription time to register
        env.assertEqual('psubscribe', pubsub.get_message()['type'])
        r.execute_command('ts.add', 'tester{2}', 100, 1.1)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester{2}')
        # Test MADD generate events for each key updated
        r.execute_command("ts.madd", 'tester{2}', "*", 10, 'test_key2{2}', 2000, 20)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'test_key2{2}')
        # Test INCRBY generate event on key
        r.execute_command("ts.INCRBY", 'tester{2}', "100")
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.incrby')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester{2}')
        # Test DECRBY generate event on key
        r.execute_command("ts.DECRBY", 'tester{2}', "13")
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.decrby')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester{2}')
def test_keyspace_create_rules():
    """TS.CREATE / TS.CREATERULE / TS.DELETERULE must emit notifications
    for both the source and the destination keys."""
    # Fix: removed the unused local `sample_len`.
    env = Env()
    with env.getClusterConnectionIfNeeded() as r:
        r.execute_command('config', 'set', 'notify-keyspace-events', 'KEA')
        pubsub = r.pubsub()
        pubsub.psubscribe('__key*')
        time.sleep(1)  # give the subscription time to register
        env.assertEqual('psubscribe', pubsub.get_message()['type'])
        r.execute_command('TS.CREATE', 'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        r.execute_command('TS.CREATE', 'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        r.execute_command('TS.CREATERULE', 'tester_src{2}', 'tester_dest{2}', 'AGGREGATION', 'COUNT', 10)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:src')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        r.execute_command('TS.DELETERULE', 'tester_src{2}', 'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.deleterule:src')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.deleterule:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
def test_keyspace_rules_send():
    """Compaction rules must emit `ts.add:dest` events on the destination
    key whenever a source write closes an aggregation window."""
    # Fix: removed the unused local `sample_len`.
    env = Env()
    with env.getClusterConnectionIfNeeded() as r:
        r.execute_command('config', 'set', 'notify-keyspace-events', 'KEA')
        pubsub = r.pubsub()
        pubsub.psubscribe('__key*')
        time.sleep(1)  # give the subscription time to register
        env.assertEqual('psubscribe', pubsub.get_message()['type'])
        r.execute_command('TS.CREATE', 'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        r.execute_command('TS.CREATE', 'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.create')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        r.execute_command('TS.CREATERULE', 'tester_src{2}', 'tester_dest{2}', 'AGGREGATION', 'MAX', 1)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:src')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.createrule:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        r.execute_command('ts.add', 'tester_src{2}', 100, 1.1)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        r.execute_command('ts.add', 'tester_src{2}', 101, 1.1)
        # First getting the event from the dest on the previous window
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
        r.execute_command('ts.incrby', 'tester_src{2}', 3)
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.add:dest')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_dest{2}')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'ts.incrby')
        assert_msg(env, pubsub.get_message(), 'pmessage', b'tester_src{2}')
|
<reponame>daojiaxu/semeval_11
import os
import numpy as np
# from semeval.datasets import pre_deal
import pre_deal_bert
import new_pre_deal
from keras.preprocessing import sequence
from mxnet.contrib import text
from transformers import BertTokenizer
import pandas as pd
from bert_serving.client import BertClient
max_len = 1000
tokenizer = BertTokenizer.from_pretrained("bert-large-uncased")
def get_vector():
    """Embed every training article with 300-d GloVe vectors.

    Each article is BERT-tokenized, truncated to 1200 tokens, looked up in
    GloVe and zero-padded to a (1200, 300) matrix.  The stacked result is
    saved to ``glove_300d_1200.npy`` and returned.
    """
    texts = []
    # Fix: use a context manager so file handles are closed (the original
    # leaked one handle per article); also avoid shadowing builtin `list`.
    for fname in os.listdir("train-articles"):
        with open("train-articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for article in texts:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(article)[0:1200])
        arr = emb.asnumpy()
        if arr.shape[0] < 1200:
            # Pad short articles with zero rows up to the fixed length.
            pad = np.zeros((1200 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("glove_300d_1200.npy", vectors)
    return vectors
def get_vector_test():
    """Embed every dev article with 300-d GloVe vectors, padded to
    (1200, 300) per article; saved to ``glove_test_300d_1200.npy``."""
    texts = []
    # Fix: close each file promptly instead of leaking the handle, and do
    # not shadow builtin `list`.
    for fname in os.listdir("dev-articles"):
        with open("dev-articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for article in texts:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(article)[0:1200])
        arr = emb.asnumpy()
        if arr.shape[0] < 1200:
            pad = np.zeros((1200 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("glove_test_300d_1200.npy", vectors)
    return vectors
def get_labels_vector():
    """Pad the per-article label vectors to 1200 positions and cache them
    in ``train_labels_vector_1200.npy``."""
    labels_vector_dict, texts = pre_deal_bert.get_labels_vector()
    ordered = [labels_vector_dict[key] for key in labels_vector_dict.keys()]
    padded = sequence.pad_sequences(np.array(ordered), maxlen=1200,
                                    padding='post')
    np.save("train_labels_vector_1200.npy", padded)
def get_labels_vector_new():
    """Pad the new-format label vectors to 1200 positions and cache them
    in ``new_train_labels_vector_1200.npy``."""
    labels_vector_dict = new_pre_deal.get_labels_vector_new()
    ordered = [labels_vector_dict[key] for key in labels_vector_dict.keys()]
    padded = sequence.pad_sequences(np.array(ordered), maxlen=1200,
                                    padding='post')
    np.save("new_train_labels_vector_1200.npy", padded)
def get_bert_labels_vector_new():
    """Pad the new-format label vectors to 500 positions (BERT length) and
    cache them in ``new_train_dev_labels_vector_500.npy``."""
    labels_vector_dict = new_pre_deal.get_labels_vector_new()
    ordered = [labels_vector_dict[key] for key in labels_vector_dict.keys()]
    padded = sequence.pad_sequences(np.array(ordered), maxlen=500,
                                    padding='post')
    np.save("new_train_dev_labels_vector_500.npy", padded)
def new_get_train_dev_vector():
    """Embed the combined train+dev articles with 300-d GloVe, padded to
    (1200, 300) per article; saved to ``new_train_dev_glove_300d_1200.npy``."""
    texts = []
    # Fix: close each file promptly instead of leaking the handle, and do
    # not shadow builtin `list`.
    for fname in os.listdir("train_dev_articles"):
        with open("train_dev_articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for article in texts:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(article)[0:1200])
        arr = emb.asnumpy()
        if arr.shape[0] < 1200:
            pad = np.zeros((1200 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("new_train_dev_glove_300d_1200.npy", vectors)
    return vectors
def new_get_train_dev_vector_bert():
    """Encode the train+dev articles via a bert-serving server and cache
    the result in ``train_dev_vector_bert_450.npy``."""
    texts = []
    # Fix: close each file promptly instead of leaking the handle, and do
    # not shadow builtin `list`.
    for fname in os.listdir("train_dev_articles"):
        with open("train_dev_articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    bc = BertClient(ip='192.168.3.11', port=5555, port_out=5556, check_version=False)
    texts_vector = bc.encode(texts)
    np.save("train_dev_vector_bert_450.npy", texts_vector)
    return texts_vector
def new_get_dev_vector_bert():
    """Encode the dev articles via a bert-serving server and cache the
    result in ``dev_vector_bert_500.npy``."""
    texts = []
    # Fix: close each file promptly instead of leaking the handle, and do
    # not shadow builtin `list`.
    for fname in os.listdir("dev_articles"):
        with open("dev_articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    bc = BertClient(ip='192.168.3.11', port=5555, port_out=5556, check_version=False)
    texts_vector = bc.encode(texts)
    np.save("dev_vector_bert_500.npy", texts_vector)
    return texts_vector
def get_vector_train_tc():
    """Embed the 6369 training propaganda snippets with 300-d GloVe,
    padded to (100, 300) each; saved to ``tc_glove_train.npy``."""
    train_articles = pd.read_excel("mapping_TC.xlsx")
    text_list_train = []
    labels_list_train = []
    for idx in range(0, 6369):
        text_list_train.append(str(train_articles['Associated_Propaganda'][idx]))
        labels_list_train.append(train_articles['Classification'][idx])
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for snippet in text_list_train:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(snippet)[0:100])
        arr = emb.asnumpy()
        if arr.shape[0] < 100:
            # Zero-pad short snippets up to 100 tokens.
            pad = np.zeros((100 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("tc_glove_train.npy", vectors)
    return vectors
def get_vector_test_tc():
    """Embed the 1063 dev propaganda snippets with 300-d GloVe, padded to
    (100, 300) each; saved to ``tc_glove_test.npy``."""
    dev_articles = pd.read_excel("TC_dev_predict.xlsx")
    text_list_dev = []
    for idx in range(0, 1063):
        text_list_dev.append(dev_articles['Associated_Propaganda'][idx])
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for snippet in text_list_dev:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(snippet)[0:100])
        arr = emb.asnumpy()
        if arr.shape[0] < 100:
            # Zero-pad short snippets up to 100 tokens.
            pad = np.zeros((100 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("tc_glove_test.npy", vectors)
    return vectors
def get_vector_test_final():
    """Embed the final test articles with 300-d GloVe, padded to
    (1200, 300) per article; saved to ``glove_final_300d_1200.npy``."""
    texts = []
    # Fix: close each file promptly instead of leaking the handle, and do
    # not shadow builtin `list`.
    for fname in os.listdir("test-articles"):
        with open("test-articles/" + fname, encoding='utf8') as fh:
            texts.append(fh.read())
    glove = text.embedding.create("glove", pretrained_file_name='glove.6B.300d.txt')
    vectors = []
    for article in texts:
        emb = glove.get_vecs_by_tokens(tokenizer.tokenize(article)[0:1200])
        arr = emb.asnumpy()
        if arr.shape[0] < 1200:
            pad = np.zeros((1200 - int(arr.shape[0]), 300))
            vectors.append(np.vstack((arr, pad)).tolist())
        else:
            vectors.append(arr.tolist())
    vectors = np.array(vectors)
    np.save("glove_final_300d_1200.npy", vectors)
    return vectors
def get_test():
    """Load and return the cached 512-token training matrix from disk."""
    return np.load("train_case_512.npy")
if __name__ == '__main__':
    # Earlier pipeline stages, kept here for reference:
    # get_labels_vector_new()
    # new_get_train_dev_vector()
    # new_get_train_dev_vector_bert()
    # get_bert_labels_vector_new()
    # Fix: removed a stray trailing '|' (extraction artifact) that made
    # this line a syntax error.
    text_vector = get_test()
import asyncio
import socket
import threading
import time
from collections import deque
from contextlib import suppress
from btclib_node.constants import NodeStatus, P2pConnStatus
from btclib_node.p2p.address import NetworkAddress, to_ipv6
from btclib_node.p2p.connection import Connection
from btclib_node.p2p.messages.ping import Ping
class P2pManager(threading.Thread):
    """Thread that runs the node's peer-to-peer networking on a private
    asyncio event loop: it accepts inbound peers, dials outbound peers
    from ``peer_db`` and pings/prunes idle connections."""

    def __init__(self, node, port, peer_db):
        """Store node references; the loop starts running in :meth:`run`."""
        super().__init__()
        self.node = node
        self.logger = node.logger
        self.port = port
        self.peer_db = peer_db
        self.connections = {}  # connection id -> Connection
        self.messages = deque()  # inbound messages for the node's main loop
        self.handshake_messages = deque()  # handshake-phase traffic
        self.nonces = []  # nonces we generated (presumably for self-connect detection — TODO confirm)
        self.last_connection_id = -1  # incremented before each new connection
        self.loop = asyncio.new_event_loop()

    def create_connection(self, client, address):
        """Wrap socket *client* in a Connection and schedule its run loop."""
        client.settimeout(0.0)  # non-blocking, as required by asyncio
        self.last_connection_id += 1
        conn = Connection(self, client, address, self.last_connection_id)
        self.connections[self.last_connection_id] = conn
        task = asyncio.run_coroutine_threadsafe(conn.run(), self.loop)
        conn.task = task

    def remove_connection(self, id):
        """Stop and forget the connection with the given id, if present."""
        if id in self.connections.keys():
            self.connections[id].stop()
            self.connections.pop(id)

    async def async_create_connection(self, address):
        """Dial *address*; register a Connection only if the dial succeeds."""
        client = await address.connect()
        if client:
            self.create_connection(client, address)

    def connect(self, address):
        """Thread-safe outbound connect to an ``(ip, port)`` pair."""
        address = NetworkAddress(ip=to_ipv6(address[0]), port=address[1])
        asyncio.run_coroutine_threadsafe(
            self.async_create_connection(address), self.loop
        )

    async def manage_connections(self, loop):
        """Housekeeping loop: drop closed/unresponsive peers, ping idle
        ones, and keep dialing until the target connection count is met."""
        await self.peer_db.get_dns_nodes()
        while True:
            now = time.time()
            for conn in self.connections.copy().values():
                if conn.status == P2pConnStatus.Closed:
                    self.remove_connection(conn.id)
                # No traffic for >120s: send one ping, then drop the peer
                # if that ping also goes unanswered for another 120s.
                if now - conn.last_receive > 120:
                    if not conn.ping_sent:
                        ping_msg = Ping()
                        conn.send(ping_msg)
                        conn.ping_sent = now
                        conn.ping_nonce = ping_msg.nonce
                    elif now - conn.ping_sent > 120:
                        self.remove_connection(conn.id)
            # One peer while syncing headers, ten once synced.
            if self.node.status < NodeStatus.HeaderSynced:
                connection_num = 1
            else:
                connection_num = 10
            if len(self.connections) < connection_num and not self.peer_db.is_empty():
                already_connected = [conn.address for conn in self.connections.values()]
                try:
                    address = self.peer_db.random_address()
                    if address not in already_connected:
                        sock = await address.connect()
                        if sock:
                            self.create_connection(sock, address)
                except Exception:
                    self.logger.exception("Exception occurred")
            await asyncio.sleep(0.1)

    async def server(self, loop):
        """Listen on ``self.port`` and register every inbound connection."""
        server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        server_socket.bind(("0.0.0.0", self.port))
        server_socket.listen()
        server_socket.settimeout(0.0)  # non-blocking accept via the loop
        with server_socket:
            while True:
                client, addr = await loop.sock_accept(server_socket)
                address = NetworkAddress(ip=to_ipv6(addr[0]), port=addr[1])
                self.create_connection(client, address)

    def run(self):
        """Thread entry point: start the server and housekeeping tasks,
        then run the event loop until stop() is called."""
        self.logger.info("Starting P2P manager")
        loop = self.loop
        asyncio.set_event_loop(loop)
        asyncio.run_coroutine_threadsafe(self.server(loop), loop)
        asyncio.run_coroutine_threadsafe(self.manage_connections(loop), loop)
        loop.run_forever()

    def stop(self):
        """Stop the loop, close all connections and cancel pending tasks."""
        self.loop.call_soon_threadsafe(self.loop.stop)
        for conn in self.connections.copy().values():
            conn.stop()
        # Busy-wait until the loop has actually stopped running.
        while self.loop.is_running():
            pass
        pending = asyncio.all_tasks(self.loop)
        for task in pending:
            task.cancel()
            with suppress(asyncio.CancelledError):
                self.loop.run_until_complete(task)
        self.loop.close()
        self.logger.info("Stopping P2P Manager")

    def send(self, msg, id):
        """Send *msg* on connection *id* if it is still registered."""
        if id in self.connections:
            self.connections[id].send(msg)

    def sendall(self, msg):
        """Broadcast *msg* to every current connection."""
        for conn in self.connections.copy().values():
            conn.send(msg)
|
<filename>app/core/migrations/0011_auto_20200903_2018.py
# Generated by Django 3.0.10 on 2020-09-03 20:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration (2020-09-03).

    Introduces the BCM planning models (Action, BCMActivity, Procedure,
    WorkTask), removes the legacy Activity/Product models, and updates
    the choices on ``Process.type``.  Do not hand-edit the operations.
    """

    dependencies = [
        ('core', '0010_orgunit_is_hqunit'),
    ]
    operations = [
        # New models.
        migrations.CreateModel(
            name='Action',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('in_process_step', models.PositiveSmallIntegerField()),
                ('name', models.CharField(max_length=255)),
                ('notes', models.TextField()),
                ('trigger', models.CharField(max_length=255)),
                ('effects', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='BCMActivity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('id_BIA', models.CharField(max_length=15)),
                ('MTPD', models.PositiveSmallIntegerField(choices=[(1, '4 godz.'), (2, '1 dzień'), (3, '2 dni'), (4, '1 tydzień'), (5, '2 tygodnie'), (6, 'do odwołania')])),
                ('min_recovery_level', models.TextField()),
                ('TTN', models.PositiveSmallIntegerField(choices=[(1, '4 godz.'), (2, '1 dzień'), (3, '2 dni'), (4, '1 tydzień'), (5, '2 tygodnie'), (6, 'do odwołania')])),
                ('performer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.OrgUnit')),
            ],
        ),
        migrations.CreateModel(
            name='Procedure',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('version', models.CharField(max_length=5)),
                ('effective_date', models.DateField()),
                ('goal', models.CharField(max_length=255)),
                ('actions', models.ManyToManyField(to='core.Action')),
                ('developed_by', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.OrgUnit')),
            ],
        ),
        migrations.CreateModel(
            name='WorkTask',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('in_action_order', models.PositiveSmallIntegerField()),
                ('task', models.TextField()),
                ('participants', models.CharField(max_length=63)),
                ('applications', models.CharField(max_length=63)),
                ('action', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Action')),
                ('performer', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.OrgUnit')),
            ],
        ),
        # Tear down the legacy Activity/Product models.
        migrations.RemoveField(
            model_name='activity',
            name='input',
        ),
        migrations.RemoveField(
            model_name='activity',
            name='performer',
        ),
        migrations.RemoveField(
            model_name='activity',
            name='process',
        ),
        migrations.RemoveField(
            model_name='activity',
            name='product',
        ),
        migrations.DeleteModel(
            name='Product',
        ),
        migrations.AlterField(
            model_name='process',
            name='type',
            field=models.CharField(choices=[('Operacyjny', 'Operacyjny'), ('Zarządzania', 'Zarządzania'), ('Wsparcia', 'Wsparcia')], max_length=15),
        ),
        migrations.DeleteModel(
            name='Activity',
        ),
        # Link the new models to (non-mega) processes.
        migrations.AddField(
            model_name='procedure',
            name='process',
            field=models.ForeignKey(limit_choices_to={'is_megaprocess': False}, on_delete=django.db.models.deletion.CASCADE, to='core.Process'),
        ),
        migrations.AddField(
            model_name='bcmactivity',
            name='process',
            field=models.ForeignKey(limit_choices_to={'is_megaprocess': False}, on_delete=django.db.models.deletion.CASCADE, to='core.Process'),
        ),
    ]
|
<reponame>KrishnanS2006/HackDefyProject<gh_stars>1-10
from flask import Flask, render_template, redirect, url_for, request, flash, session
from flask_socketio import SocketIO
from flask_socketio import send, emit, join_room, leave_room
from data import *
import random
app = Flask(__name__)
# NOTE(review): the secret key is hard-coded in source; move it to
# configuration / an environment variable before deploying.
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'
socketio = SocketIO(app, cors_allowed_origins="*")
#Generates Random 6 digit Room Code
def generateCode(n):
    """Return a uniformly random integer with exactly ``n`` digits."""
    lowest = 10 ** (n - 1)
    highest = 10 ** n - 1
    return random.randint(lowest, highest)
# End
@socketio.on('joined', namespace='/room')
def joined(message):
    """Handle a client entering a room: join it and broadcast a status
    message to everyone in the room."""
    room = session.get('room')
    join_room(room)
    print("Joined!!!")
    emit('status',
         {'msg': session.get('username') + ' has entered the room.'},
         room=room)
@socketio.on('text', namespace='/room')
def text(message):
    """Relay a chat message from one client to everyone in its room."""
    room = session.get('room')
    print("Message!!!", message)
    emit('message',
         {'msg': session.get('username') + ':' + message['msg']},
         room=room)
@socketio.on('left', namespace='/room')
def left(message):
    """Handle a client leaving a room: leave it and broadcast a status
    message to the remaining members."""
    room = session.get('room')
    leave_room(room)
    print("Left :( :( :(")
    emit('status',
         {'msg': session.get('username') + ' has left the room.'},
         room=room)
@app.route("/", methods=["GET", "POST"])
def index():
if request.method == "POST":
if not session.get("username"):
flash("You are not logged in!", category="error")
return redirect(url_for("login"))
video = request.form.get("video")
code = request.form.get("code")
if video and code:
flash("You can't create and join a room!", category="error")
return redirect(url_for("index"))
if video:
code = generateCode(6)
while not d_add_room(code, session["username"], video):
code = generateCode(6)
flash("Your Room was successfully created!", category="success")
return redirect(url_for("room_view", code=code))
elif code:
room_exists = d_get_room(code)
if room_exists:
flash("You have successfully joined the room!", category="success")
return redirect(url_for("room_view", code=code))
flash("This room code does not exist. Please use a valid room code!", category="error")
return redirect(url_for("index"))
return render_template("index.html")
@app.route("/room/<code>", methods=["GET", "POST"])
def room_view(code=None):
if not session.get("username"):
flash("You are not logged in!", category="success")
return redirect(url_for("login"))
if not code:
flash("This code could not be found! Please check your code.", category="error")
return redirect(url_for("index"))
room_exists = d_get_room(code)
if not room_exists:
flash("This room code does not exist. Please use a valid room code!", category="error")
return redirect(url_for("index"))
session["room"] = code
return render_template("room.html", name=d_get_room(code)["Host"].id, room=code, video=d_get_video(code))
@app.route("/register", methods= ["GET", "POST"])
def register():
if session.get("username"):
flash("You are already logged in!", category="success")
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
if d_add_user(username, password):
flash("Successfully registered!", category="success")
session["username"] = username
return redirect(url_for("index"))
else:
flash("This Username already exists! Please choose another username.", category="error")
return redirect(url_for("login"))
return render_template("register.html")
@app.route("/login", methods= ["GET", "POST"])
def login():
if session.get("username"):
flash("You are already logged in!", category="success")
if request.method == "POST":
username = request.form.get("username")
password = request.form.get("password")
if d_login(username, password):
flash("You have been successfully logged in!", category="success")
session["username"] = username
return redirect(url_for("index"))
else:
flash("Invalid Username or Password!", category="error")
return redirect(url_for("login"))
return render_template("login.html")
@app.route("/logout")
def logout():
if not session.get("username"):
flash("You are not logged in!", category="error")
session.clear()
return redirect(url_for("login"))
if __name__ == "__main__":
try:
from pip._internal.operations import freeze
except ImportError: # pip < 10.0
from pip.operations import freeze
x = freeze.freeze()
for p in x:
print(p)
socketio.run(app)
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import webob.exc
from neutron_lib import context
from neutron_lib.plugins import constants
from oslo_config import cfg
from oslo_utils import uuidutils
from gbpservice.neutron.db import servicechain_db as svcchain_db
from gbpservice.neutron.extensions import servicechain as service_chain
from gbpservice.neutron.tests.unit import common as cm
from gbpservice.neutron.tests.unit.db.grouppolicy import test_group_policy_db
JSON_FORMAT = 'json'
GP_PLUGIN_KLASS = (
"gbpservice.neutron.services.grouppolicy.plugin.GroupPolicyPlugin")
class ServiceChainDBTestBase(test_group_policy_db.GroupPolicyDBTestBase):
    """Shared helpers for the service-chain DB test cases."""

    def _get_resource_plural(self, resource):
        """Return the API plural of *resource* ('policy' -> 'policies')."""
        if resource.endswith('y'):
            # Bug fix: only rewrite the trailing 'y'.  The original used
            # resource.replace('y', 'ies'), which also mangles any 'y'
            # appearing earlier in the name.
            resource_plural = resource[:-1] + 'ies'
        else:
            resource_plural = resource + 's'
        return resource_plural

    def _test_list_resources(self, resource, items,
                             neutron_context=None,
                             query_params=None):
        """List *resource* filtered by *query_params* and verify both the
        plugin-reported count and the returned ids against *items*."""
        resource_plural = self._get_resource_plural(resource)
        res = self._list(resource_plural,
                         neutron_context=neutron_context,
                         query_params=query_params)
        # Turn 'a=1&b=2,3' into {'a': ['1'], 'b': ['2', '3']} for the
        # plugin's count API.
        params = query_params.split('&')
        params = dict((x.split('=')[0], x.split('=')[1].split(','))
                      for x in params)
        count = getattr(self.plugin, 'get_%s_count' % resource_plural)(
            neutron_context or context.get_admin_context(), params)
        self.assertEqual(len(res[resource_plural]), count)
        resource = resource.replace('-', '_')
        self.assertEqual(sorted([i['id'] for i in res[resource_plural]]),
                         sorted([i[resource]['id'] for i in items]))

    def _create_profiled_servicechain_node(
            self, service_type=constants.LOADBALANCERV2, shared_profile=False,
            profile_tenant_id=None, **kwargs):
        """Create a service profile, then a node bound to it."""
        prof = self.create_service_profile(
            service_type=service_type,
            shared=shared_profile,
            tenant_id=profile_tenant_id or self._tenant_id)['service_profile']
        return self.create_servicechain_node(
            service_profile_id=prof['id'], **kwargs)
class ServiceChainDBTestPlugin(svcchain_db.ServiceChainDbPlugin):
    """Service-chain DB plugin wired with the aliases/prefix the tests need."""
    # 'servicechain' plus the extensions required by the GBP test base.
    supported_extension_aliases = ['servicechain'] + (
        test_group_policy_db.UNSUPPORTED_REQUIRED_EXTS)
    path_prefix = "/servicechain"
# Dotted import path of the test plugin, passed to the test setup machinery.
DB_GP_PLUGIN_KLASS = (ServiceChainDBTestPlugin.__module__ + '.' +
                      ServiceChainDBTestPlugin.__name__)
class ServiceChainDbTestCase(test_group_policy_db.GroupPolicyDbTestCase):
    """Base test case that boots the service-chain plugin alongside GBP."""

    def setUp(self, core_plugin=None, sc_plugin=None, service_plugins=None,
              ext_mgr=None, gp_plugin=None):
        """Initialize plugins; the GBP plugin defaults to GP_PLUGIN_KLASS."""
        super(ServiceChainDbTestCase, self).setUp(
            gp_plugin=gp_plugin or GP_PLUGIN_KLASS, core_plugin=core_plugin,
            sc_plugin=sc_plugin, service_plugins=service_plugins,
            ext_mgr=ext_mgr)
        self.plugin = self._sc_plugin
class TestServiceChainResources(ServiceChainDbTestCase):
    """CRUD, listing, ordering and in-use/deletion tests for the service
    chain DB resources: nodes, specs, instances and service profiles."""
    def _test_show_resource(self, resource, resource_id, attrs):
        """GET a single *resource* and verify every attribute in *attrs*."""
        resource_plural = self._get_resource_plural(resource)
        req = self.new_show_request(resource_plural, resource_id,
                                    fmt=self.fmt)
        res = self.deserialize(self.fmt,
                               req.get_response(self.ext_api))
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, res[resource][k])
    def test_create_servicechain_specs_same_node(self):
        """Two specs may reference the same servicechain node."""
        template1 = '{"key1":"value1"}'
        sp = self.create_service_profile(
            service_type=constants.FIREWALL)['service_profile']
        scn = self.create_servicechain_node(
            config=template1, service_profile_id=sp['id'])
        scn_id = scn['servicechain_node']['id']
        spec1 = {"servicechain_spec": {'name': 'scs1',
                                       'tenant_id': self._tenant_id,
                                       'nodes': [scn_id]}}
        spec_req = self.new_create_request('servicechain_specs',
                                           spec1,
                                           self.fmt)
        spec_res = spec_req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPCreated.code, spec_res.status_int)
        res = self.deserialize(self.fmt, spec_res)
        self.assertIn('servicechain_spec', res)
        self.assertEqual([scn_id], res['servicechain_spec']['nodes'])
        spec2 = {"servicechain_spec": {'name': 'scs2',
                                       'tenant_id': self._tenant_id,
                                       'nodes': [scn_id]}}
        spec_req = self.new_create_request('servicechain_specs',
                                           spec2,
                                           self.fmt)
        spec_res = spec_req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPCreated.code, spec_res.status_int)
        res = self.deserialize(self.fmt, spec_res)
        self.assertIn('servicechain_spec', res)
        self.assertEqual([scn_id], res['servicechain_spec']['nodes'])
    def test_create_and_show_servicechain_node(self):
        profile = self.create_service_profile(service_type=constants.FIREWALL)
        attrs = cm.get_create_servicechain_node_default_attrs(
            service_profile_id=profile['service_profile']['id'],
            config="config1")
        scn = self.create_servicechain_node(
            service_profile_id=profile['service_profile']['id'],
            config="config1")
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, scn['servicechain_node'][k])
        self._test_show_resource('servicechain_node',
                                 scn['servicechain_node']['id'],
                                 attrs)
    def test_list_servicechain_nodes(self):
        scns = [
            self._create_profiled_servicechain_node(name='scn1',
                                                    description='scn'),
            self._create_profiled_servicechain_node(name='scn2',
                                                    description='scn'),
            self._create_profiled_servicechain_node(name='scn3',
                                                    description='scn')]
        self._test_list_resources('servicechain_node', scns,
                                  query_params='description=scn')
    def test_update_servicechain_node(self):
        name = 'new_servicechain_node'
        description = 'new desc'
        config = 'new_config'
        profile = self.create_service_profile(service_type=constants.FIREWALL)
        attrs = cm.get_create_servicechain_node_default_attrs(
            name=name, description=description,
            config=config,
            service_profile_id=profile['service_profile']['id'])
        scn = self.create_servicechain_node(
            service_profile_id=profile['service_profile']['id'])
        data = {'servicechain_node': {'name': name,
                                      'description': description,
                                      'config': config}}
        req = self.new_update_request('servicechain_nodes', data,
                                      scn['servicechain_node']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, res['servicechain_node'][k])
        self._test_show_resource('servicechain_node',
                                 scn['servicechain_node']['id'],
                                 attrs)
    def test_delete_servicechain_node(self):
        ctx = context.get_admin_context()
        scn = self._create_profiled_servicechain_node()
        scn_id = scn['servicechain_node']['id']
        scs = self.create_servicechain_spec(nodes=[scn_id])
        scs_id = scs['servicechain_spec']['id']
        # Deleting Service Chain Node in use by a Spec should fail
        self.assertRaises(service_chain.ServiceChainNodeInUse,
                          self.plugin.delete_servicechain_node, ctx, scn_id)
        req = self.new_delete_request('servicechain_specs', scs_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        # After deleting the Service Chain Spec, node delete should succeed
        req = self.new_delete_request('servicechain_nodes', scn_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceChainNodeNotFound,
                          self.plugin.get_servicechain_node,
                          ctx, scn_id)
    def test_create_and_show_servicechain_spec(self):
        name = "scs1"
        scn = self._create_profiled_servicechain_node()
        scn_id = scn['servicechain_node']['id']
        attrs = cm.get_create_servicechain_spec_default_attrs(
            name=name, nodes=[scn_id])
        scs = self.create_servicechain_spec(name=name, nodes=[scn_id])
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, scs['servicechain_spec'][k])
        self._test_show_resource('servicechain_spec',
                                 scs['servicechain_spec']['id'],
                                 attrs)
    def test_create_spec_multiple_nodes(self):
        name = "scs1"
        scn1 = self._create_profiled_servicechain_node()
        scn1_id = scn1['servicechain_node']['id']
        scn2 = self._create_profiled_servicechain_node()
        scn2_id = scn2['servicechain_node']['id']
        attrs = cm.get_create_servicechain_spec_default_attrs(
            name=name, nodes=[scn1_id, scn2_id])
        scs = self.create_servicechain_spec(
            name=name, nodes=[scn1_id, scn2_id])
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, scs['servicechain_spec'][k])
    def test_list_servicechain_specs(self):
        scs = [self.create_servicechain_spec(name='scs1', description='scs'),
               self.create_servicechain_spec(name='scs2', description='scs'),
               self.create_servicechain_spec(name='scs3', description='scs')]
        self._test_list_resources('servicechain_spec', scs,
                                  query_params='description=scs')
    def test_node_ordering_list_servicechain_specs(self):
        """Node order inside a spec must be preserved by the DB layer."""
        scn1_id = self._create_profiled_servicechain_node()[
            'servicechain_node']['id']
        scn2_id = self._create_profiled_servicechain_node()[
            'servicechain_node']['id']
        nodes_list = [scn1_id, scn2_id]
        scs = self.create_servicechain_spec(name='scs1',
                                            nodes=nodes_list)
        self.assertEqual(nodes_list, scs['servicechain_spec']['nodes'])
        res = self._list('servicechain_specs')
        self.assertEqual(1, len(res['servicechain_specs']))
        self.assertEqual(nodes_list, res['servicechain_specs'][0]['nodes'])
        # Delete the service chain spec and create another with nodes in
        # reverse order and verify that proper ordering is maintained
        req = self.new_delete_request('servicechain_specs',
                                      scs['servicechain_spec']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        nodes_list.reverse()
        scs = self.create_servicechain_spec(name='scs1',
                                            nodes=nodes_list)
        self.assertEqual(scs['servicechain_spec']['nodes'], nodes_list)
        res = self._list('servicechain_specs')
        self.assertEqual(1, len(res['servicechain_specs']))
        self.assertEqual(nodes_list, res['servicechain_specs'][0]['nodes'])
    def test_update_servicechain_spec(self):
        name = "new_servicechain_spec1"
        description = 'new desc'
        scn_id = self._create_profiled_servicechain_node()[
            'servicechain_node']['id']
        attrs = cm.get_create_servicechain_spec_default_attrs(
            name=name, description=description, nodes=[scn_id])
        scs = self.create_servicechain_spec()
        data = {'servicechain_spec': {'name': name, 'description': description,
                                      'nodes': [scn_id]}}
        req = self.new_update_request('servicechain_specs', data,
                                      scs['servicechain_spec']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, res['servicechain_spec'][k])
        self._test_show_resource('servicechain_spec',
                                 scs['servicechain_spec']['id'], attrs)
    def test_delete_servicechain_spec(self):
        ctx = context.get_admin_context()
        scs = self.create_servicechain_spec()
        scs_id = scs['servicechain_spec']['id']
        req = self.new_delete_request('servicechain_specs', scs_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceChainSpecNotFound,
                          self.plugin.get_servicechain_spec, ctx, scs_id)
    def test_delete_spec_in_use_by_policy_action_rejected(self):
        """A spec referenced by a redirect policy action cannot be deleted."""
        ctx = context.get_admin_context()
        scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
        data = {'policy_action': {'action_type': 'redirect',
                                  'tenant_id': self._tenant_id,
                                  'action_value': scs_id}}
        pa_req = self.new_create_request('grouppolicy/policy_actions',
                                         data, self.fmt)
        res = pa_req.get_response(self.ext_api)
        # Fail loudly if the fixture policy action could not be created.
        if res.status_int >= webob.exc.HTTPClientError.code:
            raise webob.exc.HTTPClientError(code=res.status_int)
        self.assertRaises(service_chain.ServiceChainSpecInUse,
                          self.plugin.delete_servicechain_spec, ctx, scs_id)
    def test_delete_spec_in_use_by_instance_rejected(self):
        ctx = context.get_admin_context()
        scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
        sci = self.create_servicechain_instance(servicechain_specs=[scs_id])
        sci_id = sci['servicechain_instance']['id']
        # Deleting the Spec used by Instance should not be allowed
        self.assertRaises(service_chain.ServiceChainSpecInUse,
                          self.plugin.delete_servicechain_spec, ctx, scs_id)
        req = self.new_delete_request('servicechain_instances', sci_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceChainInstanceNotFound,
                          self.plugin.get_servicechain_instance,
                          ctx, sci_id)
        # Deleting the spec should succeed after the instance is deleted
        req = self.new_delete_request('servicechain_specs', scs_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceChainSpecNotFound,
                          self.plugin.get_servicechain_spec, ctx, scs_id)
    def test_create_and_show_servicechain_instance(self):
        scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
        policy_target_group_id = uuidutils.generate_uuid()
        classifier_id = uuidutils.generate_uuid()
        config_param_values = "{}"
        attrs = cm.get_create_servicechain_instance_default_attrs(
            servicechain_specs=[scs_id],
            provider_ptg_id=policy_target_group_id,
            consumer_ptg_id=policy_target_group_id,
            management_ptg_id=policy_target_group_id,
            classifier_id=classifier_id,
            config_param_values=config_param_values)
        sci = self.create_servicechain_instance(
            servicechain_specs=[scs_id],
            provider_ptg_id=policy_target_group_id,
            consumer_ptg_id=policy_target_group_id,
            management_ptg_id=policy_target_group_id,
            classifier_id=classifier_id,
            config_param_values=config_param_values)
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, sci['servicechain_instance'][k])
        self._test_show_resource('servicechain_instance',
                                 sci['servicechain_instance']['id'],
                                 attrs)
        # Clean up so the instance does not leak into other assertions.
        req = self.new_delete_request('servicechain_instances',
                                      sci['servicechain_instance']['id'])
        req.get_response(self.ext_api)
    def test_list_servicechain_instances(self):
        servicechain_instances = [self.create_servicechain_instance(
            name='sci1', description='sci'),
            self.create_servicechain_instance(name='sci2', description='sci'),
            self.create_servicechain_instance(name='sci3', description='sci')]
        self._test_list_resources('servicechain_instance',
                                  servicechain_instances,
                                  query_params='description=sci')
    def test_spec_ordering_list_servicechain_instances(self):
        """Spec order inside an instance must be preserved by the DB layer."""
        scs1_id = self.create_servicechain_spec()['servicechain_spec']['id']
        scs2_id = self.create_servicechain_spec()['servicechain_spec']['id']
        specs_list = [scs1_id, scs2_id]
        sci = self.create_servicechain_instance(name='sci1',
                                                servicechain_specs=specs_list)
        self.assertEqual(specs_list,
                         sci['servicechain_instance']['servicechain_specs'])
        res = self._list('servicechain_instances')
        self.assertEqual(1, len(res['servicechain_instances']))
        result_instance = res['servicechain_instances'][0]
        self.assertEqual(specs_list, result_instance['servicechain_specs'])
        # Delete the service chain instance and create another with specs in
        # reverse order and verify that proper ordering is maintained
        req = self.new_delete_request('servicechain_instances',
                                      sci['servicechain_instance']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        specs_list.reverse()
        sci = self.create_servicechain_instance(name='sci1',
                                                servicechain_specs=specs_list)
        self.assertEqual(specs_list,
                         sci['servicechain_instance']['servicechain_specs'])
        res = self._list('servicechain_instances')
        self.assertEqual(1, len(res['servicechain_instances']))
        result_instance = res['servicechain_instances'][0]
        self.assertEqual(specs_list,
                         result_instance['servicechain_specs'])
    def test_update_servicechain_instance(self):
        name = "new_servicechain_instance"
        description = 'new desc'
        config_param_values = "{}"
        scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
        provider_ptg_id = uuidutils.generate_uuid()
        consumer_ptg_id = uuidutils.generate_uuid()
        management_ptg_id = uuidutils.generate_uuid()
        classifier_id = uuidutils.generate_uuid()
        attrs = cm.get_create_servicechain_instance_default_attrs(
            name=name, description=description, servicechain_specs=[scs_id],
            provider_ptg_id=provider_ptg_id, consumer_ptg_id=consumer_ptg_id,
            management_ptg_id=management_ptg_id,
            classifier_id=classifier_id,
            config_param_values=config_param_values)
        sci = self.create_servicechain_instance(
            servicechain_specs=[scs_id], provider_ptg_id=provider_ptg_id,
            consumer_ptg_id=consumer_ptg_id,
            management_ptg_id=management_ptg_id, classifier_id=classifier_id,
            config_param_values=config_param_values)
        new_classifier_id = uuidutils.generate_uuid()
        new_scs_id = self.create_servicechain_spec()['servicechain_spec']['id']
        attrs.update({'servicechain_specs': [new_scs_id],
                      'classifier_id': new_classifier_id})
        data = {'servicechain_instance': {'name': name,
                                          'description': description,
                                          'servicechain_specs': [new_scs_id],
                                          'classifier_id': new_classifier_id}}
        req = self.new_update_request('servicechain_instances', data,
                                      sci['servicechain_instance']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        for k, v in six.iteritems(attrs):
            self.assertEqual(v, res['servicechain_instance'][k])
        self._test_show_resource('servicechain_instance',
                                 sci['servicechain_instance']['id'], attrs)
        # Clean up so the instance does not leak into other assertions.
        req = self.new_delete_request('servicechain_instances',
                                      sci['servicechain_instance']['id'])
        req.get_response(self.ext_api)
    def test_delete_servicechain_instance(self):
        ctx = context.get_admin_context()
        sci = self.create_servicechain_instance()
        sci_id = sci['servicechain_instance']['id']
        req = self.new_delete_request('servicechain_instances', sci_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceChainInstanceNotFound,
                          self.plugin.get_servicechain_instance,
                          ctx, sci_id)
    def test_create_and_show_service_profile(self):
        attrs = cm.get_create_service_profile_default_attrs(
            service_type=constants.FIREWALL, vendor="vendor1")
        scn = self.create_service_profile(
            service_type=constants.FIREWALL, vendor="vendor1")
        for k, v in six.iteritems(attrs):
            self.assertEqual(scn['service_profile'][k], v)
        self._test_show_resource('service_profile',
                                 scn['service_profile']['id'], attrs)
    def test_list_service_profile(self):
        scns = [self.create_service_profile(name='sp1', description='sp',
                                            service_type='LOADBALANCERV2'),
                self.create_service_profile(name='sp2', description='sp',
                                            service_type='LOADBALANCERV2'),
                self.create_service_profile(name='sp3', description='sp',
                                            service_type='LOADBALANCERV2')]
        self._test_list_resources('service_profile', scns,
                                  query_params='description=sp')
    def test_update_service_profile(self):
        name = 'new_service_profile'
        description = 'new desc'
        attrs = cm.get_create_service_profile_default_attrs(
            name=name, description=description,
            service_type=constants.FIREWALL)
        scn = self.create_service_profile(service_type=constants.FIREWALL)
        data = {'service_profile': {'name': name,
                                    'description': description}}
        req = self.new_update_request('service_profiles', data,
                                      scn['service_profile']['id'])
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        for k, v in six.iteritems(attrs):
            self.assertEqual(res['service_profile'][k], v)
        self._test_show_resource('service_profile',
                                 scn['service_profile']['id'], attrs)
    def test_delete_service_profile(self):
        ctx = context.get_admin_context()
        sp = self.create_service_profile(service_type='LOADBALANCERV2')
        sp_id = sp['service_profile']['id']
        scn = self.create_servicechain_node(service_profile_id=sp_id)
        scn_id = scn['servicechain_node']['id']
        # Deleting a Service Profile in use by a Node should fail
        self.assertRaises(service_chain.ServiceProfileInUse,
                          self.plugin.delete_service_profile, ctx, sp_id)
        req = self.new_delete_request('servicechain_nodes', scn_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        # After deleting the Node, profile delete should succeed
        req = self.new_delete_request('service_profiles', sp_id)
        res = req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
        self.assertRaises(service_chain.ServiceProfileNotFound,
                          self.plugin.get_service_profile,
                          ctx, sp_id)
class TestServiceChainStatusAttributesForResources(
        test_group_policy_db.TestStatusAttributesForResources):
    """Status attributes must be settable on every service chain resource."""
    def test_set_status_attrs(self):
        # Iterate every resource declared by the servicechain extension.
        for resource_name in service_chain.RESOURCE_ATTRIBUTE_MAP:
            self._test_set_status_attrs(self._get_resource_singular(
                resource_name), self._sc_plugin)
class TestQuotasForServiceChain(ServiceChainDbTestCase):
    """Quota enforcement: with each quota set to 1, a second create of
    every service chain resource type must be rejected."""
    def setUp(self, core_plugin=None, sc_plugin=None,
              gp_plugin=None, service_plugins=None, ext_mgr=None):
        # Pin every service chain quota to 1 before the stack is built.
        cfg.CONF.set_override('quota_servicechain_node', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_spec', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_instance', 1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_service_profile', 1,
                              group='QUOTAS')
        super(TestQuotasForServiceChain, self).setUp(
            core_plugin=core_plugin, sc_plugin=sc_plugin,
            gp_plugin=gp_plugin, service_plugins=service_plugins,
            ext_mgr=ext_mgr)
    def tearDown(self):
        # Restore -1 (presumably "unlimited" — the oslo.config quota
        # convention) so other test cases are unaffected.
        cfg.CONF.set_override('quota_servicechain_node', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_spec', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_servicechain_instance', -1,
                              group='QUOTAS')
        cfg.CONF.set_override('quota_service_profile', -1,
                              group='QUOTAS')
        super(TestQuotasForServiceChain, self).tearDown()
    def test_servicechain_node_quota(self):
        self.create_servicechain_node()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_node)
    def test_servicechain_spec_quota(self):
        self.create_servicechain_spec()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_spec)
    def test_servicechain_instance_quota(self):
        self.create_servicechain_instance()
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_servicechain_instance)
    def test_service_profile(self):
        self.create_service_profile(service_type=constants.FIREWALL)
        self.assertRaises(webob.exc.HTTPClientError,
                          self.create_service_profile,
                          service_type=constants.FIREWALL)
|
import unittest
import functools
from ..symbol_type import SymbolType
from ..lex_token import Token
from ..syntax_nonterminal import Nonterminal
from ..syntax_productions import productionList
# TODO: rewrite the formatter as a class instead of module globals.
# Config
isKeepComment = True  # keep source comments in the formatted output
isKeepGap = True  # keep blank-line gaps (runs of >= 2 newlines) in the output
class Indenter:
    """Tracks the current indentation level and renders it as spaces.

    The *AddAdd / *SubSub methods mimic C's ``i++`` / ``++i``: "post"
    variants render the indent before changing the level, "pre" variants
    change the level first.
    """
    IndentWidth = 4

    def __init__(self):
        # Current nesting depth; 0 means flush-left.
        self.nowLevel = 0

    def toCode(self):
        """Return the whitespace prefix for the current level."""
        return self.nowLevel * self.IndentWidth * ' '

    def postAddAdd(self):
        """Render the current indent, then go one level deeper."""
        rendered = self.toCode()
        self.nowLevel = self.nowLevel + 1
        return rendered

    def preAddAdd(self):
        """Go one level deeper, then render the new indent."""
        self.nowLevel = self.nowLevel + 1
        return self.toCode()

    def postSubSub(self):
        """Render the current indent, then go one level shallower."""
        rendered = self.toCode()
        self.nowLevel = self.nowLevel - 1
        return rendered

    def preSubSub(self):
        """Go one level shallower (never below zero), then render."""
        if self.nowLevel > 0:
            self.nowLevel = self.nowLevel - 1
        return self.toCode()
class GapManager:
    """Accumulates blank-line gaps and comments to emit when the next
    block of formatted output begins."""

    def __init__(self):
        self.count = 0
        self.commentListAhead = []
        self.commentListBehind = []

    def insertCommentAhead(self, comment):
        """Queue *comment* to appear before the blank gap."""
        self.commentListAhead.append(comment)

    def insertCommentBehind(self, comment):
        """Queue *comment* to appear after the blank gap."""
        self.commentListBehind.append(comment)

    def placeGap(self, increment):
        """Record *increment* pending gaps; emits nothing immediately."""
        self.count = self.count + increment
        return ''

    def startNewBlock(self):
        """Flush queued comments/gaps as source text and reset state."""
        pieces = [comment + '\n' for comment in self.commentListAhead]
        if self.count > 0:
            # Any number of recorded gaps collapses to one blank line.
            pieces.append('\n')
        pieces.extend(comment + '\n' for comment in self.commentListBehind)
        self.count = 0
        self.commentListAhead = []
        self.commentListBehind = []
        return ''.join(pieces)
# Module-level singleton; Formatter.__init__ installs the active instance
# here so the shortcut functions below (I, IAA, SSI, ...) can reach it.
SCFormatter = None
class Formatter:
    """Walks the token stream alongside the AST and re-emits formatted code.

    Creating a Formatter installs it as the module-global ``SCFormatter``
    so the free-function shortcuts (I, IAA, SSI, ...) can delegate to it.
    """
    def __init__(self, tokens, ast):
        global SCFormatter
        SCFormatter = self
        self.tokens = tokens
        self.ast = ast
        # reset() creates the indenter/gap manager and rewinds the token
        # cursor; previously those three assignments were duplicated here.
        self.reset()
    def toCode(self):
        """Render the whole AST back to source text."""
        return self.ast.toCode()
    # ---- for Token and String ----
    def Token2Code(self, token):
        """Advance the token cursor and return the token's literal text."""
        self.tokenIndex += 1
        return token.text
    def String2Code(self, s):
        """Advance the token cursor and return *s* unchanged."""
        self.tokenIndex += 1
        return s
    # ---- for Utility ----
    @staticmethod
    def _eatSpace(token):
        """Skip a run of whitespace tokens.

        Returns the first non-space token and the number of newlines seen.
        """
        enterCount = 0
        while token.kind == SymbolType.TokenType.SpaceLike:
            if token.text == '\n':
                enterCount += 1
            token = token.nextToken
        return token, enterCount
    def restoreComment(self):
        """Queue comments and blank-line gaps found between the current
        token and the next significant one onto the gap manager.

        Always returns '' — the queued text is emitted later via
        GapManager.startNewBlock().
        """
        TokenType = SymbolType.TokenType
        if self.tokenIndex < 0:
            return ''
        commentList = []
        lastToken = self.tokens[self.tokenIndex]
        # Stop at the next significant token, or at end of stream.
        stopToken = self.tokens[self.tokenIndex + 1] if self.tokenIndex + 1 < len(self.tokens) else None
        nextToken = lastToken.nextToken
        while nextToken is not stopToken:
            if isKeepComment and nextToken.kind == TokenType.Comment:
                commentList.append(self.indenter.toCode() + nextToken.text)
            elif isKeepGap and nextToken.text == '\n':
                nextToken, enterCount = Formatter._eatSpace(nextToken)
                # Two or more consecutive newlines count as one blank gap.
                if enterCount >= 2:
                    self.gapManager.placeGap(1)
                continue
            nextToken = nextToken.nextToken
        # A single comment goes after the gap; with several, the first goes
        # ahead of the gap and the rest after it.
        if len(commentList) == 1:
            self.gapManager.insertCommentBehind(commentList[0])
        elif commentList:
            self.gapManager.insertCommentAhead(commentList[0])
            for comment in commentList[1:]:
                self.gapManager.insertCommentBehind(comment)
        return ''
    def reset(self):
        """Restore pristine formatting state: fresh indenter and gap
        manager, token cursor before the first token."""
        self.indenter = Indenter()
        self.gapManager = GapManager()
        self.tokenIndex = -1
# do injection
# NOTE(review): presumably attaches toCode() implementations onto the Token
# and Nonterminal classes for each production — confirm in
# syntax_tree_to_code.
from . import syntax_tree_to_code
syntax_tree_to_code.doInjection(productionList, Token, Nonterminal)
# Shortcuts for frequently called formatter operations.
def I():
    """Emit pending comments/gaps followed by the current indentation."""
    # With correct layout, every new line starts with G() or I().
    code = SCFormatter.restoreComment()
    code += SCFormatter.gapManager.startNewBlock()
    code += SCFormatter.indenter.toCode()
    return code
def IAA():
    """Like I(), but post-increments the indent level (indent, then deepen)."""
    code = SCFormatter.restoreComment()
    code += SCFormatter.gapManager.startNewBlock()
    code += SCFormatter.indenter.postAddAdd()
    return code
def SSI():
    """Like I(), but pre-decrements the indent level (shallower, then indent)."""
    code = SCFormatter.restoreComment()
    code += SCFormatter.gapManager.startNewBlock()
    code += SCFormatter.indenter.preSubSub()
    return code
def STR(s):
    """Emit the literal string *s*, advancing the formatter's token cursor."""
    return SCFormatter.String2Code(s)
def Token2Code(token):
    """Emit *token*'s text, advancing the formatter's token cursor."""
    return SCFormatter.Token2Code(token)
def G(increment = 1):
    """Record *increment* blank-line gaps; emits nothing immediately."""
    return SCFormatter.gapManager.placeGap(increment)
def E():
    """Return a single line break."""
    return "\n"
class Test(unittest.TestCase):
    """Smoke tests for the formatter helpers."""
    def DtestIndenter(self):
        # Name does not start with "test", so unittest will not collect it
        # — presumably disabled on purpose.
        I = Indenter()
        print(I.toCode())
    def test(self):
        # Placeholder so the module always has one runnable test.
        pass
|
<reponame>zeta1999/OpenJij
# Copyright 2019 Jij Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cxxjij
import openjij
from openjij.sampler import SASampler
from openjij.model import BinaryQuadraticModel, ChimeraModel
from .base_gpu_chimera import BaseGPUChimeraSampler
from openjij.utils.graph_utils import chimera_to_ind
import numpy as np
import dimod
class GPUChimeraSASampler(SASampler, BaseGPUChimeraSampler):
    """Sampler with Simulated Annealing (SA) on GPU.
    Inherits from :class:`openjij.sampler.sampler.BaseSampler`.
    Args:
        beta_min (float): Minimum inverse temperature.
        beta_max (float): Maximum inverse temperature.
        num_sweeps (int): Length of Monte Carlo step.
        schedule_info (dict): Information about an annealing schedule.
        num_reads (int): Number of iterations.
        unit_num_L (int): Length of one side of two-dimensional lattice in which chimera unit cells are arranged.
    Raises:
        ValueError: If variables violate as below.
            - trotter number is odd.
            - no input "unit_num_L" to an argument or this constructor.
            - given problem graph is incompatible with chimera graph.
        AttributeError: If GPU doesn't work.
    """
    def __init__(self,
                 beta_min=None, beta_max=None,
                 num_sweeps=1000, schedule=None,
                 num_reads=1, unit_num_L=None,
                 ):
        super().__init__(beta_min=beta_min, beta_max=beta_max,
                         num_reads=num_reads, num_sweeps=num_sweeps,
                         schedule=schedule)
        self.unit_num_L = unit_num_L
        # Dispatch tables: updater name -> GPU system factory / algorithm
        # runner, consumed by the inherited _sampling machinery.
        self._make_system = {
            'singlespinflip': cxxjij.system.make_chimera_classical_gpu
        }
        self._algorithm = {
            'singlespinflip': cxxjij.algorithm.Algorithm_GPU_run
        }
    def sample_ising(self, h, J, beta_min=None, beta_max=None,
                     num_sweeps=None, num_reads=1, schedule=None,
                     initial_state=None, updater='single spin flip',
                     reinitialize_state=True, seed=None, unit_num_L=None,
                     ):
        """sample with Ising model.
        Args:
            h (dict): linear biases
            J (dict): quadratic biases
            beta_min (float): minimal value of inverse temperature
            beta_max (float): maximum value of inverse temperature
            num_sweeps (int): number of sweeps
            num_reads (int): number of reads
            schedule (list): list of inverse temperature
            initial_state (dict): initial state
            updater(str): updater algorithm
            reinitialize_state (bool): if true reinitialize state for each run
            seed (int): seed for Monte Carlo algorithm
            unit_num_L (int): number of chimera units
        Returns:
            :class:`openjij.sampler.response.Response`: results
        Examples::
            >>> sampler = oj.GPUChimeraSASampler(unit_num_L=2)
            >>> h = {0: -1, 1: -1, 2: 1, 3: 1}
            >>> J = {(0, 4): -1, (2, 5): -1}
            >>> res = sampler.sample_ising(h, J)
        """
        # Allow a per-call override of the lattice size.
        # NOTE(review): if unit_num_L is still None here, the size
        # computation below raises TypeError rather than the documented
        # ValueError — confirm intended behaviour.
        self.unit_num_L = unit_num_L if unit_num_L else self.unit_num_L
        model = openjij.ChimeraModel(linear=h, quadratic=J, var_type='SPIN',
                                     unit_num_L=self.unit_num_L, gpu=True)
        # define Chimera structure
        structure = {}
        # A chimera unit cell holds 8 sites, hence 8 * L * L spins in total.
        structure['size'] = 8 * self.unit_num_L * self.unit_num_L
        structure['dict'] = {}
        if isinstance(model.indices[0], int):
            # identity dict
            for ind in model.indices:
                structure['dict'][ind] = ind
        elif isinstance(model.indices[0], tuple):
            # map chimera coordinate to index
            for ind in model.indices:
                structure['dict'][ind] = model.to_index(*ind, self.unit_num_L)
        return self._sampling(model, beta_min, beta_max,
                              num_sweeps, num_reads, schedule,
                              initial_state, updater,
                              reinitialize_state, seed, structure)
|
# -*- coding: utf-8 -*-
"""
"""
import pytest
import sqlalchemy as sa
from pynuget import db
def test_count_packages(session):
    """count_packages reflects a newly added package.

    The total of 2 implies the session fixture seeds exactly one package
    — confirm against the fixture if it changes.
    """
    session.add(db.Package(name="pkg_2", latest_version="0.0.1"))
    session.commit()
    assert db.count_packages(session) == 2
def test_search_packages(session):
    """Exercise db.search_packages with defaults, a search query, a
    filter, and a custom ordering."""
    # Test with no args
    result = db.search_packages(session)
    assert len(result) == 3
    # isinstance instead of `type(...) ==` — idiomatic and subclass-safe.
    assert isinstance(result[0], db.Version)
    assert result[0].package.latest_version == "0.0.3"
    # add a little more dummy data
    pkg = db.Package(name="test_proj", latest_version="0.1.3")
    session.add(pkg)
    session.commit()
    session.add(db.Version(package_id=pkg.package_id, version="0.1.3"))
    session.commit()
    # Test with a search query. Note that the wildcards are added by
    # the function, and so they're not needed.
    result = db.search_packages(session, search_query='test')
    assert len(result) == 1
    assert result[0].package.name == "test_proj"
    # Test with a filter.
    result = db.search_packages(session,
                                filter_='IsLatestVersion',
                                search_query='%dummy%')
    assert len(result) == 1
    assert result[0].version == "0.0.3"
    # Test with a different order_by
    result = db.search_packages(session,
                                order_by=sa.desc(db.Version.version))
    assert len(result) == 4
    assert result[0].version == '0.1.3'
    assert result[-1].version == '0.0.1'
def test_package_updates(session):
    """package_updates returns the newest Version for each installed
    package whose installed version is out of date."""
    # Add some more dummy data.
    pkg = db.Package(name="test_proj", latest_version="0.1.4")
    session.add(pkg)
    session.commit()
    session.add(db.Version(package_id=pkg.package_id, version="0.1.3"))
    session.add(db.Version(package_id=pkg.package_id, version="0.1.4"))
    session.commit()
    # {package_id: currently-installed version}
    data = {1: '0.0.2', 2: '0.1.3'}
    result = db.package_updates(session, data)
    assert len(result) == 2
    # isinstance instead of `type(...) ==` — idiomatic and subclass-safe.
    assert isinstance(result[0], db.Version)
    assert result[0].package.name == "dummy"
    assert result[0].version == "0.0.3"
    assert result[1].package.name == "test_proj"
    assert result[1].version == "0.1.4"
    # if we currently have the latest version, return nothing. Do not return
    # packages that we don't have installed.
    data = {1: '0.0.3'}
    result = db.package_updates(session, data)
    assert len(result) == 0
def test_find_by_pkg_name(session):
    """find_by_pkg_name returns all versions of a package, or only the
    requested version when one is given."""
    result_1 = db.find_by_pkg_name(session, 'dummy')
    # isinstance instead of `type(...) ==` — idiomatic and subclass-safe.
    assert isinstance(result_1, list)
    assert len(result_1) == 3
    assert isinstance(result_1[0], db.Version)
    result_2 = db.find_by_pkg_name(session, 'dummy', "0.0.1")
    assert len(result_2) == 1
    assert result_2[0].version == "0.0.1"
def test_validate_id_and_version(session):
    """validate_id_and_version is True only for an existing name/version."""
    result_1 = db.validate_id_and_version(session, 'dummy', "0.0.1")
    assert result_1 is True
    result_2 = db.validate_id_and_version(session, 'dummy', "9.9.9")
    assert result_2 is False
def test_increment_download_count(session):
    """Both the per-version and the per-package download counters go up
    by exactly one."""
    # Get the previous values
    version_sql = (session.query(db.Version.version_download_count)
                   .filter(db.Version.version_id == 1))
    package_sql = (session.query(db.Package.download_count)
                   .filter(db.Package.name == 'dummy'))
    prev_version_count = version_sql.scalar()
    prev_package_count = package_sql.scalar()
    # Run the function
    db.increment_download_count(session, 'dummy', '0.0.1')
    # Make our assertions
    assert prev_version_count + 1 == version_sql.scalar()
    assert prev_package_count + 1 == package_sql.scalar()
def test_insert_or_update_package(session):
    """insert_or_update_package creates a row when the name is new and
    updates title/latest_version (but not download_count) when it exists."""
    name = "NewPackage"
    title = "Some Title"
    latest_version = "0.0.1"
    # Test Insert
    db.insert_or_update_package(session, name, title, latest_version)
    sql = (session.query(db.Package)
               .filter(db.Package.name == name)
           )
    result = sql.scalar()
    assert isinstance(result, db.Package)
    assert result.name == name
    assert result.title == title
    assert result.latest_version == latest_version
    assert result.download_count == 0
    # Test update
    db.insert_or_update_package(session, name, "New Title", "0.0.2")
    result = sql.scalar()
    assert result.title == "New Title"
    assert result.latest_version == '0.0.2'
    assert result.download_count == 0
def test_insert_version(session):
    """insert_version adds exactly one row to the Version table."""
    count_query = session.query(sa.func.count(db.Version.version_id))
    before = count_query.scalar()
    db.insert_version(session, package_id=1, version="0.0.1")
    # Make sure it was added
    assert count_query.scalar() == before + 1
def test_delete_version(session):
    """delete_version removes one version, recomputes latest_version when
    the newest version goes away, and drops the Package row when its last
    version is deleted."""
    # Add additional dummy data.
    pkg = db.Package(name="Foo", latest_version="0.9.6")
    session.add(pkg)
    session.commit()
    session.add(db.Version(package_id=pkg.package_id, version="0.9.6"))
    session.add(db.Version(package_id=pkg.package_id, version="0.9.7"))
    session.commit()
    # The package we're interested in.
    pkg_id = 'dummy'
    # get our initial counts
    version_count = (session.query(sa.func.count(db.Version.version_id))
                     .join(db.Package)
                     .filter(db.Package.name == pkg_id))
    package_count = session.query(sa.func.count(db.Package.package_id))
    initial_version_count = version_count.scalar()
    initial_package_count = package_count.scalar()
    db.delete_version(session, pkg_id, '0.0.2')
    # The number of versions for our package should have decreased by 1.
    assert initial_version_count - 1 == version_count.scalar()
    # We should still have 2 packages
    assert initial_package_count == 2
    assert initial_package_count == package_count.scalar()
    # The deleted version should not show up at all. (Note this particular
    # test only works because our dummy data only has 1 instance of "0.0.2")
    # BUG FIX: querying a single column yields 1-tuples, so the old check
    # `'0.0.2' not in ...all()` was vacuously true; compare against the
    # unwrapped version strings instead.
    remaining = [row.version for row in session.query(db.Version.version)]
    assert '0.0.2' not in remaining
    # Deleting the highest version should change the `latest_version` value
    qry = session.query(db.Package).filter(db.Package.name == 'dummy')
    db.delete_version(session, pkg_id, '0.0.3')
    assert qry.one().latest_version == '0.0.1'
    # Deleting the last version of a package should remove the row from the
    # Packages table.
    db.delete_version(session, pkg_id, '0.0.1')
    assert version_count.scalar() == 0
    assert package_count.scalar() == 1
|
import re
from typing import Optional

from lxml.etree import XMLSyntaxError

from galaxy.tool_util.verify import asserts
from galaxy.util import (
    asbool,
    parse_xml_string,
    unicodify,
)
def assert_is_valid_xml(output):
    """ Simple assertion that just verifies the specified output
    is valid XML."""
    try:
        parse_xml_string(output)
    except XMLSyntaxError as e:
        # Surface the parser's message so the failure explains *why* the
        # XML is invalid.
        raise AssertionError(f"Expected valid XML, but could not parse output. {unicodify(e)}")
def assert_has_element_with_path(output, path, negate: bool = False):
    """Assert that *output* contains at least one XML element whose path
    matches *path*.

    Valid paths are the simplified subsets of XPath implemented by
    lxml.etree; see https://lxml.de/xpathxslt.html for more information.
    """
    # Delegate to the generic element assertion with default count options.
    assert_xml_element(output, path, negate=negate)
def assert_has_n_elements_with_path(output, path, n: int = None, delta: int = 0, min: int = None, max: int = None, negate: bool = False):
    """Assert that *output* contains the expected number of elements
    matching *path*: exactly ``n`` (within ``+-delta``), or a count that
    lies inside ``[min, max]``."""
    assert_xml_element(
        output,
        path,
        n=n,
        delta=delta,
        min=min,
        max=max,
        negate=negate,
    )
def assert_element_text_matches(output, path, expression, negate: bool = False):
    """Assert that the text of the first element matching *path* matches
    the regular expression *expression*."""
    # Express the check as a nested has_text_matching assertion and let the
    # generic XML walker apply it to the element's text.
    child = {
        "tag": "has_text_matching",
        "attributes": {"expression": expression, "negate": negate},
    }
    assert_xml_element(output, path, asserts.verify_assertions, [child])
def assert_element_text_is(output, path, text, negate: bool = False):
    """Assert that the text of the first element matching *path* equals
    *text* exactly."""
    # Escape the literal and anchor it with "$" so the regex match is exact.
    pattern = re.escape(text) + "$"
    assert_element_text_matches(output, path, pattern, negate=negate)
def assert_attribute_matches(output, path, attribute, expression, negate: bool = False):
    """Assert that attribute *attribute* of the first element matching
    *path* matches the regular expression *expression*."""
    # Same nested-assertion trick as assert_element_text_matches, but the
    # walker is pointed at the attribute's content instead of element text.
    child = {
        "tag": "has_text_matching",
        "attributes": {"expression": expression, "negate": negate},
    }
    assert_xml_element(output, path, asserts.verify_assertions, [child], attribute=attribute)
def assert_attribute_is(output, path, attribute, text, negate: bool = False):
    """Assert that attribute *attribute* of the first element matching
    *path* equals *text* exactly."""
    # Exact match expressed as an anchored, escaped regular expression.
    pattern = re.escape(text) + "$"
    assert_attribute_matches(output, path, attribute, pattern, negate=negate)
def assert_element_text(output, path, verify_assertions_function, children, negate: bool = False):
    """Recursively apply the *children* assertions against the text of the
    first element matching *path*."""
    assert_xml_element(
        output, path, verify_assertions_function, children, negate=negate
    )
def assert_xml_element(output, path, verify_assertions_function=None, children=None, attribute=None, all=False, n: int = None, delta: int = 0, min: int = None, max: int = None, negate: bool = False):
    """
    Check if path occurs in the xml. If n and delta or min and max are given
    also the number of occurrences is checked.
    If there are any sub assertions then check them against
    - the element's text if attribute is None
    - the content of the attribute
    If all is True then the sub assertions are checked for all occurrences.

    NOTE(review): relies on the private helper
    ``asserts._util._assert_presence_number`` -- confirm this remains the
    intended extension point.
    """
    children = children or []
    # `all` may arrive as a string (e.g. "true") from tool XML; normalise it.
    all = asbool(all)
    # assert that path is in output (the specified number of times)
    xml = parse_xml_string(output)
    asserts._util._assert_presence_number(xml, path, n, delta, min, max, negate,
                                          lambda x, p: x.find(p) is not None,
                                          lambda x, p: len(x.findall(p)),
                                          "{expected} path '{text}' in xml",
                                          "{expected} {n}+-{delta} occurrences of path '{text}' in xml",
                                          "{expected} that the number of occurences of path '{text}' in xml is in [{min}:{max}]")
    # check sub-assertions
    if len(children) == 0 or verify_assertions_function is None:
        # Nothing further to verify against element text or attributes.
        return
    for occ in xml.findall(path):
        if attribute is None or attribute == "":
            content = occ.text
        else:
            # A missing attribute raises KeyError here and propagates.
            content = occ.attrib[attribute]
        try:
            verify_assertions_function(content, children)
        except AssertionError as e:
            # Re-raise with context about which element/attribute failed.
            if attribute is not None and attribute != "":
                raise AssertionError(f"Attribute '{attribute}' on element with path '{path}': {str(e)}")
            else:
                raise AssertionError(f"Text of element with path '{path}': {str(e)}")
        if not all:
            # By default only the first matching element is checked.
            break
|
import re
import uuid
from mapswipe_workers.auth import firebaseDB
from mapswipe_workers.definitions import CustomError, logger
def remove_all_team_members(team_id):
    """Remove the teamId attribute for all users of the team.

    Args:
        team_id: Firebase key of the team whose members are detached.

    Raises:
        CustomError: if the team does not exist or any Firebase call fails.
    """
    fb_db = firebaseDB()
    try:
        # check if team exists in firebase
        if not fb_db.reference(f"v2/teams/{team_id}").get():
            raise CustomError(f"can't find team in firebase: {team_id}")
        # get team name from firebase
        team_name = fb_db.reference(f"v2/teams/{team_id}/teamName").get()
        # look up all users currently assigned to this team
        team_members = (
            fb_db.reference("v2/users/")
            .order_by_child("teamId")
            .equal_to(team_id)
            .get()
        )
        # remove teamId attribute for each member
        if not team_members:
            logger.info(f"there are no members of the team {team_id} - '{team_name}'")
        else:
            for user_id in team_members.keys():
                # update data in firebase
                ref = fb_db.reference(f"v2/users/{user_id}/")
                ref.update({"teamId": None})
                logger.info(
                    f"removed teamId {team_id} - '{team_name}' for user {user_id}"
                )
            logger.info(
                f"removed all team members from team: {team_id} - '{team_name}'"
            )
    except Exception as e:
        # BUG FIX: previously logged "could not create team" (copy/paste from
        # create_team) and referenced team_name, which is unbound when the
        # initial team lookup itself raises.
        logger.info(f"could not remove team members from team: {team_id}")
        raise CustomError(e)
def create_team(team_name):
    """Create a new team in Firebase and return its key and access token."""
    fb_db = firebaseDB()  # noqa E841
    try:
        # A fresh uuid4 serves as the team's shared access token.
        team_token = str(uuid.uuid4())
        # Push a new child under v2/teams/ and populate it.
        teams_ref = fb_db.reference("v2/teams/")
        new_team = teams_ref.push()
        new_team.set({"teamName": team_name, "teamToken": team_token})
        logger.info(f"created team: {new_team.key} - '{team_name}' - {team_token}")
        return new_team.key, team_token
    except Exception as e:
        logger.info(f"could not create team: {team_name}")
        raise CustomError(e)
def delete_team(team_id):
    """Delete team in Firebase."""
    # TODO: What is the consequence of this on projects and users
    # do we expect that the teamId is removed there as well?
    # teamId is removed for users, but not for projects at the moment
    fb_db = firebaseDB()  # noqa E841
    try:
        # Abort early if the team is unknown.
        if not fb_db.reference(f"v2/teams/{team_id}").get():
            raise CustomError(f"can't find team in firebase: {team_id}")
        # Detach every member before the team itself disappears.
        remove_all_team_members(team_id)
        team_name = fb_db.reference(f"v2/teams/{team_id}/teamName").get()
        # Guard against malformed references (e.g. a team_id of None) before
        # issuing the destructive delete.
        ref = fb_db.reference(f"v2/teams/{team_id}")
        if not re.match(r"/v2/\w+/[-a-zA-Z0-9]+", ref.path):
            raise CustomError(
                f"""Given argument resulted in invalid Firebase Realtime Database reference.
                {ref.path}"""
            )
        ref.delete()
        logger.info(f"deleted team: {team_id} - '{team_name}'")
    except Exception as e:
        logger.info(f"could not delete team: {team_id}")
        raise CustomError(e)
def renew_team_token(team_id):
    """Generate and store a new access token for an existing team.

    (BUG FIX: the docstring previously said "Create new team", copied from
    create_team.)

    Args:
        team_id: Firebase key of the team.

    Returns:
        The newly generated team token (uuid4 string).

    Raises:
        CustomError: if the team does not exist or any Firebase call fails.
    """
    fb_db = firebaseDB()
    try:
        # check if team exists in firebase
        if not fb_db.reference(f"v2/teams/{team_id}").get():
            raise CustomError(f"can't find team in firebase: {team_id}")
        # get team name from firebase (used for logging only)
        team_name = fb_db.reference(f"v2/teams/{team_id}/teamName").get()
        # check if reference path is valid, e.g. if team_id is None
        ref = fb_db.reference(f"v2/teams/{team_id}")
        if not re.match(r"/v2/\w+/[-a-zA-Z0-9]+", ref.path):
            raise CustomError(
                f"""Given argument resulted in invalid Firebase Realtime Database reference.
                {ref.path}"""
            )
        # generate new uuid4 token
        new_team_token = str(uuid.uuid4())
        # set team token in firebase
        ref.update({"teamToken": new_team_token})
        logger.info(f"renewed team token: {team_id} - '{team_name}' - {new_team_token}")
        return new_team_token
    except Exception as e:
        # BUG FIX: previously logged "could not delete team" (copy/paste from
        # delete_team).
        logger.info(f"could not renew token for team: {team_id}")
        raise CustomError(e)
|
<reponame>nickpartner-goahead/resilient-community-apps<gh_stars>10-100
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from mock import patch
# Package/function under test; used for config and definition lookups.
PACKAGE_NAME = "fn_qradar_integration"
FUNCTION_NAME = "qradar_get_reference_tables"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
# Canned payload shaped like the QRadar reference-table listing; used as the
# patched return value in the mocked tests below.
MOCK_GET_TABLE_RESPONSE = [
    {
        "timeout_type": "LAST_SEEN",
        "number_of_elements": 0,
        "creation_time": 1464119414213,
        "name": "Mock URLs Data",
        "namespace": "SHARED",
        "key_name_types": {
            "Brand": "ALNIC",
            "First Seen Date": "DATE",
            "Identifier": "NUM",
            "Confidence": "NUM",
            "Last Seen Date": "DATE",
            "Report URL": "ALNIC",
            "Malware Family": "ALNIC",
            "Portal URL": "ALNIC",
            "Infrastructure Type": "ALNIC",
            "Provider": "ALN"
        },
        "element_type": "ALNIC",
        "collection_id": 181
    },
    {
        "timeout_type": "LAST_SEEN",
        "number_of_elements": 100,
        "creation_time": 1464119421471,
        "name": "Les faux logiciels malveillants hachent les données SHA",
        "namespace": "SHARED",
        "key_name_types": {
            "Brand": "ALNIC",
            "First Seen Date": "DATE",
            "Identifier": "NUM",
            "Confidence": "NUM",
            "Last Seen Date": "DATE",
            "Portal URL": "ALNIC",
            "Malware Family": "ALNIC",
            "Report URL": "ALNIC",
            "Infrastructure Type": "ALNIC",
            "Provider": "ALN"
        },
        "element_type": "ALNIC",
        "collection_id": 190
    }]
def call_qradar_get_reference_tables_function(circuits, function_params, timeout=5):
    """Fire the function's test event on the circuits manager and return the
    FunctionResult value, re-raising any exception the function component hit.

    :param circuits: the circuits_app test fixture.
    :param function_params: dict of inputs for the function.
    :param timeout: seconds to wait for either the exception or result event.
    """
    # Create the submitTestFunction event
    evt = SubmitTestFunction("qradar_get_reference_tables", function_params)
    # Fire a message to the function
    circuits.manager.fire(evt)
    # circuits will fire an "exception" event if an exception is raised in the FunctionComponent
    # return this exception if it is raised
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        exception = exception_event.args[1]
        raise exception
    # else return the FunctionComponent's results
    else:
        event = circuits.watcher.wait("qradar_get_reference_tables_result", parent=evt, timeout=timeout)
        assert event
        assert isinstance(event.kwargs["result"], FunctionResult)
        pytest.wait_for(event, "complete", True)
        return event.kwargs["result"].value
class TestQradarGetReferenceTables:
    """ Tests for the qradar_get_reference_tables function"""

    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None

    # Both parameter sets are empty: the function takes no inputs and always
    # returns the full reference-table listing.
    mock_inputs_1 = {
    }
    expected_results_1 = MOCK_GET_TABLE_RESPONSE
    mock_inputs_2 = {
    }
    expected_results_2 = MOCK_GET_TABLE_RESPONSE

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_inputs_1, expected_results_1),
        (mock_inputs_2, expected_results_2)
    ])
    @pytest.mark.livetest
    def test_live_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_qradar_get_reference_tables_function(circuits_app, mock_inputs)
        assert results
        assert results['content']  # Ensure we have some results
        for entry in results['content']:
            assert entry.get('collection_id', False)
            assert entry.get('name', False)
            assert isinstance(entry.get('number_of_elements', False), int)

    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (mock_inputs_1, expected_results_1),
        (mock_inputs_2, expected_results_2)
    ])
    def test_mocked_success(self, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        # Patch the facade so no live QRadar appliance is needed.
        with patch('fn_qradar_integration.lib.reference_data.ReferenceTableFacade.ReferenceTableFacade.get_all_reference_tables') as patched_add_element:
            patched_add_element.return_value = expected_results
            results = call_qradar_get_reference_tables_function(circuits_app, mock_inputs)
            assert results
            assert results['content']  # Ensure we have some results
            for entry in results['content']:
                assert entry.get('collection_id', False)
                assert entry.get('name', False)
                assert isinstance(entry.get('number_of_elements', False), int)
|
<gh_stars>1-10
import sys
sys.path.append("../../utils")
import numpy as np
from scipy.ndimage import convolve, distance_transform_edt
from utils.gaussian_kernel import getGaussianKernel
def _funcHeavyside(x, eps=1.):
    ''' Return a value of H(x), the smoothed Heaviside step function.

    H_eps(x) = 1/2 * (1 + (2/pi) * arctan(x/eps)); ``eps`` controls the
    smoothing width.

    BUG FIX: the factor was written ``(2./np.pi*np)``, which multiplies by
    the numpy module object itself and raises a TypeError at runtime.
    '''
    return .5*(1. + (2./np.pi) * np.arctan(x/eps))
def _funcDelta(x, eps=1.):
    ''' Return a value of delta(x), the smoothed Dirac delta function.

    This is the derivative of the smoothed Heaviside function:
    delta_eps(x) = eps / (pi * (x^2 + eps^2)).
    '''
    denominator = (x**2. + eps**2.) * np.pi
    return eps / denominator
def _calcLaplacian(x):
    ''' Apply the 5-point Laplacian stencil to a 2-D array.

    Border values are replicated ("nearest") so the output keeps the
    input's shape.
    '''
    # Standard finite-difference Laplacian kernel.
    kernel = np.array([[0., 1., 0.],
                       [1., -4., 1.],
                       [0., 1., 0.]])
    return convolve(x, kernel, mode="nearest")
def _calcGradient(x):
    ''' Return the central-difference gradient (d/daxis0, d/daxis1) of a
    2-D array as a tuple of two arrays with the input's shape.
    '''
    # Replicate the border so central differences are defined everywhere.
    padded = np.pad(x, pad_width=1, mode="edge")
    grad_rows = .5*(padded[2:, 1:-1] - padded[:-2, 1:-1])
    grad_cols = .5*(padded[1:-1, 2:] - padded[1:-1, :-2])
    return (grad_rows, grad_cols)
def _calcCurvature(u, eta=1.e-8):
    ''' Return curvature energy

    Computes the mean curvature div(grad(u)/|grad(u)|) of the implicit
    function ``u`` via central differences; ``eta`` keeps the denominator
    nonzero where the gradient vanishes.
    '''
    # Pad input domain (replicated border)
    pad = np.pad(u, pad_width=1, mode="edge")
    # Calculate first and second derivatives
    fx = .5*(pad[2:, 1:-1] - pad[:-2, 1:-1])
    fy = .5*(pad[1:-1, 2:] - pad[1:-1, :-2])
    fxx = pad[2:, 1:-1] - 2.*u + pad[:-2, 1:-1]
    fyy = pad[1:-1, 2:] - 2.*u + pad[1:-1, :-2]
    # Mixed derivative from the four diagonal neighbours
    fxy = .25*(pad[2:, 2:] + pad[:-2, :-2]
               - pad[:-2, 2:] - pad[2:, :-2])
    # Curvature formula for an implicit 2-D curve (regularized by eta)
    return ((fxx*fy**2 - 2*fxy*fx*fy + fyy*fx**2)
            / (np.power(fx**2. + fy**2., 1.5) + eta))
class RSF(object):
    ''' Minimization of Region Scalable Fitting energy (Li et al., 2008).

    Parameters
    ----------------
    maxIter: int
        maximum number of level-set evolution steps.
    dt: float
        time step of the explicit evolution.
    tol: float
        RMS change of the level-set below which iteration stops.
    mu, nu: float
        weights of the distance-regularization and curvature terms.
    lambda1, lambda2: float
        weights of the inside/outside region fitting terms.
    threshold: float
        level above which the final level-set is labelled as the region.
    '''

    def __init__(self, maxIter=500, dt=.1, tol=1.e-4,
                 mu=1., nu=.3e-3, lambda1=1., lambda2=1.,
                 threshold=0.):
        # Model parameters
        self.mu = mu
        self.nu = nu
        self.lambda1 = lambda1
        self.lambda2 = lambda2
        # BUG FIX: was hard-coded to 500, silently ignoring the argument.
        self.maxIter = maxIter
        self.dt = dt
        self.tol = tol
        self.threshold = threshold

    def getInitLevelSet(self, seed):
        ''' Initialize the level-set as a signed distance function of the
        binary seed map (positive inside the seed, negative outside).
        '''
        # BUG FIX: np.float was removed in NumPy >= 1.20; use builtin float.
        seed = seed.astype(float)
        phi = (distance_transform_edt(seed)
               - distance_transform_edt(1-seed)
               + distance_transform_edt(seed-.5 > 0))
        return phi

    def getRevLevelSet(self, u, phi, sigma=3):
        ''' Perform one explicit evolution step of the level-set ``phi``
        driven by image ``u`` and return the updated level-set.
        '''
        # Curvature (contour length) term
        curvature = _calcCurvature(phi)
        Ecurv = self.nu*_funcDelta(phi)*curvature
        # Distance-regularization term
        laplacian = _calcLaplacian(phi)
        Ereg = self.mu*(laplacian - curvature)
        # Localization kernel for the region-scalable fit
        KERNEL = getGaussianKernel(sigma, (4*sigma+1, 4*sigma+1), ndim=2)
        # f1/f2 are the kernel-weighted local means inside/outside the contour
        region = _funcHeavyside(phi)
        KIH1 = convolve(region*u, KERNEL, mode="nearest")
        KH1 = convolve(region, KERNEL, mode="nearest")
        f1 = KIH1 / KH1
        KIH2 = convolve((1. - region)*u, KERNEL, mode="nearest")
        KH2 = convolve((1. - region), KERNEL, mode="nearest")
        f2 = KIH2 / KH2
        # lambda1*e1 - lambda2*e2 expanded as R1 - 2*u*R2 + R3
        # (cf. Li et al. 2008 gradient flow).
        # BUG FIX: R1 referenced an undefined name ``y`` (NameError at
        # runtime); the expansion requires u**2.  R2 dropped the
        # -lambda2*f2 term and R3 used lambda2 for both coefficients.
        R1 = (self.lambda1 - self.lambda2)*u*u
        R2 = convolve(self.lambda1*f1 - self.lambda2*f2, KERNEL, mode="nearest")
        R3 = convolve(self.lambda1*f1*f1 - self.lambda2*f2*f2, KERNEL, mode="nearest")
        Ersf = -_funcDelta(phi)*(R1 - 2.*R2*u + R3)
        # Explicit Euler update of the level-set
        revPhi = phi + self.dt*(Ecurv + Ereg + Ersf)
        return revPhi

    def run(self, image, seed):
        ''' Evolve the level-set until convergence and return the binary
        segmentation mask (uint8) of the positive region.
        '''
        # Normalize the input image to [0, 1]
        image = np.array(image, dtype=np.float32)
        image = (image - image.min())/(image.max() - image.min())
        # Initialize level-set from the seed
        phi = self.getInitLevelSet(seed)
        for _ in range(self.maxIter):
            # keep previous level for the convergence test
            prevPhi = phi.copy()
            # Update level-set function
            phi = self.getRevLevelSet(image, phi)
            # RMS change as the convergence criterion
            mse = np.sqrt(((phi-prevPhi)**2.).mean())
            if mse < self.tol:
                break
        # Positive levels form the segmented region
        region = np.uint8(phi > self.threshold)
        return region
|
<reponame>NeonOcean/Environment<filename>S4/S4 Library/simulation/gsi_handlers/animation_archive_handlers.py
import itertools
from animation.animation_utils import clip_event_type_name
from gsi_handlers.gameplay_archiver import GameplayArchiver
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
from sims4.utils import setdefault_callable
from uid import UniqueIdGenerator
import services
import sims4.log
logger = sims4.log.Logger('GSI')
with sims4.reload.protected(globals()):
    # Survive script reloads: keep the id generator and the per-ARB archive
    # dictionary alive across module reloads.
    gsi_log_id = UniqueIdGenerator()
    animation_archive = {}
class AnimationArchiveGSILog:
    """Accumulates ARB/ASM data for a single animation archive entry."""

    def __init__(self):
        # A fresh instance simply starts from a cleared state.
        self.clear_log()

    def clear_log(self):
        """Reset the log: new id, current sim time, empty event/request stores."""
        self.id = gsi_log_id()
        time_service = services.time_service()
        # The time service (or its timeline) may not exist yet.
        timeline = time_service.sim_timeline if time_service is not None else None
        self.now = str(timeline.now) if timeline is not None else 'Unavailable'
        self.events = []
        self.asm_requests = {}
# Top-level GSI grid: one row per archived animation request (per ARB).
animation_archive_schema = GsiGridSchema(label='Animation Archive', sim_specific=True)
animation_archive_schema.add_field('game_time', label='GameTime', hidden=True)
animation_archive_schema.add_field('arb_id', label='ARB ID', visualizer=GsiFieldVisualizers.INT)
animation_archive_schema.add_field('asm', label='ASM', width=20)
animation_archive_schema.add_field('request', label='Request', width=20)
animation_archive_schema.add_field('arb', label='ARB String', width=75)
# Nested grid: actors participating in the request.
with animation_archive_schema.add_has_many('Actors', GsiGridSchema) as sub_schema:
    sub_schema.add_field('name', label='Actor Name', width=20)
    sub_schema.add_field('actor', label='Actor', width=35)
    sub_schema.add_field('actor_id', label='Actor ID', width=35)
    sub_schema.add_field('suffix', label='Suffix', width=10)
# Nested grid: ASM parameters (global and per-object).
with animation_archive_schema.add_has_many('Params', GsiGridSchema) as sub_schema:
    sub_schema.add_field('name', label='Param Name', width=25)
    sub_schema.add_field('value', label='Value', width=25)
    sub_schema.add_field('type', label='Type', width=25)
    sub_schema.add_field('data', label='Data', width=25)
# Nested grid: clip events raised while the ARB played.
with animation_archive_schema.add_has_many('Events', GsiGridSchema) as sub_schema:
    sub_schema.add_field('clip_name', label='Clip Name', width=20)
    sub_schema.add_field('type', label='Type', width=15)
    sub_schema.add_field('event_id', label='ID', width=5, visualizer=GsiFieldVisualizers.INT)
    sub_schema.add_field('callbacks', label='Callbacks', width=30)
    sub_schema.add_field('event_data', label='Event Data', width=30)
    sub_schema.add_field('tag', label='Tag', width=5)
    sub_schema.add_field('errors', label='Errors', width=10)
archiver = GameplayArchiver('animation_archive', animation_archive_schema, add_to_archive_enable_functions=True)
def get_animation_log(arb, clear=False):
    """Return the archive log keyed by this arb's identity, creating it on
    first access.  With ``clear=True`` the entry is also removed from the
    archive dict (the log object itself is still returned)."""
    animation_log = setdefault_callable(animation_archive, id(arb), AnimationArchiveGSILog)
    if clear:
        # Fetch-and-remove semantics: the entry created/found above is
        # dropped so the next call starts a fresh log for this arb.
        del animation_archive[id(arb)]
    return animation_log
def process_actors(animation_log, asm):
    """Collect actor rows for *asm* unless it was already recorded in the
    log's asm_requests."""
    if asm in animation_log.asm_requests:
        # Already archived for this request; report nothing new.
        return []
    return [
        {'name': name, 'actor': str(obj), 'actor_id': str(obj.id), 'suffix': suffix}
        for (name, obj, suffix) in asm.actors_info_gen()
    ]
def process_parameters(animation_log, asm):
    """Collect parameter rows for *asm* unless it was already recorded.

    String keys are reported as 'Global' parameters; other keys are
    indexable triples reported as 'Object' parameters (formatted as
    ``key[1]:key[0]`` with ``key[2]`` shown as data).
    """
    params = []
    if asm in animation_log.asm_requests:
        return params
    for param_dict in asm.get_all_parameters():
        for (key, value) in param_dict.items():
            if isinstance(key, str):
                params.append({'name': key, 'value': str(value), 'type': 'Global'})
            else:
                params.append({'name': '{}:{}'.format(key[1], key[0]), 'value': str(value), 'type': 'Object', 'data': str(key[2])})
    return params
def process_animation_request(arb, animation_log):
    # Stub: intentionally does nothing here.  NOTE(review):
    # archive_animation_request expects the log's asm_requests/arb_contents
    # to be populated -- confirm whether a fuller implementation exists
    # elsewhere or was stripped.
    pass
def process_handled_events(arb, animation_log):
    # Stub: intentionally does nothing here (see archive_animation_request,
    # which calls it before archiving).
    pass
def archive_animation_request(arb):
    """Archive the GSI log for *arb* to every Sim actor participating in it."""
    # Fetch-and-remove the log entry for this arb.
    animation_log = get_animation_log(arb, clear=True)
    if animation_log is None:
        # NOTE(review): get_animation_log never returns None as written;
        # this guard looks vestigial.
        return
    process_handled_events(arb, animation_log)
    process_animation_request(arb, animation_log)
    object_manager = services.object_manager()
    for asm_request in animation_log.asm_requests.values():
        asm_request['arb_id'] = int(animation_log.id)
        asm_request['game_time'] = animation_log.now
        # NOTE(review): arb_contents is not set by AnimationArchiveGSILog;
        # presumably populated by process_animation_request -- confirm.
        asm_request['arb'] = animation_log.arb_contents
        for actor_id in arb.actor_ids:
            actor = object_manager.get(actor_id)
            if actor is not None and actor.is_sim:
                # Archive one row per participating Sim actor.
                archiver.archive(data=asm_request, object_id=actor.id)
|
import typing as t
import discord
from discord.ext.commands import Cog, Context, command, has_any_role
from bot import constants
from bot.bot import Bot
from bot.decorators import in_whitelist
from bot.log import get_logger
from bot.utils.checks import InWhitelistCheckFailure
log = get_logger(__name__)
# Sent via DMs once user joins the guild
ON_JOIN_MESSAGE = """
Welcome to Python Discord!
To show you what kind of community we are, we've created this video:
https://youtu.be/ZH26PuX3re0
As a new user, you have read-only access to a few select channels to give you a taste of what our server is like. \
In order to see the rest of the channels and to send messages, you first have to accept our rules.
"""

# Sent via DMs once a gated ("pending") user passes the guild gate
# (see Verification.on_member_update below).
VERIFIED_MESSAGE = f"""
You are now verified!
You can find a copy of our rules for reference at <https://pythondiscord.com/pages/rules>.
Additionally, if you'd like to receive notifications for the announcements \
we post in <#{constants.Channels.announcements}>
from time to time, you can send `!subscribe` to <#{constants.Channels.bot_commands}> at any time \
to assign yourself the **Announcements** role. We'll mention this role every time we make an announcement.
If you'd like to unsubscribe from the announcement notifications, simply send `!unsubscribe` to \
<#{constants.Channels.bot_commands}>.
To introduce you to our community, we've made the following video:
https://youtu.be/ZH26PuX3re0
"""
async def safe_dm(coro: t.Coroutine) -> None:
    """
    Execute `coro` ignoring disabled DM warnings.

    The 50007 error code indicates that the target user does not accept DMs.
    As it turns out, this error code can appear on both 400 and 403 statuses,
    we therefore catch any Discord exception.

    If the request fails on any other error code, the exception propagates,
    and must be handled by the caller.
    """
    try:
        await coro
    except discord.HTTPException as discord_exc:
        log.trace(f"DM dispatch failed on status {discord_exc.status} with code: {discord_exc.code}")
        if discord_exc.code != 50_007:  # If any reason other than disabled DMs
            raise
class Verification(Cog):
    """
    User verification and role management.

    Statistics are collected in the 'verification.' namespace.

    Additionally, this cog offers the !subscribe and !unsubscribe commands,
    """

    def __init__(self, bot: Bot) -> None:
        """Start internal tasks."""
        self.bot = bot
        self.pending_members = set()

    # region: listeners

    @Cog.listener()
    async def on_member_join(self, member: discord.Member) -> None:
        """Attempt to send initial direct message to each new member."""
        if member.guild.id != constants.Guild.id:
            return  # Only listen for PyDis events

        # If the user has the pending flag set, they will be using the alternate
        # gate and will not need a welcome DM with verification instructions.
        # We will send them an alternate DM once they verify with the welcome
        # video when they pass the gate.
        if member.pending:
            return

        log.trace(f"Sending on join message to new member: {member.id}")
        try:
            await safe_dm(member.send(ON_JOIN_MESSAGE))
        except discord.HTTPException:
            log.exception("DM dispatch failed on unexpected error code")

    @Cog.listener()
    async def on_member_update(self, before: discord.Member, after: discord.Member) -> None:
        """Check if we need to send a verification DM to a gated user."""
        if before.pending is True and after.pending is False:
            try:
                # If the member has not received a DM from our !accept command
                # and has gone through the alternate gating system we should send
                # our alternate welcome DM which includes info such as our welcome
                # video.
                await safe_dm(after.send(VERIFIED_MESSAGE))
            except discord.HTTPException:
                log.exception("DM dispatch failed on unexpected error code")

    # endregion
    # region: subscribe commands

    def _has_announcements_role(self, member: discord.Member) -> bool:
        """Return True if *member* already carries the Announcements role."""
        # Refactor: this role scan was duplicated in both commands below.
        return any(role.id == constants.Roles.announcements for role in member.roles)

    @command(name='subscribe')
    @in_whitelist(channels=(constants.Channels.bot_commands,))
    async def subscribe_command(self, ctx: Context, *_) -> None:  # We don't actually care about the args
        """Subscribe to announcement notifications by assigning yourself the role."""
        if self._has_announcements_role(ctx.author):
            await ctx.send(f"{ctx.author.mention} You're already subscribed!")
            return

        log.debug(f"{ctx.author} called !subscribe. Assigning the 'Announcements' role.")
        await ctx.author.add_roles(discord.Object(constants.Roles.announcements), reason="Subscribed to announcements")

        # NOTE(review): this trace mentions deleting a message, but no delete
        # call follows -- confirm whether the deletion was removed on purpose.
        log.trace(f"Deleting the message posted by {ctx.author}.")

        await ctx.send(
            f"{ctx.author.mention} Subscribed to <#{constants.Channels.announcements}> notifications.",
        )

    @command(name='unsubscribe')
    @in_whitelist(channels=(constants.Channels.bot_commands,))
    async def unsubscribe_command(self, ctx: Context, *_) -> None:  # We don't actually care about the args
        """Unsubscribe from announcement notifications by removing the role from yourself."""
        if not self._has_announcements_role(ctx.author):
            await ctx.send(f"{ctx.author.mention} You're already unsubscribed!")
            return

        log.debug(f"{ctx.author} called !unsubscribe. Removing the 'Announcements' role.")
        await ctx.author.remove_roles(
            discord.Object(constants.Roles.announcements), reason="Unsubscribed from announcements"
        )

        # NOTE(review): see subscribe_command -- no deletion actually happens.
        log.trace(f"Deleting the message posted by {ctx.author}.")

        await ctx.send(
            f"{ctx.author.mention} Unsubscribed from <#{constants.Channels.announcements}> notifications."
        )

    # endregion
    # region: miscellaneous

    # This cannot be static (must have a __func__ attribute).
    async def cog_command_error(self, ctx: Context, error: Exception) -> None:
        """Check for & ignore any InWhitelistCheckFailure."""
        if isinstance(error, InWhitelistCheckFailure):
            error.handled = True

    @command(name='verify')
    @has_any_role(*constants.MODERATION_ROLES)
    async def perform_manual_verification(self, ctx: Context, user: discord.Member) -> None:
        """Command for moderators to verify any user."""
        log.trace(f'verify command called by {ctx.author} for {user.id}.')

        if not user.pending:
            log.trace(f'{user.id} is already verified, aborting.')
            await ctx.send(f'{constants.Emojis.cross_mark} {user.mention} is already verified.')
            return

        # Adding a role automatically verifies the user, so we add and remove the Announcements role.
        temporary_role = self.bot.get_guild(constants.Guild.id).get_role(constants.Roles.announcements)
        await user.add_roles(temporary_role)
        await user.remove_roles(temporary_role)

        log.trace(f'{user.id} manually verified.')
        await ctx.send(f'{constants.Emojis.check_mark} {user.mention} is now verified.')

    # endregion
def setup(bot: Bot) -> None:
    """Register the Verification cog on the given bot."""
    cog = Verification(bot)
    bot.add_cog(cog)
|
"""Call arbitrary API endpoints."""
import json
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
from SoftLayer import utils
# Ordered (operation-name, token) pairs recognised in --filter expressions.
# Checked in order by _build_filters, so " IN " is tried before the bare "=".
SPLIT_TOKENS = [
    ('in', ' IN '),
    ('eq', '='),
]
def _build_filters(_filters):
    """Builds filters using the filter options passed into the CLI.

    Supports the 'eq' ("key=value") and 'in' ("key IN a,b") operations
    listed in SPLIT_TOKENS.

    :param _filters: iterable of raw filter strings from the command line.
    :returns: nested dict suitable for use as a SoftLayer object filter.
    :raises exceptions.CLIAbort: when no known operator token is found.
    """
    root = utils.NestedDict({})
    for _filter in _filters:
        operation = None
        # Find which operator token this filter string uses; after the
        # break, `operation` holds the matching operation name.
        for operation, token in SPLIT_TOKENS:
            # split "some.key=value" into ["some.key", "value"]
            top_parts = _filter.split(token, 1)
            if len(top_parts) == 2:
                break
        else:
            # for/else: no token produced a two-way split.
            raise exceptions.CLIAbort('Failed to find valid operation for: %s' % _filter)
        key, value = top_parts
        current = root
        # split "some.key" into ["some", "key"]
        parts = [part.strip() for part in key.split('.')]
        # Actually drill down and add the filter
        for part in parts[:-1]:
            current = current[part]
        if operation == 'eq':
            current[parts[-1]] = utils.query_filter(value.strip())
        elif operation == 'in':
            # "IN" filters take a comma-separated list of values.
            current[parts[-1]] = {
                'operation': 'in',
                'options': [{
                    'name': 'data',
                    'value': [p.strip() for p in value.split(',')],
                }],
            }
    return root.to_dict()
def _build_python_example(args, kwargs):
    """Return a runnable Python snippet equivalent to the requested API call.

    Positional arguments are rendered first, then truthy keyword arguments
    in sorted-key order; falsy keyword values are omitted.
    """
    pieces = [repr(arg) for arg in args]
    pieces += [key + '=' + repr(value)
               for key, value in sorted(kwargs.items()) if value]
    header = ('import SoftLayer\n\n'
              'client = SoftLayer.create_client_from_env()\n'
              'result = client.call(')
    return header + ',\n    '.join(pieces) + ')'
def _validate_filter(ctx, param, value):  # pylint: disable=unused-argument
    """Validates a JSON style object filter"""
    # Empty/absent values pass through as None (no filter).
    if not value:
        return None
    try:
        parsed = json.loads(value)
    except json.JSONDecodeError as error:
        raise exceptions.CLIAbort("\"{}\" is not valid JSON. {}".format(value, error))
    # Only a JSON object (dict) is a legal object filter.
    if not isinstance(parsed, dict):
        raise exceptions.CLIAbort("\"{}\" should be a JSON object, but is a {} instead.".
                                  format(parsed, type(parsed)))
    return parsed
def _validate_parameters(ctx, param, value):  # pylint: disable=unused-argument
    """Checks if value is a JSON string, and converts it to a datastructure if that is true"""
    converted = []
    for raw in value:
        item = raw
        # Only strings containing JSON-ish brackets are parse candidates.
        if isinstance(raw, str) and ('{' in raw or '[' in raw):
            try:
                item = json.loads(raw)
            except json.JSONDecodeError as error:
                # Warn but fall through with the original string untouched.
                click.secho("{} looked like json, but was invalid, passing to API as is. {}".
                            format(raw, error), fg='red')
        converted.append(item)
    return converted
@click.command('call', short_help="Call arbitrary API endpoints.")
@click.argument('service')
@click.argument('method')
@click.argument('parameters', nargs=-1, callback=_validate_parameters)
@click.option('--id', '_id', help="Init parameter")
@helpers.multi_option('--filter', '-f', '_filters',
                      help="Object filters. This should be of the form: 'property=value' or 'nested.property=value'."
                           "Complex filters should use --json-filter.")
@click.option('--mask', help="String-based object mask")
@click.option('--limit', type=click.INT, help="Result limit")
@click.option('--offset', type=click.INT, help="Result offset")
@click.option('--output-python / --no-output-python',
              help="Show python example code instead of executing the call")
@click.option('--json-filter', callback=_validate_filter,
              help="A JSON string to be passed in as the object filter to the API call. "
                   "Remember to use double quotes (\") for variable names. Can NOT be used with --filter. "
                   "Dont use whitespace outside of strings, or the slcli might have trouble parsing it.")
@environment.pass_env
def cli(env, service, method, parameters, _id, _filters, mask, limit, offset,
        output_python=False, json_filter=None):
    """Call arbitrary API endpoints with the given SERVICE and METHOD.
    For parameters that require a datatype, use a JSON string for that parameter.
    Example::
        slcli call-api Account getObject
        slcli call-api Account getVirtualGuests --limit=10 --mask=id,hostname
        slcli call-api Virtual_Guest getObject --id=12345
        slcli call-api Metric_Tracking_Object getBandwidthData --id=1234 \\
            "2015-01-01 00:00:00" "2015-01-1 12:00:00" public
        slcli call-api Account getVirtualGuests \\
            -f 'virtualGuests.datacenter.name=dal05' \\
            -f 'virtualGuests.maxCpu=4' \\
            --mask=id,hostname,datacenter.name,maxCpu
        slcli call-api Account getVirtualGuests \\
            -f 'virtualGuests.datacenter.name IN dal05,sng01'
        slcli call-api Account getVirtualGuests \\
            --json-filter '{"virtualGuests":{"hostname":{"operation":"^= test"}}}' --limit=10
        slcli -v call-api SoftLayer_User_Customer addBulkPortalPermission --id=1234567 \\
            '[{"keyName": "NETWORK_MESSAGE_DELIVERY_MANAGE"}]'
    """
    # The two filter styles are mutually exclusive.
    if _filters and json_filter:
        raise exceptions.CLIAbort("--filter and --json-filter cannot be used together.")
    object_filter = _build_filters(_filters)
    if json_filter:
        # A --json-filter augments/overrides keys from the (empty) CLI filter.
        object_filter.update(json_filter)
    # Positional API arguments follow the service and method names.
    args = [service, method] + list(parameters)
    kwargs = {
        'id': _id,
        'filter': object_filter,
        'mask': mask,
        'limit': limit,
        'offset': offset,
    }
    if output_python:
        # Dry-run: print equivalent Python code instead of calling the API.
        env.out(_build_python_example(args, kwargs))
    else:
        result = env.client.call(*args, **kwargs)
        env.fout(formatting.iter_to_table(result))
|
# coding: utf-8
from django.core import mail
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django_factory_boy import auth
from journalmanager import models as jmodels
from journalmanager.tests import modelfactories
from editorialmanager import notifications
from . import modelfactories as editorial_modelfactories
class IssueBoardMessageTests(TestCase):
    """Notifications fired when an issue's editorial board is (not) replicated."""

    ACTIONS = [
        'issue_add_no_replicated_board',
        'issue_add_replicated_board',
    ]

    def setUp(self):
        self.editor = auth.UserF(is_active=True)
        self.journal = modelfactories.JournalFactory.create(editor=self.editor)
        self.issue = modelfactories.IssueFactory(journal=self.journal)
        self.expected_recipients = [self.editor.email, ]
        self.expected_subject_sufix_by_action = {
            'issue_add_no_replicated_board': "Issue Board can't be replicated",
            'issue_add_replicated_board': "Issue has a new replicated board",
        }

    def tearDown(self):
        """
        Restore the default values.
        """

    def _make_subject(self, action, subject=''):
        # Expected subject layout: "<prefix> <subject> <suffix-for-action>".
        pieces = [
            settings.EMAIL_SUBJECT_PREFIX,
            subject,
            self.expected_subject_sufix_by_action[action],
        ]
        return ' '.join(pieces)

    @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
    def test_each_action(self):
        for sent_count, action in enumerate(self.ACTIONS, start=1):
            outcome = notifications.issue_board_replica(issue=self.issue, action=action)

            self.assertTrue(outcome)
            self.assertEqual(len(mail.outbox), sent_count)

            latest = mail.outbox[-1]
            self.assertEqual(latest.subject, self._make_subject(action))
            self.assertEqual(len(self.expected_recipients), len(latest.to))
            for recipient in self.expected_recipients:
                self.assertIn(recipient, latest.to)

    @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
    def test_each_action_with_disable_notifications_for_editor(self):
        """
        for each action, test notifications are sent, but editor will have disable notifications
        """
        profile = self.editor.get_profile()
        profile.email_notifications = False
        profile.save()
        # The editor opted out, so nobody is left to notify.
        self.expected_recipients.remove(profile.user.email)

        for action in self.ACTIONS:
            outcome = notifications.issue_board_replica(issue=self.issue, action=action)
            # No mail is sent: the only recipient (the editor) opted out.
            self.assertIsNone(outcome)
class BoardMembersMessageTests(TestCase):
    """Notifications sent to librarians when an editorial board member changes."""

    ACTIONS = [
        'board_add_member',
        'board_edit_member',
        'board_delete_member',
    ]

    def setUp(self):
        # create librarian group and those members
        self.librarian_group = modelfactories.GroupFactory(name="Librarian")
        self.librarian1 = auth.UserF(is_active=True)
        self.librarian2 = auth.UserF(is_active=True)
        self.librarian1.groups.add(self.librarian_group)
        self.librarian1.save()
        self.librarian2.groups.add(self.librarian_group)
        self.librarian2.save()
        self.collection = modelfactories.CollectionFactory.create()
        self.editor = auth.UserF(is_active=True)
        self.journal = modelfactories.JournalFactory.create(editor=self.editor)
        self.issue = modelfactories.IssueFactory(journal=self.journal)
        self.board = editorial_modelfactories.EditorialBoardFactory.create(issue=self.issue)
        self.member = editorial_modelfactories.EditorialMemberFactory.create(board=self.board)
        # link journal to collection
        jmodels.Membership.objects.create(journal=self.journal, collection=self.collection, created_by=auth.UserF(is_active=True))
        # link librarians and collection
        self.collection.add_user(self.librarian1)
        self.collection.add_user(self.librarian2)
        self.expected_recipients = []
        self.expected_bcc_recipients = [self.librarian1.email, self.librarian2.email, ]
        self.expected_subject_sufix_by_action = {
            'board_add_member': "Member of the journal board, was added",
            'board_edit_member': "Member of the journal board, was edited",
            'board_delete_member': "Member of the journal board, was deleted",
        }

    def _make_subject(self, action, subject=''):
        # Expected subject layout: "<prefix> <subject> <suffix-for-action>".
        pieces = [
            settings.EMAIL_SUBJECT_PREFIX,
            subject,
            self.expected_subject_sufix_by_action[action],
        ]
        return ' '.join(pieces)

    def _assert_latest_email(self, action, expected_count):
        # Shared assertions about the most recently sent message.
        self.assertEqual(len(mail.outbox), expected_count)
        latest = mail.outbox[-1]
        self.assertEqual(latest.subject, self._make_subject(action))
        self.assertEqual(len(self.expected_recipients), len(latest.to))
        self.assertEqual(len(self.expected_bcc_recipients), len(latest.bcc))
        for recipient in self.expected_recipients:
            self.assertIn(recipient, latest.to)
        for recipient in self.expected_bcc_recipients:
            self.assertIn(recipient, latest.bcc)

    @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
    def test_each_action(self):
        for sent_count, action in enumerate(self.ACTIONS, start=1):
            outcome = notifications.board_members_send_email_by_action(
                self.member, self.editor, 'Audit Log change message goes here!', action)

            self.assertTrue(outcome)
            self._assert_latest_email(action, sent_count)

    @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True, CELERY_ALWAYS_EAGER=True, BROKER_BACKEND='memory')
    def test_each_action_with_disable_notifications_for_one_librarian(self):
        """
        for each action, test notifications are sent, but librarian2 will have disable notifications
        """
        profile = self.librarian2.get_profile()
        profile.email_notifications = False
        profile.save()
        # librarian2 opted out, so only librarian1 remains in the BCC list.
        self.expected_bcc_recipients.remove(profile.user.email)

        for sent_count, action in enumerate(self.ACTIONS, start=1):
            outcome = notifications.board_members_send_email_by_action(
                self.member, self.editor, 'Audit Log change message goes here!', action)

            self.assertTrue(outcome)
            self._assert_latest_email(action, sent_count)
|
import logging
from typing import Optional
import xbmcaddon
import os
from .kodi_rpc import get_item_info, get_properties
from .periodic_updater import PeriodicUpdater
from .preferences import Preferences
# Logger named after the add-on id so Kodi attributes log lines to this add-on.
ADDON = xbmcaddon.Addon()
logger = logging.getLogger(ADDON.getAddonInfo('id'))
def same_audio(audio1, audio2) -> bool:
    """Return True if two audio stream dicts describe the same track.

    Only ``name``, ``language`` and ``isimpaired`` are compared because other
    attributes (e.g. the stream index) can differ between playbacks of the
    same item.
    """
    # Return the boolean expression directly instead of the
    # `if cond: return True / return False` pattern.
    keys = ('name', 'language', 'isimpaired')
    return all(audio1[key] == audio2[key] for key in keys)
def same_subtitle(subtitle1, subtitle2) -> bool:
    """Return True if two subtitle descriptors refer to the same subtitle.

    ``None`` means "subtitles disabled"; two ``None`` values are equal, while
    ``None`` never matches a real stream.  For stream dicts only ``name``,
    ``language``, ``isforced`` and ``isimpaired`` are compared because other
    attributes can differ between playbacks.
    """
    if subtitle1 == subtitle2:
        # Covers both None/None and fully identical dicts.
        return True
    if subtitle1 is None or subtitle2 is None:
        return False
    # Return the boolean expression directly instead of the
    # `if cond: return True / return False` pattern.
    keys = ('name', 'language', 'isforced', 'isimpaired')
    return all(subtitle1[key] == subtitle2[key] for key in keys)
def find_audio_stream(requested_audio, audio_streams) -> Optional[int]:
    """Return the index of the stream matching *requested_audio*, or None."""
    matches = (stream["index"] for stream in audio_streams
               if same_audio(requested_audio, stream))
    return next(matches, None)
def find_subtitle_stream(requested_subtitle, subtitles) -> Optional[int]:
    """Return the index of the subtitle matching *requested_subtitle*, or None."""
    matches = (candidate["index"] for candidate in subtitles
               if same_subtitle(requested_subtitle, candidate))
    return next(matches, None)
class Tracker():
    """Keeps the player's audio/subtitle selection in sync with stored
    per-show preferences.

    The ``PeriodicUpdater`` drives :meth:`_update_item`; ``set_audio_stream``
    and ``set_subtitle_stream`` are callables injected by the owning code and
    used to apply a stored stream to the running player.
    """

    def __init__(self, periodic_updater: PeriodicUpdater, preferences: Preferences):
        logger.debug("--> Tracker Init")
        self.preferences = preferences
        self.periodic_updater = periodic_updater
        # NOTE(review): writes a private attribute of PeriodicUpdater —
        # a public set_callback() would be cleaner (the original author
        # flagged this too with "Hmm").
        periodic_updater._callback = self._update_item  # Hmm
        # Last known audio/subtitle selection for the current item.
        self.audio = None
        self.subtitle = None
        # Callables used to apply streams to the player; injected externally.
        self.set_audio_stream = None
        self.set_subtitle_stream = None

    def _reset(self):
        # Forget the selection tracked for the previous item.
        self.audio = None
        self.subtitle = None

    def _get_audio(self, properties):
        # Current audio stream dict as reported by the player.
        return properties["currentaudiostream"]

    def _get_subtitle(self, properties):
        # None signals "subtitles disabled" (distinct from any stream dict).
        if properties["subtitleenabled"] == False:
            return None
        sub = properties["currentsubtitle"]
        logger.debug(sub)
        return sub

    def _update_item(self, initial: bool = False):
        """Poll the player; on the initial call apply stored preferences,
        on later calls detect user changes and persist them."""
        logger.debug("--> Update item")
        item_info = get_item_info()
        item_type = item_info["type"]
        if item_type != "episode" and item_type != "unknown":
            logger.debug("Not an episode: %s", item_type)
            return
        try:
            file = item_info["file"]
            filename = os.path.basename(file)
            path = os.path.dirname(file)
            if item_type == "episode":
                show_id = item_info["tvshowid"]
                season = item_info["season"]
                episode = item_info["episode"]
            else:
                # "unknown" items are keyed by their directory with a
                # synthetic season 1 / episode 1.
                show_id = path
                season = 1
                episode = 1
            logger.debug("path: %s, filename: %s, show: %s, season:%d, episode:%d",
                         path, filename, show_id, season, episode)
            item_props = get_properties()
            current_audio = self._get_audio(item_props)
            current_subtitle = self._get_subtitle(item_props)
            current_percentage = item_props["percentage"]
            logger.debug(
                "percentage: %.1f", current_percentage)
            logger.debug(
                "audio: %d, %s, %s", current_audio["index"], current_audio["language"], current_audio["name"])
            logger.debug(
                "subtitle: %s", f'{current_subtitle["index"]}, {current_subtitle["language"]}, {current_subtitle["name"]}' if current_subtitle else "subtitles disabled")
            if initial:
                # Remember what the player started with, then override with
                # any stored preference for this show/season/episode.
                self.audio = current_audio
                self.subtitle = current_subtitle
                stored_info = self.preferences.get(show_id, season, episode)
                logger.debug("Initial stored info: %s", stored_info)
                if stored_info is not None:
                    # Will match the stored settings
                    self.audio = stored_info["audio"]
                    self.subtitle = stored_info["subtitle"]
                    if not same_audio(stored_info["audio"], current_audio):
                        logger.info(
                            "Current audio is different from data audio -> Overriding audio")
                        logger.debug(stored_info["audio"])
                        logger.debug(item_props["audiostreams"])
                        stream = find_audio_stream(
                            stored_info["audio"], item_props["audiostreams"])
                        if stream is not None and self.set_audio_stream:
                            logger.debug("Setting audiostream: %d, %s, %s", stream,
                                         stored_info["audio"]["language"], stored_info["audio"]["name"])
                            self.set_audio_stream(stream)
                    if not same_subtitle(stored_info["subtitle"], current_subtitle):
                        logger.info(
                            "Current subtitle is different from data subtitle -> Overriding subtitle")
                        logger.debug(stored_info["subtitle"])
                        logger.debug(item_props["subtitles"])
                        stream = find_subtitle_stream(
                            stored_info["subtitle"], item_props["subtitles"])
                        # NOTE(review): unlike the audio path, this logs before
                        # the `stream is not None` guard; `%d` with None makes
                        # the logging call misfire — confirm and move inside
                        # the guard.
                        logger.debug("Setting subtitlestream: %d, %s", stream,
                                     f'{stored_info["subtitle"]["language"]}, {stored_info["subtitle"]["name"]}' if stored_info["subtitle"] else "subtitles disabled")
                        enabled = stored_info["subtitle"] is not None
                        if stream is not None and self.set_subtitle_stream:
                            self.set_subtitle_stream(enabled, stream)
            if not initial and current_percentage > 2:
                # Check for changes and store if different cause it is what the user specified
                if not same_audio(self.audio, current_audio) or not same_subtitle(self.subtitle, current_subtitle):
                    logger.debug("same_audio: %s\n %s\n %s", same_audio(
                        self.audio, current_audio), self.audio, current_audio)
                    logger.debug("same_subtitle: %s\n %s\n %s", same_subtitle(
                        self.subtitle, current_subtitle), self.subtitle, current_subtitle)
                    self.preferences.set(show_id, season, episode, {
                        "audio": current_audio, "subtitle": current_subtitle})
                    self.audio = current_audio
                    self.subtitle = current_subtitle
        except Exception as e:
            # Broad catch keeps the periodic updater alive on malformed data.
            logger.error(e)

    def start(self):
        # Begin tracking: reset state, apply stored prefs, start polling.
        logger.debug("--> Start")
        self._reset()
        self._update_item(initial=True)
        self.periodic_updater.start()

    def stop(self):
        # Stop the polling loop; state is reset again on the next start().
        logger.debug("--> Stop")
        self.periodic_updater.stop()
|
import functools
import logging
import syslog

from passlib.hash import sha512_crypt

import ajenti
import ajenti.usersync
from ajenti.api import *
def restrict(permission):
    """
    Marks a decorated function as requiring ``permission``.
    If the invoking user doesn't have one, :class:`SecurityError` is raised.
    """
    def decorator(fx):
        # functools.wraps preserves the wrapped function's __name__/__doc__,
        # which the original wrapper clobbered (breaking introspection).
        @functools.wraps(fx)
        def wrapper(*args, **kwargs):
            # Verify the permission before delegating to the wrapped callable.
            UserManager.get().require_permission(extract_context(), permission)
            return fx(*args, **kwargs)
        return wrapper
    return decorator
class SecurityError (Exception):
    """
    Indicates that user didn't have a required permission.

    .. attribute:: permission

        permission ID
    """

    def __init__(self, permission):
        # Pass the permission to Exception.__init__ so it appears in .args
        # (default reprs, pickling) — the original skipped this call.
        Exception.__init__(self, permission)
        self.permission = permission

    def __str__(self):
        return 'Permission "%s" required' % self.permission
@plugin
@persistent
@rootcontext
class UserManager (BasePlugin):
    """Authenticates users, stores password hashes and checks permissions."""

    default_classconfig = {'sync-provider': ''}
    classconfig_root = True

    def check_password(self, username, password, env=None):
        """
        Verifies the given username/password combo

        :type username: str
        :type password: str
        :rtype: bool
        """
        if not username or not password:
            return False

        provider = self.get_sync_provider(fallback=True)
        if username == 'root' and not provider.syncs_root:
            # Providers that don't sync root must never authenticate it;
            # fall back to Ajenti's own user database for root.
            provider = ajenti.usersync.AjentiSyncProvider.get()

        if username not in ajenti.config.tree.users:
            return False

        try:
            provider.sync()
        except Exception as e:
            # Sync failures are non-fatal: authenticate against last-known data.
            logging.error(str(e))

        result = provider.check_password(username, password)
        provider_name = type(provider).__name__

        ip_notion = ''
        ip = env.get('REMOTE_ADDR', None) if env else None
        if ip:
            ip_notion = ' from %s' % ip

        if not result:
            # Security fix: do not write the cleartext password to syslog.
            msg = 'failed login attempt for %s through %s%s' % \
                (username, provider_name, ip_notion)
            syslog.syslog(syslog.LOG_WARNING, msg)
            logging.warning(msg)
        else:
            msg = 'user %s logged in through %s%s' % (username, provider_name, ip_notion)
            syslog.syslog(syslog.LOG_INFO, msg)
            logging.info(msg)
        return result

    def hash_password(self, password):
        """
        Hashes *password* with SHA-512 crypt unless it is already hashed.

        :type password: str
        :rtype: str
        """
        # Stored passwords carry a 'sha512|' prefix followed by the crypt hash
        # (restores the line corrupted by anonymization in the original).
        if not password.startswith('sha512|'):
            password = 'sha512|%s' % sha512_crypt.encrypt(password)
        return password

    def hash_passwords(self):
        """Re-hashes any remaining cleartext passwords in the config tree."""
        for user in ajenti.config.tree.users.values():
            if not user.password.startswith('sha512|'):
                user.password = self.hash_password(user.password)

    def has_permission(self, context, permission):
        """
        Checks whether the current user has a permission

        :type permission: str
        :rtype: bool
        """
        # root implicitly holds every permission.
        if context.user.name == 'root':
            return True
        return permission in context.user.permissions

    def require_permission(self, context, permission):
        """
        Checks current user for given permission and
        raises :class:`SecurityError` if he doesn't have one

        :type permission: str
        :raises: SecurityError
        """
        if not self.has_permission(context, permission):
            raise SecurityError(permission)

    def get_sync_provider(self, fallback=False):
        """
        Returns the configured user sync provider instance.

        :type fallback: bool
        :rtype: ajenti.usersync.UserSyncProvider
        """
        for p in ajenti.usersync.UserSyncProvider.get_classes():
            p.get()  # instantiate the singleton even for non-selected providers
            if p.id == self.classconfig['sync-provider']:
                try:
                    p.get().test()
                except Exception:
                    # Provider unreachable: optionally fall back to local users.
                    if fallback:
                        return ajenti.usersync.AjentiSyncProvider.get()
                return p.get()

    def set_sync_provider(self, provider_id):
        """Selects the sync provider by id and persists the choice."""
        self.classconfig['sync-provider'] = provider_id
        self.save_classconfig()

    def set_password(self, username, password):
        """Sets (and hashes) the password of an existing user."""
        ajenti.config.tree.users[username].password = self.hash_password(password)
@interface
class PermissionProvider (object):
    """
    Override to create your own set of permissions.
    Plugins implement this interface to publish permission IDs that
    :class:`UserManager` can check against a user's permission list.
    """

    def get_permissions(self):
        """
        Should return a list of permission names

        :rtype: list
        """
        return []

    def get_name(self):
        """
        Should return a human-friendly name for this set
        of permissions (displayed in Configurator)

        :rtype: str
        """
        return ''
# Public API of this module.
__all__ = ['restrict', 'PermissionProvider', 'SecurityError', 'UserManager']
|
<filename>monitoring/monitorlib/scd.py<gh_stars>1-10
import math
from datetime import datetime, timedelta
from typing import Dict, List, Optional, Tuple, Literal
from .typing import ImplicitDict, StringBasedDateTime
import s2sphere
import shapely.geometry
from monitoring.monitorlib import geo
# Format/version constants for the ASTM SCD (strategic coordination) API.
TIME_FORMAT_CODE = 'RFC3339'
DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
EARTH_CIRCUMFERENCE_M = 40.075e6  # meters

API_0_3_5 = '0.3.5'
# API version 0.3.17 is programmatically identical to version 1.0.0, so both these versions can be used interchangeably.
API_1_0_0 = '1.0.0'
API_0_3_17 = API_1_0_0

# OAuth scopes used by the SCD API.
# In Both
SCOPE_SC = 'utm.strategic_coordination'
SCOPE_CM = 'utm.constraint_management'
# In 0.3.5
SCOPE_CI = 'utm.constraint_consumption'
# In 0.3.17
SCOPE_CP = 'utm.constraint_processing'
SCOPE_CM_SA = 'utm.conformance_monitoring_sa'
SCOPE_AA = 'utm.availability_arbitration'

# OVN values that mean "no OVN available".
NO_OVN_PHRASES = {'', 'Available from USS'}
def latitude_degrees(distance_meters: float) -> float:
    """Convert a north-south ground distance in meters to degrees of latitude."""
    scaled = 360 * distance_meters
    return scaled / EARTH_CIRCUMFERENCE_M
def parse_time(time: Dict) -> datetime:
    """Parse a Time dict ({'value': <RFC 3339 string>, ...}) into a datetime.

    A trailing 'Z' (UTC designator) is stripped because
    datetime.fromisoformat does not accept it on older Python versions.
    """
    value = time['value']
    if value.endswith('Z'):
        value = value[:-1]
    return datetime.fromisoformat(value)
def offset_time(vol4s: List[Dict], dt: timedelta) -> List[Dict]:
    """Shift every volume's start and end time by *dt*, in place.

    Returns the same list for convenient chaining.
    """
    for volume in vol4s:
        for key in ('time_start', 'time_end'):
            volume[key] = make_time(parse_time(volume[key]) + dt)
    return vol4s
class Subscription(dict):
    """Dict-backed DSS subscription; it is valid once a version is present."""

    @property
    def version(self) -> Optional[int]:
        # Absent key means the subscription was never written to the DSS.
        return self.get('version', None)

    @property
    def valid(self) -> bool:
        return self.version is not None
################################################################################
#################### Start of ASTM-standard definitions #####################
#################### interfaces/astm-utm/Protocol/utm.yaml #####################
################################################################################
class LatLngPoint(ImplicitDict):
    '''A class to hold information about a location as Latitude / Longitude pair '''
    lat: float
    lng: float


class Radius(ImplicitDict):
    ''' A class to hold the radius of a circle for the outline_circle object '''
    value: float
    units: str


class Polygon(ImplicitDict):
    ''' A class to hold the polygon object, used in the outline_polygon of the Volume3D object '''
    vertices: List[LatLngPoint]  # A minimum of three LatLngPoints are required


class Circle(ImplicitDict):
    ''' A class to hold the details of a circle object used in the outline_circle object '''
    center: LatLngPoint
    radius: Radius


class Altitude(ImplicitDict):
    ''' A class to hold altitude information '''
    value: float
    reference: Literal['W84']
    units: str


class Time(ImplicitDict):
    ''' A class to hold Time details '''
    value: StringBasedDateTime
    format: Literal['RFC3339']


class Volume3D(ImplicitDict):
    '''A class to hold Volume3D objects '''
    # Exactly one of outline_circle / outline_polygon is expected to be set
    # (see the consumers rect_bounds_of / vol4_intersect below).
    outline_circle: Optional[Circle]
    outline_polygon: Optional[Polygon]
    altitude_lower: Optional[Altitude]
    altitude_upper: Optional[Altitude]


class Volume4D(ImplicitDict):
    '''A class to hold Volume4D objects '''
    volume: Volume3D
    time_start: Optional[Time]
    time_end: Optional[Time]


class OperationalIntentReference(ImplicitDict):
    '''Reference to an operational intent as returned by the DSS.'''
    id: str
    manager: str
    uss_availability: str
    version: int
    state: str
    ovn: str
    time_start: Time
    time_end: Time
    uss_base_url: str
    subscription_id: str


class ErrorResponse(ImplicitDict):
    '''Generic error payload.'''
    message: str


class QueryOperationalIntentReferenceParameters(ImplicitDict):
    '''Request body for querying operational intent references by area.'''
    area_of_interest: Volume4D


class QueryOperationalIntentReferenceResponse(ImplicitDict):
    operational_intent_references: List[OperationalIntentReference]


class ImplicitSubscriptionParameters(ImplicitDict):
    uss_base_url: str
    notify_for_constraints: Optional[bool]


class PutOperationalIntentReferenceParameters(ImplicitDict):
    '''Request body for creating/updating an operational intent reference.'''
    extents: Volume4D
    key: List[str]
    state: str
    uss_base_url: str
    subscription_id: Optional[str]
    new_subscription: Optional[ImplicitSubscriptionParameters]


class SubscriptionState(ImplicitDict):
    subscription_id: str
    notification_index: int


class SubscriberToNotify(ImplicitDict):
    uss_base_url: str
    subscriptions: List[SubscriptionState]


class ChangeOperationalIntentReferenceResponse(ImplicitDict):
    subscribers: List[SubscriberToNotify]
    operational_intent_reference: OperationalIntentReference


class OperationalIntentDetails(ImplicitDict):
    volumes: List[Volume4D]
    off_nominal_volumes: List[Volume4D]
    priority: int


class OperationalIntent(ImplicitDict):
    reference: OperationalIntentReference
    details: OperationalIntentDetails


class GetOperationalIntentDetailsResponse(ImplicitDict):
    operational_intent: OperationalIntent


class PutOperationalIntentDetailsParameters(ImplicitDict):
    operational_intent_id: str
    operational_intent: Optional[OperationalIntent]
    subscriptions: List[SubscriptionState]

################################################################################
#################### End of ASTM-standard definitions #####################
#################### interfaces/astm-utm/Protocol/utm.yaml #####################
################################################################################

class DeleteAllFlightsRequest(ImplicitDict):
    '''Request body for the test-support endpoint that clears all flights.'''
    extents: List[Volume4D]
def make_vol4(
        t0: Optional[datetime] = None,
        t1: Optional[datetime] = None,
        alt0: Optional[float] = None,
        alt1: Optional[float] = None,
        circle: Dict = None,
        polygon: Dict = None) -> Volume4D:
    """Assemble a Volume4D from any subset of time, altitude and outline parts.

    Omitted arguments leave the corresponding field unset.
    """
    spatial_fields = {}
    if circle is not None:
        spatial_fields['outline_circle'] = circle
    if polygon is not None:
        spatial_fields['outline_polygon'] = polygon
    if alt0 is not None:
        spatial_fields['altitude_lower'] = make_altitude(alt0)
    if alt1 is not None:
        spatial_fields['altitude_upper'] = make_altitude(alt1)

    temporal_fields = {'volume': Volume3D(**spatial_fields)}
    if t0 is not None:
        temporal_fields['time_start'] = make_time(t0)
    if t1 is not None:
        temporal_fields['time_end'] = make_time(t1)
    return Volume4D(**temporal_fields)
def make_time(t: datetime) -> Time:
    """Convert a datetime into an RFC 3339 `Time` object (UTC 'Z' suffix)."""
    value = t.isoformat() + 'Z'
    return Time(value=value, format='RFC3339')
def make_altitude(alt_meters: float) -> Altitude:
    """Wrap a WGS-84 altitude in meters into an `Altitude` object."""
    return Altitude(value=alt_meters, units='M', reference='W84')
def make_circle(lat: float, lng: float, radius: float) -> Circle:
    """Build a `Circle` centred at (lat, lng) with *radius* in meters."""
    center = LatLngPoint(lat=lat, lng=lng)
    return Circle(center=center, radius=Radius(value=radius, units='M'))
def make_polygon(coords: List[Tuple[float, float]]=None, latlngrect: s2sphere.LatLngRect=None) -> Polygon:
    """Build a `Polygon` from explicit (lat, lng) pairs, or from the four
    corners of an s2sphere bounding rectangle when *coords* is None."""
    if coords is not None:
        vertices = [LatLngPoint(lat=lat, lng=lng) for (lat, lng) in coords]
        return Polygon(vertices=vertices)
    lat_lo = latlngrect.lat_lo().degrees
    lat_hi = latlngrect.lat_hi().degrees
    lng_lo = latlngrect.lng_lo().degrees
    lng_hi = latlngrect.lng_hi().degrees
    corners = [
        LatLngPoint(lat=lat_lo, lng=lng_lo),
        LatLngPoint(lat=lat_lo, lng=lng_hi),
        LatLngPoint(lat=lat_hi, lng=lng_hi),
        LatLngPoint(lat=lat_hi, lng=lng_lo),
    ]
    return Polygon(vertices=corners)
def start_of(vol4s: List[Volume4D]) -> datetime:
    """Earliest start time across the given 4D volumes."""
    return min(parse_time(v['time_start']) for v in vol4s)
def end_of(vol4s: List[Volume4D]) -> datetime:
    """Latest end time across the given 4D volumes."""
    return max(parse_time(v['time_end']) for v in vol4s)
def rect_bounds_of(vol4s: List[Volume4D]) -> s2sphere.LatLngRect:
    """Smallest lat/lng rectangle containing every volume's 2D footprint."""
    # Start from impossible extremes so any real vertex replaces them.
    lat_min = 90
    lat_max = -90
    lng_min = 360
    lng_max = -360
    for vol4 in vol4s:
        if 'outline_polygon' in vol4.volume:
            for v in vol4.volume.outline_polygon.vertices:
                lat_min = min(lat_min, v.lat)
                lat_max = max(lat_max, v.lat)
                lng_min = min(lng_min, v.lng)
                lng_max = max(lng_max, v.lng)
        if 'outline_circle' in vol4.volume:
            circle = vol4.volume.outline_circle
            if circle.radius.units != 'M':
                raise ValueError('Unsupported circle radius units: {}'.format(circle.radius.units))
            # Convert the metric radius into degrees of latitude/longitude.
            # NOTE(review): the longitude term divides by cos(lat_radius),
            # where cos(circle.center.lat) looks intended — confirm.
            lat_radius = 360 * circle.radius.value / geo.EARTH_CIRCUMFERENCE_M
            lng_radius = 360 * circle.radius.value / (geo.EARTH_CIRCUMFERENCE_M * math.cos(math.radians(lat_radius)))
            lat_min = min(lat_min, circle.center.lat - lat_radius)
            lat_max = max(lat_max, circle.center.lat + lat_radius)
            lng_min = min(lng_min, circle.center.lng - lng_radius)
            lng_max = max(lng_max, circle.center.lng + lng_radius)
    p1 = s2sphere.LatLng.from_degrees(lat_min, lng_min)
    p2 = s2sphere.LatLng.from_degrees(lat_max, lng_max)
    return s2sphere.LatLngRect.from_point_pair(p1, p2)
def meter_altitude_bounds_of(vol4s: List[Volume4D]) -> Tuple[float, float]:
    """Return (lowest, highest) declared altitude in meters across *vol4s*.

    :raises ValueError: if any declared altitude uses units other than 'M',
        or (from min/max) if no volume declares the corresponding bound.
    """
    # Validate units before aggregating so a unit mismatch is reported even
    # when the min()/max() below would fail first (e.g. empty input).
    if not all(v.volume.altitude_lower.units == 'M' for v in vol4s if 'altitude_lower' in v.volume):
        raise ValueError('altitude_lower units must always be M')
    if not all(v.volume.altitude_upper.units == 'M' for v in vol4s if 'altitude_upper' in v.volume):
        raise ValueError('altitude_upper units must always be M')
    alt_lo = min(v.volume.altitude_lower.value for v in vol4s if 'altitude_lower' in v.volume)
    alt_hi = max(v.volume.altitude_upper.value for v in vol4s if 'altitude_upper' in v.volume)
    return alt_lo, alt_hi
def vol4_intersect(vol4_1: Volume4D, vol4_2: Volume4D) -> bool:
    """True if the two 4D volumes overlap in time, altitude and 2D footprint.

    Footprints are flattened onto a local tangent plane anchored at vol4_1's
    outline and compared with shapely.

    :raises ValueError: on unsupported radius units or a volume with neither
        outline_circle nor outline_polygon.
    """
    # Disjoint in time?
    if parse_time(vol4_1.time_end) < parse_time(vol4_2.time_start):
        return False
    if parse_time(vol4_1.time_start) > parse_time(vol4_2.time_end):
        return False
    # Disjoint in altitude?
    if vol4_1.volume.altitude_upper.value < vol4_2.volume.altitude_lower.value:
        return False
    if vol4_1.volume.altitude_lower.value > vol4_2.volume.altitude_upper.value:
        return False

    # Build vol4_1's footprint and the flattening reference point.
    if 'outline_circle' in vol4_1.volume:
        circle = vol4_1.volume.outline_circle
        if circle.radius.units != 'M':
            raise ValueError('Unsupported circle radius units: {}'.format(circle.radius.units))
        ref = s2sphere.LatLng.from_degrees(circle.center.lat, circle.center.lng)
        footprint1 = shapely.geometry.Point(0, 0).buffer(vol4_1.volume.outline_circle.radius.value)
    elif 'outline_polygon' in vol4_1.volume:
        p = vol4_1.volume.outline_polygon.vertices[0]
        ref = s2sphere.LatLng.from_degrees(p.lat, p.lng)
        footprint1 = shapely.geometry.Polygon(
            geo.flatten(ref, s2sphere.LatLng.from_degrees(v.lat, v.lng))
            for v in vol4_1.volume.outline_polygon.vertices)
    else:
        raise ValueError('Neither outline_circle nor outline_polygon specified')

    # Build vol4_2's footprint in the same local frame.
    if 'outline_circle' in vol4_2.volume:
        circle = vol4_2.volume.outline_circle
        if circle.radius.units != 'M':
            raise ValueError('Unsupported circle radius units: {}'.format(circle.radius.units))
        xy = geo.flatten(ref, s2sphere.LatLng.from_degrees(circle.center.lat, circle.center.lng))
        footprint2 = shapely.geometry.Point(*xy).buffer(circle.radius.value)
    elif 'outline_polygon' in vol4_2.volume:
        # Bug fix: this branch previously tested vol4_1.volume, so a
        # polygon-outlined vol4_2 paired with a circle-outlined vol4_1
        # incorrectly raised ValueError below.
        footprint2 = shapely.geometry.Polygon(
            geo.flatten(ref, s2sphere.LatLng.from_degrees(v.lat, v.lng))
            for v in vol4_2.volume.outline_polygon.vertices)
    else:
        raise ValueError('Neither outline_circle nor outline_polygon specified')

    return footprint1.intersects(footprint2)
def vol4s_intersect(vol4s_1: List[Volume4D], vol4s_2: List[Volume4D]) -> bool:
    """True if any volume of the first collection intersects any of the second."""
    return any(vol4_intersect(a, b) for a in vol4s_1 for b in vol4s_2)
|
<filename>src/server.py<gh_stars>0
import numpy as np
import sys
import os
import random
from collections import defaultdict
from bottle import route, run, response, abort, static_file, request, redirect
import mimetypes, base64
from decorators import gzipped, session_logged
from index import Index
# --- Command-line handling --------------------------------------------------
# Each positional argument is an index directory; the directory's basename
# becomes the index name, and the first one listed is the default index.
if len(sys.argv) < 2:
    print('usage: %s <index-directory+>' % sys.argv[0])
    sys.exit(1)

indexes = {}
default_index = None
for directory in sys.argv[1:]:
    name = directory.split('/')[-1]
    if default_index is None:
        default_index = name
    indexes[name] = Index(directory)
def closest(db, target, n):
    # Thin pass-through to the index: the n (score, image-id) pairs
    # nearest to `target` in index `db`.
    return db.closest(target, n)
def closest_diverse(db, target, n, random_ratio = .4, mmr_redundancy = .95):
    """Return ~n (score, image-id) pairs near `target`, diversified by an
    MMR-style redundancy filter over nearest neighbours plus random samples.

    NOTE(review): the bookkeeping looks fragile and should be confirmed:
    `selected_ids` mixes an image id (the target) with positions into
    `sorted_results`, and the final fallback condition
    `len(selected_ids) < len(sorted_results)` compares against the candidate
    count rather than n, so the fallback appears to trigger very often.
    """
    # Deterministic per-target layout: seed the RNG with the image id.
    np.random.seed(target)
    split = int(n * random_ratio)
    #selected = np.random.randint(0, db.vectors.shape[0], n * 2)
    selected = np.random.choice(range(db.vectors.shape[0]), n * 3, replace=False)
    scores = np.dot(db.vectors[selected], db.vectors[target].T)
    # Candidate pool = true nearest neighbours + scored random sample.
    result = list(db.closest(target, n - split)) + [(scores[i], selected[i]) for i in range(len(selected))]
    # Drop duplicate image ids, keeping the first occurrence.
    filtered_for_doubles = []
    seen_images = set()
    for score, i in result:
        if i not in seen_images:
            filtered_for_doubles.append((score, i))
            seen_images.add(i)
    sorted_results = sorted(filtered_for_doubles, key=lambda x: -x[0])
    # Greedy MMR pass: accept the next candidate whose maximum similarity to
    # the already-selected vectors drops below `mmr_redundancy`.
    selected = np.zeros((n, db.vectors.shape[1]))
    selected[0] = db.vectors[target]
    selected_ids = [target]
    v = db.vectors[sorted_results[0][1]]
    j = 0
    for i in range(1, n):
        value = 1
        while j < len(sorted_results) - 1 and value > mmr_redundancy:
            j += 1
            v = db.vectors[sorted_results[j][1]]
            value = max(np.dot(selected[:len(selected_ids)], v.T))
        #print(i, j, value)
        if j == len(sorted_results) - 1:
            break
        selected[i] = v
        selected_ids.append(j)
    # Fallback: take the top-n candidate positions verbatim.
    if len(selected_ids) < len(sorted_results):
        selected_ids = range(len(sorted_results))[:n]
    # Target always comes first with a synthetic perfect score of 1.
    return [(1, target)] + [sorted_results[j] for j in selected_ids[1:]]
#########################
@route('/static/<path:path>')
def callback(path):
    # Serve static assets (CSS etc.) straight from ./static/ relative to CWD.
    return static_file(path, root='./static/')
def template(body):
    """Wrap *body* in the site's HTML shell with inlined CSS and nav links."""
    with open('static/style.css') as fp:
        css = fp.read()
    # Bug fix: the original emitted '<DOCTYPE html>' (missing '!'), which is
    # not a doctype declaration at all and pushes browsers into quirks mode.
    return '''<!DOCTYPE html>
<html>
<head>
<style>''' + css + '''</style>
</head>
<body>
<div style="position:fixed; left:0px; bottom:0px; font-size: 300%">
<a href="javascript:history.back()">🡄</a>
<a href="javascript:history.forward()">🡆</a>
<a href="/">🏠</a>
</div>
''' + body + '''</body>
</html>'''
def get_image_url(index, num, use_data_url=False):
    """Return a URL for image `num` of index `index`.

    Either a route path (/image/...) or an inline data: URL when
    `use_data_url` is set.  Returns '' when the image cannot be resolved.
    """
    if index not in indexes:
        print('index "%s" not found' % index)
        return ''
    db = indexes[index]
    name = db.image(num)
    mime_type, _ = mimetypes.guess_type(name)
    if mime_type is None:
        return ''
    if not use_data_url:
        return '/image/%s/%d' % (index, num)
    try:
        with open(name, 'rb') as fp:
            encoded = base64.b64encode(fp.read())
    except:
        print('not found', name)
        return ''
    return 'data:%s;base64,%s' % (mime_type, encoded.decode())
@route('/image/<index>/<num:int>')
def image(index, num):
    """Serve the raw file of image `num` from index `index`."""
    if index not in indexes:
        abort(404, 'index "%s" not found' % index)
    db = indexes[index]
    name = db.image(num)
    mime_type, _ = mimetypes.guess_type(name)
    if mime_type is not None:
        filename = name.split('/')[-1]
        return static_file(filename, root=db.image_directory, mimetype=mime_type)
    abort(404, 'image %d not found' % num)
@gzipped()
@route('/1d-sorted/<index>/m=<method>/t=<target:int>/n=<num:int>/r=<redundancy:float>')
@session_logged()
def sorted_1d(index, method='diverse', target=0, num=109, redundancy=.95):
    # Render a flat list of images similar to `target`: the target is shown
    # double-sized, every other image links to its own result page.
    # NOTE(review): @gzipped() sits above @route, so bottle registered the
    # un-gzipped callable — the gzip wrapper likely never runs for requests;
    # confirm and move it below @route if compression is intended.
    if index not in indexes:
        abort(404, 'index "%s" not found' % index)
    db = indexes[index]
    if db.image(target) is None:
        abort(404, 'not found')
    if method == 'diverse':
        found = closest_diverse(db, target, num, mmr_redundancy=redundancy)
    elif method == 'closest':
        found = db.closest(target, num)
    else:
        abort(501, 'method not supported')
    content = ''
    for score, i in found:
        description = db.description(i)
        if i == target:
            content += '<img style="float:left" title="%s" alt="%s" width="224" height="224" src="%s">' % (description, description, get_image_url(index, i))
        else:
            content += '<a href="/1d-sorted/%s/m=%s/t=%d/n=%d/r=%g"><img title="%s" alt="%s" width="112" height="112" src="%s"></a>' % (index, method, i, num, redundancy, description, description, get_image_url(index, i))
    #response.set_header("Cache-Control", "public, max-age=604800")
    return template(content)
@gzipped()
@route('/2d-sorted/<index>/m=<method>/t=<target:int>/w=<N:int>/h=<M:int>/r=<random_ratio:float>/t=<redundancy_threshold:float>')
@session_logged()
def sorted_2d(index, method='diverse', target=0, N=12, M=8, random_ratio=.1, redundancy_threshold=.75):
    # Render an N x M grid of images similar to `target`, laid out so that
    # visually similar images end up adjacent (via grid_sort), with the
    # target shown double-sized in a reserved 2x2 block at the centre.
    # NOTE(review): the route pattern binds 't=' twice (target and
    # redundancy_threshold) — confirm bottle resolves both as intended.
    # NOTE(review): @gzipped() is applied above @route, so the callable
    # bottle registered is the un-gzipped one — confirm gzip ever applies.
    if index not in indexes:
        abort(404, 'index "%s" not found' % index)
    db = indexes[index]
    if db.image(target) is None:
        abort(404, 'not found')
    import grid_sort
    NUM = N * M
    # NUM - 3 images are needed: the centre 2x2 block shows one image.
    if method == 'diverse':
        found = np.array([x[1] for x in closest_diverse(db, target, NUM - 3, random_ratio=random_ratio, mmr_redundancy=redundancy_threshold)])
    elif method == 'closest':
        found = np.array([x[1] for x in db.closest(target, NUM - 3)])
    else:
        abort(501, 'method not supported')
    vectors = db.vectors[found]
    # Seed the grid: move the cells displaced by the centre 2x2 block to the
    # last row / corner, then pin the centre cells so grid_sort keeps them.
    indices = np.arange(NUM).reshape(N, M)
    fixed = np.zeros((N, M))
    CN, CM = N // 2 - 1, M // 2 - 1
    indices[N - 1, M - 1] = indices[CN, CM]
    indices[N - 1, M - 2] = indices[CN + 1, CM]
    indices[N - 1, M - 3] = indices[CN, CM + 1]
    indices[0, 0] = indices[CN + 1, CM + 1]
    indices[CN, CM] = 0
    indices[CN + 1, CM] = 0
    indices[CN, CM + 1] = 0
    indices[CN + 1, CM + 1] = 0
    fixed[CN, CM] = 1
    fixed[CN + 1, CM] = 1
    fixed[CN, CM + 1] = 1
    fixed[CN + 1, CM + 1] = 1
    grid_sort.seed(target)
    indices = grid_sort.sort(vectors, N, M, indices, fixed)
    # Emit the HTML table; the centre cell spans 2x2, and the three cells it
    # covers are skipped.
    table = '<table border="0" cellspacing="0" cellpadding="0" style="margin: auto">'
    for j in range(M):
        row = '<tr>'
        for i in range(N):
            image = found[indices[i, j]]
            description = db.description(image)
            if i == CN and j == CM:
                row += '<td colspan="2" rowspan="2" style="text-align: center"><img title="%s" alt="%s" src="%s" width="210" height="210"></td>' % (description, description, get_image_url(index, image))
            elif i == CN + 1 and j == CM:
                pass
            elif i == CN and j == CM + 1:
                pass
            elif i == CN + 1 and j == CM + 1:
                pass
            else:
                row += '<td><a href="/2d-sorted/%s/m=%s/t=%d/w=%d/h=%d/r=%g/t=%g"><img title="%s" alt="%s" src="%s" width="112" height="112"></a></td>' % (index, method, image, N, M, random_ratio, redundancy_threshold, description, description, get_image_url(index, image))
        row += '</tr>'
        table += row
    table += '</table>'
    return template(table)
@route('/starting-point/<index>/w=<width:int>/h=<height:int>')
@session_logged()
def starting_point(index, width=14, height=8):
    """Render a width x height grid of randomly sampled images.

    The sample is seeded from the ``parameters`` query string, so the same
    URL always shows the same grid. Each image links to the URL template in
    ``parameters`` with the $INDEX/$WIDTH/$HEIGHT/$TARGET placeholders
    substituted, making this page the entry point into the sorted views.
    """
    if index not in indexes:
        abort(404, 'index "%s" not found' % index)
    db = indexes[index]
    raw_params = request.query.parameters
    # Deterministic sample per query string.
    random.seed(hash(raw_params))
    sample = random.sample(range(0, len(db.images)), width * height)
    link_template = (raw_params
                     .replace('$INDEX', index)
                     .replace('$WIDTH', str(width))
                     .replace('$HEIGHT', str(height)))
    rows = []
    for r in range(height):
        cells = []
        for c in range(width):
            image = sample[r * width + c]
            description = db.description(image)
            cells.append(
                '<td><a href="%s"><img title="%s" alt="%s" src="%s" width="112" height="112"></a></td>'
                % (link_template.replace('$TARGET', str(image)),
                   description, description, get_image_url(index, image)))
        rows.append('<tr>' + ''.join(cells) + '</tr>')
    table = ('<table border="0" cellspacing="0" cellpadding="0" style="margin: auto">'
             + ''.join(rows) + '</table>')
    return template(table)
@route('/')
@session_logged()
def index():
    """Landing page: per image index, list links to the browse entry points."""
    sections = []
    for name in indexes:
        links = [
            '<li><a href="/starting-point/%s/w=14/h=8?parameters=/2d-sorted/$INDEX/m=diverse/t=$TARGET/w=$WIDTH/h=$HEIGHT/r=.5/t=.85">Diverse, 2d-sorted</a></li>' % (name),
            '<li><a href="/starting-point/%s/w=14/h=8?parameters=/2d-sorted/$INDEX/m=closest/t=$TARGET/w=$WIDTH/h=$HEIGHT/r=.5/t=.85">Closest, 2d-sorted</a></li>' % (name),
            '<li><a href="/starting-point/%s/w=14/h=8?parameters=/1d-sorted/$INDEX/m=diverse/t=$TARGET/n=115/r=.95">Diverse, 1d-sorted</a></li>' % (name),
            '<li><a href="/starting-point/%s/w=14/h=8?parameters=/1d-sorted/$INDEX/m=closest/t=$TARGET/n=115/r=.95">Closest, 1d-sorted</a></li>' % (name),
        ]
        sections.append('<h1>%s</h1><ul>%s</ul>' % (name, ''.join(links)))
    return template(''.join(sections))
if __name__ == '__main__':
    # Serve the browsing UI locally; 'bjoern' selects the bjoern WSGI backend.
    run(server='bjoern', host='localhost', port=8080)
|
<filename>evology/bin/gp/gp_bin/gp.py
import operator
import math
import random
import numpy as np
from deap import algorithms
from deap import base
from deap import creator
from deap import tools
from deap import gp
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import graphviz_layout
import networkx as nx
import pydot
from networkx.drawing.nx_pydot import graphviz_layout
# in Conda prompt conda install -c alubbock pygraphviz run in admin mode
# Define new functions
def protectedDiv(left, right):
    """Division primitive that yields 1 instead of failing on a zero divisor."""
    if right == 0:
        return 1
    return left / right
# Primitive set for symbolic regression: one input argument (renamed "x")
# and arithmetic primitives; protectedDiv keeps division from raising.
pset = gp.PrimitiveSet("MAIN", 1)
pset.addPrimitive(operator.add, 2)
pset.addPrimitive(operator.sub, 2)
pset.addPrimitive(operator.mul, 2)
pset.addPrimitive(protectedDiv, 2)
pset.addPrimitive(operator.neg, 1)
# Optional extra primitives, kept disabled for experimentation:
# pset.addPrimitive(math.cos, 1)
# pset.addPrimitive(math.sin, 1)
# pset.addPrimitive(math.exp, 1)
# pset.addEphemeralConstant("rand101", lambda: random.randint(-1,1))
pset.renameArguments(ARG0="x")
# Single-objective minimisation: lower mean squared error is better.
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", gp.PrimitiveTree, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# Initial trees of depth 1-2, generated with the ramped half-and-half method.
toolbox.register("expr", gp.genHalfAndHalf, pset=pset, min_=1, max_=2)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.expr)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("compile", gp.compile, pset=pset)
def evalSymbReg(individual, points):
    """Mean-squared-error fitness against the target x**4 + x**3 + x**2 + x.

    Returns a 1-tuple, as DEAP fitness values must be sequences.
    """
    # Turn the primitive tree into an executable Python function.
    compiled = toolbox.compile(expr=individual)
    squared_errors = ((compiled(x) - x**4 - x**3 - x**2 - x)**2 for x in points)
    return (math.fsum(squared_errors) / len(points),)
# Fitness is evaluated on 20 sample points in [-1.0, 1.0).
toolbox.register("evaluate", evalSymbReg, points=[x / 10.0 for x in range(-10, 10)])
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("mate", gp.cxOnePoint)
# Mutation replaces a subtree with a freshly grown one of depth 0-2.
toolbox.register("expr_mut", gp.genFull, min_=0, max_=2)
toolbox.register("mutate", gp.mutUniform, expr=toolbox.expr_mut, pset=pset)
# Cap tree height at 17 on both variation operators to limit bloat.
toolbox.decorate(
    "mate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17)
)
toolbox.decorate(
    "mutate", gp.staticLimit(key=operator.attrgetter("height"), max_value=17)
)
def main():
    """Run a generational GP search for the target polynomial.

    Mirrors ``deap.algorithms.eaSimple`` with hard-coded settings:
    300 individuals, 50 generations, crossover probability 0.5 and
    mutation probability 0.1, logging fitness and tree-size statistics
    every generation.

    Returns
    -------
    tuple
        ``(population, logbook, hof)`` -- the final population, the
        per-generation statistics logbook, and a hall of fame holding
        the single best individual seen during the run.
    """
    random.seed(8)  # fixed seed for reproducible runs
    hof = tools.HallOfFame(1)
    halloffame = hof
    population = toolbox.population(n=300)
    cxpb = 0.5   # crossover probability
    ngen = 50    # number of generations
    mutpb = 0.1  # mutation probability
    verbose = True
    # Track fitness and tree-size statistics side by side.
    stats_fit = tools.Statistics(lambda ind: ind.fitness.values)
    stats_size = tools.Statistics(len)
    mstats = tools.MultiStatistics(fitness=stats_fit, size=stats_size)
    mstats.register("avg", np.mean)
    mstats.register("std", np.std)
    mstats.register("min", np.min)
    mstats.register("max", np.max)
    stats = mstats
    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + (stats.fields if stats else [])
    # Evaluate the individuals with an invalid fitness
    invalid_ind = [ind for ind in population if not ind.fitness.valid]
    fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
    for ind, fit in zip(invalid_ind, fitnesses):
        ind.fitness.values = fit
    if halloffame is not None:
        halloffame.update(population)
    record = stats.compile(population) if stats else {}
    logbook.record(gen=0, nevals=len(invalid_ind), **record)
    if verbose:
        print(logbook.stream)
    # Begin the generational process
    for gen in range(1, ngen + 1):
        # Select the next generation individuals
        offspring = toolbox.select(population, len(population))
        # Clone the *selected* individuals so variation works on copies.
        # BUG FIX: this previously cloned `population`, silently discarding
        # the tournament selection performed just above (cf. eaSimple/varAnd).
        offspring = [toolbox.clone(ind) for ind in offspring]
        # Crossover on consecutive pairs; children get invalidated fitnesses.
        for i in range(1, len(offspring), 2):
            if random.random() < cxpb:
                offspring[i - 1], offspring[i] = toolbox.mate(
                    offspring[i - 1], offspring[i]
                )
                del offspring[i - 1].fitness.values, offspring[i].fitness.values
        # Mutation; mutants get invalidated fitnesses.
        for i in range(len(offspring)):
            if random.random() < mutpb:
                (offspring[i],) = toolbox.mutate(offspring[i])
                del offspring[i].fitness.values
        # Evaluate the individuals with an invalid fitness
        invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
        fitnesses = toolbox.map(toolbox.evaluate, invalid_ind)
        for ind, fit in zip(invalid_ind, fitnesses):
            ind.fitness.values = fit
        # Update the hall of fame with the generated individuals
        if halloffame is not None:
            halloffame.update(offspring)
        # Replace the current population by the offspring
        population[:] = offspring
        # Append the current generation statistics to the logbook
        record = stats.compile(population) if stats else {}
        logbook.record(gen=gen, nevals=len(invalid_ind), **record)
        if verbose:
            print(logbook.stream)
    return population, logbook, hof
# Run the search and report/plot the single best individual.
pop, log, hof = main()
bests = tools.selBest(pop, k=1)
# (kept for reference) helper to draw an arbitrary tree:
# def plot_graph(tree):
#     nodes, edges, labels = gp.graph(tree)
#     graph = nx.Graph()
#     graph.add_nodes_from(nodes)
#     graph.add_edges_from(edges)
#     pos = graphviz_layout(graph, prog="dot")  # run dot -c in conda prompt
#     plt.figure(figsize=(7, 7))
#     nx.draw_networkx_nodes(graph, pos, node_size=900, node_color="w")
#     nx.draw_networkx_edges(graph, pos)
#     nx.draw_networkx_labels(graph, pos, labels)
#     plt.axis("off")
#     plt.show()
# print("Pop")
# for i in range(len(pop)):
#     print(pop[i])
#     plot_graph(pop[i])
print("Best ")
print(bests[0])
print(type(bests[0]))
print(toolbox.compile(expr=bests[0]))
# Show solution
# NOTE(review): the GP fitness target above is x**4 + x**3 + x**2 + x, but the
# "true function" plotted here is x**3 + x**2 + x + 1 -- one of the two looks
# out of date; confirm which polynomial is intended.
def func(x):
    return x ** 3 + x ** 2 + x + 1
X = np.linspace(-1, 1, 100)
Y_true = func(X)
# Compile the best individual once instead of once per sample point.
best_func = toolbox.compile(bests[0])
Y_reconstructed = [best_func(x) for x in X]
plt.plot(X, Y_true, label = 'True function')
plt.plot(X, Y_reconstructed, label = 'GP symbolic regression')
plt.legend()
plt.show()
# Draw the best tree with graphviz.
nodes, edges, labels = gp.graph(bests[0])
graph = nx.Graph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
pos = graphviz_layout(graph, prog = 'dot') # prog="twopi") # run dot -c in conda prompt #"dot"
plt.figure(figsize=(7, 7))
nx.draw_networkx_nodes(graph, pos, node_size=900, node_color="w", node_shape='o')
nx.draw_networkx_edges(graph, pos)
nx.draw_networkx_labels(graph, pos, labels)
plt.axis("off")
plt.show()
# Plot minimum fitness and average tree size per generation on twin axes.
gen = log.select("gen")
fit_mins = log.chapters["fitness"].select("min")
size_avgs = log.chapters["size"].select("avg")
fig, ax1 = plt.subplots()
line1 = ax1.plot(gen, fit_mins, "b-", label="Minimum Fitness")
ax1.set_xlabel("Generation")
ax1.set_ylabel("Fitness", color="b")
for tl in ax1.get_yticklabels():
    tl.set_color("b")
ax2 = ax1.twinx()
line2 = ax2.plot(gen, size_avgs, "r-", label="Average Size")
ax2.set_ylabel("Size", color="r")
for tl in ax2.get_yticklabels():
    tl.set_color("r")
lns = line1 + line2
labs = [l.get_label() for l in lns]
ax1.legend(lns, labs, loc="center right")
plt.show()
"""
Residential Efficiency outputs
------------------------------
output functions for Residential Efficiency component
"""
import os.path
import aaem.constants as constants
from aaem.components import comp_order
import aaem_summaries.web_lib as wl
from pandas import DataFrame
COMPONENT_NAME = "Residential Energy Efficiency"
DESCRIPTION = """
This component calculates the potential change in heating fuel usage from residential-building energy efficiency improvements.
"""
def generate_web_summary (web_object, community):
    """generate html summary for a community.
    generates web_object.directory/community/<component>.html and
    associated csv files.
    Parameters
    ----------
    web_object: WebSummary
        a WebSummary object
    community: str
        community name
    See also
    --------
    aaem.web :
        WebSummary object definition
    """
    ## get the template
    template = web_object.component_html
    ## get the component (the modeled one)
    modeled = web_object.results[community][COMPONENT_NAME]
    start_year = modeled.start_year
    end_year = modeled.actual_end_year
    ## for make table functions
    projects = {'Modeled ' + COMPONENT_NAME: modeled}
    ## get forecast stuff (consumption, generation, etc)
    fc = modeled.forecast
    # NOTE(review): Series/DataFrame `.ix` was removed in pandas >= 1.0;
    # these label slices should migrate to `.loc` -- confirm the pandas
    # version this project pins before upgrading.
    generation = fc.generation['generation diesel'].\
                                        ix[start_year:end_year]
    ## get the diesel prices
    diesel_price = web_object.results[community]['community data'].\
                            get_item('community','diesel prices').ix[start_year:end_year]
    ## get diesel generator efficiency
    eff = modeled.cd['diesel generation efficiency']
    ## get generation fuel costs per year (modeled)
    base_cost = generation/eff * diesel_price
    base_cost.name = 'Base Cost'
    base_cost = DataFrame(base_cost)
    # The base cost column is replaced with heating fuel + electricity cost,
    # trimmed to the modeled project lifetime.
    base_cost['Base Cost'] = (modeled.baseline_HF_cost + modeled.baseline_kWh_cost)[:modeled.actual_project_life]
    table1 = wl.make_costs_table(community, COMPONENT_NAME, projects, base_cost,
                        web_object.directory)
    ## get generation fuel used (modeled)
    base_con = generation/eff
    base_con.name = 'Base Consumption'
    base_con = DataFrame(base_con)
    #~ base_con['Base Consumption'] = modeled.baseline_kWh_consumption
    #~ table2 = wl.make_consumption_table(community, COMPONENT_NAME,
    #~                                 projects, base_con,
    #~                                 web_object.directory,
    #~                                 'proposed_kWh_consumption')
    base_con['Base Consumption'] = modeled.baseline_fuel_Hoil_consumption[:modeled.actual_project_life]
    table3 = wl.make_consumption_table(community, COMPONENT_NAME,
                                    projects, base_con,
                                    web_object.directory,
                                    'savings_HF')
    #~ table3[0][-1]
    ## current-system summary rows shown next to the modeled project
    year = modeled.comp_specs['data']['Year']
    current = [
        {'words':'Households ('+ str(int(year)) + ')' ,
            'value': int(modeled.comp_specs['data']['Total Occupied'])},
        {'words':'Households with BEES certification' + \
                ' ('+ str(int(year)) +')',
         'value': str(modeled.comp_specs['data']['BEES Number'])},
        {'words':
            'Households participating in weatherization of home ' + \
            'energy rebate programs ('+ str(int(year)) +')',
         'value': str(modeled.comp_specs['data']['Post-Retrofit Number'])},
        {'words':'Estimated Total Households (' +\
            str(int(modeled.start_year)) +')',
         'value': modeled.init_HH},
        ]
    ## info for modeled
    info = create_project_details_list (modeled)
    ## info table (list to send to template)
    info_for_projects = [{'name': 'Current System', 'info':current},
                            {'name':'Modeled Efficiency Project','info':info}]
    ## create list of charts
    charts = [
        {'name':'costs', 'data': str(table1).replace('nan','null'),
         'title': 'Estimated Heating Fuel + Electricity Costs for residential sector',
         'type': "'$'", 'plot': True,},
        #~ {'name':'E_consumption', 'data': str(table2).replace('nan','null'),
         #~ 'title':'Electricity Consumed',
         #~ 'type': "'other'",'plot': True,},
        {'name':'H_consumption', 'data': str(table3).replace('nan','null'),
         'title':'Heating Oil Consumed by residential sector',
         'type': "'other'", 'plot': True,}
        ]
    ## generate html
    msg = None
    if community in web_object.bad_data_coms:
        msg = web_object.bad_data_msg
    pth = os.path.join(web_object.directory, community.replace("'",''),
                COMPONENT_NAME.replace(' ','_').lower() + '.html')
    with open(pth, 'w') as html:
        html.write(template.render( info = info_for_projects,
                                    type = COMPONENT_NAME,
                                    com = community.replace("'",'') ,
                                    charts = charts,
                                    summary_pages = ['Summary'] + comp_order ,
                                    sections = web_object.get_summary_pages(),
                                    description =  DESCRIPTION,
                                    metadata = web_object.metadata,
                                    message = msg
                                    ))
def create_project_details_list (project):
    """Build the 'project details' rows for the HTML summary.

    Parameters
    ----------
    project: ResidentialBuildings
        A ResidentialBuildings object whose run function has been called

    Returns
    -------
    list of dict
        One {'words': ..., 'value': ...} row per displayed metric.
    """
    # First-year fraction of baseline heating fuel still consumed after the
    # retrofit, expressed as a percentage saved.
    first_year_ratio = (
        project.proposed_HF_consumption / project.baseline_HF_consumption
    )[0]
    ex_h_savings = (1 - first_year_ratio) * 100
    money = '${:,.0f}'.format
    return [
        {'words':'Capital cost',
            'value': money(project.get_NPV_costs())},
        {'words':'Lifetime energy cost savings',
            'value': money(project.get_NPV_benefits())},
        {'words':'Net lifetime savings',
            'value': money(project.get_NPV_net_benefit())},
        {'words':'Benefit-cost ratio',
            'value': '{:,.1f}'.format(project.get_BC_ratio())},
        {'words':'Expected space heating savings ',
            'value': '{:,.2f}%'.format(ex_h_savings)},
        {'words':
            'Estimated households to be retrofit ('\
                + str(int(project.start_year)) +')' ,
            'value': int(project.opportunity_HH)},
        {'words':'Estimated cost to refit household',
            'value': '${:,.2f}/home'.format(project.refit_cost_rate)},
    ]
|
<reponame>frank-chris/ScrapingTwitterPostsOnPolitics<filename>scripts/calc_answers.py
"""
<NAME> (<EMAIL>)
Script to find the answers for the Assignment 1 question from the scraped CSVs
"""
usage = "Usage:\n\n\
\
python3 calc_answers.py DIR_PATH\n\n\
\
* DIR_PATH: \n\
path to the directory which contains the scraped CSV files for the subtopics \
should not contain any other CSV file except the 'n' CSV files corresponding to \
'n' subtopics (all processing, like removing duplicates, concatenation etc should \
be done before)\n"
import pandas as pd
import plotly.graph_objects as go
from tqdm import tqdm
from ast import literal_eval
import numpy as np
import sys
import os
if __name__ == "__main__":
    # Validate the command line: exactly one argument, an existing directory.
    try:
        assert(len(sys.argv) == 2)
        assert(os.path.isdir(sys.argv[1]))
    except:
        print(usage)
        exit()
    dir_path = sys.argv[1]
    file_paths = [os.path.join(dir_path, path) for path in os.listdir(dir_path)]
    # One shared figure accumulating the month-wise trace for every subtopic.
    month_fig = go.Figure()
    # All textual statistics are appended to stats.txt.
    stats = open('stats.txt', 'a')
    for f_path in tqdm(file_paths):
        print('\n' + f_path + ':\n', file=stats)
        # read subtopic file
        # NOTE(review): `error_bad_lines` was deprecated in pandas 1.3 in
        # favour of `on_bad_lines='skip'` -- confirm the pinned pandas version.
        subtopic_df = pd.read_csv(f_path, sep='\t', dtype={'place':str}, error_bad_lines=False)
        # The list-valued columns are stored as stringified lists; replace
        # missing values with empty-list literals so literal_eval succeeds.
        subtopic_df['hashtags'].fillna(value='[]', inplace=True)
        subtopic_df['urls'].fillna(value='[]', inplace=True)
        subtopic_df['photos'].fillna(value='[]', inplace=True)
        subtopic_df['mentions'].fillna(value='[]', inplace=True)
        # top 10 hashtags
        hashtags = pd.Series(subtopic_df['hashtags'].apply(literal_eval).sum())
        top10hashtags = hashtags.value_counts()[:10]
        print('top 10 hashtags\n', top10hashtags, file=stats)
        fig = go.Figure()
        fig.add_trace(go.Bar(x=['#' + i for i in top10hashtags.index], y=top10hashtags))
        fig.update_layout(font_size=13, title_text='Top 10 hashtags for ' + os.path.basename(f_path).replace('.csv', ''),
        xaxis_title='Hashtag', yaxis_title='Frequency')
        fig.show()
        # top 10 mentions
        mentions = pd.Series([x['screen_name'] for x in subtopic_df['mentions'].apply(literal_eval).sum()])
        top10mentions = mentions.value_counts()[:10]
        print('top 10 mentions\n', top10mentions, file=stats)
        fig = go.Figure()
        fig.add_trace(go.Bar(x=['@' + i for i in top10mentions.index], y=top10mentions))
        fig.update_layout(font_size=13, title_text='Top 10 mentions for ' + os.path.basename(f_path).replace('.csv', ''),
        xaxis_title='Mention', yaxis_title='Frequency')
        fig.show()
        # top 5 languages
        top5langs = subtopic_df['language'].value_counts()[:5]
        print('language tags', subtopic_df['language'].value_counts(), file=stats)
        fig = go.Figure()
        fig.add_trace(go.Bar(x=top5langs.index, y=top5langs))
        fig.update_layout(font_size=13, title_text='Top 5 language tags for ' + os.path.basename(f_path).replace('.csv', ''),
        xaxis_title='Language Tag', yaxis_title='Frequency')
        fig.show()
        # month-wise plot (per-date tweet counts added to the shared figure)
        months = subtopic_df['date'].value_counts().sort_index()
        month_fig.add_trace(go.Scatter(x=months.index, y = months, name=os.path.basename(f_path).replace('.csv', '')))
        # words: dump every whitespace-separated token to a *_words.csv file
        words = (' '.join([str(i) for i in list(subtopic_df['tweet'])])).split()
        words_df = pd.DataFrame()
        words_df['words'] = words
        words_df.to_csv(f_path.replace('.csv', '_words.csv'), index=False, header=False)
        # stats
        print('No of tweets', subtopic_df.index.size, file=stats)
        print('No of mentions', len(mentions), file=stats)
        print('No of hashtags', len(hashtags), file=stats)
        print('No of unique mentions', mentions.nunique(), file=stats)
        print('No of unique hashtags', hashtags.nunique(), file=stats)
        print('No of unique languages', subtopic_df['language'].nunique(), file=stats)
        days = subtopic_df['date'].value_counts().sort_index()
        print('Max number of tweets in a day', days.max(), file=stats)
        print('Time zones', subtopic_df['timezone'].nunique(), file=stats)
        print('No of conversations/threads', subtopic_df['conversation_id'].nunique(), file=stats)
        print('No of users', subtopic_df['user_id'].nunique(), file=stats)
        print('No of places', subtopic_df['place'].nunique(), file=stats)
        # urls = pd.Series(subtopic_df['urls'].apply(literal_eval).sum())
        # print('No of urls', urls.nunique(), file=stats)
        # photos = pd.Series(subtopic_df['photos'].apply(literal_eval).sum())
        # print('No of photos', photos.nunique(), file=stats)
        print('Total number of likes', subtopic_df['likes_count'].sum(), file=stats)
        print('No of videos', subtopic_df['video'].sum(), file=stats)
        print('geo tags count', subtopic_df['geo'].nunique(), file=stats)
        print('percentage of tweets that are retweets', 100 - (subtopic_df['retweet']=='False').sum()*100/subtopic_df.index.size, file=stats)
        print('Total no of retweets received', subtopic_df['retweets_count'].sum(), file=stats)
        print('Total no of replies received', subtopic_df['replies_count'].sum(), file=stats)
    month_fig.update_layout(font_size=13, title_text='Monthly distribution of tweets',
    xaxis_title='Month', yaxis_title='No. of tweets', legend_title_text='Subtopic')
    month_fig.show()
    stats.close()
<gh_stars>0
import datetime, time
import re
import sys
try:
import simplejson as json
except ImportError:
import json
from dogshell.common import report_errors, report_warnings, CommandLineClient
def prettyprint_event(event):
    """Print a two-line human-readable summary of *event* to stdout.

    Line 1: "title (handle)"; line 2: local timestamp and the event URL.
    Missing title/handle values are treated as empty strings.
    """
    title = event['title'] or ''
    handle = event.get('handle', '') or ''
    happened = datetime.datetime.fromtimestamp(event['date_happened'])
    print('{0} ({1})'.format(title, handle).strip())
    print('{0} | {1}'.format(happened.isoformat(' '), event['url']))
def print_event(event):
    # Plain ("non-pretty") output currently delegates to the pretty formatter.
    prettyprint_event(event)
def prettyprint_event_details(event):
    # Detailed output currently shows the same summary as prettyprint_event.
    prettyprint_event(event)
def print_event_details(event):
    # Delegate to the *details* pretty-printer for consistency with
    # print_event -> prettyprint_event. (Previously this called
    # prettyprint_event directly, which is behaviorally identical today but
    # would silently miss any future change to the details formatting.)
    prettyprint_event_details(event)
# Relative offsets: a number followed by m(inutes), h(ours) or d(ays).
time_pat = re.compile(r'(?P<delta>[0-9]*\.?[0-9]+)(?P<unit>[mhd])')

def parse_time(timestring):
    """Parse a time argument into an integer POSIX timestamp.

    Accepts ``None`` (now), an integer timestamp string, or a relative
    offset in the past such as ``'5m'``, ``'6h'`` or ``'3d'``.

    Raises
    ------
    ValueError
        If the string is neither an integer nor a recognised offset.
        (Previously a bare ``Exception`` with no message was raised, and a
        bare ``except:`` masked unrelated failures.)
    """
    now = time.mktime(datetime.datetime.now().timetuple())
    if timestring is None:
        return int(now)
    try:
        return int(timestring)
    except ValueError:
        match = time_pat.match(timestring)
        if match is None:
            raise ValueError('unparseable time: %r' % (timestring,))
        delta = float(match.group('delta'))
        unit = match.group('unit')
        seconds_per_unit = {'m': 60, 'h': 60 * 60, 'd': 60 * 60 * 24}
        return int(now - int(delta * seconds_per_unit[unit]))
class EventClient(CommandLineClient):
    """Dogshell handler for the ``event`` verb: post, show and stream events."""

    def setup_parser(self, subparsers):
        """Register the ``event`` sub-parser and its post/show/stream verbs."""
        parser = subparsers.add_parser('event', help='Post events, get event details, and view the event stream.')
        verb_parsers = parser.add_subparsers(title='Verbs')

        post_parser = verb_parsers.add_parser('post', help='Post events.')
        post_parser.add_argument('title', help='event title')
        post_parser.add_argument('--date_happened', help='POSIX timestamp when the event occurred. if unset defaults to the current time.')
        post_parser.add_argument('--handle', help='user to post as. if unset, submits as the generic API user.')
        post_parser.add_argument('--priority', help='"normal" or "low". defaults to "normal"')
        post_parser.add_argument('--related_event_id', help='event to post as a child of. if unset, posts a top-level event')
        post_parser.add_argument('--tags', help='comma separated list of tags')
        post_parser.add_argument('--host', help='related host')
        post_parser.add_argument('--device', help='related device (e.g. eth0, /dev/sda1)')
        post_parser.add_argument('--type', help='type of event, e.g. nagios, jenkins, etc.')
        post_parser.add_argument('message', help='event message body. if unset, reads from stdin.', nargs="?")
        post_parser.set_defaults(func=self._post)

        show_parser = verb_parsers.add_parser('show', help='Show event details.')
        show_parser.add_argument('event_id', help='event to show')
        show_parser.set_defaults(func=self._show)

        # BUG FIX: the help string previously read 'Delete comments.', a
        # copy/paste leftover from another verb's parser.
        stream_parser = verb_parsers.add_parser('stream', help='Retrieve events from the event stream.', description="Stream start and end times can be specified as either a POSIX timestamp (e.g. the output of `date +%s`) or as a period of time in the past (e.g. '5m', '6h', '3d').")
        stream_parser.add_argument('start', help='start date for the stream request')
        stream_parser.add_argument('end', help='end date for the stream request (defaults to "now")', nargs='?')
        stream_parser.add_argument('--priority', help='filter by priority. "normal" or "low". defaults to "normal"')
        stream_parser.add_argument('--sources', help='comma separated list of sources to filter by')
        stream_parser.add_argument('--tags', help='comma separated list of tags to filter by')
        stream_parser.set_defaults(func=self._stream)

    def _post(self, args):
        """Post a new event; reads the message body from stdin when omitted."""
        # BUG FIX: this previously assigned `self.dog.timeoue` (a typo), so
        # the --timeout option silently never took effect. Same fix applied
        # in _show and _stream below.
        self.dog.timeout = args.timeout
        format = args.format
        message = args.message
        if message is None:
            message = sys.stdin.read()
        if args.tags is not None:
            tags = [t.strip() for t in args.tags.split(',')]
        else:
            tags = None
        res = self.dog.event_with_response(args.title,
                                           message,
                                           args.date_happened,
                                           args.handle,
                                           args.priority,
                                           args.related_event_id,
                                           tags,
                                           args.host,
                                           args.device)
        report_warnings(res)
        report_errors(res)
        if format == 'pretty':
            prettyprint_event(res['event'])
        elif format == 'raw':
            print(json.dumps(res))
        else:
            print_event(res['event'])

    def _show(self, args):
        """Fetch and display a single event by id."""
        self.dog.timeout = args.timeout  # BUG FIX: was `timeoue`
        format = args.format
        res = self.dog.get_event(args.event_id)
        report_warnings(res)
        report_errors(res)
        if format == 'pretty':
            prettyprint_event_details(res['event'])
        elif format == 'raw':
            print(json.dumps(res))
        else:
            print_event_details(res['event'])

    def _stream(self, args):
        """Fetch and display the event stream between two parsed times."""
        self.dog.timeout = args.timeout  # BUG FIX: was `timeoue`
        format = args.format
        if args.sources is not None:
            sources = [s.strip() for s in args.sources.split(',')]
        else:
            sources = None
        if args.tags is not None:
            tags = [t.strip() for t in args.tags.split(',')]
        else:
            tags = None
        start = parse_time(args.start)
        end = parse_time(args.end)
        res = self.dog.stream(start, end, args.priority, sources, tags)
        report_warnings(res)
        report_errors(res)
        if format == 'pretty':
            for event in res['events']:
                prettyprint_event(event)
                print()
        elif format == 'raw':
            print(json.dumps(res))
        else:
            for event in res['events']:
                print_event(event)
                print()
|
<gh_stars>100-1000
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.core.management import call_command
from django.db import models
class Migration(DataMigration):
    """South data migration that loads the idea-state fixture."""

    def forwards(self, orm):
        """
        Load the two statuses.
        """
        call_command('loaddata', 'state.json')

    def backwards(self, orm):
        """
        No backwards.
        """
        # Fixture loading cannot be safely undone, so reversing is refused.
        raise RuntimeError('No reverse for state.json')

    # Frozen ORM snapshot used by South to reconstruct the models as they
    # existed when this migration was written. Auto-generated; do not edit
    # except to track schema changes.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            # NOTE(review): the field class below looks like a dataset
            # redaction artifact ('<PASSWORD>'); the original was presumably
            # 'django.db.models.fields.CharField' -- confirm before use.
            'password': ('<PASSWORD>', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'idea.comment': {
            'Meta': {'object_name': 'Comment'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idea.Idea']"}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'time': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'idea.idea': {
            'Meta': {'object_name': 'Idea'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'state': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idea.State']"}),
            'text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 31, 0, 0)'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'})
        },
        'idea.state': {
            'Meta': {'object_name': 'State'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'previous': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['idea.State']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'idea.vote': {
            'Meta': {'object_name': 'Vote'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'idea': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['idea.Idea']"}),
            'time': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    # Apps whose frozen models are fully described above.
    complete_apps = ['idea']
    symmetrical = True
|
<filename>disp/database/api.py
"""
Module for providing a MongoDB database (collection) interface for AIRSS searches
"""
import os
import zlib
import enum
from logging import getLogger, INFO, WARNING
import hashlib
import time
from datetime import datetime, timedelta
from monty.serialization import loadfn
from fireworks.utilities.fw_utilities import get_my_host, get_my_ip
import pymongo
from pymongo import MongoClient
from pymongo.errors import PyMongoError
import gridfs
import pandas as pd
from mongoengine import connect, get_connection
from mongoengine.connection import ConnectionFailure
from disp.database.odm import (ResFile, ParamFile, SeedFile,
InitialStructureFile, Creator)
# pylint: disable=too-many-instance-attributes, too-many-arguments, import-outside-toplevel, no-member, protected-access
def get_db_file_path():
    """Return the db-file path from the DISP_DB_FILE environment variable.

    Returns None when the variable is unset.
    """
    return os.getenv('DISP_DB_FILE')

DB_FILE = get_db_file_path()
class DocumentType(enum.Enum):
    """Discriminator for the kinds of files stored in the database.

    The values mirror the per-type document models imported from
    ``disp.database.odm`` (ResFile, SeedFile, InitialStructureFile,
    ParamFile).
    """
    RES = 'res'
    SEED = 'seed'
    INITIAL_STRUCTURE = 'initial_structure'
    PARAM = 'param'
class SearchDB:
    """
    Database backend for storing search results
    """
    logger = getLogger(__name__)
    # Single-field indexes created on the main entry collection.
    INDICES = [
        'project_name', 'seed_name', 'created_on', 'md5hash', 'struct_name'
    ]
    _ATOMATE_TASK_COLL = 'atomate_tasks' # Name of the collection for atomate tasks
    # Single-field indexes created on the atomate task collection.
    INDICES_ATOMATE = [
        'project_name', 'seed_name', 'struct_name', 'uuid', 'unique_name',
        'task_label', 'disp_type', 'last_updated'
    ]
def __init__(self,
host: str = 'localhost',
port: int = 27017,
database: str = 'disp-db',
user: str = None,
password: str = <PASSWORD>,
collection: str = 'disp_entry',
lpad=None,
**kwargs):
self.host = host
self.db_name = database
self.user = user
self.password = password
self.port = int(port)
self.identity = {}
self.lpad = lpad
try:
self._engine_connection = connect(db=self.db_name,
alias='disp',
host=host,
username=user,
port=int(port),
password=password,
authentication_source=kwargs.get(
'authsource', None))
except ConnectionFailure:
self.logger.info('Reusing existing connections')
self._engine_connection = get_connection('disp')
# Direct connection with PyMongo
try:
self.connection = MongoClient(host=self.host,
port=self.port,
username=self.user,
password=self.password,
**kwargs)
self.database = self.connection[self.db_name]
except PyMongoError:
self.logger.error('Mongodb connection failed')
raise RuntimeError
# Authenticate through pymongo
try:
if self.user:
self.database.authenticate(self.user,
self.password,
source=kwargs.get(
'authsource', None))
except PyMongoError:
self.logger.error('Mongodb authentication failed')
raise RuntimeError
self.collection = self.database[collection]
self.gfs = gridfs.GridFS(self.database, collection=collection + '-fs')
def set_identity(self, fw_id, uuid=None, fw_worker=None):
"""Populate the identity dictionary"""
self.identity['fw_id'] = fw_id
if uuid:
self.identity['uuid'] = uuid
self.identity['hostname'] = get_my_host()
self.identity['ip_address'] = get_my_ip()
if fw_worker:
self.identity['fw_worker'] = fw_worker
def include_creator(self, entry):
"""Return a 'Creator' embedded document for recording the creator of any created document"""
if self.identity:
entry.creator = Creator(**self.identity)
def build_indexes(self,
                  additional_fields=None,
                  fw_additional_fields=None,
                  wf_additional_fields=None,
                  background=True):
    """
    Build the indexes to accelerate queries.

    Args:
        additional_fields (list): Extra single-field indexes for the main
            collection, on top of ``self.INDICES``.
        fw_additional_fields (list): Extra ``spec.<name>`` indexes for the
            fireworks collection.
        wf_additional_fields (list): Extra ``metadata.<name>`` indexes for
            the workflows collection.
        background (bool): Build the indexes in the background or not.
    """
    if additional_fields is None:
        additional_fields = []
    if fw_additional_fields is None:
        fw_additional_fields = []
    if wf_additional_fields is None:
        wf_additional_fields = []
    # Single-field indexes on the main collection
    for key in list(self.INDICES) + list(additional_fields):
        self.collection.create_index(key, background=background)
    # Compound indexes for the common (project_name/seed_name, ...) queries
    for key in ['project_name', 'seed_name']:
        self.collection.create_index([(key, pymongo.DESCENDING),
                                      ('created_on', pymongo.DESCENDING)])
        # For selection with _cls
        self.collection.create_index([(key, pymongo.DESCENDING),
                                      ('_cls', pymongo.DESCENDING)])
        self.collection.create_index(key)
    # Build indices for the atomate tasks collection
    # (assumed to live in the same database)
    task_coll = self.database[self._ATOMATE_TASK_COLL]
    for key in self.INDICES_ATOMATE:
        task_coll.create_index(key, background=background)
    # Compound index for project_name and seed_name
    for key in ['project_name', 'seed_name']:
        # BUGFIX: the field is 'last_updated' (see INDICES_ATOMATE and the
        # query in throughput_summary_atomate); the previous 'last_update'
        # built an index on a nonexistent field that was never used.
        task_coll.create_index([(key, pymongo.DESCENDING),
                                ('last_updated', pymongo.DESCENDING)])
    # Fireworks/workflows indexes - only when a LaunchPad is attached
    if self.lpad is not None:
        lpad = self.lpad
        for name in [
                'project_name', 'seed_name', 'struct_name',
                *fw_additional_fields
        ]:
            lpad.fireworks.create_index('spec.' + name,
                                        background=background)
        for name in [
                'project_name', 'seed_name', 'disp_type',
                *wf_additional_fields
        ]:
            lpad.workflows.create_index('metadata.' + name,
                                        background=background)
def insert_seed(self, project_name: str, seed_name: str,
                seed_content: str):
    """Store a seed file for structure generation, deduplicated by md5."""
    content_hash = hashlib.md5(seed_content.encode()).hexdigest()
    existing = SeedFile.objects(md5hash=content_hash,
                                project_name=project_name,
                                seed_name=seed_name).first()
    if existing:
        return existing
    new_seed = SeedFile(md5hash=content_hash,
                        seed_name=seed_name,
                        content=seed_content,
                        project_name=project_name)
    self.include_creator(new_seed)
    new_seed.save()
    return new_seed
def insert_param(self, project_name: str, param_content: str,
                 seed_name: str):
    """Store a parameter file, deduplicated by md5 within the project."""
    content_hash = hashlib.md5(param_content.encode()).hexdigest()
    existing = ParamFile.objects(md5hash=content_hash,
                                 project_name=project_name).first()
    if existing:
        return existing
    record = ParamFile(md5hash=content_hash,
                       content=param_content,
                       project_name=project_name,
                       seed_name=seed_name)
    self.include_creator(record)
    record.save()
    return record
def insert_search_record(self,
                         project_name: str,
                         struct_name: str,
                         res_content: str,
                         param_content=None,
                         seed_name=None,
                         seed_hash=None,
                         seed_content=None):
    """
    Insert a record of the resultant structure of a search.

    Args:
        project_name: Name of the project.
        struct_name: Name of the structure.
        res_content: Content of the res (SHELX) file.
        param_content: Optional content of the param file.
        seed_name: Name of the seed used for the search.
        seed_hash: Optional md5 hash of an already-stored seed.
        seed_content: Optional raw content of the seed file.

    Returns:
        The saved ResFile document.

    Raises:
        ValueError: If both ``seed_hash`` and ``seed_content`` are given
            but the hash does not match the content.
    """
    if seed_hash and seed_content:
        md5hash = hashlib.md5(seed_content.encode()).hexdigest()
        # BUGFIX: was an `assert`, which is silently stripped under
        # `python -O` - raise explicitly so the check always runs.
        if md5hash != seed_hash:
            raise ValueError('The seed_hash does not match seed_content!!')
    # Seed content supplied but no hash given - insert this seed to the database
    if (not seed_hash) and seed_name and seed_content:
        seed_file = self.insert_seed(project_name, seed_name, seed_content)
    else:
        seed_file = None
    if param_content:
        param_file = self.insert_param(project_name, param_content,
                                       seed_name)
    else:
        param_file = None
    res_record = ResFile(seed_name=seed_name,
                         project_name=project_name,
                         content=res_content,
                         struct_name=struct_name)
    # Link to the Param and Seed files
    res_record.param_file = param_file
    res_record.seed_file = seed_file
    self.include_creator(res_record)
    # Link with the initial structure of this record; take the most recent
    # one in case of a Firework-level rerun.
    init = InitialStructureFile.objects(
        project_name=project_name,
        seed_name=seed_name,
        struct_name=struct_name).order_by('-created_on').first()
    if init:
        res_record.init_structure_file = init
    res_record.save()
    return res_record
def insert_initial_structure(self, project_name: str, struct_name: str,
                             struct_content: str, seed_name: str,
                             seed_content: str):
    """Store a randomly generated structure, linked to its seed file."""
    record = InitialStructureFile(project_name=project_name,
                                  struct_name=struct_name,
                                  seed_name=seed_name,
                                  content=struct_content)
    # The seed is deduplicated/inserted on demand and linked to the record
    record.seed_file = self.insert_seed(project_name, seed_name,
                                        seed_content)
    self.include_creator(record)
    record.save()
    return record
@staticmethod
def retrieve_project(project_name: str,
                     include_seed=False,
                     include_param=False,
                     additional_filters=None,
                     include_initial_structure=False):
    """
    Retrieve all results from a single project.

    Args:
        project_name: Name of the project to query.
        include_seed: Include the content of the seed in the result.
        include_param: Include the 'param' file content in the result.
        additional_filters: Raw query dict applied on top of the project
            filter.
        include_initial_structure: Include initial structures.

    Returns:
        A QuerySet containing the matching ResFile instances.
    """
    qset = ResFile.objects(project_name=project_name)
    # Drop the heavyweight reference fields unless explicitly requested
    for field, wanted in (('seed_file', include_seed),
                          ('init_structure_file',
                           include_initial_structure),
                          ('param_file', include_param)):
        if not wanted:
            qset = qset.exclude(field)
    if additional_filters:
        qset = qset.filter(__raw__=additional_filters)
    return qset.all()
@staticmethod
def get_summary_df(projects=None, seeds=None):
    """
    Summarise the database as a pandas.DataFrame.

    Generates a status report of the database without including the
    actual content of the seeds.
    """
    fields = ['seed_name', 'project_name', 'created_on', 'struct_name']
    query = ResFile.objects()
    if projects:
        query = query.filter(project_name__in=projects)
    if seeds:
        query = query.filter(seed_name__in=seeds)
    query = query.only(*fields)
    # _data is not part of the public mongoengine API - used for speed
    rows = [entry._data for entry in query]
    if not rows:
        return pd.DataFrame([], columns=fields)
    return pd.DataFrame(rows)[fields]
def throughput_summary(self,
                       past_days=2,
                       start_date=None,
                       projects=None,
                       seeds=None,
                       aggregate='H',
                       group_by='seed_name',
                       plot=True):
    """
    Summarise the throughput of the search.

    Args:
        past_days (int): Length of the summary window in days.
        start_date (str): Optional window start as 'YYYY-MM-DD';
            defaults to `past_days` before now (UTC).
        projects (list): List of projects to include.
        seeds (list): List of seeds to include.
        aggregate (str): Pandas resampling frequency (e.g. 'H').
        group_by (str): Column to group the counts by, e.g. 'seed_name',
            'worker_name' or 'uid'.
        plot (bool): Show a stacked bar plot; otherwise write the data
            to 'throughput.csv'.

    Returns:
        A dataframe of search results per `aggregate` period, or None
        when nothing matches.
    """
    import matplotlib.pyplot as plt
    query = ResFile.objects()
    if projects:
        query = query.filter(project_name__in=projects)
    if seeds:
        query = query.filter(seed_name__in=seeds)
    now = datetime.utcnow()
    # Window is [dstart, dstart + past_days]
    if start_date is None:
        dstart = now - timedelta(days=past_days)
    else:
        dstart = datetime.strptime(start_date, '%Y-%m-%d')
    dfinish = dstart + timedelta(days=past_days)
    query = query.filter(created_on__gte=dstart, created_on__lte=dfinish)
    included = ['id', 'created_on', 'seed_name', 'project_name']
    query = query.only(*included)
    # _data is not part of the public mongoengine API - used for speed
    results = [resfile._data for resfile in query]
    if not results:
        self.logger.warning('No structure is found.')
        return None
    # Add in worker information
    dataframe = pd.DataFrame(results)[included]
    if group_by == 'worker_name':
        # Resolve worker names by joining against the launches collection
        worker_res = query.aggregate(worker_aggregation())
        worker_info = pd.DataFrame(worker_res)
        # worker_name comes back as a (possibly empty) list - unwrap it
        worker_info['worker_name'] = worker_info['worker_name'].apply(
            lambda x: x[0] if x else None)
        dataframe = dataframe.merge(worker_info,
                                    left_on='id',
                                    right_on='_id',
                                    how='left')
    dataframe.set_index('created_on', inplace=True)
    # Unique (project, seed) identifier, usable as group_by='uid'
    dataframe['uid'] = [
        row.project_name + ':' + row.seed_name
        for _, row in dataframe.iterrows()
    ]
    # Count entries per `aggregate` period for each `group_by` value
    tdf = dataframe.groupby(group_by).resample(
        aggregate)[group_by].count().unstack(level=0)
    tdf.name = 'Completed'
    if plot:
        # Display in local (Europe/London) time for readability
        tdf.index = tdf.index.tz_localize('UTC').tz_convert(
            'Europe/London')
        tdf.index = [x.strftime('%d/%m %H00') for x in tdf.index]
        tdf.plot.bar(stacked=True)
        plt.xlabel('Creation time')
        plt.ylabel('Count')
        plt.title('New structures')
        plt.tight_layout()
        plt.legend(loc=None)
        plt.show()
    else:
        # Save the data
        tdf.to_csv('throughput.csv')
    return tdf
def throughput_summary_atomate(self,
                               past_days=2,
                               start_date=None,
                               projects=None,
                               seeds=None,
                               aggregate='H',
                               group_by='seed_name',
                               plot=True):
    """
    Summarise the throughput of atomate calculations.

    Args:
        past_days (int): Length of the summary window in days.
        start_date (str): Optional window start as 'YYYY-MM-DD';
            defaults to `past_days` before now (UTC).
        projects (list): List of projects to include.
        seeds (list): List of seeds to include.
        aggregate (str): Pandas resampling frequency (e.g. 'H').
        group_by (str): Column to group the counts by.
        plot (bool): Show a stacked bar plot; otherwise write the data
            to 'throughput.csv'.

    Returns:
        A dataframe of completed calculations per `aggregate` period,
        or None when nothing matches.
    """
    import matplotlib.pyplot as plt
    task_coll = self.database[self._ATOMATE_TASK_COLL]
    query = {}
    if projects:
        query['project_name'] = {'$in': projects}
    if seeds:
        query['seed_name'] = {'$in': seeds}
    if start_date is None:
        dstart = datetime.utcnow() - timedelta(days=past_days)
    else:
        dstart = datetime.strptime(start_date, '%Y-%m-%d')
    query['last_updated'] = {
        '$gte': dstart,
        '$lte': dstart + timedelta(days=past_days)
    }
    included = ['last_updated', 'seed_name', 'project_name', 'dir_name']
    results = list(task_coll.find(query, included))
    if not results:
        self.logger.warning('No structure is found.')
        return None
    # Add in worker information
    dataframe = pd.DataFrame(results)[included]
    dataframe.dropna(inplace=True)
    dataframe['uid'] = [
        row.project_name + ':' + row.seed_name
        for _, row in dataframe.iterrows()
    ]
    # Derive the worker name from the host part of the run directory
    machine = []
    for value in dataframe['dir_name']:
        host = value.split(':')[0]
        if '.' in host:
            host = host.split('.',
                              1)[1]  # Skip the first part of the hostname
        machine.append(host)
    dataframe['worker_name'] = machine
    dataframe.set_index('last_updated', inplace=True)
    dataframe = dataframe.groupby(group_by).resample(
        aggregate)[group_by].count().unstack(level=0)
    dataframe.name = 'Completed'
    # Display in local (Europe/London) time for readability
    dataframe.index = dataframe.index.tz_localize('UTC').tz_convert(
        'Europe/London')
    if plot:
        dataframe.plot.bar(stacked=True)
        plt.xlabel('Creation time')
        # BUGFIX: was plt.xlabel('Count'), which overwrote the x label
        # set just above; the count belongs on the y axis (compare
        # throughput_summary).
        plt.ylabel('Count')
        plt.title('Completed Calculations')
        plt.tight_layout()
        plt.show()
    else:
        # Save the data
        dataframe.to_csv('throughput.csv')
    return dataframe
def show_struct_counts(self,
                       project_regex=None,
                       seed_regex=None,
                       states=None,
                       include_workflows=True,
                       include_atomate=False,
                       show_priority=False,
                       include_res=True,
                       verbose=False):
    """Return a summary DataFrame with counts of the structures."""
    # Atomate takes precedence; otherwise fall back to the standard
    # search workflows, or to no workflow information at all.
    if include_atomate:
        wf_mode = 'ato'
    elif include_workflows:
        wf_mode = 'search'
    else:
        wf_mode = 'none'
    counter = StructCounts(self.collection,
                           self.database.fireworks,
                           self.database.workflows,
                           states=states,
                           seed_regex=seed_regex,
                           project_regex=project_regex,
                           wf_mode=wf_mode,
                           show_priority=show_priority,
                           include_res=include_res,
                           verbose=verbose)
    return counter.get_summary_df()
@classmethod
def from_db_file(cls, db_file: str):
    """
    Instantiate from a credentials file.

    The file must provide host, port, database, collection, username and
    password.

    Args:
        db_file (str): Path to the file containing the credentials.

    Returns:
        An instance of this class connected per the credentials.
    """
    creds = loadfn(db_file)
    # Any other MongoClient kwargs can go here ...
    kwargs = creds.get('mongoclient_kwargs', {})
    # Default the auth source to the target database itself
    kwargs['authsource'] = creds.get('authsource', creds['database'])
    return cls(host=creds['host'],
               port=int(creds.get('port', 27017)),
               database=creds['database'],
               collection=creds['collection'],
               user=creds.get('user'),
               password=creds.get('password'),
               **kwargs)
def upload_dot_castep(self, struct_name, seed_name, project_name):
    """Upload a zlib-compressed .castep file to GridFS."""
    fname = struct_name + '.castep'
    # Refuse to overwrite an existing entry with the same identity
    if self.gfs.exists({
            'struct_name': struct_name,
            'seed_name': seed_name,
            'project_name': project_name
    }):
        raise FileExistsError(f'File {fname} exists already')
    with open(fname, 'rb') as handle:
        compressed = zlib.compress(handle.read())
    self.gfs.put(compressed,
                 filename=fname,
                 project_name=project_name,
                 seed_name=seed_name,
                 struct_name=struct_name)
def retrieve_dot_castep(self, struct_name, seed_name, project_name):
    """
    Retrieve a dot CASTEP file from GridFS and write it to disk.

    The stored content is zlib-compressed (see ``upload_dot_castep``)
    and is decompressed before writing.

    Raises:
        FileNotFoundError: If no matching file exists in GridFS.
    """
    fname = struct_name + '.castep'
    query = {
        'struct_name': struct_name,
        'seed_name': seed_name,
        'project_name': project_name
    }
    gfile = self.gfs.find_one(query)
    if not gfile:
        # BUGFIX: corrected the error message grammar ("Cannot found")
        raise FileNotFoundError(f'Cannot find {fname}!')
    content = gfile.read()
    with open(fname, 'wb') as fhandle:
        fhandle.write(zlib.decompress(content))
def delete_dot_castep(self, struct_name, seed_name, project_name):
    """Delete all matching dot CASTEP files from GridFS."""
    query = {
        'struct_name': struct_name,
        'seed_name': seed_name,
        'project_name': project_name
    }
    # Collect the ids first - deleting while iterating the cursor is unsafe
    to_delete = []
    for gfile in self.gfs.find(query):
        to_delete.append(gfile._id)  # pylint: disable=protected-access
    for _id in to_delete:
        self.gfs.delete(_id)
def get_hash(string):
    """Return the hexadecimal md5 digest of *string*."""
    digest = hashlib.md5(string.encode())
    return digest.hexdigest()
def get_pipeline(cls_string,
                 project_regex=None,
                 seed_regex=None,
                 projects=None,
                 seeds=None):
    """
    Build the aggregation pipeline for querying SearchDB.

    Entries of document class ``cls_string`` are filtered by project and
    seed name, then counted per (seed_name, project_name) pair.
    """
    match = {'_cls': cls_string}
    # Regular expression matches
    if project_regex:
        match['project_name'] = {'$regex': project_regex}
    if seed_regex:
        match['seed_name'] = {'$regex': seed_regex}
    # Explicit lists of seeds/projects override the regex options
    if projects:
        match['project_name'] = {'$in': projects}
    if seeds:
        match['seed_name'] = {'$in': seeds}
    group = {
        '_id': {
            'seed': '$seed_name',
            'project': '$project_name'
        },
        'count': {
            '$sum': 1
        }
    }
    return [{'$match': match}, {'$group': group}]
def get_atomate_wflows(wf_coll,
                       states,
                       seed_regex=None,
                       project_regex=None) -> pd.DataFrame:
    """Return workflow information for atomate jobs."""
    return get_workflows(wf_coll,
                         ['atomate-relax'],
                         states,
                         seed_regex=seed_regex,
                         project_regex=project_regex)
def get_std_wflows(wf_coll,
                   states,
                   seed_regex=None,
                   project_regex=None) -> pd.DataFrame:
    """Return workflow information for the standard search jobs."""
    return get_workflows(wf_coll,
                         ['relax', 'search'],
                         states,
                         seed_regex=seed_regex,
                         project_regex=project_regex)
def get_workflows(wf_coll,
                  disp_types,
                  states,
                  seed_regex=None,
                  project_regex=None) -> pd.DataFrame:
    """
    Query the workflow collection for matching workflows.

    Each row of the returned DataFrame carries the workflow state plus
    the flattened metadata fields.
    """
    query = {}
    if seed_regex:
        query['metadata.seed_name'] = {'$regex': seed_regex}
    if project_regex:
        query['metadata.project_name'] = {'$regex': project_regex}
    if states:
        query['state'] = {'$in': states}
    query['metadata.disp_type'] = {'$in': disp_types}
    cursor = wf_coll.find(query, ['state', 'metadata'])
    rows = [{'state': doc['state'], **doc['metadata']} for doc in cursor]
    return pd.DataFrame(rows)
def worker_aggregation(launch_col='launches'):
    """
    Build an aggregation pipeline resolving the worker for each creator.

    Each entry's ``creator.fw_id`` is joined against the launches
    collection (keeping only launches whose relaxation FINISHED) and the
    fworker name is projected to the top level as ``worker_name``.
    """
    finished_match = {
        '$match': {
            '$expr': {
                '$and': [
                    {'$eq': ['$fw_id', '$$creator_id']},
                    {'$eq': ['$action.stored_data.relax_status',
                             'FINISHED']},
                ]
            }
        }
    }
    lookup_stage = {
        '$lookup': {
            'from': launch_col,
            'let': {'creator_id': '$creator.fw_id'},
            'as': 'launch',
            'pipeline': [
                finished_match,
                {'$project': {'fworker.name': 1}},
            ],
        }
    }
    return [
        {'$project': {'_id': 1, 'creator.fw_id': 1}},
        lookup_stage,
        # Project worker name to the top level
        {'$project': {'worker_name': '$launch.fworker.name'}},
    ]
class StructCounts:
    """Class for querying the database to obtain the structure counts"""

    def __init__(self,
                 disp_coll: str,
                 fw_coll: str,
                 wf_coll: str,
                 states=None,
                 seed_regex=None,
                 project_regex=None,
                 wf_mode='search',
                 show_priority=False,
                 include_res=True,
                 verbose=True):
        """
        Initialise a StructCounts object.

        Args:
            disp_coll: Collection of DispEntry documents.
                NOTE(review): annotated as ``str`` but used as a pymongo
                collection (``aggregate`` is called on it below) -
                confirm against callers.
            fw_coll: Fireworks collection.
            wf_coll: Workflows collection.
            states: Workflow states to include.
            seed_regex: Regex used to select seed names.
            project_regex: Regex used to select project names.
            wf_mode: Which workflow family to include: 'search', 'ato'
                or 'none'.
            show_priority: Currently unused.
            include_res: Also query the structure counts.
            verbose: Log progress information at INFO level.
        """
        self.disp_coll = disp_coll
        self.fw_coll = fw_coll
        self.wf_coll = wf_coll
        self.states = states
        self.seed_regex = seed_regex
        self.project_regex = project_regex
        # Explicit seed/project lists; may be populated from the
        # workflow query results in get_wf_collection()
        self.seeds = []
        self.projects = []
        self.verbose = verbose
        self.wf_mode = wf_mode
        self.show_priority = show_priority  # Not used
        self.include_res = include_res  # Query the structure counts
        self.logger = getLogger(__name__)
        if verbose:
            self.logger.setLevel(INFO)
        else:
            self.logger.setLevel(WARNING)

    def get_summary_df(self):
        """Main logic for getting the summary of the data"""
        # First, obtain the workflows to be included
        if self.verbose:
            self.logger.info('Collecting workflow information')
        ttmp = time.time()
        # This would set some filters - based on the state and selected projects
        wdf = self.get_wf_collection()
        if len(wdf) == 0 and self.wf_mode != 'none':
            self.logger.info('No workflow matches the query.')
            # NOTE(review): self.projects/self.seeds are initialised to
            # [] (never None) in __init__, so this condition appears to
            # be always True - confirm the intended guard.
            if self.projects is not None or self.seeds is None:
                return wdf
        if self.verbose:
            ttmp = time.time() - ttmp
            self.logger.info(
                'Workflow information collected - time elapsed: %.2f s', ttmp)
        sdf, idf = self.get_res_entries()
        # No relaxed structures - just return the workflow information
        if len(sdf) == 0 or not self.include_res:
            return wdf
        # Get summary of the relaxed / initial structures
        struct_count = sdf.groupby(['project', 'seed'])[['res']].sum()
        init_count = idf.groupby(['project', 'seed'])[['init_structs']].sum()
        try:
            final_df = struct_count.merge(init_count,
                                          left_index=True,
                                          right_index=True,
                                          how='outer')
        except ValueError:
            # No initial-structure counts to merge - fall back to zeros
            final_df = struct_count.copy()
            final_df['init_structures'] = 0.0
        # Handle cases with missing columns
        if len(final_df.columns) == 2:
            final_df.columns = pd.MultiIndex.from_tuples([('Structure', 'RES'),
                                                          ('Structure', 'Init')
                                                          ])
        elif 'res' in final_df.columns:
            final_df.columns = pd.MultiIndex.from_tuples([
                ('Structure', 'RES'),
            ])
        elif 'init_structs' in final_df.columns:
            final_df.columns = pd.MultiIndex.from_tuples([
                ('Structure', 'Init'),
            ])
        # Blend in workflow information
        if len(wdf) > 0:
            final_df = final_df.merge(wdf,
                                      left_index=True,
                                      right_index=True,
                                      how='right')
        # Fill NaN as 0.0
        final_df = final_df.fillna(0.0)
        return final_df

    def get_wf_collection(self):
        """Get atomate workflow statistics"""
        if self.wf_mode == 'search':
            wf_records = get_std_wflows(self.wf_coll, self.states,
                                        self.seed_regex, self.project_regex)
        elif self.wf_mode == 'ato':
            wf_records = get_atomate_wflows(self.wf_coll, self.states,
                                            self.seed_regex,
                                            self.project_regex)
        elif self.wf_mode == 'none':
            wf_records = []
        else:
            raise ValueError(f'Unknown wf_mode: {self.wf_mode}')
        if len(wf_records) != 0:
            # Group data by project_name, 'seed_name' and 'state' then unstack
            wf_df = wf_records.groupby(['project_name', 'seed_name', 'state'
                                        ]).count().unstack()[['disp_type'
                                                              ]].fillna(0.0)
            wf_df[('disp_type', 'ALL')] = wf_df.sum(axis=1)
            wf_df = wf_df.rename(
                columns={'disp_type': f'WF count - {self.wf_mode}'})
            wf_df.columns.names = [None, None]
            wf_df.index.names = ['project', 'seed']
            # If no constraints on the seeds / projects are imposed, use the
            # workflow results to limit them.
            # NOTE(review): the checks look crossed (seed_regex gating
            # projects and project_regex gating seeds) - confirm intent.
            if self.seed_regex is None:
                self.projects = wf_records.project_name.unique().tolist()
            if self.project_regex is None:
                self.seeds = wf_records.seed_name.unique().tolist()
        else:
            # NO entry - return an empty dataframe
            wf_df = pd.DataFrame()
        return wf_df

    def get_res_entries(self):
        """Obtain the entry of res files"""
        ttmp = time.time()
        # Check if any filters have been applied - warn if that is not the case
        has_no_filter = all(
            tmp is None for tmp in
            [self.project_regex, self.projects, self.seeds, self.seed_regex])
        if has_no_filter:
            self.logger.info(
                'WARNING: No effective filters applied - projecting the entire database!!'
            )
        # Find the SHELX entries for the matching entries
        res = self.disp_coll.aggregate(
            get_pipeline('DispEntry.ResFile',
                         project_regex=self.project_regex,
                         projects=self.projects,
                         seeds=self.seeds,
                         seed_regex=self.seed_regex))
        data = [(item['_id']['seed'], item['_id']['project'], item['count'])
                for item in res]
        sdf = pd.DataFrame(data, columns=['seed', 'project', 'res'])
        dtime = time.time() - ttmp
        self.logger.info(
            f'Obtained relaxed structure counts - time elapsed {dtime:.2f}')
        # Include initial structures
        ttmp = time.time()
        res = self.disp_coll.aggregate(
            get_pipeline('DispEntry.InitialStructureFile',
                         project_regex=self.project_regex,
                         seed_regex=self.seed_regex,
                         projects=self.projects,
                         seeds=self.seeds))
        data = [(item['_id']['seed'], item['_id']['project'], item['count'])
                for item in res]
        idf = pd.DataFrame(data, columns=['seed', 'project', 'init_structs'])
        dtime = time.time() - ttmp
        self.logger.info(
            f'Obtained initial structure counts - time elapsed {dtime:.2f} s')
        return sdf, idf
|
# Federated_Learning/paper_model/FedAvg/tensorflow/Models.py
import os
import tensorflow as tf
import numpy as np
from dataSets import DataSet
class Models(object):
    """TensorFlow 1.x graph builders for the FedAvg benchmark models.

    Supported model names: 'mnist_2nn', 'mnist_cnn' and 'cifar10_cnn'.
    The constructed logits tensor is exposed as ``self.outputs``.
    """

    def __init__(self, modelName, inputs):
        # Build the graph for the requested model on top of `inputs`
        self.inputs = inputs
        self.model_name = modelName
        if self.model_name == 'mnist_2nn':
            self.mnist_2nn_construct(inputs)
        elif self.model_name == 'mnist_cnn':
            self.mnist_cnn_construct(inputs)
        elif self.model_name == 'cifar10_cnn':
            self.cifar10_cnn_construct(inputs)

    def mnist_2nn_construct(self, inputs):
        # MLP: 784 -> 200 -> 200 -> 10; the last layer returns raw logits
        self.fc1 = self.full_connect(inputs, 784, 200, 'h1')
        self.fc2 = self.full_connect(self.fc1, 200, 200, 'h2')
        self.outputs = self.full_connect(self.fc2, 200, 10, 'last_layer', relu=False)

    def mnist_cnn_construct(self, inputs):
        # CNN for flattened 28x28 MNIST images: two conv/pool blocks + 2 FC
        self.trans_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
        self.cov1 = self.convolve(self.trans_inputs, 1, 5, 1, 1, 32, 'cov1', True, 'SAME')
        self.pool1 = self.max_pool_nxn(self.cov1, 2, 2, 'pool1')
        self.cov2 = self.convolve(self.pool1, 32, 5, 1, 1, 64, 'cov2', True, 'SAME')
        self.pool2 = self.max_pool_nxn(self.cov2, 2, 2, 'pool2')
        with tf.variable_scope('transform') as scope:
            # Flatten the 7x7x64 feature maps for the dense layers
            self.trans_pool2 = tf.reshape(self.pool2, [-1, 7 * 7 * 64])
        self.fc1 = self.full_connect(self.trans_pool2, 7 * 7 * 64, 512, 'fc1')
        self.outputs = self.full_connect(self.fc1, 512, 10, 'last_layer', relu=False)

    def cifar10_cnn_construct(self, inputs):
        # CNN for CIFAR-10: two conv/pool blocks + 3 FC layers
        self.cov1 = self.convolve(inputs, 3, 5, 1, 1, 64, 'cov1', True, 'SAME')
        self.pool1 = self.max_pool_nxn(self.cov1, 3, 2, 'pool1')
        self.cov2 = self.convolve(self.pool1, 64, 5, 1, 1, 64, 'cov2', True, 'SAME')
        self.pool2 = self.max_pool_nxn(self.cov2, 3, 2, 'pool2')
        with tf.variable_scope('transform') as scope:
            # Flatten the 6x6x64 feature maps for the dense layers
            self.trans_pool2 = tf.reshape(self.pool2, [-1, 6 * 6 * 64])
        self.fc1 = self.full_connect(self.trans_pool2, 6 * 6 * 64, 384, 'fc1')
        self.fc2 = self.full_connect(self.fc1, 384, 192, 'fc2')
        self.outputs = self.full_connect(self.fc2, 192, 10, 'last_layer', relu=False)

    def full_connect(self, inputs, num_in, num_out, name, relu=True):
        # Fully-connected layer under variable scope `name`; returns
        # relu(xW + b), or the raw affine output when relu=False
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable('weights', shape=[num_in, num_out], dtype=tf.float32, trainable=True)
            biases = tf.get_variable('biases', shape=[num_out], dtype=tf.float32, trainable=True)
            ws_plus_bs = tf.nn.xw_plus_b(inputs, weights, biases)
            if relu == True:
                outputs = tf.nn.relu(ws_plus_bs)
                return outputs
            else:
                return ws_plus_bs

    def convolve(self, inputs, inputs_channels, kernel_size, stride_y, stride_x, num_features, name, relu=True, padding='SAME'):
        # Square 2-D convolution layer with optional ReLU activation
        with tf.variable_scope(name) as scope:
            weights = tf.get_variable('weights', shape=[kernel_size, kernel_size, inputs_channels, num_features],
                                      dtype=tf.float32, trainable=True)
            # NOTE(review): the variable name 'baises' is misspelt, but
            # renaming it would invalidate existing checkpoints - left as-is
            biases = tf.get_variable('baises', shape=[num_features], dtype=tf.float32, trainable=True)
            conv = tf.nn.conv2d(inputs, weights, [1, stride_y, stride_x, 1], padding=padding)
            cov_puls_bs = tf.nn.bias_add(conv, biases)
            if relu == True:
                outputs = tf.nn.relu(cov_puls_bs)
                return outputs
            else:
                return cov_puls_bs

    def max_pool_nxn(self, inputs, ksize, ssize, name):
        # Max pooling with a ksize x ksize window and ssize stride, SAME padding
        with tf.variable_scope(name) as scope:
            return tf.nn.max_pool(inputs, ksize=[1, ksize, ksize, 1], strides=[1, ssize, ssize, 1], padding='SAME')
# Smoke-test: train the mnist_2nn model for 1000 SGD steps on MNIST.
if __name__=='__main__':
    # GPU preparation
    os.environ['CUDA_VISIBLE_DEVICES'] = '0,1,2,3,4,5,6,7'
    mnist = DataSet('mnist', is_IID=1)
    # Placeholders for the flattened images and the one-hot labels
    with tf.variable_scope('inputs') as scope:
        input_images = tf.placeholder(shape=[None, 784], dtype=tf.float32, name='input_images')
        true_label = tf.placeholder(shape=[None, 10], dtype=tf.float32, name='true_label')
    mnist_2nn = Models('mnist_2nn', input_images)
    predict_label = tf.nn.softmax(mnist_2nn.outputs)
    with tf.variable_scope('loss') as scope:
        # Per-example cross entropy (mean taken over the 10 classes)
        Cross_entropy = -tf.reduce_mean(true_label*tf.log(predict_label), axis=1)
    with tf.variable_scope('train') as scope:
        optimizer = tf.train.GradientDescentOptimizer(0.01)
        train = optimizer.minimize(Cross_entropy)
    with tf.variable_scope('validation') as scope:
        correct_prediction = tf.equal(tf.argmax(predict_label, axis=1), tf.argmax(true_label, axis=1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
    # ---------------------------------------- train --------------------------------------------- #
    with tf.Session(config=tf.ConfigProto(
            log_device_placement=False, \
            allow_soft_placement=True, \
            gpu_options=tf.GPUOptions(allow_growth=True))) as sess:
        # NOTE(review): initialize_all_variables is long-deprecated in TF1;
        # global_variables_initializer is the replacement - confirm the
        # TensorFlow version before changing
        sess.run(tf.initialize_all_variables())
        for i in range(1000):
            batch_images, batch_labels = mnist.next_batch(100)
            sess.run(train, feed_dict={input_images: batch_images, true_label: batch_labels})
            # Print the accuracy on the full test set every 20 steps
            if i%20 == 0:
                batch_images = mnist.test_data
                batch_labels = mnist.test_label
                print(sess.run(accuracy, feed_dict={input_images: batch_images, true_label: batch_labels}))
|
# Copyright 2020-present <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#: This should be an up to date list of strings which discord renders as emojis as of 2021-03-22
#: Each one adds an html element to the message toward the 200 element rendering issue.
DISCORD_EMOJIS = [
"\ud83d\ude00",
"\ud83d\ude03",
"\ud83d\ude04",
"\ud83d\ude01",
"\ud83d\ude06",
"\ud83d\ude06",
"\ud83d\ude05",
"\ud83d\ude02",
"\ud83e\udd23",
"\ud83e\udd23",
"\N{WHITE SMILING FACE}\N{VARIATION SELECTOR-16}",
"\ud83d\ude0a",
"\ud83d\ude07",
"\ud83d\ude42",
"\ud83d\ude42",
"\ud83d\ude43",
"\ud83d\ude43",
"\ud83d\ude09",
"\ud83d\ude0c",
"\ud83d\ude0d",
"\ud83e\udd70",
"\ud83d\ude18",
"\ud83d\ude17",
"\ud83d\ude19",
"\ud83d\ude1a",
"\ud83d\ude0b",
"\ud83d\ude1b",
"\ud83d\ude1d",
"\ud83d\ude1c",
"\ud83e\udd2a",
"\ud83e\udd28",
"\ud83e\uddd0",
"\ud83e\udd13",
"\ud83e\udd13",
"\ud83d\ude0e",
"\ud83e\udd29",
"\ud83e\udd73",
"\ud83d\ude0f",
"\ud83d\ude12",
"\ud83d\ude1e",
"\ud83d\ude14",
"\ud83d\ude1f",
"\ud83d\ude15",
"\ud83d\ude41",
"\ud83d\ude41",
"\N{WHITE FROWNING FACE}\N{VARIATION SELECTOR-16}",
"\N{WHITE FROWNING FACE}\N{VARIATION SELECTOR-16}",
"\ud83d\ude23",
"\ud83d\ude16",
"\ud83d\ude2b",
"\ud83d\ude29",
"\ud83e\udd7a",
"\ud83d\ude22",
"\ud83d\ude2d",
"\ud83d\ude24",
"\ud83d\ude20",
"\ud83d\ude21",
"\ud83e\udd2c",
"\ud83e\udd2f",
"\ud83d\ude33",
"\ud83e\udd75",
"\ud83e\udd76",
"\ud83d\ude31",
"\ud83d\ude28",
"\ud83d\ude30",
"\ud83d\ude25",
"\ud83d\ude13",
"\ud83e\udd17",
"\ud83e\udd17",
"\ud83e\udd14",
"\ud83e\udd14",
"\ud83e\udd2d",
"\ud83e\udd71",
"\ud83e\udd2b",
"\ud83e\udd25",
"\ud83e\udd25",
"\ud83d\ude36",
"\ud83d\ude10",
"\ud83d\ude11",
"\ud83d\ude2c",
"\ud83d\ude44",
"\ud83d\ude44",
"\ud83d\ude2f",
"\ud83d\ude26",
"\ud83d\ude27",
"\ud83d\ude2e",
"\ud83d\ude32",
"\ud83d\ude34",
"\ud83e\udd24",
"\ud83e\udd24",
"\ud83d\ude2a",
"\ud83d\ude35",
"\ud83e\udd10",
"\ud83e\udd10",
"\ud83e\udd74",
"\ud83e\udd22",
"\ud83e\udd22",
"\ud83e\udd2e",
"\ud83e\udd27",
"\ud83e\udd27",
"\ud83d\ude37",
"\ud83e\udd12",
"\ud83e\udd12",
"\ud83e\udd15",
"\ud83e\udd15",
"\ud83e\udd11",
"\ud83e\udd11",
"\ud83e\udd20",
"\ud83e\udd20",
"\ud83d\ude08",
"\ud83d\udc7f",
"\ud83d\udc79",
"\ud83d\udc7a",
"\ud83e\udd21",
"\ud83e\udd21",
"\ud83d\udca9",
"\ud83d\udca9",
"\ud83d\udca9",
"\ud83d\udca9",
"\ud83d\udc7b",
"\ud83d\udc80",
"\ud83d\udc80",
"\N{SKULL AND CROSSBONES}\N{VARIATION SELECTOR-16}",
"\N{SKULL AND CROSSBONES}\N{VARIATION SELECTOR-16}",
"\ud83d\udc7d",
"\ud83d\udc7e",
"\ud83e\udd16",
"\ud83e\udd16",
"\ud83c\udf83",
"\ud83d\ude3a",
"\ud83d\ude38",
"\ud83d\ude39",
"\ud83d\ude3b",
"\ud83d\ude3c",
"\ud83d\ude3d",
"\ud83d\ude40",
"\ud83d\ude3f",
"\ud83d\ude3e",
"\ud83e\udd32",
"\ud83d\udc50",
"\ud83d\ude4c",
"\ud83d\udc4f",
"\ud83e\udd1d",
"\ud83e\udd1d",
"\ud83d\udc4d",
"\ud83d\udc4d",
"\ud83d\udc4d",
"\ud83d\udc4e",
"\ud83d\udc4e",
"\ud83d\udc4e",
"\ud83d\udc4a",
"\N{RAISED FIST}",
"\ud83e\udd1b",
"\ud83e\udd1b",
"\ud83e\udd1c",
"\ud83e\udd1c",
"\ud83e\udd1e",
"\ud83e\udd1e",
"\N{VICTORY HAND}\N{VARIATION SELECTOR-16}",
"\ud83e\udd1f",
"\ud83e\udd18",
"\ud83e\udd18",
"\ud83d\udc4c",
"\ud83e\udd0f",
"\ud83d\udc48",
"\ud83d\udc49",
"\ud83d\udc46",
"\ud83d\udc47",
"\N{WHITE UP POINTING INDEX}\N{VARIATION SELECTOR-16}",
"\N{RAISED HAND}",
"\ud83e\udd1a",
"\ud83e\udd1a",
"\ud83d\udd90\N{VARIATION SELECTOR-16}",
"\ud83d\udd90\N{VARIATION SELECTOR-16}",
"\ud83d\udd96",
"\ud83d\udd96",
"\ud83d\udc4b",
"\ud83e\udd19",
"\ud83e\udd19",
"\ud83d\udcaa",
"\ud83e\uddbe",
"\ud83d\udd95",
"\ud83d\udd95",
"\N{WRITING HAND}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4f",
"\ud83e\uddb6",
"\ud83e\uddb5",
"\ud83e\uddbf",
"\ud83d\udc84",
"\ud83d\udc8b",
"\ud83d\udc44",
"\ud83e\uddb7",
"\ud83e\uddb4",
"\ud83d\udc45",
"\ud83d\udc42",
"\ud83e\uddbb",
"\ud83d\udc43",
"\ud83d\udc63",
"\ud83d\udc41\N{VARIATION SELECTOR-16}",
"\ud83d\udc40",
"\ud83e\udde0",
"\ud83d\udde3\N{VARIATION SELECTOR-16}",
"\ud83d\udde3\N{VARIATION SELECTOR-16}",
"\ud83d\udc64",
"\ud83d\udc65",
"\ud83d\udc76",
"\ud83d\udc67",
"\ud83e\uddd2",
"\ud83d\udc66",
"\ud83d\udc69",
"\ud83e\uddd1",
"\ud83d\udc68",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddb1",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddb1",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddb0",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddb0",
"\ud83d\udc71\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc71",
"\ud83d\udc71",
"\ud83d\udc71\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddb3",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddb3",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddb2",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddb2",
"\ud83e\uddd4",
"\ud83d\udc75",
"\ud83d\udc75",
"\ud83e\uddd3",
"\ud83d\udc74",
"\ud83d\udc72",
"\ud83d\udc72",
"\ud83d\udc73",
"\ud83d\udc73",
"\ud83d\udc73\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc73\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd5",
"\ud83d\udc6e",
"\ud83d\udc6e",
"\ud83d\udc6e\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc6e\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc77",
"\ud83d\udc77\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc77\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc82",
"\ud83d\udc82",
"\ud83d\udc82\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc82\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udd75\N{VARIATION SELECTOR-16}",
"\ud83d\udd75\N{VARIATION SELECTOR-16}",
"\ud83d\udd75\N{VARIATION SELECTOR-16}",
"\ud83d\udd75\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udd75\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{STAFF OF AESCULAPIUS}\N{VARIATION SELECTOR-16}",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{STAFF OF AESCULAPIUS}\N{VARIATION SELECTOR-16}",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udf3e",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udf3e",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udf73",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udf73",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udf93",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udf93",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udfa4",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udfa4",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udfeb",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udfeb",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udfed",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udfed",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udcbb",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udcbb",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udcbc",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udcbc",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udd27",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udd27",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udd2c",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udd2c",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83c\udfa8",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83c\udfa8",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\ude92",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\ude92",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{AIRPLANE}\N{VARIATION SELECTOR-16}",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{AIRPLANE}\N{VARIATION SELECTOR-16}",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\ude80",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\ude80",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{SCALES}\N{VARIATION SELECTOR-16}",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{SCALES}\N{VARIATION SELECTOR-16}",
"\ud83d\udc70",
"\ud83e\udd35",
"\ud83d\udc78",
"\ud83e\udd34",
"\ud83e\uddb8",
"\ud83e\uddb8\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddb8\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddb9",
"\ud83e\uddb9\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddb9\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd36",
"\ud83e\udd36",
"\ud83c\udf85",
"\ud83e\uddd9",
"\ud83e\uddd9\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd9\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddd",
"\ud83e\udddd\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddd\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddb",
"\ud83e\udddb\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddb\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddf",
"\ud83e\udddf\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddf\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddde",
"\ud83e\uddde\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddde\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddc",
"\ud83e\udddc\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udddc\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddda",
"\ud83e\uddda\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddda\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc7c",
"\ud83e\udd30",
"\ud83e\udd30",
"\ud83e\udd31",
"\ud83d\ude47",
"\ud83d\ude47",
"\ud83d\ude47\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude47\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc81",
"\ud83d\udc81",
"\ud83d\udc81\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc81\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude45",
"\ud83d\ude45",
"\ud83d\ude45\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude45\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude46",
"\ud83d\ude46",
"\ud83d\ude46\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude46\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4b",
"\ud83d\ude4b",
"\ud83d\ude4b\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4b\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddcf",
"\ud83e\uddcf\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddcf\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd26",
"\ud83e\udd26",
"\ud83e\udd26",
"\ud83e\udd26\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd26\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd37",
"\ud83e\udd37",
"\ud83e\udd37\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd37\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4e",
"\ud83d\ude4e",
"\ud83d\ude4e\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4e\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4d",
"\ud83d\ude4d\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\ude4d\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc87",
"\ud83d\udc87",
"\ud83d\udc87\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc87\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc86",
"\ud83d\udc86",
"\ud83d\udc86\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc86\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd6",
"\ud83e\uddd6\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd6\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc85",
"\ud83e\udd33",
"\ud83d\udc83",
"\ud83d\udd7a",
"\ud83d\udd7a",
"\ud83d\udc6f",
"\ud83d\udc6f",
"\ud83d\udc6f\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc6f\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udd74\N{VARIATION SELECTOR-16}",
"\ud83d\udd74\N{VARIATION SELECTOR-16}",
"\ud83d\udeb6",
"\ud83d\udeb6",
"\ud83d\udeb6\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb6\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc3",
"\ud83c\udfc3",
"\ud83c\udfc3\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc3\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddcd",
"\ud83e\uddcd\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddcd\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddce",
"\ud83e\uddce\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddce\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddaf",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddaf",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddbc",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddbc",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83e\uddbd",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83e\uddbd",
"\ud83e\uddd1\N{ZERO WIDTH JOINER}\ud83e\udd1d\N{ZERO WIDTH JOINER}\ud83e\uddd1",
"\ud83d\udc6b",
"\ud83d\udc6d",
"\ud83d\udc6c",
"\ud83d\udc91",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc69",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc69",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc8f",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc8b\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc8b\N{ZERO WIDTH JOINER}\ud83d\udc69",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc8b\N{ZERO WIDTH JOINER}\ud83d\udc69",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc8b\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83d\udc8b\N{ZERO WIDTH JOINER}\ud83d\udc68",
"\ud83d\udc6a",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc66\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc66\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc69\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc66\N{ZERO WIDTH JOINER}\ud83d\udc66",
"\ud83d\udc68\N{ZERO WIDTH JOINER}\ud83d\udc67\N{ZERO WIDTH JOINER}\ud83d\udc67",
"\ud83e\uddf6",
"\ud83e\uddf5",
"\ud83e\udde5",
"\ud83e\udd7c",
"\ud83e\uddba",
"\ud83d\udc5a",
"\ud83d\udc55",
"\ud83d\udc56",
"\ud83e\ude73",
"\ud83d\udc54",
"\ud83d\udc57",
"\ud83d\udc59",
"\ud83e\ude71",
"\ud83d\udc58",
"\ud83e\udd7b",
"\ud83e\udd7f",
"\ud83d\udc60",
"\ud83d\udc61",
"\ud83d\udc62",
"\ud83e\ude70",
"\ud83d\udc5e",
"\ud83d\udc5f",
"\ud83e\udd7e",
"\ud83e\ude72",
"\ud83e\udde6",
"\ud83e\udde4",
"\ud83e\udde3",
"\ud83c\udfa9",
"\ud83e\udde2",
"\ud83d\udc52",
"\ud83c\udf93",
"\N{HELMET WITH WHITE CROSS}\N{VARIATION SELECTOR-16}",
"\N{HELMET WITH WHITE CROSS}\N{VARIATION SELECTOR-16}",
"\ud83d\udc51",
"\ud83d\udc8d",
"\ud83d\udc5d",
"\ud83d\udc5b",
"\ud83d\udc5c",
"\ud83d\udcbc",
"\ud83c\udf92",
"\ud83e\uddf3",
"\ud83d\udc53",
"\ud83d\udd76\N{VARIATION SELECTOR-16}",
"\ud83e\udd7d",
"\ud83e\udd3f",
"\ud83c\udf02",
"\ud83d\udc36",
"\ud83d\udc31",
"\ud83d\udc2d",
"\ud83d\udc39",
"\ud83d\udc30",
"\ud83e\udd8a",
"\ud83e\udd8a",
"\ud83d\udc3b",
"\ud83d\udc3c",
"\ud83d\udc28",
"\ud83d\udc2f",
"\ud83e\udd81",
"\ud83e\udd81",
"\ud83d\udc2e",
"\ud83d\udc37",
"\ud83d\udc3d",
"\ud83d\udc38",
"\ud83d\udc35",
"\ud83d\ude48",
"\ud83d\ude49",
"\ud83d\ude4a",
"\ud83d\udc12",
"\ud83d\udc14",
"\ud83d\udc27",
"\ud83d\udc26",
"\ud83d\udc24",
"\ud83d\udc23",
"\ud83d\udc25",
"\ud83e\udd86",
"\ud83e\udd85",
"\ud83e\udd89",
"\ud83e\udd87",
"\ud83d\udc3a",
"\ud83d\udc17",
"\ud83d\udc34",
"\ud83e\udd84",
"\ud83e\udd84",
"\ud83d\udc1d",
"\ud83d\udc1b",
"\ud83e\udd8b",
"\ud83d\udc0c",
"\ud83d\udc1a",
"\ud83d\udc1e",
"\ud83d\udc1c",
"\ud83e\udd9f",
"\ud83e\udd97",
"\ud83d\udd77\N{VARIATION SELECTOR-16}",
"\ud83d\udd78\N{VARIATION SELECTOR-16}",
"\ud83e\udd82",
"\ud83d\udc22",
"\ud83d\udc0d",
"\ud83e\udd8e",
"\ud83e\udd96",
"\ud83e\udd95",
"\ud83d\udc19",
"\ud83e\udd91",
"\ud83e\udd90",
"\ud83e\udd9e",
"\ud83e\uddaa",
"\ud83e\udd80",
"\ud83d\udc21",
"\ud83d\udc20",
"\ud83d\udc1f",
"\ud83d\udc2c",
"\ud83d\udc33",
"\ud83d\udc0b",
"\ud83e\udd88",
"\ud83d\udc0a",
"\ud83d\udc05",
"\ud83d\udc06",
"\ud83e\udd93",
"\ud83e\udd8d",
"\ud83e\udda7",
"\ud83d\udc18",
"\ud83e\udd9b",
"\ud83e\udd8f",
"\ud83e\udd8f",
"\ud83d\udc2a",
"\ud83d\udc2b",
"\ud83e\udd92",
"\ud83e\udd98",
"\ud83d\udc03",
"\ud83d\udc02",
"\ud83d\udc04",
"\ud83d\udc0e",
"\ud83d\udc16",
"\ud83d\udc0f",
"\ud83e\udd99",
"\ud83d\udc11",
"\ud83d\udc10",
"\ud83e\udd8c",
"\ud83d\udc15",
"\ud83e\uddae",
"\ud83d\udc15\N{ZERO WIDTH JOINER}\ud83e\uddba",
"\ud83d\udc29",
"\ud83d\udc08",
"\ud83d\udc13",
"\ud83e\udd83",
"\ud83e\udd9a",
"\ud83e\udd9c",
"\ud83e\udda2",
"\ud83e\udda9",
"\ud83d\udd4a\N{VARIATION SELECTOR-16}",
"\ud83d\udd4a\N{VARIATION SELECTOR-16}",
"\ud83d\udc07",
"\ud83e\udda5",
"\ud83e\udda6",
"\ud83e\udda8",
"\ud83e\udd9d",
"\ud83e\udda1",
"\ud83d\udc01",
"\ud83d\udc00",
"\ud83d\udc3f\N{VARIATION SELECTOR-16}",
"\ud83e\udd94",
"\ud83d\udc3e",
"\ud83d\udc3e",
"\ud83d\udc09",
"\ud83d\udc32",
"\ud83c\udf35",
"\ud83c\udf84",
"\ud83c\udf32",
"\ud83c\udf33",
"\ud83c\udf34",
"\ud83c\udf31",
"\ud83c\udf3f",
"\N{SHAMROCK}\N{VARIATION SELECTOR-16}",
"\ud83c\udf40",
"\ud83c\udf8d",
"\ud83c\udf8b",
"\ud83c\udf43",
"\ud83c\udf42",
"\ud83c\udf41",
"\ud83c\udf44",
"\ud83c\udf3e",
"\ud83d\udc90",
"\ud83c\udf37",
"\ud83c\udf39",
"\ud83e\udd40",
"\ud83e\udd40",
"\ud83c\udf3a",
"\ud83c\udf38",
"\ud83c\udf3c",
"\ud83c\udf3b",
"\ud83c\udf1e",
"\ud83c\udf1d",
"\ud83c\udf1b",
"\ud83c\udf1c",
"\ud83c\udf1a",
"\ud83c\udf15",
"\ud83c\udf16",
"\ud83c\udf17",
"\ud83c\udf18",
"\ud83c\udf11",
"\ud83c\udf12",
"\ud83c\udf13",
"\ud83c\udf14",
"\ud83c\udf19",
"\ud83c\udf0e",
"\ud83c\udf0d",
"\ud83c\udf0f",
"\ud83e\ude90",
"\ud83d\udcab",
"\N{WHITE MEDIUM STAR}",
"\ud83c\udf1f",
"\N{SPARKLES}",
"\N{HIGH VOLTAGE SIGN}",
"\N{COMET}\N{VARIATION SELECTOR-16}",
"\ud83d\udca5",
"\ud83d\udd25",
"\ud83d\udd25",
"\ud83c\udf2a\N{VARIATION SELECTOR-16}",
"\ud83c\udf2a\N{VARIATION SELECTOR-16}",
"\ud83c\udf08",
"\N{BLACK SUN WITH RAYS}\N{VARIATION SELECTOR-16}",
"\ud83c\udf24\N{VARIATION SELECTOR-16}",
"\ud83c\udf24\N{VARIATION SELECTOR-16}",
"\N{SUN BEHIND CLOUD}",
"\ud83c\udf25\N{VARIATION SELECTOR-16}",
"\ud83c\udf25\N{VARIATION SELECTOR-16}",
"\N{CLOUD}\N{VARIATION SELECTOR-16}",
"\ud83c\udf26\N{VARIATION SELECTOR-16}",
"\ud83c\udf26\N{VARIATION SELECTOR-16}",
"\ud83c\udf27\N{VARIATION SELECTOR-16}",
"\ud83c\udf27\N{VARIATION SELECTOR-16}",
"\N{THUNDER CLOUD AND RAIN}\N{VARIATION SELECTOR-16}",
"\N{THUNDER CLOUD AND RAIN}\N{VARIATION SELECTOR-16}",
"\ud83c\udf29\N{VARIATION SELECTOR-16}",
"\ud83c\udf29\N{VARIATION SELECTOR-16}",
"\ud83c\udf28\N{VARIATION SELECTOR-16}",
"\ud83c\udf28\N{VARIATION SELECTOR-16}",
"\N{SNOWFLAKE}\N{VARIATION SELECTOR-16}",
"\N{SNOWMAN}\N{VARIATION SELECTOR-16}",
"\N{SNOWMAN WITHOUT SNOW}",
"\ud83c\udf2c\N{VARIATION SELECTOR-16}",
"\ud83d\udca8",
"\ud83d\udca7",
"\ud83d\udca6",
"\N{UMBRELLA WITH RAIN DROPS}",
"\N{UMBRELLA}\N{VARIATION SELECTOR-16}",
"\ud83c\udf0a",
"\ud83c\udf2b\N{VARIATION SELECTOR-16}",
"\ud83c\udf4f",
"\ud83c\udf4e",
"\ud83c\udf50",
"\ud83c\udf4a",
"\ud83c\udf4b",
"\ud83c\udf4c",
"\ud83c\udf49",
"\ud83c\udf47",
"\ud83c\udf53",
"\ud83c\udf48",
"\ud83c\udf52",
"\ud83c\udf51",
"\ud83e\udd6d",
"\ud83c\udf4d",
"\ud83e\udd65",
"\ud83e\udd5d",
"\ud83e\udd5d",
"\ud83c\udf45",
"\ud83c\udf46",
"\ud83e\udd51",
"\ud83e\udd66",
"\ud83e\udd6c",
"\ud83e\udd52",
"\ud83c\udf36\N{VARIATION SELECTOR-16}",
"\ud83c\udf3d",
"\ud83e\udd55",
"\ud83e\uddc5",
"\ud83e\uddc4",
"\ud83e\udd54",
"\ud83c\udf60",
"\ud83e\udd50",
"\ud83e\udd6f",
"\ud83c\udf5e",
"\ud83e\udd56",
"\ud83e\udd56",
"\ud83e\udd68",
"\ud83e\uddc0",
"\ud83e\uddc0",
"\ud83e\udd5a",
"\ud83c\udf73",
"\ud83e\udd5e",
"\ud83e\uddc7",
"\ud83e\udd53",
"\ud83e\udd69",
"\ud83c\udf57",
"\ud83c\udf56",
"\ud83c\udf2d",
"\ud83c\udf2d",
"\ud83c\udf54",
"\ud83c\udf5f",
"\ud83c\udf55",
"\ud83e\udd6a",
"\ud83e\uddc6",
"\ud83e\udd59",
"\ud83e\udd59",
"\ud83c\udf2e",
"\ud83c\udf2f",
"\ud83e\udd57",
"\ud83e\udd57",
"\ud83e\udd58",
"\ud83e\udd58",
"\ud83e\udd6b",
"\ud83c\udf5d",
"\ud83c\udf5c",
"\ud83c\udf72",
"\ud83c\udf5b",
"\ud83c\udf63",
"\ud83c\udf71",
"\ud83e\udd5f",
"\ud83c\udf64",
"\ud83c\udf59",
"\ud83c\udf5a",
"\ud83c\udf58",
"\ud83c\udf65",
"\ud83e\udd60",
"\ud83e\udd6e",
"\ud83c\udf62",
"\ud83c\udf61",
"\ud83c\udf67",
"\ud83c\udf68",
"\ud83c\udf66",
"\ud83e\udd67",
"\ud83e\uddc1",
"\ud83c\udf70",
"\ud83c\udf82",
"\ud83c\udf6e",
"\ud83c\udf6e",
"\ud83c\udf6e",
"\ud83c\udf6d",
"\ud83c\udf6c",
"\ud83c\udf6b",
"\ud83c\udf7f",
"\ud83c\udf69",
"\ud83c\udf6a",
"\ud83c\udf30",
"\ud83e\udd5c",
"\ud83e\udd5c",
"\ud83c\udf6f",
"\ud83e\uddc8",
"\ud83e\udd5b",
"\ud83e\udd5b",
"\ud83c\udf7c",
"\N{HOT BEVERAGE}",
"\ud83c\udf75",
"\ud83e\uddc9",
"\ud83e\udd64",
"\ud83e\uddc3",
"\ud83e\uddca",
"\ud83c\udf76",
"\ud83c\udf7a",
"\ud83c\udf7b",
"\ud83e\udd42",
"\ud83e\udd42",
"\ud83c\udf77",
"\ud83e\udd43",
"\ud83e\udd43",
"\ud83c\udf78",
"\ud83c\udf79",
"\ud83c\udf7e",
"\ud83c\udf7e",
"\ud83e\udd44",
"\ud83c\udf74",
"\ud83c\udf7d\N{VARIATION SELECTOR-16}",
"\ud83c\udf7d\N{VARIATION SELECTOR-16}",
"\ud83e\udd63",
"\ud83e\udd61",
"\ud83e\udd62",
"\ud83e\uddc2",
"\N{SOCCER BALL}",
"\ud83c\udfc0",
"\ud83c\udfc8",
"\N{BASEBALL}",
"\ud83e\udd4e",
"\ud83c\udfbe",
"\ud83c\udfd0",
"\ud83c\udfc9",
"\ud83e\udd4f",
"\ud83c\udfb1",
"\ud83c\udfd3",
"\ud83c\udfd3",
"\ud83c\udff8",
"\ud83c\udfd2",
"\ud83c\udfd1",
"\ud83e\udd4d",
"\ud83c\udfcf",
"\ud83c\udfcf",
"\ud83e\udd45",
"\ud83e\udd45",
"\N{FLAG IN HOLE}",
"\ud83c\udff9",
"\ud83c\udff9",
"\ud83c\udfa3",
"\ud83e\udd4a",
"\ud83e\udd4a",
"\ud83e\udd4b",
"\ud83e\udd4b",
"\ud83c\udfbd",
"\ud83d\udef9",
"\ud83d\udef7",
"\ud83e\ude82",
"\N{ICE SKATE}\N{VARIATION SELECTOR-16}",
"\ud83e\udd4c",
"\ud83c\udfbf",
"\N{SKIER}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc2",
"\ud83c\udfcb\N{VARIATION SELECTOR-16}",
"\ud83c\udfcb\N{VARIATION SELECTOR-16}",
"\ud83c\udfcb\N{VARIATION SELECTOR-16}",
"\ud83c\udfcb\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfcb\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3c",
"\ud83e\udd3c",
"\ud83e\udd3c",
"\ud83e\udd3c\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3c\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd38",
"\ud83e\udd38",
"\ud83e\udd38\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd38\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\N{PERSON WITH BALL}\N{VARIATION SELECTOR-16}",
"\N{PERSON WITH BALL}\N{VARIATION SELECTOR-16}",
"\N{PERSON WITH BALL}\N{VARIATION SELECTOR-16}",
"\N{PERSON WITH BALL}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\N{PERSON WITH BALL}\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3a",
"\ud83e\udd3a",
"\ud83e\udd3a",
"\ud83e\udd3e",
"\ud83e\udd3e",
"\ud83e\udd3e\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3e\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfcc\N{VARIATION SELECTOR-16}",
"\ud83c\udfcc\N{VARIATION SELECTOR-16}",
"\ud83c\udfcc\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfcc\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc7",
"\ud83e\uddd8",
"\ud83e\uddd8\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd8\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc4",
"\ud83c\udfc4",
"\ud83c\udfc4\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc4\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfca",
"\ud83c\udfca",
"\ud83c\udfca\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfca\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3d",
"\ud83e\udd3d",
"\ud83e\udd3d\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd3d\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udea3",
"\ud83d\udea3",
"\ud83d\udea3\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udea3\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd7",
"\ud83e\uddd7\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\uddd7\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb5",
"\ud83d\udeb5",
"\ud83d\udeb5\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb5\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb4",
"\ud83d\udeb4",
"\ud83d\udeb4\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb4\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfc6",
"\ud83e\udd47",
"\ud83e\udd47",
"\ud83e\udd48",
"\ud83e\udd48",
"\ud83e\udd49",
"\ud83e\udd49",
"\ud83c\udfc5",
"\ud83c\udfc5",
"\ud83c\udf96\N{VARIATION SELECTOR-16}",
"\ud83c\udff5\N{VARIATION SELECTOR-16}",
"\ud83c\udf97\N{VARIATION SELECTOR-16}",
"\ud83c\udfab",
"\ud83c\udf9f\N{VARIATION SELECTOR-16}",
"\ud83c\udf9f\N{VARIATION SELECTOR-16}",
"\ud83c\udfaa",
"\ud83e\udd39",
"\ud83e\udd39",
"\ud83e\udd39",
"\ud83e\udd39\N{ZERO WIDTH JOINER}\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83e\udd39\N{ZERO WIDTH JOINER}\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfad",
"\ud83c\udfa8",
"\ud83c\udfac",
"\ud83c\udfa4",
"\ud83c\udfa7",
"\ud83c\udfbc",
"\ud83c\udfb9",
"\ud83e\udd41",
"\ud83e\udd41",
"\ud83c\udfb7",
"\ud83c\udfba",
"\ud83e\ude95",
"\ud83c\udfb8",
"\ud83c\udfbb",
"\ud83c\udfb2",
"\N{BLACK CHESS PAWN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfaf",
"\ud83e\ude81",
"\ud83e\ude80",
"\ud83c\udfb3",
"\ud83c\udfae",
"\ud83c\udfb0",
"\ud83e\udde9",
"\ud83d\ude97",
"\ud83d\ude95",
"\ud83d\ude99",
"\ud83d\ude8c",
"\ud83d\ude8e",
"\ud83c\udfce\N{VARIATION SELECTOR-16}",
"\ud83c\udfce\N{VARIATION SELECTOR-16}",
"\ud83d\ude93",
"\ud83d\ude91",
"\ud83d\ude92",
"\ud83d\ude90",
"\ud83d\ude9a",
"\ud83d\ude9b",
"\ud83d\ude9c",
"\ud83d\udefa",
"\ud83d\udef5",
"\ud83d\udef5",
"\ud83c\udfcd\N{VARIATION SELECTOR-16}",
"\ud83c\udfcd\N{VARIATION SELECTOR-16}",
"\ud83d\udef4",
"\ud83d\udeb2",
"\ud83e\uddbc",
"\ud83e\uddbd",
"\ud83d\udea8",
"\ud83d\ude94",
"\ud83d\ude8d",
"\ud83d\ude98",
"\ud83d\ude96",
"\ud83d\udea1",
"\ud83d\udea0",
"\ud83d\ude9f",
"\ud83d\ude83",
"\ud83d\ude8b",
"\ud83d\ude9e",
"\ud83d\ude9d",
"\ud83d\ude84",
"\ud83d\ude85",
"\ud83d\ude88",
"\ud83d\ude82",
"\ud83d\ude86",
"\ud83d\ude87",
"\ud83d\ude8a",
"\ud83d\ude89",
"\N{AIRPLANE}\N{VARIATION SELECTOR-16}",
"\ud83d\udeeb",
"\ud83d\udeec",
"\ud83d\udee9\N{VARIATION SELECTOR-16}",
"\ud83d\udee9\N{VARIATION SELECTOR-16}",
"\ud83d\udcba",
"\ud83d\udef0\N{VARIATION SELECTOR-16}",
"\ud83d\ude80",
"\ud83d\udef8",
"\ud83d\ude81",
"\ud83d\udef6",
"\ud83d\udef6",
"\N{SAILBOAT}",
"\ud83d\udea4",
"\ud83d\udee5\N{VARIATION SELECTOR-16}",
"\ud83d\udef3\N{VARIATION SELECTOR-16}",
"\ud83d\udef3\N{VARIATION SELECTOR-16}",
"\N{FERRY}\N{VARIATION SELECTOR-16}",
"\ud83d\udea2",
"\N{ANCHOR}",
"\N{FUEL PUMP}",
"\ud83d\udea7",
"\ud83d\udea6",
"\ud83d\udea5",
"\ud83d\ude8f",
"\ud83d\uddfa\N{VARIATION SELECTOR-16}",
"\ud83d\uddfa\N{VARIATION SELECTOR-16}",
"\ud83d\uddff",
"\ud83d\uddfd",
"\ud83d\uddfc",
"\ud83c\udff0",
"\ud83c\udfef",
"\ud83c\udfdf\N{VARIATION SELECTOR-16}",
"\ud83c\udfa1",
"\ud83c\udfa2",
"\ud83c\udfa0",
"\N{FOUNTAIN}",
"\N{UMBRELLA ON GROUND}\N{VARIATION SELECTOR-16}",
"\N{UMBRELLA ON GROUND}\N{VARIATION SELECTOR-16}",
"\ud83c\udfd6\N{VARIATION SELECTOR-16}",
"\ud83c\udfd6\N{VARIATION SELECTOR-16}",
"\ud83c\udfdd\N{VARIATION SELECTOR-16}",
"\ud83c\udfdd\N{VARIATION SELECTOR-16}",
"\ud83c\udfdc\N{VARIATION SELECTOR-16}",
"\ud83c\udf0b",
"\N{MOUNTAIN}\N{VARIATION SELECTOR-16}",
"\ud83c\udfd4\N{VARIATION SELECTOR-16}",
"\ud83c\udfd4\N{VARIATION SELECTOR-16}",
"\ud83d\uddfb",
"\ud83c\udfd5\N{VARIATION SELECTOR-16}",
"\N{TENT}",
"\ud83c\udfe0",
"\ud83c\udfe1",
"\ud83c\udfd8\N{VARIATION SELECTOR-16}",
"\ud83c\udfd8\N{VARIATION SELECTOR-16}",
"\ud83c\udfda\N{VARIATION SELECTOR-16}",
"\ud83c\udfda\N{VARIATION SELECTOR-16}",
"\ud83c\udfd7\N{VARIATION SELECTOR-16}",
"\ud83c\udfd7\N{VARIATION SELECTOR-16}",
"\ud83c\udfed",
"\ud83c\udfe2",
"\ud83c\udfec",
"\ud83c\udfe3",
"\ud83c\udfe4",
"\ud83c\udfe5",
"\ud83c\udfe6",
"\ud83c\udfe8",
"\ud83c\udfea",
"\ud83c\udfeb",
"\ud83c\udfe9",
"\ud83d\udc92",
"\ud83c\udfdb\N{VARIATION SELECTOR-16}",
"\N{CHURCH}",
"\ud83d\udd4c",
"\ud83d\uded5",
"\ud83d\udd4d",
"\ud83d\udd4b",
"\N{SHINTO SHRINE}\N{VARIATION SELECTOR-16}",
"\ud83d\udee4\N{VARIATION SELECTOR-16}",
"\ud83d\udee4\N{VARIATION SELECTOR-16}",
"\ud83d\udee3\N{VARIATION SELECTOR-16}",
"\ud83d\uddfe",
"\ud83c\udf91",
"\ud83c\udfde\N{VARIATION SELECTOR-16}",
"\ud83c\udfde\N{VARIATION SELECTOR-16}",
"\ud83c\udf05",
"\ud83c\udf04",
"\ud83c\udf20",
"\ud83c\udf87",
"\ud83c\udf86",
"\ud83c\udf07",
"\ud83c\udf07",
"\ud83c\udf06",
"\ud83c\udfd9\N{VARIATION SELECTOR-16}",
"\ud83c\udf03",
"\ud83c\udf0c",
"\ud83c\udf09",
"\ud83c\udf01",
"\N{WATCH}",
"\ud83d\udcf1",
"\ud83d\udcf2",
"\ud83d\udcbb",
"\N{KEYBOARD}\N{VARIATION SELECTOR-16}",
"\ud83d\udda5\N{VARIATION SELECTOR-16}",
"\ud83d\udda5\N{VARIATION SELECTOR-16}",
"\ud83d\udda8\N{VARIATION SELECTOR-16}",
"\ud83d\uddb1\N{VARIATION SELECTOR-16}",
"\ud83d\uddb1\N{VARIATION SELECTOR-16}",
"\ud83d\uddb2\N{VARIATION SELECTOR-16}",
"\ud83d\udd79\N{VARIATION SELECTOR-16}",
"\ud83d\udddc\N{VARIATION SELECTOR-16}",
"\ud83d\udcbd",
"\ud83d\udcbe",
"\ud83d\udcbf",
"\ud83d\udcc0",
"\ud83d\udcfc",
"\ud83d\udcf7",
"\ud83d\udcf8",
"\ud83d\udcf9",
"\ud83c\udfa5",
"\ud83d\udcfd\N{VARIATION SELECTOR-16}",
"\ud83d\udcfd\N{VARIATION SELECTOR-16}",
"\ud83c\udf9e\N{VARIATION SELECTOR-16}",
"\ud83d\udcde",
"\N{BLACK TELEPHONE}\N{VARIATION SELECTOR-16}",
"\ud83d\udcdf",
"\ud83d\udce0",
"\ud83d\udcfa",
"\ud83d\udcfb",
"\ud83c\udf99\N{VARIATION SELECTOR-16}",
"\ud83c\udf99\N{VARIATION SELECTOR-16}",
"\ud83c\udf9a\N{VARIATION SELECTOR-16}",
"\ud83c\udf9b\N{VARIATION SELECTOR-16}",
"\ud83e\udded",
"\N{STOPWATCH}\N{VARIATION SELECTOR-16}",
"\N{TIMER CLOCK}\N{VARIATION SELECTOR-16}",
"\N{TIMER CLOCK}\N{VARIATION SELECTOR-16}",
"\N{ALARM CLOCK}",
"\ud83d\udd70\N{VARIATION SELECTOR-16}",
"\ud83d\udd70\N{VARIATION SELECTOR-16}",
"\N{HOURGLASS}",
"\N{HOURGLASS WITH FLOWING SAND}",
"\ud83d\udce1",
"\ud83d\udd0b",
"\ud83d\udd0c",
"\ud83d\udca1",
"\ud83d\udd26",
"\ud83d\udd6f\N{VARIATION SELECTOR-16}",
"\ud83e\uddef",
"\ud83d\udee2\N{VARIATION SELECTOR-16}",
"\ud83d\udee2\N{VARIATION SELECTOR-16}",
"\ud83d\udcb8",
"\ud83d\udcb5",
"\ud83d\udcb4",
"\ud83d\udcb6",
"\ud83d\udcb7",
"\ud83d\udcb0",
"\ud83d\udcb3",
"\ud83d\udc8e",
"\N{SCALES}\N{VARIATION SELECTOR-16}",
"\ud83e\uddf0",
"\ud83d\udd27",
"\ud83d\udd28",
"\N{HAMMER AND PICK}\N{VARIATION SELECTOR-16}",
"\N{HAMMER AND PICK}\N{VARIATION SELECTOR-16}",
"\ud83d\udee0\N{VARIATION SELECTOR-16}",
"\ud83d\udee0\N{VARIATION SELECTOR-16}",
"\N{PICK}\N{VARIATION SELECTOR-16}",
"\ud83d\udd29",
"\N{GEAR}\N{VARIATION SELECTOR-16}",
"\ud83e\uddf1",
"\N{CHAINS}\N{VARIATION SELECTOR-16}",
"\ud83e\uddf2",
"\ud83d\udd2b",
"\ud83d\udca3",
"\ud83e\udde8",
"\ud83e\ude93",
"\ud83e\ude92",
"\ud83d\udd2a",
"\ud83d\udde1\N{VARIATION SELECTOR-16}",
"\ud83d\udde1\N{VARIATION SELECTOR-16}",
"\N{CROSSED SWORDS}\N{VARIATION SELECTOR-16}",
"\ud83d\udee1\N{VARIATION SELECTOR-16}",
"\ud83d\udeac",
"\N{COFFIN}\N{VARIATION SELECTOR-16}",
"\N{FUNERAL URN}\N{VARIATION SELECTOR-16}",
"\N{FUNERAL URN}\N{VARIATION SELECTOR-16}",
"\ud83c\udffa",
"\ud83e\ude94",
"\ud83d\udd2e",
"\ud83d\udcff",
"\ud83e\uddff",
"\ud83d\udc88",
"\N{ALEMBIC}\N{VARIATION SELECTOR-16}",
"\ud83d\udd2d",
"\ud83d\udd2c",
"\ud83d\udd73\N{VARIATION SELECTOR-16}",
"\ud83e\uddaf",
"\ud83e\ude7a",
"\ud83e\ude79",
"\ud83d\udc8a",
"\ud83d\udc89",
"\ud83e\ude78",
"\ud83e\uddec",
"\ud83e\udda0",
"\ud83e\uddeb",
"\ud83e\uddea",
"\ud83c\udf21\N{VARIATION SELECTOR-16}",
"\ud83e\ude91",
"\ud83e\uddf9",
"\ud83e\uddfa",
"\ud83e\uddfb",
"\ud83d\udebd",
"\ud83d\udeb0",
"\ud83d\udebf",
"\ud83d\udec1",
"\ud83d\udec0",
"\ud83e\uddfc",
"\ud83e\uddfd",
"\ud83e\uddf4",
"\ud83d\udece\N{VARIATION SELECTOR-16}",
"\ud83d\udece\N{VARIATION SELECTOR-16}",
"\ud83d\udd11",
"\ud83d\udddd\N{VARIATION SELECTOR-16}",
"\ud83d\udddd\N{VARIATION SELECTOR-16}",
"\ud83d\udeaa",
"\ud83d\udecb\N{VARIATION SELECTOR-16}",
"\ud83d\udecb\N{VARIATION SELECTOR-16}",
"\ud83d\udecf\N{VARIATION SELECTOR-16}",
"\ud83d\udecc",
"\ud83e\uddf8",
"\ud83d\uddbc\N{VARIATION SELECTOR-16}",
"\ud83d\uddbc\N{VARIATION SELECTOR-16}",
"\ud83d\udecd\N{VARIATION SELECTOR-16}",
"\ud83d\uded2",
"\ud83d\uded2",
"\ud83c\udf81",
"\ud83c\udf88",
"\ud83c\udf8f",
"\ud83c\udf80",
"\ud83c\udf8a",
"\ud83c\udf89",
"\ud83c\udf8e",
"\ud83c\udfee",
"\ud83c\udf90",
"\ud83e\udde7",
"\N{ENVELOPE}\N{VARIATION SELECTOR-16}",
"\ud83d\udce9",
"\ud83d\udce8",
"\ud83d\udce7",
"\ud83d\udce7",
"\ud83d\udc8c",
"\ud83d\udce5",
"\ud83d\udce4",
"\ud83d\udce6",
"\ud83c\udff7\N{VARIATION SELECTOR-16}",
"\ud83d\udcea",
"\ud83d\udceb",
"\ud83d\udcec",
"\ud83d\udced",
"\ud83d\udcee",
"\ud83d\udcef",
"\ud83d\udcdc",
"\ud83d\udcc3",
"\ud83d\udcc4",
"\ud83d\udcd1",
"\ud83e\uddfe",
"\ud83d\udcca",
"\ud83d\udcc8",
"\ud83d\udcc9",
"\ud83d\uddd2\N{VARIATION SELECTOR-16}",
"\ud83d\uddd2\N{VARIATION SELECTOR-16}",
"\ud83d\uddd3\N{VARIATION SELECTOR-16}",
"\ud83d\uddd3\N{VARIATION SELECTOR-16}",
"\ud83d\udcc6",
"\ud83d\udcc5",
"\ud83d\uddd1\N{VARIATION SELECTOR-16}",
"\ud83d\udcc7",
"\ud83d\uddc3\N{VARIATION SELECTOR-16}",
"\ud83d\uddc3\N{VARIATION SELECTOR-16}",
"\ud83d\uddf3\N{VARIATION SELECTOR-16}",
"\ud83d\uddf3\N{VARIATION SELECTOR-16}",
"\ud83d\uddc4\N{VARIATION SELECTOR-16}",
"\ud83d\udccb",
"\ud83d\udcc1",
"\ud83d\udcc2",
"\ud83d\uddc2\N{VARIATION SELECTOR-16}",
"\ud83d\uddc2\N{VARIATION SELECTOR-16}",
"\ud83d\uddde\N{VARIATION SELECTOR-16}",
"\ud83d\uddde\N{VARIATION SELECTOR-16}",
"\ud83d\udcf0",
"\ud83d\udcd3",
"\ud83d\udcd4",
"\ud83d\udcd2",
"\ud83d\udcd5",
"\ud83d\udcd7",
"\ud83d\udcd8",
"\ud83d\udcd9",
"\ud83d\udcda",
"\ud83d\udcd6",
"\ud83d\udd16",
"\ud83e\uddf7",
"\ud83d\udd17",
"\ud83d\udcce",
"\ud83d\udd87\N{VARIATION SELECTOR-16}",
"\ud83d\udd87\N{VARIATION SELECTOR-16}",
"\ud83d\udcd0",
"\ud83d\udccf",
"\ud83e\uddee",
"\ud83d\udccc",
"\ud83d\udccd",
"\N{BLACK SCISSORS}\N{VARIATION SELECTOR-16}",
"\ud83d\udd8a\N{VARIATION SELECTOR-16}",
"\ud83d\udd8a\N{VARIATION SELECTOR-16}",
"\ud83d\udd8b\N{VARIATION SELECTOR-16}",
"\ud83d\udd8b\N{VARIATION SELECTOR-16}",
"\N{BLACK NIB}\N{VARIATION SELECTOR-16}",
"\ud83d\udd8c\N{VARIATION SELECTOR-16}",
"\ud83d\udd8c\N{VARIATION SELECTOR-16}",
"\ud83d\udd8d\N{VARIATION SELECTOR-16}",
"\ud83d\udd8d\N{VARIATION SELECTOR-16}",
"\ud83d\udcdd",
"\ud83d\udcdd",
"\N{PENCIL}\N{VARIATION SELECTOR-16}",
"\ud83d\udd0d",
"\ud83d\udd0e",
"\ud83d\udd0f",
"\ud83d\udd10",
"\ud83d\udd12",
"\ud83d\udd13",
"\N{HEAVY BLACK HEART}\N{VARIATION SELECTOR-16}",
"\ud83e\udde1",
"\ud83d\udc9b",
"\ud83d\udc9a",
"\ud83d\udc99",
"\ud83d\udc9c",
"\ud83d\udda4",
"\ud83e\udd0e",
"\ud83e\udd0d",
"\ud83d\udc94",
"\N{HEAVY HEART EXCLAMATION MARK ORNAMENT}\N{VARIATION SELECTOR-16}",
"\N{HEAVY HEART EXCLAMATION MARK ORNAMENT}\N{VARIATION SELECTOR-16}",
"\ud83d\udc95",
"\ud83d\udc9e",
"\ud83d\udc93",
"\ud83d\udc97",
"\ud83d\udc96",
"\ud83d\udc98",
"\ud83d\udc9d",
"\ud83d\udc9f",
"\N{PEACE SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{PEACE SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{LATIN CROSS}\N{VARIATION SELECTOR-16}",
"\N{LATIN CROSS}\N{VARIATION SELECTOR-16}",
"\N{STAR AND CRESCENT}\N{VARIATION SELECTOR-16}",
"\ud83d\udd49\N{VARIATION SELECTOR-16}",
"\N{WHEEL OF DHARMA}\N{VARIATION SELECTOR-16}",
"\N{STAR OF DAVID}\N{VARIATION SELECTOR-16}",
"\ud83d\udd2f",
"\ud83d\udd4e",
"\N{YIN YANG}\N{VARIATION SELECTOR-16}",
"\N{ORTHODOX CROSS}\N{VARIATION SELECTOR-16}",
"\ud83d\uded0",
"\ud83d\uded0",
"\N{OPHIUCHUS}",
"\N{ARIES}",
"\N{TAURUS}",
"\N{GEMINI}",
"\N{CANCER}",
"\N{LEO}",
"\N{VIRGO}",
"\N{LIBRA}",
"\N{SCORPIUS}",
"\N{SAGITTARIUS}",
"\N{CAPRICORN}",
"\N{AQUARIUS}",
"\N{PISCES}",
"\ud83c\udd94",
"\N{ATOM SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{ATOM SYMBOL}\N{VARIATION SELECTOR-16}",
"\ud83c\ude51",
"\N{RADIOACTIVE SIGN}\N{VARIATION SELECTOR-16}",
"\N{RADIOACTIVE SIGN}\N{VARIATION SELECTOR-16}",
"\N{BIOHAZARD SIGN}\N{VARIATION SELECTOR-16}",
"\N{BIOHAZARD SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udcf4",
"\ud83d\udcf3",
"\ud83c\ude36",
"\ud83c\ude1a",
"\ud83c\ude38",
"\ud83c\ude3a",
"\ud83c\ude37\N{VARIATION SELECTOR-16}",
"\N{EIGHT POINTED BLACK STAR}\N{VARIATION SELECTOR-16}",
"\ud83c\udd9a",
"\ud83d\udcae",
"\ud83c\ude50",
"\N{CIRCLED IDEOGRAPH SECRET}\N{VARIATION SELECTOR-16}",
"\N{CIRCLED IDEOGRAPH CONGRATULATION}\N{VARIATION SELECTOR-16}",
"\ud83c\ude34",
"\ud83c\ude35",
"\ud83c\ude39",
"\ud83c\ude32",
"\ud83c\udd70\N{VARIATION SELECTOR-16}",
"\ud83c\udd71\N{VARIATION SELECTOR-16}",
"\ud83c\udd8e",
"\ud83c\udd91",
"\ud83c\udd7e\N{VARIATION SELECTOR-16}",
"\ud83c\udd98",
"\N{CROSS MARK}",
"\N{HEAVY LARGE CIRCLE}",
"\ud83d\uded1",
"\ud83d\uded1",
"\N{NO ENTRY}",
"\ud83d\udcdb",
"\ud83d\udeab",
"\ud83d\udcaf",
"\ud83d\udca2",
"\N{HOT SPRINGS}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb7",
"\ud83d\udeaf",
"\ud83d\udeb3",
"\ud83d\udeb1",
"\ud83d\udd1e",
"\ud83d\udcf5",
"\ud83d\udead",
"\N{HEAVY EXCLAMATION MARK SYMBOL}",
"\N{WHITE EXCLAMATION MARK ORNAMENT}",
"\N{BLACK QUESTION MARK ORNAMENT}",
"\N{WHITE QUESTION MARK ORNAMENT}",
"\N{DOUBLE EXCLAMATION MARK}\N{VARIATION SELECTOR-16}",
"\N{EXCLAMATION QUESTION MARK}\N{VARIATION SELECTOR-16}",
"\ud83d\udd05",
"\ud83d\udd06",
"\N{PART ALTERNATION MARK}\N{VARIATION SELECTOR-16}",
"\N{WARNING SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udeb8",
"\ud83d\udd31",
"\N{FLEUR-DE-LIS}\N{VARIATION SELECTOR-16}",
"\ud83d\udd30",
"\N{BLACK UNIVERSAL RECYCLING SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{WHITE HEAVY CHECK MARK}",
"\ud83c\ude2f",
"\ud83d\udcb9",
"\N{SPARKLE}\N{VARIATION SELECTOR-16}",
"\N{EIGHT SPOKED ASTERISK}\N{VARIATION SELECTOR-16}",
"\N{NEGATIVE SQUARED CROSS MARK}",
"\ud83c\udf10",
"\ud83d\udca0",
"\N{CIRCLED LATIN CAPITAL LETTER M}\N{VARIATION SELECTOR-16}",
"\ud83c\udf00",
"\ud83d\udca4",
"\ud83c\udfe7",
"\ud83d\udebe",
"\N{WHEELCHAIR SYMBOL}",
"\ud83c\udd7f\N{VARIATION SELECTOR-16}",
"\ud83c\ude33",
"\ud83c\ude02\N{VARIATION SELECTOR-16}",
"\ud83d\udec2",
"\ud83d\udec3",
"\ud83d\udec4",
"\ud83d\udec5",
"\ud83d\udeb9",
"\ud83d\udeba",
"\ud83d\udebc",
"\ud83d\udebb",
"\ud83d\udeae",
"\ud83c\udfa6",
"\ud83d\udcf6",
"\ud83c\ude01",
"\ud83d\udd23",
"\N{INFORMATION SOURCE}\N{VARIATION SELECTOR-16}",
"\ud83d\udd24",
"\ud83d\udd21",
"\ud83d\udd20",
"\ud83c\udd96",
"\ud83c\udd97",
"\ud83c\udd99",
"\ud83c\udd92",
"\ud83c\udd95",
"\ud83c\udd93",
"\N{DIGIT ZERO}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT ONE}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT TWO}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT THREE}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT FOUR}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT FIVE}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT SIX}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT SEVEN}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT EIGHT}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{DIGIT NINE}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\ud83d\udd1f",
"\ud83d\udd22",
"\N{NUMBER SIGN}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{ASTERISK}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{ASTERISK}\N{VARIATION SELECTOR-16}\N{COMBINING ENCLOSING KEYCAP}",
"\N{EJECT SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{EJECT SYMBOL}\N{VARIATION SELECTOR-16}",
"\N{BLACK RIGHT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}",
"\N{DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK RIGHT-POINTING TRIANGLE WITH DOUBLE VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK SQUARE FOR STOP}\N{VARIATION SELECTOR-16}",
"\N{BLACK CIRCLE FOR RECORD}\N{VARIATION SELECTOR-16}",
"\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK LEFT-POINTING DOUBLE TRIANGLE WITH VERTICAL BAR}\N{VARIATION SELECTOR-16}",
"\N{BLACK RIGHT-POINTING DOUBLE TRIANGLE}",
"\N{BLACK LEFT-POINTING DOUBLE TRIANGLE}",
"\N{BLACK UP-POINTING DOUBLE TRIANGLE}",
"\N{BLACK DOWN-POINTING DOUBLE TRIANGLE}",
"\N{BLACK LEFT-POINTING TRIANGLE}\N{VARIATION SELECTOR-16}",
"\ud83d\udd3c",
"\ud83d\udd3d",
"\N{BLACK RIGHTWARDS ARROW}\N{VARIATION SELECTOR-16}",
"\N{LEFTWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}",
"\N{UPWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}",
"\N{DOWNWARDS BLACK ARROW}\N{VARIATION SELECTOR-16}",
"\N{NORTH EAST ARROW}\N{VARIATION SELECTOR-16}",
"\N{SOUTH EAST ARROW}\N{VARIATION SELECTOR-16}",
"\N{SOUTH WEST ARROW}\N{VARIATION SELECTOR-16}",
"\N{NORTH WEST ARROW}\N{VARIATION SELECTOR-16}",
"\N{UP DOWN ARROW}\N{VARIATION SELECTOR-16}",
"\N{LEFT RIGHT ARROW}\N{VARIATION SELECTOR-16}",
"\N{RIGHTWARDS ARROW WITH HOOK}\N{VARIATION SELECTOR-16}",
"\N{LEFTWARDS ARROW WITH HOOK}\N{VARIATION SELECTOR-16}",
"\N{ARROW POINTING RIGHTWARDS THEN CURVING UPWARDS}\N{VARIATION SELECTOR-16}",
"\N{ARROW POINTING RIGHTWARDS THEN CURVING DOWNWARDS}\N{VARIATION SELECTOR-16}",
"\ud83d\udd00",
"\ud83d\udd01",
"\ud83d\udd02",
"\ud83d\udd04",
"\ud83d\udd03",
"\ud83c\udfb5",
"\ud83c\udfb6",
"\N{HEAVY PLUS SIGN}",
"\N{HEAVY MINUS SIGN}",
"\N{HEAVY DIVISION SIGN}",
"\N{HEAVY MULTIPLICATION X}\N{VARIATION SELECTOR-16}",
"\N{PERMANENT PAPER SIGN}\N{VARIATION SELECTOR-16}",
"\ud83d\udcb2",
"\ud83d\udcb1",
"\N{TRADE MARK SIGN}\N{VARIATION SELECTOR-16}",
"\N{COPYRIGHT SIGN}\N{VARIATION SELECTOR-16}",
"\N{REGISTERED SIGN}\N{VARIATION SELECTOR-16}",
"\N{WAVY DASH}\N{VARIATION SELECTOR-16}",
"\N{CURLY LOOP}",
"\N{DOUBLE CURLY LOOP}",
"\ud83d\udd1a",
"\ud83d\udd19",
"\ud83d\udd1b",
"\ud83d\udd1d",
"\ud83d\udd1c",
"\N{HEAVY CHECK MARK}\N{VARIATION SELECTOR-16}",
"\N{BALLOT BOX WITH CHECK}\N{VARIATION SELECTOR-16}",
"\ud83d\udd18",
"\N{MEDIUM WHITE CIRCLE}",
"\N{MEDIUM BLACK CIRCLE}",
"\ud83d\udd34",
"\ud83d\udd35",
"\ud83d\udfe4",
"\ud83d\udfe3",
"\ud83d\udfe2",
"\ud83d\udfe1",
"\ud83d\udfe0",
"\ud83d\udd3a",
"\ud83d\udd3b",
"\ud83d\udd38",
"\ud83d\udd39",
"\ud83d\udd36",
"\ud83d\udd37",
"\ud83d\udd33",
"\ud83d\udd32",
"\N{BLACK SMALL SQUARE}\N{VARIATION SELECTOR-16}",
"\N{WHITE SMALL SQUARE}\N{VARIATION SELECTOR-16}",
"\N{BLACK MEDIUM SMALL SQUARE}",
"\N{WHITE MEDIUM SMALL SQUARE}",
"\N{BLACK MEDIUM SQUARE}\N{VARIATION SELECTOR-16}",
"\N{WHITE MEDIUM SQUARE}\N{VARIATION SELECTOR-16}",
"\N{BLACK LARGE SQUARE}",
"\N{WHITE LARGE SQUARE}",
"\ud83d\udfe7",
"\ud83d\udfe6",
"\ud83d\udfe5",
"\ud83d\udfeb",
"\ud83d\udfea",
"\ud83d\udfe9",
"\ud83d\udfe8",
"\ud83d\udd08",
"\ud83d\udd07",
"\ud83d\udd09",
"\ud83d\udd0a",
"\ud83d\udd14",
"\ud83d\udd15",
"\ud83d\udce3",
"\ud83d\udce2",
"\ud83d\udde8\N{VARIATION SELECTOR-16}",
"\ud83d\udde8\N{VARIATION SELECTOR-16}",
"\ud83d\udc41\N{ZERO WIDTH JOINER}\ud83d\udde8",
"\ud83d\udcac",
"\ud83d\udcad",
"\ud83d\uddef\N{VARIATION SELECTOR-16}",
"\ud83d\uddef\N{VARIATION SELECTOR-16}",
"\N{BLACK SPADE SUIT}\N{VARIATION SELECTOR-16}",
"\N{BLACK CLUB SUIT}\N{VARIATION SELECTOR-16}",
"\N{BLACK HEART SUIT}\N{VARIATION SELECTOR-16}",
"\N{BLACK DIAMOND SUIT}\N{VARIATION SELECTOR-16}",
"\ud83c\udccf",
"\ud83c\udfb4",
"\ud83c\udc04",
"\ud83d\udd50",
"\ud83d\udd51",
"\ud83d\udd52",
"\ud83d\udd53",
"\ud83d\udd54",
"\ud83d\udd55",
"\ud83d\udd56",
"\ud83d\udd57",
"\ud83d\udd58",
"\ud83d\udd59",
"\ud83d\udd5a",
"\ud83d\udd5b",
"\ud83d\udd5c",
"\ud83d\udd5d",
"\ud83d\udd5e",
"\ud83d\udd5f",
"\ud83d\udd60",
"\ud83d\udd61",
"\ud83d\udd62",
"\ud83d\udd63",
"\ud83d\udd64",
"\ud83d\udd65",
"\ud83d\udd66",
"\ud83d\udd67",
"\N{FEMALE SIGN}\N{VARIATION SELECTOR-16}",
"\N{MALE SIGN}\N{VARIATION SELECTOR-16}",
"\N{STAFF OF AESCULAPIUS}\N{VARIATION SELECTOR-16}",
"\ud83c\uddff",
"\ud83c\uddfe",
"\ud83c\uddfd",
"\ud83c\uddfc",
"\ud83c\uddfb",
"\ud83c\uddfa",
"\ud83c\uddf9",
"\ud83c\uddf8",
"\ud83c\uddf7",
"\ud83c\uddf6",
"\ud83c\uddf5",
"\ud83c\uddf4",
"\ud83c\uddf3",
"\ud83c\uddf2",
"\ud83c\uddf1",
"\ud83c\uddf0",
"\ud83c\uddef",
"\ud83c\uddee",
"\ud83c\udded",
"\ud83c\uddec",
"\ud83c\uddeb",
"\ud83c\uddea",
"\ud83c\udde9",
"\ud83c\udde8",
"\ud83c\udde7",
"\ud83c\udde6",
"\ud83c\udff3\N{VARIATION SELECTOR-16}",
"\ud83c\udff4",
"\ud83c\udfc1",
"\ud83d\udea9",
"\ud83c\udff3\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83c\udf08",
"\ud83c\udff3\N{VARIATION SELECTOR-16}\N{ZERO WIDTH JOINER}\ud83c\udf08",
"\ud83c\udff4\N{ZERO WIDTH JOINER}\N{SKULL AND CROSSBONES}\N{VARIATION SELECTOR-16}",
"\ud83c\udde6\ud83c\uddeb",
"\ud83c\udde6\ud83c\uddfd",
"\ud83c\udde6\ud83c\uddf1",
"\ud83c\udde9\ud83c\uddff",
"\ud83c\udde6\ud83c\uddf8",
"\ud83c\udde6\ud83c\udde9",
"\ud83c\udde6\ud83c\uddf4",
"\ud83c\udde6\ud83c\uddee",
"\ud83c\udde6\ud83c\uddf6",
"\ud83c\udde6\ud83c\uddec",
"\ud83c\udde6\ud83c\uddf7",
"\ud83c\udde6\ud83c\uddf2",
"\ud83c\udde6\ud83c\uddfc",
"\ud83c\udde6\ud83c\uddfa",
"\ud83c\udde6\ud83c\uddf9",
"\ud83c\udde6\ud83c\uddff",
"\ud83c\udde7\ud83c\uddf8",
"\ud83c\udde7\ud83c\udded",
"\ud83c\udde7\ud83c\udde9",
"\ud83c\udde7\ud83c\udde7",
"\ud83c\udde7\ud83c\uddfe",
"\ud83c\udde7\ud83c\uddea",
"\ud83c\udde7\ud83c\uddff",
"\ud83c\udde7\ud83c\uddef",
"\ud83c\udde7\ud83c\uddf2",
"\ud83c\udde7\ud83c\uddf9",
"\ud83c\udde7\ud83c\uddf4",
"\ud83c\udde7\ud83c\udde6",
"\ud83c\udde7\ud83c\uddfc",
"\ud83c\udde7\ud83c\uddf7",
"\ud83c\uddee\ud83c\uddf4",
"\ud83c\uddfb\ud83c\uddec",
"\ud83c\udde7\ud83c\uddf3",
"\ud83c\udde7\ud83c\uddec",
"\ud83c\udde7\ud83c\uddeb",
"\ud83c\udde7\ud83c\uddee",
"\ud83c\uddf0\ud83c\udded",
"\ud83c\udde8\ud83c\uddf2",
"\ud83c\udde8\ud83c\udde6",
"\ud83c\uddee\ud83c\udde8",
"\ud83c\udde8\ud83c\uddfb",
"\ud83c\udde7\ud83c\uddf6",
"\ud83c\uddf0\ud83c\uddfe",
"\ud83c\udde8\ud83c\uddeb",
"\ud83c\uddf9\ud83c\udde9",
"\ud83c\udde8\ud83c\uddf1",
"\ud83c\udde8\ud83c\uddf3",
"\ud83c\udde8\ud83c\uddfd",
"\ud83c\udde8\ud83c\udde8",
"\ud83c\udde8\ud83c\uddf4",
"\ud83c\uddf0\ud83c\uddf2",
"\ud83c\udde8\ud83c\uddec",
"\ud83c\udde8\ud83c\udde9",
"\ud83c\udde8\ud83c\uddf0",
"\ud83c\udde8\ud83c\uddf7",
"\ud83c\udde8\ud83c\uddee",
"\ud83c\udded\ud83c\uddf7",
"\ud83c\udde8\ud83c\uddfa",
"\ud83c\udde8\ud83c\uddfc",
"\ud83c\udde8\ud83c\uddfe",
"\ud83c\udde8\ud83c\uddff",
"\ud83c\udde9\ud83c\uddf0",
"\ud83c\udde9\ud83c\uddef",
"\ud83c\udde9\ud83c\uddf2",
"\ud83c\udde9\ud83c\uddf4",
"\ud83c\uddea\ud83c\udde8",
"\ud83c\uddea\ud83c\uddec",
"\ud83c\uddf8\ud83c\uddfb",
"\ud83c\uddec\ud83c\uddf6",
"\ud83c\uddea\ud83c\uddf7",
"\ud83c\uddea\ud83c\uddea",
"\ud83c\uddea\ud83c\uddf9",
"\ud83c\uddea\ud83c\uddfa",
"\ud83c\uddeb\ud83c\uddf0",
"\ud83c\uddeb\ud83c\uddf4",
"\ud83c\uddeb\ud83c\uddef",
"\ud83c\uddeb\ud83c\uddee",
"\ud83c\uddeb\ud83c\uddf7",
"\ud83c\uddec\ud83c\uddeb",
"\ud83c\uddf5\ud83c\uddeb",
"\ud83c\uddf9\ud83c\uddeb",
"\ud83c\uddec\ud83c\udde6",
"\ud83c\uddec\ud83c\uddf2",
"\ud83c\uddec\ud83c\uddea",
"\ud83c\udde9\ud83c\uddea",
"\ud83c\uddec\ud83c\udded",
"\ud83c\uddec\ud83c\uddee",
"\ud83c\uddec\ud83c\uddf7",
"\ud83c\uddec\ud83c\uddf1",
"\ud83c\uddec\ud83c\udde9",
"\ud83c\uddec\ud83c\uddf5",
"\ud83c\uddec\ud83c\uddfa",
"\ud83c\uddec\ud83c\uddf9",
"\ud83c\uddec\ud83c\uddec",
"\ud83c\uddec\ud83c\uddf3",
"\ud83c\uddec\ud83c\uddfc",
"\ud83c\uddec\ud83c\uddfe",
"\ud83c\udded\ud83c\uddf9",
"\ud83c\udded\ud83c\uddf3",
"\ud83c\udded\ud83c\uddf0",
"\ud83c\udded\ud83c\uddfa",
"\ud83c\uddee\ud83c\uddf8",
"\ud83c\uddee\ud83c\uddf3",
"\ud83c\uddee\ud83c\udde9",
"\ud83c\uddee\ud83c\uddf7",
"\ud83c\uddee\ud83c\uddf6",
"\ud83c\uddee\ud83c\uddea",
"\ud83c\uddee\ud83c\uddf2",
"\ud83c\uddee\ud83c\uddf1",
"\ud83c\uddee\ud83c\uddf9",
"\ud83c\uddef\ud83c\uddf2",
"\ud83c\uddef\ud83c\uddf5",
"\ud83c\udf8c",
"\ud83c\uddef\ud83c\uddea",
"\ud83c\uddef\ud83c\uddf4",
"\ud83c\uddf0\ud83c\uddff",
"\ud83c\uddf0\ud83c\uddea",
"\ud83c\uddf0\ud83c\uddee",
"\ud83c\uddfd\ud83c\uddf0",
"\ud83c\uddf0\ud83c\uddfc",
"\ud83c\uddf0\ud83c\uddec",
"\ud83c\uddf1\ud83c\udde6",
"\ud83c\uddf1\ud83c\uddfb",
"\ud83c\uddf1\ud83c\udde7",
"\ud83c\uddf1\ud83c\uddf8",
"\ud83c\uddf1\ud83c\uddf7",
"\ud83c\uddf1\ud83c\uddfe",
"\ud83c\uddf1\ud83c\uddee",
"\ud83c\uddf1\ud83c\uddf9",
"\ud83c\uddf1\ud83c\uddfa",
"\ud83c\uddf2\ud83c\uddf4",
"\ud83c\uddf2\ud83c\uddf0",
"\ud83c\uddf2\ud83c\uddec",
"\ud83c\uddf2\ud83c\uddfc",
"\ud83c\uddf2\ud83c\uddfe",
"\ud83c\uddf2\ud83c\uddfb",
"\ud83c\uddf2\ud83c\uddf1",
"\ud83c\uddf2\ud83c\uddf9",
"\ud83c\uddf2\ud83c\udded",
"\ud83c\uddf2\ud83c\uddf6",
"\ud83c\uddf2\ud83c\uddf7",
"\ud83c\uddf2\ud83c\uddfa",
"\ud83c\uddfe\ud83c\uddf9",
"\ud83c\uddf2\ud83c\uddfd",
"\ud83c\uddeb\ud83c\uddf2",
"\ud83c\uddf2\ud83c\udde9",
"\ud83c\uddf2\ud83c\udde8",
"\ud83c\uddf2\ud83c\uddf3",
"\ud83c\uddf2\ud83c\uddea",
"\ud83c\uddf2\ud83c\uddf8",
"\ud83c\uddf2\ud83c\udde6",
"\ud83c\uddf2\ud83c\uddff",
"\ud83c\uddf2\ud83c\uddf2",
"\ud83c\uddf3\ud83c\udde6",
"\ud83c\uddf3\ud83c\uddf7",
"\ud83c\uddf3\ud83c\uddf5",
"\ud83c\uddf3\ud83c\uddf1",
"\ud83c\uddf3\ud83c\udde8",
"\ud83c\uddf3\ud83c\uddff",
"\ud83c\uddf3\ud83c\uddee",
"\ud83c\uddf3\ud83c\uddea",
"\ud83c\uddf3\ud83c\uddec",
"\ud83c\uddf3\ud83c\uddfa",
"\ud83c\uddf3\ud83c\uddeb",
"\ud83c\uddf0\ud83c\uddf5",
"\ud83c\uddf2\ud83c\uddf5",
"\ud83c\uddf3\ud83c\uddf4",
"\ud83c\uddf4\ud83c\uddf2",
"\ud83c\uddf5\ud83c\uddf0",
"\ud83c\uddf5\ud83c\uddfc",
"\ud83c\uddf5\ud83c\uddf8",
"\ud83c\uddf5\ud83c\udde6",
"\ud83c\uddf5\ud83c\uddec",
"\ud83c\uddf5\ud83c\uddfe",
"\ud83c\uddf5\ud83c\uddea",
"\ud83c\uddf5\ud83c\udded",
"\ud83c\uddf5\ud83c\uddf3",
"\ud83c\uddf5\ud83c\uddf1",
"\ud83c\uddf5\ud83c\uddf9",
"\ud83c\uddf5\ud83c\uddf7",
"\ud83c\uddf6\ud83c\udde6",
"\ud83c\uddf7\ud83c\uddea",
"\ud83c\uddf7\ud83c\uddf4",
"\ud83c\uddf7\ud83c\uddfa",
"\ud83c\uddf7\ud83c\uddfc",
"\ud83c\uddfc\ud83c\uddf8",
"\ud83c\uddf8\ud83c\uddf2",
"\ud83c\uddf8\ud83c\uddf9",
"\ud83c\uddf8\ud83c\udde6",
"\ud83c\uddf8\ud83c\uddf3",
"\ud83c\uddf7\ud83c\uddf8",
"\ud83c\uddf8\ud83c\udde8",
"\ud83c\uddf8\ud83c\uddf1",
"\ud83c\uddf8\ud83c\uddec",
"\ud83c\uddf8\ud83c\uddfd",
"\ud83c\uddf8\ud83c\uddf0",
"\ud83c\uddf8\ud83c\uddee",
"\ud83c\uddec\ud83c\uddf8",
"\ud83c\uddf8\ud83c\udde7",
"\ud83c\uddf8\ud83c\uddf4",
"\ud83c\uddff\ud83c\udde6",
"\ud83c\uddf0\ud83c\uddf7",
"\ud83c\uddf8\ud83c\uddf8",
"\ud83c\uddea\ud83c\uddf8",
"\ud83c\uddf1\ud83c\uddf0",
"\ud83c\udde7\ud83c\uddf1",
"\ud83c\uddf8\ud83c\udded",
"\ud83c\uddf0\ud83c\uddf3",
"\ud83c\uddf1\ud83c\udde8",
"\ud83c\uddf5\ud83c\uddf2",
"\ud83c\uddfb\ud83c\udde8",
"\ud83c\uddf8\ud83c\udde9",
"\ud83c\uddf8\ud83c\uddf7",
"\ud83c\uddf8\ud83c\uddff",
"\ud83c\uddf8\ud83c\uddea",
"\ud83c\udde8\ud83c\udded",
"\ud83c\uddf8\ud83c\uddfe",
"\ud83c\uddf9\ud83c\uddfc",
"\ud83c\uddf9\ud83c\uddef",
"\ud83c\uddf9\ud83c\uddff",
"\ud83c\uddf9\ud83c\udded",
"\ud83c\uddf9\ud83c\uddf1",
"\ud83c\uddf9\ud83c\uddec",
"\ud83c\uddf9\ud83c\uddf0",
"\ud83c\uddf9\ud83c\uddf4",
"\ud83c\uddf9\ud83c\uddf9",
"\ud83c\uddf9\ud83c\uddf3",
"\ud83c\uddf9\ud83c\uddf7",
"\ud83c\uddf9\ud83c\uddf2",
"\ud83c\uddf9\ud83c\udde8",
"\ud83c\uddfb\ud83c\uddee",
"\ud83c\uddf9\ud83c\uddfb",
"\ud83c\uddfa\ud83c\uddec",
"\ud83c\uddfa\ud83c\udde6",
"\ud83c\udde6\ud83c\uddea",
"\ud83c\uddec\ud83c\udde7",
"\ud83c\udff4\udb40\udc67\udb40\udc62\udb40\udc65\udb40\udc6e\udb40\udc67\udb40\udc7f",
"\ud83c\udff4\udb40\udc67\udb40\udc62\udb40\udc73\udb40\udc63\udb40\udc74\udb40\udc7f",
"\ud83c\udff4\udb40\udc67\udb40\udc62\udb40\udc77\udb40\udc6c\udb40\udc73\udb40\udc7f",
"\ud83c\uddfa\ud83c\uddf8",
"\ud83c\uddfa\ud83c\uddfe",
"\ud83c\uddfa\ud83c\uddff",
"\ud83c\uddfb\ud83c\uddfa",
"\ud83c\uddfb\ud83c\udde6",
"\ud83c\uddfb\ud83c\uddea",
"\ud83c\uddfb\ud83c\uddf3",
"\ud83c\uddfc\ud83c\uddeb",
"\ud83c\uddea\ud83c\udded",
"\ud83c\uddfe\ud83c\uddea",
"\ud83c\uddff\ud83c\uddf2",
"\ud83c\uddff\ud83c\uddfc",
"\ud83c\udde6\ud83c\udde8",
"\ud83c\udde7\ud83c\uddfb",
"\ud83c\udde8\ud83c\uddf5",
"\ud83c\uddea\ud83c\udde6",
"\ud83c\udde9\ud83c\uddec",
"\ud83c\udded\ud83c\uddf2",
"\ud83c\uddf2\ud83c\uddeb",
"\ud83c\uddf8\ud83c\uddef",
"\ud83c\uddf9\ud83c\udde6",
"\ud83c\uddfa\ud83c\uddf2",
"\ud83c\uddfa\ud83c\uddf3",
]
|
# Copyright (c) nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/vulnerablecode/
# The VulnerableCode software is licensed under the Apache License version 2.0.
# Data generated with VulnerableCode require an acknowledgment.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with VulnerableCode and provided on an 'AS IS' BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# VulnerableCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# VulnerableCode is a free software from nexB Inc. and others.
# Visit https://github.com/nexB/vulnerablecode/ for support and download.
import os
from unittest import TestCase
from vulnerabilities.data_source import Advisory
from vulnerabilities.data_source import Reference
from vulnerabilities.data_source import VulnerabilitySeverity
from vulnerabilities.importers.suse_scores import SUSESeverityScoreDataSource
from vulnerabilities.helpers import load_yaml
from vulnerabilities.severity_systems import ScoringSystem
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/suse_scores", "suse-cvss-scores.yaml")
class TestSUSESeverityScoreDataSource(TestCase):
    """Exercises SUSESeverityScoreDataSource.to_advisory against fixture data."""

    # Single reference URL shared by every expected advisory.
    SUSE_SCORES_URL = (
        "https://ftp.suse.com/pub/projects/security/yaml/suse-cvss-scores.yaml"
    )

    @staticmethod
    def _severity(identifier, name, url, notes, value):
        """Build a VulnerabilitySeverity with an inline ScoringSystem."""
        return VulnerabilitySeverity(
            system=ScoringSystem(
                identifier=identifier,
                name=name,
                url=url,
                notes=notes,
            ),
            value=value,
        )

    def _advisory(self, vulnerability_id, severities):
        """Build an Advisory holding one reference to the SUSE scores feed."""
        return Advisory(
            summary="",
            impacted_package_urls=[],
            resolved_package_urls=[],
            references=[
                Reference(
                    reference_id="",
                    url=self.SUSE_SCORES_URL,
                    severities=severities,
                )
            ],
            vulnerability_id=vulnerability_id,
        )

    def test_to_advisory(self):
        raw_data = load_yaml(TEST_DATA)
        cvss_v2_url = "https://www.first.org/cvss/v2/"
        cvss_v30_url = "https://www.first.org/cvss/v3-0/"
        cvss_v31_url = "https://www.first.org/cvss/v3-1/"
        expected_data = [
            self._advisory(
                "CVE-2004-0230",
                [
                    self._severity(
                        "cvssv2", "CVSSv2 Base Score", cvss_v2_url,
                        "cvssv2 base score",
                        "4.3",
                    ),
                    self._severity(
                        "cvssv2_vector", "CVSSv2 Vector", cvss_v2_url,
                        "cvssv2 vector, used to get additional info about nature and severity of vulnerability",  # nopep8
                        "AV:N/AC:M/Au:N/C:N/I:N/A:P",
                    ),
                    self._severity(
                        "cvssv3.1", "CVSSv3.1 Base Score", cvss_v31_url,
                        "cvssv3.1 base score",
                        "3.7",
                    ),
                    self._severity(
                        "cvssv3.1_vector", "CVSSv3.1 Vector", cvss_v31_url,
                        "cvssv3.1 vector, used to get additional info about nature and severity of vulnerability",  # nopep8
                        "CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:N/I:N/A:L",
                    ),
                ],
            ),
            self._advisory(
                "CVE-2003-1605",
                [
                    self._severity(
                        "cvssv3", "CVSSv3 Base Score", cvss_v30_url,
                        "cvssv3 base score",
                        "8.6",
                    ),
                    self._severity(
                        "cvssv3_vector", "CVSSv3 Vector", cvss_v30_url,
                        "cvssv3 vector, used to get additional info about nature and severity of vulnerability",  # nopep8
                        "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:C/C:H/I:N/A:N",
                    ),
                ],
            ),
        ]
        found_data = SUSESeverityScoreDataSource.to_advisory(raw_data)
        found_advisories = list(map(Advisory.normalized, found_data))
        expected_advisories = list(map(Advisory.normalized, expected_data))
        assert sorted(found_advisories) == sorted(expected_advisories)
|
<reponame>brzx/pydataloader
#!/usr/bin/python
# -*- coding: utf-8 -*-
import targetTools
import beatbox, pdb
import datetime
class SFDCTools(targetTools.TargetTools):
    """Salesforce (SFDC) target connector built on the beatbox partner API.

    All progress and errors are appended to ``log`` via ``AppendText``
    (presumably a wx text widget -- confirm against callers).
    """

    def __init__(self, credential, log):
        # credential: dict with "username", "password" and "url";
        # url == "test" selects the sandbox login endpoint.
        self.credential = credential
        self.log = log

    def _log_error(self, exc):
        """Append an exception message to the log, newline-terminated."""
        self.log.AppendText(str(exc))
        self.log.AppendText("\n")

    def getConnection(self):
        """Log in to Salesforce; return the beatbox client, or None on failure."""
        self.sf = beatbox._tPartnerNS
        self.svc = beatbox.PythonClient()
        beatbox.gzipRequest = False
        # Sandbox orgs authenticate against test.salesforce.com.
        if self.credential["url"] == "test":
            self.svc.serverUrl = self.svc.serverUrl.replace('login.', 'test.')
        try:
            self.svc.login(self.credential["username"], self.credential["password"])
            return self.svc
        except Exception as e:
            self._log_error(e)
            return None

    def validTarget(self, target):
        """Return True if ``target`` is a queryable sObject in the org."""
        soql = "select id from %s limit 1" % (target,)
        try:
            self.svc.query(soql)
            return True
        except Exception as e:
            self.log.AppendText("target table does not exists\n")
            self._log_error(e)
            return False

    def upsertData(self, data, tarTable, tarList, credential, keys):
        """Upsert rows of ``data`` into ``tarTable``.

        ``tarList`` names the target columns, positionally matching each row;
        ``keys["keyinfo"][0]`` is the external Id field used as the upsert key.
        ``credential`` is unused but kept for interface compatibility.
        """
        fieldList = self.svc.describeSObjects(tarTable)[0].fields.keys()
        # Map each requested column to the exact (case-correct) SFDC field name.
        fieldDict = {}
        for f in fieldList:
            for t in tarList:
                if f.upper() == t.upper():
                    fieldDict[t] = f
        key_field = keys["keyinfo"][0]
        if key_field.upper() not in {f.upper() for f in fieldList}:
            self.log.AppendText(
                "Skipped 'upsert' because the custom external Id field doesn't exist\n")
            # BUG FIX: previously this branch only logged and fell through, so
            # the upsert was still attempted and crashed with a KeyError when
            # looking up fieldDict[key_field] below.
            return
        self.log.AppendText("upsert\n")
        for row in data:
            upsertDict = {"type": tarTable}
            for n, t in enumerate(tarList):
                upsertDict[fieldDict[t]] = row[n]
            self.log.AppendText(str(upsertDict))
            self.log.AppendText("\n")
            try:
                ur = self.svc.upsert(fieldDict[key_field], upsertDict)
                self.log.AppendText(str(ur[0]))
                self.log.AppendText("\n")
            except Exception as e:
                self.log.AppendText("\n")
                self._log_error(e)
        self.log.AppendText("upsert data finish\n")

    def insertData(self, data, tarTable, tarList, credential):
        """Insert rows of ``data`` into ``tarTable``.

        ``tarList`` holds dicts with 'name' and 'type' keys; only 'text' and
        'date' (ISO ``YYYY-MM-DD``) columns are copied, other types are
        skipped.  ``credential`` is unused but kept for interface
        compatibility.
        """
        self.log.AppendText("insert\n")
        for row in data:
            insertDict = {"type": tarTable}
            for index, field in enumerate(tarList):
                name = field['name']
                ftype = field['type']
                if ftype == 'text':
                    insertDict[name] = row[index]
                elif ftype == 'date':
                    # Split the date string once instead of three times.
                    year, month, day = (int(part) for part in row[index].split('-'))
                    insertDict[name] = datetime.datetime(year, month, day)
                # Other column types are intentionally ignored.
            self.log.AppendText(str(insertDict))
            try:
                ur = self.svc.create([insertDict])
                self.log.AppendText(str(ur[0]))
                self.log.AppendText("\n")
            except Exception as e:
                self.log.AppendText("\n")
                self._log_error(e)
        self.log.AppendText("insert data finish\n")
if __name__ == "__main__":
    # Module is intended to be imported by the loader; no standalone behavior.
    pass
|
<gh_stars>1000+
import pytest
from schematics.models import Model
from schematics.types import IntType, StringType
from schematics.types.compound import ModelType, ListType
from schematics.exceptions import DataError
from schematics.util import ImportStringError
def test_simple_embedded_models():
    """A ModelType field accepts both raw dicts and model instances."""
    class Location(Model):
        country_code = StringType()

    class Player(Model):
        id = IntType()
        location = ModelType(Location)

    player = Player({"id": 1, "location": {"country_code": "US"}})
    assert player.id == 1
    assert player.location.country_code == "US"

    # Assigning an actual Location instance works just as well.
    player.location = Location({"country_code": "IS"})
    assert isinstance(player.location, Location)
    assert player.location.country_code == "IS"
def test_simple_embedded_models_is_none():
    """An unset ModelType field reads back as None."""
    class Location(Model):
        country_code = StringType()

    class Player(Model):
        id = IntType()
        location = ModelType(Location)

    player = Player({"id": 1})
    assert player.id == 1
    assert player.location is None
def test_simple_embedded_model_set_to_none():
    """Explicitly assigning None to a ModelType field is allowed."""
    class Location(Model):
        country_code = StringType()

    class Player(Model):
        id = IntType()
        location = ModelType(Location)

    player = Player({"id": 1})
    player.location = None
    assert player.id == 1
    assert player.location is None
def test_simple_embedded_model_is_none_within_listtype():
    """Absent sub-models inside a ListType element are left as None."""
    class QuestionResources(Model):
        type = StringType()

    class Question(Model):
        id = StringType()
        resources = ModelType(QuestionResources)

    class QuestionPack(Model):
        id = StringType()
        questions = ListType(ModelType(Question))

    pack = QuestionPack({"id": "1", "questions": [{"id": "1"}]})
    assert pack.questions[0].resources is None
def test_raises_validation_error_on_init_with_partial_submodel():
    """Validation surfaces required fields missing on an embedded model."""
    class User(Model):
        name = StringType(required=True)
        age = IntType(required=True)

    class Card(Model):
        user = ModelType(User)

    partial_user = User({'name': 'Arthur'})  # 'age' deliberately omitted
    card = Card({'user': partial_user})
    with pytest.raises(DataError):
        card.validate()
def test_model_type():
    """Raw dict data is converted into the declared model class."""
    class User(Model):
        name = StringType()

    class Card(Model):
        user = ModelType(User)

    card = Card({"user": {'name': u'Doggy'}})
    assert isinstance(card.user, User)
    assert card.user.name == "Doggy"
def test_equality_with_embedded_models():
    """Distinct instances with equal data compare equal, including sub-models."""
    class Location(Model):
        country_code = StringType()

    class Player(Model):
        id = IntType()
        location = ModelType(Location)

    first = Player({"id": 1, "location": {"country_code": "US"}})
    second = Player({"id": 1, "location": {"country_code": "US"}})

    # Different objects ...
    assert id(first.location) != id(second.location)
    # ... yet equal by value, both shallow and deep.
    assert first.location == second.location
    assert first == second
def test_default_value_when_embedded_model():
    """Field defaults on an embedded model apply during conversion."""
    class Question(Model):
        question_id = StringType(required=True)
        type = StringType(default="text")

    class QuestionPack(Model):
        question = ModelType(Question)

    pack = QuestionPack({"question": {"question_id": 1}})
    # The int id is coerced to a string, and the omitted 'type' gets its default.
    assert pack.question.question_id == "1"
    assert pack.question.type == "text"
def test_export_loop_with_subclassed_model():
    """Export keeps fields declared only on a subclass of the field's model."""
    class Asset(Model):
        file_name = StringType()

    class S3Asset(Asset):
        bucket_name = StringType()

    class Product(Model):
        title = StringType()
        asset = ModelType(Asset)  # declared as the base class

    product = Product({
        'title': 'baz',
        'asset': S3Asset({'bucket_name': 'assets_bucket', 'file_name': 'bar'}),
    })

    # The subclass-only field survives both export paths.
    assert 'bucket_name' in product.to_primitive()['asset']
    assert 'bucket_name' in product.to_native()['asset']
def test_conversion_error_recursive_overhead():
    # Regression test: failing conversion through a deeply nested chain of
    # ModelTypes must not re-instantiate models combinatorially.  Each
    # Recursive.__init__ bumps a shared counter; with 20 nesting levels the
    # total number of instantiations must stay small (< 25), not explode.
    conversions = [0]
    class Leaf(Model):
        pass
    next_model = Leaf
    data = 'not a mapping'  # invalid at the innermost level, forcing a DataError
    for i in range(20):
        class Recursive(Model):
            x = ModelType(next_model, required=True)
            def __init__(self, *args, **kwargs):
                super(type(self), self).__init__(*args, **kwargs)
                conversions[0] += 1
                # Fail fast inside the conversion if the overhead blows up.
                assert conversions[0] < 25
        next_model = Recursive
        data = {'x': data}
    with pytest.raises(DataError):
        next_model(data)
def test_mock_object():
    """ModelType.mock() produces a value even when the field is required."""
    class User(Model):
        name = StringType(required=True)
        age = IntType(required=True)

    field = ModelType(User, required=True)
    assert field.mock() is not None
def test_specify_model_by_name():
    # ModelType accepts the model's name as a string, which lets a class
    # reference itself before its own body has finished executing.  The class
    # name 'M' is the lookup key, so it must match the string literals below.
    class M(Model):
        to_one = ModelType('M')
        to_many = ListType(ModelType('M'))
        matrix = ListType(ListType(ModelType('M')))
    # Each string reference resolves back to M, at any nesting depth.
    assert M.to_one.model_class is M
    assert M.to_many.field.model_class is M
    assert M.matrix.field.field.model_class is M
def test_model_context_pass_to_type():
    # A context (plain dict or Context object) supplied at import/export time
    # is forwarded into each field's to_native/to_primitive hooks.
    from schematics.types import BaseType
    from schematics.datastructures import Context
    class CustomType(BaseType):
        def to_native(self, value, context=None):
            # Append the context-supplied suffix on import.
            suffix = context.suffix
            return str(value) + suffix
        def to_primitive(self, value, context=None):
            # Strip the suffix again on export, restoring the original value.
            suffix = context.suffix
            return value[:-len(suffix)]
    class Thing(Model):
        x = CustomType()
    context = {'suffix': 'z'}
    input = {'x': 'thingie'}
    thing = Thing(input, context=context)
    assert thing.x == 'thingiez'
    assert thing.to_primitive(context=context) == {'x': 'thingie'}
    # try it with a Context object
    model_context = Context(suffix='z!')
    thing2 = Thing(input, context=model_context)
    assert thing2.x == 'thingiez!'
    export_context = Context(suffix='z!')
    assert thing2.to_primitive(context=export_context) == {'x': 'thingie'}
    with pytest.raises(AttributeError):
        # can't reuse the same Context object as was used for model
        # TODO this may be unexpected to the uninitiated; a custom exception
        # could explain it better.
        thing2.to_primitive(context=model_context)
def test_model_app_data_pass_to_type():
    """app_data travels through the context into field conversion hooks."""
    from schematics.types import BaseType

    class SuffixType(BaseType):
        def to_native(self, value, context=None):
            # Append the app_data-supplied suffix on import.
            return str(value) + context.app_data['suffix']

        def to_primitive(self, value, context=None):
            # Strip it again on export.
            tail = context.app_data['suffix']
            return value[:-len(tail)]

    class Thing(Model):
        x = SuffixType()

    app_data = {'suffix': 'z'}
    raw = {'x': 'thingie'}
    thing = Thing(raw, app_data=app_data)
    assert thing.x == 'thingiez'
    assert thing.to_primitive(app_data=app_data) == {'x': 'thingie'}
class OuterModel:
    # Looked up by its dotted-path string in test_deep_string_search; the
    # class names here are part of that lookup and must not be renamed.
    class InnerModel(Model):
        test = StringType()
def test_deep_string_search():
    """ModelType resolves dotted-path strings, including nested classes."""
    class TestModel(Model):
        deep_model = ModelType('test_model_type.OuterModel.InnerModel')

    instance = TestModel({'deep_model': {'test': 'Abc'}})
    assert instance.validate() is None

    class TestModel2(Model):
        invalid_model = ModelType('a.c.d.e')

    # A dotted path that cannot be imported raises at conversion time.
    with pytest.raises(ImportStringError):
        TestModel2({'invalid_model': {'a': '1'}})
def test_recursive_string_self_reference():
    """A model can embed itself via its own name given as a string."""
    class RecursiveTestModel(Model):
        recursive_model = ModelType('RecursiveTestModel')
        test = StringType()

    instance = RecursiveTestModel({'recursive_model': {'test': 'Abc'}})
    assert instance.validate() is None
    assert instance.recursive_model.test == 'Abc'
def test_circular_string_reference():
    """Mutually referencing local models cannot be resolved by string lookup."""
    class TestModel1(Model):
        model2 = ModelType('TestModel2')
        name = StringType()

    class TestModel2(Model):
        model1 = ModelType('TestModel1')
        description = StringType()

    payload = {
        'name': 'Root',
        'model2': {'description': 'Test2', 'model1': {'name': 'Test1'}},
    }
    # TODO: we might want to support locals import late binding someday/somehow
    with pytest.raises(ImportStringError):
        TestModel1(payload)
|
<gh_stars>0
import imageio
from imgaug import augmenters as iaa
import math
import random
from tensorflow.keras.preprocessing.image import ImageDataGenerator
class Augmentor(object):
    """Applies one randomly chosen corruption (noise, dropout or blur) per image."""

    def __init__(self):
        # Build the imgaug pipeline once and reuse it for every image.
        self.function = self._get_augmentor()

    def _get_augmentor(self):
        # Exactly one of: additive noise (gaussian or salt & pepper),
        # coarse dropout, or gaussian blur.
        noise = iaa.OneOf([
            iaa.AdditiveGaussianNoise(scale=(0.04, 0.3)),
            iaa.SaltAndPepper(0.01),
        ])
        corruption = iaa.OneOf([
            noise,
            iaa.CoarseDropout(p=(0.02, 0.1)),
            iaa.GaussianBlur(sigma=(0.1, 2.0)),
        ])
        return iaa.Sequential([corruption], random_order=False)

    def process_image(self, image):
        """Run the augmentation pipeline on a single image."""
        return self.function(image=image)
class SubsetGenerator(object):
    """Pairs an augmented and an un-augmented keras image stream over one dataframe."""

    def __init__(self, original_datagen, augmented_datagen, dataframe, x_col, target_size,
                 color_mode="grayscale", class_mode=None, **kwargs):
        self.dataframe = dataframe
        self.x_col = x_col
        self.target_size = target_size
        self.color_mode = color_mode
        self.class_mode = class_mode

        # Both generators receive identical flow parameters so the two streams
        # stay aligned image-for-image (given matching seeds in kwargs).
        flow_params = dict(
            dataframe=self.dataframe,
            x_col=self.x_col,
            target_size=self.target_size,
            color_mode=self.color_mode,
            class_mode=self.class_mode,
            **kwargs
        )
        self.augmented = augmented_datagen.flow_from_dataframe(**flow_params)
        self.original = original_datagen.flow_from_dataframe(**flow_params)

    def __len__(self):
        return len(self.augmented)

    @property
    def as_tuple(self):
        """Iterator of (augmented_batch, original_batch) pairs."""
        return zip(self.augmented, self.original)

    @property
    def steps_per_epoch(self):
        # Whole batches per epoch (partial trailing batch is dropped).
        return self.original.n // self.original.batch_size

    @property
    def image_shape(self):
        return self.augmented.image_shape

    def reset(self):
        """Rewind both underlying generators to the start."""
        self.augmented.reset()
        self.original.reset()
class DataGenerator(object):
    """Builds train/validation/test SubsetGenerator pairs from a dataset.

    Parameters
    ----------
    dataset : object exposing a pandas DataFrame as ``dataset.df``
    unique_by : column whose unique values partition the split, so related
        rows never straddle train/valid/test
    path_column : dataframe column holding image file paths
    image_size : target (height, width) for loaded images
    augmentor : optional Augmentor; one is created lazily when omitted.
        (The previous signature used ``augmentor=Augmentor()``, a default
        evaluated once at import time and shared by every instance.)
    """

    _common_data_gen_params = dict(
        samplewise_center=False,
        samplewise_std_normalization=False,
        rescale=1. / 255
    )

    def __init__(self, dataset, unique_by, path_column, image_size, augmentor=None):
        if augmentor is None:
            augmentor = Augmentor()
        self.dataset = dataset
        self.unique_by = unique_by
        self.path_column = path_column
        self.image_size = image_size
        self.augmentor = augmentor
        self._augmented_datagen = ImageDataGenerator(
            **self._common_data_gen_params,
            preprocessing_function=augmentor.process_image
        )
        self._original_datagen = ImageDataGenerator(
            **self._common_data_gen_params
        )
        self.train_df, self.valid_df, self.test_df = self._split_dataset()
        self.train_generator = self._make_subset(self.train_df, batch_size=16)
        self.validation_generator = self._make_subset(self.valid_df, batch_size=16)
        # Deterministic evaluation stream: one image per batch, no
        # shuffling.  (The previous code passed the misspelled kwarg
        # ``suffle=False``, so the test set was silently shuffled with the
        # keras default shuffle=True.)
        self.test_generator = self._make_subset(self.test_df, batch_size=1,
                                                shuffle=False)

    def _make_subset(self, dataframe, **kwargs):
        """Build a SubsetGenerator over `dataframe` with the shared settings."""
        return SubsetGenerator(
            original_datagen=self._original_datagen,
            augmented_datagen=self._augmented_datagen,
            dataframe=dataframe,
            x_col=self.path_column,
            target_size=self.image_size,
            seed=1,
            **kwargs
        )

    @property
    def image_shape(self):
        return self.train_generator.image_shape

    def _split_dataset(self, seed=1234, shuffle=True, split_percentages=(0.66, 0.14, 0.2)):
        """Split rows into train/valid/test by unique `unique_by` values.

        Returns three shuffled DataFrames whose `unique_by` values are
        mutually disjoint.  The test partition absorbs rounding leftovers.
        """
        if not isinstance(split_percentages, (list, tuple)):
            raise ValueError("'split' argument must be a tuple or list")
        # Float-tolerant check: the previous exact ``!= 1.0`` comparison
        # could reject valid splits due to binary rounding.
        if abs(sum(split_percentages) - 1.0) > 1e-9:
            raise ValueError("'split' must sum up to 1.0")
        split_train, split_val, _ = split_percentages
        unique_choices = self.dataset.df[self.unique_by].unique().tolist()
        nb_unique = len(unique_choices)
        nb_train = math.floor(nb_unique * split_train)
        nb_val = math.floor(nb_unique * split_val)
        nb_test = nb_unique - nb_train - nb_val  # remainder absorbs rounding
        if shuffle:
            random.seed(seed)
            random.shuffle(unique_choices)
        result_choices = []
        start = 0
        for value in [nb_train, nb_val, nb_test]:
            end = start + value
            result_choices.append(
                unique_choices[start: end]
            )
            start = end
        train_choices, val_choices, test_choices = result_choices
        train_df = self.dataset.df.loc[self.dataset.df[self.unique_by].isin(train_choices)]
        val_df = self.dataset.df.loc[self.dataset.df[self.unique_by].isin(val_choices)]
        test_df = self.dataset.df.loc[self.dataset.df[self.unique_by].isin(test_choices)]
        return train_df.sample(frac=1, random_state=seed), val_df.sample(frac=1, random_state=seed), test_df.sample(
            frac=1, random_state=seed)

    def print_sample_augmented_images(self, num_augmentations=20):
        """Show one random dataset image next to `num_augmentations`
        augmented copies.  Intended for interactive/notebook use.

        The previous implementation referenced undefined module-level names
        (``get_augmentator``, ``df_images_filtered``, ``ia``,
        ``IMAGE_PATH_COLUMN_NAME``) and always raised NameError; it now
        draws from this instance's dataset and augmentor.
        """
        import imgaug as ia  # local: only this debug helper needs it
        seq = self.augmentor.function
        rnd_image = self.dataset.df.sample()
        image = imageio.imread(rnd_image[self.path_column].item())
        print("Original:")
        show_original = ia.imshow(image)
        images_aug = [seq(image=image) for _ in range(num_augmentations)]
        print("Augmented:")
        show_augmented = ia.imshow(ia.draw_grid(images_aug))
        return show_original, show_augmented
import numpy as np
import tensorflow as tf
#import keras
from tensorflow import keras
import pickle
import os
from utils import preprocess_flags
from utils import data_folder,kernel_folder,arch_folder,results_folder
def main(_):
    """Compute the GP log marginal likelihood of the training data and the
    resulting PAC-Bayes generalization bound, then append both to
    ``<results_folder><prefix>bounds.txt``.

    Configuration arrives entirely through TF app flags;
    ``globals().update(FLAGS)`` injects every flag (network, dataset, m,
    using_EP, confusion, total_samples, ...) as a module-level name, which
    is why many identifiers below have no local definition.
    """
    FLAGS = tf.compat.v1.app.flags.FLAGS.flag_values_dict()
    FLAGS = preprocess_flags(FLAGS)
    globals().update(FLAGS)  # every flag becomes a (pseudo-)global used below
    if init_dist != "gaussian":
        raise NotImplementedError("Initialization distributions other than Gaussian are not implemented for computing pac bayes bounds!")

    # One MPI rank per process; each rank pins itself to a GPU round-robin.
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    print(rank)
    if n_gpus>0:
        os.environ["CUDA_VISIBLE_DEVICES"]=str((rank)%n_gpus)

    # TF1-style session with growable GPU memory, registered as the default
    # Keras session.
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    #tf.enable_eager_execution(config=config)
    set_session = tf.compat.v1.keras.backend.set_session
    config.log_device_placement = False  # to log device placement (on which device the operation ran)
    sess = tf.compat.v1.Session(config=config)
    set_session(sess)  # set this TensorFlow session as the default session for Keras

    '''GET DATA'''
    from utils import load_data,load_model,load_kernel
    train_images,flat_train_images,ys,_,_ = load_data(FLAGS)
    X = flat_train_images
    ys2 = [[y] for y in ys]  # labels reshaped to a column vector
    Y = np.array(ys2)
    image_size = train_images.shape[1]
    number_channels = train_images.shape[-1]
    input_dim = flat_train_images.shape[1]
    print("compute probability and bound", network, dataset)

    if using_NTK:
        # Load both the empirical NTK (theta) for the posterior and the
        # plain kernel (K), which is still required for the bound.
        FLAGS["use_empirical_NTK"] = True
        theta = load_kernel(FLAGS)
        print(theta)
        #if using NTK, the above gets the NTK kernel, but we also need the non-NTK one to compute the bound!
        FLAGS["use_empirical_NTK"] = False
        K_pre = load_kernel(FLAGS)
        print(K_pre)
        if normalize_kernel:
            K_pre = K_pre/K_pre.max()
        K = kernel_mult*K_pre
        # Cached kernels may cover more points than the m in use; truncate.
        if theta.shape[0] >= m: #must have compute kernel for GP_train
            theta = theta[:m,:m]
        if K.shape[0] >= m: #must have compute kernel for GP_train
            K = K[:m,:m]
    else:
        K_pre = load_kernel(FLAGS)
        print(K_pre)
        if normalize_kernel:
            K_pre = K_pre/K_pre.max()
        K = kernel_mult*K_pre
        if K.shape[0] >= m: #must have compute kernel for GP_train
            K = K[:m,:m]

    #finding log marginal likelihood of data
    # NOTE(review): exactly one using_* flag is expected to be set; if none
    # is, logPU is never assigned and the bound computation raises NameError.
    if using_EP:
        from GP_prob.GP_prob_gpy2 import GP_prob
        logPU = GP_prob(K,X,Y, method="EP", using_exactPB=using_exactPB)
    elif using_Laplace:
        from GP_prob.GP_prob_gpy2 import GP_prob
        # from GP_prob.GP_prob_numpy import GP_prob
        logPU = GP_prob(K,X,Y,method="Laplace", using_exactPB=using_exactPB)
        # logPU = GP_prob(K,np.squeeze(Y))
    elif using_Laplace2:
        # from GP_prob.GP_prob_gpy import GP_prob
        from GP_prob.GP_prob_numpy import GP_prob #this gives different results because it uses a worse implementation of Laplace, by using a more Naive Newton method to find the maximum of the posterior
        # logPU = GP_prob(K,X,Y,method="Laplace")
        logPU = GP_prob(K,np.squeeze(Y))
    elif using_MC:
        from GP_prob.GP_prob_MC import GP_prob
        logPU = GP_prob(K,X,Y,FLAGS)
    elif using_regression:
        from GP_prob.GP_prob_regression import GP_prob
        # logPU = GP_prob(K,X,Y,sigma_noise=np.sqrt(total_samples/2))
        logPU = GP_prob(K,X,Y,sigma_noise=1.0)
    elif using_NTK:
        # from GP_prob.GP_prob_regression import GP_prob
        # logPU = GP_prob(K,X,Y,sigma_noise=np.sqrt(total_samples/2))
        # logPU = GP_prob(K,X,Y,sigma_noise=1.0, posterior="ntk")
        from GP_prob.GP_prob_ntk import GP_prob
        logPU = GP_prob(K,theta,X,Y,t=1e2)

    # Only the root rank turns logPU into a bound and writes results.
    if rank == 0:
        print(logPU)
        #compute PAC-Bayes bound
        delta = 2**-10  # confidence parameter of the PAC-Bayes theorem
        bound = (-logPU+2*np.log(total_samples)+1-np.log(delta))/total_samples
        bound = 1-np.exp(-bound)  # map the KL-style bound to an error rate
        print("pre-confusion-correction bound: ", bound)
        rho = confusion/(1.0+confusion)
        bound = (bound - 0.5*rho)/(1-rho) #to correct for the confusion changing the training data distribution (in training set, but not in test set)!
        print("Bound: ", bound)
        print("Accuracy bound: ", 1-bound)

        # Append a '#'-prefixed header line plus one tab-separated data row.
        useful_flags = ["dataset","boolfun_comp","boolfun", "network", "m","label_corruption","confusion", "number_layers", "sigmaw", "sigmab", "binarized", "pooling", "intermediate_pooling", "whitening", "training", "n_gpus", "kernel_mult", "normalize_kernel"]
        with open(results_folder+prefix+"bounds.txt","a") as file:
            file.write("#")
            for key in useful_flags:
                file.write("{}\t".format(key))
            file.write("bound")
            file.write("\t")
            file.write("logP")
            file.write("\n")
            for key in useful_flags:
                file.write("{}\t".format(FLAGS[key]))
            file.write("{}".format(bound))
            file.write("\t")
            file.write("{}".format(logPU))
            file.write("\n")
if __name__ == '__main__':
    # Register all CLI flags, then hand control to tf.app.run -> main().
    f = tf.compat.v1.app.flags
    from utils import define_default_flags
    define_default_flags(f)
    # Method-selection flags: exactly one using_* flag should be enabled.
    f.DEFINE_boolean('using_EP', False, "Whether to use Expectation Propagation method for computing probability")
    f.DEFINE_boolean('using_Laplace', False, "Whether to use Laplace method for computing probability")
    f.DEFINE_boolean('using_Laplace2', False, "Whether my numpy implementation of Laplace method for computing probability")
    f.DEFINE_boolean('using_regression', False, "Whether to use the exact relative entropy for MSE GP regression")
    f.DEFINE_boolean('using_NTK', False, "Whether to use the exact relative entropy for MSE GP regression, with NTK posterior")
    f.DEFINE_boolean('using_exactPB', False, "Whether using exact PAC-Bayes on approximate posterior rather than approximate PAC-Bayes on exact postierior")
    f.DEFINE_boolean('using_MC', False, "Whether to use Monte Carlo method for computing probability")
    f.DEFINE_boolean('normalize_kernel', False, "Whether to normalize the kernel (by dividing by max value) or not")
    f.DEFINE_integer('num_post_samples', int(1e5), "Number of approximate EP posterior samples in importance-sampling-based Monte Carlo estimation of marginal likelihood")
    f.DEFINE_float('cov_mult', 1.0, "Factor by which to multiply the variance of the approximate posterior, to focus the importance sampling more in the non-zero likelihood region, at the risk of biasing away from true posterior.")
    f.DEFINE_float('kernel_mult', 1.0, "Factor by which to multiply the kernel before computing approximate marginal likelihood")
    f.DEFINE_float('mean_mult', 1.0, "Factor by which to multiply the mean of the approximate posterior, to focus the importance sampling more in the non-zero likelihood region, at the risk of biasing away from true posterior.")
    tf.compat.v1.app.run()  # parses argv into flags and calls main()
|
<reponame>openprocurement/openprocurement.auctions.flash<gh_stars>0
# -*- coding: utf-8 -*-
import unittest
from openprocurement.auctions.core.tests.base import snitch
from openprocurement.auctions.core.tests.blanks.chronograph_blanks import (
# AuctionSwitchAuctionResourceTest
switch_to_auction,
# AuctionSwitchUnsuccessfulResourceTest
switch_to_unsuccessful,
# AuctionComplaintSwitchResourceTest
switch_to_pending,
switch_to_complaint,
# AuctionAwardComplaintSwitchResourceTest
switch_to_pending_award,
switch_to_complaint_award,
)
from openprocurement.auctions.flash.tests import fixtures
from openprocurement.auctions.flash.tests.base import (
BaseAuctionWebTest, test_lots, test_bids, test_organization
)
from openprocurement.auctions.flash.tests.blanks.chronograph_blanks import (
# AuctionSwitchtenderingResourceTest
switch_to_tendering_by_enquiryPeriod_endDate,
switch_to_tendering_by_auctionPeriod_startDate,
switch_to_tendering_auctionPeriod,
# AuctionSwitchQualificationResourceTest
switch_to_qualification,
)
class AuctionSwitchtenderingResourceTest(BaseAuctionWebTest):
    """Chronograph switches of an auction into the tendering state."""
    test_switch_to_tendering_by_enquiryPeriod_endDate = \
        snitch(switch_to_tendering_by_enquiryPeriod_endDate)
    test_switch_to_tendering_by_auctionPeriod_startDate = \
        snitch(switch_to_tendering_by_auctionPeriod_startDate)
    test_switch_to_tendering_auctionPeriod = \
        snitch(switch_to_tendering_auctionPeriod)


class AuctionSwitchQualificationResourceTest(BaseAuctionWebTest):
    """Switch from tendering to qualification (only the first test bid)."""
    initial_status = 'active.tendering'
    initial_bids = test_bids[:1]
    test_switch_to_qualification = snitch(switch_to_qualification)


class AuctionSwitchAuctionResourceTest(BaseAuctionWebTest):
    """Switch from tendering to the auction stage (all test bids)."""
    initial_status = 'active.tendering'
    initial_bids = test_bids
    test_switch_to_auction = snitch(switch_to_auction)


class AuctionSwitchUnsuccessfulResourceTest(BaseAuctionWebTest):
    """Switch from tendering without bids to unsuccessful."""
    initial_status = 'active.tendering'
    test_switch_to_unsuccessful = snitch(switch_to_unsuccessful)


class AuctionLotSwitchQualificationResourceTest(
    AuctionSwitchQualificationResourceTest
):
    """Lot-aware variant of the qualification switch tests."""
    initial_lots = test_lots


class AuctionLotSwitchAuctionResourceTest(AuctionSwitchAuctionResourceTest):
    """Lot-aware variant of the auction switch tests."""
    initial_lots = test_lots


class AuctionLotSwitchUnsuccessfulResourceTest(
    AuctionSwitchUnsuccessfulResourceTest
):
    """Lot-aware variant of the unsuccessful switch tests."""
    initial_lots = test_lots
class AuctionComplaintSwitchResourceTest(BaseAuctionWebTest):
    """Complaint status switches (to pending / to complaint) on an auction."""
    initial_organization = test_organization
    test_switch_to_pending = snitch(switch_to_pending)
    test_switch_to_complaint = snitch(switch_to_complaint)


class AuctionLotComplaintSwitchResourceTest(
    AuctionComplaintSwitchResourceTest
):
    """Lot-aware variant of the complaint switch tests."""
    initial_lots = test_lots
class AuctionAwardComplaintSwitchResourceTest(BaseAuctionWebTest):
    """Award-complaint status switches; setUp creates the award fixture."""
    initial_status = 'active.auction'
    initial_bids = test_bids
    initial_organization = test_organization

    def setUp(self):
        super(AuctionAwardComplaintSwitchResourceTest, self).setUp()
        # Create award
        fixtures.create_award(self)

    test_auction_award_complaint_switch_to_pending = snitch(
        switch_to_pending_award
    )
    test_auction_award_complaint_switch_to_complaint = snitch(
        switch_to_complaint_award
    )
class AuctionLotAwardComplaintSwitchResourceTest(
    AuctionAwardComplaintSwitchResourceTest
):
    """Lot-aware variant of the award-complaint switch tests."""
    initial_status = 'active.auction'
    initial_lots = test_lots

    # No setUp override: the parent setUp already runs
    # BaseAuctionWebTest.setUp() and fixtures.create_award(self).
    # The previous copy-pasted override called
    # super(AuctionAwardComplaintSwitchResourceTest, self).setUp() — naming
    # the PARENT class in super(), which skipped the parent's setUp and
    # re-created the award by hand.  "Fixing" only the super() target would
    # have created the award twice; simply inheriting setUp yields the same
    # fixture state without the trap.
def suite():
    """Collect every chronograph test case into a single unittest suite."""
    cases = (
        AuctionSwitchtenderingResourceTest,
        AuctionSwitchQualificationResourceTest,
        AuctionSwitchAuctionResourceTest,
        AuctionSwitchUnsuccessfulResourceTest,
        AuctionLotSwitchQualificationResourceTest,
        AuctionLotSwitchAuctionResourceTest,
        AuctionLotSwitchUnsuccessfulResourceTest,
        AuctionComplaintSwitchResourceTest,
        AuctionLotComplaintSwitchResourceTest,
        AuctionAwardComplaintSwitchResourceTest,
        AuctionLotAwardComplaintSwitchResourceTest,
    )
    tests = unittest.TestSuite()
    for case in cases:
        tests.addTest(unittest.makeSuite(case))
    return tests
# Run the aggregated suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
import sys
import pandas as pd
import numpy as np
import rpy2
import rpy2.robjects
from rpy2.robjects import numpy2ri
# Let rpy2 pass numpy arrays to R transparently, then load the R 'Peptides'
# package used by main() for the physico-chemical descriptors.
numpy2ri.activate()
r = rpy2.robjects.r
r.library('Peptides')
# Amino-acid groupings for the CTD-D positional encodings:
GROUPS_SA = ['ALFCGIVW', 'RKQEND', 'MSPTHY'] #solventaccess
GROUPS_HB = ['ILVWAMGT', 'FYSQCN', 'PHKEDR'] # HEIJNE&BLOMBERG1979
def fasta_iter(fname):
    """Yield (identifier, sequence) records from a FASTA file.

    The identifier is the first whitespace-delimited token of the header
    line (without the leading '>'); the sequence is the concatenation of
    all following lines up to the next header.

    Blank lines are skipped: the previous implementation indexed
    ``line[0]`` and raised IndexError on empty lines, a common trailing
    artifact in FASTA files.
    """
    header = None
    chunks = []
    with open(fname) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # tolerate blank/whitespace-only lines
            if line.startswith('>'):
                if header is not None:
                    yield header, ''.join(chunks)
                header = line[1:].split()[0]
                chunks = []
            else:
                chunks.append(line)
    if header is not None:
        yield header, ''.join(chunks)
def ctdd(sequence, groups):
    """CTD-D 'first residue' descriptor.

    For every group, report the position of the first residue belonging to
    that group as a percentage of the sequence length (1-based), or 0 when
    no residue of the group occurs.
    """
    descriptor = []
    length = len(sequence)
    for members in groups:
        position = 0
        for idx, residue in enumerate(sequence, start=1):
            if residue in members:
                position = idx / length * 100
                break
        descriptor.append(position)
    return descriptor
def main(args):
    """Compute physico-chemical features for every FASTA record and write
    them as a tab-separated table.

    args is argv-style: args[1] = input FASTA path, args[2] = output TSV.
    Columns combine descriptors from the R 'Peptides' package (amino-acid
    composition classes, charge, pI, indices, hydrophobicity, ...) with the
    six CTD-D positional encodings from GROUPS_SA + GROUPS_HB.
    """
    if len(args) < 3:
        sys.stderr.write("This is an internal FACS script and is not meant to be used independently")
        sys.exit(1)
    ifile = args[1]
    ofile = args[2]
    # Six residue groups: 3 solvent-accessibility + 3 Heijne&Blomberg classes.
    groups = [set(g) for g in (GROUPS_SA+GROUPS_HB)]
    seqs = []
    headers = []
    encodings = []
    for h,seq in fasta_iter(ifile):
        seqs.append(seq)
        headers.append(h)
        encodings.append(ctdd(seq, groups))
    # We can do this inside the loop so that we are not forced to pre-load all
    # the sequences into memory. However, it becomes much slower
    rpy2.robjects.globalenv['seq'] = seqs
    aaComp = r('aaComp(seq)')
    # One R round-trip computes all scalar descriptors per sequence.
    rfeatures = r('''
    ch <- charge(seq=seq, pH=7, pKscale="EMBOSS")
    pI <- pI(seq=seq, pKscale="EMBOSS")
    aIndex <- aIndex(seq=seq)
    instaIndex <- instaIndex(seq=seq)
    boman <- boman(seq=seq)
    hydrophobicity <- hydrophobicity(seq=seq, scale="Eisenberg")
    hmoment <- hmoment(seq=seq, angle=100, window=11)
    cbind(ch, pI, aIndex, instaIndex, boman, hydrophobicity, hmoment)
    ''')
    aaComp = np.array([np.array(v) for v in aaComp])
    # Keep the second column of each per-sequence aaComp table — presumably
    # the percentage column rather than the raw count; TODO confirm against
    # the Peptides::aaComp documentation.
    aaComp = aaComp[:,:,1]
    features = np.hstack([aaComp, rfeatures, encodings])
    # The column names must match those in the saved model
    features = pd.DataFrame(features, index=headers, columns=[
        "tinyAA",
        "smallAA",
        "aliphaticAA",
        "aromaticAA",
        "nonpolarAA",
        "polarAA",
        "chargedAA",
        "basicAA",
        "acidicAA",
        "charge",
        "pI",
        "aindex",
        "instaindex",
        "boman",
        "hydrophobicity",
        "hmoment",
        "SA.G1.residue0",
        "SA.G2.residue0",
        "SA.G3.residue0",
        "hb.Group.1.residue0",
        "hb.Group.2.residue0",
        "hb.Group.3.residue0",
    ])
    features.insert(0, 'group', 'Unk')      # placeholder class label column
    features.insert(0, 'sequence', seqs)    # raw sequence as first column
    features.to_csv(ofile, sep='\t', index_label='access')
if __name__ == '__main__':
    # argv[1] = input FASTA, argv[2] = output TSV (validated inside main()).
    main(sys.argv)
|
<reponame>trer/bombots
import os
import pygame as pg
class TexMan: # Texture Manager
    """Loads the sprite sheet and slices it into ready-to-blit surfaces.

    All sprites come from res/bb_sprites.png, a grid of 32x32 tiles, and
    are scaled to the environment's tile size (``scale``) at load time.
    """
    def __init__(self, scale):
        # Resolve the sheet relative to this module so the cwd doesn't matter.
        mod_dir = os.path.dirname(__file__)
        filename = os.path.join(mod_dir, 'res', 'bb_sprites.png')
        self.spr = pg.image.load(filename).convert_alpha()
        self.src_scale = 32 # Determined by sheet
        self.dst_scale = scale # Determined by environment
        # [TODO: Make this into a dictionary, add animation system]
        self.spr_floor = self.copy_spr((5, 0))
        self.spr_box = self.copy_spr((5, 1))
        self.spr_wall = self.copy_spr((6, 0))
        # Bot sprites keyed by color, then by facing (n/s/e/w).
        self.spr_bot = {
            'red' : {
                'n' : self.copy_spr((0, 1)),
                's' : self.copy_spr((0, 0)),
                'e' : self.copy_spr((0, 2)),
                'w' : self.copy_spr((0, 3))
            },
            'green' : {
                'n' : self.copy_spr((1, 1)),
                's' : self.copy_spr((1, 0)),
                'e' : self.copy_spr((1, 2)),
                'w' : self.copy_spr((1, 3))
            },
            'blue' : {
                'n' : self.copy_spr((2, 1)),
                's' : self.copy_spr((2, 0)),
                'e' : self.copy_spr((2, 2)),
                'w' : self.copy_spr((2, 3))
            },
            'yellow' : {
                'n' : self.copy_spr((3, 1)),
                's' : self.copy_spr((3, 0)),
                'e' : self.copy_spr((3, 2)),
                'w' : self.copy_spr((3, 3))
            }
        }
        self.spr_pop_ext = self.copy_spr((5, 2))
        self.spr_pop_num = self.copy_spr((5, 3))
        # Four bomb frames from column 4 — presumably a fuse animation;
        # TODO confirm against the renderer.
        self.spr_bomb = [
            self.copy_spr((4, 0)),
            self.copy_spr((4, 1)),
            self.copy_spr((4, 2)),
            self.copy_spr((4, 3))
        ]
        # Explosion tiles keyed by segment: ends (n/s/e/w), beams (h/v),
        # and the cross-section (x).
        self.spr_fire = {
            'n' : self.copy_spr((7, 0)),
            's' : self.copy_spr((7, 3)),
            'e' : self.copy_spr((6, 3)),
            'w' : self.copy_spr((6, 1)),
            'h' : self.copy_spr((6, 2)),
            'v' : self.copy_spr((7, 2)),
            'x' : self.copy_spr((7, 1))
        }
        # Same sheet tiles as the spr_fire dict above, exposed as individual
        # attributes as well.
        self.spr_fire_n = self.copy_spr((7, 0)) # North end
        self.spr_fire_s = self.copy_spr((7, 3)) # South end
        self.spr_fire_e = self.copy_spr((6, 3)) # East end
        self.spr_fire_w = self.copy_spr((6, 1)) # West end
        self.spr_fire_h = self.copy_spr((6, 2)) # Horizontal beam
        self.spr_fire_v = self.copy_spr((7, 2)) # Vertical beam
        self.spr_fire_x = self.copy_spr((7, 1)) # Cross-section

    # Copy specific sprite from sheet to surface (pos is sheet index, not in pixels)
    def copy_spr(self, pos):
        """Cut tile (col, row) out of the sheet and scale it to dst_scale."""
        spr = pg.Surface((self.src_scale, self.src_scale), pg.SRCALPHA)
        spr.blit(self.spr, (0, 0, self.src_scale, self.src_scale),
                 (self.src_scale * pos[0], self.src_scale * pos[1],
                  self.src_scale, self.src_scale))
        return pg.transform.scale(spr, (self.dst_scale, self.dst_scale))
|
<filename>squall/routing/path.py
import inspect
import re
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Tuple
from urllib.parse import urljoin
from squall import convertors, params
PARAM_REGEX = re.compile("{([a-zA-Z_][a-zA-Z0-9_]*)(:[a-zA-Z_][a-zA-Z0-9_]*)?}")
class Path:
    """A URL path template bound to its handler callable.

    Parses ``{name}`` / ``{name:convertor}`` placeholders and reconciles
    convertor aliases declared in the path with the handler's parameter
    annotations (via the ``convertors`` registry).
    """

    __slots__ = ["path", "handler"]

    def __init__(self, path: str, handler: Callable[..., Any]) -> None:
        self.path = path
        self.handler = handler

    def strip_trailing_slash(self) -> None:
        """Strip trailing slash if path differ form '/'
        >>> p = Path("/my/route/", lambda: None)
        >>> assert p.path == "/my/route/"
        >>> p.strip_trailing_slash()
        >>> assert p.path == "/my/route"
        """
        if self.path != "/":
            self.path = self.path.rstrip("/")

    def append_left(self, prefix: str) -> None:
        """Prepend provided prefix to the path
        :param prefix: Prefix path to add
        >>> p = Path("/my/route", lambda: None)
        >>> assert p.path == "/my/route"
        >>> p.append_left("/api/v1")
        >>> assert p.path == "/api/v1/my/route"
        """
        if not prefix.endswith("/"):
            prefix += "/"
        self.path = urljoin(prefix, self.path.strip("/"))

    @property
    def path_params(self) -> List[Tuple[str, Optional[str]]]:
        """Returns dynamic path parameters and their path-declared convertor aliases

        Raises ValueError when the same placeholder name appears twice,
        since duplicate names would make routing ambiguous.
        >>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
        >>> assert p.path_params == [
        >>>     ("user_id", "int"),
        >>>     ("note", "uuid"),
        >>>     ("type", None)
        >>> ]
        """
        result, names = [], []
        for param in PARAM_REGEX.finditer(self.path):
            name, suffix = param.groups("")
            names.append(name)
            # suffix still carries the leading ':' from the regex group.
            convertor = suffix.lstrip(":") if suffix else None
            result.append((name, convertor))
        if duplicates := [i for i, cnt in Counter(names).items() if cnt > 1]:
            raise ValueError(
                f'Path "{self.path}" contains '
                f"duplicate params: {', '.join(duplicates)}"
            )
        return result

    @property
    def schema_path(self) -> str:
        """Returns simplified path without convertor aliases
        >>> p = Path("/user/{user_id:int}/notes/{note:uuid}/type/{type}", lambda: None)
        >>> assert p.schema_path == "/user/{user_id}/notes/{note}/type/{type}"
        """
        result = self.path
        for match in PARAM_REGEX.finditer(self.path):
            param_name, convertor_type = match.groups("")
            result = result.replace(
                "{" f"{param_name}{convertor_type}" "}",
                "{" f"{param_name}" "}",
            )
        return result

    @property
    def router_path(self) -> str:
        """Returns path with detailed convertors aliases information.
        Also uses handler annotations for lookup.
        Used for exact pattern registration in squall-router library
        >>> async def my_handler(user_id: int, note): pass
        >>>
        >>> p = Path("/user/{user_id}/notes/{note:uuid}", my_handler)
        >>> assert p.router_path == "/user/{user_id:int}/notes/{note:uuid}"
        """
        result = self.schema_path
        from_handler = self.get_path_params_from_handler()
        for param_name, convertor_name in self.path_params:
            # Path-declared alias wins; otherwise fall back to the alias
            # derived from the handler's annotation.
            if convertor_name := convertor_name or from_handler.get(param_name):
                result = result.replace(
                    "{" f"{param_name}" "}",
                    "{" f"{param_name}:{convertor_name}" "}",
                )
        return result

    def get_path_params_from_handler(self) -> Dict[str, Optional[str]]:
        """Returns handler parameters affiliated with path.

        Raises ValueError when an annotation disagrees with the
        path-declared convertor, or when an annotated type has no
        registered convertor.
        >>> from uuid import UUID
        >>>
        >>> async def my_handler(user_id: int, note: UUID): pass
        >>>
        >>> p = Path("/user/{user_id}/notes/{note}", my_handler)
        >>> assert p.get_path_params_from_handler() == {
        >>>     "user_id": "int",
        >>>     "note": "uuid"
        >>> }
        """
        results = {}
        path_params = dict(self.path_params)
        for k, v in inspect.signature(self.handler).parameters.items():
            name = k
            if isinstance(v.default, params.Path):
                # An explicit Path(...) default may rename the parameter.
                if alias := v.default.alias:
                    name = alias
            elif v.default is v.empty:
                name = k
            if name not in path_params:
                continue  # not a path parameter (query/body/etc.)
            validate = path_params[name]
            if v.annotation != v.empty:
                if convertor := convertors.database.get_by_type(v.annotation):
                    if validate is None:
                        validate = convertor.alias
                    elif convertor.alias != path_params[name]:
                        raise ValueError(
                            f"Parameter {name} have different annotation and convertor types: "
                            f"{convertor.alias} != {path_params[name]}"
                        )
                else:
                    raise ValueError(
                        f"Parameter `{name}` have unknown convertor type: {v.annotation}"
                    )
            results[name] = validate
        return results
|
<reponame>KnowingNothing/akg-test
from collections import namedtuple
import os
import logging
def get_block_str_from_config(config: namedtuple):
    """Join the config's block_x/block_y/block_z values into a
    space-separated string (with trailing space); absent axes are skipped."""
    fields = getattr(config, "_fields")
    parts = []
    for axis in ("block_x", "block_y", "block_z"):
        if axis in fields:
            parts.append(str(getattr(config, axis)) + " ")
    return "".join(parts)
def get_thread_str_from_config(config: namedtuple):
    """Join the config's thread_x/thread_y/thread_z values into a
    space-separated string (with trailing space); absent axes are skipped."""
    fields = getattr(config, "_fields")
    parts = []
    for axis in ("thread_x", "thread_y", "thread_z"):
        if axis in fields:
            parts.append(str(getattr(config, axis)) + " ")
    return "".join(parts)
def get_parallel_build_num():
    """Return the number of parallel build workers.

    Reads BUILD_PARALLEL_NUM from the environment; falls back to 1 when it
    is unset, empty, or not a valid integer.  (The previous version caught
    NameError, which ``int()`` never raises — a malformed value crashed
    with an uncaught ValueError.)
    """
    raw = os.environ.get('BUILD_PARALLEL_NUM')
    if not raw:
        return 1
    try:
        return int(raw.lower())
    except ValueError as e:
        logging.error(e)
        return 1
def get_available_gpu_num():
    """Return the list of usable GPU device ids.

    Parses the comma-separated USE_GPU_DEVICES environment variable;
    defaults to ``[0]`` when unset or malformed.  (The previous version
    caught NameError — which cannot occur here — and returned the scalar 1
    on error, inconsistent with the list return type.)
    """
    raw = os.environ.get('USE_GPU_DEVICES')
    if not raw:
        return [0, ]
    try:
        return [int(dev) for dev in raw.split(",")]
    except ValueError as e:
        logging.error(e)
        return [0, ]
def get_real_attr(value, key, need_tune_json, need_tune_keys):
    """Map a tuning-space index back to the real attribute value.

    When `key` is tunable, `value` indexes into
    ``need_tune_json[key]['options']`` and the selected option is decoded
    according to the declared dtype; otherwise `value` is returned as-is.

    Raises TypeError when the option's actual type disagrees with the
    declared dtype, or when the dtype itself is unsupported (the previous
    version silently returned None for unknown dtypes).
    """
    if key not in need_tune_keys:
        return value
    spec = need_tune_json[key]
    dtype = spec['dtype']
    option = spec['options'][value]
    if dtype == "bool":
        # Boolean options are stored as the strings "true"/"false".
        text = option.lower()
        if text == "true":
            return True
        if text == "false":
            return False
        raise TypeError("Wrong boolean type, please check json file")
    if dtype == "str":
        if isinstance(option, str):
            return option
        raise TypeError("Wrong str type, please check json file")
    if dtype == "int":
        if isinstance(option, int):
            return option
        raise TypeError("Wrong int type, please check json file")
    raise TypeError("Unsupported dtype '{}', please check json file".format(dtype))
def merge_attrs(attrs, config, need_tune_json):
    """Return a new attrs namedtuple with the tuning `config` folded in.

    Builds the 'dim' / 'bind_block' / 'bind_thread' strings from the
    config's tiling_* / block_* / thread_* fields and overwrites every
    tunable key listed in need_tune_json with its decoded option value
    (via get_real_attr).

    Assumes `attrs` already has 'dim', 'bind_block', 'bind_thread' and all
    need_tune_json keys as fields — the result is rebuilt positionally
    with type(attrs)(*values).
    """
    # Tiling values in field-declaration order.
    tiling = [getattr(config, name) for name in getattr(
        config, '_fields') if name.startswith('tiling')]
    dim_str = ''
    d_config = config._asdict()
    d_attrs = attrs._asdict()
    # 2-D tiling fields contain two underscores (e.g. tiling_0_1);
    # checking the first tiling field is enough to classify the config.
    is_2d_tiling = False
    for name in getattr(config, '_fields'):
        if name.startswith('tiling'):
            if name.count("_") == 2:
                is_2d_tiling = True
            break
    for i, element in enumerate(tiling):
        if is_2d_tiling:
            # Two values per axis: emit "0 <axis> <v1> <v2> ".
            if i % 2 == 0:
                dim_str += "0 " + str(i//2) + " "
            dim_str += str(element) + " "
        else:
            # 1d tiling
            dim_str += "0 " + str(i) + " " + str(element) + " 1 "
    # add block, thread information
    block = [str(getattr(config, name)) for name in getattr(
        config, '_fields') if name.startswith('block')]
    bind_block_str = ' '.join(block)
    thread = [str(getattr(config, name)) for name in getattr(
        config, '_fields') if name.startswith('thread')]
    bind_thread_str = ' '.join(thread)
    d_attrs['dim'] = dim_str
    d_attrs['bind_block'] = bind_block_str
    d_attrs['bind_thread'] = bind_thread_str
    need_tune_keys = need_tune_json.keys()
    for key in need_tune_keys:
        d_attrs[key] = d_config[key]
    # make a new attrs with config info
    attrs_type = type(attrs)
    config_list = [get_real_attr(d_attrs[k],k,need_tune_json, need_tune_keys) for k in d_attrs]
    new_attrs = attrs_type(*config_list)
    return new_attrs
def get_skip_configs_from_log(skip_configs_log):
    """Collect already-measured configs from a '|'-delimited log file.

    Each log line's second '|'-separated field is a config string; the
    returned set de-duplicates them.  An empty path yields an empty set.
    """
    skip_config_set = set()
    if skip_configs_log != "":
        with open(skip_configs_log, 'r') as log_file:
            skip_config_set = {
                line.split("|")[1].strip() for line in log_file
            }
        print("SKIP CONFIGS NUMBER:", len(skip_config_set))
    return skip_config_set
def get_tuning_attrs_from_json(tuning_attrs_json):
    """Load a tuning-attribute description and enumerate its search space.

    Returns (keys, need_tune_spaces, json_string) where need_tune_spaces is
    the full cartesian product of option indices, one list per combination,
    in lexicographic order over the keys.  An empty path yields
    ([], [[]], {}).
    """
    import json
    from itertools import product

    json_string = dict()
    if tuning_attrs_json != "":
        with open(tuning_attrs_json, 'r') as fh:
            json_string = json.load(fh)
    keys = list(json_string.keys())
    # One index range per key; product() reproduces the original
    # grow-by-appending enumeration order exactly.
    option_ranges = [range(len(json_string[k]['options'])) for k in keys]
    need_tune_spaces = [list(combo) for combo in product(*option_ranges)]
    return (keys, need_tune_spaces, json_string)
if __name__ == "__main__":
    # Smoke test for the helpers above.
    file_name = "tuning_attrs_descs/reduce_tuning_attrs_desc.json"
    # get_tuning_attrs_from_json returns a 3-tuple (keys, spaces, raw json);
    # the previous 2-name unpacking raised ValueError before printing
    # anything.
    keys, need_tune_spaces, need_tune_json = get_tuning_attrs_from_json(file_name)
    print(keys)
    print(need_tune_spaces)
import json
import math
import pandas as pd
from scipy import stats
from lib.settings import DATA_DIR
from lib.characters import ALL_NAMES
from lib.episodes import getSequentialEpisodeNumber
# Load the scraped comment table plus the per-comment sentiment and
# character-mention tables.
raw_comments = pd.read_csv(DATA_DIR / "comments.csv")
raw_mentions = pd.read_csv(DATA_DIR / "character-mentions.csv")
raw_sentiments = pd.read_csv(DATA_DIR / "comment-sentiments.csv")

# Keep only the join keys from the comments table.
partial_comments = raw_comments[["ID", "Season", "Episode"]]

# One sentiment row per comment (1:1); many mention rows per comment (1:m).
comments = pd.merge(
    partial_comments,
    raw_sentiments,
    left_on="ID",
    right_on="Comment ID",
    validate="1:1",
)
mentions = pd.merge(
    partial_comments, raw_mentions, left_on="ID", right_on="Comment ID", validate="1:m"
)

# Mean compound sentiment, sample size, and standard error per group.
agg_columns = {
    "mean": ("Compound Score", "mean"),
    "n": ("Compound Score", "count"),
    "err": ("Compound Score", "sem"),
}
agg_scores_overall = comments.groupby(["Season", "Episode"]).agg(**agg_columns)
agg_scores_by_character = mentions.groupby(["Name", "Season", "Episode"]).agg(
    **agg_columns
)
# Total character mentions per episode, used later for per-character
# mention proportions.
total_mentions_by_episode = agg_scores_by_character.groupby(
    ["Season", "Episode"]
).n.sum()

# Setup the dict to collect the mean scores as lists of dicts
means_data = {"overall": []}
for name in ALL_NAMES:
    means_data[name] = []
def create_means_entry(season, episode, mean, n, err):
    """Build one chart point for an episode's mean sentiment score.

    Returns None when fewer than 30 comments back the mean (too noisy to
    plot).  ``lower``/``upper`` bound a confidence interval around the mean
    using the one-sided 95% t critical value times the standard error.
    """
    if n < 30:
        return None
    half_width = stats.t.ppf(0.95, n - 1) * err
    sequential_episode = getSequentialEpisodeNumber(season, episode)
    return {
        "season": season,
        "episode": episode,
        "x": sequential_episode,
        "y": mean,
        "n": n,
        "lower": mean - half_width,
        "upper": mean + half_width,
    }
# Collect overall scores from all comments
for row in agg_scores_overall.itertuples(name="Row"):
    entry = create_means_entry(row[0][0], row[0][1], *row[1:])
    if entry is None:  # episode had fewer than 30 scored comments
        continue
    means_data["overall"].append(entry)

# Collect per-character scores; each entry also records that character's
# share of the episode's total character mentions.
for row in agg_scores_by_character.itertuples(name="Row"):
    name = row[0][0]
    season = row[0][1]
    episode = row[0][2]
    n = row[2]
    # The previous guard tested ``means_data[name] == None`` — besides the
    # non-idiomatic comparison, the lookup itself raised KeyError for any
    # name missing from ALL_NAMES, so the guard could never fire.
    means_data.setdefault(name, [])
    entry = create_means_entry(season, episode, *row[1:])
    if entry is None:
        continue
    entry["proportion"] = n / (total_mentions_by_episode[(season, episode)])
    means_data[name].append(entry)

# Close the output file deterministically (the previous json.dump(open(...))
# leaked the file handle).
with open(DATA_DIR / "mean-scores.json", "w") as out_file:
    json.dump(means_data, out_file, indent=2)

# View the average score by character
avg_score_by_char = (
    mentions.groupby("Name")
    .agg(mean=("Compound Score", "mean"), n=("Compound Score", "count"))
    .sort_values("mean")
)

# Weight the characters' average scores by the natural log of the number of mentions
weighted_avg_scores_by_char = (
    avg_score_by_char["mean"] * avg_score_by_char["n"].apply(math.log)
).sort_values()

# Export the views as CSV for the client
avg_score_by_char.to_csv(
    DATA_DIR / "avg-score-by-character.csv", index_label="name", header=True
)
weighted_avg_scores_by_char.to_csv(
    DATA_DIR / "weighted-avg-score-by-character.csv",
    index_label="name",
    # NOTE(review): "weigted_mean" is misspelled, but the client reads this
    # exact column name — fix both sides together, not here.
    header=["weigted_mean"],
)
<reponame>Waterpine/dataprep-1
"""
This file defines palettes used for EDA.
"""
from bokeh.palettes import Category10, Category20, Greys256, Pastel1, viridis
# Named palettes re-exported for EDA plots.
BRG = ["#1f78b4", "#d62728", "#2ca02c"]  # blue / red / green accent trio
CATEGORY10 = Category10[10]  # 10-color categorical palette
CATEGORY20 = Category20[20]  # 20-color categorical palette
GREYS256 = Greys256          # 256-step greyscale ramp
PASTEL1 = Pastel1[9]         # 9 pastel categorical colors
VIRIDIS = viridis(256)       # 256-step viridis ramp
RDBU = [
"#053061",
"#063263",
"#073466",
"#083669",
"#09386c",
"#0a3a6f",
"#0b3c72",
"#0c3e75",
"#0d4078",
"#0e437b",
"#0f457e",
"#114781",
"#124984",
"#134b87",
"#144d8a",
"#154f8d",
"#165190",
"#175493",
"#185695",
"#195898",
"#1a5a9b",
"#1c5c9e",
"#1d5ea1",
"#1e60a4",
"#1f62a7",
"#2064aa",
"#2166ac",
"#2368ad",
"#246aae",
"#256caf",
"#276db0",
"#286fb0",
"#2971b1",
"#2b73b2",
"#2c75b3",
"#2d76b4",
"#2f78b5",
"#307ab6",
"#317cb7",
"#337db8",
"#347fb9",
"#3581b9",
"#3783ba",
"#3884bb",
"#3986bc",
"#3b88bd",
"#3c8abe",
"#3d8bbf",
"#3f8dc0",
"#408fc1",
"#4191c2",
"#4393c3",
"#4694c4",
"#4996c5",
"#4c98c6",
"#4f9ac7",
"#529cc8",
"#559ec9",
"#58a0ca",
"#5ba2cb",
"#5ea4cc",
"#61a6cd",
"#65a8ce",
"#68aacf",
"#6bacd0",
"#6eaed1",
"#71b0d2",
"#74b2d3",
"#77b4d5",
"#7ab6d6",
"#7db8d7",
"#80bad8",
"#84bcd9",
"#87beda",
"#8ac0db",
"#8dc2dc",
"#90c4dd",
"#93c5de",
"#95c6df",
"#98c8df",
"#9ac9e0",
"#9dcae1",
"#9fcbe1",
"#a2cde2",
"#a4cee3",
"#a7cfe4",
"#a9d0e4",
"#abd2e5",
"#aed3e6",
"#b0d4e6",
"#b3d5e7",
"#b5d7e8",
"#b8d8e8",
"#bad9e9",
"#bddaea",
"#bfdceb",
"#c2ddeb",
"#c4deec",
"#c7dfed",
"#c9e1ed",
"#cce2ee",
"#cee3ef",
"#d1e5f0",
"#d2e5f0",
"#d3e6f0",
"#d5e7f0",
"#d6e7f1",
"#d8e8f1",
"#d9e9f1",
"#dbe9f1",
"#dceaf2",
"#deebf2",
"#dfecf2",
"#e1ecf3",
"#e2edf3",
"#e4eef3",
"#e5eef3",
"#e7eff4",
"#e8f0f4",
"#eaf1f4",
"#ebf1f4",
"#edf2f5",
"#eef3f5",
"#f0f3f5",
"#f1f4f6",
"#f3f5f6",
"#f4f5f6",
"#f6f6f6",
"#f7f6f6",
"#f7f5f4",
"#f7f4f2",
"#f7f3f0",
"#f8f2ee",
"#f8f0ec",
"#f8efea",
"#f8eee8",
"#f9ede7",
"#f9ece5",
"#f9ebe3",
"#f9eae1",
"#f9e9df",
"#fae8dd",
"#fae7db",
"#fae5d9",
"#fae4d7",
"#fbe3d6",
"#fbe2d4",
"#fbe1d2",
"#fbe0d0",
"#fcdfce",
"#fcdecc",
"#fcddca",
"#fcdcc8",
"#fddbc7",
"#fcd8c4",
"#fcd6c1",
"#fbd4be",
"#fbd2bc",
"#fbd0b9",
"#faceb6",
"#faccb4",
"#facab1",
"#f9c7ae",
"#f9c5ab",
"#f9c3a9",
"#f8c1a6",
"#f8bfa3",
"#f8bda1",
"#f7bb9e",
"#f7b99b",
"#f7b698",
"#f6b496",
"#f6b293",
"#f5b090",
"#f5ae8e",
"#f5ac8b",
"#f4aa88",
"#f4a886",
"#f4a683",
"#f3a380",
"#f2a07e",
"#f19e7c",
"#ef9b7a",
"#ee9878",
"#ed9676",
"#ec9374",
"#eb9072",
"#ea8d70",
"#e88b6e",
"#e7886c",
"#e6856a",
"#e58368",
"#e48065",
"#e27d63",
"#e17b61",
"#e0785f",
"#df755d",
"#de725b",
"#dd7059",
"#db6d57",
"#da6a55",
"#d96853",
"#d86551",
"#d7624f",
"#d6604d",
"#d45d4b",
"#d35a4a",
"#d15749",
"#d05447",
"#ce5146",
"#cd4f44",
"#cc4c43",
"#ca4942",
"#c94641",
"#c7433f",
"#c6403e",
"#c53e3c",
"#c33b3b",
"#c2383a",
"#c03538",
"#bf3237",
"#be3036",
"#bc2d34",
"#bb2a33",
"#b92732",
"#b82431",
"#b6212f",
"#b51f2e",
"#b41c2d",
"#b2192b",
"#b0172a",
"#ad162a",
"#aa1529",
"#a71429",
"#a41328",
"#a11228",
"#9e1127",
"#9b1027",
"#991027",
"#960f26",
"#930e26",
"#900d25",
"#8d0c25",
"#8a0b24",
"#870a24",
"#840923",
"#810823",
"#7e0722",
"#7b0622",
"#780521",
"#750421",
"#720320",
"#6f0220",
"#6c011f",
"#69001f",
"#67001f",
]
YlGnBu = [
"#ffffd9",
"#feffd8",
"#feffd6",
"#fdfed5",
"#fdfed4",
"#fcfed3",
"#fcfed2",
"#fbfdd0",
"#fafdcf",
"#fafdce",
"#f9fdcd",
"#f9fdcb",
"#f8fcca",
"#f7fcc9",
"#f7fcc8",
"#f6fcc7",
"#f6fbc6",
"#f5fbc5",
"#f4fbc4",
"#f4fbc3",
"#f3fac2",
"#f2fac1",
"#f1fac0",
"#f1f9bf",
"#f0f9be",
"#eff9bd",
"#eff9bc",
"#eef8bb",
"#edf8bb",
"#ecf8ba",
"#ebf7b9",
"#eaf7b9",
"#eaf7b8",
"#e9f6b8",
"#e8f6b7",
"#e7f6b7",
"#e6f5b6",
"#e5f5b6",
"#e4f4b5",
"#e3f4b5",
"#e2f4b5",
"#e1f3b4",
"#e0f3b4",
"#dff2b4",
"#ddf2b4",
"#dcf1b4",
"#dbf1b4",
"#daf0b4",
"#d9f0b3",
"#d7efb3",
"#d6efb3",
"#d5eeb3",
"#d3eeb3",
"#d2edb3",
"#d1edb4",
"#cfecb4",
"#ceecb4",
"#ccebb4",
"#cbebb4",
"#c9eab4",
"#c8e9b4",
"#c6e9b4",
"#c4e8b4",
"#c3e7b5",
"#c1e7b5",
"#bfe6b5",
"#bde5b5",
"#bce5b5",
"#bae4b5",
"#b8e3b6",
"#b6e2b6",
"#b4e2b6",
"#b2e1b6",
"#b0e0b6",
"#aedfb6",
"#acdfb7",
"#aadeb7",
"#a8ddb7",
"#a6dcb7",
"#a4dbb7",
"#a2dbb8",
"#a0dab8",
"#9ed9b8",
"#9cd8b8",
"#99d7b9",
"#97d7b9",
"#95d6b9",
"#93d5b9",
"#91d4b9",
"#8fd3ba",
"#8dd2ba",
"#8ad2ba",
"#88d1ba",
"#86d0bb",
"#84cfbb",
"#82cebb",
"#80cebb",
"#7ecdbc",
"#7cccbc",
"#7acbbc",
"#78cabc",
"#76cabd",
"#73c9bd",
"#71c8bd",
"#6fc7bd",
"#6dc6be",
"#6bc6be",
"#6ac5be",
"#68c4be",
"#66c3bf",
"#64c3bf",
"#62c2bf",
"#60c1bf",
"#5ec0c0",
"#5cbfc0",
"#5abfc0",
"#59bec0",
"#57bdc0",
"#55bcc1",
"#53bbc1",
"#52bac1",
"#50bac1",
"#4eb9c1",
"#4db8c1",
"#4bb7c1",
"#49b6c2",
"#48b5c2",
"#46b4c2",
"#45b3c2",
"#43b2c2",
"#42b1c2",
"#40b0c2",
"#3fafc2",
"#3daec2",
"#3cadc2",
"#3bacc2",
"#39abc2",
"#38aac2",
"#37a9c2",
"#35a8c2",
"#34a7c2",
"#33a6c2",
"#32a5c2",
"#31a3c1",
"#30a2c1",
"#2fa1c1",
"#2ea0c1",
"#2d9fc1",
"#2c9dc0",
"#2b9cc0",
"#2a9bc0",
"#299ac0",
"#2898bf",
"#2897bf",
"#2796bf",
"#2695be",
"#2693be",
"#2592be",
"#2591bd",
"#248fbd",
"#248ebc",
"#238cbc",
"#238bbb",
"#228abb",
"#2288ba",
"#2287ba",
"#2185b9",
"#2184b9",
"#2182b8",
"#2181b8",
"#217fb7",
"#217eb6",
"#207cb6",
"#207bb5",
"#2079b5",
"#2078b4",
"#2076b3",
"#2075b3",
"#2073b2",
"#2072b1",
"#2070b1",
"#216fb0",
"#216daf",
"#216cae",
"#216aae",
"#2169ad",
"#2167ac",
"#2166ac",
"#2164ab",
"#2163aa",
"#2261aa",
"#2260a9",
"#225ea8",
"#225da7",
"#225ca7",
"#225aa6",
"#2259a5",
"#2257a5",
"#2256a4",
"#2354a3",
"#2353a3",
"#2352a2",
"#2350a1",
"#234fa0",
"#234ea0",
"#234c9f",
"#234b9e",
"#234a9d",
"#23499d",
"#23479c",
"#23469b",
"#23459a",
"#224499",
"#224298",
"#224197",
"#224096",
"#223f95",
"#223e94",
"#213d93",
"#213c92",
"#213a91",
"#203990",
"#20388f",
"#20378d",
"#1f368c",
"#1f358b",
"#1e348a",
"#1e3388",
"#1d3287",
"#1d3185",
"#1c3184",
"#1c3082",
"#1b2f81",
"#1a2e7f",
"#1a2d7e",
"#192c7c",
"#182b7a",
"#172b79",
"#172a77",
"#162975",
"#152874",
"#142772",
"#132770",
"#13266e",
"#12256c",
"#11246b",
"#102469",
"#0f2367",
"#0e2265",
"#0d2163",
"#0d2161",
"#0c2060",
"#0b1f5e",
"#0a1e5c",
"#091e5a",
"#081d58",
]
|
<gh_stars>0
from common.services import cointainer_web3 as web3
import logging
import sha3
# Module-level logger shared by the ingester task functions below.
logger = logging.getLogger('watchtower.ingester.tasks')
# Canonical WETH token contract address (lowercase hex).
WETH_CONTRACT_ADDRESS = '0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2'
# 0x protocol exchange proxy contract address (lowercase hex).
ZX_PROXY_CONTRACT = '0xdef1c0ded9bec7f1a1670819833240f027b25eff'
# Precompute the keccak-256 log topic for the WETH Withdrawal(address,uint256) event.
k = sha3.keccak_256()
k.update('Withdrawal(address,uint256)'.encode('utf-8'))
weth_withdrawal_topic = '0x' + k.hexdigest()
# Precompute the keccak-256 log topic for the WETH Deposit(address,uint256) event.
k = sha3.keccak_256()
k.update('Deposit(address,uint256)'.encode('utf-8'))
weth_deposit_topic = '0x' + k.hexdigest()
def get_dex_eth_deposits(block):
    """Return WETH Deposit events routed through the 0x exchange proxy in *block*.

    Scans the block's event logs for WETH ``Deposit`` events and keeps only
    those emitted by the WETH contract in transactions sent to the 0x proxy.

    Returns a dict mapping txid -> transfer record (addresses, amount, block info).
    """
    block_hash = web3.toHex(block.get('hash'))
    block_height = block.get('number')
    logs = web3.eth.getLogs({
        'fromBlock': block_height,
        'toBlock': block_height,
        'topics': [weth_deposit_topic]
    })
    weth_deposit_transactions = {}
    if not logs:
        logger.info('No logs of type weth deposit found for block %s %s', block_height, block_hash)
        # Nothing to match: skip indexing the block's transactions entirely.
        return weth_deposit_transactions
    # Index the block's transactions once so each log lookup below is O(1).
    from_addresses = {}
    contract_addresses = {}
    for tx in block.transactions:
        tx_hash = tx.get('hash').hex()
        from_addresses[tx_hash] = tx['from']
        contract_addresses[tx_hash] = tx['to']
    for log in logs:
        txid = web3.toHex(log.get('transactionHash'))
        log_contract_address = log.get('address').lower()
        contract_address = contract_addresses.get(txid)
        if contract_address is not None:
            contract_address = contract_address.lower()
        # Only keep deposits emitted by the WETH contract itself...
        if log_contract_address != WETH_CONTRACT_ADDRESS:
            continue
        # ...in transactions sent to the 0x exchange proxy.
        if contract_address != ZX_PROXY_CONTRACT:
            continue
        weth_deposit_transactions[txid] = {
            'txid': txid,
            'contract_address': contract_address,
            'from_address': from_addresses.get(txid),
            'to_address': contract_address,
            'block_height': block_height,
            'block_hash': block_hash,
            'amount': int(log.get('data'), 16)
        }
    return weth_deposit_transactions
def get_dex_eth_withdrawals(block):
    """Return WETH Withdrawal events routed through the 0x exchange proxy in *block*.

    Scans the block's event logs for WETH ``Withdrawal`` events and keeps only
    those emitted by the WETH contract in transactions sent to the 0x proxy.

    Returns a dict mapping txid -> transfer record (addresses, amount, block info).
    """
    block_hash = web3.toHex(block.get('hash'))
    block_height = block.get('number')
    logs = web3.eth.getLogs({
        'fromBlock': block_height,
        'toBlock': block_height,
        'topics': [weth_withdrawal_topic]
    })
    weth_withdrawal_transactions = {}
    if not logs:
        logger.info('No logs of type weth withdrawal found for block %s %s', block_height, block_hash)
        # Nothing to match: skip indexing the block's transactions entirely.
        return weth_withdrawal_transactions
    # Index the block's transactions once so each log lookup below is O(1).
    from_addresses = {}
    contract_addresses = {}
    for tx in block.transactions:
        tx_hash = tx.get('hash').hex()
        from_addresses[tx_hash] = tx['from']
        contract_addresses[tx_hash] = tx['to']
    for log in logs:
        txid = web3.toHex(log.get('transactionHash'))
        log_contract_address = log.get('address').lower()
        contract_address = contract_addresses.get(txid)
        if contract_address is not None:
            contract_address = contract_address.lower()
        # Only keep withdrawals emitted by the WETH contract itself...
        if log_contract_address != WETH_CONTRACT_ADDRESS:
            continue
        # ...in transactions sent to the 0x exchange proxy.
        if contract_address != ZX_PROXY_CONTRACT:
            continue
        # Withdrawals flow out of the proxy back to the transaction sender.
        weth_withdrawal_transactions[txid] = {
            'txid': txid,
            'contract_address': contract_address,
            'from_address': contract_address,
            'to_address': from_addresses.get(txid),
            'block_height': block_height,
            'block_hash': block_hash,
            'amount': int(log.get('data'), 16)
        }
    return weth_withdrawal_transactions
def get_dex_eth_txs(block):
    """Return all DEX WETH transfers (withdrawals and deposits) for *block*.

    When the same txid appears in both sets, the deposit record wins,
    matching the original merge order.
    """
    # Renamed the result from ``all`` to avoid shadowing the builtin.
    txs = get_dex_eth_withdrawals(block)
    txs.update(get_dex_eth_deposits(block))
    return txs
|
<filename>PixivDownloader.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 16 19:38:14 2019
AIに食わせる画像を探すためにpixivpyをやります。
タグで探せるようにつくります。
とりあえずクラス化しました。
"""
from pixivpy3 import AppPixivAPI
from pixivpy3 import PixivAPI
import os
class PixivDownloader:
    """Search pixiv by tag and download matching illustrations.

    Originally written to collect training images for an AI model; works are
    filtered by score, view count, favourites and age restriction.
    """

    def __init__(self):
        self.pixiv = PixivAPI()
        self.app_pixiv = AppPixivAPI()

    def login(self, user_id, user_password):
        """Log in to pixiv; returns the API's login response."""
        return self.pixiv.login(user_id, user_password)

    def download(self, save_num, query,
                 search_mode="text",
                 search_types=None,
                 save_path="C:/Users/" + os.environ.get("USERNAME") + "/Pictures",
                 worst_score=0,
                 worst_views=0,
                 worst_favo=0,
                 r18_flag=False,
                 ):
        """Download up to *save_num* single-image works matching *query*.

        query is a list whose first element names the save sub-directory.
        Works below the worst_* thresholds, multi-page sets, already-saved
        files, and works whose age restriction disagrees with r18_flag
        are skipped.
        """
        # Avoid a mutable default argument; default remains ["illustration"].
        if search_types is None:
            search_types = ["illustration"]
        # Initial search.
        search_result = self.pixiv.search_works(query, page=1, per_page=save_num,
                                                mode=search_mode, types=search_types)
        # If fewer works exist than requested, download as many as available.
        if save_num > search_result.pagination.total:
            save_num = search_result.pagination.total
        # Save directory is named after the first query term; create if missing.
        save_path = save_path + "/" + query[0] + "/"
        # os.makedirs (was os.mkdir) also creates missing parent directories.
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        current_work = 0
        total_downloaded = 0
        page_num = 2
        while total_downloaded < save_num:
            if current_work == save_num:
                # Some works were filtered out, so this page is exhausted;
                # fetch the next page to make up the shortfall.
                if search_result.pagination.next is None:
                    # No more pages: stop early.
                    break
                current_work = 0
                search_result = self.pixiv.search_works(query, page=page_num, per_page=save_num,
                                                        mode=search_mode, types=search_types)
                if save_num > len(search_result.response):
                    save_num = len(search_result.response)
                page_num += 1
            work = search_result.response[current_work]
            current_work += 1
            if work.page_count > 1:
                # Skip multi-image sets.
                print("Out "+work.title+":セット")
                continue
            if work.stats.score < worst_score:
                # Filter by score.
                continue
            if work.stats.views_count < worst_views:
                # Filter by view count.
                print("Out "+work.title+":閲覧数")
                continue
            if work.stats.favorited_count.public < worst_favo:
                # Filter by public favourites.
                print("Out "+work.title+":いいね")
                continue
            if r18_flag == (work.age_limit == "all-age"):
                # Keep only works whose age restriction matches r18_flag.
                print("Out "+work.title+":年齢制限")
                continue
            if os.path.exists(save_path+str(work.id)+"_p0.png") or os.path.exists(save_path+str(work.id)+"_p0.jpg"):
                # Skip works already on disk.
                print(work.title +" has already downloaded")
                continue
            # Download the large-size image.
            self.pixiv.download(work.image_urls.large, save_path)
            total_downloaded += 1
            print("Downloaded {0}/{1}".format(total_downloaded, save_num)+':'+str(work.title))
# Usage example
if __name__ == "__main__":
    # Download settings.
    save_works_num = 1000
    favo = 10
    mode = ["exact_tag"]
    # Each entry is a one-element tag list; download() uses query[0] as the folder name.
    search_tags = [["カリオストロ(グラブル)"], ["櫻井桃華"], ["ネロ・クラウディウス"], ["ギルガメッシュ"], ["沙都子"], ["木之本桜"]]
    save_path = ""
    p = PixivDownloader()
    p.login("id", "pass")
    # Removed a dead `data = []` assignment that was immediately shadowed
    # by the loop variable; renamed the loop variable for clarity.
    for tags in search_tags:
        p.download(save_works_num, tags, search_mode=mode, save_path=save_path, worst_favo=favo)
    print("Complete!")
|
#--------ESRI 2010-------------------------------------
#-------------------------------------------------------------------------------
# Copyright 2010-2013 Esri
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# Rejoin tracks
# This script will take any number of tracks selected in
#'inTrackLines' and condense them to one track, updating
# all the associated points to the new single track id
# INPUTS:
# Input Track Lines (FEATURELAYER)
# Input Track Points (FEATURELAYER)
# Field in which Track IDs are stored (FIELD)
# Field in which start date time of track line is stored (FIELD)
# Field in which finish date time of track line is stored (FIELD)
# OUTPUTS:
# Output Track Lines - derived (FEATURELAYER)
# Output Track Points - derived (FEATURELAYER)
#-------------------------------------------------------------------------------
import arcpy
from arcpy import env
import datetime
env.overwriteOutput = True
# Rejoin the selected track lines into a single track: merge their geometry,
# widen the start/finish time window, and repoint the associated track points
# at the surviving track ID.
try:
    #set features and cursors so that they are deletable in
    #'finally' block should the script fail prior to their creation
    updatefeat, updt_cursor = None, None
    ft, linecursor = None, None
    feat, ptcursor = None, None
    # Script tool parameters (see header): layers plus the three field names.
    inTrackLines = arcpy.GetParameterAsText(0)
    inTrackPoints = arcpy.GetParameterAsText(1)
    inTrackIDField = arcpy.GetParameterAsText(2)
    inStartDateTimeField = arcpy.GetParameterAsText(3)
    inFinishDateTimeField = arcpy.GetParameterAsText(4)
    #we'll need to join the lines, so get the shape field name
    #desc = arcpy.Describe(inTrackLines)
    #shapefieldname = desc.ShapeFieldName
    #iterate over each track line, gathering data and deleting all but the first
    # Sorted by start time so the first row (kept) is the earliest track.
    updt_cursor = arcpy.UpdateCursor(inTrackLines,"",None,"",inStartDateTimeField)
    #updatefeat = updt_cursor.next() #UDPATE
    updatefeat = next(updt_cursor)
    loweststart = None
    highestfinish = None
    newtrackid = None
    changedtrackids = None
    ptarray = arcpy.Array()
    while updatefeat:
        startdt = updatefeat.getValue(inStartDateTimeField)
        finishdt = updatefeat.getValue(inFinishDateTimeField)
        shp = updatefeat.shape
        polyline = shp.getPart(0)
        #pt = polyline.next() #UDPATE
        pt = next(polyline)
        while pt:
            #add the polyline to the point array that will be used to recreate a joined polyline later
            ptarray.add(pt)
            #pt = polyline.next() #UPDATE
            pt = next(polyline)
        # Track the overall earliest start across all merged lines.
        if loweststart:
            if startdt < loweststart:
                loweststart = startdt
        else:
            loweststart = startdt
        # Track the overall latest finish across all merged lines.
        if highestfinish:
            if finishdt > highestfinish:
                highestfinish = finishdt
        else:
            highestfinish = finishdt
        if newtrackid:
            # Not the first row: record its track ID in a SQL where clause
            # (for repointing its points later) and delete the line.
            if changedtrackids:
                changedtrackids += " OR \"" + inTrackIDField + "\" = '" + updatefeat.getValue(inTrackIDField) + "'"
            else:
                changedtrackids = "\"" + inTrackIDField + "\" = '" + updatefeat.getValue(inTrackIDField) + "'"
            updt_cursor.deleteRow(updatefeat)
        else:
            # First (earliest) row: its track ID survives the merge.
            newtrackid = updatefeat.getValue(inTrackIDField)
        #updatefeat = updt_cursor.next() #UPDATE
        updatefeat = next(updt_cursor)
    #now, update the track line being left with new start and finish datetimes, and new geometry
    linecursor = arcpy.UpdateCursor(inTrackLines)
    #ft = linecursor.next() #UPDATE
    ft = next(linecursor)
    ft.setValue(inStartDateTimeField, loweststart)
    ft.setValue(inFinishDateTimeField, highestfinish)
    ft.shape = ptarray
    linecursor.updateRow(ft)
    #finally, update all points with the Track IDs affected to the new Track ID
    # NOTE(review): with only one track selected, changedtrackids stays None,
    # so this where clause is None and the cursor visits every point — confirm
    # this is intended for the single-track case.
    arcpy.AddMessage("update track points where " + changedtrackids)
    ptcursor = arcpy.UpdateCursor(inTrackPoints,changedtrackids)
    #feat = ptcursor.next() #UPDATE
    feat = next(ptcursor)
    while feat:
        feat.setValue(inTrackIDField, newtrackid)
        ptcursor.updateRow(feat)
        #feat = ptcursor.next() #UPDATE
        feat = next(ptcursor)
    # Derived outputs for the script tool.
    arcpy.SetParameterAsText(5, inTrackLines)
    arcpy.SetParameterAsText(6, inTrackPoints)
except:
    # NOTE(review): bare except hides unexpected failures (including the
    # StopIteration from exhausted cursors); only geoprocessing messages
    # are reported.
    if not arcpy.GetMessages() == "":
        arcpy.AddMessage(arcpy.GetMessages(2))
finally:
    # Release row objects and cursors so file locks on the layers are freed.
    if updatefeat:
        del updatefeat
    if updt_cursor:
        del updt_cursor
    if ft:
        del ft
    if linecursor:
        del linecursor
    if feat:
        del feat
    if ptcursor:
        del ptcursor
|
import cv2
import numpy as np
class Thresholds:
    """Binary thresholding helpers (HLS channels and Sobel gradients) for BGR images.

    Each method returns a binary mask of the input's height/width with 1 where
    the pixel passes the configured threshold and 0 elsewhere.
    """
    def __init__(self):
        # (low, high) threshold pairs; HLS filters keep low < v <= high,
        # gradient filters keep low <= v <= high.
        self._HLS_H_THRESHOLD = (100, 255)
        self._HLS_L_THRESHOLD = (100, 255)
        self._HLS_S_THRESHOLD = (100, 255)
        self._SOBEL_THRESHOLD = (20, 100)
        self._SOBEL_KERNEL = 3
        self._GRAD_MAG_THRESHOLD = (30, 170)
        self._GRAD_DIR_THRESHOLD = (1.5, np.pi/2)
    # Function that filters the H-channel of HLS with a threshold
    def hls_h_select(self, img):
        """Binary mask of the HLS hue channel within _HLS_H_THRESHOLD."""
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        h_channel = hls[:,:,0]
        binary_output = np.zeros_like(h_channel)
        binary_output[(h_channel > self._HLS_H_THRESHOLD[0]) & (h_channel <= self._HLS_H_THRESHOLD[1])] = 1
        return binary_output
    # Function that filters the L-channel of HLS with a threshold
    def hls_l_select(self, img):
        """Binary mask of the HLS lightness channel within _HLS_L_THRESHOLD."""
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        l_channel = hls[:,:,1]
        binary_output = np.zeros_like(l_channel)
        binary_output[(l_channel > self._HLS_L_THRESHOLD[0]) & (l_channel <= self._HLS_L_THRESHOLD[1])] = 1
        return binary_output
    # Function that filters the S-channel of HLS with a threshold
    def hls_s_select(self, img):
        """Binary mask of the HLS saturation channel within _HLS_S_THRESHOLD."""
        hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
        s_channel = hls[:,:,2]
        binary_output = np.zeros_like(s_channel)
        binary_output[(s_channel > self._HLS_S_THRESHOLD[0]) & (s_channel <= self._HLS_S_THRESHOLD[1])] = 1
        return binary_output
    # Function that filters gradient's orientation with a threshold
    def abs_sobel_thresh(self, img, orient='x'):
        """Binary mask of the absolute Sobel gradient along *orient* ('x' or 'y').

        Raises ValueError for any other *orient* (previously this crashed with
        an UnboundLocalError on the unassigned abs_sobel variable).
        """
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Apply x or y gradient with the OpenCV Sobel() function
        # and take the absolute value
        if orient == 'x':
            abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self._SOBEL_KERNEL))
        elif orient == 'y':
            abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=self._SOBEL_KERNEL))
        else:
            raise ValueError("orient must be 'x' or 'y', got %r" % (orient,))
        # Rescale back to 8 bit integer
        scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
        # Create a copy and apply the threshold
        binary_output = np.zeros_like(scaled_sobel)
        # Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
        binary_output[(scaled_sobel >= self._SOBEL_THRESHOLD[0]) & (scaled_sobel <= self._SOBEL_THRESHOLD[1])] = 1
        # Return the result
        return binary_output
    # Function that filters gradient's magnitude with a threshold
    def mag_thresh(self, img):
        """Binary mask of the Sobel gradient magnitude within _GRAD_MAG_THRESHOLD."""
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Take both Sobel x and y gradients
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self._SOBEL_KERNEL)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=self._SOBEL_KERNEL)
        # Calculate the gradient magnitude
        gradmag = np.sqrt(sobelx**2 + sobely**2)
        # Rescale to 8 bit
        scale_factor = np.max(gradmag)/255
        gradmag = (gradmag/scale_factor).astype(np.uint8)
        # Create a binary image of ones where threshold is met, zeros otherwise
        binary_output = np.zeros_like(gradmag)
        binary_output[(gradmag >= self._GRAD_MAG_THRESHOLD[0]) & (gradmag <= self._GRAD_MAG_THRESHOLD[1])] = 1
        # Return the binary image
        return binary_output
    # Function that filters gradient's direction with a threshold
    def dir_threshold(self, img):
        """Binary mask of the absolute gradient direction within _GRAD_DIR_THRESHOLD (radians)."""
        # Grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Calculate the x and y gradients
        sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=self._SOBEL_KERNEL)
        sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=self._SOBEL_KERNEL)
        # Take the absolute value of the gradient direction,
        # apply a threshold, and create a binary image result
        absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
        binary_output = np.zeros_like(absgraddir)
        binary_output[(absgraddir >= self._GRAD_DIR_THRESHOLD[0]) & (absgraddir <= self._GRAD_DIR_THRESHOLD[1])] = 1
        # Return the binary image
        return binary_output
    def combine(self, img):
        """Combine x-gradient, saturation, and lightness masks into one binary image."""
        gradx = self.abs_sobel_thresh(img, orient='x')
        mag_binary = self.mag_thresh(img)
        s_binary = self.hls_s_select(img)
        l_binary = self.hls_l_select(img)
        combined = np.zeros_like(gradx)
        combined[(gradx == 1) | ((s_binary == 1) & (l_binary == 1))] = 1
        return combined
<gh_stars>0
import json
from collections import defaultdict
from typing import List, Dict, Any, Union
import numpy as np
from py_lex import EmoLex
import torch
from torch.utils.data import Dataset
from pytorch_gleam.data.datasets.base_datasets import BaseDataModule
from pytorch_gleam.data.collators import MultiClassFrameEdgeBatchCollator
import pytorch_gleam.data.datasets.senticnet5 as senticnet5
from tqdm import tqdm
def read_jsonl(path):
    """Yield one parsed JSON object per non-empty line of the JSONL file at *path*.

    Blank and whitespace-only lines are skipped. Opens the file with an
    explicit UTF-8 encoding so behavior doesn't depend on the platform locale.
    """
    with open(path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)
def get_sentic(word_text):
    """Look up SenticNet-5 affective features for a word.

    COVID-related spellings are mapped onto the 'virus' entry; otherwise up to
    two trailing characters are stripped as a crude de-pluralisation before
    giving up. Returns a feature dict, or None when no entry is found.
    """
    word = word_text.lower()
    if word in ('coronavirus', 'covid-19', 'covid', 'covid19'):
        word = 'virus'
    # Try the word as-is, then with one and two trailing characters removed.
    for _ in range(2):
        if word in senticnet5.senticnet:
            break
        word = word[:-1]
    if word not in senticnet5.senticnet:
        return None
    entry = senticnet5.senticnet[word]
    p_v, a_v, s_v, ap_v, p_m, s_m, po_l, po_v = entry[:8]
    return {
        'pleasantness_value': float(p_v),
        'attention_value': float(a_v),
        'sensitivity_value': float(s_v),
        'aptitude_value': float(ap_v),
        'primary_mood': p_m,
        'secondary_mood': s_m,
        'polarity_label': po_l,
        'polarity_value': float(po_v),
        'semantics': list(entry[8:13]),
    }
def add_sentic_token_features(token_data):
    """Attach SenticNet features (or None) under 'sentic' to a token dict, in place."""
    token_data['sentic'] = get_sentic(token_data['text'])
    return token_data
def align_tokens(tokens, wpt_tokens, seq_offset=0):
    """Map wordpiece indices onto parse tokens by character position.

    Adds a 'wpt_idxs' set to every token (mutating *tokens* in place) and
    returns a dict of wordpiece index -> owning token. *seq_offset* selects
    which sequence of the wordpiece encoding the characters belong to.
    """
    align_map = {}
    for token in tokens:
        token['wpt_idxs'] = set()
        for char_idx in range(token['start'], token['end']):
            sub_idx = wpt_tokens.char_to_token(char_idx, sequence_index=seq_offset)
            # Whitespace characters have no wordpiece and return None.
            if sub_idx is not None:
                align_map[sub_idx] = token
                token['wpt_idxs'].add(sub_idx)
    return align_map
def align_token_sequences(m_tokens, t_tokens, wpt_tokens):
    """Align two token sequences against one paired wordpiece encoding.

    *m_tokens* align to sequence 0 and *t_tokens* to sequence 1. Returns
    (align_map, aligned_tokens) where aligned_tokens has one entry per
    wordpiece; wordpieces with no source token get a placeholder entry.
    """
    align_map = dict(align_tokens(m_tokens, wpt_tokens))
    align_map.update(align_tokens(t_tokens, wpt_tokens, seq_offset=1))
    aligned_tokens = []
    for sub_idx in range(len(wpt_tokens['input_ids'])):
        if sub_idx not in align_map:
            # [CLS], [SEP], or another special wordpiece with no source token.
            align_map[sub_idx] = {
                'pos': 'NONE',
                'dep': 'NONE',
                'head': 'NONE',
                'sentic': None,
                'text': '[CLS]' if sub_idx == 0 else '[SEP]',
                'wpt_idxs': {sub_idx}
            }
        aligned_tokens.append(align_map[sub_idx])
    return align_map, aligned_tokens
def flatten(multi_list):
    """Flatten one level of nesting into a single list."""
    flat = []
    for sub in multi_list:
        flat.extend(sub)
    return flat
def create_adjacency_matrix(edges, size, t_map, r_map):
    """Build a symmetric float32 0/1 adjacency matrix with self-loops.

    *edges* maps token text -> set of neighbor texts, *t_map* maps wordpiece
    index -> token text, and *r_map* maps token text -> wordpiece indices.
    """
    adj = np.eye(size, dtype=np.float32)
    for src in range(size):
        src_text = t_map[src]
        # Every wordpiece index reachable via this token's text edges
        # (the flatten-over-r_map lookup inlined as a set comprehension).
        neighbors = {idx for edge_text in edges[src_text] for idx in r_map[edge_text]}
        for dst in neighbors:
            adj[src, dst] = 1.0
            adj[dst, src] = 1.0
    return adj
def sentic_expand(sentic_edges, expand_list):
    """Return *sentic_edges* plus the SenticNet fields (selected by index) of each edge word."""
    expanded = set(sentic_edges)
    for word in sentic_edges:
        info = senticnet5.senticnet[word]
        expanded.update(info[i] for i in expand_list)
    return expanded
def create_edges(
        m_tokens, t_tokens, wpt_tokens,
        num_semantic_hops, num_emotion_hops, num_lexical_hops,
        emotion_type, emolex, lex_edge_expanded
):
    """Build semantic, emotion, and lexical adjacency matrices over the wordpieces.

    Aligns the frame (*m_tokens*) and tweet (*t_tokens*) parses to the paired
    wordpiece encoding, then derives three edge sets per token text:
    SenticNet semantics (expanded *num_semantic_hops* times), shared-emotion
    links ('senticnet' moods or 'emolex' categories per *emotion_type*), and
    dependency-head links (optionally expanded to shared 'dep'/'pos' roles via
    *lex_edge_expanded*). Each edge set becomes a symmetric adjacency matrix.

    NOTE(review): num_emotion_hops and num_lexical_hops are currently unused
    (see the commented-out expansion code below) — confirm intended.
    """
    seq_len = len(wpt_tokens['input_ids'])
    align_map, a_tokens = align_token_sequences(m_tokens, t_tokens, wpt_tokens)
    semantic_edges = defaultdict(set)
    emotion_edges = defaultdict(set)
    reverse_emotion_edges = defaultdict(set)
    lexical_edges = defaultdict(set)
    reverse_lexical_dep_edges = defaultdict(set)
    reverse_lexical_pos_edges = defaultdict(set)
    lexical_dep_edges = defaultdict(set)
    lexical_pos_edges = defaultdict(set)
    root_text = None
    # r_map: token text -> wordpiece indices; t_map: wordpiece index -> token text.
    r_map = defaultdict(set)
    t_map = {}
    for token in a_tokens:
        text = token['text'].lower()
        head = token['head'].lower()
        for wpt_idx in token['wpt_idxs']:
            t_map[wpt_idx] = text
            r_map[text].add(wpt_idx)
        pos = token['pos']
        dep = token['dep']
        reverse_lexical_dep_edges[dep].add(text)
        reverse_lexical_pos_edges[pos].add(text)
        lexical_dep_edges[text].add(dep)
        lexical_pos_edges[text].add(pos)
        # will be two roots with two sequences
        if dep == 'ROOT':
            root_text = text
        sentic = token['sentic']
        if sentic is not None:
            for sem in sentic['semantics']:
                semantic_edges[text].add(sem)
            # Expand semantics transitively through SenticNet's semantic fields.
            for i in range(num_semantic_hops-1):
                semantic_edges[text] = sentic_expand(semantic_edges[text], [8, 9, 10, 11, 12])
            if emotion_type == 'senticnet':
                emotion_edges[text].add(sentic['primary_mood'])
                emotion_edges[text].add(sentic['secondary_mood'])
                reverse_emotion_edges[sentic['primary_mood']].add(text)
                reverse_emotion_edges[sentic['secondary_mood']].add(text)
            elif emotion_type == 'emolex':
                for emotion in emolex.categorize_token(text):
                    emotion_edges[text].add(emotion)
                    reverse_emotion_edges[emotion].add(text)
            else:
                raise ValueError(f'Invalid emotion type: {emotion_type}')
            # for emotion in [sentic['primary_mood'], sentic['secondary_mood']]:
            # 	emotion_edges[text] = emotion_edges[text].union(emotion_nodes[emotion])

            # for i in range(num_emotion_hops - 1):
            # 	new_emotions = sentic_expand(emotion_edges[text], [4, 5])
            # 	for emotion in new_emotions:
            # 		emotion_edges[text] = emotion_edges[text].union(emotion_nodes[emotion])
        lexical_edges[text].add(head)
    # Anchor the special tokens to the (last-seen) dependency root.
    lexical_edges['[CLS]'].add(root_text)
    lexical_edges['[SEP]'].add(root_text)
    # text -> emotion node -> other text in sentence with same emotions
    for text in emotion_edges.keys():
        emotions = emotion_edges[text]
        emotion_edges[text] = emotion_edges[text].union(
            set(flatten(reverse_emotion_edges[emotion] for emotion in emotions))
        )
    if 'dep' in lex_edge_expanded:
        for text in lexical_edges.keys():
            # expand lexical edges to same dependency roles
            text_deps = lexical_dep_edges[text]
            lexical_edges[text] = lexical_edges[text].union(
                set(flatten(reverse_lexical_dep_edges[dep] for dep in text_deps))
            )
    if 'pos' in lex_edge_expanded:
        for text in lexical_edges.keys():
            # expand lexical edges to same pos tags
            text_pos = lexical_pos_edges[text]
            lexical_edges[text] = lexical_edges[text].union(
                set(flatten(reverse_lexical_pos_edges[pos] for pos in text_pos))
            )
    # Materialize each edge set as a wordpiece-level adjacency matrix.
    semantic_adj = create_adjacency_matrix(
        edges=semantic_edges,
        size=seq_len,
        t_map=t_map,
        r_map=r_map
    )
    emotion_adj = create_adjacency_matrix(
        edges=emotion_edges,
        size=seq_len,
        t_map=t_map,
        r_map=r_map
    )
    lexical_adj = create_adjacency_matrix(
        edges=lexical_edges,
        size=seq_len,
        t_map=t_map,
        r_map=r_map
    )
    edges = {
        'semantic': semantic_adj,
        'emotion': emotion_adj,
        'lexical': lexical_adj,
    }
    return edges
class MultiClassFrameEdgeDataset(Dataset):
    """Torch dataset of (frame, tweet) pairs with per-pair graph edge matrices.

    Each example pairs one frame text with one tweet text, tokenized together,
    plus semantic/emotion/lexical adjacency matrices from create_edges.
    """
    # Flat list of example dicts built by read_path/parse_example.
    examples: List[Dict[Any, Union[Any, Dict]]]
    def __init__(
            self, data_path: Union[str, List[str]], frame_path: str,
            label_name: str, tokenizer, label_map: Dict[str, int],
            emo_path: str,
            num_semantic_hops: int = 3,
            num_emotion_hops: int = 1,
            num_lexical_hops: int = 1,
            emotion_type: str = 'senticnet',
            lex_edge_expanded: str = 'none',
    ):
        """Load frames from *frame_path* and examples from *data_path*.

        data_path may be one JSONL path or a list of paths (read in order).
        emo_path locates the EmoLex lexicon used when emotion_type='emolex'.
        """
        super().__init__()
        self.frame_path = frame_path
        self.tokenizer = tokenizer
        self.label_name = label_name
        self.label_map = label_map
        self.num_semantic_hops = num_semantic_hops
        self.num_emotion_hops = num_emotion_hops
        self.num_lexical_hops = num_lexical_hops
        self.emotion_type = emotion_type
        self.lex_edge_expanded = lex_edge_expanded
        self.emolex = EmoLex(emo_path)
        self.examples = []
        with open(self.frame_path) as f:
            self.frames = json.load(f)
        if isinstance(data_path, str):
            self.read_path(data_path)
        else:
            for stage, stage_path in enumerate(data_path):
                self.read_path(stage_path, stage)
    def parse_example(self, ex):
        """Build one example dict per frame judged in *ex*; returns a list."""
        ex_id = ex['id']
        ex_text = ex['full_text'] if 'full_text' in ex else ex['text']
        # Normalize line breaks so the tokenizer sees a single-line text.
        ex_text = ex_text.strip().replace('\r', ' ').replace('\n', ' ')
        ex_examples = []
        for f_id, f_label in ex[self.label_name].items():
            frame = self.frames[f_id]
            frame_text = frame['text']
            # Labels outside label_map default to class 0.
            ex_label = 0
            if f_label in self.label_map:
                ex_label = self.label_map[f_label]
            # Tokenize frame and tweet as a sequence pair.
            token_data = self.tokenizer(
                frame_text,
                ex_text
            )
            tweet_parse = [add_sentic_token_features(x) for x in ex['parse']]
            f_parse = [add_sentic_token_features(x) for x in frame['parse']]
            ex_edges = create_edges(
                f_parse,
                tweet_parse,
                token_data,
                self.num_semantic_hops,
                self.num_emotion_hops,
                self.num_lexical_hops,
                self.emotion_type,
                self.emolex,
                self.lex_edge_expanded,
            )
            example = {
                'ids': f'{ex_id}|{f_id}',
                'label': ex_label,
                'input_ids': token_data['input_ids'],
                'attention_mask': token_data['attention_mask'],
                'edges': ex_edges
            }
            if 'token_type_ids' in token_data:
                example['token_type_ids'] = token_data['token_type_ids']
            ex_examples.append(example)
        return ex_examples
    def read_path(self, data_path, stage=0):
        """Parse every example in the JSONL file at *data_path* into self.examples.

        NOTE(review): *stage* is accepted but unused here — confirm intended.
        """
        for ex in tqdm(read_jsonl(data_path)):
            # parse_example returns a list; append each produced example.
            for ex_examples in self.parse_example(ex):
                self.examples.append(ex_examples)
    def __len__(self):
        return len(self.examples)
    def __getitem__(self, idx):
        # Accept tensor indices from torch samplers.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        example = self.examples[idx]
        return example
    def worker_init_fn(self, _):
        # No per-worker setup needed; hook kept for DataLoader compatibility.
        pass
class MultiClassFrameEdgeDataModule(BaseDataModule):
    """Data module wiring up MultiClassFrameEdgeDataset splits.

    Builds a dataset for each of train/val/test/predict that has a path
    configured; all splits share every other setting.
    """
    def __init__(
            self,
            label_name: str,
            label_map: Dict[str, int],
            frame_path: str,
            emo_path: str,
            num_semantic_hops: int = 3,
            num_emotion_hops: int = 1,
            num_lexical_hops: int = 1,
            emotion_type: str = 'senticnet',
            lex_edge_expanded: str = 'none',
            train_path: str = None,
            val_path: str = None,
            test_path: str = None,
            predict_path: str = None,
            *args,
            **kwargs
    ):
        super().__init__(*args, **kwargs)
        self.label_map = label_map
        self.label_name = label_name
        self.train_path = train_path
        self.val_path = val_path
        self.test_path = test_path
        self.predict_path = predict_path
        self.frame_path = frame_path

        def make_dataset(data_path):
            # All four splits share every setting except the data path;
            # this closure replaces four near-identical constructor calls.
            return MultiClassFrameEdgeDataset(
                tokenizer=self.tokenizer,
                data_path=data_path,
                frame_path=self.frame_path,
                label_name=self.label_name,
                label_map=self.label_map,
                emo_path=emo_path,
                num_semantic_hops=num_semantic_hops,
                num_emotion_hops=num_emotion_hops,
                num_lexical_hops=num_lexical_hops,
                emotion_type=emotion_type,
                lex_edge_expanded=lex_edge_expanded,
            )

        if self.train_path is not None:
            self.train_dataset = make_dataset(self.train_path)
        if self.val_path is not None:
            self.val_dataset = make_dataset(self.val_path)
        if self.test_path is not None:
            self.test_dataset = make_dataset(self.test_path)
        if self.predict_path is not None:
            self.predict_dataset = make_dataset(self.predict_path)

    def create_collator(self):
        """Return the batch collator shared by all splits."""
        return MultiClassFrameEdgeBatchCollator(
            max_seq_len=self.max_seq_len,
            use_tpus=self.use_tpus,
        )
|
from threading import Timer, Thread
""" @Todos
- Create a method that will call the stop method when the process exits. Because the timer thread continues to run
even if the main program/thread is killed by keyboard interrupt, which means that the stop method is never called.
- Create a method that allows user to specify if the interval should start running immediately. Meaning that I can create
the interval and store the reference first without letting it run, this is so to allow me to do some settings/changes to
the object first, or to allow user to only execute this 'loop' when certain conditions are met.
- Create a method/check for the user to specify how many times this interval should run. So example would be, I only
want this interval based loop to loop a maximum of 10 times before I want it to automatically stop.
Or perhaps a time limit for how long this loop should loop for, like loopFor(10mins), meaning after 10 mins, the loop
should call self.stop method. This can be possibly implemented by using another 'kill interval' timer that calls the
stop method upon timeout.
- Since this module is based on the Timer Class from the threading library, and based on threads, it is not advised to
use this setInterval Class when doing actions that are CPU intensive and blocking on other threads, which may interfere
with the timing of this Class. Will be working on another class based on the same idea that will run in a seperate
process instead to allow true parallel and non concurrent execution style.
"""
class setInterval:
    """Repeatedly invoke a callback every fixed number of seconds.

    Emulates JavaScript's setInterval with threading.Timer: the callback
    fires after every interval until stop() is called or the main thread
    exits.
    """

    def __init__(self, time, fn, *args, **kwargs):
        # Interval length, callback, and the arguments forwarded on each call.
        self.__time = time
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        # The first invocation happens only after one full interval elapses.
        self.start()

    def start(self):
        """Arm a fresh Timer for one interval; returns self for call chaining."""
        self.__t = Timer(self.__time, self.timeOut)
        self.__t.start()
        return self

    def timeOut(self):
        """Timer callback: run the user function, then re-arm the timer."""
        self.fn(*self.args, **self.kwargs)
        self.start()

    def stop(self, oneLastTime=False):
        """Cancel the pending timer; optionally invoke the callback once more."""
        self.__t.cancel()
        if oneLastTime:
            self.fn(*self.args, **self.kwargs)

    def set_interval(self, time):
        """Change the interval; the new timing takes effect immediately."""
        self.stop()
        self.__time = time
        self.start()

    def set_args(self, *args, **kwargs):
        """Replace the arguments passed to the callback on subsequent firings."""
        self.args = args
        self.kwargs = kwargs
if __name__ == "__main__":
    # Demo: run this module directly to see how setInterval works.
    from time import sleep

    def hi(val):
        print(val)

    try:
        tout = setInterval(1, hi, 'hei')
        sleep(3)
        tout.set_interval(0.1)
        sleep(2)
        tout.set_interval(1)
        tout.set_args('Nuggets')
        sleep(3)
        # Normal completion: stop the loop, executing the callback one
        # last time.
        tout.stop(True)
    except (KeyboardInterrupt, SystemExit):
        # On Ctrl-C / interpreter exit, stop the loop but execute the
        # callback exactly once more (the original code called stop(True)
        # twice here, running the callback twice on interrupt).
        tout.stop(True)
|
# -*- coding: utf-8 -*-
# Модуль переменных параметров настройки "Общие данные для расчета динамики"
# (таблица "Динамика": com_dynamics) RastrWin3
class ComDynamics:
    """Column-name constants for the RastrWin3 table "com_dynamics"
    ("General data for dynamics calculation").

    Each class attribute holds the string identifier of the corresponding
    table column; `com_dynamics_attributes` maps every identifier to its
    Russian UI description, and `com_dynamics_attributes_list` is derived
    from that mapping so the two can never drift apart.
    """
    table: str = 'com_dynamics'
    table_name: str = '"Общие данные для расчета динамики"'
    Tras: str = 'Tras'  # Total simulation time
    Hint: str = 'Hint'  # Initial integration step
    Hmin: str = 'Hmin'  # Minimum integration step
    Hmax: str = 'Hmax'  # Maximum integration step
    Hout: str = 'Hout'  # Output (print) step
    Mint: str = 'Mint'  # Main integration method
    SMint: str = 'SMint'  # Starting integration method
    IntEpsilon: str = 'IntEpsilon'  # Integration step accuracy
    InformOnStepChange: str = 'InformOnStepChange'  # Report integration step changes
    Tf: str = 'Tf'  # Smoothing time constant of node angular speed (frequency)
    dEf: str = 'dEf'  # EMF balancing accuracy when modelling salient poles
    Npf: str = 'Npf'  # Max load-flow recalculations per step with salient poles
    Valid: str = 'Valid'  # Input parameter validation
    dempfrec: str = 'dempfrec'  # Damping in the equations of motion
    corrT: str = 'corrT'  # Correct T in Park models
    IsDemp: str = 'IsDemp'  # Account for damping torque in models with damper windings
    frSXNtoY: str = 'frSXNtoY'  # Voltage for switching from load characteristic to shunt
    SXNTolerance: str = 'SXNTolerance'  # Allowed load-characteristic imbalance
    SnapPath: str = 'SnapPath'  # Output directory for result files
    MaxResultFiles: str = 'MaxResultFiles'  # Maximum number of result files
    SnapTemplate: str = 'SnapTemplate'  # Output file name template
    SnapAutoLoad: str = 'SnapAutoLoad'  # Auto-load the last result
    SnapMaxCount: str = 'SnapMaxCount'  # Maximum number of result slots
    TripGeneratorOnSpeed: str = 'TripGeneratorOnSpeed'  # Trip generator on overspeed, %
    PickupDropout: str = 'PickupDropout'  # Report automation pickup/dropout
    RealtimeCSV: str = 'RealtimeCSV'  # Write monitored values to CSV
    PeriodAngle: str = 'PeriodAngle'  # Display angles within +/-180 degrees
    ResultFlowDirection: str = 'ResultFlowDirection'  # Positive direction of results
    TreatWarningsAsErrors: str = 'TreatWarningsAsErrors'  # Treat warnings as errors
    EventProcess: str = 'EventProcess'  # Discrete-event processing method
    com_dynamics_table = 'com_dynamics'
    com_dynamics_attributes = {
        'Tras': 'Время расчета (Tras)',
        'Hint': 'Начальный шаг интегрирования (H_инт)',
        'Hmin': 'Минимальный шаг интегрирования (H_мин)',
        'Hmax': 'Максимальный шаг интегрирования (H_макс)',
        'Hout': 'Шаг печати (H_печ)',
        'Mint': 'Основной метод интегрирования (Осн.Метод)',
        'SMint': 'Стартовый метод интегрирования (Старт.Метод)',
        'IntEpsilon': 'Точность шага интегрирования (dInt)',
        'InformOnStepChange': 'Информировать об изменении шага (Выводить шаг)',
        'Tf': 'Постоянная сглаживания угловой скорости (частоты) узла (Tf)',
        'dEf': 'Точность балансировки эдс при учете явнополюсности (dEf)',
        'Npf': 'Макс число пересчетов УР на шаге при учете явнополюсности (Ит)',
        'Valid': 'Контроль входных параметров (Контр.)',
        'dempfrec': 'Демпфирование в уравнениях движения (Демпф)',
        'corrT': 'Корректировать Т в парковских моделях (Корр Т)',
        'IsDemp': 'Учет демп. момента в моделях с демп контурами (Уч Демп)',
        'frSXNtoY': 'Напряжения перехода с СХН на шунт (V_минСХРН)',
        'SXNTolerance': 'Допустимый небаланс СХН (SXNTol)',
        'SnapPath': 'Выходной каталог файлов результатов (Кат. результатов)',
        'MaxResultFiles': 'Максимальное кол-во файлов результатов (Макс. файлов)',
        'SnapTemplate': 'Шаблон имени выходного файла (Шаблон имени)',
        'SnapAutoLoad': 'Автозагрузка последнего результата (Автозагрузка)',
        'SnapMaxCount': 'Максимальное кол-во слотов результатов (Макс. рез-тов)',
        'TripGeneratorOnSpeed': 'Отключать генератор при превышении скорости % (Уставка автоматов безопасности)',
        'PickupDropout': 'Информировать о пуске/возврате автоматики (Информировать о пуске/возврате автоматики)',
        'RealtimeCSV': 'Выводить контролируемые величины в CSV (Выводить контролируемые величины в CSV)',
        'PeriodAngle': 'Отображать углы в диапазоне +/-180 (Отображать углы в диапазоне +/-180)',
        'ResultFlowDirection': 'Положительное направление результатов (Положительное направление результатов)',
        'TreatWarningsAsErrors': 'Считать предупреждения ошибками (Предупреждение=Ошибка)',
        'EventProcess': 'Метод обработки дискретных изменений (Дискретные изменения)'
    }
    # Derived from the mapping (insertion order preserved) instead of being
    # maintained as a second hand-written copy of the same 30 names.
    com_dynamics_attributes_list = list(com_dynamics_attributes)
|
<filename>apps/webapp/views.py
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect, render
from django.views import generic
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from . forms import FilmForm, VehicleForm
from . models import Film, FilmCharacter, FilmPlanet, Person, Planet, Species, Starship, Vehicle, VehiclePassenger
class HomePageView(generic.TemplateView):
    """Render the site home page."""
    template_name = 'webapp/home.html'
class AboutPageView(generic.TemplateView):
    """Render the about page with per-model record counts."""
    template_name = 'webapp/about.html'

    def get_context_data(self, **kwargs):
        """Add the number of rows for each model to the template context."""
        context = super().get_context_data(**kwargs)
        # Model.objects.count() is the idiomatic (and equivalent) form of
        # Model.objects.all().count().
        context['Films'] = Film.objects.count()
        context['Persons'] = Person.objects.count()
        context['Planets'] = Planet.objects.count()
        context['Species'] = Species.objects.count()
        context['Starships'] = Starship.objects.count()
        context['Vehicles'] = Vehicle.objects.count()
        return context
class ContributerPageView(generic.TemplateView):
    """Render the contributors page (class/template keep the original spelling)."""
    template_name = 'webapp/contributers.html'
# def about(request):
# # stripe_key = settings.STRIPE_KEYS['publishable']
# # data = cache.get('resource_data')
# # if not data:
# # data = get_resource_stats()
# # cache.set('resource_data', data, 10000)
# # data['stripe_key'] = stripe_key
# data = 'A long time ago in a galaxy far, far away.'
# return render(request, "about.html", data)
class DocsPageView(generic.TemplateView):
    """Render the documentation landing page."""
    template_name = 'webapp/docs.html'
class RootPageView(generic.TemplateView):
    """Render the site root page."""
    template_name = 'webapp/root.html'
@method_decorator(login_required, name='dispatch')
class FilmCreateView(generic.View):
    """Create a Film together with its character and planet link rows.

    GET renders an empty FilmForm; POST validates it, saves the film, and
    creates one FilmCharacter/FilmPlanet row per selected relation.
    (The former no-op dispatch() override has been removed.)
    """
    model = Film
    form_class = FilmForm
    success_message = "Film created successfully"
    template_name = 'webapp/film_new.html'

    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            film = form.save(commit=False)
            film.save()
            # Persist the many-to-many selections through the explicit
            # link models.
            for character in form.cleaned_data['characters']:
                FilmCharacter.objects.create(film=film, character=character)
            for planet in form.cleaned_data['planets']:
                FilmPlanet.objects.create(film=film, planet=planet)
            return redirect(film)  # shortcut to object's get_absolute_url()
        return render(request, self.template_name, {'form': form})

    def get(self, request):
        form = self.form_class()
        return render(request, self.template_name, {'form': form})
@method_decorator(login_required, name='dispatch')
class FilmDeleteView(generic.DeleteView):
    """Delete a Film after removing its character and planet link rows.

    (The former no-op dispatch() override has been removed.)
    """
    model = Film
    success_message = "Film deleted successfully"
    success_url = reverse_lazy('films')
    context_object_name = 'film'
    template_name = 'webapp/film_delete.html'

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Remove the explicit link rows first so no orphaned references
        # survive the film deletion.
        FilmCharacter.objects \
            .filter(film_id=self.object.film_id) \
            .delete()
        FilmPlanet.objects \
            .filter(film_id=self.object.film_id) \
            .delete()
        self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
@method_decorator(login_required, name='dispatch')
class FilmUpdateview(generic.UpdateView):
    """Update a Film and synchronise its character and planet link rows.

    The characters/planets selected in the form are diffed against the
    existing FilmCharacter/FilmPlanet rows: newly selected links are
    created and deselected links are deleted.
    """
    model = Film
    form_class = FilmForm
    context_object_name = 'film'
    success_message = "Film updated successfully"
    template_name = 'webapp/film_update.html'

    def _sync_characters(self, film, characters):
        """Create/delete FilmCharacter rows so they match *characters*."""
        # Materialise the existing ids up front so membership tests are O(1)
        # and do not re-query the database after the inserts below.
        old_ids = set(FilmCharacter.objects
                      .values_list('character_id', flat=True)
                      .filter(film_id=film.film_id))
        new_ids = {character.person_id for character in characters}
        for character in characters:
            if character.person_id not in old_ids:
                FilmCharacter.objects.create(film=film, character=character)
        for old_id in old_ids - new_ids:
            FilmCharacter.objects \
                .filter(film_id=film.film_id, character_id=old_id) \
                .delete()

    def _sync_planets(self, film, planets):
        """Create/delete FilmPlanet rows so they match *planets*."""
        old_ids = set(FilmPlanet.objects
                      .values_list('planet_id', flat=True)
                      .filter(film_id=film.film_id))
        new_ids = {planet.planet_id for planet in planets}
        for planet in planets:
            if planet.planet_id not in old_ids:
                FilmPlanet.objects.create(film=film, planet=planet)
        for old_id in old_ids - new_ids:
            FilmPlanet.objects \
                .filter(film_id=film.film_id, planet_id=old_id) \
                .delete()

    def form_valid(self, form):
        """Save the film, then reconcile both link tables with the form."""
        film = form.save(commit=False)
        film.save()
        self._sync_characters(film, form.cleaned_data['characters'])
        self._sync_planets(film, form.cleaned_data['planets'])
        return redirect('film_detail', pk=film.pk)
class FilmPageView(generic.TemplateView):
    """Render the film documentation page."""
    template_name = 'webapp/film_docs.html'
class PersonPageView(generic.TemplateView):
    """Render the person documentation page."""
    template_name = 'webapp/person_docs.html'
class PlanetPageView(generic.TemplateView):
    """Render the planet documentation page."""
    template_name = 'webapp/planet_docs.html'
class SpeciesPageView(generic.TemplateView):
    """Render the species documentation page."""
    template_name = 'webapp/species_docs.html'
class StarshipPageView(generic.TemplateView):
    """Render the starship documentation page."""
    template_name = 'webapp/starship_docs.html'
class VehiclePageView(generic.TemplateView):
    """Render the vehicle documentation page."""
    template_name = 'webapp/vehicle_docs.html'
class FilmDetailView(generic.DetailView):
    """Display a single Film.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    """
    model = Film
    context_object_name = 'film'
    template_name = 'webapp/film_detail.html'
class FilmListView(generic.ListView):
    """List all films."""
    model = Film
    context_object_name = 'films'
    template_name = 'webapp/films.html'
    # paginate_by = 20
    # def dispatch(self, *args, **kwargs):
    # return super().dispatch(*args, **kwargs)
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Film.objects.all()
        # return Person.objects.select_related('homeworld').order_by('name')
class PersonDetailView(generic.DetailView):
    """Display a single Person.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    NOTE(review): context_object_name is the plural 'persons' for a detail
    view — templates rely on it, so it is kept; confirm it is intended.
    """
    model = Person
    context_object_name = 'persons'
    template_name = 'webapp/person_detail.html'
class PersonListView(generic.ListView):
    """List all persons."""
    model = Person
    context_object_name = 'persons'
    template_name = 'webapp/persons.html'
    # paginate_by = 20
    # def dispatch(self, *args, **kwargs):
    # return super().dispatch(*args, **kwargs)
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Person.objects.all()
        # return Person.objects.select_related('homeworld').order_by('name')
class PlanetDetailView(generic.DetailView):
    """Display a single Planet.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    """
    model = Planet
    context_object_name = 'planets'
    template_name = 'webapp/planet_detail.html'
class PlanetListView(generic.ListView):
    """List all planets."""
    model = Planet
    context_object_name = 'planets'
    template_name = 'webapp/planets.html'
    # paginate_by = 20
    # def dispatch(self, *args, **kwargs):
    # return super().dispatch(*args, **kwargs)
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Planet.objects.all()
        # return Person.objects.select_related('homeworld').order_by('name')
class SpeciesDetailView(generic.DetailView):
    """Display a single Species.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    """
    model = Species
    context_object_name = 'species'
    template_name = 'webapp/species_detail.html'
class SpeciesListView(generic.ListView):
    """List all species."""
    model = Species
    context_object_name = 'species'
    template_name = 'webapp/species.html'
    # paginate_by = 20
    # def dispatch(self, *args, **kwargs):
    # return super().dispatch(*args, **kwargs)
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Species.objects.all()
        # return Species.objects.select_related('homeworld').order_by('name')
class StarshipDetailView(generic.DetailView):
    """Display a single Starship.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    """
    model = Starship
    context_object_name = 'starships'
    template_name = 'webapp/starship_detail.html'
class StarshipListView(generic.ListView):
    """List all starships."""
    model = Starship
    context_object_name = 'starships'
    template_name = 'webapp/starships.html'
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Starship.objects.all()
        # return Starship.objects.select_related('?').order_by('?')
@method_decorator(login_required, name='dispatch')
class VehicleCreateView(generic.View):
    """Create a Vehicle together with its passenger link rows.

    GET renders an empty VehicleForm; POST validates it, saves the vehicle
    and creates one VehiclePassenger row per selected passenger.
    (The former no-op dispatch() override has been removed.)
    """
    model = Vehicle
    form_class = VehicleForm
    success_message = "Vehicle created successfully"
    template_name = 'webapp/vehicle_new.html'

    def post(self, request):
        form = self.form_class(request.POST)
        if form.is_valid():
            vehicle = form.save(commit=False)
            vehicle.save()
            # Guard kept from the original: 'passengers' may be empty/falsy.
            if form.cleaned_data['passengers']:
                for passenger in form.cleaned_data['passengers']:
                    VehiclePassenger.objects.create(vehicle=vehicle, passenger=passenger)
            return HttpResponseRedirect(vehicle.get_absolute_url())
        return render(request, self.template_name, {'form': form})

    def get(self, request):
        form = self.form_class()
        return render(request, self.template_name, {'form': form})
@method_decorator(login_required, name='dispatch')
class VehicleDeleteView(generic.DeleteView):
    """Delete a Vehicle after removing its passenger link rows.

    (The former no-op dispatch() override has been removed.)
    """
    model = Vehicle
    success_message = "Vehicle deleted successfully"
    success_url = reverse_lazy('vehicles')
    context_object_name = 'vehicles'
    template_name = 'webapp/vehicle_delete.html'

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        # Remove the explicit link rows first so no orphaned references
        # survive the vehicle deletion.
        VehiclePassenger.objects \
            .filter(vehicle_id=self.object.vehicle_id) \
            .delete()
        self.object.delete()
        return HttpResponseRedirect(self.get_success_url())
class VehicleDetailView(generic.DetailView):
    """Display a single Vehicle.

    The former get_object() override only returned super().get_object()
    and has been removed as dead code.
    """
    model = Vehicle
    context_object_name = 'vehicles'
    template_name = 'webapp/vehicle_detail.html'
class VehicleListView(generic.ListView):
    """List all vehicles."""
    model = Vehicle
    context_object_name = 'vehicles'
    template_name = 'webapp/vehicles.html'
    def get_queryset(self):
        # Full queryset; equivalent to the ListView default for `model`.
        return Vehicle.objects.all()
@method_decorator(login_required, name='dispatch')
class VehicleUpdateView(generic.UpdateView):
    """Update a Vehicle.

    NOTE(review): unlike FilmUpdateview, this view does not synchronise the
    passengers relation on update — confirm whether that is intended.
    (The former no-op dispatch() override has been removed.)
    """
    model = Vehicle
    form_class = VehicleForm
    context_object_name = 'vehicles'
    success_message = "Vehicle updated successfully"
    template_name = 'webapp/vehicle_update.html'

    def form_valid(self, form):
        vehicle = form.save(commit=False)
        vehicle.save()
        return HttpResponseRedirect(vehicle.get_absolute_url())
<filename>lib/meshrenderer/gl_utils/window.py
# -*- coding: utf-8 -*-
# flake8: noqa
import cyglfw3 as glfw
from OpenGL.GL import *
from OpenGL.GL.NV.bindless_texture import *
class Window(object):
    """Thin wrapper around a GLFW window with pluggable input callbacks.

    Creates the GL context, requires the NV bindless-texture extension, and
    exposes callback lists (`framebuffer_size_callback`, `key_callback`,
    `mouse_callback`, `mouse_button_callback`, `scroll_callback`) that users
    can append to. Usable as a context manager; exiting terminates GLFW.
    """

    def __init__(
        self,
        window_width,
        window_height,
        samples=1,
        window_title="",
        monitor=1,
        show_at_center=True,
        offscreen=False,
    ):
        self.window_title = window_title
        assert glfw.Init(), "Glfw Init failed!"
        glfw.WindowHint(glfw.SAMPLES, samples)
        if offscreen:
            # Hidden window: render without showing anything on screen.
            glfw.WindowHint(glfw.VISIBLE, False)
        # `is not None` instead of `!= None` (identity comparison for None).
        mon = glfw.GetMonitors()[monitor] if monitor is not None else None
        self.windowID = glfw.CreateWindow(window_width, window_height, self.window_title, mon)
        assert self.windowID, "Could not create Window!"
        glfw.MakeContextCurrent(self.windowID)
        if not glInitBindlessTextureNV():
            raise RuntimeError("Bindless Textures not supported")
        self.framebuf_width, self.framebuf_height = glfw.GetFramebufferSize(self.windowID)

        # User-extensible callback lists; the registered closures fan out to
        # every entry appended here.
        self.framebuffer_size_callback = []

        def framebuffer_size_callback(window, w, h):
            self.framebuf_width, self.framebuf_height = w, h
            for callback in self.framebuffer_size_callback:
                callback(w, h)

        glfw.SetFramebufferSizeCallback(self.windowID, framebuffer_size_callback)
        self.key_callback = []

        def key_callback(window, key, scancode, action, mode):
            # Built-in behaviour: ESC requests window close.
            if action == glfw.PRESS:
                if key == glfw.KEY_ESCAPE:
                    glfw.SetWindowShouldClose(window, True)
            for callback in self.key_callback:
                callback(key, scancode, action, mode)

        glfw.SetKeyCallback(self.windowID, key_callback)
        self.mouse_callback = []

        def mouse_callback(window, xpos, ypos):
            for callback in self.mouse_callback:
                callback(xpos, ypos)

        glfw.SetCursorPosCallback(self.windowID, mouse_callback)
        self.mouse_button_callback = []

        def mouse_button_callback(window, button, action, mods):
            for callback in self.mouse_button_callback:
                callback(button, action, mods)

        glfw.SetMouseButtonCallback(self.windowID, mouse_button_callback)
        self.scroll_callback = []

        def scroll_callback(window, xoffset, yoffset):
            for callback in self.scroll_callback:
                callback(xoffset, yoffset)

        glfw.SetScrollCallback(self.windowID, scroll_callback)
        self.previous_second = glfw.GetTime()
        self.frame_count = 0.0
        if show_at_center:
            monitors = glfw.GetMonitors()
            assert monitor >= 0 and monitor < len(monitors), "Invalid monitor selected."
            vidMode = glfw.GetVideoMode(monitors[monitor])
            # Integer division: GLFW expects int pixel coordinates; under
            # Python 3 the original `/` produced floats here.
            glfw.SetWindowPos(
                self.windowID,
                vidMode.width // 2 - self.framebuf_width // 2,
                vidMode.height // 2 - self.framebuf_height // 2,
            )

    def update_fps_counter(self):
        """Show a rolling FPS figure in the window title (updated ~1x/sec)."""
        current_second = glfw.GetTime()
        elapsed_seconds = current_second - self.previous_second
        if elapsed_seconds > 1.0:
            self.previous_second = current_second
            fps = float(self.frame_count) / float(elapsed_seconds)
            glfw.SetWindowTitle(self.windowID, "%s @ FPS: %.2f" % (self.window_title, fps))
            self.frame_count = 0.0
        self.frame_count += 1.0

    def is_open(self):
        """Return True while the window has not been asked to close."""
        return not glfw.WindowShouldClose(self.windowID)

    def swap_buffers(self):
        glfw.SwapBuffers(self.windowID)

    def poll_events(self):
        glfw.PollEvents()

    def update(self):
        """One frame step: swap buffers, poll input, refresh the FPS title."""
        self.swap_buffers()
        self.poll_events()
        self.update_fps_counter()

    def close(self):
        # NOTE(review): terminates GLFW globally, not just this window.
        glfw.Terminate()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()
|
from multiprocessing import Process, Queue
from langumo.building import Builder
from langumo.utils import (AuxiliaryFile, AuxiliaryFileManager, colorful,
SentenceSplitter)
from typing import Iterable
class Parser:
    """Abstract base for corpus parsers.

    Subclasses must implement `extract` (yield raw documents from the file)
    and `parse` (convert one raw document to plain text); `prepare` is an
    optional hook run once before extraction.
    """
    def prepare(self, raw: AuxiliaryFile):
        # Optional one-time setup hook; default is a no-op.
        pass
    def extract(self, raw: AuxiliaryFile) -> Iterable[str]:
        # Yield raw-formatted documents from the auxiliary file.
        raise NotImplementedError('this method must be implemented by '
                                  'inheritor.')
    def parse(self, text: str) -> str:
        # Convert a single raw document into plain text.
        raise NotImplementedError('this method must be implemented by '
                                  'inheritor.')
class ParseRawFile(Builder):
    """Builder that parses a raw corpus file into length-bounded plain-text
    sequences using `num_workers` parser processes plus one collector process.

    Parsed sentences are grouped until their combined length exceeds
    `max_len`; paragraph breaks inside a group are encoded with `newline`.
    """
    def __init__(self,
                 parser: Parser,
                 lang: str,
                 min_len: int,
                 max_len: int,
                 newline: str = '[NEWLINE]',
                 num_workers: int = 1):
        self.parser = parser
        self.lang = lang
        self.min_len = min_len
        self.max_len = max_len
        self.newline = newline
        self.num_workers = num_workers
    def _parse_worker(self, from_queue: Queue, to_queue: Queue):
        """Worker loop: parse raw documents from `from_queue`, emit grouped
        text sequences to `to_queue`; a None input is the shutdown signal."""
        splitter = SentenceSplitter(self.lang)
        while True:
            # Get raw-formatted document from main process.
            document = from_queue.get()
            if document is None:
                # Forward the sentinel so the collector can count terminations.
                to_queue.put(None)
                break
            # Parse the document to the plain text.
            parsed = self.parser.parse(document)
            # Divide the document into sequences with required length.
            group_sentences = []
            for paragraph in parsed.splitlines():
                for sentence in splitter.tokenize(paragraph):
                    group_sentences.append(sentence)
                    if sum(len(s) for s in group_sentences) > self.max_len:
                        to_queue.put(' '.join(group_sentences))
                        group_sentences.clear()
                # Use custom line-break token instead of `\n` which is used for
                # separating sequences.
                if group_sentences:
                    group_sentences.append(self.newline)
            # Use the remainder in dataset if its length is suitable.
            if group_sentences and group_sentences[-1] == self.newline:
                # Drop a trailing paragraph-break token before joining.
                group_sentences = group_sentences[:-1]
            text = ' '.join(group_sentences)
            if len(text) > self.min_len and len(text) < self.max_len:
                to_queue.put(text)
    def _collect_worker(self, parsed: AuxiliaryFile, to_queue: Queue):
        """Collector loop: write texts from `to_queue` to `parsed` until one
        None sentinel per parser worker has been received."""
        terminated = 0
        with parsed.open('w') as fp:
            while terminated < self.num_workers:
                text = to_queue.get()
                if text is None:
                    terminated += 1
                    continue
                # Ensure one sequence per line in the output file.
                text += '\n' if not text.endswith('\n') else ''
                fp.write(text)
    def build(self, afm: AuxiliaryFileManager, raw: AuxiliaryFile
              ) -> AuxiliaryFile:
        """Run the parse/collect pipeline and return the parsed output file."""
        parsed = afm.create()
        self.parser.prepare(raw)
        # Create processes for parsing texts in parallel and a process for
        # collecting the parsed texts and saving to the auxiliary file.
        from_queue, to_queue = Queue(), Queue()
        parsers = [Process(target=self._parse_worker,
                           args=(from_queue, to_queue),
                           daemon=True)
                   for _ in range(self.num_workers)]
        collector = Process(target=self._collect_worker,
                            args=(parsed, to_queue),
                            daemon=True)
        # Start the processes.
        print(colorful.render(f'<r>[*]</r> parse raw-formatted corpus file '
                              f'with <g>{self.parser.__class__.__name__}</g>'))
        for p in parsers:
            p.start()
        collector.start()
        # Feed the extracted raw-formatted document to each parser process.
        for document in self.parser.extract(raw):
            from_queue.put(document)
        # One shutdown sentinel per parser worker.
        for _ in range(self.num_workers):
            from_queue.put(None)
        # Wait for terminating the processes.
        for p in parsers:
            p.join()
        collector.join()
        return parsed
|
<filename>tests/test_controllers.py
from pilco.controllers import RbfController, LinearController, squash_sin
import numpy as np
import os
import tensorflow as tf
import oct2py
# Octave bridge used to compare results against the reference MATLAB code.
octave = oct2py.Oct2Py()
# NOTE(review): "__file__" here is a string literal, so realpath resolves it
# relative to the current working directory — the tests must be run from the
# repository root for "tests/Matlab Code" to be found. Confirm intended.
dir_path = os.path.dirname(os.path.realpath("__file__")) + "/tests/Matlab Code"
octave.addpath(dir_path)
from gpflow import config
# Default float dtype configured in gpflow (kept for parity with the models).
float_type = config.default_float()
def test_rbf():
    """Compare RbfController.compute_action against the MATLAB gp2 reference
    implementation on a random (but seeded) smooth dataset."""
    np.random.seed(0)
    d = 3  # Input dimension
    k = 2  # Number of outputs
    b = 100  # basis functions
    # Training Dataset
    X0 = np.random.rand(100, d)
    A = np.random.rand(d, k)
    Y0 = np.sin(X0).dot(A) + 1e-3*(np.random.rand(100, k) - 0.5)  # Just something smooth
    rbf = RbfController(3, 2, b)
    rbf.set_data((X0, Y0))
    # Generate input
    m = np.random.rand(1, d)  # But MATLAB defines it as m'
    s = np.random.rand(d, d)
    s = s.dot(s.T)  # Make s positive semidefinite
    M, S, V = rbf.compute_action(m, s, squash=False)
    # convert data to the struct expected by the MATLAB implementation
    lengthscales = np.stack([model.kernel.lengthscales.numpy() for model in rbf.models])
    variance = np.stack([model.kernel.variance.numpy() for model in rbf.models])
    noise = np.stack([model.likelihood.variance.numpy() for model in rbf.models])
    # MATLAB hyperparameters are stored as logs of [lengthscales, signal
    # std-dev, noise std-dev], one column per output.
    hyp = np.log(np.hstack(
        (lengthscales,
         np.sqrt(variance[:, None]),
         np.sqrt(noise[:, None]))
    )).T
    gpmodel = oct2py.io.Struct()
    gpmodel.hyp = hyp
    gpmodel.inputs = X0
    gpmodel.targets = Y0
    # Call gp0 in octave
    M_mat, S_mat, V_mat = octave.gp2(gpmodel, m.T, s, nout=3)
    assert M.shape == M_mat.T.shape
    assert S.shape == S_mat.shape
    assert V.shape == V_mat.shape
    np.testing.assert_allclose(M, M_mat.T, rtol=1e-4)
    np.testing.assert_allclose(S, S_mat, rtol=1e-4)
    np.testing.assert_allclose(V, V_mat, rtol=1e-4)
def test_linear():
    """Compare LinearController.compute_action against the MATLAB conlin
    reference implementation."""
    np.random.seed(0)
    d = 3  # Input dimension
    k = 2  # Output dimension
    # Generate input
    m = np.random.rand(1, d)  # But MATLAB defines it as m'
    s = np.random.rand(d, d)
    s = s.dot(s.T)  # Make s positive semidefinite
    W = np.random.rand(k, d)  # But MATLAB defines it as m'
    b = np.random.rand(1, k)
    linear = LinearController(d, k)
    linear.W.assign(W)
    linear.b.assign(b)
    M, S, V = linear.compute_action(m, s, squash=False)
    # convert data to the struct expected by the MATLAB implementation
    policy = oct2py.io.Struct()
    policy.p = oct2py.io.Struct()
    policy.p.w = W
    policy.p.b = b.T
    # Call function in octave
    M_mat, S_mat, V_mat = octave.conlin(policy, m.T, s, nout=3)
    assert M.shape == M_mat.T.shape
    assert S.shape == S_mat.shape
    assert V.shape == V_mat.shape
    # NOTE(review): the mean comparison below is disabled — confirm whether
    # that was a deliberate tolerance issue or an oversight.
    #np.testing.assert_allclose(M, M_mat.T, rtol=1e-4)
    np.testing.assert_allclose(S, S_mat, rtol=1e-4)
    np.testing.assert_allclose(V, V_mat, rtol=1e-4)
def test_squash():
    """Compare squash_sin against the MATLAB gSin reference implementation."""
    np.random.seed(0)
    d = 3  # Control dimensions
    m = np.random.rand(1, d)  # But MATLAB defines it as m'
    s = np.random.rand(d, d)
    s = s.dot(s.T)  # Make s positive semidefinite
    e = 7.0  # Squash amplitude
    M, S, V = squash_sin(m, s, e)
    M_mat, S_mat, V_mat = octave.gSin(m.T, s, e, nout=3)
    M_mat = np.asarray(M_mat)
    assert M.shape == M_mat.T.shape
    assert S.shape == S_mat.shape
    assert V.shape == V_mat.shape
    np.testing.assert_allclose(M, M_mat.T, rtol=1e-4)
    np.testing.assert_allclose(S, S_mat, rtol=1e-4)
    np.testing.assert_allclose(V, V_mat, rtol=1e-4)
if __name__ == '__main__':
    # Allow running this test module directly without pytest.
    test_rbf()
    test_linear()
    test_squash()
|
from tw.api import Widget
from tw.forms import CalendarDatePicker, CalendarDateTimePicker, TableForm, DataGrid
from tw.forms.fields import (SingleSelectField, MultipleSelectField, InputField, HiddenField,
TextField, FileField, PasswordField, TextArea, Label)
from formencode.schema import Schema
from formencode.validators import StringBool
from formencode import Invalid
class SproxMethodPutHiddenField(HiddenField):
    """Hidden field that emulates an HTTP PUT method in a form."""
    available_engines = ['mako', 'genshi']
    template="sprox.widgets.tw1widgets.templates.hidden_put"
class SproxCalendarDatePicker(CalendarDatePicker):
    """Date picker using ISO date format."""
    date_format = '%Y-%m-%d'
class SproxTimePicker(CalendarDateTimePicker):
    """Time-only picker (reuses the datetime picker with a time format)."""
    date_format = '%H:%M:%S'
class SproxCalendarDateTimePicker(CalendarDateTimePicker):
    """Datetime picker using ISO datetime format."""
    date_format = '%Y-%m-%d %H:%M:%S'
class SproxDataGrid(DataGrid):
    """DataGrid with primary-key/controller awareness and XML action fields."""
    available_engines = ['mako', 'genshi']
    template = "sprox.widgets.tw1widgets.templates.datagrid"
    params = ['pks', 'controller', 'xml_fields']
    xml_fields = ['actions']
class ContainerWidget(Widget):
    """Generic container widget bound to a controller."""
    template = "genshi:sprox.widgets.tw1widgets.templates.container"
    params = ["controller",]
class TableLabelWidget(Widget):
    """Label widget for a table, identified by `identifier`."""
    template = "genshi:sprox.widgets.tw1widgets.templates.tableLabel"
    params = ["identifier", "controller"]
class ModelLabelWidget(Widget):
    """Label widget for a model, identified by `identifier`."""
    available_engines = ['mako', 'genshi']
    template = "sprox.widgets.tw1widgets.templates.modelLabel"
    params = ["identifier", "controller"]
class EntityLabelWidget(Widget):
    """Label widget for an entity instance."""
    template = "genshi:sprox.widgets.tw1widgets.templates.entityLabel"
    params = ["entity", "controller"]
class RecordViewWidget(Widget):
    """Read-only table view of a single record."""
    template = "genshi:sprox.widgets.tw1widgets.templates.recordViewTable"
    params = ["entity"]
class RecordFieldWidget(Widget):
    """Renders a single field of a record."""
    template = "genshi:sprox.widgets.tw1widgets.templates.recordField"
    params = ['field_name']
class TableDefWidget(Widget):
    """Renders a table definition, identified by `identifier`."""
    template = "genshi:sprox.widgets.tw1widgets.templates.tableDef"
    params = ["identifier"]
class EntityDefWidget(Widget):
    """Renders an entity definition."""
    available_engines = ['genshi']
    template = "sprox.widgets.tw1widgets.templates.entityDef"
    params = ["entity"]
class TableWidget(Widget):
    """Plain table widget."""
    available_engines = ['genshi']
    template = "genshi:sprox.widgets.tw1widgets.templates.table"
class SproxTableForm(TableForm):
    """TableForm with a permissive schema (extra/missing keys tolerated)."""
    available_engines = ['mako', 'genshi']
    validator = Schema(ignore_missing_keys=True, allow_extra_fields=True)
    template = "sprox.widgets.tw1widgets.templates.tableForm"
# Custom checkbox widget (the stock ToscaWidgets one behaves undesirably).
class SproxCheckBox(InputField):
    """Checkbox whose checked state is derived from the incoming value via
    a StringBool validation; invalid values render unchecked."""
    available_engines = ['mako', 'genshi']
    template = "sprox.widgets.tw1widgets.templates.checkbox"
    # NOTE(review): the validator class itself (not an instance) is assigned
    # here and to_python is called on it below — confirm formencode supports
    # this usage in the targeted version.
    validator = StringBool
    def update_params(self, d):
        InputField.update_params(self, d)
        try:
            checked = self.validator.to_python(d.value)
        except Invalid:
            # Unparseable value: render the box unchecked.
            checked = False
        # HTML convention: attribute present when checked, absent otherwise.
        d.attrs['checked'] = checked or None
class PropertyMixin(Widget):
    """Mixin that fills the ``options`` entry of a widget's params dict with
    the provider's dropdown options for (entity, field_name)."""
    params = ['entity', 'field_name', 'provider', 'dropdown_field_names']

    def _my_update_params(self, d, nullable=False):
        """Populate d['options']; optionally append a "no selection" entry.

        Returns d (or an empty dict when there are no options — callers
        ignore the return value and rely on the mutation of d).
        """
        # (The unused local `entity = self.entity` was removed.)
        options = self.provider.get_dropdown_options(self.entity, self.field_name, self.dropdown_field_names)
        if nullable:
            options.append([None,"-----------"])
        if len(options) == 0:
            return {}
        d['options']= options
        return d
class PropertySingleSelectField(SingleSelectField, PropertyMixin):
    """Single-select dropdown whose options come from the provider."""
    params=["nullable", "disabled"]
    nullable=False
    disabled=False
    def update_params(self, d):
        # Fill d['options'] first, then let the base field finish.
        self._my_update_params(d,nullable=self.nullable)
        SingleSelectField.update_params(self, d)
        return d
class PropertyMultipleSelectField(MultipleSelectField, PropertyMixin):
    """Multi-select list whose options come from the provider."""
    params=["disabled"]
    disabled=False
    def update_params(self, d):
        # Fill d['options'] first, then let the base field finish.
        self._my_update_params(d)
        MultipleSelectField.update_params(self, d)
        return d
class SubDocument(TextField):
    """Placeholder field for nested/sub-document values."""
    pass
import datetime
import dateutil.tz
import pytz
import requests_mock
import warnings
try:
import zoneinfo
except ImportError:
from backports import zoneinfo
from exchangelib.errors import UnknownTimeZone, NaiveDateTimeNotAllowed
from exchangelib.ewsdatetime import EWSDateTime, EWSDate, EWSTimeZone, UTC
from exchangelib.winzone import generate_map, CLDR_TO_MS_TIMEZONE_MAP, CLDR_WINZONE_URL, CLDR_WINZONE_VERSION
from exchangelib.util import CONNECTION_ERRORS
from .common import TimedTestCase
class EWSDateTimeTest(TimedTestCase):
    def test_super_methods(self):
        """Constructors inherited from datetime must return EWSDateTime."""
        tz = EWSTimeZone('Europe/Copenhagen')
        self.assertIsInstance(EWSDateTime.now(), EWSDateTime)
        self.assertIsInstance(EWSDateTime.now(tz=tz), EWSDateTime)
        self.assertIsInstance(EWSDateTime.utcnow(), EWSDateTime)
        self.assertIsInstance(EWSDateTime.fromtimestamp(123456789), EWSDateTime)
        self.assertIsInstance(EWSDateTime.fromtimestamp(123456789, tz=tz), EWSDateTime)
        self.assertIsInstance(EWSDateTime.utcfromtimestamp(123456789), EWSDateTime)
    def test_ewstimezone(self):
        """EWSTimeZone construction, IANA→MS mapping and error handling."""
        # Test autogenerated translations
        tz = EWSTimeZone('Europe/Copenhagen')
        self.assertIsInstance(tz, EWSTimeZone)
        self.assertEqual(tz.key, 'Europe/Copenhagen')
        self.assertEqual(tz.ms_id, 'Romance Standard Time')
        # self.assertEqual(EWSTimeZone('Europe/Copenhagen').ms_name, '')  # EWS works fine without the ms_name
        # Test localzone()
        tz = EWSTimeZone.localzone()
        self.assertIsInstance(tz, EWSTimeZone)
        # Test common helpers
        tz = EWSTimeZone('UTC')
        self.assertIsInstance(tz, EWSTimeZone)
        self.assertEqual(tz.key, 'UTC')
        self.assertEqual(tz.ms_id, 'UTC')
        tz = EWSTimeZone('GMT')
        self.assertIsInstance(tz, EWSTimeZone)
        self.assertEqual(tz.key, 'GMT')
        self.assertEqual(tz.ms_id, 'UTC')
        # Test mapper contents. Latest map from unicode.org has 394 entries
        self.assertGreater(len(EWSTimeZone.IANA_TO_MS_MAP), 300)
        for k, v in EWSTimeZone.IANA_TO_MS_MAP.items():
            self.assertIsInstance(k, str)
            self.assertIsInstance(v, tuple)
            self.assertEqual(len(v), 2)
            self.assertIsInstance(v[0], str)
        # Test timezone unknown by ZoneInfo
        with self.assertRaises(UnknownTimeZone) as e:
            EWSTimeZone('UNKNOWN')
        self.assertEqual(e.exception.args[0], 'No time zone found with key UNKNOWN')
        # Test timezone known by IANA but with no Winzone mapping
        with self.assertRaises(UnknownTimeZone) as e:
            # Deliberately remove an entry to exercise the error path.
            del EWSTimeZone.IANA_TO_MS_MAP['Africa/Tripoli']
            EWSTimeZone('Africa/Tripoli')
        self.assertEqual(e.exception.args[0], 'No Windows timezone name found for timezone "Africa/Tripoli"')
        # Test __eq__ with non-EWSTimeZone compare
        self.assertFalse(EWSTimeZone('GMT') == zoneinfo.ZoneInfo('UTC'))
        # Test from_ms_id() with non-standard MS ID
        self.assertEqual(EWSTimeZone('Europe/Copenhagen'), EWSTimeZone.from_ms_id('Europe/Copenhagen'))
def test_localize(self):
# Test some corner cases around DST
tz = EWSTimeZone('Europe/Copenhagen')
with warnings.catch_warnings():
# localize() is deprecated but we still want to test it. Silence the DeprecationWarning
warnings.simplefilter("ignore")
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 10, 29, 2, 36, 0), is_dst=False)),
'2023-10-29 02:36:00+01:00'
)
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 10, 29, 2, 36, 0), is_dst=None)),
'2023-10-29 02:36:00+02:00'
)
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 10, 29, 2, 36, 0), is_dst=True)),
'2023-10-29 02:36:00+02:00'
)
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 3, 26, 2, 36, 0), is_dst=False)),
'2023-03-26 02:36:00+01:00'
)
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 3, 26, 2, 36, 0), is_dst=None)),
'2023-03-26 02:36:00+01:00'
)
self.assertEqual(
str(tz.localize(EWSDateTime(2023, 3, 26, 2, 36, 0), is_dst=True)),
'2023-03-26 02:36:00+02:00'
)
    def test_ewsdatetime(self):
        """EWSDateTime construction, parsing, formatting, arithmetic, and the
        restrictions on which tzinfo implementations it accepts."""
        # Test a static timezone. Note the POSIX-style sign inversion of the
        # 'Etc/GMT-5' key: it denotes UTC+05:00, as the str() check confirms.
        tz = EWSTimeZone('Etc/GMT-5')
        dt = EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=tz)
        self.assertIsInstance(dt, EWSDateTime)
        self.assertIsInstance(dt.tzinfo, EWSTimeZone)
        self.assertEqual(dt.tzinfo.ms_id, tz.ms_id)
        self.assertEqual(dt.tzinfo.ms_name, tz.ms_name)
        self.assertEqual(str(dt), '2000-01-02 03:04:05+05:00')
        self.assertEqual(
            repr(dt),
            "EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=EWSTimeZone(key='Etc/GMT-5'))"
        )
        # Test a DST timezone
        tz = EWSTimeZone('Europe/Copenhagen')
        dt = EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=tz)
        self.assertIsInstance(dt, EWSDateTime)
        self.assertIsInstance(dt.tzinfo, EWSTimeZone)
        self.assertEqual(dt.tzinfo.ms_id, tz.ms_id)
        self.assertEqual(dt.tzinfo.ms_name, tz.ms_name)
        self.assertEqual(str(dt), '2000-01-02 03:04:05+01:00')
        self.assertEqual(
            repr(dt),
            "EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=EWSTimeZone(key='Europe/Copenhagen'))"
        )
        # Test from_string: naive strings are rejected; offset and 'Z' forms
        # are normalized to UTC-aware EWSDateTime values.
        with self.assertRaises(NaiveDateTimeNotAllowed):
            EWSDateTime.from_string('2000-01-02T03:04:05')
        self.assertEqual(
            EWSDateTime.from_string('2000-01-02T03:04:05+01:00'),
            EWSDateTime(2000, 1, 2, 2, 4, 5, tzinfo=UTC)
        )
        self.assertEqual(
            EWSDateTime.from_string('2000-01-02T03:04:05Z'),
            EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=UTC)
        )
        self.assertIsInstance(EWSDateTime.from_string('2000-01-02T03:04:05+01:00'), EWSDateTime)
        self.assertIsInstance(EWSDateTime.from_string('2000-01-02T03:04:05Z'), EWSDateTime)
        # Test addition, subtraction, summertime etc
        self.assertIsInstance(dt + datetime.timedelta(days=1), EWSDateTime)
        self.assertIsInstance(dt - datetime.timedelta(days=1), EWSDateTime)
        self.assertIsInstance(dt - EWSDateTime.now(tz=tz), datetime.timedelta)
        self.assertIsInstance(EWSDateTime.now(tz=tz), EWSDateTime)
        # Test various input for from_datetime(): EWSTimeZone, zoneinfo,
        # dateutil and pytz tzinfo implementations are all accepted here.
        self.assertEqual(dt, EWSDateTime.from_datetime(
            datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=EWSTimeZone('Europe/Copenhagen'))
        ))
        self.assertEqual(dt, EWSDateTime.from_datetime(
            datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=zoneinfo.ZoneInfo('Europe/Copenhagen'))
        ))
        self.assertEqual(dt, EWSDateTime.from_datetime(
            datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=dateutil.tz.gettz('Europe/Copenhagen'))
        ))
        self.assertEqual(dt, EWSDateTime.from_datetime(
            datetime.datetime(2000, 1, 2, 3, 4, 5, tzinfo=pytz.timezone('Europe/Copenhagen'))
        ))
        self.assertEqual(dt.ewsformat(), '2000-01-02T03:04:05+01:00')
        utc_tz = EWSTimeZone('UTC')
        self.assertEqual(dt.astimezone(utc_tz).ewsformat(), '2000-01-02T02:04:05Z')
        # Test summertime
        dt = EWSDateTime(2000, 8, 2, 3, 4, 5, tzinfo=tz)
        self.assertEqual(dt.astimezone(utc_tz).ewsformat(), '2000-08-02T01:04:05Z')
        # Test in-place add and subtract; the result must keep the subclass
        dt = EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=tz)
        dt += datetime.timedelta(days=1)
        self.assertIsInstance(dt, EWSDateTime)
        self.assertEqual(dt, EWSDateTime(2000, 1, 3, 3, 4, 5, tzinfo=tz))
        dt = EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=tz)
        dt -= datetime.timedelta(days=1)
        self.assertIsInstance(dt, EWSDateTime)
        self.assertEqual(dt, EWSDateTime(2000, 1, 1, 3, 4, 5, tzinfo=tz))
        # Test ewsformat() failure on a naive datetime
        dt = EWSDateTime(2000, 1, 2, 3, 4, 5)
        with self.assertRaises(ValueError):
            dt.ewsformat()
        # Test wrong tzinfo type: attaching a raw pytz zone directly (instead
        # of going through from_datetime() above) is rejected
        with self.assertRaises(ValueError):
            EWSDateTime(2000, 1, 2, 3, 4, 5, tzinfo=pytz.timezone('Europe/Copenhagen'))
        with self.assertRaises(ValueError):
            EWSDateTime.from_datetime(EWSDateTime(2000, 1, 2, 3, 4, 5))
def test_generate(self):
try:
version, tz_map = generate_map()
except CONNECTION_ERRORS:
# generate_map() requires access to unicode.org, which may be unavailable. Don't fail test, since this is
# out of our control.
return
self.assertEqual(version, CLDR_WINZONE_VERSION)
self.assertDictEqual(tz_map, CLDR_TO_MS_TIMEZONE_MAP)
@requests_mock.mock()
def test_generate_failure(self, m):
m.get(CLDR_WINZONE_URL, status_code=500)
with self.assertRaises(ValueError):
generate_map()
def test_ewsdate(self):
self.assertEqual(EWSDate(2000, 1, 1).ewsformat(), '2000-01-01')
self.assertEqual(EWSDate.from_string('2000-01-01'), EWSDate(2000, 1, 1))
self.assertEqual(EWSDate.from_string('2000-01-01Z'), EWSDate(2000, 1, 1))
self.assertEqual(EWSDate.from_string('2000-01-01+01:00'), EWSDate(2000, 1, 1))
self.assertEqual(EWSDate.from_string('2000-01-01-01:00'), EWSDate(2000, 1, 1))
self.assertIsInstance(EWSDate(2000, 1, 2) - EWSDate(2000, 1, 1), datetime.timedelta)
self.assertIsInstance(EWSDate(2000, 1, 2) + datetime.timedelta(days=1), EWSDate)
self.assertIsInstance(EWSDate(2000, 1, 2) - datetime.timedelta(days=1), EWSDate)
# Test in-place add and subtract
dt = EWSDate(2000, 1, 2)
dt += datetime.timedelta(days=1)
self.assertIsInstance(dt, EWSDate)
self.assertEqual(dt, EWSDate(2000, 1, 3))
dt = EWSDate(2000, 1, 2)
dt -= datetime.timedelta(days=1)
self.assertIsInstance(dt, EWSDate)
self.assertEqual(dt, EWSDate(2000, 1, 1))
with self.assertRaises(ValueError):
EWSDate.from_date(EWSDate(2000, 1, 2))
|
<filename>test/test_im.py
import unittest
from unittest.mock import patch, PropertyMock, MagicMock
from os import path
from src import im
from io import BytesIO
# Edge length (sent as a string form-field value) used for valid resize
# requests in these tests.
THUMB_LENGTH = str(290)
# Known-good PNG fixture, loaded from ./fixture/images/ by with_image().
IMAGE_PNG = 'kosys.png'
def with_image(name, consumer):
    """Read fixture image *name* fully into memory and invoke *consumer* with
    a ``(BytesIO, filename)`` tuple — the form the tests post as the 'data'
    multipart field."""
    fixture = path.dirname(__file__) + '/fixture/images/' + name
    with open(fixture, 'rb') as handle:
        buffered = BytesIO(handle.read())
    consumer((buffered, name))
class ImageMagickTestCase(unittest.TestCase):
    """HTTP-level tests for the image resize service defined in src.im."""

    def setUp(self):
        # Flask test client for the service; no real HTTP server is started.
        self.app = im.app.test_client()

    def tearDown(self):
        # No per-test state to release. (The original body was the bare
        # expression ``None`` — a silent no-op; ``pass`` states the intent.)
        pass

    def _assert_resize(self, image_name, expected_status, **params):
        # Helper: POST the named fixture image to /resize as the 'data' field
        # together with *params*, and assert on the HTTP status code.
        def action(image):
            payload = dict(params, data=image)
            self.assertEqual(self.request_resize(payload).status_code,
                             expected_status)
        with_image(image_name, action)

    def test_ping(self):
        """The health-check endpoint answers 'pong'."""
        response = self.app.get('/ping')
        self.assertEqual(response.data, b'pong')

    def test_resize_with_valid_params(self):
        """A well-formed request succeeds."""
        self._assert_resize(IMAGE_PNG, 200,
                            width=THUMB_LENGTH, height=THUMB_LENGTH)

    def test_resize_without_width(self):
        """A missing 'width' field is rejected."""
        self._assert_resize(IMAGE_PNG, 400, height=THUMB_LENGTH)

    def test_resize_without_height(self):
        """A missing 'height' field is rejected."""
        self._assert_resize(IMAGE_PNG, 400, width=THUMB_LENGTH)

    def test_resize_without_data(self):
        """A missing image payload is rejected."""
        params = dict(width=THUMB_LENGTH, height=THUMB_LENGTH)
        response = self.request_resize(params)
        self.assertEqual(response.status_code, 400)

    def test_resize_with_too_large_width(self):
        """A width above MAX_THUMB_LENGTH is rejected."""
        self._assert_resize(IMAGE_PNG, 400,
                            width=str(im.MAX_THUMB_LENGTH + 1),
                            height=THUMB_LENGTH)

    def test_resize_with_too_large_height(self):
        """A height above MAX_THUMB_LENGTH is rejected."""
        self._assert_resize(IMAGE_PNG, 400,
                            width=THUMB_LENGTH,
                            height=str(im.MAX_THUMB_LENGTH + 1))

    def test_resize_with_unsupported_format(self):
        """An image format the service does not support is rejected."""
        self._assert_resize('favicon.ico', 400,
                            width=THUMB_LENGTH, height=THUMB_LENGTH)

    def test_resize_with_invalid_format(self):
        """Data that is not an image at all is rejected."""
        self._assert_resize('invalid.md', 400,
                            width=THUMB_LENGTH, height=THUMB_LENGTH)

    def test_attempt_command_injection_with_width(self):
        """Shell metacharacters in 'width' must be rejected, not executed."""
        self._assert_resize(IMAGE_PNG, 400,
                            width="|| rm -Rf /", height=THUMB_LENGTH)

    def test_attempt_command_injection_with_height(self):
        """Shell metacharacters in 'height' must be rejected, not executed."""
        self._assert_resize(IMAGE_PNG, 400,
                            width=THUMB_LENGTH, height="|| rm -Rf /")

    @patch('subprocess.Popen')
    def test_error_occurred_on_command_execution(self, popen):
        """An OSError while launching the convert command yields a 500."""
        popen.side_effect = OSError()
        self._assert_resize(IMAGE_PNG, 500,
                            width=THUMB_LENGTH, height=THUMB_LENGTH)

    @patch('subprocess.Popen.wait')
    def test_command_result_non_0_code(self, wait):
        """A non-zero exit code from the convert command yields a 500."""
        wait.return_value = 1
        self._assert_resize(IMAGE_PNG, 500,
                            width=THUMB_LENGTH, height=THUMB_LENGTH)

    def request_resize(self, params):
        """POST *params* as multipart form data to the /resize endpoint."""
        return self.app.post('/resize',
                             content_type='multipart/form-data',
                             data=params
                             )
# Allow running the tests directly with ``python test_im.py``.
if __name__ == '__main__':
    unittest.main()
|
<reponame>hwpplayers/ironic<filename>ironic/tests/unit/drivers/test_ipmi.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ironic.conductor import task_manager
from ironic.drivers.modules import agent
from ironic.drivers.modules import ipmitool
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import noop
from ironic.drivers.modules import noop_mgmt
from ironic.drivers.modules import pxe
from ironic.drivers.modules.storage import cinder
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class IPMIHardwareTestCase(db_base.DbTestCase):
    """Verify which interface implementations the 'ipmi' hardware type
    selects, by default and with explicit per-node overrides."""

    def setUp(self):
        super(IPMIHardwareTestCase, self).setUp()
        self.config(enabled_hardware_types=['ipmi'],
                    enabled_power_interfaces=['ipmitool'],
                    enabled_management_interfaces=['ipmitool', 'noop'],
                    enabled_raid_interfaces=['no-raid', 'agent'],
                    enabled_console_interfaces=['no-console'],
                    enabled_vendor_interfaces=['ipmitool', 'no-vendor'])

    def _validate_interfaces(self, task, **kwargs):
        # Interface name -> implementation expected unless overridden through
        # keyword arguments.
        expected = (
            ('management', ipmitool.IPMIManagement),
            ('power', ipmitool.IPMIPower),
            ('boot', pxe.PXEBoot),
            ('deploy', iscsi_deploy.ISCSIDeploy),
            ('console', noop.NoConsole),
            ('raid', noop.NoRAID),
            ('vendor', ipmitool.VendorPassthru),
            ('storage', noop_storage.NoopStorage),
            ('rescue', noop.NoRescue),
        )
        for iface_name, default_impl in expected:
            self.assertIsInstance(getattr(task.driver, iface_name),
                                  kwargs.get(iface_name, default_impl))

    def _check_node(self, node_kwargs, expected_interfaces):
        # Create an 'ipmi' node with the given field overrides and validate
        # the interface implementations chosen for it.
        node = obj_utils.create_test_node(self.context, driver='ipmi',
                                          **node_kwargs)
        with task_manager.acquire(self.context, node.id) as task:
            self._validate_interfaces(task, **expected_interfaces)

    def test_default_interfaces(self):
        self._check_node({}, {})

    def test_override_with_shellinabox(self):
        self.config(enabled_console_interfaces=['ipmitool-shellinabox',
                                                'ipmitool-socat'])
        self._check_node(
            {'deploy_interface': 'direct',
             'raid_interface': 'agent',
             'console_interface': 'ipmitool-shellinabox',
             'vendor_interface': 'no-vendor'},
            {'deploy': agent.AgentDeploy,
             'console': ipmitool.IPMIShellinaboxConsole,
             'raid': agent.AgentRAID,
             'vendor': noop.NoVendor})

    def test_override_with_cinder_storage(self):
        self.config(enabled_storage_interfaces=['noop', 'cinder'])
        self._check_node({'storage_interface': 'cinder'},
                         {'storage': cinder.CinderStorage})

    def test_override_with_agent_rescue(self):
        self.config(enabled_rescue_interfaces=['no-rescue', 'agent'])
        self._check_node({'rescue_interface': 'agent'},
                         {'rescue': agent.AgentRescue})

    def test_override_with_noop_mgmt(self):
        self.config(enabled_management_interfaces=['ipmitool', 'noop'])
        self._check_node({'management_interface': 'noop'},
                         {'management': noop_mgmt.NoopManagement})
|
<reponame>servoz/capsul
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import os
import shutil
import unittest
import tempfile
import sys
import six
from traits.api import File
from capsul.api import Process, Pipeline, Switch, get_process_instance
class Identity(Process):
    """Process with one mandatory input File and one output File.

    Used as the terminal nodes (identity_a/b/c) of ComplexPipeline; only the
    parameter traits matter for these activation tests, so no execution body
    is defined here.
    """
    input_image = File(optional=False, output=False)
    output_image = File(optional=False, output=True)
class ComplexPipeline(Pipeline):
    """Pipeline exercising complex construction behaviours: nested
    sub-pipelines, a switch, and shared exported parameters."""

    def pipeline_definition(self):
        suffixes = ('1', '10', '100')
        letters = ('a', 'b', 'c')
        # One upstream pipeline plus one sub-pipeline per threshold value;
        # the sub-pipelines have all of their outputs made optional.
        self.add_process('first_pipeline',
                         'capsul.process.test.test_pipeline')
        for suffix in suffixes:
            self.add_process('pipeline_%s' % suffix,
                             'capsul.process.test.test_pipeline',
                             make_optional=['output_1', 'output_10',
                                            'output_100'])
        self.add_switch('select_threshold',
                        ['threshold_1', 'threshold_10', 'threshold_100'],
                        ['output_a', 'output_b', 'output_c'])
        for letter in letters:
            self.add_process('identity_%s' % letter, Identity)
        # A single exported select_method parameter drives every sub-pipeline.
        self.export_parameter('first_pipeline', 'select_method')
        for suffix in suffixes:
            self.add_link('select_method->pipeline_%s.select_method' % suffix)
        for suffix in suffixes:
            self.add_link(
                'first_pipeline.output_{0}->pipeline_{0}.input_image'.format(
                    suffix))
        # Every output of every sub-pipeline feeds the switch: pipeline_1
        # routes to output_a, pipeline_10 to output_b, pipeline_100 to
        # output_c.
        for suffix, letter in zip(suffixes, letters):
            for threshold in suffixes:
                self.add_link(
                    'pipeline_{0}.output_{1}'
                    '->select_threshold.threshold_{1}_switch_output_{2}'.format(
                        suffix, threshold, letter))
        for letter in letters:
            self.add_link(
                'select_threshold.output_{0}->identity_{0}.input_image'.format(
                    letter))
        for letter in letters:
            self.export_parameter('identity_%s' % letter, 'output_image',
                                  'output_%s' % letter)
        self.node_position = {'first_pipeline': (118.0, 486.0),
                              'identity_a': (870.0, 644.0),
                              'identity_b': (867.0, 742.0),
                              'identity_c': (866.0, 846.0),
                              'inputs': (-107.0, 491.0),
                              'outputs': (1111.0, 723.0),
                              'pipeline_1': (329.0, 334.0),
                              'pipeline_10': (331.0, 533.0),
                              'pipeline_100': (334.0, 738.0),
                              'select_threshold': (559.0, 453.0)}
class TestComplexPipeline(unittest.TestCase):
expected_status = [
({},
{
'': {
'_activated': True,
'_enabled': True,
},
'first_pipeline': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_100': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.mask_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_gt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_gt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_gt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_gt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_gt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_gt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.mask_100': {
'_activated': False,
'_enabled': True,
},
'select_threshold': {
'_activated': True,
'_enabled': True,
},
'identity_a': {
'_activated': True,
'_enabled': True,
},
'identity_b': {
'_activated': True,
'_enabled': True,
},
'identity_c': {
'_activated': True,
'_enabled': True,
},
}
),
({'select_method': 'lower than'},
{
'': {
'_activated': True,
'_enabled': True,
},
'first_pipeline': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_100': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.mask_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_lt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_lt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_lt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_lt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_lt_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_lt_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.mask_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.mask_10': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.mask_100': {
'_activated': False,
'_enabled': True,
},
'select_threshold': {
'_activated': True,
'_enabled': True,
},
'identity_a': {
'_activated': True,
'_enabled': True,
},
'identity_b': {
'_activated': True,
'_enabled': True,
},
'identity_c': {
'_activated': True,
'_enabled': True,
},
}
),
({'select_threshold': 'threshold_10'},
{
'': {
'_activated': True,
'_enabled': True,
},
'first_pipeline': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_100': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.mask_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_gt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_gt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_gt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_gt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_gt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_gt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_gt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_lt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_lt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_lt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.mask_100': {
'_activated': False,
'_enabled': True,
},
'select_threshold': {
'_activated': True,
'_enabled': True,
},
'identity_a': {
'_activated': True,
'_enabled': True,
},
'identity_b': {
'_activated': True,
'_enabled': True,
},
'identity_c': {
'_activated': True,
'_enabled': True,
},
}
),
({'select_threshold': 'threshold_10',
'select_method': 'lower than'},
{
'': {
'_activated': True,
'_enabled': True,
},
'first_pipeline': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_lt_100': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'first_pipeline.mask_1': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_10': {
'_activated': True,
'_enabled': True,
},
'first_pipeline.mask_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_1': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_lt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_lt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_1.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_1.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_1.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_lt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_lt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_10.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_10.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_10.mask_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_lt_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_lt_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.threshold_lt_100': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.threshold_gt_1': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_gt_10': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.threshold_gt_100': {
'_activated': False,
'_enabled': False,
},
'pipeline_100.mask_1': {
'_activated': False,
'_enabled': True,
},
'pipeline_100.mask_10': {
'_activated': True,
'_enabled': True,
},
'pipeline_100.mask_100': {
'_activated': False,
'_enabled': True,
},
'select_threshold': {
'_activated': True,
'_enabled': True,
},
'identity_a': {
'_activated': True,
'_enabled': True,
},
'identity_b': {
'_activated': True,
'_enabled': True,
},
'identity_c': {
'_activated': True,
'_enabled': True,
},
}
),
]
def test_activations(self):
for kwargs, activations_to_check in self.expected_status:
pipeline = get_process_instance(ComplexPipeline, **kwargs)
for full_node_name, node_activations in six.iteritems(activations_to_check):
split = full_node_name.split('.')
node_pipeline = pipeline
for i in split[:-1]:
node_pipeline = node_pipeline.nodes[i].process
node_name = split[-1]
try:
node = node_pipeline.nodes[node_name]
except KeyError:
raise KeyError('Pipeline {0} has no node named {1}'.format(node_pipeline.pipeline, node_name))
try:
what = 'activation of node {0}'.format(full_node_name or 'main pipeline node')
expected = node_activations.get('_activated')
if expected is not None:
got = node.activated
self.assertEqual(expected, got)
what = 'enabled for node {0}'.format(full_node_name or 'main pipeline node')
expected = node_activations.get('_enabled')
if expected is not None:
got = node.enabled
self.assertEqual(expected, got)
except AssertionError:
raise AssertionError('Wrong activation within ComplexPipeline with parameters {0}: {1} is supposed to be {2} but is {3}'.format(kwargs, what, expected, got))
def test():
    """Run the TestComplexPipeline suite; return True when all tests pass."""
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestComplexPipeline)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return result.wasSuccessful()
# Script entry point: run the test suite, then — when '-v' is given — open a
# pipeline viewer GUI for manual inspection of the pipeline structure.
if __name__ == '__main__':
    print('Test return code:', test())
    if '-v' in sys.argv[1:]:
        from pprint import pprint  # NOTE(review): imported but unused here
        pipeline = get_process_instance(ComplexPipeline)
        from soma.qt_gui.qt_backend import QtGui
        from capsul.qt_gui.widgets import PipelineDeveloperView
        #from capsul.qt_gui.widgets.activation_inspector import ActivationInspectorApp
        #app = ActivationInspectorApp(ComplexPipeline)
        app = QtGui.QApplication(sys.argv)
        view = PipelineDeveloperView(pipeline, allow_open_controller=True, show_sub_pipelines=True)
        view.show()
        app.exec_()
        # NOTE(review): presumably dropped explicitly so the widget is
        # released before the QApplication goes away — confirm.
        del view
|
import scrapelib
import datetime
import os
import re
from collections import defaultdict
from billy.scrape import ScrapeError
from billy.scrape.bills import BillScraper, Bill
from billy.scrape.votes import Vote
from billy.scrape.utils import convert_pdf
import lxml.html
def action_type(action):
    """Classify a raw South Carolina action string into billy action type(s).

    Matching is a case-insensitive prefix test, and rule order matters:
    more specific prefixes are listed before the shorter ones they contain.
    Returns a single type string or a list of types; 'other' when no rule
    matches.
    """
    # http://www.scstatehouse.gov/actionsearch.php is very useful for this
    rules = (
        ('Adopted', 'bill:passed'),
        ('Amended and adopted', ['bill:passed', 'amendment:passed']),
        ('Amended', 'amendment:passed'),
        ('Certain items vetoed', 'governor:vetoed:line-item'),
        ('Committed to', 'committee:referred'),
        ('Committee Amendment Adopted', 'amendment:passed'),
        ('Committee Amendment Amended and Adopted',
         ['amendment:passed', 'amendment:amended']),
        ('Committee Amendment Amended', 'amendment:amended'),
        ('Committee Amendment Tabled', 'amendment:tabled'),
        ('Committee report: Favorable', 'committee:passed:favorable'),
        ('Committee report: Majority favorable', 'committee:passed'),
        ('House amendment amended', 'amendment:amended'),
        ('Introduced and adopted', ['bill:introduced', 'bill:passed']),
        ('Introduced, adopted', ['bill:introduced', 'bill:passed']),
        ('Introduced and read first time', ['bill:introduced', 'bill:reading:1']),
        ('Introduced, read first time', ['bill:introduced', 'bill:reading:1']),
        ('Introduced', 'bill:introduced'),
        ('Prefiled', 'bill:filed'),
        ('Read second time', 'bill:reading:2'),
        ('Read third time', ['bill:passed', 'bill:reading:3']),
        ('Recommitted to Committee', 'committee:referred'),
        ('Referred to Committee', 'committee:referred'),
        ('Rejected', 'bill:failed'),
        ('Senate amendment amended', 'amendment:amended'),
        ('Signed by governor', 'governor:signed'),
        ('Signed by Governor', 'governor:signed'),
        ('Tabled', 'bill:failed'),
        ('Veto overridden', 'bill:veto_override:passed'),
        ('Veto sustained', 'bill:veto_override:failed'),
        ('Vetoed by Governor', 'governor:vetoed'),
    )
    lowered = action.lower()
    return next(
        (atype for prefix, atype in rules if lowered.startswith(prefix.lower())),
        'other',
    )
class SCBillScraper(BillScraper):
    """Scrape South Carolina bills from scstatehouse.gov.

    Collects bill metadata, subjects, sponsors, versions, actions, and
    roll-call votes for a given chamber and session.
    """
    jurisdiction = 'sc'

    # Daily bill-introduction indexes, one per chamber.
    urls = {
        'lower': {
            'daily-bill-index': "http://www.scstatehouse.gov/hintro/hintros.php",
        },
        'upper': {
            'daily-bill-index': "http://www.scstatehouse.gov/sintro/sintros.php",
        }
    }

    # bill_id -> set of subject strings; filled once per run by
    # scrape_subjects() and read by scrape_details().
    # NOTE: class-level mutable state — shared across instances; acts as a
    # per-process cache since billy creates one scraper per run.
    _subjects = defaultdict(set)

    def scrape_subjects(self, session_code):
        """Populate the bill-id -> subjects map for *session_code*.

        Idempotent: returns immediately when the map is already filled.
        """
        # only need to do it once
        if self._subjects:
            return

        subject_search_url = 'http://www.scstatehouse.gov/subjectsearch.php'
        data = self.post(subject_search_url,
                         data=dict((('GETINDEX', 'Y'), ('SESSION', session_code),
                                    ('INDEXCODE', '0'), ('INDEXTEXT', ''),
                                    ('AORB', 'B'), ('PAGETYPE', '0')))).text
        doc = lxml.html.fromstring(data)
        # skip first two subjects, filler options
        for option in doc.xpath('//option')[2:]:
            subject = option.text
            code = option.get('value')
            url = '%s?AORB=B&session=%s&indexcode=%s' % (subject_search_url,
                                                         session_code, code)
            data = self.get(url).text
            doc = lxml.html.fromstring(data)
            # bold spans carry the bill numbers, e.g. "H 0042"
            for bill in doc.xpath('//span[@style="font-weight:bold;"]'):
                match = re.match('(?:H|S) \d{4}', bill.text)
                if match:
                    # remove * and leading zeroes
                    bill_id = match.group().replace('*', ' ')
                    bill_id = re.sub(' 0*', ' ', bill_id)
                    self._subjects[bill_id].add(subject)

    def scrape_vote_history(self, bill, vurl):
        """Parse a bill's vote-history page and attach each vote to *bill*."""
        html = self.get(vurl).text
        doc = lxml.html.fromstring(html)
        doc.make_links_absolute(vurl)

        # skip first two (header) rows
        for row in doc.xpath('//table/tr')[2:]:
            tds = row.getchildren()
            if len(tds) != 11:
                self.warning('irregular vote row: %s' % vurl)
                continue
            (timestamp, motion, vote, yeas, nays, nv, exc, pres, abst,
             total, result) = tds

            timestamp = timestamp.text.replace(u'\xa0', ' ')
            # BUG FIX: the page uses 12-hour times with an AM/PM marker, so
            # the hour must be parsed with %I — %p is ignored when combined
            # with %H, which recorded every PM vote 12 hours early.
            timestamp = datetime.datetime.strptime(timestamp,
                                                   '%m/%d/%Y %I:%M %p')

            yeas = int(yeas.text)
            nays = int(nays.text)
            others = int(nv.text) + int(exc.text) + int(abst.text) + int(pres.text)
            # sanity check against the page's own total column
            assert yeas + nays + others == int(total.text)

            passed = (result.text == 'Passed')

            # the vote link's text marks which chamber voted
            vote_link = vote.xpath('a')[0]
            if '[H]' in vote_link.text:
                chamber = 'lower'
            else:
                chamber = 'upper'

            vote = Vote(chamber, timestamp, motion.text, passed, yeas, nays,
                        others)
            vote.add_source(vurl)

            # individual legislators' votes come from the roll-call PDF
            rollcall_pdf = vote_link.get('href')
            self.scrape_rollcall(vote, rollcall_pdf)
            vote.add_source(rollcall_pdf)

            bill.add_vote(vote)

    def scrape_rollcall(self, vote, vurl):
        """Parse a roll-call PDF at *vurl* and record per-legislator votes."""
        (path, resp) = self.urlretrieve(vurl)
        pdflines = convert_pdf(path, 'text')
        os.remove(path)

        # which vote.* recorder the names that follow belong to
        current_vfunc = None

        for line in pdflines.split('\n'):
            line = line.strip()

            # section headers switch what is being recorded
            if line.startswith('YEAS') or line.startswith('AYES'):
                current_vfunc = vote.yes
            elif line.startswith('NAYS'):
                current_vfunc = vote.no
            elif (line.startswith('EXCUSED') or
                  line.startswith('NOT VOTING') or
                  line.startswith('ABSTAIN')):
                current_vfunc = vote.other
            # skip blank lines and page footers
            elif not line or line.startswith('Page '):
                continue
            # otherwise the line lists names under the current section
            elif current_vfunc:
                # split names apart by 3 or more spaces
                names = re.split('\s{3,}', line)
                for name in names:
                    if name:
                        current_vfunc(name.strip())

    def scrape_details(self, bill_detail_url, session, chamber, bill_id):
        """Scrape one bill's detail page and save the resulting Bill."""
        page = self.get(bill_detail_url).text

        if 'INVALID BILL NUMBER' in page:
            self.warning('INVALID BILL %s' % bill_detail_url)
            return

        doc = lxml.html.fromstring(page)
        doc.make_links_absolute(bill_detail_url)

        bill_div = doc.xpath('//div[@style="margin:0 0 40px 0;"]')[0]

        bill_type = bill_div.xpath('span/text()')[0]

        if 'General Bill' in bill_type:
            bill_type = 'bill'
        elif 'Concurrent Resolution' in bill_type:
            bill_type = 'concurrent resolution'
        elif 'Joint Resolution' in bill_type:
            bill_type = 'joint resolution'
        elif 'Resolution' in bill_type:
            bill_type = 'resolution'
        else:
            raise ValueError('unknown bill type: %s' % bill_type)

        # this is fragile, but less fragile than it was
        b = bill_div.xpath('./b[text()="Summary:"]')[0]
        bill_summary = b.getnext().tail.strip()

        bill = Bill(session, chamber, bill_id, bill_summary, type=bill_type)
        bill['subjects'] = list(self._subjects[bill_id])

        # sponsors: member links are individuals, committee links committees
        for sponsor in doc.xpath('//a[contains(@href, "member.php")]/text()'):
            bill.add_sponsor('primary', sponsor)
        for sponsor in doc.xpath('//a[contains(@href, "committee.php")]/text()'):
            sponsor = sponsor.replace(u'\xa0', ' ').strip()
            bill.add_sponsor('primary', sponsor)

        # find versions via the full-text page
        version_url = doc.xpath('//a[text()="View full text"]/@href')[0]
        version_html = self.get(version_url).text
        version_doc = lxml.html.fromstring(version_html)
        version_doc.make_links_absolute(version_url)
        for version in version_doc.xpath('//a[contains(@href, "/prever/")]'):
            # duplicate versions with same date, use first appearance
            bill.add_version(version.text, version.get('href'),
                             on_duplicate='use_old',
                             mimetype='text/html')

        # actions
        for row in bill_div.xpath('table/tr'):
            date_td, chamber_td, action_td = row.xpath('td')
            date = datetime.datetime.strptime(date_td.text, "%m/%d/%y")
            action_chamber = {'Senate': 'upper',
                              'House': 'lower',
                              None: 'other'}[chamber_td.text]

            action = action_td.text_content()
            # strip the trailing journal citation, if any
            action = action.split('(House Journal')[0]
            action = action.split('(Senate Journal')[0].strip()

            atype = action_type(action)
            bill.add_action(action_chamber, action, date, atype)

        # votes (the link is absent when a bill never reached a vote)
        vurl = doc.xpath('//a[text()="View Vote History"]/@href')
        if vurl:
            vurl = vurl[0]
            self.scrape_vote_history(bill, vurl)

        bill.add_source(bill_detail_url)
        self.save_bill(bill)

    def scrape(self, chamber, session):
        """Entry point: scrape all bills for *chamber* in *session*."""
        # start with subjects
        session_code = self.metadata['session_details'][session]['_code']
        self.scrape_subjects(session_code)

        # get bill index
        index_url = self.urls[chamber]['daily-bill-index']
        chamber_letter = 'S' if chamber == 'upper' else 'H'

        page = self.get(index_url).text
        doc = lxml.html.fromstring(page)
        doc.make_links_absolute(index_url)

        # visit each day and extract bill ids
        days = doc.xpath('//div/b/a/@href')
        for day_url in days:
            try:
                data = self.get(day_url).text
            except scrapelib.HTTPError:
                # a day's page can 404; skip it rather than abort the run
                continue

            doc = lxml.html.fromstring(data)
            doc.make_links_absolute(day_url)

            for bill_a in doc.xpath('//p/a[1]'):
                bill_id = bill_a.text.replace('.', '')
                if bill_id.startswith(chamber_letter):
                    self.scrape_details(bill_a.get('href'), session, chamber,
                                        bill_id)
|
<reponame>thechiragthakur/Data-Science-Using-Python<gh_stars>0
#!/usr/bin/env python
# coding: utf-8

# # ASSIGNMENT 1
# Pima Indians Diabetes Database.
# It consists of 768 tuples, each having 9 attributes
# (8 numeric features plus the binary 'class' label).

# In[1]:

import pandas as pd

# Load the dataset given to us (comma-separated; path is machine-specific).
pid=pd.read_csv("C:\\Users\\Micontroller Lab N16\\IIT MANDI\\3rd Week\\pima-indians-diabetes.csv",sep=',')
# Work on a copy so the original frame stays untouched.
pid1=pid.copy()
print(pid1)
# Check for any null values present in the dataset.
print(pid1.isnull().sum())

# In[2]:

pid.columns
# Create a list of all the columns present in the dataset.
pid_col=list(pid.columns)
pid_col
pid_col1=pid_col.copy()
# We do not want to alter the 'class' column, so remove it from the
# copied column list (used to identify the feature columns).
pid_col1.remove('class')
pid_col1
# Question 1. Write a python program to
#
# a. Normalize all the attributes, except class attribute, of pima-indians-diabetes.csv
# using min-max normalization to transform the data in the range [0-1]. Save the file as
# pima-indians-diabetes-Normalised.csv
#
# b. Standardize, all the attributes, except class attribute, of pima-indians-
# diabetes.csv using z-normalization. Save the file as pima-indians-
# diabetes-Standardised.csv

# In[3]:

# Use MinMaxScaler from scikit-learn for min-max normalization.
# NOTE(review): the scaler is fitted on ALL columns, including 'class',
# although the question says to exclude it; also the normalised data is
# never saved as pima-indians-diabetes-Normalised.csv — TODO confirm/fix.
from sklearn.preprocessing import MinMaxScaler
pid1_mms=pid.copy()
scaler = MinMaxScaler(copy=True, feature_range=(0,1))
print(scaler.fit(pid1_mms)) #fitting the model
pid1_mms=scaler.fit_transform(pid1_mms)  # returns a numpy array, not a DataFrame
print(pid1_mms) #normalized dataset
print('\n\n')

# In[4]:

# Use StandardScaler from scikit-learn for z-score standardization.
# NOTE(review): same caveats as above — 'class' is standardized too, and the
# result is never written to pima-indians-diabetes-Standardised.csv.
from sklearn.preprocessing import StandardScaler
pid1_ss=pid1.copy()
scaler = StandardScaler()
standard_df = scaler.fit_transform(pid1_ss) #fitting the model; scaled values live in standard_df
print(pid1_ss)  # NOTE(review): this prints the UN-scaled copy, not the standardized data

# Question 2. Split the data of each class from pima-indians-diabetes.csv into train data and test
# data. Train data contain 70% of tuples from each of the class and test data contain remaining
# 30% of tuples from each class. Save the train data as diabetes-train.csv and save the
# test data as diabetes-test.csv
# a. Classify every test tuple using K-nearest neighbor (KNN) method for the different values
# of K (1, 3, 5, 7, 9, 11, 13, 15, 17, 21). Perform the following analysis :
# i. Find confusion matrix (use ‘confusion_matrix’) for each K.
# ii. Find the classification accuracy (You can use ‘accuracy_score’) for each K. Note the
# value of K for which the accuracy is high.

# In[5]:

X1 = pid1.drop(['class'], axis = 1)# X denotes the input features; 'class' says whether the person is ill or not
print(X1)
y1 = pid['class'] #y denotes the target labels
print(y1)

# In[6]:

# 70% of the data for training, 30% for testing, as required.
# NOTE(review): the question asks for a per-class (stratified) 70/30 split;
# this plain split is not stratified — consider stratify=y1. TODO confirm.
from sklearn.model_selection import train_test_split
X1_train, X1_test, y1_train, y1_test = train_test_split(X1, y1, train_size = 0.7, random_state = 42)
print(X1_train)
print(y1_train)
print(X1_test)
print(y1_test)
# In[7]:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score,confusion_matrix

# Odd K values to evaluate.
# NOTE(review): the question lists (1,3,...,17,21); 19 is included here too.
neighbors=[1,3,5,7,9,11,13,15,17,19,21]
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))

# Loop over K values: fit a KNN model on the raw (unscaled) features,
# print predictions, accuracy and the confusion matrix for each K.
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X1_train, y1_train)
    print('Predicted Outcomes for neighbours =',k,'are', knn.predict(X1_test))
    print('\n')
    print('Accuracy = ',knn.score(X1_test, y1_test))
    print('\n')
    matrix = confusion_matrix(y1_test,knn.predict(X1_test))
    print('Confusion Matrix = ',matrix)
    print('\n\n')
    # Record training and test accuracy for the plot below.
    train_accuracy[i] = knn.score(X1_train, y1_train)
    test_accuracy[i] = knn.score(X1_test, y1_test)

# Generate an accuracy-vs-K plot for both splits.
plt.plot(neighbors, test_accuracy, label = 'Testing dataset Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training dataset Accuracy')
plt.legend()
plt.xlabel('n_neighbors')
plt.ylabel('Accuracy')
plt.show()
# Question 3. Split the data of each class from pima-indians-diabetes-Normalised.csv into
# train data and test data. Train data should contain same 70% of tuples in Question 2 from
# each of the class and test data contain remaining same 30% of tuples from each class. Save the
#
# train data as diabetes-train-normalise.csv and save the test data as diabetes-
# test-normalise.csv
#
# a. Classify every test tuple using K-nearest neighbor (KNN) method for the different values
# of K (1, 3, 5, 7, 9, 11, 13, 15, 17, 21). Perform the following analysis :
# i. Find confusion matrix (use ‘confusion_matrix’) for each K.
# ii. Find the classification accuracy (You can use ‘accuracy_score’) for each K. Note the
# value of K for which the accuracy is high.

# In[8]:

# pid1_mms holds min-max-normalised values for ALL columns, including
# 'class'. FIX: drop the label column so it is not leaked in as a feature
# (the original X2 = pid1_mms fed the normalised label to the classifier).
X2 = pd.DataFrame(pid1_mms, columns=pid.columns).drop(['class'], axis=1)
X2
# Targets are the original (un-normalised) class labels.
y2 = pid['class']
y2

# In[9]:

# Same 70/30 split and random_state as Question 2, so the same tuples land
# in train/test.
from sklearn.model_selection import train_test_split
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, train_size = 0.7, random_state = 42)
print(X2_train)
print(y2_train)
print(X2_test)
print(y2_test)
# In[10]:

import numpy as np
import matplotlib.pyplot as plt

# Same K values as Question 2 so results are comparable.
neighbors=[1,3,5,7,9,11,13,15,17,19,21]
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
acc=[]  # NOTE(review): declared but never filled/used below

# Loop over K values: fit KNN on the min-max-normalised features and report
# predictions, accuracy and the confusion matrix for each K.
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X2_train, y2_train)
    print('Predicted Outcomes for neighbours =',k,'are', knn.predict(X2_test))
    print('\n')
    print('Accuracy = ',knn.score(X2_test, y2_test))
    print('\n')
    matrix = confusion_matrix(y2_test,knn.predict(X2_test))
    print('Confusion Matrix = ',matrix)
    print('\n\n')
    # Record training and test accuracy for the plot below.
    train_accuracy[i] = knn.score(X2_train, y2_train)
    test_accuracy[i] = knn.score(X2_test, y2_test)

# Generate an accuracy-vs-K plot for both splits.
plt.plot(neighbors, test_accuracy, label = 'Testing dataset Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training dataset Accuracy')
plt.legend()
plt.xlabel('n_neighbors')
plt.ylabel('Accuracy')
plt.show()
# Question 4. Split the data of each class from pima-indians-diabetes-Standardised.csv into
# train data and test data. Train data should contain same 70% of tuples in Question 2 from
# each of the class and test data contain remaining same 30% of tuples from each class. Save the
#
# train data as diabetes-train-standardise.csv and save the test data as diabetes-
# test-standardise.csv
#
# a. Classify every test tuple using K-nearest neighbor (KNN) method for the different values
# of K (1, 3, 5, 7, 9, 11, 13, 15, 17, 21). Perform the following analysis :
# i. Find confusion matrix (use ‘confusion_matrix’) for each K.
#
# ii. Find the classification accuracy (You can use ‘accuracy_score’) for each K. Note the
# value of K for which the accuracy is high.

# In[11]:

# FIX: Question 4 must use the *standardised* data. The z-scored values live
# in standard_df (computed in In[4]); the original X3 = pid1_ss used the
# UN-standardised copy and also kept the 'class' column as a feature.
# Rebuild a DataFrame from standard_df and drop the label column.
X3 = pd.DataFrame(standard_df, columns=pid1_ss.columns).drop(['class'], axis=1)
X3
# Targets are the original (un-scaled) class labels.
y3 = pid1_ss['class']
y3

# In[12]:

# Same 70/30 split and random_state as Question 2, so the same tuples land
# in train/test.
from sklearn.model_selection import train_test_split
X3_train, X3_test, y3_train, y3_test = train_test_split(X3, y3, train_size = 0.7, random_state = 42)
print(X3_train)
print(y3_train)
print(X3_test)
print(y3_test)
# In[13]:

import numpy as np
import matplotlib.pyplot as plt

# Same K values as Question 2 so results are comparable.
neighbors=[1,3,5,7,9,11,13,15,17,19,21]
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
acc=[]  # NOTE(review): declared but never filled/used below

# Loop over K values: fit KNN on the standardised features and report
# predictions, accuracy and the confusion matrix for each K.
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X3_train, y3_train)
    print('Predicted Outcomes for neighbours =',k,'are', knn.predict(X3_test))
    print('\n')
    print('Accuracy = ',knn.score(X3_test, y3_test))
    print('\n')
    matrix = confusion_matrix(y3_test,knn.predict(X3_test))
    print('Confusion Matrix = ',matrix)
    print('\n\n')
    # Record training and test accuracy for the plot below.
    train_accuracy[i] = knn.score(X3_train, y3_train)
    test_accuracy[i] = knn.score(X3_test, y3_test)

# Generate an accuracy-vs-K plot for both splits.
plt.plot(neighbors, test_accuracy, label = 'Testing dataset Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training dataset Accuracy')
plt.legend()
plt.xlabel('n_neighbors')
plt.ylabel('Accuracy')
plt.show()
# 6. Why is the value of K chosen as an odd integer?
# Suppose P1 is the point whose label needs to be predicted. First, you find the k closest points to P1, and then classify P1 by a majority vote of those k neighbours. Each neighbour votes for its own class, and the class with the most votes is taken as the prediction.
# If k is even, the vote can be tied, making it difficult to choose a class; an odd k avoids ties in binary classification.
|
<filename>preprocessor/preprocessor.py<gh_stars>0
import os
import random
import json
import tgt
import librosa
import numpy as np
from tqdm import tqdm
import audio as Audio
from text import grapheme_to_phoneme
from utils.tools import read_lexicon
from g2p_en import G2p
# Fix the shuffle order so the train/val split is reproducible across runs.
random.seed(1234)
class Preprocessor:
    """Turn raw <in_dir>/<speaker>/*.wav + *.lab pairs into training data.

    For every utterance this computes a mel-spectrogram (saved under
    <out_dir>/mel) plus a metadata line "basename|speaker|phonemes|raw_text";
    the lines are shuffled and split into train.txt / val.txt, and a
    speaker-name -> index map is written to speakers.json.
    """

    def __init__(self, config):
        """Cache paths, audio/STFT settings and helpers from *config* (nested dict)."""
        self.config = config
        self.in_dir = config["path"]["raw_path"]
        self.out_dir = config["path"]["preprocessed_path"]
        # number of (shuffled) utterances reserved for validation
        self.val_size = config["preprocessing"]["val_size"]
        self.sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
        # utterances shorter than this many samples are skipped entirely
        self.skip_len = config["preprocessing"]["audio"]["skip_len"]
        # threshold (in dB below peak) for silence trimming
        self.trim_top_db = config["preprocessing"]["audio"]["trim_top_db"]
        self.filter_length = config["preprocessing"]["stft"]["filter_length"]
        self.hop_length = config["preprocessing"]["stft"]["hop_length"]

        # grapheme-to-phoneme converter plus a word -> phoneme lexicon override
        self.g2p = G2p()
        self.lexicon = read_lexicon(config["path"]["lexicon_path"])

        # STFT/mel extractor configured from the same preprocessing section
        self.STFT = Audio.stft.TacotronSTFT(
            config["preprocessing"]["stft"]["filter_length"],
            config["preprocessing"]["stft"]["hop_length"],
            config["preprocessing"]["stft"]["win_length"],
            config["preprocessing"]["mel"]["n_mel_channels"],
            config["preprocessing"]["audio"]["sampling_rate"],
            config["preprocessing"]["mel"]["mel_fmin"],
            config["preprocessing"]["mel"]["mel_fmax"],
        )

    def build_from_path(self):
        """Process every speaker directory under in_dir and write all outputs.

        Returns the list of metadata lines (one per kept utterance).
        """
        os.makedirs((os.path.join(self.out_dir, "mel")), exist_ok=True)

        print("Processing Data ...")
        out = list()
        n_frames = 0  # running total of mel frames, for the duration report

        # Compute pitch, energy, duration, and mel-spectrogram
        # (each top-level directory name is treated as a speaker id)
        speakers = {}
        for i, speaker in enumerate(tqdm(os.listdir(self.in_dir))):
            speakers[speaker] = i
            for wav_name in tqdm(os.listdir(os.path.join(self.in_dir, speaker))):
                if ".wav" not in wav_name:
                    continue

                basename = wav_name.split(".")[0]
                ret = self.process_utterance(speaker, basename)
                if ret is None:
                    # clip was too short (or otherwise rejected); skip it
                    continue
                else:
                    info, n = ret
                out.append(info)
                n_frames += n

        # Save the speaker-name -> index mapping
        with open(os.path.join(self.out_dir, "speakers.json"), "w") as f:
            f.write(json.dumps(speakers))

        print(
            "Total time: {} hours".format(
                n_frames * self.hop_length / self.sampling_rate / 3600
            )
        )

        # Shuffle, then split the tail into train and the head into val
        random.shuffle(out)
        out = [r for r in out if r is not None]

        # Write metadata
        with open(os.path.join(self.out_dir, "train.txt"), "w", encoding="utf-8") as f:
            for m in out[self.val_size :]:
                f.write(m + "\n")
        with open(os.path.join(self.out_dir, "val.txt"), "w", encoding="utf-8") as f:
            for m in out[: self.val_size]:
                f.write(m + "\n")

        return out

    def process_utterance(self, speaker, basename):
        """Process one utterance; return (metadata_line, n_mel_frames) or None.

        None is returned when the clip is shorter than skip_len samples.
        Side effect: saves the mel-spectrogram as <out_dir>/mel/<speaker>-mel-<basename>.npy.
        """
        wav_path = os.path.join(self.in_dir, speaker, "{}.wav".format(basename))
        text_path = os.path.join(self.in_dir, speaker, "{}.lab".format(basename))

        # Read and trim wav files
        # NOTE(review): librosa.load is called without sr=, so audio is
        # resampled to librosa's default rate (22050 Hz) regardless of the
        # configured sampling_rate — confirm these match.
        wav, _ = librosa.load(wav_path)
        wav = wav.astype(np.float32)
        if len(wav) < self.skip_len:
            return None
        # strip leading/trailing silence below trim_top_db
        wav = librosa.effects.trim(wav, top_db=self.trim_top_db, frame_length=self.filter_length, hop_length=self.hop_length)[0]

        # Compute mel-scale spectrogram
        mel_spectrogram, _ = Audio.stft.get_mel_spectrogram(self.STFT, wav)
        mel_spectrogram = mel_spectrogram.squeeze(1).numpy()

        # Read raw text
        with open(text_path, "r") as f:
            raw_text = f.readline().strip("\n")

        # Get phoneme sequence, rendered in curly-brace notation
        phone = grapheme_to_phoneme(raw_text, self.g2p, self.lexicon)
        text = "{" + " ".join(phone) + "}"

        # Save the (time, n_mels) mel matrix
        mel_filename = "{}-mel-{}.npy".format(speaker, basename)
        np.save(
            os.path.join(self.out_dir, "mel", mel_filename),
            mel_spectrogram.T,
        )

        return (
            "|".join([basename, speaker, text, raw_text]),
            mel_spectrogram.shape[1],
        )
|
<filename>supervision/ssim.py
# code from https://github.com/Po-Hsun-Su/pytorch-ssim
import torch
import numpy
import math
def __gaussian__(kernel_size, std, data_type=torch.float32):
    """Return a 1-D Gaussian kernel of length *kernel_size*, normalised to sum 1."""
    center = kernel_size // 2
    weights = numpy.array(
        [math.exp(-(i - center) ** 2 / float(2 * std ** 2)) for i in range(kernel_size)]
    )
    weights /= numpy.sum(weights)
    return torch.tensor(weights, dtype=data_type)
def __create_kernel__(kernel_size, data_type=torch.float32, channels=3, std=1.5):
    """Build a depthwise 2-D Gaussian kernel of shape (channels, 1, k, k).

    The 2-D window is the outer product of the 1-D Gaussian with itself,
    replicated across *channels* for a grouped convolution.
    """
    vec = __gaussian__(kernel_size, std).unsqueeze(1)
    plane = torch.mm(vec, vec.t()).type(data_type)
    plane = plane.unsqueeze(0).unsqueeze(0)
    return plane.expand(channels, 1, kernel_size, kernel_size).contiguous()
def __ssim_gaussian__(prediction, groundtruth, kernel, kernel_size, channels=3):
    """Compute the per-pixel SSIM map using *kernel* as the local window.

    Local means/variances come from grouped (depthwise) convolutions with
    same-padding. Constants C1/C2 assume images in the [0, 1] range.
    """
    conv = torch.nn.functional.conv2d
    pad = kernel_size // 2

    # local means
    mu_p = conv(prediction, kernel, padding=pad, groups=channels)
    mu_g = conv(groundtruth, kernel, padding=pad, groups=channels)
    mu_p_sq = mu_p.pow(2)
    mu_g_sq = mu_g.pow(2)
    mu_pg = mu_p * mu_g

    # local (co)variances via E[x^2] - E[x]^2
    var_p = conv(prediction * prediction, kernel, padding=pad, groups=channels) - mu_p_sq
    var_g = conv(groundtruth * groundtruth, kernel, padding=pad, groups=channels) - mu_g_sq
    cov_pg = conv(prediction * groundtruth, kernel, padding=pad, groups=channels) - mu_pg

    C1 = 0.01**2  # assume that images are in the [0, 1] range
    C2 = 0.03**2  # assume that images are in the [0, 1] range

    numerator = (2 * mu_pg + C1) * (2 * cov_pg + C2)       # luminance * structure
    denominator = (mu_p_sq + mu_g_sq + C1) * (var_p + var_g + C2)
    return numerator / denominator
def ssim_gaussian(prediction, groundtruth, kernel_size=11, std=1.5):
    """SSIM map between two image batches using a Gaussian window.

    Builds a depthwise Gaussian kernel matching the input's channel count,
    device and dtype, then delegates to __ssim_gaussian__.
    """
    _, channels, _, _ = prediction.size()
    kernel = __create_kernel__(kernel_size, data_type=prediction.type(),
                               channels=channels, std=std)

    # move/convert the kernel to match the input tensor
    if prediction.is_cuda:
        kernel = kernel.to(prediction.get_device())
    kernel = kernel.type_as(prediction)

    return __ssim_gaussian__(prediction, groundtruth, kernel, kernel_size, channels)
def ssim_box(prediction, groundtruth, kernel_size=3):
    """SSIM map between two image batches using a uniform (box) window.

    Local statistics come from average pooling; the result is zero-padded
    back to the input's spatial size, so the border values are 0, not SSIM.
    Constants C1/C2 assume images in the [0, 1] range.
    """
    C1 = 0.01 ** 2
    C2 = 0.03 ** 2

    pool = torch.nn.AvgPool2d(kernel_size, stride=1)

    # local means
    mu_p = pool(prediction)
    mu_g = pool(groundtruth)
    mu_pg = mu_p * mu_g
    mu_p_sq = mu_p.pow(2)
    mu_g_sq = mu_g.pow(2)

    # local (co)variances via E[x^2] - E[x]^2
    var_p = pool(prediction * prediction) - mu_p_sq
    var_g = pool(groundtruth * groundtruth) - mu_g_sq
    cov_pg = pool(prediction * groundtruth) - mu_pg

    ssim_map = ((2 * mu_pg + C1) * (2 * cov_pg + C2)) \
        / ((mu_p_sq + mu_g_sq + C1) * (var_p + var_g + C2))

    pad = kernel_size // 2
    return torch.nn.functional.pad(ssim_map, (pad, pad, pad, pad))
def ssim_loss(prediction, groundtruth, kernel_size=5, std=1.5, mode='gaussian'):
    """Dispatch to an SSIM implementation selected by *mode*.

    Args:
        prediction, groundtruth: image batches of shape (N, C, H, W) in [0, 1].
        kernel_size: local window size.
        std: Gaussian std (ignored for 'box' mode).
        mode: 'gaussian' or 'box'.

    Returns:
        The per-pixel SSIM map (NOTE(review): this is a similarity, not a
        loss — callers typically use 1 - mean(...) as the actual loss).

    Raises:
        ValueError: for an unknown *mode* (the original silently returned None).
    """
    if mode == 'gaussian':
        return ssim_gaussian(prediction, groundtruth, kernel_size=kernel_size, std=std)
    elif mode == 'box':
        return ssim_box(prediction, groundtruth, kernel_size=kernel_size)
    else:
        raise ValueError("unknown SSIM mode: %r (expected 'gaussian' or 'box')" % (mode,))
# -*- coding: utf-8 -*-
model = {
'tlh': 0,
"e' ": 1,
'gh ': 2,
"i' ": 3,
" 'e": 4,
"u' ": 5,
' vi': 6,
'atl': 7,
"a' ": 8,
' gh': 9,
'ej ': 10,
' ho': 11,
' ch': 12,
' mu': 13,
' tl': 14,
'nga': 15,
'mey': 16,
"wi'": 17,
"be'": 18,
'an ': 19,
'ch ': 20,
'gan': 21,
'chu': 22,
'lh ': 23,
'ing': 24,
"'e'": 25,
'hin': 26,
'jat': 27,
'lhi': 28,
' da': 29,
' ja': 30,
"o' ": 31,
'ugh': 32,
'aq ': 33,
'cha': 34,
' po': 35,
'ey ': 36,
" 'a": 37,
' je': 38,
"'ej": 39,
' pa': 40,
'ng ': 41,
'ad ': 42,
' qa': 43,
'oh ': 44,
'eh ': 45,
'ah ': 46,
'gha': 47,
'je ': 48,
' lu': 49,
'hol': 50,
"aw'": 51,
' ji': 52,
'ong': 53,
"pu'": 54,
'aj ': 55,
'vad': 56,
"w' ": 57,
"' j": 58,
"ha'": 59,
'is ': 60,
'tah': 61,
"' '": 62,
'ang': 63,
"h '": 64,
'pon': 65,
'am ': 66,
'law': 67,
"mo'": 68,
"qu'": 69,
'hbe': 70,
'ol ': 71,
'vam': 72,
'agh': 73,
"mu'": 74,
'ahv': 75,
'bej': 76,
'ogh': 77,
'uch': 78,
"' v": 79,
'ach': 80,
'hug': 81,
' lo': 82,
' qu': 83,
'cho': 84,
'hva': 85,
'ij ': 86,
' la': 87,
"lu'": 88,
'vis': 89,
' ne': 90,
' pu': 91,
' so': 92,
' ta': 93,
' va': 94,
"'ac": 95,
"di'": 96,
"hu'": 97,
'lah': 98,
'moh': 99,
" 'o": 100,
"' m": 101,
'daq': 102,
'hah': 103,
'n h': 104,
'neh': 105,
"u'm": 106,
"ay'": 107,
'gho': 108,
'h v': 109,
'meh': 110,
'oy ': 111,
' ma': 112,
' nu': 113,
"'me": 114,
'el ': 115,
' ba': 116,
' be': 117,
' de': 118,
' ng': 119,
"' t": 120,
'h d': 121,
'hvi': 122,
'oq ': 123,
' wa': 124,
"' l": 125,
"'wi": 126,
'hme': 127,
"li'": 128,
'uq ': 129,
' bo': 130,
'bog': 131,
'del': 132,
'h p': 133,
'h t': 134,
'ich': 135,
'vil': 136,
' qe': 137,
' wi': 138,
'ahb': 139,
'ban': 140,
'eng': 141,
'haq': 142,
'hoh': 143,
'ov ': 144,
'viq': 145,
' ha': 146,
' ti': 147,
"' n": 148,
"' p": 149,
"'a'": 150,
'hwi': 151,
'igh': 152,
"lo'": 153,
"y' ": 154,
' du': 155,
' no': 156,
' yu': 157,
"'mo": 158,
"'va": 159,
'daj': 160,
'das': 161,
'egh': 162,
'hom': 163,
'muc': 164,
'om ': 165,
'otl': 166,
'us ': 167,
' bi': 168,
' tu': 169,
"' h": 170,
'chm': 171,
'h q': 172,
'hov': 173,
'nis': 174,
'qar': 175,
'uj ': 176,
"' q": 177,
"'ch": 178,
'h m': 179,
'hmo': 180,
'jih': 181,
'par': 182,
'wij': 183,
' hu': 184,
"' d": 185,
"'a ": 186,
'etl': 187,
'h g': 188,
'h j': 189,
'h l': 190,
'lod': 191,
'maq': 192,
'och': 193,
"wa'": 194,
'yuq': 195,
' di': 196,
' le': 197,
' pe': 198,
' ya': 199,
"'di": 200,
'che': 201,
'ech': 202,
'ih ': 203,
'ija': 204,
'in ': 205,
"j '": 206,
'j m': 207,
'lhw': 208,
"pa'": 209,
" 'i": 210,
' mi': 211,
' qi': 212,
' ro': 213,
' ru': 214,
"'be": 215,
'anp': 216,
'ghi': 217,
'ghu': 218,
'h b': 219,
'hay': 220,
'hch': 221,
'iq ': 222,
'npu': 223,
'od ': 224,
'paq': 225,
'qay': 226,
'rda': 227,
'soh': 228,
' do': 229,
' me': 230,
' qo': 231,
' sa': 232,
"' c": 233,
"' g": 234,
"' s": 235,
"'lu": 236,
'aml': 237,
'ard': 238,
'as ': 239,
'd p': 240,
'gme': 241,
'h n': 242,
'hta': 243,
"i'v": 244,
'j j': 245,
'jij': 246,
'len': 247,
'ngm': 248,
'qan': 249,
'qme': 250,
'vaj': 251,
'wiv': 252,
' mo': 253,
' ni': 254,
"'la": 255,
"'pu": 256,
"'qu": 257,
'ar ': 258,
'arm': 259,
'dwi': 260,
'g p': 261,
'ghd': 262,
'h c': 263,
'ham': 264,
'hla': 265,
'hqu': 266,
'ilo': 267,
'iqa': 268,
'iqi': 269,
'j p': 270,
'j t': 271,
'j v': 272,
'lad': 273,
'lho': 274,
'mar': 275,
'mug': 276,
'pus': 277,
'q s': 278,
'q t': 279,
'rgh': 280,
'rma': 281,
'sov': 282,
"ta'": 283,
'tin': 284,
"tu'": 285,
"u'd": 286,
'vet': 287,
'yli': 288,
"yu'": 289,
' to': 290,
"'oh": 291,
'aqq': 292,
'art': 293,
'at ': 294,
'ayl': 295,
'ayt': 296,
'et ': 297,
'haj': 298,
'har': 299,
}
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example demonstrates how to add a keyword to an ad group."""
from __future__ import absolute_import
import argparse
import six
import sys
import google.ads.google_ads.client
def main(client, customer_id, ad_group_id, keyword):
    """Add an exact-match keyword criterion to the given ad group.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: the Google Ads customer ID (str).
        ad_group_id: the ID of the ad group that receives the keyword (str).
        keyword: the keyword text to add (str).

    Exits the process with status 1 when the mutate request fails.
    """
    # Both services are pinned to API version v1.
    ad_group_service = client.get_service('AdGroupService', version='v1')
    ad_group_criterion_service = client.get_service('AdGroupCriterionService',
                                                    version='v1')

    # Create keyword criterion: enabled, exact-match, attached to the ad group.
    ad_group_criterion_operation = client.get_type('AdGroupCriterionOperation',
                                                   version='v1')
    ad_group_criterion = ad_group_criterion_operation.create
    ad_group_criterion.ad_group.value = ad_group_service.ad_group_path(
        customer_id, ad_group_id)
    ad_group_criterion.status = client.get_type(
        'AdGroupCriterionStatusEnum').ENABLED
    ad_group_criterion.keyword.text.value = keyword
    ad_group_criterion.keyword.match_type = client.get_type(
        'KeywordMatchTypeEnum').EXACT

    # Optional field
    # All fields can be referenced from the protos directly.
    # The protos are located in subdirectories under
    # google/ads/googleads/v0/proto.
    # ad_group_criterion.negative.value = True

    # Optional repeated field
    # final_url = ad_group_criterion.final_urls.add()
    # final_url.value = 'https://www.example.com'

    # Add keyword; on failure print every error detail and exit non-zero.
    try:
        ad_group_criterion_response = (
            ad_group_criterion_service.mutate_ad_group_criteria(
                customer_id, [ad_group_criterion_operation]))
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)

    print('Created keyword %s.'
          % ad_group_criterion_response.results[0].resource_name)
if __name__ == '__main__':
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    google_ads_client = (google.ads.google_ads.client.GoogleAdsClient
                         .load_from_storage())

    # Parse command-line options; customer and ad group IDs are mandatory.
    parser = argparse.ArgumentParser(
        description=('Adds a keyword to the provided ad group, for the '
                     'specified customer.'))
    # The following argument(s) should be provided to run the example.
    parser.add_argument('-c', '--customer_id', type=six.text_type,
                        required=True, help='The Google Ads customer ID.')
    parser.add_argument('-a', '--ad_group_id', type=six.text_type,
                        required=True, help='The ad group ID.')
    parser.add_argument('-k', '--keyword', type=six.text_type, required=False,
                        default='mars cruise',
                        help=('The keyword to be added to the ad group. Note '
                              'that you will receive an error response if you '
                              'attempt to create a duplicate keyword.'))
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.ad_group_id, args.keyword)
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import mindspore.ops as P
from mindspore import nn
class Module3(nn.Cell):
    """Conv2d -> ReLU building block (auto-generated network fragment).

    All convolution geometry (channels, kernel, stride, padding, pad mode,
    groups) is supplied by the caller; dilation is fixed at (1, 1) and the
    convolution always carries a bias.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_0_kernel_size, conv2d_0_stride,
                 conv2d_0_padding, conv2d_0_pad_mode, conv2d_0_group):
        super(Module3, self).__init__()
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=conv2d_0_kernel_size,
                                  stride=conv2d_0_stride,
                                  padding=conv2d_0_padding,
                                  pad_mode=conv2d_0_pad_mode,
                                  dilation=(1, 1),
                                  group=conv2d_0_group,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()

    def construct(self, x):
        """Apply the convolution followed by ReLU."""
        return self.relu_1(self.conv2d_0(x))
class Module8(nn.Cell):
    """Auto-generated block: two Module3 (Conv+ReLU) stages followed by a
    1x1 projection Conv2d (no activation on the final convolution).

    Every geometric parameter of the two inner stages, plus the projection's
    channel counts, is forwarded verbatim from the constructor arguments.
    """

    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, module3_0_conv2d_0_in_channels,
                 module3_0_conv2d_0_out_channels, module3_0_conv2d_0_kernel_size, module3_0_conv2d_0_stride,
                 module3_0_conv2d_0_padding, module3_0_conv2d_0_pad_mode, module3_0_conv2d_0_group,
                 module3_1_conv2d_0_in_channels, module3_1_conv2d_0_out_channels, module3_1_conv2d_0_kernel_size,
                 module3_1_conv2d_0_stride, module3_1_conv2d_0_padding, module3_1_conv2d_0_pad_mode,
                 module3_1_conv2d_0_group):
        super(Module8, self).__init__()
        # First Conv+ReLU stage.
        self.module3_0 = Module3(conv2d_0_in_channels=module3_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module3_0_conv2d_0_out_channels,
                                 conv2d_0_kernel_size=module3_0_conv2d_0_kernel_size,
                                 conv2d_0_stride=module3_0_conv2d_0_stride,
                                 conv2d_0_padding=module3_0_conv2d_0_padding,
                                 conv2d_0_pad_mode=module3_0_conv2d_0_pad_mode,
                                 conv2d_0_group=module3_0_conv2d_0_group)
        # Second Conv+ReLU stage.
        self.module3_1 = Module3(conv2d_0_in_channels=module3_1_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module3_1_conv2d_0_out_channels,
                                 conv2d_0_kernel_size=module3_1_conv2d_0_kernel_size,
                                 conv2d_0_stride=module3_1_conv2d_0_stride,
                                 conv2d_0_padding=module3_1_conv2d_0_padding,
                                 conv2d_0_pad_mode=module3_1_conv2d_0_pad_mode,
                                 conv2d_0_group=module3_1_conv2d_0_group)
        # Final 1x1 projection convolution (bias, no activation).
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)

    def construct(self, x):
        """Run x through stage 0, stage 1, then the 1x1 projection."""
        module3_0_opt = self.module3_0(x)
        module3_1_opt = self.module3_1(module3_0_opt)
        opt_conv2d_0 = self.conv2d_0(module3_1_opt)
        return opt_conv2d_0
class Module0(nn.Cell):
    """Auto-generated residual bottleneck block.

    Structure: 1x1 conv -> ReLU -> 3x3 grouped conv -> ReLU -> 1x1 conv,
    then an element-wise add with the block input followed by a final ReLU
    (so input and output channel counts must match for the Add).
    Attribute names are kept as generated — they are the checkpoint keys.
    """
    def __init__(self, conv2d_0_in_channels, conv2d_0_out_channels, conv2d_2_in_channels, conv2d_2_out_channels,
                 conv2d_2_group, conv2d_4_in_channels, conv2d_4_out_channels):
        super(Module0, self).__init__()
        self.conv2d_0 = nn.Conv2d(in_channels=conv2d_0_in_channels,
                                  out_channels=conv2d_0_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()
        # 3x3 grouped convolution with explicit symmetric padding of 1.
        self.conv2d_2 = nn.Conv2d(in_channels=conv2d_2_in_channels,
                                  out_channels=conv2d_2_out_channels,
                                  kernel_size=(3, 3),
                                  stride=(1, 1),
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=conv2d_2_group,
                                  has_bias=True)
        self.relu_3 = nn.ReLU()
        self.conv2d_4 = nn.Conv2d(in_channels=conv2d_4_in_channels,
                                  out_channels=conv2d_4_out_channels,
                                  kernel_size=(1, 1),
                                  stride=(1, 1),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_6 = nn.ReLU()
    def construct(self, x):
        """Bottleneck path plus identity skip connection."""
        opt_conv2d_0 = self.conv2d_0(x)
        opt_relu_1 = self.relu_1(opt_conv2d_0)
        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
        opt_relu_3 = self.relu_3(opt_conv2d_2)
        opt_conv2d_4 = self.conv2d_4(opt_relu_3)
        # Residual connection: add the block input back in.
        opt_add_5 = P.Add()(x, opt_conv2d_4)
        opt_relu_6 = self.relu_6(opt_add_5)
        return opt_relu_6
class Module10(nn.Cell):
    """Auto-generated sequential stack of four Module0 residual blocks.

    Each block's full hyper-parameter set is passed through individually;
    attribute names are kept as generated (checkpoint parameter keys).
    """
    def __init__(self, module0_0_conv2d_0_in_channels, module0_0_conv2d_0_out_channels, module0_0_conv2d_2_in_channels,
                 module0_0_conv2d_2_out_channels, module0_0_conv2d_2_group, module0_0_conv2d_4_in_channels,
                 module0_0_conv2d_4_out_channels, module0_1_conv2d_0_in_channels, module0_1_conv2d_0_out_channels,
                 module0_1_conv2d_2_in_channels, module0_1_conv2d_2_out_channels, module0_1_conv2d_2_group,
                 module0_1_conv2d_4_in_channels, module0_1_conv2d_4_out_channels, module0_2_conv2d_0_in_channels,
                 module0_2_conv2d_0_out_channels, module0_2_conv2d_2_in_channels, module0_2_conv2d_2_out_channels,
                 module0_2_conv2d_2_group, module0_2_conv2d_4_in_channels, module0_2_conv2d_4_out_channels,
                 module0_3_conv2d_0_in_channels, module0_3_conv2d_0_out_channels, module0_3_conv2d_2_in_channels,
                 module0_3_conv2d_2_out_channels, module0_3_conv2d_2_group, module0_3_conv2d_4_in_channels,
                 module0_3_conv2d_4_out_channels):
        super(Module10, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=module0_0_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_0_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_0_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_0_conv2d_2_out_channels,
                                 conv2d_2_group=module0_0_conv2d_2_group,
                                 conv2d_4_in_channels=module0_0_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_0_conv2d_4_out_channels)
        self.module0_1 = Module0(conv2d_0_in_channels=module0_1_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_1_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_1_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_1_conv2d_2_out_channels,
                                 conv2d_2_group=module0_1_conv2d_2_group,
                                 conv2d_4_in_channels=module0_1_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_1_conv2d_4_out_channels)
        self.module0_2 = Module0(conv2d_0_in_channels=module0_2_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_2_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_2_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_2_conv2d_2_out_channels,
                                 conv2d_2_group=module0_2_conv2d_2_group,
                                 conv2d_4_in_channels=module0_2_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_2_conv2d_4_out_channels)
        self.module0_3 = Module0(conv2d_0_in_channels=module0_3_conv2d_0_in_channels,
                                 conv2d_0_out_channels=module0_3_conv2d_0_out_channels,
                                 conv2d_2_in_channels=module0_3_conv2d_2_in_channels,
                                 conv2d_2_out_channels=module0_3_conv2d_2_out_channels,
                                 conv2d_2_group=module0_3_conv2d_2_group,
                                 conv2d_4_in_channels=module0_3_conv2d_4_in_channels,
                                 conv2d_4_out_channels=module0_3_conv2d_4_out_channels)
    def construct(self, x):
        """Chain the four residual blocks in order."""
        module0_0_opt = self.module0_0(x)
        module0_1_opt = self.module0_1(module0_0_opt)
        module0_2_opt = self.module0_2(module0_1_opt)
        module0_3_opt = self.module0_3(module0_2_opt)
        return module0_3_opt
class Module1(nn.Cell):
    """Auto-generated stack of two fixed 720-channel Module0 residual
    blocks (group=6 in the 3x3 convolutions). No free hyper-parameters.
    """
    def __init__(self):
        super(Module1, self).__init__()
        self.module0_0 = Module0(conv2d_0_in_channels=720,
                                 conv2d_0_out_channels=720,
                                 conv2d_2_in_channels=720,
                                 conv2d_2_out_channels=720,
                                 conv2d_2_group=6,
                                 conv2d_4_in_channels=720,
                                 conv2d_4_out_channels=720)
        self.module0_1 = Module0(conv2d_0_in_channels=720,
                                 conv2d_0_out_channels=720,
                                 conv2d_2_in_channels=720,
                                 conv2d_2_out_channels=720,
                                 conv2d_2_group=6,
                                 conv2d_4_in_channels=720,
                                 conv2d_4_out_channels=720)
    def construct(self, x):
        """Apply the two residual blocks sequentially."""
        module0_0_opt = self.module0_0(x)
        module0_1_opt = self.module0_1(module0_0_opt)
        return module0_1_opt
class MindSporeModel(nn.Cell):
    """Auto-generated image-classification backbone (1000-way output head).

    Overall layout visible from the code: a strided 3x3 stem convolution,
    then four stages. Each stage pairs a strided 1x1 shortcut convolution
    with a Module8 downsampling branch (summed via Add, then ReLU), and is
    followed by a stack of residual Module0/Module10 blocks. The head is
    7x7 average pooling, flatten, and a Dense layer 1920 -> 1000.
    Channel widths per stage: 80 -> 240 -> 720 -> 1920.
    Attribute names are the generated checkpoint parameter keys.
    """
    def __init__(self):
        super(MindSporeModel, self).__init__()
        # Stem: 3x3/stride-2 convolution, 3 -> 32 channels.
        self.conv2d_0 = nn.Conv2d(in_channels=3,
                                  out_channels=32,
                                  kernel_size=(3, 3),
                                  stride=(2, 2),
                                  padding=(1, 1, 1, 1),
                                  pad_mode="pad",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        self.relu_1 = nn.ReLU()
        # Stage 1 shortcut: strided 1x1 projection, 32 -> 80 channels.
        self.conv2d_2 = nn.Conv2d(in_channels=32,
                                  out_channels=80,
                                  kernel_size=(1, 1),
                                  stride=(2, 2),
                                  padding=0,
                                  pad_mode="valid",
                                  dilation=(1, 1),
                                  group=1,
                                  has_bias=True)
        # Stage 1 main branch.
        self.module8_0 = Module8(conv2d_0_in_channels=80,
                                 conv2d_0_out_channels=80,
                                 module3_0_conv2d_0_in_channels=32,
                                 module3_0_conv2d_0_out_channels=80,
                                 module3_0_conv2d_0_kernel_size=(1, 1),
                                 module3_0_conv2d_0_stride=(1, 1),
                                 module3_0_conv2d_0_padding=0,
                                 module3_0_conv2d_0_pad_mode="valid",
                                 module3_0_conv2d_0_group=1,
                                 module3_1_conv2d_0_in_channels=80,
                                 module3_1_conv2d_0_out_channels=80,
                                 module3_1_conv2d_0_kernel_size=(3, 3),
                                 module3_1_conv2d_0_stride=(2, 2),
                                 module3_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module3_1_conv2d_0_pad_mode="pad",
                                 module3_1_conv2d_0_group=1)
        self.relu_9 = nn.ReLU()
        self.module0_0 = Module0(conv2d_0_in_channels=80,
                                 conv2d_0_out_channels=80,
                                 conv2d_2_in_channels=80,
                                 conv2d_2_out_channels=80,
                                 conv2d_2_group=1,
                                 conv2d_4_in_channels=80,
                                 conv2d_4_out_channels=80)
        # Stage 2 shortcut: strided 1x1 projection, 80 -> 240 channels.
        self.conv2d_17 = nn.Conv2d(in_channels=80,
                                   out_channels=240,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module8_1 = Module8(conv2d_0_in_channels=240,
                                 conv2d_0_out_channels=240,
                                 module3_0_conv2d_0_in_channels=80,
                                 module3_0_conv2d_0_out_channels=240,
                                 module3_0_conv2d_0_kernel_size=(1, 1),
                                 module3_0_conv2d_0_stride=(1, 1),
                                 module3_0_conv2d_0_padding=0,
                                 module3_0_conv2d_0_pad_mode="valid",
                                 module3_0_conv2d_0_group=1,
                                 module3_1_conv2d_0_in_channels=240,
                                 module3_1_conv2d_0_out_channels=240,
                                 module3_1_conv2d_0_kernel_size=(3, 3),
                                 module3_1_conv2d_0_stride=(2, 2),
                                 module3_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module3_1_conv2d_0_pad_mode="pad",
                                 module3_1_conv2d_0_group=2)
        self.relu_24 = nn.ReLU()
        # Stage 2 residual stack: four 240-channel blocks (group=2).
        self.module10_0 = Module10(module0_0_conv2d_0_in_channels=240,
                                   module0_0_conv2d_0_out_channels=240,
                                   module0_0_conv2d_2_in_channels=240,
                                   module0_0_conv2d_2_out_channels=240,
                                   module0_0_conv2d_2_group=2,
                                   module0_0_conv2d_4_in_channels=240,
                                   module0_0_conv2d_4_out_channels=240,
                                   module0_1_conv2d_0_in_channels=240,
                                   module0_1_conv2d_0_out_channels=240,
                                   module0_1_conv2d_2_in_channels=240,
                                   module0_1_conv2d_2_out_channels=240,
                                   module0_1_conv2d_2_group=2,
                                   module0_1_conv2d_4_in_channels=240,
                                   module0_1_conv2d_4_out_channels=240,
                                   module0_2_conv2d_0_in_channels=240,
                                   module0_2_conv2d_0_out_channels=240,
                                   module0_2_conv2d_2_in_channels=240,
                                   module0_2_conv2d_2_out_channels=240,
                                   module0_2_conv2d_2_group=2,
                                   module0_2_conv2d_4_in_channels=240,
                                   module0_2_conv2d_4_out_channels=240,
                                   module0_3_conv2d_0_in_channels=240,
                                   module0_3_conv2d_0_out_channels=240,
                                   module0_3_conv2d_2_in_channels=240,
                                   module0_3_conv2d_2_out_channels=240,
                                   module0_3_conv2d_2_group=2,
                                   module0_3_conv2d_4_in_channels=240,
                                   module0_3_conv2d_4_out_channels=240)
        # Stage 3 shortcut: strided 1x1 projection, 240 -> 720 channels.
        self.conv2d_53 = nn.Conv2d(in_channels=240,
                                   out_channels=720,
                                   kernel_size=(1, 1),
                                   stride=(2, 2),
                                   padding=0,
                                   pad_mode="valid",
                                   dilation=(1, 1),
                                   group=1,
                                   has_bias=True)
        self.module8_2 = Module8(conv2d_0_in_channels=720,
                                 conv2d_0_out_channels=720,
                                 module3_0_conv2d_0_in_channels=240,
                                 module3_0_conv2d_0_out_channels=720,
                                 module3_0_conv2d_0_kernel_size=(1, 1),
                                 module3_0_conv2d_0_stride=(1, 1),
                                 module3_0_conv2d_0_padding=0,
                                 module3_0_conv2d_0_pad_mode="valid",
                                 module3_0_conv2d_0_group=1,
                                 module3_1_conv2d_0_in_channels=720,
                                 module3_1_conv2d_0_out_channels=720,
                                 module3_1_conv2d_0_kernel_size=(3, 3),
                                 module3_1_conv2d_0_stride=(2, 2),
                                 module3_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module3_1_conv2d_0_pad_mode="pad",
                                 module3_1_conv2d_0_group=6)
        self.relu_60 = nn.ReLU()
        # Stage 3 residual stack: 3 x Module10 (12 blocks) + Module1 (2 blocks),
        # all 720 channels with group=6.
        self.module10_1 = Module10(module0_0_conv2d_0_in_channels=720,
                                   module0_0_conv2d_0_out_channels=720,
                                   module0_0_conv2d_2_in_channels=720,
                                   module0_0_conv2d_2_out_channels=720,
                                   module0_0_conv2d_2_group=6,
                                   module0_0_conv2d_4_in_channels=720,
                                   module0_0_conv2d_4_out_channels=720,
                                   module0_1_conv2d_0_in_channels=720,
                                   module0_1_conv2d_0_out_channels=720,
                                   module0_1_conv2d_2_in_channels=720,
                                   module0_1_conv2d_2_out_channels=720,
                                   module0_1_conv2d_2_group=6,
                                   module0_1_conv2d_4_in_channels=720,
                                   module0_1_conv2d_4_out_channels=720,
                                   module0_2_conv2d_0_in_channels=720,
                                   module0_2_conv2d_0_out_channels=720,
                                   module0_2_conv2d_2_in_channels=720,
                                   module0_2_conv2d_2_out_channels=720,
                                   module0_2_conv2d_2_group=6,
                                   module0_2_conv2d_4_in_channels=720,
                                   module0_2_conv2d_4_out_channels=720,
                                   module0_3_conv2d_0_in_channels=720,
                                   module0_3_conv2d_0_out_channels=720,
                                   module0_3_conv2d_2_in_channels=720,
                                   module0_3_conv2d_2_out_channels=720,
                                   module0_3_conv2d_2_group=6,
                                   module0_3_conv2d_4_in_channels=720,
                                   module0_3_conv2d_4_out_channels=720)
        self.module10_2 = Module10(module0_0_conv2d_0_in_channels=720,
                                   module0_0_conv2d_0_out_channels=720,
                                   module0_0_conv2d_2_in_channels=720,
                                   module0_0_conv2d_2_out_channels=720,
                                   module0_0_conv2d_2_group=6,
                                   module0_0_conv2d_4_in_channels=720,
                                   module0_0_conv2d_4_out_channels=720,
                                   module0_1_conv2d_0_in_channels=720,
                                   module0_1_conv2d_0_out_channels=720,
                                   module0_1_conv2d_2_in_channels=720,
                                   module0_1_conv2d_2_out_channels=720,
                                   module0_1_conv2d_2_group=6,
                                   module0_1_conv2d_4_in_channels=720,
                                   module0_1_conv2d_4_out_channels=720,
                                   module0_2_conv2d_0_in_channels=720,
                                   module0_2_conv2d_0_out_channels=720,
                                   module0_2_conv2d_2_in_channels=720,
                                   module0_2_conv2d_2_out_channels=720,
                                   module0_2_conv2d_2_group=6,
                                   module0_2_conv2d_4_in_channels=720,
                                   module0_2_conv2d_4_out_channels=720,
                                   module0_3_conv2d_0_in_channels=720,
                                   module0_3_conv2d_0_out_channels=720,
                                   module0_3_conv2d_2_in_channels=720,
                                   module0_3_conv2d_2_out_channels=720,
                                   module0_3_conv2d_2_group=6,
                                   module0_3_conv2d_4_in_channels=720,
                                   module0_3_conv2d_4_out_channels=720)
        self.module10_3 = Module10(module0_0_conv2d_0_in_channels=720,
                                   module0_0_conv2d_0_out_channels=720,
                                   module0_0_conv2d_2_in_channels=720,
                                   module0_0_conv2d_2_out_channels=720,
                                   module0_0_conv2d_2_group=6,
                                   module0_0_conv2d_4_in_channels=720,
                                   module0_0_conv2d_4_out_channels=720,
                                   module0_1_conv2d_0_in_channels=720,
                                   module0_1_conv2d_0_out_channels=720,
                                   module0_1_conv2d_2_in_channels=720,
                                   module0_1_conv2d_2_out_channels=720,
                                   module0_1_conv2d_2_group=6,
                                   module0_1_conv2d_4_in_channels=720,
                                   module0_1_conv2d_4_out_channels=720,
                                   module0_2_conv2d_0_in_channels=720,
                                   module0_2_conv2d_0_out_channels=720,
                                   module0_2_conv2d_2_in_channels=720,
                                   module0_2_conv2d_2_out_channels=720,
                                   module0_2_conv2d_2_group=6,
                                   module0_2_conv2d_4_in_channels=720,
                                   module0_2_conv2d_4_out_channels=720,
                                   module0_3_conv2d_0_in_channels=720,
                                   module0_3_conv2d_0_out_channels=720,
                                   module0_3_conv2d_2_in_channels=720,
                                   module0_3_conv2d_2_out_channels=720,
                                   module0_3_conv2d_2_group=6,
                                   module0_3_conv2d_4_in_channels=720,
                                   module0_3_conv2d_4_out_channels=720)
        self.module1_0 = Module1()
        # Stage 4 shortcut: strided 1x1 projection, 720 -> 1920 channels.
        self.conv2d_159 = nn.Conv2d(in_channels=720,
                                    out_channels=1920,
                                    kernel_size=(1, 1),
                                    stride=(2, 2),
                                    padding=0,
                                    pad_mode="valid",
                                    dilation=(1, 1),
                                    group=1,
                                    has_bias=True)
        self.module8_3 = Module8(conv2d_0_in_channels=1920,
                                 conv2d_0_out_channels=1920,
                                 module3_0_conv2d_0_in_channels=720,
                                 module3_0_conv2d_0_out_channels=1920,
                                 module3_0_conv2d_0_kernel_size=(1, 1),
                                 module3_0_conv2d_0_stride=(1, 1),
                                 module3_0_conv2d_0_padding=0,
                                 module3_0_conv2d_0_pad_mode="valid",
                                 module3_0_conv2d_0_group=1,
                                 module3_1_conv2d_0_in_channels=1920,
                                 module3_1_conv2d_0_out_channels=1920,
                                 module3_1_conv2d_0_kernel_size=(3, 3),
                                 module3_1_conv2d_0_stride=(2, 2),
                                 module3_1_conv2d_0_padding=(1, 1, 1, 1),
                                 module3_1_conv2d_0_pad_mode="pad",
                                 module3_1_conv2d_0_group=16)
        self.relu_166 = nn.ReLU()
        # Classification head. The 7x7 pool implies a 7x7 feature map here,
        # i.e. a 224x224 input -- TODO confirm expected input resolution.
        self.avgpool2d_167 = nn.AvgPool2d(kernel_size=(7, 7))
        self.flatten_168 = nn.Flatten()
        self.dense_169 = nn.Dense(in_channels=1920, out_channels=1000, has_bias=True)
    def construct(self, input_1):
        """Forward pass: stem, four downsampling stages, classifier head."""
        opt_conv2d_0 = self.conv2d_0(input_1)
        opt_relu_1 = self.relu_1(opt_conv2d_0)
        # Each stage: 1x1 strided shortcut and Module8 branch both consume
        # the same input; their outputs are summed and passed through ReLU.
        opt_conv2d_2 = self.conv2d_2(opt_relu_1)
        module8_0_opt = self.module8_0(opt_relu_1)
        opt_add_8 = P.Add()(opt_conv2d_2, module8_0_opt)
        opt_relu_9 = self.relu_9(opt_add_8)
        module0_0_opt = self.module0_0(opt_relu_9)
        opt_conv2d_17 = self.conv2d_17(module0_0_opt)
        module8_1_opt = self.module8_1(module0_0_opt)
        opt_add_23 = P.Add()(opt_conv2d_17, module8_1_opt)
        opt_relu_24 = self.relu_24(opt_add_23)
        module10_0_opt = self.module10_0(opt_relu_24)
        opt_conv2d_53 = self.conv2d_53(module10_0_opt)
        module8_2_opt = self.module8_2(module10_0_opt)
        opt_add_59 = P.Add()(opt_conv2d_53, module8_2_opt)
        opt_relu_60 = self.relu_60(opt_add_59)
        module10_1_opt = self.module10_1(opt_relu_60)
        module10_2_opt = self.module10_2(module10_1_opt)
        module10_3_opt = self.module10_3(module10_2_opt)
        module1_0_opt = self.module1_0(module10_3_opt)
        opt_conv2d_159 = self.conv2d_159(module1_0_opt)
        module8_3_opt = self.module8_3(module1_0_opt)
        opt_add_165 = P.Add()(opt_conv2d_159, module8_3_opt)
        opt_relu_166 = self.relu_166(opt_add_165)
        # Head: global pooling -> flatten -> 1000-way dense.
        opt_avgpool2d_167 = self.avgpool2d_167(opt_relu_166)
        opt_flatten_168 = self.flatten_168(opt_avgpool2d_167)
        opt_dense_169 = self.dense_169(opt_flatten_168)
        return opt_dense_169
|
<gh_stars>10-100
#!/bin/python
import argparse
import os
import glob
import json
import shutil
import urllib.parse
import re
import bs4 as bs
import subprocess
def get_path(url: str):
    """Map a book-resource URL to a relative file path.

    Strips any leading ``/books/<id>`` prefix and a trailing ``/content``
    or ``/encrypted/<n>`` suffix. Paths without a file extension get
    ``.jpg`` appended for encrypted (image) resources, ``.html`` otherwise.
    """
    raw_path = urllib.parse.urlparse(url).path
    parts = re.search(r"^(/books/\d+)?/(.+?)(/content|/encrypted/\d+)?$", raw_path)
    rel_path = parts.group(2)
    # Only the last 8 characters are inspected for an extension dot.
    has_extension = "." in rel_path[-8:]
    if not has_extension:
        suffix = ".jpg" if "encrypted" in str(parts.group(3)) else ".html"
        rel_path = rel_path + suffix
    return rel_path
def export(dir, book_name, tmp_dir, out_dir):
    """Fix up a downloaded book directory and render it to a PDF.

    Copies *dir* to *tmp_dir*, rewrites absolute resource URLs inside every
    page to paths relative to the page file, then hands the resulting file
    list (HTML pages, or page images if present) to create_pdf().
    """
    pages = []
    # Assumes exactly one '*_pages.json' manifest exists -- IndexError otherwise.
    pages_files = glob.glob(os.path.join(dir, '*_pages.json'))[0]
    with open(pages_files) as f:
        pages = json.load(f)
    if len(pages) == 0:
        return
    print("Copying files")
    # Work on a fresh copy so the original download stays untouched.
    if os.path.isdir(tmp_dir):
        shutil.rmtree(tmp_dir)
    shutil.copytree(dir, tmp_dir)
    filelist = []
    pageimages = []
    # renames.json maps original resource paths to their on-disk names.
    with open(os.path.join(tmp_dir, 'renames.json')) as f:
        renames = json.load(f)
    print("Fixing files")
    for page in pages:
        path = get_path(page["absoluteURL"])
        changed = False
        file = os.path.join(tmp_dir, path)
        with open(file) as f:
            soup = bs.BeautifulSoup(f.read(), 'html.parser')
        for style in soup.head.find_all("style"):
            # some xhtml files contain "body{visibility:hidden}" making content invisible
            if style.string.find("visibility:hidden") != -1:
                style.decompose()
                changed = True
        # Rewrite absolute hrefs (stylesheets etc.) to relative paths.
        for link in soup.find_all("link", href=True):
            if link["href"][0] == "/":
                new_path = get_path(link["href"])
                link["href"] = os.path.relpath(os.path.join(tmp_dir, new_path), os.path.dirname(file))
                changed = True
        # Rewrite absolute img/script sources, applying any rename mapping.
        for source in soup.find_all(["img", "script"], src=True):
            if source["src"][0] == "/":
                new_path = get_path(source["src"])
                if new_path in renames:
                    new_path = renames[new_path]
                source["src"] = os.path.relpath(os.path.join(tmp_dir, new_path), os.path.dirname(file))
                changed = True
            # "pbk-page" images appear to be full-page scans; if any exist,
            # they replace the HTML file list below.
            if source.has_attr("id") and source["id"] == "pbk-page":
                pageimages.append(os.path.join(os.path.dirname(file), source["src"]));
        if changed:
            with open(file, "wb") as f:
                f.write(str(soup).encode("utf-8"))
        filelist.append(file)
    if len(pageimages) > 0:
        filelist = pageimages
    create_pdf(out_dir, book_name, filelist)
def create_pdf(out_dir, book_name, filelist):
    """Render *filelist* into ``<out_dir>/<book_name>.pdf``.

    HTML inputs are converted with wkhtmltopdf, anything else (page images)
    with ImageMagick's ``magick``. Both tools must be on PATH.
    """
    print("Generating PDF, Please Wait! This will take a while.")
    outfile = os.path.join(out_dir, book_name + ".pdf")
    # Choose the converter from the first file's extension; the list is
    # assumed homogeneous (all pages or all images).
    if filelist[0][-4:] == 'html':
        params = ["--no-pdf-compression", "--disable-javascript"]
        args = ["wkhtmltopdf"]
    else:
        params = []
        args = ["magick"]
    args = args + params + filelist + [outfile]
    result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Bug fix: only claim success when the converter actually succeeded;
    # previously "Created ..." was printed even on failure.
    if result.returncode != 0:
        # Decode stderr so the error is readable instead of a bytes repr.
        print(result.stderr.decode(errors="replace"))
        print("Warning! There was some error while creating PDF!")
    else:
        print("Created {:s}".format(outfile))
def main():
    """Parse command-line arguments and export the book to a PDF."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dir', type=str,
                        help='path to a location of book')
    parser.add_argument('--book-name', type=str, default='Book',
                        help='name of the book. Default=[%(default)s]')
    parser.add_argument('--tmp-dir', type=str, default='tmp',
                        help='specify a directory to keep temporally files. Default=[%(default)s]')
    parser.add_argument('--out-dir', type=str, default='.',
                        help='specify a directory where to save exported book. Default=[Current Directory]')
    args = parser.parse_args()
    try:
        os.makedirs(args.out_dir, exist_ok=True)
    except OSError as e:
        # Bug fix: '{:s}' on an OSError raised TypeError; format str(e).
        print("Unable to create output directory {:s}: {:s}"
              .format(args.out_dir, str(e)))
        # Without an output directory the export cannot succeed.
        return
    if not os.path.isdir(args.dir):
        print("Wrong location to book!")
        return
    export(args.dir, args.book_name, args.tmp_dir, args.out_dir)
|
<reponame>wubinbai/argus-freesound
import torch
from torch import nn
import torch.nn.functional as F
class ChannelAttention(nn.Module):
    """CBAM channel-attention gate.

    Squeezes the input spatially via both average and max pooling, pushes
    each through a shared two-layer 1x1-conv bottleneck, sums the results
    and returns a per-channel sigmoid gate of shape (B, C, 1, 1).
    """

    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        def bottleneck(pooled):
            # Shared excitation MLP (implemented with 1x1 convolutions).
            return self.fc2(self.relu1(self.fc1(pooled)))

        combined = bottleneck(self.avg_pool(x)) + bottleneck(self.max_pool(x))
        return self.sigmoid(combined)
class SpatialAttention(nn.Module):
    """CBAM spatial-attention gate.

    Stacks the channel-wise mean and max maps, convolves them down to a
    single channel and returns a sigmoid gate of shape (B, 1, H, W).
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # "Same" padding for the two supported kernel sizes.
        padding = 3 if kernel_size == 7 else 1
        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        channel_mean = torch.mean(x, dim=1, keepdim=True)
        channel_max = torch.max(x, dim=1, keepdim=True).values
        stacked = torch.cat((channel_mean, channel_max), dim=1)
        return self.sigmoid(self.conv1(stacked))
class ConvolutionalBlockAttentionModule(nn.Module):
    """CBAM: apply channel attention, then spatial attention, to the input."""

    def __init__(self, in_planes, ratio=16, kernel_size=7):
        super(ConvolutionalBlockAttentionModule, self).__init__()
        self.ca = ChannelAttention(in_planes, ratio)
        self.sa = SpatialAttention(kernel_size)

    def forward(self, input):
        # Gate channels first, then gate spatial positions of the result.
        channel_refined = input * self.ca(input)
        return channel_refined * self.sa(channel_refined)
class ConvBlock(nn.Module):
    """Two 3x3 conv + BatchNorm + ReLU stages followed by 2x2 average pooling."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )
        self._init_weights()

    def _init_weights(self):
        # Kaiming-normal for convolutions, unit gamma / zero beta for norms.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.zeros_(module.bias)

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        return F.avg_pool2d(features, 2)
class SkipBlock(nn.Module):
    """Downsample by average pooling (when scale_factor >= 2), then apply
    a 3x3 conv + BatchNorm + ReLU."""

    def __init__(self, in_channels, out_channels, scale_factor=2):
        super(SkipBlock, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.scale_factor = scale_factor

    def forward(self, x):
        # A scale factor below 2 means "no spatial downsampling".
        pooled = F.avg_pool2d(x, self.scale_factor) if self.scale_factor >= 2 else x
        return self.relu(self.bn(self.conv(pooled)))
class SkipAttention(nn.Module):
    """Classifier with multi-scale skip connections and CBAM attention.

    Four ConvBlock stages (each halving the spatial size) produce features
    at decreasing resolutions; SkipBlocks bring the three intermediate
    feature maps down to the final resolution, all four are concatenated,
    gated by CBAM attention, merged, globally pooled and classified by a
    two-layer fully-connected head.
    """
    def __init__(self, num_classes, base_size=64,
                 dropout=0.2, ratio=16, kernel_size=7,
                 last_filters=8, last_fc=2):
        super().__init__()
        self.conv1 = ConvBlock(in_channels=3, out_channels=base_size)
        # Each skip downsamples its stage output to the conv4 resolution
        # and widens it to base_size*8 channels.
        self.skip1 = SkipBlock(in_channels=base_size, out_channels=base_size*8,
                               scale_factor=8)
        self.conv2 = ConvBlock(in_channels=base_size, out_channels=base_size*2)
        self.skip2 = SkipBlock(in_channels=base_size * 2, out_channels=base_size*8,
                               scale_factor=4)
        self.conv3 = ConvBlock(in_channels=base_size*2, out_channels=base_size*4)
        self.skip3 = SkipBlock(in_channels=base_size*4, out_channels=base_size*8,
                               scale_factor=2)
        self.conv4 = ConvBlock(in_channels=base_size*4, out_channels=base_size*8)
        # Attention over the 4-way concatenation (4 * base_size*8 channels).
        self.attention = ConvolutionalBlockAttentionModule(base_size*8*4,
                                                           ratio=ratio,
                                                           kernel_size=kernel_size)
        # scale_factor=1 -> no pooling; used purely as conv+BN+ReLU merge.
        self.merge = SkipBlock(base_size*8*4, base_size*last_filters, 1)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(base_size*last_filters, base_size*last_fc),
            nn.PReLU(),
            nn.BatchNorm1d(base_size*last_fc),
            nn.Dropout(dropout/2),
            nn.Linear(base_size*last_fc, num_classes),
        )
    def forward(self, x):
        x = self.conv1(x)
        skip1 = self.skip1(x)
        x = self.conv2(x)
        skip2 = self.skip2(x)
        x = self.conv3(x)
        skip3 = self.skip3(x)
        x = self.conv4(x)
        # All four tensors now share the same spatial size; fuse on channels.
        x = torch.cat([x, skip1, skip2, skip3], dim=1)
        x = self.attention(x)
        x = self.merge(x)
        x = self.avg_pool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
|
<filename>src/wikidated/_utils/seven_zip_archive.py
#
# Copyright 2021-2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from contextlib import contextmanager
from logging import getLogger
from os.path import relpath
from pathlib import Path
from shutil import rmtree
from subprocess import DEVNULL, PIPE
from typing import IO, Any, Callable, Iterator, Optional
from typing_extensions import Final, Protocol
from wikidated._utils.misc import external_process
_LOGGER = getLogger(__name__)
class _SupportsLessThan(Protocol):
    """Structural type for objects usable as sort keys (must define __lt__)."""
    def __lt__(self, __other: Any) -> bool:
        ...
class SevenZipArchive:
    """Thin wrapper around the external ``7z`` command-line tool.

    All operations shell out to ``7z`` via the project's external_process
    helper; the binary must be on PATH. The exact argument strings matter
    and are left untouched.
    """
    def __init__(self, path: Path) -> None:
        # Path of the .7z file on disk (may not exist yet for write()).
        self.path: Final = path
    @classmethod
    def from_dir(cls, dir_: Path, path: Path) -> SevenZipArchive:
        """Create an archive at *path* containing the contents of *dir_*.

        Builds into a hidden temp file first and renames atomically at the
        end, so a partially-written archive never appears under *path*.
        """
        tmp_path = path.parent / (".tmp." + path.name)
        _LOGGER.debug(f"Creating 7z archive {path} from directory {dir_}.")
        if tmp_path.exists():
            tmp_path.unlink()
        # -ms=off disables solid mode so individual members stay seekable.
        with external_process(
            ("7z", "a", "-ms=off", relpath(tmp_path, dir_), "."),
            stdin=DEVNULL,
            stdout=PIPE,
            stderr=PIPE,
            cwd=dir_,
            exhaust_stdout_to_log=True,
            exhaust_stderr_to_log=True,
            check_return_code_zero=True,
        ) as _:
            pass
        tmp_path.rename(path)
        return SevenZipArchive(path)
    @classmethod
    def from_dir_with_order(
        cls, dir_: Path, path: Path, key: Callable[[Path], _SupportsLessThan]
    ) -> SevenZipArchive:
        """Create an archive whose members are stored in ``sorted(key=key)``
        order.

        7z offers no direct ordering control, so this stages zero-padded
        numeric symlinks to the files in a temp directory, archives that
        (``-l`` follows symlinks), then renames the members back to their
        real names via a ``7z rn`` listfile.
        """
        tmp_path = path.parent / f".tmp.{path.name}"
        tmp_dir = path.parent / f".tmp.{path.name}.contents"
        listfile_rename = path.parent / f".tmp.{path.name}.listfile-rename"
        _LOGGER.debug(f"Creating ordered 7z archive {path} from directory {dir_}.")
        files = list(dir_.iterdir())
        files.sort(key=key)
        # Enough digits to keep lexicographic order equal to numeric order.
        ordered_filename_num_digits = len(str(len(files) - 1))
        tmp_dir.mkdir(exist_ok=False, parents=True)
        with listfile_rename.open("w", encoding="UTF-8") as fout:
            for i, file in enumerate(files):
                ordered_filename = f"{i:0{ordered_filename_num_digits}d}"
                # Listfile format: alternating old-name / new-name lines.
                fout.write(f"{ordered_filename}\n{file.name}\n")
                (tmp_dir / ordered_filename).symlink_to(file.resolve())
        if tmp_path.exists():
            tmp_path.unlink()
        with external_process(
            ("7z", "a", "-l", "-ms=off", relpath(tmp_path, tmp_dir), "."),
            stdin=DEVNULL,
            stdout=PIPE,
            stderr=PIPE,
            cwd=tmp_dir,
            exhaust_stdout_to_log=True,
            exhaust_stderr_to_log=True,
            check_return_code_zero=True,
        ) as _:
            pass
        rmtree(tmp_dir)
        with external_process(
            ("7z", "rn", f"{tmp_path}", f"@{listfile_rename}"),
            stdin=DEVNULL,
            stdout=PIPE,
            stderr=PIPE,
            exhaust_stdout_to_log=True,
            exhaust_stderr_to_log=True,
            check_return_code_zero=True,
        ) as _:
            pass
        listfile_rename.unlink()
        tmp_path.rename(path)
        return SevenZipArchive(path)
    @contextmanager
    def write(self, file_name: Optional[Path] = None) -> Iterator[IO[str]]:
        """Yield a text stream that is appended to the archive as *file_name*."""
        # This method seems to take longer the more file already exist in the archive.
        # If you plan want to create archives with many files, it is better to just
        # create a directory with all files in it as you need them, and then to convert
        # that to an archive using SevenZipArchive.from_dir().
        if file_name:
            _LOGGER.debug(f"Writing file {file_name} to 7z archive {self.path}.")
        else:
            _LOGGER.debug(f"Writing to 7z archive {self.path}.")
        file_name_str = str(file_name) if file_name else ""
        # -si<name> makes 7z read the member's contents from stdin.
        with external_process(
            ("7z", "a", "-bd", "-bso0", f"-si{file_name_str}", str(self.path)),
            stdin=PIPE,
            stdout=PIPE,
            stderr=PIPE,
            exhaust_stdout_to_log=True,
            exhaust_stderr_to_log=True,
            check_return_code_zero=True,
        ) as seven_zip_process:
            assert seven_zip_process.stdin is not None
            yield seven_zip_process.stdin
    @contextmanager
    def read(self, file_name: Optional[Path] = None) -> Iterator[IO[str]]:
        """Yield a text stream of *file_name* (or the whole archive) via ``7z x -so``."""
        if file_name:
            _LOGGER.debug(f"Reading file {file_name} from 7z archive {self.path}.")
        else:
            _LOGGER.debug(f"Reading from 7z archive {self.path}.")
        file_name_str = str(file_name) if file_name else ""
        # Not sure how to check for errors here (particularly, if one wants to end
        # processing output from stdout, before the full archive if depleted). Waiting
        # for output on stderr stalls the process. Terminating while output in stdout is
        # still being generated results in a -15 return code.
        with external_process(
            ("7z", "x", "-so", str(self.path), file_name_str),
            stdin=DEVNULL,
            stdout=PIPE,
            stderr=PIPE,
        ) as seven_zip_process:
            assert seven_zip_process.stdout is not None
            yield seven_zip_process.stdout
    def iter_file_names(self) -> Iterator[Path]:
        """Yield the member paths listed by ``7z l -ba -slt``."""
        _LOGGER.debug(f"Iterating file names in 7z archive {self.path}.")
        with external_process(
            ("7z", "l", "-ba", "-slt", str(self.path)),
            stdin=DEVNULL,
            stdout=PIPE,
            stderr=PIPE,
        ) as seven_zip_process:
            assert seven_zip_process.stdout is not None
            for line in seven_zip_process.stdout:
                # Each member entry contains a "Path = <name>" line.
                if line.startswith("Path = "):
                    yield Path(line[len("Path = ") : -len("\n")])
|
<reponame>NeoBryant/Brain_Image_Segmentation
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
#%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib
def truncated_normal_(tensor, mean=0, std=1):
    """In-place fill of *tensor* with normal samples truncated to (-2, 2),
    then scaled by *std* and shifted by *mean*."""
    shape = tensor.shape
    # Draw four standard-normal candidates per element; at least one is
    # very likely to fall inside (-2, 2).
    candidates = tensor.new_empty(shape + (4,)).normal_()
    in_range = (candidates > -2) & (candidates < 2)
    # Index of the first in-range candidate along the last axis (falls back
    # to index 0 when none qualifies, matching the original behaviour).
    pick = in_range.max(-1, keepdim=True)[1]
    tensor.data.copy_(candidates.gather(-1, pick).squeeze(-1))
    tensor.data.mul_(std).add_(mean)
def init_weights(m):
    """Kaiming-initialise conv / transposed-conv weights and give their
    biases a small truncated-normal initialisation."""
    if type(m) in (nn.Conv2d, nn.ConvTranspose2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_in', nonlinearity='relu')
        truncated_normal_(m.bias, mean=0, std=0.001)
def init_weights_orthogonal_normal(m):
    """Orthogonal weight init plus small truncated-normal biases for
    conv / transposed-conv layers."""
    if type(m) in (nn.Conv2d, nn.ConvTranspose2d):
        nn.init.orthogonal_(m.weight)
        truncated_normal_(m.bias, mean=0, std=0.001)
def l2_regularisation(m):
    """Return the sum of L2 norms of all parameters of *m*
    (None when the module has no parameters)."""
    total = None
    for param in m.parameters():
        norm = param.norm(2)
        total = norm if total is None else total + norm
    return total
def save_mask_prediction_example(mask, pred, iter):
    """Save greyscale images of the predicted mask and the ground-truth
    mask for iteration *iter* under images/."""
    plt.imshow(pred[0, :, :], cmap='Greys')
    plt.savefig(f"images/{iter}_prediction.png")
    plt.imshow(mask[0, :, :], cmap='Greys')
    plt.savefig(f"images/{iter}_mask.png")
def label2multichannel(mask, class_num=9):
    """Convert a single-channel label mask into a one-hot multi-channel tensor.

    Args:
        mask: torch tensor of shape (batch_size, 1, H, W) whose values are
            class ids 1..class_num (any other value yields all-zero channels).
        class_num: number of output channels/classes.

    Returns:
        FloatTensor of shape (batch_size, class_num, H, W) with 0/1 entries.

    Replaces the original quadruple Python loop (which also hard-coded a
    240x240 output despite reading H and W) with a vectorised comparison,
    generalising to any spatial size.
    """
    mask_np = mask.numpy()
    # astype truncates toward zero, matching the original int() comparison.
    class_ids = mask_np[:, 0, :, :].astype(np.int64)
    targets = np.arange(1, class_num + 1).reshape(1, class_num, 1, 1)
    label = (class_ids[:, None, :, :] == targets).astype(np.float32)
    return torch.from_numpy(label)
def mask2rgb(mask):
    """Convert a single-channel 240x240 class mask to an RGB image in [0, 1].

    Classes 0..8 map to (black, red, green, blue, yellow, magenta, cyan,
    purple, white); any other value stays black, exactly as the original
    loop behaved.

    Args:
        mask: array-like reshapeable to (240, 240).

    Returns:
        numpy array of shape (240, 240, 3) with values in [0, 1].

    Replaces the original triple Python loop (9 * 240 * 240 iterations)
    with a vectorised palette lookup.
    """
    mask = np.asarray(mask).reshape((240, 240))
    palette = np.array([(0, 0, 0), (255, 0, 0), (0, 255, 0), (0, 0, 255),
                        (255, 255, 0), (255, 0, 255), (0, 255, 255),
                        (128, 0, 128), (255, 255, 255)], dtype=np.float64)
    class_ids = mask.astype(np.int64)  # truncates toward zero, like int()
    img = np.zeros((240, 240, 3))
    valid = (class_ids >= 0) & (class_ids < len(palette))
    img[valid] = palette[class_ids[valid]]
    img /= 255
    return img
def show_curve(y1s, title='loss'):
    """Plot a training curve and save it as picture/<title>.png.

    Args:
        y1s: per-epoch values (loss or accuracy).
        title: label used for the plot title, y-axis and file name.
    """
    epochs = np.array(range(len(y1s)))
    values = np.array(y1s)
    plt.plot(epochs, values, label='train')
    plt.axis()
    plt.title(f'{title} curve')
    plt.xlabel('epoch')
    plt.ylabel(f'{title}')
    plt.legend(loc='best')
    plt.savefig(f"picture/{title}.png")
    plt.show()
    plt.close()
    print(f'Saved figure: picture/{title}.png')
# def bayes_uncertain(image_np, label_np, results, count, the_class):
# """
# Keyword arguments:
# image_np -- 原图
# label_np -- 标签
# results -- 同一张图片的不同预测结果
# Return: 预测结果的均值和方差
# """
# results = np.array(results) # list->numpy
# shape = results.shape
# # mean_result
# # variance_result
# mean_result = np.zeros((shape[1], shape[2])) # 均值
# variance_result = np.zeros((shape[1], shape[2])) # 方差
# # 计算均值
# for i in range(shape[0]):
# mean_result += results[i]
# mean_result /= shape[0]
# # 计算方差
# for i in range(shape[0]):
# variance_result += np.square(mean_result-results[i])
# variance_result /= shape[0]
# # 显示保存图片
# fig, ax = plt.subplots(2,2, sharey=True, figsize=(14,12))
# ax[0][0].set_title("Original data")
# ax[0][1].set_title("Ground Truth")
# ax[1][0].set_title("mean predicted result")
# ax[1][1].set_title("variance")
# ax00 = ax[0][0].imshow(image_np, aspect="auto", cmap="gray")
# ax01 = ax[0][1].imshow(label_np, aspect="auto")
# ax10 = ax[1][0].imshow(mean_result, aspect="auto")
# ax11 = ax[1][1].imshow(variance_result, aspect="auto")
# fig.colorbar(ax00, ax=ax[0][0])
# fig.colorbar(ax01, ax=ax[0][1])
# fig.colorbar(ax10, ax=ax[1][0])
# fig.colorbar(ax11, ax=ax[1][1])
# # 保存
# plt.savefig('picture/class_{}_mean_variance_threshold_{}.jpg'.format(the_class, count))
def bayes_uncertain(image_np, label_np, results, count, class_num):
    """Visualize the 9 per-class prediction maps next to the input and label.

    Args:
        image_np: original input image (2-D, shown in grayscale).
        label_np: ground-truth label map.
        results: predictions for the same image; flattened and reshaped to
            9 channels of 240x240.
            NOTE(review): the (9, 240, 240) shape is hard-coded — confirm it
            matches the model output before reuse.
        count: index used in the output filename.
        class_num: class identifier used in the output filename.
    """
    results = np.array(results)  # list -> numpy
    result = results.reshape((9, 240, 240))
    # 6x2 grid: row 0 = original + ground truth, rows 1-5 = classes 1-9.
    fig, ax = plt.subplots(6, 2, sharey=True, figsize=(14, 36))
    ax[0][0].set_title("Original")
    ax[0][1].set_title("Ground Truth")
    for c in range(9):
        # Panel index c+2 maps to grid cell [(c+2)//2][(c+2)%2].
        ax[(c + 2) // 2][(c + 2) % 2].set_title("class {}".format(c + 1))
    panels = [ax[0][0].imshow(image_np, aspect="auto", cmap="gray"),
              ax[0][1].imshow(label_np, aspect="auto")]
    for c in range(9):
        panels.append(
            ax[(c + 2) // 2][(c + 2) % 2].imshow(result[c], aspect="auto"))
    for i, im in enumerate(panels):
        fig.colorbar(im, ax=ax[i // 2][i % 2])
    # Save, then close to avoid accumulating open figures across calls.
    plt.savefig(
        'picture/class_{}_{}.jpg'.format(class_num, count))
    plt.close(fig)
def save_result_img(orig, mask, pred, step):
    """Save a side-by-side comparison of the input, label and prediction.

    Args:
        orig: original input image (shown in grayscale).
        mask: ground-truth label map.
        pred: predicted label map.
        step: index used in the output filename "picture/compare_<step>.jpg".
    """
    fig, ax = plt.subplots(2, 2, sharey=True, figsize=(14, 12))
    ax[0][0].set_title("Original")
    ax[0][1].set_title("Ground Truth")
    ax[1][0].set_title("predict")
    ax00 = ax[0][0].imshow(orig, aspect="auto", cmap="gray")
    ax01 = ax[0][1].imshow(mask, aspect="auto")
    ax10 = ax[1][0].imshow(pred, aspect="auto")
    fig.colorbar(ax00, ax=ax[0][0])
    fig.colorbar(ax01, ax=ax[0][1])
    fig.colorbar(ax10, ax=ax[1][0])
    plt.savefig('picture/compare_{}.jpg'.format(step))
    # Close the figure: this function is called once per step and leaked
    # figures accumulate otherwise.
    plt.close(fig)
    return
# Repository: doubleDragon/quantApi
# coding=utf-8
import requests
import hmac
import time
import urllib
import hashlib
class Client(object):
    """Minimal Poloniex exchange REST client (public + trading API).

    Written for Python 2 (uses ``unicode`` and ``urllib.urlencode``).
    """
    base_url = 'https://poloniex.com/'

    def __init__(self, api_key=None, secret=None):
        """
        Args:
            api_key (str): Optional API key.
            secret (str): Optional API secret.

        Credentials must be supplied together (trading enabled) or not at
        all (public endpoints only).
        """
        assert isinstance(api_key, (str, type(None)))
        assert isinstance(secret, (str, type(None)))
        assert ((api_key is not None and secret is not None) or
                (api_key is None and secret is None))
        self._api_key = api_key
        self._secret = secret

    @property
    def _nonce(self):
        """Return an increasing nonce (microsecond clock) used in signing."""
        return int(round(time.time() * 1000000))

    def _get(self, path):
        """GET a public endpoint and return the decoded JSON.

        Args:
            path (unicode): path and query relative to base_url.

        Returns:
            dict
        """
        assert isinstance(path, unicode)
        req = requests.get(self.base_url + path)
        data = req.json()
        return data

    def _post(self, command, data):
        """POST a signed trading-API command and return the decoded JSON.

        Args:
            command (str): trading API command name.
            data (dict): parameters; mutated in place ('command', 'nonce').

        Returns:
            dict
        """
        assert isinstance(command, str)
        assert isinstance(data, dict)
        data['command'] = command
        data['nonce'] = self._nonce
        # Poloniex requires the url-encoded body signed with HMAC-SHA512.
        encoded_data = urllib.urlencode(data)
        sign = hmac.new(self._secret, encoded_data, hashlib.sha512).hexdigest()
        headers = {
            'Key': self._api_key,
            'Sign': sign
        }
        url = self.base_url + 'tradingApi'
        print('url= ' + url)
        req = requests.post(url, data=data, headers=headers)
        print('request= ' + str(req.content))
        response = req.json()
        return response

    def ticker(self, symbol):
        """Return ticker data; symbol is upper-cased, e.g. 'LTC'.

        The query string previously contained mojibake ('¤cy', the
        HTML-entity garbling of '&curren' + 'cy'), corrupting the URL.
        """
        return self._get(u'public?command=returnTicker&currency=' + symbol.upper())

    def depth(self, symbol):
        """Return the order book (depth 5) for an upper-case pair.

        :param symbol: such as 'BTC_LTC'
        :return: json
        """
        path = u'public?command=returnOrderBook&currencyPair={}&depth={}'.format(symbol.upper(), 5)
        return self._get(path)

    def buy(self, symbol, rate, amount):
        """
        :param symbol: EXAMPLE 'BTC_LTC'
        :param rate: the price of the order
        :param amount: amount of the order
        :return: the order id, or None if the response has no 'orderNumber'
        """
        command = 'buy'
        data = {
            'currencyPair': str(symbol),
            'rate': str(rate),
            'amount': str(amount)
        }
        resp = self._post(command, data)
        if u'orderNumber' in resp:
            return resp[u'orderNumber']

    def sell(self, symbol, rate, amount):
        """
        the same as function buy
        """
        command = 'sell'
        data = {
            'currencyPair': str(symbol),
            'rate': str(rate),
            'amount': str(amount)
        }
        resp = self._post(command, data)
        if u'orderNumber' in resp:
            return resp[u'orderNumber']

    def margin_buy(self, symbol, rate, amount, lending_rate=2):
        """
        :param symbol: such as BTC_LTC
        :param amount: the amount of order and cannot be greater than 5%
        :param rate: the price of order
        :param lending_rate: option rate
        :return: the order number, or None on failure
        """
        command = 'marginBuy'
        data = {
            'currencyPair': str(symbol.upper()),
            'amount': str(amount),
            # NOTE(review): rate is passed unconverted here but via repr()
            # in margin_sell — confirm which form the API expects.
            'rate': rate,
            'lendingRate': str(lending_rate)
        }
        print('data: ' + str(data))
        resp = self._post(command, data)
        print('marginBuy resp: ' + str(resp))
        if (u'success' in resp) and resp[u'success'] == 1:
            order_id = resp[u'orderNumber']
        else:
            order_id = None
        return order_id

    def margin_sell(self, symbol, rate, amount, lending_rate=2):
        """
        same as margin_buy
        """
        command = 'marginSell'
        data = {
            'currencyPair': str(symbol.upper()),
            'amount': str(amount),
            'rate': repr(rate),
            'lendingRate': str(lending_rate)
        }
        print('data: ' + str(data))
        resp = self._post(command, data)
        print('marginSell resp: ' + str(resp))
        if (u'success' in resp) and resp[u'success'] == 1:
            order_id = resp[u'orderNumber']
        else:
            order_id = None
        return order_id

    def view_active_orders(self, symbol):
        """Return the open orders for the (upper-cased) currency pair."""
        command = 'returnOpenOrders'
        data = {
            'currencyPair': str(symbol.upper())
        }
        return self._post(command, data)

    def view_margin_position(self, symbol):
        """Return the current margin position for the currency pair."""
        command = 'getMarginPosition'
        data = {
            'currencyPair': symbol
        }
        return self._post(command, data)
# def get_max_demand_rate(self):
# """
# Returns:
# float: The maximum loan rate asked for BTC
# """
# data = self._get(u'public?command=returnLoanOrders¤cy=BTC')
#
# demand_rates = set()
# for demand in data['demands']:
# demand_rates.add(float(demand['rate']))
#
# max_demand = max(demand_rates)
#
# return max_demand
#
# def get_min_offer_rate(self, min_amount=0.0):
# """
# Args:
# min_amount (float): Minimum amount of BTC offered. If not offer is
# proposing the given amount, returns the
# maximum known offer.
# Returns:
# float: The minimum loan rate offered for BTC, or None if no offer
# is providing min_amount BTC.
# """
# assert isinstance(min_amount, float)
#
# data = self._get(u'public?command=returnLoanOrders¤cy=BTC')
#
# offer_rates = set()
#
# # Filter the offers based on the min_amount, if any
# for offer in data['offers']:
# if float(offer['amount']) >= min_amount:
# rate = float(offer['rate'])
# offer_rates.add(rate)
#
# # If we have valid offers, get the lowest one
# if offer_rates:
# min_offer = min(offer_rates)
# # If we have no valid offer, get the largest unfiltered offer, as the
# # API won't let us get more offers
# else:
# for offer in data['offers']:
# rate = float(offer['rate'])
# offer_rates.add(rate)
# min_offer = max(offer_rates)
#
# return min_offer
#
# def get_unused(self):
# """
# Returns:
# float: Amount of BTC available in the lending wallet.
# """
# data = {
# 'account': 'lending'
# }
# response = self._post('returnAvailableAccountBalances', data)
# if u'BTC' in response[u'lending']:
# unused_btc = float(response[u'lending'][u'BTC'])
# else:
# unused_btc = 0.0
#
# return unused_btc
#
# def offer_btc_loan(self, rate, amount, duration):
# """
# Args:
# rate (float)
# amount (float)
# duration (int): Duration of the loan, in days (2-60).
#
# Returns:
# int or None: Order ID if successful
# """
# assert isinstance(rate, float)
# assert isinstance(amount, float)
# assert isinstance(duration, int)
#
# data = {
# 'currency': 'BTC',
# 'amount': amount,
# 'duration': duration,
# 'autoRenew': 0,
# 'lendingRate': rate
# }
# resp = self._post('createLoanOffer', data)
#
# if resp[u'success'] == 1:
# order_id = resp[u'orderID']
# else:
# order_id = None
#
# return order_id
# --- file: WassersteinGAN/src/utils/data_utils.py ---
import cv2
import glob
import h5py
import imageio
import matplotlib.pylab as plt
import matplotlib.gridspec as gridspec
import numpy as np
import os
from scipy import stats
from keras.datasets import mnist, cifar10
from keras.optimizers import Adam, SGD, RMSprop
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
def normalization(X, image_data_format="channels_last"):
    """Scale pixel values from [0, 255] to [-1, 1].

    Args:
        X: array of pixel values in [0, 255].
        image_data_format: accepted for API symmetry with the loaders; the
            original if/else applied the identical formula to both layouts,
            so the branch is collapsed.

    Returns:
        Array of the same shape with values in [-1, 1].
    """
    # (x/255 - 0.5) / 0.5  ==  x/127.5 - 1
    X = X / 255.
    return (X - 0.5) / 0.5
def inverse_normalization(X):
    """Map normalized values in [-1, 1] back to uint8 pixels in [0, 255]."""
    rescaled = (X * 0.5 + 0.5) * 255.
    return rescaled.astype(np.uint8)
def load_mnist(image_data_format):
    """Load MNIST, reshape to the requested layout and normalize to [-1, 1].

    Returns:
        (X_train, Y_train, X_test, Y_test) with one-hot label matrices.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    if image_data_format == 'channels_first':
        sample_shape = (1, 28, 28)
    else:
        sample_shape = (28, 28, 1)
    X_train = X_train.reshape((X_train.shape[0],) + sample_shape).astype('float32')
    X_test = X_test.reshape((X_test.shape[0],) + sample_shape).astype('float32')
    X_train = normalization(X_train, image_data_format)
    X_test = normalization(X_test, image_data_format)
    # Labels are 1-D here, so hstack joins train and test for the class count.
    nb_classes = len(np.unique(np.hstack((y_train, y_test))))
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def load_cifar10(image_data_format):
    """Load CIFAR-10, reshape to the requested layout and normalize to [-1, 1].

    Returns:
        (X_train, Y_train, X_test, Y_test) with one-hot label matrices.
    """
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()
    sample_shape = (3, 32, 32) if image_data_format == 'channels_first' else (32, 32, 3)
    X_train = X_train.reshape((X_train.shape[0],) + sample_shape).astype('float32')
    X_test = X_test.reshape((X_test.shape[0],) + sample_shape).astype('float32')
    X_train = normalization(X_train, image_data_format)
    X_test = normalization(X_test, image_data_format)
    # CIFAR labels are (n, 1) column vectors, hence vstack (not hstack).
    nb_classes = len(np.unique(np.vstack((y_train, y_test))))
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
def load_celebA(img_dim, image_data_format):
    """Load preprocessed CelebA images from HDF5, normalized to [-1, 1]."""
    h5_path = "../../data/processed/CelebA_%s_data.h5" % img_dim
    with h5py.File(h5_path, "r") as hf:
        images = hf["data"][:].astype(np.float32)
    images = normalization(images, image_data_format)
    if image_data_format == "channels_last":
        # The transpose converts NCHW -> NHWC.
        images = images.transpose(0, 2, 3, 1)
    return images
def load_image_dataset(dset, img_dim, image_data_format, batch_size):
    """Dispatch dataset loading by name; unknown names are treated as dirs.

    Returns:
        (X_real_train, X_batch_gen): the real-image array (or first batch)
        and, for directory datasets only, the keras generator (else None).
    """
    if dset == "celebA":
        return load_celebA(img_dim, image_data_format), None
    if dset == "mnist":
        X_real_train, _, _, _ = load_mnist(image_data_format)
        return X_real_train, None
    if dset == "cifar10":
        X_real_train, _, _, _ = load_cifar10(image_data_format)
        return X_real_train, None
    # Anything else is interpreted as a directory of images.
    X_batch_gen = data_generator_from_dir(dset, (img_dim, img_dim), batch_size, image_data_format)
    return next(X_batch_gen), X_batch_gen
def data_generator_from_dir(data_dir, target_size, batch_size, image_data_format="channels_last"):
    """Build a keras image generator over the images under `data_dir`.

    Args:
        data_dir: directory scanned recursively for images.
        target_size: (height, width) the generator resizes images to.
        batch_size: number of images per yielded batch.
        image_data_format: unused here; kept for signature symmetry with
            the other loaders.

    Returns:
        A keras DirectoryIterator yielding normalized image batches.

    Raises:
        ValueError: if the image count is not a multiple of batch_size,
            or if the generator finds no images.
    """
    print("Loading data from", data_dir)
    # The training loop assumes full batches, so enforce divisibility.
    number_of_images = sum(len(files) for _, _, files in os.walk(data_dir))
    if number_of_images % batch_size != 0:
        raise ValueError("ERROR: # of images in " + str(data_dir) + " found by keras.ImageDataGenerator is not a multiple of the batch_size ( " + str(batch_size) + " )!\nFound " + str(number_of_images) + " images. Add " + str(batch_size - number_of_images % batch_size) + " more image(s), or delete " + str(number_of_images % batch_size) + " image(s).")
    # datagens
    data_generator_args = dict(preprocessing_function=normalization)
    image_datagen = ImageDataGenerator(**data_generator_args)
    # Image generators
    image_data_generator = image_datagen.flow_from_directory(data_dir, target_size=target_size, batch_size=batch_size, class_mode=None, seed=29)
    if len(image_data_generator) == 0:
        # Fix: the original message said "modre" instead of "more".
        raise ValueError("ERROR: # of images found by keras.ImageDataGenerator is 0!\nPlease save the images in the data_dir into at least one more directory, preferably into classes. Given data_dir:", data_dir)
    return image_data_generator
def load_toy(n_mixture=8, std=0.01, radius=1.0, pts_per_mixture=5000):
    """Sample a 2-D Gaussian-mixture "ring" toy dataset.

    Mixture centers are spaced evenly on a circle of the given radius; each
    contributes pts_per_mixture points with isotropic covariance std.

    Returns:
        (n_mixture * pts_per_mixture, 2) array of samples, grouped by mixture.
    """
    angles = np.linspace(0, 2 * np.pi, n_mixture + 1)[:-1]
    centers_x = radius * np.sin(angles)
    centers_y = radius * np.cos(angles)
    cov = std * np.eye(2)
    X = np.zeros((n_mixture * pts_per_mixture, 2))
    for k in range(n_mixture):
        center = np.array([centers_x[k], centers_y[k]])
        samples = np.random.multivariate_normal(center, cov, pts_per_mixture)
        X[k * pts_per_mixture: (k + 1) * pts_per_mixture, :] = samples
    return X
def get_optimizer(opt, lr):
    """Return a keras optimizer instance by name.

    Args:
        opt: one of "SGD", "RMSprop", "Adam".
        lr: learning rate.

    Raises:
        ValueError: for an unknown optimizer name (the original silently
            returned None, which failed much later at compile time).
    """
    if opt == "SGD":
        return SGD(lr=lr)
    elif opt == "RMSprop":
        return RMSprop(lr=lr)
    elif opt == "Adam":
        # Fix: keras spells the first-moment decay 'beta_1', not 'beta1';
        # the original call raised a TypeError.
        return Adam(lr=lr, beta_1=0.5)
    raise ValueError("Unknown optimizer: %r" % (opt,))
def gen_batch(X, X_batch_gen, batch_size):
    """Yield batches forever: random rows of X, or batches from a generator.

    When X_batch_gen is None, each batch is batch_size rows of X sampled
    without replacement; otherwise batches are drawn from X_batch_gen.
    """
    if X_batch_gen is not None:
        while True:
            yield next(X_batch_gen)
    while True:
        picks = np.random.choice(X.shape[0], batch_size, replace=False)
        yield X[picks]
def sample_noise(noise_scale, batch_size, noise_dim):
    """Draw a (batch_size, noise_dim[0]) Gaussian noise matrix.

    noise_scale is the standard deviation of the zero-mean samples.
    """
    shape = (batch_size, noise_dim[0])
    return np.random.normal(scale=noise_scale, size=shape)
def get_disc_batch(X_real_batch, generator_model, batch_counter, batch_size, noise_dim, noise_scale=0.5):
    """Assemble one discriminator training batch.

    Returns:
        (X_disc_real, X_disc_gen): the first batch_size real samples and an
        equal number of generator outputs produced from fresh noise.
    """
    # Fake half: sample latent noise and run it through the generator.
    latent = sample_noise(noise_scale, batch_size, noise_dim)
    fake = generator_model.predict(latent, batch_size=batch_size)
    # Real half: leading slice of the provided real batch.
    # NOTE(review): batch_counter is accepted but unused — kept for API
    # compatibility with the callers.
    real = X_real_batch[:batch_size]
    return real, fake
def save_model_weights(generator_model, discriminator_model, DCGAN_model, e,
                       save_weights_every_n_epochs=5, save_only_last_n_weights=10, model_name="WGAN"):
    """Checkpoint the three models every N epochs, pruning older files first.

    Args:
        e: zero-based epoch index; weights are written when (e + 1) is a
            multiple of save_weights_every_n_epochs.
    """
    # Prune first so at most save_only_last_n_weights files remain per model.
    purge_weights(generator_model, discriminator_model, DCGAN_model,
                  save_only_last_n_weights, model_name)
    if (e + 1) % save_weights_every_n_epochs != 0:
        return
    print("Saving weight...")
    model_path = os.path.join("../../models", model_name)
    for model in (generator_model, discriminator_model, DCGAN_model):
        weights_path = os.path.join(model_path, '%s_epoch%5d.h5' % (model.name, e))
        model.save_weights(weights_path, overwrite=True)
def purge_weights(generator_model, discriminator_model, DCGAN_model, n, model_name):
    """Delete all but the newest n weight files for each of the three models.

    Files are matched by the model's name prefix under ../../models/<model_name>
    and ordered lexicographically (epoch number in the filename).
    """
    for model in (generator_model, discriminator_model, DCGAN_model):
        pattern = '../../models/%s/%s*' % (model_name, model.name)
        for stale_file in sorted(glob.glob(pattern))[:-n]:
            os.remove(os.path.realpath(stale_file))
def plot_generated_batch(X_real, generator_model, epoch_number, batch_size,
                         noise_dim, image_data_format, model_name,
                         noise_scale=0.5, suffix='training', MAX_FRAMES_PER_GIF=100):
    """Save a grid of generated vs. real samples and append a GIF frame.

    Builds a 4x4 grid (first 8 generated, then 8 real images), writes it to
    "../../figures/<model_name>/<model_name>_current_batch_<suffix>.png",
    and appends one captioned generated sample as a new frame of
    "<model_name>_<suffix>.gif", rolling the GIF over into a numbered file
    once it exceeds MAX_FRAMES_PER_GIF frames.

    Args:
        X_real: batch of real images, normalized to [-1, 1].
        generator_model: model mapping noise vectors to images.
        epoch_number: epoch index rendered into the caption.
        batch_size: number of noise vectors to sample.
        noise_dim: tuple; noise_dim[0] is the latent dimensionality.
        image_data_format: 'channels_last' or 'channels_first'.
        model_name: subdirectory / filename prefix under ../../figures.
        noise_scale: std-dev of the sampled Gaussian noise.
        suffix: filename suffix (e.g. 'training').
        MAX_FRAMES_PER_GIF: frame budget before the GIF is split.
    """
    # Generate images
    X_gen = sample_noise(noise_scale, batch_size, noise_dim)
    X_gen = generator_model.predict(X_gen)
    # Map both halves back to uint8 [0, 255] pixels for display.
    X_real = inverse_normalization(X_real)
    X_gen = inverse_normalization(X_gen)
    Xg = X_gen[:8]
    Xr = X_real[:8]
    if image_data_format == "channels_last":
        X = np.concatenate((Xg, Xr), axis=0)
        list_rows = []
        for i in range(int(X.shape[0] / 4)):
            # Concatenate 4 images side by side into one grid row.
            Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=1)
            list_rows.append(Xr)
        Xr = np.concatenate(list_rows, axis=0)
    if image_data_format == "channels_first":
        X = np.concatenate((Xg, Xr), axis=0)
        list_rows = []
        for i in range(int(X.shape[0] / 4)):
            Xr = np.concatenate([X[k] for k in range(4 * i, 4 * (i + 1))], axis=2)
            list_rows.append(Xr)
        Xr = np.concatenate(list_rows, axis=1)
        # Convert the assembled CHW grid to HWC for image writing.
        Xr = Xr.transpose(1,2,0)
    # Make iter text: a 32-pixel-high black banner with the epoch number,
    # stacked above the image grid.
    text_image = cv2.putText(np.zeros((32, Xr.shape[1], Xr.shape[2])),
                             '%s epoch' % str(epoch_number), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1, cv2.LINE_AA).astype('uint8')
    image = np.vstack((text_image, Xr))
    # if Xr.shape[-1] == 1:
    #     plt.imshow(Xr[:, :, 0], cmap="gray")
    # else:
    #     plt.imshow(Xr)
    # plt.savefig("../../figures/current_batch.png")
    # plt.clf()
    # plt.close()
    imageio.imsave(os.path.join("../../figures", model_name, model_name + "_current_batch_%s.png" % suffix), image)
    # Make gif
    gif_frames = []
    # Read old gif frames (keep only RGB; drop any alpha channel).
    # NOTE(review): the bare except swallows ALL errors, not just a missing
    # GIF on the first call — consider narrowing to FileNotFoundError/IOError.
    try:
        gif_frames_reader = imageio.get_reader(os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix))
        for frame in gif_frames_reader:
            gif_frames.append(frame[:, :, :3])
    except:
        pass
    # Append new frame: the first generated sample with the epoch caption.
    # NOTE(review): assumes Xg[0] is HWC here — confirm for channels_first.
    im = cv2.putText(np.concatenate((np.zeros((32, Xg[0].shape[1], Xg[0].shape[2])), Xg[0]), axis=0),
                     '%s epoch' % str(epoch_number), (10, 10), cv2.FONT_HERSHEY_SIMPLEX, .4, (255, 255, 255), 1, cv2.LINE_AA).astype('uint8')
    gif_frames.append(im)
    # If the frame count exceeds the budget, save the full prefix as a
    # numbered GIF and keep only the overflow.
    if len(gif_frames) > MAX_FRAMES_PER_GIF:
        print("Splitting the GIF...")
        gif_frames_00 = gif_frames[:MAX_FRAMES_PER_GIF]
        num_of_gifs_already_saved = len(glob.glob(os.path.join("../../figures", model_name, model_name + "_%s_*.gif" % suffix)))
        print("Saving", os.path.join("../../figures", model_name, model_name + "_%s_%03d.gif" % (suffix, num_of_gifs_already_saved)))
        imageio.mimsave(os.path.join("../../figures", model_name, model_name + "_%s_%03d.gif" % (suffix, num_of_gifs_already_saved)), gif_frames_00)
        gif_frames = gif_frames[MAX_FRAMES_PER_GIF:]
    # Save gif
    print("Saving", os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix))
    imageio.mimsave(os.path.join("../../figures", model_name, model_name + "_%s.gif" % suffix), gif_frames)
def plot_losses(disc_losses, disc_losses_real, disc_losses_gen, gen_losses,
                model_name, init_epoch=0):
    """Plot the discriminator/generator loss curves and save them as a PNG."""
    epochs = np.arange(len(disc_losses)) + init_epoch
    fig = plt.figure()
    curves = [(disc_losses, 2, 'D'),
              (disc_losses_real, 1, 'D_real'),
              (disc_losses_gen, 1, 'D_gen'),
              (gen_losses, 2, 'G')]
    for values, width, label in curves:
        plt.plot(epochs, values, linewidth=width, label=label)
    plt.legend()
    plt.title("Losses")
    plt.xlabel("Epochs")
    plt.savefig(os.path.join("../../figures", model_name, model_name + "_losses.png"), bbox_inches='tight')
    plt.clf()
    plt.close()
def plot_generated_toy_batch(X_real, generator_model, discriminator_model, noise_dim, gen_iter, noise_scale=0.5):
    """Plot the toy-data KDE, the discriminator contour and generated points.

    Saves the figure to "../../figures/toy_dataset_iter<gen_iter>.jpg".

    Args:
        X_real: accepted but unused; the real-data KDE is re-sampled below.
        generator_model: model mapping noise -> 2-D points.
        discriminator_model: model scoring 2-D points.
        noise_dim: tuple; noise_dim[0] is the latent dimensionality.
        gen_iter: generator iteration number (title and filename).
        noise_scale: std-dev of the sampled Gaussian noise.
    """
    # Generate images
    X_gen = sample_noise(noise_scale, 10000, noise_dim)
    X_gen = generator_model.predict(X_gen)
    # Get some toy data to plot KDE of real data
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Perform the kernel density estimate on a 100x100 grid
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    # Plot the contour of the real-data density
    fig = plt.figure(figsize=(10,10))
    plt.suptitle("Generator iteration %s" % gen_iter, fontweight="bold", fontsize=22)
    ax = fig.gca()
    ax.contourf(xx, yy, f, cmap='Blues', vmin=np.percentile(f,80), vmax=np.max(f), levels=np.linspace(0.25, 0.85, 30))
    # Also plot the contour of the discriminator
    delta = 0.025
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Create mesh
    XX, YY = np.meshgrid(np.arange(xmin, xmax, delta), np.arange(ymin, ymax, delta))
    arr_pos = np.vstack((np.ravel(XX), np.ravel(YY))).T
    # Get Z = predictions
    ZZ = discriminator_model.predict(arr_pos)
    ZZ = ZZ.reshape(XX.shape)
    # Plot contour
    ax.contour(XX, YY, ZZ, cmap="Blues", levels=np.linspace(0.25, 0.85, 10))
    # dy/dx computed for the (disabled) streamline overlay below.
    dy, dx = np.gradient(ZZ)
    # Add streamlines
    # plt.streamplot(XX, YY, dx, dy, linewidth=0.5, cmap="magma", density=1, arrowsize=1)
    # Scatter generated data (first 1000 points only)
    plt.scatter(X_gen[:1000, 0], X_gen[:1000, 1], s=20, color="coral", marker="o")
    # Proxy artists for the legend entries.
    l_gen = plt.Line2D((0,1),(0,0), color='coral', marker='o', linestyle='', markersize=20)
    l_D = plt.Line2D((0,1),(0,0), color='steelblue', linewidth=3)
    l_real = plt.Rectangle((0, 0), 1, 1, fc="steelblue")
    # Create legend from custom artist/label lists
    # bbox_to_anchor = (0.4, 1)
    ax.legend([l_real, l_D, l_gen], ['Real data KDE', 'Discriminator contour',
                                     'Generated data'], fontsize=18, loc="upper left")
    ax.set_xlim(xmin, xmax)
    # Extra headroom above the plot for the legend.
    ax.set_ylim(ymin, ymax + 0.8)
    plt.savefig("../../figures/toy_dataset_iter%s.jpg" % gen_iter)
    plt.clf()
    plt.close()
if __name__ == '__main__':
    # Standalone demo: show a KDE contour of the toy ring dataset.
    data = load_toy(pts_per_mixture=200)
    x = data[:, 0]
    y = data[:, 1]
    xmin, xmax = -1.5, 1.5
    ymin, ymax = -1.5, 1.5
    # Perform the kernel density estimate on a 100x100 grid
    xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
    positions = np.vstack([xx.ravel(), yy.ravel()])
    values = np.vstack([x, y])
    kernel = stats.gaussian_kde(values)
    f = np.reshape(kernel(positions).T, xx.shape)
    fig = plt.figure()
    # Dummy iteration number used only in the title.
    gen_it = 5
    plt.suptitle("Generator iteration %s" % gen_it, fontweight="bold")
    ax = fig.gca()
    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)
    # Contourf plot of the estimated density
    cfset = ax.contourf(xx, yy, f, cmap='Blues', vmin=np.percentile(f,90),
                        vmax=np.max(f), levels=np.linspace(0.25, 0.85, 30))
    # cfset = ax.contour(xx, yy, f, color="k", levels=np.linspace(0.25, 0.85, 30), label="roger")
    plt.legend()
    plt.show()
# ---
import socketserver
import os
import threading
import time
import datetime
import sqlite3 as lite
import sys
import random
import string
# Network endpoint the TCP server binds to.
# NOTE(review): hard-coded LAN address — consider moving to configuration.
HOST ='192.168.53.12'
PORT = 5050
class MyTCPHandler(socketserver.StreamRequestHandler):
    """Line-oriented handler for monitoring clients.

    Protocol, as inferred from the branches below (TODO: confirm against
    the client implementation):
      * a 16-char line is an existing 15-char device id,
      * a 5..15-char line is a username/uid to look up (a fresh id is
        minted and sent back when none is stored yet),
      * a line shorter than 4 chars is an action code, followed by a raw
        binary payload (sleep database, picture, or actions database).
    """

    def handle(self):
        """Process one client connection until it disconnects."""
        while True:
            self.data = self.rfile.readline().decode('utf-8')
            if not self.data:
                print('DISCONNECTED')
                break
            data = str(self.data)
            print(len(data))
            if (len(data) == 16):
                # 15-char id plus the trailing newline.
                usr = data[0:15]
                print("got connection from id: %s" % usr)
            elif (len(data) > 4 and len(data) < 16):
                usr = str(self.data)
                usr = usr[:-1]  # strip the trailing newline
                con = lite.connect('recdb/grannylist.db')
                cur = con.cursor()
                # Parameterized query: the previous string-formatted SQL was
                # injectable from network input.
                cur.execute("SELECT Uid FROM Grannys WHERE Username = ? OR Uid = ?",
                            (usr, usr))
                row = cur.fetchone()
                print(row)
                if row:
                    out = ','.join(map(str, row))
                else:
                    print('got none type')
                    print(row)
                    break
                # A stored NULL Uid stringifies to 'None': mint a fresh id,
                # store it, return it to the client and create its directory.
                if out == 'None':
                    new_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(15))
                    cur.execute("UPDATE Grannys SET Uid = ? WHERE Username = ?",
                                (new_id, usr))
                    con.commit()
                    self.wfile.write(new_id.encode('ascii'))
                    os.makedirs('/var/www/html/recdb/%s' % new_id)
            elif (len(data) < 4):
                # NOTE(review): usr is unbound if a short action line arrives
                # before any id/username line — the original had the same flaw.
                print("latest action id is %s" % data)
                if (int(data[0:2]) == 12):
                    # Action 12: receive a sleep database and merge its rows
                    # into the user's cumulative sleep database.
                    date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')
                    f = open('recdb/%s/%s_%s_sleep.db' % (usr, usr, date), 'wb')
                    l = self.rfile.read(1024)
                    while l:
                        print("getting some data...")
                        f.write(l)
                        l = self.rfile.read(1024)
                    print("all done here!(sleep data)")
                    f.close()
                    db_a = lite.connect('recdb/%s/%s_sleep.db' % (usr, usr))
                    db_b = lite.connect('recdb/%s/%s_%s_sleep.db' % (usr, usr, date))
                    b_cursor = db_b.cursor()
                    b_cursor.execute('SELECT * FROM Sleep')
                    a_cursor = db_a.cursor()
                    a_cursor.execute('CREATE TABLE IF NOT EXISTS Sleep(Id INTEGER PRIMARY KEY, Time DATETIME, X NUMERIC, Y NUMERIC, Z NUMERIC)')
                    for row in b_cursor.fetchall():
                        row_id, ts, x, y, z = row
                        print("----------------------------------------------------------")
                        print("Time: %s, X:%s, Y:%s, Z:%s" % (ts, x, y, z))
                        print("----------------------------------------------------------")
                        # Parameterized insert (was string-formatted SQL).
                        a_cursor.execute("INSERT INTO Sleep (Time, X, Y, Z) VALUES (?, ?, ?, ?)",
                                         (ts, x, y, z))
                    db_a.commit()
                    a_cursor.close()
                    b_cursor.close()
                elif (int(data[0:2]) == 3 or int(data[0:2]) == 4 or int(data[0:2]) == 5):
                    # Actions 3-5: receive a picture upload.
                    date = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H:%M')
                    f = open('recdb/%s/%s_%s_pic.png' % (usr, usr, date), 'wb')
                    l = self.rfile.read(1024)
                    while l:
                        print("getting some data...")
                        f.write(l)
                        l = self.rfile.read(1024)
                    print("all done here!(naked pictures)")
                    f.close()
                else:
                    # Any other action: receive the actions database.
                    f = open('recdb/%s/%s_Actions.db' % (usr, usr), 'wb')
                    l = self.rfile.read(1024)
                    while l:
                        print("getting some data...")
                        f.write(l)
                        l = self.rfile.read(1024)
                    f.close()
                    print("all done here!(action data)")
                    self.data = self.rfile.readline().decode('utf-8')
                    msg = str(self.data)
                    print(msg)

    @staticmethod
    def id_generator(size=15, chars=string.ascii_uppercase + string.digits):
        """Return a random id of `size` characters.

        Fixed: the original definition lacked `self` (so `size` bound to the
        instance) and assigned to an undefined `self`; it now simply returns
        the generated id.
        """
        return ''.join(random.choice(chars) for _ in range(size))
# allow_reuse_address must be set on the class BEFORE the server is
# constructed: TCPServer.__init__ binds the socket immediately, so setting
# the flag on the instance afterwards (as the original did) has no effect.
socketserver.TCPServer.allow_reuse_address = True
server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)
server.serve_forever()
# ---
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.