hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67d2df752b7aa39827c0d7ecf8e8fdf9654cbe97 | 4,789 | py | Python | acct_mgr/tests/functional/testenv.py | scanterog/acct_mgr-0.4.4 | efe1a248b0b2cb935daa6142a5488d3ed2d6fa66 | [
"Beerware"
] | 1 | 2017-06-17T13:02:52.000Z | 2017-06-17T13:02:52.000Z | acct_mgr/tests/functional/testenv.py | scanterog/acct_mgr-0.4.4 | efe1a248b0b2cb935daa6142a5488d3ed2d6fa66 | [
"Beerware"
] | null | null | null | acct_mgr/tests/functional/testenv.py | scanterog/acct_mgr-0.4.4 | efe1a248b0b2cb935daa6142a5488d3ed2d6fa66 | [
"Beerware"
] | 1 | 2018-11-02T01:09:58.000Z | 2018-11-02T01:09:58.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Matthew Good <trac@matt-good.net>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Pedro Algarvio <ufs@ufsoft.org>
import os
import sys
import time
from subprocess import call, Popen
from trac.admin import console
from trac.web import standalone
from trac.env import open_environment
from trac.tests.functional.compat import rmtree, close_fds
from trac.tests.functional.testenv import FunctionalTestEnvironment
from trac.tests.notification import SMTPThreadedServer
from acct_mgr.pwhash import htpasswd
from acct_mgr.tests.functional import logfile
from acct_mgr.tests.functional import tc, ConnectError
from acct_mgr.tests.functional.smtpd import AcctMgrSMTPThreadedServer
class AcctMgrFuntionalTestEnvironment(FunctionalTestEnvironment):
    """Functional test environment for the AccountManager Trac plugin.

    Extends Trac's FunctionalTestEnvironment with an SMTP test server and
    a Trac configuration that enables the acct_mgr components and its
    HtPasswdStore-based authentication.

    NOTE(review): the class name is missing a 'c' ("Funtional") -- kept
    as-is because external code may reference it by name.
    """
    def __init__(self, dirname, port, url):
        FunctionalTestEnvironment.__init__(self, dirname, port, url)
        # Derive the SMTP port from the web port plus the pid so that
        # concurrent test runs are unlikely to collide on the same port.
        self.smtp_port = self.port + os.getpid() % 1000
        self.smtpd = AcctMgrSMTPThreadedServer(self.smtp_port)
        config = self.get_trac_environment().config
        # Enabled Account Manager
        config.set('components', 'acct_mgr.*', 'enabled')
        # Disable trac's LoginModule
        config.set('components', 'trac.web.auth.LoginModule', 'disabled')
        # Setup Account Manager
        config.set('account-manager', 'password_file', self.htpasswd)
        config.set('account-manager', 'password_format', 'htpasswd')
        config.set('account-manager', 'password_store', 'HtPasswdStore')
        # Setup Notification
        config.set('notification', 'smtp_enabled', 'true')
        config.set('notification', 'smtp_from', 'testenv%s@localhost' % self.port)
        config.set('notification', 'smtp_port', self.smtp_port)
        config.set('notification', 'smtp_server', 'localhost')
        config.set('project', 'url', self.url)
        config.set('project', 'admin', 'testenv%s@localhost' % self.port)
        config.set('trac', 'base_url', self.url)
        config.save()
    def start(self):
        """Starts the webserver"""
        # Allow coverage tooling (figleaf) to wrap the server process.
        if 'FIGLEAF' in os.environ:
            exe = os.environ['FIGLEAF']
        else:
            exe = sys.executable
        server = Popen([exe, standalone.__file__,
                        "--port=%s" % self.port, "-s",
                        "--hostname=localhost",
                        self.tracdir],
                       stdout=logfile, stderr=logfile,
                       close_fds=close_fds,
                       cwd=self.command_cwd,
                       )
        self.pid = server.pid
        # Verify that the url is ok
        # Poll for up to ~30 seconds until the standalone server answers.
        timeout = 30
        while timeout:
            try:
                tc.go(self.url)
                break
            except ConnectError:
                time.sleep(1)
                timeout -= 1
        else:
            raise Exception('Timed out waiting for server to start.')
        tc.url(self.url)
        self.smtpd.start()
    def stop(self):
        """Stop the webserver and the SMTP test server."""
        FunctionalTestEnvironment.stop(self)
        self.smtpd.stop()
    def create(self):
        """Create a new test environment; Trac, Subversion,
        authentication."""
        # NOTE(review): os.mkdir returns None, so this branch can never
        # raise; a failed mkdir surfaces as OSError instead -- confirm
        # whether that was the original intent.
        if os.mkdir(self.dirname):
            raise Exception('unable to create test environment')
        if call(["svnadmin", "create", self.repodir], stdout=logfile,
                stderr=logfile, close_fds=close_fds):
            raise Exception('unable to create subversion repository')
        self._tracadmin('initenv', 'testenv%s' % self.port,
                        'sqlite:db/trac.db', 'svn', self.repodir)
        # Start from a clean htpasswd file for every fresh environment.
        if os.path.exists(self.htpasswd):
            os.unlink(self.htpasswd)
        self.adduser('admin')
        self.adduser('user')
        self._tracadmin('permission', 'add', 'admin', 'TRAC_ADMIN')
        # Setup Trac logging
        env = self.get_trac_environment()
        env.config.set('logging', 'log_type', 'file')
        env.config.save()
    def adduser(self, user):
        """Add a user to the environment. Password is the username."""
        f = open(self.htpasswd, 'a')
        f.write("%s:%s\n" % (user, htpasswd(user)))
        f.close()
    def _tracadmin(self, *args):
        """Internal utility method for calling trac-admin"""
        retval = call([sys.executable, console.__file__, self.tracdir]
                      + list(args), stdout=logfile, stderr=logfile,
                      close_fds=close_fds, cwd=self.command_cwd)
        if retval:
            raise Exception('Failed with exitcode %s running trac-admin ' \
                            'with %r' % (retval, args))
| 38.312 | 82 | 0.60284 |
5b222a2e84a2d0716fa7560f7c90ccf3c93b3393 | 813 | py | Python | recsim/environments/__init__.py | MontrealAI/recsim | 9098a8fac9aad62a880011ee575a3db4e7d80ee2 | [
"Apache-2.0"
] | 625 | 2019-09-25T00:45:42.000Z | 2022-03-31T10:27:30.000Z | recsim/environments/__init__.py | MontrealAI/recsim | 9098a8fac9aad62a880011ee575a3db4e7d80ee2 | [
"Apache-2.0"
] | 22 | 2019-10-21T13:40:41.000Z | 2022-03-29T18:29:56.000Z | recsim/environments/__init__.py | isabella232/recsim | 55e50e4be736d222ffe8c2477ed1981b40f91605 | [
"Apache-2.0"
] | 108 | 2019-09-27T18:58:58.000Z | 2022-03-30T09:39:05.000Z | # coding=utf-8
# coding=utf-8
# Copyright 2019 The RecSim Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module importing all environments."""
from recsim.environments import interest_evolution
from recsim.environments import interest_exploration
from recsim.environments import long_term_satisfaction
| 40.65 | 74 | 0.785978 |
45674c08b083cc85a2b92ad6e781178523911aa1 | 2,154 | py | Python | alg_kruskal_minimum_spanning_tree.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | 8 | 2019-03-18T06:37:24.000Z | 2022-01-30T07:50:58.000Z | alg_kruskal_minimum_spanning_tree.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | alg_kruskal_minimum_spanning_tree.py | bowen0701/python-algorithms-data-structures | e625f59a9fc59e4728825078d4434a7968a724e5 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def sort_edges_by_weight(w_graph_d):
    """Collect the edges of an undirected weighted graph, sorted by weight.

    The original function was an unimplemented stub (``pass``); callers
    (kruskal) iterate ``sorted_edges_d.keys()``, so this returns a dict
    keyed by (v, u) edge tuples inserted in ascending weight order.
    Relies on insertion-ordered dicts (guaranteed since Python 3.7).

    Args:
        w_graph_d: adjacency dict {vertex: {neighbor: weight}}.

    Returns:
        dict mapping (v, u) -> weight, in ascending weight order.
    """
    edges = []
    seen = set()
    for v, neighbors_d in w_graph_d.items():
        for u, weight in neighbors_d.items():
            # Each undirected edge appears twice in the adjacency dict;
            # keep only the first orientation encountered.
            if (u, v) not in seen:
                seen.add((v, u))
                edges.append((weight, v, u))
    edges.sort()
    sorted_edges_d = {}
    for weight, v, u in edges:
        sorted_edges_d[(v, u)] = weight
    return sorted_edges_d
def make_set(v, previous_d, rank_d):
    """Register vertex v as a singleton set: its own parent, rank zero."""
    rank_d[v] = 0
    previous_d[v] = v
    return previous_d, rank_d
def link(v, u, previous_d, rank_d):
    """Link method.

    Changes the parent pointer of one of roots, say v,
    and makes it point to y. Return the root of composit tree u.

    Fix: the original had ``if rank_d[v] = rank_d[u]:`` -- an assignment
    inside a condition, which is a SyntaxError; it must be ``==``.
    """
    # Ensure u is the root with the larger (or equal) rank.
    if rank_d[v] > rank_d[u]:
        u, v = v, u
    # Equal ranks: attaching v under u makes u's tree one level deeper.
    if rank_d[v] == rank_d[u]:
        rank_d[u] += 1
    previous_d[v] = u
    return u, previous_d, rank_d
def find(v, previous_d):
    """Find method.

    Make each vertices point directly to the root (path compression).

    Fix: the original recursed with ``find(v, ...)`` on the same vertex,
    causing infinite recursion, and stored the returned (root, dict)
    tuple in ``previous_d[v]``. Recurse on the parent and store only
    the root.
    """
    if v != previous_d[v]:
        root, previous_d = find(previous_d[v], previous_d)
        previous_d[v] = root
        return root, previous_d
    return v, previous_d
def union(v, u, previous_d, rank_d):
    """Merge the sets containing v and u (union by rank).

    Finds both roots, then links the shorter tree under the taller one.
    """
    root_v, previous_d = find(v, previous_d)
    root_u, previous_d = find(u, previous_d)
    return link(root_v, root_u, previous_d, rank_d)
def kruskal(w_graph_d):
    """Kruskal's algorithm for minimum spanning tree
    in undirected weighted graph.

    Greedily scans edges in ascending weight order, adding an edge to the
    MST whenever its endpoints lie in different disjoint sets.

    Time complexity for graph G(V, E):
    O(|E|+|V|+|E|log(|V|))
    = O(|E|log(|V|^2))
    = O(|E|log(|V|)).
    """
    mst_set = set()
    # sorted_edges_d must expose (v, u) keys in ascending weight order.
    sorted_edges_d = sort_edges_by_weight(w_graph_d)
    previous_d = {}
    rank_d = {}
    # Every vertex starts as its own singleton set.
    for v in w_graph_d.keys():
        previous_d, rank_d = make_set(v, previous_d, rank_d)
    for (v, u) in sorted_edges_d.keys():
        find_v, previous_d = find(v, previous_d)
        find_u, previous_d = find(u, previous_d)
        # Different roots: the edge connects two components -> keep it.
        if find_v != find_u:
            mst_set.add((v, u))
            u, previous_d, rank_d = union(v, u, previous_d, rank_d)
    return mst_set, previous_d, rank_d
def main():
    """Demo entry point: print a sample weighted graph.

    NOTE(review): the MST itself is never computed/printed here (the
    original body ends in ``pass``); behavior preserved.
    """
    # Sample undirected weighted graph as an adjacency dict.
    w_graph_d = {
        'a': {'b': 1, 'd': 4, 'e': 3},
        'b': {'a': 1, 'd': 4, 'e': 2},
        'c': {'e': 4, 'f': 5},
        'd': {'a': 4, 'b': 4, 'e': 4},
        'e': {'a': 3, 'b': 2, 'c': 4, 'd': 4, 'f': 7},
        'f': {'c': 5, 'e': 7}
    }
    print('w_graph_d:\n{}'.format(w_graph_d))
    print('Kruskal\'s minimum spanning tree:')
    pass


if __name__ == '__main__':
    main()
| 22.206186 | 61 | 0.616527 |
a48b96f61501d3a0c0c9e59be24866333a84eb27 | 1,312 | py | Python | h2o-py/tests/testdir_algos/glrm/pyunit_cancar_glrm.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_algos/glrm/pyunit_cancar_glrm.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_algos/glrm/pyunit_cancar_glrm.py | ahmedengu/h2o-3 | ac2c0a6fbe7f8e18078278bf8a7d3483d41aca11 | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
def glrm_cancar():
print("Importing cancar.csv data...")
cancarH2O = h2o.upload_file(pyunit_utils.locate("smalldata/glrm_test/cancar.csv"))
cancarH2O.describe()
print("Building GLRM model with init = PlusPlus:\n")
glrm_pp = H2OGeneralizedLowRankEstimator(k=4,
transform="NONE",
init="PlusPlus",
loss="Quadratic",
regularization_x="None",
regularization_y="None",
max_iterations=1000)
glrm_pp.train(x=cancarH2O.names, training_frame=cancarH2O)
glrm_pp.show()
print("Building GLRM model with init = SVD:\n")
glrm_svd = H2OGeneralizedLowRankEstimator(k=4, transform="NONE", init="SVD", loss="Quadratic", regularization_x="None", regularization_y="None", max_iterations=1000)
glrm_svd.train(x=cancarH2O.names, training_frame=cancarH2O)
glrm_svd.show()
if __name__ == "__main__":
pyunit_utils.standalone_test(glrm_cancar)
else:
glrm_cancar()
| 37.485714 | 167 | 0.617378 |
d5c82fb5f57e67368ef0fe0f560c4899614083f6 | 1,177 | py | Python | examples/remote_send.py | Perlence/EmbedPython.ahk | 17ecfbc41757e5ba5008dcd157d1528d7b57decd | [
"BSD-3-Clause"
] | 34 | 2020-10-29T10:09:56.000Z | 2022-03-25T02:04:20.000Z | examples/remote_send.py | Perlence/EmbedPython.ahk | 17ecfbc41757e5ba5008dcd157d1528d7b57decd | [
"BSD-3-Clause"
] | 9 | 2021-02-16T05:59:31.000Z | 2022-02-16T08:13:14.000Z | examples/remote_send.py | Perlence/EmbedPython.ahk | 17ecfbc41757e5ba5008dcd157d1528d7b57decd | [
"BSD-3-Clause"
] | 6 | 2021-02-15T14:40:00.000Z | 2022-01-02T17:07:53.000Z | """Receive keys over TCP and replay them.
Example::
$ ahkpy remote_send.py &
Listening on localhost 3033
$ printf "#r" | nc localhost 3033
"""
import argparse
import asyncio
import sys
import ahkpy as ahk
def main():
    """Parse the optional HOST/PORT arguments and run the relay server."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("HOST", nargs="?", default="localhost")
    arg_parser.add_argument("PORT", nargs="?", default=3033, type=int)
    options = arg_parser.parse_args()
    try:
        asyncio.run(serve(options.HOST, options.PORT))
    except KeyboardInterrupt:
        sys.exit()
async def serve(host, port):
    """Listen on (host, port) and dispatch each client connection to handle().

    Also schedules ``sleeper`` on the running loop so AutoHotkey's message
    queue keeps getting pumped while asyncio runs.
    """
    srv = await asyncio.start_server(handle, host, port)
    print("Listening on", host, port)
    # Schedule a function that will check AHK message queue repeatedly.
    loop = asyncio.get_running_loop()
    loop.call_soon(sleeper, loop)
    await srv.serve_forever()
async def handle(reader, writer):
    """Read one Send string from the client and replay it via AutoHotkey."""
    try:
        raw = await reader.read()
        keys = raw.decode()
        print(repr(keys))
        ahk.send(keys)
    finally:
        # Always close the connection, even if decoding or sending fails.
        writer.close()
def sleeper(loop):
    """Briefly pump AHK's message queue, then reschedule itself on *loop*."""
    ahk.sleep(0.01)
    loop.call_soon(sleeper, loop)
if __name__ == "__main__":
main()
| 20.293103 | 71 | 0.656754 |
269daeec5ed95e40af7896d178ed7c2ae9c2d763 | 30,532 | py | Python | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | neutron/tests/unit/plugins/ml2/drivers/openvswitch/agent/test_ovs_tunnel.py | ilay09/neutron | b7f9803c88b17a6ebd40fd44d15d4336bea7b394 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import time
import mock
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log
import six
from neutron.agent.common import ip_lib
from neutron.agent.common import ovs_lib
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import ovs_test_base
from neutron.tests.unit.plugins.ml2.drivers.openvswitch.agent \
import test_vlanmanager
def nonzero(f):
    """Invoke *f*'s truthiness hook directly (the dunder name differs
    between Python 2 and 3).

    The hook is called rather than using ``bool(f)`` so that, for mock
    call objects, the recorded ``__bool__``/``__nonzero__`` call entry
    is returned instead of a plain bool.
    """
    hook = f.__bool__ if six.PY3 else f.__nonzero__
    return hook()
# Useful global dummy variables.
NET_UUID = '3faeebfe-5d37-11e1-a64b-000c29d5f0a7'
LS_ID = 420  # segmentation id of the test network
LV_ID = 42  # local VLAN id allocated for it
LV_IDS = [42, 43]
VIF_ID = '404deaec-5d37-11e1-a64b-000c29d5f0a8'
VIF_MAC = '3c:09:24:1e:78:23'
OFPORT_NUM = 1
# A single canned VIF port used across the tests.
VIF_PORT = ovs_lib.VifPort('port', OFPORT_NUM,
                           VIF_ID, VIF_MAC, 'switch')
VIF_PORTS = {VIF_ID: VIF_PORT}
FIXED_IPS = [{'subnet_id': 'my-subnet-uuid',
              'ip_address': '1.1.1.1'}]
VM_DEVICE_OWNER = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'fake'
# Pre-populated GRE tunnel ofports: remote ip -> ofport.
TUN_OFPORTS = {p_const.TYPE_GRE: {'ip1': '11', 'ip2': '12'}}
BCAST_MAC = "01:00:00:00:00:00/01:00:00:00:00:00"
UCAST_MAC = "00:00:00:00:00:00/01:00:00:00:00:00"
class DummyPort(object):
    """Minimal port stand-in exposing only an interface id."""
    def __init__(self, interface_id):
        self.interface_id = interface_id
class DummyVlanBinding(object):
    """Minimal network-to-VLAN binding stand-in."""
    def __init__(self, network_id, vlan_id):
        self.network_id = network_id
        self.vlan_id = vlan_id
class TunnelTest(object):
USE_VETH_INTERCONNECTION = False
VETH_MTU = None
    def setUp(self):
        """Wire up a fully mocked OVS agent environment.

        Replaces the int/phys/tun/aux bridge classes, ip_lib helpers and
        ``utils.execute`` with mocks, then records the baseline expected
        calls via ``_define_expected_calls()``.
        """
        super(TunnelTest, self).setUp()
        self.useFixture(test_vlanmanager.LocalVlanManagerFixture())
        conn_patcher = mock.patch(
            'neutron.agent.ovsdb.impl_idl._connection')
        conn_patcher.start()
        self.addCleanup(conn_patcher.stop)
        cfg.CONF.set_default('firewall_driver',
                             'neutron.agent.firewall.NoopFirewallDriver',
                             group='SECURITYGROUP')
        cfg.CONF.set_override('report_interval', 0, 'AGENT')
        # Bridge names and canned ofport numbers used across the tests.
        self.INT_BRIDGE = 'integration_bridge'
        self.TUN_BRIDGE = 'tunnel_bridge'
        self.MAP_TUN_BRIDGE = 'tun_br_map'
        self.AUX_BRIDGE = 'ancillary_bridge'
        self.NET_MAPPING = ['net1:%s' % self.MAP_TUN_BRIDGE]
        self.INT_OFPORT = 11111
        self.TUN_OFPORT = 22222
        self.MAP_TUN_INT_OFPORT = 33333
        self.MAP_TUN_PHY_OFPORT = 44444
        # Local VLAN mapping tuples: (lvid, net type, phys net, seg id, ports).
        self.LVM_DATA = (
            LV_ID, 'gre', None, LS_ID, VIF_PORTS)
        self.LVM_FLAT_DATA = (
            LV_ID, 'flat', 'net1', LS_ID, VIF_PORTS)
        self.LVM_VLAN_DATA = (
            LV_ID, 'vlan', 'net1', LS_ID, VIF_PORTS)
        self.inta = mock.Mock()
        self.intb = mock.Mock()
        mock.patch.object(ovs_lib.BaseOVS, 'config',
                          new_callable=mock.PropertyMock,
                          return_value={}).start()
        mock.patch('neutron.agent.ovsdb.impl_idl._connection').start()
        # One autospec'd mock per bridge the agent may look up by name.
        self.ovs_bridges = {
            self.INT_BRIDGE: mock.create_autospec(
                self.br_int_cls('br-int')),
            self.TUN_BRIDGE: mock.create_autospec(
                self.br_tun_cls('br-tun')),
            self.MAP_TUN_BRIDGE: mock.create_autospec(
                self.br_phys_cls('br-phys')),
            self.AUX_BRIDGE: mock.create_autospec(
                ovs_lib.OVSBridge('br-aux')),
        }
        self.ovs_int_ofports = {
            'patch-tun': self.TUN_OFPORT,
            'int-%s' % self.MAP_TUN_BRIDGE: self.MAP_TUN_INT_OFPORT
        }
        def lookup_br(br_name, *args, **kwargs):
            # Bridge "constructors" return the canned mock for that name.
            return self.ovs_bridges[br_name]
        self.mock_int_bridge_cls = mock.patch(self._BR_INT_CLASS,
                                              autospec=True).start()
        self.mock_int_bridge_cls.side_effect = lookup_br
        self.mock_phys_bridge_cls = mock.patch(self._BR_PHYS_CLASS,
                                               autospec=True).start()
        self.mock_phys_bridge_cls.side_effect = lookup_br
        self.mock_tun_bridge_cls = mock.patch(self._BR_TUN_CLASS,
                                              autospec=True).start()
        self.mock_tun_bridge_cls.side_effect = lookup_br
        self.mock_aux_bridge_cls = mock.patch(
            'neutron.agent.common.ovs_lib.OVSBridge',
            autospec=True).start()
        self.mock_aux_bridge_cls.side_effect = lookup_br
        # Canned return values so agent setup proceeds deterministically.
        self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
        self.mock_int_bridge.add_port.return_value = self.MAP_TUN_INT_OFPORT
        self.mock_int_bridge.add_patch_port.side_effect = (
            lambda tap, peer: self.ovs_int_ofports[tap])
        self.mock_int_bridge.port_exists.return_value = False
        self.mock_int_bridge.get_vif_ports.return_value = []
        self.mock_int_bridge.get_ports_attributes.return_value = []
        self.mock_int_bridge.db_get_val.return_value = {}
        self.mock_map_tun_bridge = self.ovs_bridges[self.MAP_TUN_BRIDGE]
        self.mock_map_tun_bridge.br_name = self.MAP_TUN_BRIDGE
        self.mock_map_tun_bridge.add_port.return_value = (
            self.MAP_TUN_PHY_OFPORT)
        self.mock_map_tun_bridge.add_patch_port.return_value = (
            self.MAP_TUN_PHY_OFPORT)
        self.mock_map_tun_bridge.port_exists.return_value = False
        self.mock_tun_bridge = self.ovs_bridges[self.TUN_BRIDGE]
        self.mock_tun_bridge.add_port.return_value = self.INT_OFPORT
        self.mock_tun_bridge.add_patch_port.return_value = self.INT_OFPORT
        self.ipdevice = mock.patch.object(ip_lib, 'IPDevice').start()
        self.ipwrapper = mock.patch.object(ip_lib, 'IPWrapper').start()
        add_veth = self.ipwrapper.return_value.add_veth
        add_veth.return_value = [self.inta, self.intb]
        self.get_bridges = mock.patch.object(ovs_lib.BaseOVS,
                                             'get_bridges').start()
        self.get_bridges.return_value = [self.INT_BRIDGE,
                                         self.TUN_BRIDGE,
                                         self.MAP_TUN_BRIDGE,
                                         self.AUX_BRIDGE]
        self.get_bridge_external_bridge_id = mock.patch.object(
            ovs_lib.BaseOVS,
            'get_bridge_external_bridge_id').start()
        self.get_bridge_external_bridge_id.side_effect = (
            lambda bridge: bridge if bridge in self.ovs_bridges else None)
        self.execute = mock.patch('neutron.agent.common.utils.execute').start()
        self._define_expected_calls()
    def _define_expected_calls(self, arp_responder=False):
        """Record the baseline mock calls agent construction should make.

        :param arp_responder: expected flag passed to the tun bridge's
            ``setup_default_table`` (True when the local ARP responder
            is expected to be enabled).
        """
        self.mock_int_bridge_cls_expected = [
            mock.call(self.INT_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_phys_bridge_cls_expected = [
            mock.call(self.MAP_TUN_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_tun_bridge_cls_expected = [
            mock.call(self.TUN_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_int_bridge = self.ovs_bridges[self.INT_BRIDGE]
        self.mock_int_bridge_expected = [
            mock.call.create(),
            mock.call.set_secure_mode(),
            mock.call.setup_controllers(mock.ANY),
            mock.call.setup_default_table(),
        ]
        self.mock_map_tun_bridge_expected = [
            mock.call.create(),
            mock.call.set_secure_mode(),
            mock.call.setup_controllers(mock.ANY),
            mock.call.setup_default_table(),
            mock.call.port_exists('phy-%s' % self.MAP_TUN_BRIDGE),
            mock.call.add_patch_port('phy-%s' % self.MAP_TUN_BRIDGE,
                                     constants.NONEXISTENT_PEER),
        ]
        self.mock_int_bridge_expected += [
            mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
                                 'type', log_errors=False),
            mock.call.port_exists('int-%s' % self.MAP_TUN_BRIDGE),
            mock.call.add_patch_port('int-%s' % self.MAP_TUN_BRIDGE,
                                     constants.NONEXISTENT_PEER),
        ]
        # Patch ports start dropped until the peer option is set.
        self.mock_int_bridge_expected += [
            mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
            mock.call.set_db_attribute(
                'Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
                'options', {'peer': 'phy-%s' % self.MAP_TUN_BRIDGE}),
        ]
        self.mock_map_tun_bridge_expected += [
            mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
            mock.call.set_db_attribute(
                'Interface', 'phy-%s' % self.MAP_TUN_BRIDGE,
                'options', {'peer': 'int-%s' % self.MAP_TUN_BRIDGE}),
        ]
        self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE]
        # No calls are expected on the ancillary bridge.
        self.mock_aux_bridge_expected = [
        ]
        self.mock_tun_bridge_expected = [
            mock.call.create(secure_mode=True),
            mock.call.setup_controllers(mock.ANY),
            mock.call.port_exists('patch-int'),
            # The agent truth-tests the port_exists() result.
            nonzero(mock.call.port_exists()),
            mock.call.add_patch_port('patch-int', 'patch-tun'),
        ]
        self.mock_int_bridge_expected += [
            mock.call.port_exists('patch-tun'),
            mock.call.add_patch_port('patch-tun', 'patch-int'),
        ]
        self.mock_int_bridge_expected += [
            mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT,
                                     ovs_lib.UNASSIGNED_OFPORT)),
            mock.call.get_ports_attributes(
                'Port', columns=['name', 'other_config', 'tag'], ports=[])
        ]
        self.mock_tun_bridge_expected += [
            mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
        ]
        self.ipdevice_expected = []
        self.ipwrapper_expected = [mock.call()]
        self.get_bridges_expected = [mock.call(), mock.call()]
        self.inta_expected = []
        self.intb_expected = []
        self.execute_expected = []
    def _build_agent(self, **config_opts_agent):
        """Configure and initialize OVS agent.

        :param config_opts_agent: a dict with options to override the
                                  default values for the AGENT group.
        """
        # The agent receives the mocked bridge classes instead of real ones.
        bridge_classes = {
            'br_int': self.mock_int_bridge_cls,
            'br_phys': self.mock_phys_bridge_cls,
            'br_tun': self.mock_tun_bridge_cls,
        }
        cfg.CONF.set_override('integration_bridge', self.INT_BRIDGE, 'OVS')
        cfg.CONF.set_override('tunnel_bridge', self.TUN_BRIDGE, 'OVS')
        cfg.CONF.set_override('local_ip', '10.0.0.1', 'OVS')
        cfg.CONF.set_override('bridge_mappings', self.NET_MAPPING, 'OVS')
        cfg.CONF.set_override('polling_interval', 2, 'AGENT')
        cfg.CONF.set_override('tunnel_types', ['gre'], 'AGENT')
        cfg.CONF.set_override('veth_mtu', self.VETH_MTU, 'AGENT')
        cfg.CONF.set_override('minimize_polling', False, 'AGENT')
        cfg.CONF.set_override('use_veth_interconnection',
                              self.USE_VETH_INTERCONNECTION, 'OVS')
        # Per-test overrides go last so they win over the defaults above.
        for k, v in config_opts_agent.items():
            cfg.CONF.set_override(k, v, 'AGENT')
        return self.mod_agent.OVSNeutronAgent(bridge_classes, cfg.CONF)
def _verify_mock_call(self, mock_obj, expected):
mock_obj.assert_has_calls(expected)
self.assertEqual(expected, mock_obj.mock_calls)
    def _verify_mock_calls(self):
        """Check every mocked collaborator against its expected call list."""
        self._verify_mock_call(self.mock_int_bridge_cls,
                               self.mock_int_bridge_cls_expected)
        self._verify_mock_call(self.mock_tun_bridge_cls,
                               self.mock_tun_bridge_cls_expected)
        self._verify_mock_call(self.mock_phys_bridge_cls,
                               self.mock_phys_bridge_cls_expected)
        self._verify_mock_call(self.mock_int_bridge,
                               self.mock_int_bridge_expected)
        self._verify_mock_call(self.mock_map_tun_bridge,
                               self.mock_map_tun_bridge_expected)
        self._verify_mock_call(self.mock_tun_bridge,
                               self.mock_tun_bridge_expected)
        self._verify_mock_call(self.mock_aux_bridge,
                               self.mock_aux_bridge_expected)
        self._verify_mock_call(self.ipdevice, self.ipdevice_expected)
        self._verify_mock_call(self.ipwrapper, self.ipwrapper_expected)
        self._verify_mock_call(self.get_bridges, self.get_bridges_expected)
        self._verify_mock_call(self.inta, self.inta_expected)
        self._verify_mock_call(self.intb, self.intb_expected)
        self._verify_mock_call(self.execute, self.execute_expected)
def test_construct(self):
agent = self._build_agent()
self.assertEqual(agent.agent_id, 'ovs-agent-%s' % cfg.CONF.host)
self._verify_mock_calls()
# TODO(ethuleau): Initially, local ARP responder is be dependent to the
# ML2 l2 population mechanism driver.
# The next two tests use l2_pop flag to test ARP responder
    def test_construct_with_arp_responder(self):
        """l2_pop + arp_responder enables the tun bridge ARP responder."""
        self._build_agent(l2_population=True, arp_responder=True)
        # Re-record expectations with arp_responder=True before verifying.
        self._define_expected_calls(True)
        self._verify_mock_calls()
    def test_construct_without_arp_responder(self):
        """arp_responder alone (no l2_pop) must not enable the responder."""
        self._build_agent(l2_population=False, arp_responder=True)
        self._verify_mock_calls()
    def test_construct_vxlan(self):
        """VXLAN tunnel type still produces the baseline setup calls."""
        self._build_agent(tunnel_types=['vxlan'])
        self._verify_mock_calls()
    def test_provision_local_vlan(self):
        """Provisioning a GRE network installs flood and local-VLAN flows."""
        ofports = list(TUN_OFPORTS[p_const.TYPE_GRE].values())
        self.mock_tun_bridge_expected += [
            mock.call.install_flood_to_tun(LV_ID, LS_ID, ofports),
            mock.call.provision_local_vlan(
                network_type=p_const.TYPE_GRE,
                lvid=LV_ID,
                segmentation_id=LS_ID),
        ]
        a = self._build_agent()
        a.available_local_vlans = set([LV_ID])
        a.tun_br_ofports = TUN_OFPORTS
        a.provision_local_vlan(NET_UUID, p_const.TYPE_GRE, None, LS_ID)
        self._verify_mock_calls()
    def test_provision_local_vlan_flat(self):
        """A flat network provisions flows on both phys and int bridges."""
        self.mock_map_tun_bridge_expected.append(
            mock.call.provision_local_vlan(
                port=self.MAP_TUN_PHY_OFPORT,
                lvid=LV_ID,
                segmentation_id=None,
                distributed=False))
        self.mock_int_bridge_expected.append(
            mock.call.provision_local_vlan(
                port=self.INT_OFPORT,
                lvid=LV_ID,
                segmentation_id=None))
        a = self._build_agent()
        a.available_local_vlans = set([LV_ID])
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net1', LS_ID)
        self._verify_mock_calls()
    def test_provision_local_vlan_flat_fail(self):
        """A flat network on an unmapped physical net provisions nothing."""
        a = self._build_agent()
        a.provision_local_vlan(NET_UUID, p_const.TYPE_FLAT, 'net2', LS_ID)
        self._verify_mock_calls()
    def test_provision_local_vlan_vlan(self):
        """A VLAN network provisions flows carrying its segmentation id."""
        self.mock_map_tun_bridge_expected.append(
            mock.call.provision_local_vlan(
                port=self.MAP_TUN_PHY_OFPORT,
                lvid=LV_ID,
                segmentation_id=LS_ID,
                distributed=False))
        self.mock_int_bridge_expected.append(
            mock.call.provision_local_vlan(
                port=self.INT_OFPORT,
                lvid=LV_ID,
                segmentation_id=LS_ID))
        a = self._build_agent()
        a.available_local_vlans = set([LV_ID])
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net1', LS_ID)
        self._verify_mock_calls()
    def test_provision_local_vlan_vlan_fail(self):
        """A VLAN network on an unmapped physical net provisions nothing."""
        a = self._build_agent()
        a.provision_local_vlan(NET_UUID, p_const.TYPE_VLAN, 'net2', LS_ID)
        self._verify_mock_calls()
    def test_reclaim_local_vlan(self):
        """Reclaiming a tunnel network removes its flows and frees the VLAN."""
        self.mock_tun_bridge_expected += [
            mock.call.reclaim_local_vlan(network_type='gre',
                                         segmentation_id=LS_ID),
            mock.call.delete_flood_to_tun(LV_ID),
            mock.call.delete_unicast_to_tun(LV_ID, None),
            mock.call.delete_arp_responder(LV_ID, None),
        ]
        a = self._build_agent()
        a.available_local_vlans = set()
        a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
        a.reclaim_local_vlan(NET_UUID)
        # The freed local VLAN id must return to the available pool.
        self.assertIn(self.LVM_DATA[0], a.available_local_vlans)
        self._verify_mock_calls()
    def test_reclaim_local_vlan_flat(self):
        """Reclaiming a flat network removes flows on phys and int bridges."""
        self.mock_map_tun_bridge_expected.append(
            mock.call.reclaim_local_vlan(
                port=self.MAP_TUN_PHY_OFPORT,
                lvid=self.LVM_FLAT_DATA[0]))
        self.mock_int_bridge_expected.append(
            mock.call.reclaim_local_vlan(
                port=self.INT_OFPORT,
                segmentation_id=None))
        a = self._build_agent()
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.available_local_vlans = set()
        a.vlan_manager.add(NET_UUID, *self.LVM_FLAT_DATA)
        a.reclaim_local_vlan(NET_UUID)
        self.assertIn(self.LVM_FLAT_DATA[0], a.available_local_vlans)
        self._verify_mock_calls()
    def test_reclaim_local_vlan_vlan(self):
        """Reclaiming a VLAN network removes flows on phys and int bridges."""
        self.mock_map_tun_bridge_expected.append(
            mock.call.reclaim_local_vlan(
                port=self.MAP_TUN_PHY_OFPORT,
                lvid=self.LVM_VLAN_DATA[0]))
        self.mock_int_bridge_expected.append(
            mock.call.reclaim_local_vlan(
                port=self.INT_OFPORT,
                segmentation_id=LS_ID))
        a = self._build_agent()
        a.phys_brs['net1'] = self.mock_map_tun_bridge
        a.phys_ofports['net1'] = self.MAP_TUN_PHY_OFPORT
        a.int_ofports['net1'] = self.INT_OFPORT
        a.available_local_vlans = set()
        a.vlan_manager.add(NET_UUID, *self.LVM_VLAN_DATA)
        a.reclaim_local_vlan(NET_UUID)
        self.assertIn(self.LVM_VLAN_DATA[0], a.available_local_vlans)
        self._verify_mock_calls()
    def test_port_bound(self):
        """Binding a port stores its VLAN mapping in the port other_config."""
        vlan_mapping = {'segmentation_id': str(LS_ID),
                        'physical_network': 'None',
                        'net_uuid': NET_UUID,
                        'network_type': 'gre'}
        self.mock_int_bridge_expected += [
            mock.call.db_get_val('Port', 'port', 'other_config'),
            mock.call.set_db_attribute('Port', VIF_PORT.port_name,
                                       'other_config',
                                       vlan_mapping)]
        a = self._build_agent()
        a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
        a.local_dvr_map = {}
        # Empty other_config so the agent writes the fresh mapping.
        self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = {}
        a.port_bound(VIF_PORT, NET_UUID, 'gre', None, LS_ID,
                     FIXED_IPS, VM_DEVICE_OWNER, False)
        self._verify_mock_calls()
def test_port_unbound(self):
with mock.patch.object(self.mod_agent.OVSNeutronAgent,
'reclaim_local_vlan') as reclaim_local_vlan:
a = self._build_agent()
a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
a.port_unbound(VIF_ID, NET_UUID)
reclaim_local_vlan.assert_called_once_with(NET_UUID)
self._verify_mock_calls()
    def test_port_dead(self):
        """A dead port gets the dead VLAN tag and its traffic is dropped."""
        self.mock_int_bridge_expected += [
            mock.call.db_get_val('Port', VIF_PORT.port_name, 'tag',
                                 log_errors=True),
            mock.call.set_db_attribute(
                'Port', VIF_PORT.port_name,
                'tag', constants.DEAD_VLAN_TAG,
                log_errors=True),
            mock.call.drop_port(in_port=VIF_PORT.ofport),
        ]
        a = self._build_agent()
        a.available_local_vlans = set([LV_ID])
        a.vlan_manager.add(NET_UUID, *self.LVM_DATA)
        # Non-empty tag so the agent proceeds to retag the port as dead.
        self.ovs_bridges[self.INT_BRIDGE].db_get_val.return_value = mock.Mock()
        a.port_dead(VIF_PORT)
        self._verify_mock_calls()
    def test_tunnel_update(self):
        """A tunnel_update RPC for a remote peer creates a tunnel port."""
        tunnel_port = '9999'
        self.mock_tun_bridge.add_tunnel_port.return_value = tunnel_port
        self.mock_tun_bridge_expected += [
            mock.call.add_tunnel_port('gre-0a000a01', '10.0.10.1', '10.0.0.1',
                                      'gre', 4789, True, False),
            mock.call.setup_tunnel_port('gre', tunnel_port),
        ]
        a = self._build_agent()
        a.tunnel_update(
            mock.sentinel.ctx, tunnel_ip='10.0.10.1',
            tunnel_type=p_const.TYPE_GRE)
        self._verify_mock_calls()
    def test_tunnel_update_self(self):
        """A tunnel_update for the agent's own IP must not create a port."""
        a = self._build_agent()
        a.tunnel_update(
            mock.sentinel.ctx, tunnel_ip='10.0.0.1')
        self._verify_mock_calls()
    def test_daemon_loop(self):
        """Drives two iterations of rpc_loop() with fully mocked inputs.

        Iteration 1 reports port 'tap0' as added; iteration 2 reports it
        removed.  The otherwise endless loop is terminated by making
        process_network_ports raise on the second pass.
        """
        # get_events() payloads: tap0 appears, then disappears.
        reply_ge_1 = {'added': [{'name': 'tap0', 'ofport': 3,
                                 'external_ids': {
                                     'attached-mac': 'test_mac'}}],
                      'removed': []}
        reply_ge_2 = {'added': [],
                      'removed': [{'name': 'tap0', 'ofport': 3,
                                   'external_ids': {
                                       'attached-mac': 'test_mac'}}]}
        # process_ports_events() results for the two iterations.
        reply_pe_1 = {'current': set(['tap0']),
                      'added': set(['tap0']),
                      'removed': set([])}
        reply_pe_2 = {'current': set([]),
                      'added': set([]),
                      'removed': set(['tap0'])}
        reply_ancillary = {'current': set([]),
                           'added': set([]),
                           'removed': set([])}
        # Canary check runs each iteration; cleanup_flows once at startup.
        self.mock_int_bridge_expected += [
            mock.call.check_canary_table(),
            mock.call.cleanup_flows(),
            mock.call.check_canary_table()
        ]
        self.mock_tun_bridge_expected += [
            mock.call.cleanup_flows()
        ]
        self.mock_map_tun_bridge_expected += [
            mock.call.cleanup_flows()
        ]
        # No cleanup is expected on ancillary bridge
        self.ovs_bridges[self.INT_BRIDGE].check_canary_table.return_value = \
            constants.OVS_NORMAL
        with mock.patch.object(log.KeywordArgumentAdapter,
                               'exception') as log_exception,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'process_ports_events') as process_p_events,\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'process_network_ports') as process_network_ports,\
                mock.patch.object(self.mod_agent.OVSNeutronAgent,
                                  'tunnel_sync'),\
                mock.patch.object(time, 'sleep'),\
                mock.patch.object(
                    self.mod_agent.OVSNeutronAgent,
                    'update_stale_ofport_rules') as update_stale:
            log_exception.side_effect = Exception(
                'Fake exception to get out of the loop')
            update_stale.return_value = []
            devices_not_ready = set()
            process_p_events.side_effect = [
                (reply_pe_1, reply_ancillary, devices_not_ready),
                (reply_pe_2, reply_ancillary, devices_not_ready)]
            interface_polling = mock.Mock()
            interface_polling.get_events.side_effect = [reply_ge_1, reply_ge_2]
            failed_devices = {'removed': set([]), 'added': set([])}
            failed_ancillary_devices = {'removed': set([]), 'added': set([])}
            # Second call raises to break out of rpc_loop().
            process_network_ports.side_effect = [
                failed_devices,
                Exception('Fake exception to get out of the loop')]
            n_agent = self._build_agent()
            # Hack to test loop
            # We start method and expect it will raise after 2nd loop
            # If something goes wrong, assert_has_calls below will catch it
            try:
                n_agent.rpc_loop(interface_polling)
            except Exception:
                pass
            # FIXME(salv-orlando): There should not be assertions on log
            # messages
            log_exception.assert_called_once_with(
                "Error while processing VIF ports")
            process_p_events.assert_has_calls([
                mock.call(reply_ge_1, set(), set(), devices_not_ready,
                          failed_devices, failed_ancillary_devices, set()),
                mock.call(reply_ge_2, set(['tap0']), set(), devices_not_ready,
                          failed_devices, failed_ancillary_devices,
                          set())
            ])
            process_network_ports.assert_has_calls([
                mock.call({'current': set(['tap0']),
                           'removed': set([]),
                           'added': set(['tap0'])}, False),
            ])
            self.assertTrue(update_stale.called)
            self._verify_mock_calls()
class TunnelTestOFCtl(TunnelTest, ovs_test_base.OVSOFCtlTestBase):
    """Runs the TunnelTest cases with the OVSOFCtlTestBase fixture."""
    pass
class TunnelTestRyu(TunnelTest, ovs_test_base.OVSRyuTestBase):
    """Runs the TunnelTest cases with the OVSRyuTestBase fixture."""
    pass
class TunnelTestUseVethInterco(TunnelTest):
    """TunnelTest variant where the integration and physical bridges are
    interconnected with a veth pair ('int-<br>'/'phy-<br>') instead of OVS
    patch ports."""

    USE_VETH_INTERCONNECTION = True

    def _define_expected_calls(self, arp_responder=False):
        """Rebuilds the expected mock-call lists for the veth wiring; the
        ordering of these lists must match the agent's actual call order."""
        self.mock_int_bridge_cls_expected = [
            mock.call(self.INT_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_phys_bridge_cls_expected = [
            mock.call(self.MAP_TUN_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_tun_bridge_cls_expected = [
            mock.call(self.TUN_BRIDGE,
                      datapath_type=mock.ANY),
        ]
        self.mock_int_bridge_expected = [
            mock.call.create(),
            mock.call.set_secure_mode(),
            mock.call.setup_controllers(mock.ANY),
            mock.call.setup_default_table(),
        ]
        self.mock_map_tun_bridge_expected = [
            mock.call.create(),
            mock.call.set_secure_mode(),
            mock.call.setup_controllers(mock.ANY),
            mock.call.setup_default_table(),
            mock.call.add_port('phy-%s' % self.MAP_TUN_BRIDGE),
        ]
        # int bridge gets the 'int-' end of the veth pair as a plain port.
        self.mock_int_bridge_expected += [
            mock.call.db_get_val('Interface', 'int-%s' % self.MAP_TUN_BRIDGE,
                                 'type', log_errors=False),
            mock.call.add_port('int-%s' % self.MAP_TUN_BRIDGE)
        ]
        # Traffic between the bridges is dropped until flows are set up.
        self.mock_int_bridge_expected += [
            mock.call.drop_port(in_port=self.MAP_TUN_INT_OFPORT),
        ]
        self.mock_map_tun_bridge_expected += [
            mock.call.drop_port(in_port=self.MAP_TUN_PHY_OFPORT),
        ]
        self.mock_aux_bridge = self.ovs_bridges[self.AUX_BRIDGE]
        self.mock_aux_bridge_expected = [
        ]
        # Tunnel bridge still uses patch ports to reach the int bridge.
        self.mock_tun_bridge_expected = [
            mock.call.create(secure_mode=True),
            mock.call.setup_controllers(mock.ANY),
            mock.call.port_exists('patch-int'),
            nonzero(mock.call.port_exists()),
            mock.call.add_patch_port('patch-int', 'patch-tun'),
        ]
        self.mock_int_bridge_expected += [
            mock.call.port_exists('patch-tun'),
            mock.call.add_patch_port('patch-tun', 'patch-int')
        ]
        self.mock_int_bridge_expected += [
            mock.call.get_vif_ports((ovs_lib.INVALID_OFPORT,
                                     ovs_lib.UNASSIGNED_OFPORT)),
            mock.call.get_ports_attributes(
                'Port', columns=['name', 'other_config', 'tag'], ports=[])
        ]
        self.mock_tun_bridge_expected += [
            mock.call.setup_default_table(self.INT_OFPORT, arp_responder),
        ]
        # Any stale 'int-' device is deleted before the veth pair is created.
        self.ipdevice_expected = [
            mock.call('int-%s' % self.MAP_TUN_BRIDGE),
            mock.call().exists(),
            nonzero(mock.call().exists()),
            mock.call().link.delete()
        ]
        self.ipwrapper_expected = [
            mock.call(),
            mock.call().add_veth('int-%s' % self.MAP_TUN_BRIDGE,
                                 'phy-%s' % self.MAP_TUN_BRIDGE)
        ]
        self.get_bridges_expected = [mock.call(), mock.call()]
        # Both veth endpoints are brought up, and udevadm settle is run so
        # the devices exist before use.
        self.inta_expected = [mock.call.link.set_up()]
        self.intb_expected = [mock.call.link.set_up()]
        self.execute_expected = [mock.call(['udevadm', 'settle',
                                            '--timeout=10'])]
class TunnelTestUseVethIntercoOFCtl(TunnelTestUseVethInterco,
                                    ovs_test_base.OVSOFCtlTestBase):
    """Veth-interconnection tests with the OVSOFCtlTestBase fixture."""
    pass
class TunnelTestUseVethIntercoRyu(TunnelTestUseVethInterco,
                                  ovs_test_base.OVSRyuTestBase):
    """Veth-interconnection tests with the OVSRyuTestBase fixture."""
    pass
class TunnelTestWithMTU(TunnelTestUseVethInterco):
    """Veth-interconnection variant that also sets an MTU on the pair."""

    VETH_MTU = 1500

    def _define_expected_calls(self, arp_responder=False):
        super(TunnelTestWithMTU, self)._define_expected_calls(arp_responder)
        # Both veth endpoints are expected to have their MTU set.
        self.inta_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
        self.intb_expected.append(mock.call.link.set_mtu(self.VETH_MTU))
class TunnelTestWithMTUOFCtl(TunnelTestWithMTU,
                             ovs_test_base.OVSOFCtlTestBase):
    """Veth-with-MTU tests with the OVSOFCtlTestBase fixture."""
    pass
class TunnelTestWithMTURyu(TunnelTestWithMTU,
                           ovs_test_base.OVSRyuTestBase):
    """Veth-with-MTU tests with the OVSRyuTestBase fixture."""
    pass
| 40.068241 | 79 | 0.602974 |
3776b9afb675f9988813c44f404acf4939b7d3c7 | 267 | py | Python | chapters/06_recurrent_neural_networks_and_natural_language_processing/04_arxiv/helpers/attribute_dictionary.py | Asurada2015/TF-_for_MI | 5fafdb78286b122036fa9aecf2a4be72ea4673e1 | [
"Apache-2.0"
] | 8 | 2018-01-09T05:29:01.000Z | 2019-03-03T13:40:51.000Z | chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/helpers/attribute_dictionary.py | Asurada2015/TF-_for_MI | 5fafdb78286b122036fa9aecf2a4be72ea4673e1 | [
"Apache-2.0"
] | null | null | null | chapters/06_recurrent_neural_networks_and_natural_language_processing/03_ocr/helpers/attribute_dictionary.py | Asurada2015/TF-_for_MI | 5fafdb78286b122036fa9aecf2a4be72ea4673e1 | [
"Apache-2.0"
] | 6 | 2017-10-25T02:17:59.000Z | 2018-11-08T01:58:32.000Z | class AttrDict(dict):
    def __getattr__(self, key):
        # Expose dict entries as attributes; a missing key surfaces as
        # AttributeError, as the attribute protocol requires.
        if key not in self:
            raise AttributeError
        return self[key]
    def __setattr__(self, key, value):
        # Only pre-existing keys may be assigned via attribute syntax;
        # new attributes cannot be created on this mapping.
        if key not in self:
            raise AttributeError
self[key] = value | 24.272727 | 38 | 0.580524 |
1a31cb5eeda4736094255147ec14dd4714968cbe | 258 | py | Python | codility.com/stack_and_queues/stack.py | Jagrmi-C/jagrmitest | 7c6cf61317972e5d7ddc4fdced0b4e5e6dd52e96 | [
"MIT"
] | null | null | null | codility.com/stack_and_queues/stack.py | Jagrmi-C/jagrmitest | 7c6cf61317972e5d7ddc4fdced0b4e5e6dd52e96 | [
"MIT"
] | null | null | null | codility.com/stack_and_queues/stack.py | Jagrmi-C/jagrmitest | 7c6cf61317972e5d7ddc4fdced0b4e5e6dd52e96 | [
"MIT"
] | null | null | null |
# Fixed-capacity stack demo backed by a pre-allocated list.
# `size` is the logical depth; slots past it still hold stale values.
stack = [9, 2, 3, 1, 4]
size = 0


def push(x):
    """Store x in the next free slot and grow the logical size."""
    global size
    stack[size] = x
    size = size + 1


def pop():
    """Return the top element and shrink the logical size."""
    global size
    top = stack[size - 1]
    size = size - 1
    return top


push(5)
push(6)
push(1)
print(stack)
print(pop())
print(stack)
print(pop())
print(stack)
| 9.214286 | 23 | 0.562016 |
17f5f313a8cc3ae0c0addd94fcdcc97f6554f8f4 | 1,116 | py | Python | src/fairtest/examples/error_profiling.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 42 | 2017-01-12T13:59:23.000Z | 2022-03-01T01:44:12.000Z | src/fairtest/examples/error_profiling.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 3 | 2019-05-24T21:02:51.000Z | 2019-11-15T15:36:17.000Z | src/fairtest/examples/error_profiling.py | columbia/fairtest | 8696051c9276f127ab8b2f437850f845ff0ca786 | [
"Apache-2.0"
] | 20 | 2017-01-12T23:07:10.000Z | 2021-08-11T09:13:50.000Z | """
Run FairTest Error Profiling Investigations on Movie Recommender Dataset
Usage: python error_profiling.py
"""
import fairtest.utils.prepare_data as prepare
from fairtest import Testing, train, test, report, DataSource
import sys
def main(argv=sys.argv):
    """Run a FairTest error-profiling investigation on the recommender data.

    Args:
        argv ([str]): command line arguments; the script accepts none
            besides the program name.
    """
    # NOTE(review): the default binds sys.argv at import time -- presumably
    # fine for this standalone script, but worth confirming.
    if len(argv) != 1:
        usage(argv)

    FILENAME = "../../../data/recommender/recommendations.txt"
    OUTPUT_DIR = "."

    data = prepare.data_from_csv(FILENAME, sep='\t',
                                 to_drop=['Types', 'Avg Movie Age',
                                          'Avg Recommended Rating',
                                          'Avg Seen Rating'])
    # Profile the RMSE prediction error across Gender groups.
    SENS = ['Gender']
    TARGET = 'RMSE'
    EXPL = []

    data_source = DataSource(data)

    # Instantiate the experiment
    inv = Testing(data_source, SENS, TARGET, EXPL, random_state=0)

    # Train the classifier
    train([inv])

    # Evaluate on the testing set
    test([inv])

    # Create the report
    report([inv], "error_profiling", OUTPUT_DIR)
def usage(argv):
    """Print a usage message and exit with a non-zero status.

    Args:
        argv ([str]): command line arguments; argv[0] is the program name.
    """
    # Fix: the original used the Python-2-only print statement
    # (`print "..."`), which is a SyntaxError under Python 3. The call
    # form below behaves identically on both interpreters.
    print("Usage:%s" % argv[0])
    exit(-1)
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status
    # (None maps to exit status 0).
    sys.exit(main())
| 23.744681 | 72 | 0.590502 |
e2e86b5b7d4c1e3d9155672271eb53a9cecb9176 | 21,917 | py | Python | argostranslate/translate.py | argosopentechnologies/Argos-Translate | c834ef224418a830abe8ca4ed4e942f4ea07cbca | [
"MIT"
] | 1 | 2020-04-30T13:12:47.000Z | 2020-04-30T13:12:47.000Z | argostranslate/translate.py | argosopentechnologies/Argos-Translate | c834ef224418a830abe8ca4ed4e942f4ea07cbca | [
"MIT"
] | null | null | null | argostranslate/translate.py | argosopentechnologies/Argos-Translate | c834ef224418a830abe8ca4ed4e942f4ea07cbca | [
"MIT"
] | null | null | null | import logging
import ctranslate2
import sentencepiece as spm
import stanza
from argostranslate import package, settings, models, sbd, apis, fewshot
from argostranslate.utils import info, error
class Hypothesis:
    """A candidate translation together with its quality score.

    Attributes:
        value (str): The hypothetical translation value
        score (float): The score representing the quality of the translation
    """

    def __init__(self, value, score):
        self.value = value
        self.score = score

    def __lt__(self, other):
        # Hypotheses order by score so lists of them can be sorted.
        return self.score < other.score

    def __repr__(self):
        return "({}, {})".format(repr(self.value), self.score)

    def __str__(self):
        return self.__repr__()
class ITranslation:
    """Base interface for a translation between two Languages.

    Attributes:
        from_lang (Language): The Language this Translation translates from.
        to_lang (Language): The Language this Translation translates to.
    """

    def translate(self, input_text):
        """Translate input_text from self.from_lang to self.to_lang.

        Args:
            input_text (str): The text to be translated.

        Returns:
            str: input_text translated.
        """
        best = self.hypotheses(input_text, num_hypotheses=1)[0]
        return best.value

    def hypotheses(self, input_text, num_hypotheses=4):
        """Produce ranked translation hypotheses for input_text.

        Args:
            input_text (str): The text to be translated.
            num_hypotheses (int): Number of hypothetic results expected

        Returns:
            [Hypothesis]: List of translation hypotheses
        """
        raise NotImplementedError()

    @staticmethod
    def split_into_paragraphs(input_text):
        """Split input_text on newlines into a list of paragraphs.

        Args:
            input_text (str): The text to be split.

        Returns:
            [str]: A list of paragraphs.
        """
        return input_text.split("\n")

    @staticmethod
    def combine_paragraphs(paragraphs):
        """Join a list of paragraphs back into one string.

        Args:
            paragraphs ([str]): A list of paragraphs.

        Returns:
            str: the paragraphs joined with newlines.
        """
        return "\n".join(paragraphs)

    def __repr__(self):
        return f"{self.from_lang} -> {self.to_lang}"

    def __str__(self):
        return repr(self).replace("->", "→")
class Language:
    """A language that can be translated from/to.

    Attributes:
        code (str): The code representing the language.
        name (str): The human readable name of the language.
        translations_from ([ITranslation]): Translations whose source is
            this language.
        translations_to ([ITranslation]): Translations whose target is
            this language.
    """

    def __init__(self, code, name):
        self.code = code
        self.name = name
        self.translations_from = []
        self.translations_to = []

    def __str__(self):
        return self.name

    def get_translation(self, to):
        """Return an installed Translation from this Language to another.

        Args:
            to (Language): The Language to look for a Translation to.

        Returns:
            ITranslation: The first matching entry in translations_from,
                or None if there is no translation to `to`.
        """
        for candidate in self.translations_from:
            if candidate.to_lang.code == to.code:
                return candidate
        return None
class PackageTranslation(ITranslation):
    """A Translation that is installed with a package"""

    def __init__(self, from_lang, to_lang, pkg):
        self.from_lang = from_lang
        self.to_lang = to_lang
        self.pkg = pkg
        # CTranslate2 translator is created lazily on first hypotheses() call.
        self.translator = None

    def hypotheses(self, input_text, num_hypotheses):
        if self.translator is None:
            model_path = str(self.pkg.package_path / "model")
            self.translator = ctranslate2.Translator(model_path, device=settings.device)
        # Translate each paragraph independently, then stitch the results
        # back together hypothesis-by-hypothesis.
        paragraphs = ITranslation.split_into_paragraphs(input_text)
        info("paragraphs:", paragraphs)
        translated_paragraphs = []
        for paragraph in paragraphs:
            translated_paragraphs.append(
                apply_packaged_translation(
                    self.pkg, paragraph, self.translator, num_hypotheses
                )
            )
        info("translated_paragraphs:", translated_paragraphs)

        # Construct new hypotheses using all paragraphs
        hypotheses_to_return = [Hypothesis("", 0) for i in range(num_hypotheses)]
        for i in range(num_hypotheses):
            for translated_paragraph in translated_paragraphs:
                value = ITranslation.combine_paragraphs(
                    [hypotheses_to_return[i].value, translated_paragraph[i].value]
                )
                score = hypotheses_to_return[i].score + translated_paragraph[i].score
                hypotheses_to_return[i] = Hypothesis(value, score)
            # Strip the artificial leading newline from seeding with "".
            hypotheses_to_return[i].value = hypotheses_to_return[i].value.lstrip("\n")
        info("hypotheses_to_return:", hypotheses_to_return)
        return hypotheses_to_return
class IdentityTranslation(ITranslation):
    """A no-op translation whose output is identical to its input."""

    def __init__(self, lang):
        """Creates an IdentityTranslation.

        Args:
            lang (Language): The Language this Translation translates
                from and to.
        """
        self.from_lang = self.to_lang = lang

    def hypotheses(self, input_text, num_hypotheses):
        # Every hypothesis is the unmodified input with a neutral score.
        return [Hypothesis(input_text, 0) for _ in range(num_hypotheses)]
class CompositeTranslation(ITranslation):
    """A translation performed by chaining two Translations in sequence.

    Attributes:
        t1 (ITranslation): The first Translation to apply.
        t2 (ITranslation): The second Translation to apply.
    """

    def __init__(self, t1, t2):
        """Creates a CompositeTranslation.

        Args:
            t1 (ITranslation): The first Translation to apply.
            t2 (ITranslation): The second Translation to apply.
        """
        self.t1 = t1
        self.t2 = t2
        self.from_lang = t1.from_lang
        self.to_lang = t2.to_lang

    def hypotheses(self, input_text, num_hypotheses):
        # Expand every first-hop hypothesis through the second hop
        # (O(n^2) pairs), then keep the best-scoring combinations.
        first_hop = self.t1.hypotheses(input_text, num_hypotheses)
        combined = [
            Hypothesis(second.value, first.score + second.score)
            for first in first_hop
            for second in self.t2.hypotheses(first.value, num_hypotheses)
        ]
        return sorted(combined)[:num_hypotheses]
class CachedTranslation(ITranslation):
    """Caches a translation to improve performance.

    This is done by splitting up the text passed for translation
    into paragraphs and translating each paragraph individually.
    A hash of the paragraphs and their corresponding translations
    are saved from the previous translation and used to improve
    performance on the next one. This is especially useful if you
    are repeatedly translating nearly identical text with a small
    change at the end of it.
    """

    def __init__(self, underlying):
        """Creates a CachedTranslation.

        Args:
            underlying (ITranslation): The underlying translation to cache.
        """
        self.underlying = underlying
        self.from_lang = underlying.from_lang
        self.to_lang = underlying.to_lang
        # Maps paragraph text -> list of Hypothesis from the previous call.
        self.cache = dict()

    def hypotheses(self, input_text, num_hypotheses=4):
        new_cache = dict()  # 'text': ['t1'...('tN')]
        paragraphs = ITranslation.split_into_paragraphs(input_text)
        translated_paragraphs = []
        for paragraph in paragraphs:
            translated_paragraph = self.cache.get(paragraph)
            # If len() of our cached items are different than `num_hypotheses` it means that
            # the search parameter is changed by caller, so we can't re-use cache, and should update it.
            if (
                translated_paragraph is None
                or len(translated_paragraph) != num_hypotheses
            ):
                translated_paragraph = self.underlying.hypotheses(
                    paragraph, num_hypotheses
                )
            new_cache[paragraph] = translated_paragraph
            translated_paragraphs.append(translated_paragraph)
        # Only paragraphs seen in this call survive; stale entries are dropped.
        self.cache = new_cache

        # Construct hypotheses
        hypotheses_to_return = [Hypothesis("", 0) for i in range(num_hypotheses)]
        for i in range(num_hypotheses):
            for j in range(len(translated_paragraphs)):
                value = ITranslation.combine_paragraphs(
                    [hypotheses_to_return[i].value, translated_paragraphs[j][i].value]
                )
                score = (
                    hypotheses_to_return[i].score + translated_paragraphs[j][i].score
                )
                hypotheses_to_return[i] = Hypothesis(value, score)
            # Strip the artificial leading newline from seeding with "".
            hypotheses_to_return[i].value = hypotheses_to_return[i].value.lstrip("\n")
        return hypotheses_to_return
class RemoteTranslation(ITranslation):
    """A translation provided by a remote LibreTranslate server"""

    def __init__(self, from_lang, to_lang, api):
        self.from_lang = from_lang
        self.to_lang = to_lang
        self.api = api

    def hypotheses(self, input_text, num_hypotheses=1):
        """LibreTranslate only supports single hypotheses.

        A list of length num_hypotheses will be returned with identical hypotheses.
        """
        translated = self.api.translate(
            input_text, self.from_lang.code, self.to_lang.code
        )
        hypothesis = Hypothesis(translated, 0)
        return [hypothesis] * num_hypotheses


# Backwards compatibility, renamed in 1.8
LibreTranslateTranslation = RemoteTranslation
class FewShotTranslation(ITranslation):
    """A translation performed with a few shot language model"""

    def __init__(self, from_lang, to_lang, language_model):
        self.from_lang = from_lang
        self.to_lang = to_lang
        self.language_model = language_model

    def hypotheses(self, input_text, num_hypotheses=1):
        # Split into sentences
        DEFAULT_SENTENCE_LENGTH = 250
        sentences = []
        start_index = 0
        while start_index < len(input_text) - 1:
            # Ask the language model for the next sentence boundary.
            prompt = sbd.generate_fewshot_sbd_prompt(input_text[start_index:])
            response = sbd.parse_fewshot_response(self.language_model.infer(prompt))
            detected_sentence_index = sbd.process_seq2seq_sbd(
                input_text[start_index:], response
            )
            if detected_sentence_index == -1:
                # Couldn't find sentence boundary
                sbd_index = start_index + DEFAULT_SENTENCE_LENGTH
            else:
                sbd_index = start_index + detected_sentence_index
            sentences.append(input_text[start_index:sbd_index])
            info("start_index", start_index)
            info("sbd_index", sbd_index)
            info(input_text[start_index:sbd_index])
            start_index = sbd_index
        # Translate sentence-by-sentence and concatenate the results.
        to_return = ""
        for sentence in sentences:
            prompt = fewshot.generate_prompt(
                sentence,
                self.from_lang.name,
                self.from_lang.code,
                self.to_lang.name,
                self.to_lang.code,
            )
            info("fewshot prompt", prompt)
            response = self.language_model.infer(prompt)
            info("fewshot response", response)
            result = fewshot.parse_inference(response)
            info("fewshot result", result)
            to_return += result
        # Only one distinct hypothesis is produced; repeat it as requested.
        return [Hypothesis(to_return, 0)] * num_hypotheses
def apply_packaged_translation(pkg, input_text, translator, num_hypotheses=4):
    """Applies the translation in pkg to translate input_text.

    Args:
        pkg (Package): The package that provides the translation.
        input_text (str): The text to be translated.
        translator (ctranslate2.Translator): The CTranslate2 Translator
        num_hypotheses (int): The number of hypotheses to generate

    Returns:
        [Hypothesis]: A list of Hypothesis's for translating input_text

    """

    info("apply_packaged_translation", input_text)

    # Sentence boundary detection: prefer Stanza when available, otherwise
    # fall back to the seq2seq sbd package.
    if pkg.type == "sbd":
        sentences = [input_text]
    elif settings.stanza_available:
        stanza_pipeline = stanza.Pipeline(
            lang=pkg.from_code,
            dir=str(pkg.package_path / "stanza"),
            processors="tokenize",
            use_gpu=settings.device == "cuda",
            logging_level="WARNING",
        )
        stanza_sbd = stanza_pipeline(input_text)
        sentences = [sentence.text for sentence in stanza_sbd.sentences]
    else:
        DEFAULT_SENTENCE_LENGTH = 250
        sentences = []
        start_index = 0

        # Get sbd translation
        sbd_package = sbd.get_sbd_package()
        assert sbd_package is not None
        sbd_translation = PackageTranslation(None, None, sbd_package)

        while start_index < len(input_text) - 1:
            detected_sentence_index = sbd.detect_sentence(
                input_text[start_index:], sbd_translation
            )
            if detected_sentence_index == -1:
                # Couldn't find sentence boundary
                sbd_index = start_index + DEFAULT_SENTENCE_LENGTH
            else:
                sbd_index = start_index + detected_sentence_index
            sentences.append(input_text[start_index:sbd_index])
            info("start_index", start_index)
            info("sbd_index", sbd_index)
            info(input_text[start_index:sbd_index])
            start_index = sbd_index
    info("sentences", sentences)

    # Tokenization with the package's SentencePiece model.
    sp_model_path = str(pkg.package_path / "sentencepiece.model")
    sp_processor = spm.SentencePieceProcessor(model_file=sp_model_path)
    tokenized = [sp_processor.encode(sentence, out_type=str) for sentence in sentences]
    info("tokenized", tokenized)

    # Translation
    BATCH_SIZE = 32
    translated_batches = translator.translate_batch(
        tokenized,
        replace_unknowns=True,
        max_batch_size=BATCH_SIZE,
        beam_size=max(num_hypotheses, 4),
        num_hypotheses=num_hypotheses,
        length_penalty=0.2,
        return_scores=True,
    )
    info("translated_batches", translated_batches)

    # Build hypotheses: concatenate per-sentence tokens and sum scores.
    value_hypotheses = []
    for i in range(num_hypotheses):
        translated_tokens = []
        cumulative_score = 0
        for translated_batch in translated_batches:
            translated_tokens += translated_batch[i]["tokens"]
            cumulative_score += translated_batch[i]["score"]
        detokenized = "".join(translated_tokens)
        # SentencePiece marks word boundaries with U+2581; restore spaces.
        detokenized = detokenized.replace("▁", " ")
        value = detokenized
        if len(value) > 0 and value[0] == " ":
            # Remove space at the beginning of the translation added
            # by the tokenizer.
            value = value[1:]
        hypothesis = Hypothesis(value, cumulative_score)
        value_hypotheses.append(hypothesis)
    info("value_hypotheses:", value_hypotheses)
    return value_hypotheses
def get_installed_languages():
    """Returns a list of Languages installed from packages"""

    info("get_installed_languages")

    if settings.model_provider == settings.ModelProvider.OPENNMT:
        packages = package.get_installed_packages()

        # If stanza not available filter for sbd available
        if not settings.stanza_available:
            sbd_packages = list(filter(lambda x: x.type == "sbd", packages))
            sbd_available_codes = set()
            for sbd_package in sbd_packages:
                sbd_available_codes = sbd_available_codes.union(sbd_package.from_codes)
            packages = list(
                filter(lambda x: x.from_code in sbd_available_codes, packages)
            )

        # Filter for translate packages
        packages = list(filter(lambda x: x.type == "translate", packages))

        # Load languages and translations from packages
        language_of_code = dict()
        for pkg in packages:
            if pkg.from_code not in language_of_code:
                language_of_code[pkg.from_code] = Language(pkg.from_code, pkg.from_name)
            if pkg.to_code not in language_of_code:
                language_of_code[pkg.to_code] = Language(pkg.to_code, pkg.to_name)
            from_lang = language_of_code[pkg.from_code]
            to_lang = language_of_code[pkg.to_code]
            # Wrap each package translation in a cache for performance.
            translation_to_add = CachedTranslation(
                PackageTranslation(from_lang, to_lang, pkg)
            )
            from_lang.translations_from.append(translation_to_add)
            to_lang.translations_to.append(translation_to_add)

        languages = list(language_of_code.values())

        # Add translations so everything can translate to itself
        for language in languages:
            identity_translation = IdentityTranslation(language)
            language.translations_from.append(identity_translation)
            language.translations_to.append(identity_translation)

        # Pivot through intermediate languages to add translations
        # that don't already exist
        for language in languages:
            keep_adding_translations = True
            # Repeat until a full pass adds nothing new (transitive closure).
            while keep_adding_translations:
                keep_adding_translations = False
                for translation in language.translations_from:
                    for translation_2 in translation.to_lang.translations_from:
                        if language.get_translation(translation_2.to_lang) is None:
                            # The language currently doesn't have a way to translate
                            # to this language
                            keep_adding_translations = True
                            composite_translation = CompositeTranslation(
                                translation, translation_2
                            )
                            language.translations_from.append(composite_translation)
                            translation_2.to_lang.translations_to.append(
                                composite_translation
                            )

    elif settings.model_provider == settings.ModelProvider.LIBRETRANSLATE:
        # TODO: Add API key and custom URL support
        libretranslate_api = apis.LibreTranslateAPI()
        supported_languages = (
            libretranslate_api.languages()
        )  # [{"code":"en", "name":"English"}]
        languages = [Language(l["code"], l["name"]) for l in supported_languages]
        # LibreTranslate can translate between any pair of its languages.
        for from_lang in languages:
            for to_lang in languages:
                translation = LibreTranslateTranslation(
                    from_lang, to_lang, libretranslate_api
                )
                from_lang.translations_from.append(translation)
                to_lang.translations_to.append(translation)

    elif settings.model_provider == settings.ModelProvider.OPENAI:
        language_model = apis.OpenAIAPI(settings.openai_api_key)
        # TODO
        languages = [Language("en", "English"), Language("es", "Spanish")]
        for from_lang in languages:
            for to_lang in languages:
                translation = FewShotTranslation(from_lang, to_lang, language_model)
                from_lang.translations_from.append(translation)
                to_lang.translations_to.append(translation)

    # Put English first if available so it shows up as the from language in the gui
    en_index = None
    for i, language in enumerate(languages):
        if language.code == "en":
            en_index = i
            break
    english = None
    if en_index is not None:
        english = languages.pop(en_index)
    languages.sort(key=lambda x: x.name)
    if english is not None:
        languages = [english] + languages

    return languages
def load_installed_languages():
    """Deprecated 1.2, use get_installed_languages"""
    # Thin alias kept for backwards compatibility with pre-1.2 callers.
    return get_installed_languages()
def get_language_from_code(code):
    """Gets a language object from a code

    An exception will be thrown if an installed language with this
    code can not be found.

    Args:
        code (str): The ISO 639 code of the language

    Returns:
        translate.Language: The language object
    """
    # Indexing an empty match list raises IndexError when no installed
    # language has this code, matching the documented behavior.
    matches = [lang for lang in get_installed_languages() if lang.code == code]
    return matches[0]
def get_translation_from_codes(from_code, to_code):
    """Gets a translation object from codes for from and to languages

    An exception will be thrown if an installed translation between the from lang
    and to lang can not be found.

    Args:
        from_code (str): The ISO 639 code of the source language
        to_code (str): The ISO 639 code of the target language

    Returns:
        translate.ITranslation: The translation object
    """
    # Fix: the previous implementation called get_language_by_iso_code,
    # which is not defined anywhere in this module, so every call raised
    # NameError. The defined lookup helper is get_language_from_code.
    from_lang = get_language_from_code(from_code)
    to_lang = get_language_from_code(to_code)
    return from_lang.get_translation(to_lang)
def translate(q, from_code, to_code):
    """Translate a string of text

    Args:
        q (str): The text to translate
        from_code (str): The ISO 639 code of the source language
        to_code (str): The ISO 639 code of the target language

    Returns:
        str: The translated text
    """
    # Look up the installed translation for this language pair and apply it.
    return get_translation_from_codes(from_code, to_code).translate(q)
| 35.929508 | 104 | 0.635306 |
f01a43283c521c92123cc32d4cf9a2539384f1e0 | 2,812 | py | Python | src/spifi/skeleton.py | carolinearndt/spifi | f4e36ad1c4661872a46dd21fd22715fcb9fb305d | [
"MIT"
] | null | null | null | src/spifi/skeleton.py | carolinearndt/spifi | f4e36ad1c4661872a46dd21fd22715fcb9fb305d | [
"MIT"
] | null | null | null | src/spifi/skeleton.py | carolinearndt/spifi | f4e36ad1c4661872a46dd21fd22715fcb9fb305d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following lines in the
[options.entry_points] section in setup.cfg:
console_scripts =
fibonacci = spifi.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
import argparse
import sys
import logging
from spifi import __version__
__author__ = "Caroline Arndt"
__copyright__ = "Caroline Arndt"
__license__ = "mit"
_logger = logging.getLogger(__name__)
def fib(n):
    """Fibonacci example function

    Args:
      n (int): integer

    Returns:
      int: n-th Fibonacci number
    """
    assert n > 0
    # Iterate the recurrence n-1 times starting from fib(1) == fib(2) == 1.
    curr, nxt = 1, 1
    remaining = n - 1
    while remaining:
        curr, nxt = nxt, curr + nxt
        remaining -= 1
    return curr
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    parser = argparse.ArgumentParser(
        description="Just a Fibonnaci demonstration")
    parser.add_argument(
        '--version',
        action='version',
        version='spifi {ver}'.format(ver=__version__))
    # Required positional: which Fibonacci number to compute.
    parser.add_argument(
        dest="n",
        help="n-th Fibonacci number",
        type=int,
        metavar="INT")
    # -v/-vv select the log level; when neither is given, loglevel is None.
    parser.add_argument(
        '-v',
        '--verbose',
        dest="loglevel",
        help="set loglevel to INFO",
        action='store_const',
        const=logging.INFO)
    parser.add_argument(
        '-vv',
        '--very-verbose',
        dest="loglevel",
        help="set loglevel to DEBUG",
        action='store_const',
        const=logging.DEBUG)
    return parser.parse_args(args)
def setup_logging(loglevel):
    """Setup basic logging

    Args:
      loglevel (int): minimum loglevel for emitting messages
    """
    logging.basicConfig(
        level=loglevel,
        stream=sys.stdout,
        format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
def main(args):
    """Main entry point allowing external calls

    Args:
      args ([str]): command line parameter list
    """
    parsed = parse_args(args)
    setup_logging(parsed.loglevel)
    _logger.debug("Starting crazy calculations...")
    result = fib(parsed.n)
    print("The {}-th Fibonacci number is {}".format(parsed.n, result))
    _logger.info("Script ends here")
def run():
    """Entry point for console_scripts
    """
    # Strip the program name; main() expects only the arguments.
    main(sys.argv[1:])


if __name__ == "__main__":
    run()
| 24.034188 | 77 | 0.634424 |
ee98623e0e624b19ba4936bb08af98ebe4adc079 | 604 | py | Python | tests/beta_tests/test_how_many_times_does_it_contain.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | tests/beta_tests/test_how_many_times_does_it_contain.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | tests/beta_tests/test_how_many_times_does_it_contain.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | import unittest
from katas.beta.how_many_times_does_it_contain import string_counter
class StringCounterTestCase(unittest.TestCase):
    """Checks string_counter(haystack, needle) against known counts."""

    def test_equal_1(self):
        self.assertEqual(string_counter('Hello world', 'o'), 2)

    def test_equal_2(self):
        self.assertEqual(string_counter(
            "Wait isn't it supposed to be cynical?", 'i'), 4)

    def test_equal_3(self):
        self.assertEqual(string_counter(
            "I'm gona be the best code warrior ever dad", 'r'), 4)

    def test_equal_4(self):
        # Non-letter needles (here '?') must be counted too.
        self.assertEqual(string_counter('Do you like Harry Potter?', '?'), 1)
59267cb880bb8c0b26f340c7c40832c8bd6045bf | 1,406 | py | Python | clients/kratos/python/test/test_submit_self_service_settings_flow_with_profile_method_body.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 77 | 2020-02-14T17:27:36.000Z | 2022-03-25T08:44:52.000Z | clients/kratos/python/test/test_submit_self_service_settings_flow_with_profile_method_body.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 125 | 2020-02-07T21:45:52.000Z | 2022-03-31T12:54:24.000Z | clients/kratos/python/test/test_submit_self_service_settings_flow_with_profile_method_body.py | russelg/sdk | 2515b35981784319bd7d58fcf0b5ab85b501b62f | [
"Apache-2.0"
] | 44 | 2020-01-31T22:05:47.000Z | 2022-03-09T14:41:22.000Z | """
Ory Kratos API
Documentation for all public and administrative Ory Kratos APIs. Public and administrative APIs are exposed on different ports. Public APIs can face the public internet without any protection while administrative APIs should never be exposed without prior authorization. To protect the administative API port you should use something like Nginx, Ory Oathkeeper, or any other technology capable of authorizing incoming requests. # noqa: E501
The version of the OpenAPI document: v0.8.2-alpha.1
Contact: hi@ory.sh
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_kratos_client
from ory_kratos_client.model.submit_self_service_settings_flow_with_profile_method_body import SubmitSelfServiceSettingsFlowWithProfileMethodBody
class TestSubmitSelfServiceSettingsFlowWithProfileMethodBody(unittest.TestCase):
    """SubmitSelfServiceSettingsFlowWithProfileMethodBody unit test stubs"""

    def setUp(self):
        """No fixtures are required for these generated stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testSubmitSelfServiceSettingsFlowWithProfileMethodBody(self):
        """Test SubmitSelfServiceSettingsFlowWithProfileMethodBody"""
        # FIXME: construct object with mandatory attributes with example values
        # model = SubmitSelfServiceSettingsFlowWithProfileMethodBody()  # noqa: E501
# Run the stub suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 38 | 446 | 0.783784 |
1ac9445618f54d86864a2fcd151419766c8802b2 | 22 | py | Python | psfsubtraction/prepare/__init__.py | hamogu/psfsubtraction | 719f3f4da86abf1c4ba48b1b98d57d191f73188f | [
"MIT"
] | 2 | 2017-04-04T18:50:31.000Z | 2019-01-23T00:41:39.000Z | psfsubtraction/prepare/__init__.py | hamogu/psfsubtraction | 719f3f4da86abf1c4ba48b1b98d57d191f73188f | [
"MIT"
] | 8 | 2016-06-19T23:40:25.000Z | 2019-01-11T15:56:39.000Z | psfsubtraction/prepare/__init__.py | hamogu/psfsubtraction | 719f3f4da86abf1c4ba48b1b98d57d191f73188f | [
"MIT"
] | 3 | 2016-06-19T23:34:00.000Z | 2016-07-08T17:43:13.000Z | from .center import *
| 11 | 21 | 0.727273 |
6230a70610b27414aeac8500a10345c97265a053 | 624 | py | Python | release/src/router/wget/testenv/conf/hook_sample.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | release/src/router/wget/testenv/conf/hook_sample.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | release/src/router/wget/testenv/conf/hook_sample.py | zhoutao0712/rtn11pb1 | 09e6b6c7ef4b91be0a9374daeacc3ac9f2fa3a05 | [
"Apache-2.0"
] | null | null | null | from exc.test_failed import TestFailed
from conf import hook
""" Hook: SampleHook
This a sample file for how a new hook should be defined.
Any errors should always be reported by raising a TestFailed exception instead
of returning a true or false value.
"""
@hook(alias='SampleHookAlias')
class SampleHook:
def __init__(self, sample_hook_arg):
# do conf initialization here
self.arg = sample_hook_arg
def __call__(self, test_obj):
# implement hook here
# if you need the test case instance, refer to test_obj
if False:
raise TestFailed ("Reason")
pass
| 27.130435 | 78 | 0.697115 |
249231239cfdbe7d146e7727b2c94443cca8d35c | 74,369 | py | Python | tests/test_arrow_dataset.py | jbragg/datasets | 8992869fecea434764f5639d1b0ef7660aeb04cd | [
"Apache-2.0"
] | null | null | null | tests/test_arrow_dataset.py | jbragg/datasets | 8992869fecea434764f5639d1b0ef7660aeb04cd | [
"Apache-2.0"
] | null | null | null | tests/test_arrow_dataset.py | jbragg/datasets | 8992869fecea434764f5639d1b0ef7660aeb04cd | [
"Apache-2.0"
] | null | null | null | import os
import pickle
import tempfile
from functools import partial
from unittest import TestCase
import numpy as np
import pandas as pd
import pyarrow as pa
from absl.testing import parameterized
import datasets.arrow_dataset
from datasets import concatenate_datasets, load_from_disk, temp_seed
from datasets.arrow_dataset import Dataset, transmit_format
from datasets.dataset_dict import DatasetDict
from datasets.features import ClassLabel, Features, Sequence, Value
from datasets.info import DatasetInfo
from .utils import require_tf, require_torch
class Unpicklable:
    """Sentinel object that refuses to be pickled.

    Assigned to dataset internals in tests to prove they are never serialized.
    """

    def __getstate__(self):
        # Any serialization attempt must blow up loudly.
        raise pickle.PicklingError()
def picklable_map_function(x):
    """Extract the trailing numeric suffix of ``filename`` as an ``id`` column.

    Defined at module level so multiprocessing workers can pickle it.
    """
    suffix = x["filename"].rsplit("_", 1)[-1]
    return {"id": int(suffix)}
def picklable_map_function_with_indices(x, i):
    """Return the example index as an ``id`` column (module-level, so picklable)."""
    return dict(id=i)
def picklable_filter_function(x):
    """Keep only examples whose trailing filename index is below 10."""
    index = int(x["filename"].rsplit("_", 1)[-1])
    return index < 10
# Parameterization for absl: run every test once against an in-memory dataset
# and once against an on-disk (memory-mapped) dataset.
IN_MEMORY_PARAMETERS = [
    {"testcase_name": "in_memory", "in_memory": True},
    {"testcase_name": "on_disk", "in_memory": False},
]
@parameterized.named_parameters(IN_MEMORY_PARAMETERS)
class BaseDatasetTest(TestCase):
    def setUp(self):
        """Patch the module logger so any attempt to pickle it raises immediately."""
        # google colab doesn't allow to pickle loggers
        # so we want to make sure each tests passes without pickling the logger
        def reduce_ex(self):
            raise pickle.PicklingError()
        datasets.arrow_dataset.logger.__reduce_ex__ = reduce_ex
    def _create_dummy_dataset(self, in_memory: bool, tmp_dir: str, multiple_columns=False) -> Dataset:
        """Build a small fixture dataset, kept in memory or materialized under tmp_dir.

        With multiple_columns=False: one "filename" string column of 30 rows.
        With multiple_columns=True: three 4-row columns (int, str, bool).
        """
        if multiple_columns:
            data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"], "col_3": [False, True, False, True]}
            dset = Dataset.from_dict(data)
        else:
            dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        if not in_memory:
            dset = self._to(in_memory, tmp_dir, dset)
        return dset
    def _to(self, in_memory, tmp_dir, *datasets):
        """Rewrite each dataset through an identity map, in memory or to tmp_dir cache files.

        Cache file names are numbered sequentially, skipping files that already exist.
        Returns a single dataset when one is given, otherwise a list.
        """
        if in_memory:
            datasets = [dataset.map(keep_in_memory=True) for dataset in datasets]
        else:
            # Find the first unused cache-file index so repeated calls don't collide.
            start = 0
            while os.path.isfile(os.path.join(tmp_dir, f"dataset{start}.arrow")):
                start += 1
            datasets = [
                dataset.map(cache_file_name=os.path.join(tmp_dir, f"dataset{start + i}.arrow"))
                for i, dataset in enumerate(datasets)
            ]
        return datasets if len(datasets) > 1 else datasets[0]
    def test_dummy_dataset(self, in_memory):
        """Smoke-test features and row/column access for both fixture shapes."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            self.assertDictEqual(
                dset.features, Features({"col_1": Value("int64"), "col_2": Value("string"), "col_3": Value("bool")})
            )
            self.assertEqual(dset[0]["col_1"], 3)
            self.assertEqual(dset["col_1"][0], 3)
            del dset
    def test_dataset_getitem(self, in_memory):
        """Exercise __getitem__ with ints, negative ints, slices, lists and numpy arrays."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            self.assertEqual(dset[-1]["filename"], "my_name-train_29")
            self.assertEqual(dset["filename"][-1], "my_name-train_29")
            self.assertListEqual(dset[:2]["filename"], ["my_name-train_0", "my_name-train_1"])
            self.assertListEqual(dset["filename"][:2], ["my_name-train_0", "my_name-train_1"])
            self.assertEqual(dset[:-1]["filename"][-1], "my_name-train_28")
            self.assertEqual(dset["filename"][:-1][-1], "my_name-train_28")
            self.assertListEqual(dset[[0, -1]]["filename"], ["my_name-train_0", "my_name-train_29"])
            self.assertListEqual(dset[np.array([0, -1])]["filename"], ["my_name-train_0", "my_name-train_29"])
            del dset
    def test_dummy_dataset_pickle(self, in_memory):
        """Round-trip a (selected) dataset through pickle, with and without on-disk indices."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            tmp_file = os.path.join(tmp_dir, "dset.pt")
            dset = self._create_dummy_dataset(in_memory, tmp_dir).select(range(10))
            with open(tmp_file, "wb") as f:
                pickle.dump(dset, f)
            with open(tmp_file, "rb") as f:
                dset = pickle.load(f)
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
            dset = self._create_dummy_dataset(in_memory, tmp_dir).select(
                range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow")
            )
            if not in_memory:
                # On-disk data/indices must be reloadable from file paths, so pickling
                # must succeed even when the tables themselves refuse to pickle.
                dset._data = Unpicklable()
                dset._indices = Unpicklable()
            with open(tmp_file, "wb") as f:
                pickle.dump(dset, f)
            with open(tmp_file, "rb") as f:
                dset = pickle.load(f)
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
    def test_dummy_dataset_serialize(self, in_memory):
        """Round-trip a dataset through save_to_disk / load_from_disk, with and without indices."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir).select(range(10))
            dataset_path = os.path.join(tmp_dir, "my_dataset")
            dset.save_to_disk(dataset_path)
            dset = dset.load_from_disk(dataset_path)
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
            dset = self._create_dummy_dataset(in_memory, tmp_dir).select(
                range(10), indices_cache_file_name=os.path.join(tmp_dir, "ind.arrow")
            )
            if not in_memory:
                # Serialization must not need to pickle the underlying tables.
                dset._data = Unpicklable()
                dset._indices = Unpicklable()
            dset.save_to_disk(dataset_path)
            dset = dset.load_from_disk(dataset_path)
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
    def test_dummy_dataset_load_from_disk(self, in_memory):
        """Check the module-level load_from_disk reads back a saved dataset intact."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir).select(range(10))
            dataset_path = os.path.join(tmp_dir, "my_dataset")
            dset.save_to_disk(dataset_path)
            dset = load_from_disk(dataset_path)
            self.assertEqual(len(dset), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertEqual(dset[0]["filename"], "my_name-train_0")
            self.assertEqual(dset["filename"][0], "my_name-train_0")
            del dset
    def test_set_format_numpy_multiple_columns(self, in_memory):
        """Check numpy formatting: column selection, fingerprint change, reset,
        temporary formatting via formatted_as, and output_all_columns."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            fingerprint = dset._fingerprint
            dset.set_format(type="numpy", columns=["col_1"])
            self.assertEqual(len(dset[0]), 1)
            self.assertIsInstance(dset[0]["col_1"], np.ndarray)
            self.assertListEqual(list(dset[0]["col_1"].shape), [])
            self.assertEqual(dset[0]["col_1"].item(), 3)
            self.assertIsInstance(dset["col_1"], np.ndarray)
            self.assertListEqual(list(dset["col_1"].shape), [4])
            np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0]))
            # set_format is recorded in the fingerprint.
            self.assertNotEqual(dset._fingerprint, fingerprint)
            dset.reset_format()
            with dset.formatted_as(type="numpy", columns=["col_1"]):
                self.assertEqual(len(dset[0]), 1)
                self.assertIsInstance(dset[0]["col_1"], np.ndarray)
                self.assertListEqual(list(dset[0]["col_1"].shape), [])
                self.assertEqual(dset[0]["col_1"].item(), 3)
                self.assertIsInstance(dset["col_1"], np.ndarray)
                self.assertListEqual(list(dset["col_1"].shape), [4])
                np.testing.assert_array_equal(dset["col_1"], np.array([3, 2, 1, 0]))
            # Leaving the context restores the default (unformatted) state.
            self.assertEqual(dset.format["type"], None)
            self.assertEqual(dset.format["format_kwargs"], {})
            self.assertEqual(dset.format["columns"], dset.column_names)
            self.assertEqual(dset.format["output_all_columns"], False)
            dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True)
            self.assertEqual(len(dset[0]), 3)
            self.assertIsInstance(dset[0]["col_2"], str)
            self.assertEqual(dset[0]["col_2"], "a")
            dset.set_format(type="numpy", columns=["col_1", "col_2"])
            self.assertEqual(len(dset[0]), 2)
            self.assertEqual(dset[0]["col_2"].item(), "a")
            del dset
    @require_torch
    def test_set_format_torch(self, in_memory):
        """Check torch formatting returns tensors, and that string columns raise."""
        import torch

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            dset.set_format(type="torch", columns=["col_1"])
            self.assertEqual(len(dset[0]), 1)
            self.assertIsInstance(dset[0]["col_1"], torch.Tensor)
            self.assertIsInstance(dset["col_1"], torch.Tensor)
            self.assertListEqual(list(dset[0]["col_1"].shape), [])
            self.assertEqual(dset[0]["col_1"].item(), 3)
            dset.set_format(type="torch", columns=["col_1"], output_all_columns=True)
            self.assertEqual(len(dset[0]), 3)
            self.assertIsInstance(dset[0]["col_2"], str)
            self.assertEqual(dset[0]["col_2"], "a")
            dset.set_format(type="torch", columns=["col_1", "col_2"])
            with self.assertRaises(TypeError):
                # strings cannot be converted to torch tensors
                dset[0]
            del dset
    @require_tf
    def test_set_format_tf(self, in_memory):
        """Check tensorflow formatting returns tf tensors, including string columns."""
        import tensorflow as tf

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            dset.set_format(type="tensorflow", columns=["col_1"])
            self.assertEqual(len(dset[0]), 1)
            self.assertIsInstance(dset[0]["col_1"], tf.Tensor)
            self.assertListEqual(list(dset[0]["col_1"].shape), [])
            self.assertEqual(dset[0]["col_1"].numpy().item(), 3)
            dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True)
            self.assertEqual(len(dset[0]), 3)
            self.assertIsInstance(dset[0]["col_2"], str)
            self.assertEqual(dset[0]["col_2"], "a")
            dset.set_format(type="tensorflow", columns=["col_1", "col_2"])
            self.assertEqual(len(dset[0]), 2)
            # tf string tensors hold bytes, hence the decode.
            self.assertEqual(dset[0]["col_2"].numpy().decode("utf-8"), "a")
            del dset
    def test_set_format_pandas(self, in_memory):
        """Check pandas formatting returns single-row DataFrames for item access."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            dset.set_format(type="pandas", columns=["col_1"])
            self.assertEqual(len(dset[0].columns), 1)
            self.assertIsInstance(dset[0], pd.DataFrame)
            self.assertListEqual(list(dset[0].shape), [1, 1])
            self.assertEqual(dset[0]["col_1"].item(), 3)
            dset.set_format(type="pandas", columns=["col_1", "col_2"])
            self.assertEqual(len(dset[0].columns), 2)
            self.assertEqual(dset[0]["col_2"].item(), "a")
            del dset
    def test_transmit_format(self, in_memory):
        """Check the transmit_format decorator preserves the fingerprint on identity transforms."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            transform = datasets.arrow_dataset.transmit_format(lambda x: x)
            # make sure identity transform doesn't apply unnecessary format
            self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
            dset.set_format(**dset.format)
            self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
            # check lists comparisons
            dset.set_format(columns=["col_1"])
            self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
            dset.set_format(columns=["col_1", "col_2"])
            self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
            dset.set_format("numpy", columns=["col_1", "col_2"])
            self.assertEqual(dset._fingerprint, transform(dset)._fingerprint)
            del dset
    def test_cast_(self, in_memory):
        """Check in-place cast_ converts a column's type and updates the fingerprint."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            features = dset.features
            features["col_1"] = Value("float64")
            # Reverse the feature order to also verify casting handles reordering.
            features = Features({k: features[k] for k in list(features)[::-1]})
            fingerprint = dset._fingerprint
            dset.cast_(features)
            self.assertEqual(dset.num_columns, 3)
            self.assertEqual(dset.features["col_1"], Value("float64"))
            self.assertIsInstance(dset[0]["col_1"], float)
            self.assertNotEqual(dset._fingerprint, fingerprint)
            del dset
    def test_remove_columns_(self, in_memory):
        """Check in-place remove_columns_ with a single name and with all names."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            fingerprint = dset._fingerprint
            dset.remove_columns_(column_names="col_1")
            self.assertEqual(dset.num_columns, 2)
            self.assertListEqual(list(dset.column_names), ["col_2", "col_3"])
            del dset
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            dset.remove_columns_(column_names=["col_1", "col_2", "col_3"])
            self.assertEqual(dset.num_columns, 0)
            self.assertNotEqual(dset._fingerprint, fingerprint)
            del dset
    def test_rename_column_(self, in_memory):
        """Check in-place rename_column_ renames and updates the fingerprint."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            fingerprint = dset._fingerprint
            dset.rename_column_(original_column_name="col_1", new_column_name="new_name")
            self.assertEqual(dset.num_columns, 3)
            self.assertListEqual(list(dset.column_names), ["new_name", "col_2", "col_3"])
            self.assertNotEqual(dset._fingerprint, fingerprint)
            del dset
    def test_concatenate(self, in_memory):
        """Concatenate three datasets and check rows, file counts and merged info."""
        data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1),
                Dataset.from_dict(data2, info=info2),
                Dataset.from_dict(data3),
            )
            dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
            self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
            self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7])
            self.assertEqual(len(dset_concat._data_files), 0 if in_memory else 3)
            self.assertEqual(len(dset_concat._indices_data_files), 0)
            self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2\n\n")
            del dset_concat, dset1, dset2, dset3
    def test_concatenate_formatted(self, in_memory):
        """The concatenated dataset keeps a format only when all inputs share it."""
        data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1),
                Dataset.from_dict(data2, info=info2),
                Dataset.from_dict(data3),
            )
            dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
            dset1.set_format("numpy")
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            self.assertEqual(dset_concat.format["type"], None)
            dset2.set_format("numpy")
            dset3.set_format("numpy")
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            self.assertEqual(dset_concat.format["type"], "numpy")
            del dset_concat, dset1, dset2, dset3
    def test_concatenate_with_indices(self, in_memory):
        """Concatenate datasets that carry in-memory indices mappings; mixing
        in-memory and on-disk indices must be rejected."""
        data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1),
                Dataset.from_dict(data2, info=info2),
                Dataset.from_dict(data3),
            )
            dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
            dset1, dset2, dset3 = dset1.select([0, 1, 2]), dset2.select([0, 1, 2]), dset3
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
            self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
            self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7])
            self.assertEqual(len(dset_concat._data_files), 0 if in_memory else 3)
            self.assertEqual(len(dset_concat._indices_data_files), 0)
            self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2\n\n")
        with tempfile.TemporaryDirectory() as tmp_dir:
            # One dataset has an on-disk indices cache while the others are in memory:
            # this combination is unsupported and must raise.
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1).select(
                    [0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, "i.arrow")
                ),
                Dataset.from_dict(data2, info=info2).select([0, 1, 2]),
                Dataset.from_dict(data3),
            )
            with self.assertRaises(ValueError):
                _ = concatenate_datasets([dset1, dset2, dset3])
            del dset_concat, dset1, dset2, dset3
    def test_concatenate_with_indices_from_disk(self, in_memory):
        """Concatenate datasets whose indices mappings are all cached on disk."""
        data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1),
                Dataset.from_dict(data2, info=info2),
                Dataset.from_dict(data3),
            )
            dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
            dset1, dset2, dset3 = (
                dset1.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")),
                dset2.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")),
                dset3.select([0, 1], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")),
            )
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
            self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
            self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7])
            self.assertEqual(len(dset_concat._data_files), 0 if in_memory else 3)
            self.assertEqual(len(dset_concat._indices_data_files), 0)  # now in memory since an offset is applied
            self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2\n\n")
            del dset_concat, dset1, dset2, dset3
    def test_concatenate_pickle_with_history(self, in_memory):
        """Pickle datasets with an in-place-operation history (remove_columns_),
        then concatenate the reloaded copies and check the result."""
        data1, data2, data3 = {"id": [0, 1, 2] * 2}, {"id": [3, 4, 5] * 2}, {"id": [6, 7], "foo": ["bar", "bar"]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1),
                Dataset.from_dict(data2, info=info2),
                Dataset.from_dict(data3),
            )
            dset1, dset2, dset3 = self._to(in_memory, tmp_dir, dset1, dset2, dset3)
            dset1, dset2, dset3 = (
                dset1.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow")),
                dset2.select([0, 1, 2], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow")),
                dset3.select([0, 1], indices_cache_file_name=os.path.join(tmp_dir, "i3.arrow")),
            )
            # in-place op recorded in the dataset's history
            dset3.remove_columns_("foo")
            if not in_memory:
                # On-disk datasets must be picklable without pickling their tables.
                dset1._data, dset2._data, dset3._data = Unpicklable(), Unpicklable(), Unpicklable()
            dset1, dset2, dset3 = [pickle.loads(pickle.dumps(d)) for d in (dset1, dset2, dset3)]
            dset_concat = concatenate_datasets([dset1, dset2, dset3])
            if not in_memory:
                dset_concat._data = Unpicklable()
            dset_concat = pickle.loads(pickle.dumps(dset_concat))
            self.assertEqual((len(dset1), len(dset2), len(dset3)), (3, 3, 2))
            self.assertEqual(len(dset_concat), len(dset1) + len(dset2) + len(dset3))
            self.assertListEqual(dset_concat["id"], [0, 1, 2, 3, 4, 5, 6, 7])
            self.assertEqual(len(dset_concat._data_files), 0 if in_memory else 3)
            self.assertEqual(len(dset_concat._inplace_history), 0 if in_memory else 3)
            self.assertEqual(len(dset_concat._indices_data_files), 0)
            self.assertEqual(dset_concat.info.description, "Dataset1\n\nDataset2\n\n")
            del dset_concat, dset1, dset2, dset3
    def test_flatten(self, in_memory):
        """Check flatten_ collapses nested features into dotted column names."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict(
                {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10},
                features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}),
            )
            dset = self._to(in_memory, tmp_dir, dset)
            fingerprint = dset._fingerprint
            dset.flatten_()
            self.assertListEqual(dset.column_names, ["a.b.c", "foo"])
            self.assertListEqual(list(dset.features.keys()), ["a.b.c", "foo"])
            self.assertDictEqual(dset.features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}))
            self.assertNotEqual(dset._fingerprint, fingerprint)
            del dset
    def test_map(self, in_memory):
        """Exercise Dataset.map: plain mapping, with_indices, interruption/cache
        cleanup, and mapping over a formatted dataset."""
        # standard
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            fingerprint = dset._fingerprint
            dset_test = dset.map(lambda x: {"name": x["filename"][:-2], "id": int(x["filename"].split("_")[-1])})
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test.features,
                Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
            )
            self.assertListEqual(dset_test["id"], list(range(30)))
            self.assertNotEqual(dset_test._fingerprint, fingerprint)
            del dset, dset_test
        # with indices
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset_test_with_indices = dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True)
            self.assertEqual(len(dset_test_with_indices), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test_with_indices.features,
                Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
            )
            self.assertListEqual(dset_test_with_indices["id"], list(range(30)))
            del dset, dset_test_with_indices
        # interrupted
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)

            def func(x, i):
                if i == 4:
                    raise KeyboardInterrupt()
                return {"name": x["filename"][:-2], "id": i}

            # An interrupted map must not leave a partial cache file behind.
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            self.assertRaises(
                KeyboardInterrupt,
                dset.map,
                function=func,
                with_indices=True,
                cache_file_name=tmp_file,
                writer_batch_size=2,
            )
            self.assertFalse(os.path.exists(tmp_file))
            dset_test_with_indices = dset.map(
                lambda x, i: {"name": x["filename"][:-2], "id": i},
                with_indices=True,
                cache_file_name=tmp_file,
                writer_batch_size=2,
            )
            self.assertTrue(os.path.exists(tmp_file))
            self.assertEqual(len(dset_test_with_indices), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test_with_indices.features,
                Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")}),
            )
            self.assertListEqual(dset_test_with_indices["id"], list(range(30)))
            del dset, dset_test_with_indices
        # formatted
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
            dset.set_format("numpy", columns=["col_1"])
            dset_test = dset.map(lambda x: {"col_1_plus_one": x["col_1"].item() + 1})
            self.assertEqual(len(dset_test), 4)
            self.assertEqual(dset_test.format["type"], "numpy")
            self.assertIsInstance(dset_test["col_1"], np.ndarray)
            self.assertIsInstance(dset_test["col_1_plus_one"], np.ndarray)
            self.assertListEqual(sorted(dset_test[0].keys()), ["col_1", "col_1_plus_one"])
            self.assertListEqual(sorted(dset_test.column_names), ["col_1", "col_1_plus_one", "col_2", "col_3"])
            del dset, dset_test
    def test_map_multiprocessing(self, in_memory):
        """Exercise Dataset.map with num_proc > 1: picklable functions,
        with_indices, and lambdas (which require multiprocess/pathos)."""
        with tempfile.TemporaryDirectory() as tmp_dir:  # standard
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            fingerprint = dset._fingerprint
            dset_test = dset.map(picklable_map_function, num_proc=2)
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test.features,
                Features({"filename": Value("string"), "id": Value("int64")}),
            )
            # one shard file per worker process
            self.assertEqual(len(dset_test._data_files), 0 if in_memory else 2)
            self.assertListEqual(dset_test["id"], list(range(30)))
            self.assertNotEqual(dset_test._fingerprint, fingerprint)
            del dset, dset_test
        with tempfile.TemporaryDirectory() as tmp_dir:  # with_indices
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            fingerprint = dset._fingerprint
            dset_test = dset.map(picklable_map_function_with_indices, num_proc=3, with_indices=True)
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test.features,
                Features({"filename": Value("string"), "id": Value("int64")}),
            )
            self.assertEqual(len(dset_test._data_files), 0 if in_memory else 3)
            self.assertListEqual(dset_test["id"], list(range(30)))
            self.assertNotEqual(dset_test._fingerprint, fingerprint)
            del dset, dset_test
        with tempfile.TemporaryDirectory() as tmp_dir:  # lambda (requires multiprocess from pathos)
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            fingerprint = dset._fingerprint
            dset_test = dset.map(lambda x: {"id": int(x["filename"].split("_")[-1])}, num_proc=2)
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test.features,
                Features({"filename": Value("string"), "id": Value("int64")}),
            )
            self.assertEqual(len(dset_test._data_files), 0 if in_memory else 2)
            self.assertListEqual(dset_test["id"], list(range(30)))
            self.assertNotEqual(dset_test._fingerprint, fingerprint)
            del dset, dset_test
    def test_new_features(self, in_memory):
        """Check map accepts an explicit target Features schema (here a ClassLabel)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            features = Features({"filename": Value("string"), "label": ClassLabel(names=["positive", "negative"])})
            dset_test_with_indices = dset.map(lambda x, i: {"label": i % 2}, with_indices=True, features=features)
            self.assertEqual(len(dset_test_with_indices), 30)
            self.assertDictEqual(
                dset_test_with_indices.features,
                features,
            )
            del dset, dset_test_with_indices
    def test_map_batched(self, in_memory):
        """Exercise batched map: plain, under a formatted context, and with indices."""
        def map_batched(example):
            # batched map receives lists of values per column
            return {"filename_new": [x + "_extension" for x in example["filename"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset_test_batched = dset.map(map_batched, batched=True)
            self.assertEqual(len(dset_test_batched), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test_batched.features, Features({"filename": Value("string"), "filename_new": Value("string")})
            )
            del dset, dset_test_batched
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            with dset.formatted_as("numpy", columns=["filename"]):
                dset_test_batched = dset.map(map_batched, batched=True)
                self.assertEqual(len(dset_test_batched), 30)
                self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
                self.assertDictEqual(
                    dset_test_batched.features,
                    Features({"filename": Value("string"), "filename_new": Value("string")}),
                )
            del dset, dset_test_batched

        def map_batched_with_indices(example, idx):
            return {"filename_new": [x + "_extension_" + str(idx) for x in example["filename"]]}

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset_test_with_indices_batched = dset.map(map_batched_with_indices, batched=True, with_indices=True)
            self.assertEqual(len(dset_test_with_indices_batched), 30)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(
                dset_test_with_indices_batched.features,
                Features({"filename": Value("string"), "filename_new": Value("string")}),
            )
            del dset, dset_test_with_indices_batched
    def test_map_nested(self, in_memory):
        """Check consecutive maps that write nested dict columns; the second map
        replaces the previously-written nested column."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"field": ["a", "b"]})
            dset = self._to(in_memory, tmp_dir, dset)
            dset = dset.map(lambda example: {"otherfield": {"capital": example["field"].capitalize()}})
            dset = dset.map(lambda example: {"otherfield": {"append_x": example["field"] + "x"}})
            self.assertEqual(dset[0], {"field": "a", "otherfield": {"append_x": "ax"}})
            del dset
    @require_torch
    def test_map_torch(self, in_memory):
        """Check a map returning torch tensors is stored as a float64 sequence."""
        import torch

        def func(example):
            return {"tensor": torch.Tensor([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset_test = dset.map(func)
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(
                dset_test.features, Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))})
            )
            self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
            del dset, dset_test
    @require_tf
    def test_map_tf(self, in_memory):
        """Check a map returning tensorflow tensors is stored as a float64 sequence."""
        import tensorflow as tf

        def func(example):
            return {"tensor": tf.constant([1.0, 2, 3])}

        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset_test = dset.map(func)
            self.assertEqual(len(dset_test), 30)
            self.assertDictEqual(
                dset_test.features, Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))})
            )
            self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
            del dset, dset_test
def test_map_numpy(self, in_memory):
def func(example):
return {"tensor": np.array([1.0, 2, 3])}
with tempfile.TemporaryDirectory() as tmp_dir:
dset = self._create_dummy_dataset(in_memory, tmp_dir)
dset_test = dset.map(func)
self.assertEqual(len(dset_test), 30)
self.assertDictEqual(
dset_test.features, Features({"filename": Value("string"), "tensor": Sequence(Value("float64"))})
)
self.assertListEqual(dset_test[0]["tensor"], [1, 2, 3])
del dset, dset_test
    def test_map_remove_colums(self, in_memory):
        """map(remove_columns=...) drops the named columns from both rows and features."""
        # NOTE(review): "colums" in the method name is a typo; kept as-is so the test id stays stable.
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset = dset.map(lambda x, i: {"name": x["filename"][:-2], "id": i}, with_indices=True)
            self.assertTrue("id" in dset[0])
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "name": Value("string"), "id": Value("int64")})
            )
            # Identity map whose only effect is removing the "id" column.
            dset = dset.map(lambda x: x, remove_columns=["id"])
            self.assertTrue("id" not in dset[0])
            self.assertDictEqual(dset.features, Features({"filename": Value("string"), "name": Value("string")}))
            del dset
    def test_filter(self, in_memory):
        """filter() keeps only matching rows, preserves features and output format, and updates the fingerprint."""
        # keep only first five examples
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            fingerprint = dset._fingerprint
            dset_filter_first_five = dset.filter(lambda x, i: i < 5, with_indices=True)
            self.assertEqual(len(dset_filter_first_five), 5)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_filter_first_five.features, Features({"filename": Value("string")}))
            # Filtering is a new transform, so the cache fingerprint must change.
            self.assertNotEqual(dset_filter_first_five._fingerprint, fingerprint)
            del dset, dset_filter_first_five
        # filter filenames with even id at the end + formatted
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            dset.set_format("numpy")
            fingerprint = dset._fingerprint
            dset_filter_even_num = dset.filter(lambda x: (int(x["filename"].item()[-1]) % 2 == 0))
            self.assertEqual(len(dset_filter_even_num), 15)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_filter_even_num.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_filter_even_num._fingerprint, fingerprint)
            # The numpy format set on the source dataset must carry over to the filtered one.
            self.assertEqual(dset_filter_even_num.format["type"], "numpy")
            del dset, dset_filter_even_num
    def test_filter_multiprocessing(self, in_memory):
        """filter(num_proc=2) splits the work across processes; result equals a single-process filter."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            fingerprint = dset._fingerprint
            # picklable_filter_function is module-level so it can be sent to worker processes.
            dset_filter_first_ten = dset.filter(picklable_filter_function, num_proc=2)
            self.assertEqual(len(dset_filter_first_ten), 10)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_filter_first_ten.features, Features({"filename": Value("string")}))
            # Two worker processes -> two on-disk cache files (none when kept in memory).
            self.assertEqual(len(dset_filter_first_ten._data_files), 0 if in_memory else 2)
            self.assertNotEqual(dset_filter_first_ten._fingerprint, fingerprint)
            del dset, dset_filter_first_ten
def test_keep_features_after_transform_specified(self, in_memory):
features = Features(
{"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
)
def invert_labels(x):
return {"labels": [(1 - label) for label in x["labels"]]}
with tempfile.TemporaryDirectory() as tmp_dir:
dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
dset = self._to(in_memory, tmp_dir, dset)
inverted_dset = dset.map(invert_labels, features=features)
self.assertEqual(inverted_dset.features.type, features.type)
self.assertDictEqual(inverted_dset.features, features)
del dset, inverted_dset
    def test_keep_features_after_transform_unspecified(self, in_memory):
        """Even without features= passed to map(), the original features should be kept."""
        features = Features(
            {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
        )
        def invert_labels(x):
            # Flip 0 <-> 1; values stay valid ClassLabel indices.
            return {"labels": [(1 - label) for label in x["labels"]]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
            dset = self._to(in_memory, tmp_dir, dset)
            inverted_dset = dset.map(invert_labels)
            self.assertEqual(inverted_dset.features.type, features.type)
            self.assertDictEqual(inverted_dset.features, features)
            del dset, inverted_dset
    def test_keep_features_after_transform_to_file(self, in_memory):
        """Features must survive a map() written to an explicit cache file and reloaded from it."""
        features = Features(
            {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
        )
        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
            dset = self._to(in_memory, tmp_dir, dset)
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            dset.map(invert_labels, cache_file_name=tmp_file)
            # Re-open the arrow file directly: the stored schema must round-trip the features.
            inverted_dset = Dataset.from_file(tmp_file)
            self.assertEqual(inverted_dset.features.type, features.type)
            self.assertDictEqual(inverted_dset.features, features)
            del dset, inverted_dset
    def test_keep_features_after_transform_to_memory(self, in_memory):
        """Features must survive a map() kept in memory (keep_in_memory=True, no cache file)."""
        features = Features(
            {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
        )
        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
            dset = self._to(in_memory, tmp_dir, dset)
            inverted_dset = dset.map(invert_labels, keep_in_memory=True)
            self.assertEqual(inverted_dset.features.type, features.type)
            self.assertDictEqual(inverted_dset.features, features)
            del dset, inverted_dset
    def test_keep_features_after_loading_from_cache(self, in_memory):
        """Features must be kept on a map() result that is backed by on-disk cache files."""
        features = Features(
            {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
        )
        def invert_labels(x):
            return {"labels": [(1 - label) for label in x["labels"]]}
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
            dset = self._to(in_memory, tmp_dir, dset)
            tmp_file1 = os.path.join(tmp_dir, "test1.arrow")
            tmp_file2 = os.path.join(tmp_dir, "test2.arrow")
            # Run the same transform twice with distinct cache files; the second result is checked.
            inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file1)
            inverted_dset = dset.map(invert_labels, cache_file_name=tmp_file2)
            self.assertGreater(len(inverted_dset.cache_files), 0)
            self.assertEqual(inverted_dset.features.type, features.type)
            self.assertDictEqual(inverted_dset.features, features)
            del dset, inverted_dset
    def test_keep_features_with_new_features(self, in_memory):
        """Existing features are kept while newly created columns get inferred features."""
        features = Features(
            {"tokens": Sequence(Value("string")), "labels": Sequence(ClassLabel(names=["negative", "positive"]))}
        )
        def invert_labels(x):
            # Flips "labels" in place and also emits a brand-new "labels2" column.
            return {"labels": [(1 - label) for label in x["labels"]], "labels2": x["labels"]}
        # "labels" keeps its ClassLabel feature; "labels2" is inferred as a plain int64 sequence.
        expected_features = Features(
            {
                "tokens": Sequence(Value("string")),
                "labels": Sequence(ClassLabel(names=["negative", "positive"])),
                "labels2": Sequence(Value("int64")),
            }
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = Dataset.from_dict({"tokens": [["foo"] * 5] * 10, "labels": [[1] * 5] * 10}, features=features)
            dset = self._to(in_memory, tmp_dir, dset)
            inverted_dset = dset.map(invert_labels)
            self.assertEqual(inverted_dset.features.type, expected_features.type)
            self.assertDictEqual(inverted_dset.features, expected_features)
            del dset, inverted_dset
    def test_select(self, in_memory):
        """select() picks rows by index, rejects invalid indices cleanly, and preserves features/format."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # select every two example
            indices = list(range(0, len(dset), 2))
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            fingerprint = dset._fingerprint
            dset_select_even = dset.select(indices, indices_cache_file_name=tmp_file)
            self.assertEqual(len(dset_select_even), 15)
            # Every kept filename ends in an even digit.
            for row in dset_select_even:
                self.assertEqual(int(row["filename"][-1]) % 2, 0)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_select_even.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_select_even._fingerprint, fingerprint)
            del dset, dset_select_even
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            bad_indices = list(range(5))
            bad_indices[3] = "foo"
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            # A non-integer index must make select() fail ...
            self.assertRaises(
                Exception,
                dset.select,
                indices=bad_indices,
                indices_cache_file_name=tmp_file,
                writer_batch_size=2,
            )
            # ... without leaving a partial indices cache file behind.
            self.assertFalse(os.path.exists(tmp_file))
            dset.set_format("numpy")
            dset_select_five = dset.select(
                range(5),
                indices_cache_file_name=tmp_file,
                writer_batch_size=2,
            )
            self.assertTrue(os.path.exists(tmp_file))
            self.assertEqual(len(dset_select_five), 5)
            # The numpy format set on the source must carry over to the selection.
            self.assertEqual(dset_select_five.format["type"], "numpy")
            for i, row in enumerate(dset_select_five):
                self.assertEqual(int(row["filename"].item()[-1]), i)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_select_five.features, Features({"filename": Value("string")}))
            del dset, dset_select_five
    def test_select_then_map(self, in_memory):
        """Datasets produced by select() must map independently (with and without indices cache files)."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # In-memory indices mappings.
            d1 = dset.select([0])
            d2 = dset.select([1])
            d1 = d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])})
            d2 = d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])})
            self.assertEqual(d1[0]["id"], 0)
            self.assertEqual(d2[0]["id"], 1)
            del dset, d1, d2
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Same scenario, but the indices mappings are written to separate cache files.
            d1 = dset.select([0], indices_cache_file_name=os.path.join(tmp_dir, "i1.arrow"))
            d2 = dset.select([1], indices_cache_file_name=os.path.join(tmp_dir, "i2.arrow"))
            d1 = d1.map(lambda x: {"id": int(x["filename"].split("_")[-1])})
            d2 = d2.map(lambda x: {"id": int(x["filename"].split("_")[-1])})
            self.assertEqual(d1[0]["id"], 0)
            self.assertEqual(d2[0]["id"], 1)
            del dset, d1, d2
    def test_pickle_after_many_transforms_on_disk(self, in_memory):
        """A dataset chained through rename/select/map must survive pickling without serializing the table."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertEqual(len(dset._data_files), 0 if in_memory else 1)
            dset.rename_column_("filename", "file")
            self.assertListEqual(dset.column_names, ["file"])
            dset = dset.select(range(5))
            self.assertEqual(len(dset), 5)
            dset = dset.map(lambda x: {"id": int(x["file"][-1])})
            self.assertListEqual(sorted(dset.column_names), ["file", "id"])
            dset.rename_column_("id", "number")
            self.assertListEqual(sorted(dset.column_names), ["file", "number"])
            dset = dset.select([1])
            self.assertEqual(dset[0]["file"], "my_name-train_1")
            self.assertEqual(dset[0]["number"], 1)
            # The select() left an explicit indices mapping pointing at original row 1.
            self.assertEqual(dset._indices["indices"].to_pylist(), [1])
            # On-disk datasets record in-place transforms done after the last cache write,
            # so they can be replayed on unpickling; in-memory datasets don't need this.
            self.assertEqual(
                dset._inplace_history,
                [] if in_memory else [{"transforms": [("rename_column_", ("id", "number"), {})]}],
            )
            if not in_memory:
                dset._data = Unpicklable()  # check that we don't pickle the entire table
            pickled = pickle.dumps(dset)
            loaded = pickle.loads(pickled)
            self.assertEqual(loaded[0]["file"], "my_name-train_1")
            self.assertEqual(loaded[0]["number"], 1)
            del dset, loaded
    def test_shuffle(self, in_memory):
        """shuffle() reorders rows deterministically per seed and cooperates with temp_seed."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            fingerprint = dset._fingerprint
            dset_shuffled = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)
            self.assertEqual(len(dset_shuffled), 30)
            # Fixed seed -> fixed permutation; these two rows pin it down.
            self.assertEqual(dset_shuffled[0]["filename"], "my_name-train_28")
            self.assertEqual(dset_shuffled[2]["filename"], "my_name-train_10")
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_shuffled.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_shuffled._fingerprint, fingerprint)
            # Reproducibility
            tmp_file = os.path.join(tmp_dir, "test_2.arrow")
            dset_shuffled_2 = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)
            self.assertListEqual(dset_shuffled["filename"], dset_shuffled_2["filename"])
            # Compatible with temp_seed
            with temp_seed(42):
                d1 = dset.shuffle()
            with temp_seed(42):
                d2 = dset.shuffle()
                d3 = dset.shuffle()
            # Same temp seed -> identical order and fingerprint; a further shuffle diverges.
            self.assertListEqual(d1["filename"], d2["filename"])
            self.assertEqual(d1._fingerprint, d2._fingerprint)
            self.assertNotEqual(d3["filename"], d2["filename"])
            self.assertNotEqual(d3._fingerprint, d2._fingerprint)
            del dset, dset_shuffled, dset_shuffled_2, d1, d2, d3
    def test_sort(self, in_memory):
        """sort() orders rows by a column, supports reverse=True, and keeps the output format."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Keep only 10 examples
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            dset = dset.select(range(10), indices_cache_file_name=tmp_file)
            tmp_file = os.path.join(tmp_dir, "test_2.arrow")
            dset = dset.shuffle(seed=1234, indices_cache_file_name=tmp_file)
            self.assertEqual(len(dset), 10)
            # Sanity check that the shuffle really changed the order before sorting.
            self.assertEqual(dset[0]["filename"], "my_name-train_8")
            self.assertEqual(dset[1]["filename"], "my_name-train_9")
            # Sort
            tmp_file = os.path.join(tmp_dir, "test_3.arrow")
            fingerprint = dset._fingerprint
            dset_sorted = dset.sort("filename", indices_cache_file_name=tmp_file)
            for i, row in enumerate(dset_sorted):
                self.assertEqual(int(row["filename"][-1]), i)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
            # Sort reversed
            tmp_file = os.path.join(tmp_dir, "test_4.arrow")
            fingerprint = dset._fingerprint
            dset_sorted = dset.sort("filename", indices_cache_file_name=tmp_file, reverse=True)
            for i, row in enumerate(dset_sorted):
                self.assertEqual(int(row["filename"][-1]), len(dset_sorted) - 1 - i)
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_sorted.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_sorted._fingerprint, fingerprint)
            # formatted
            dset.set_format("numpy")
            dset_sorted_formatted = dset.sort("filename")
            self.assertEqual(dset_sorted_formatted.format["type"], "numpy")
            del dset, dset_sorted, dset_sorted_formatted
    @require_tf
    def test_export(self, in_memory):
        """Export a dataset to a TFRecord file and check it parses back with the flattened keys."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Export the data
            tfrecord_path = os.path.join(tmp_dir, "test.tfrecord")
            formatted_dset = dset.map(
                lambda ex, i: {
                    "id": i,
                    "question": f"Question {i}",
                    "answers": {"text": [f"Answer {i}-0", f"Answer {i}-1"], "answer_start": [0, 1]},
                },
                with_indices=True,
                remove_columns=["filename"],
            )
            formatted_dset.flatten_()
            formatted_dset.set_format("numpy")
            formatted_dset.export(filename=tfrecord_path, format="tfrecord")
            # Import the data
            import tensorflow as tf
            tf_dset = tf.data.TFRecordDataset([tfrecord_path])
            # flatten_() turned the nested "answers" struct into dotted top-level keys.
            feature_description = {
                "id": tf.io.FixedLenFeature([], tf.int64),
                "question": tf.io.FixedLenFeature([], tf.string),
                "answers.text": tf.io.VarLenFeature(tf.string),
                "answers.answer_start": tf.io.VarLenFeature(tf.int64),
            }
            tf_parsed_dset = tf_dset.map(
                lambda example_proto: tf.io.parse_single_example(example_proto, feature_description)
            )
            # Test that keys match original dataset
            for i, ex in enumerate(tf_parsed_dset):
                self.assertEqual(ex.keys(), formatted_dset[i].keys())
            # Test for equal number of elements
            self.assertEqual(i, len(formatted_dset) - 1)
            del dset, formatted_dset
    def test_train_test_split(self, in_memory):
        """train_test_split() honors absolute/fractional sizes, shuffle=False ordering, and seeds."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            fingerprint = dset._fingerprint
            # Absolute test size, no shuffling: the split is a contiguous cut.
            dset_dict = dset.train_test_split(test_size=10, shuffle=False)
            self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
            dset_train = dset_dict["train"]
            dset_test = dset_dict["test"]
            self.assertEqual(len(dset_train), 20)
            self.assertEqual(len(dset_test), 10)
            self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
            self.assertEqual(dset_train[-1]["filename"], "my_name-train_19")
            self.assertEqual(dset_test[0]["filename"], "my_name-train_20")
            self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
            # Both halves are new transforms with distinct fingerprints.
            self.assertNotEqual(dset_train._fingerprint, fingerprint)
            self.assertNotEqual(dset_test._fingerprint, fingerprint)
            self.assertNotEqual(dset_train._fingerprint, dset_test._fingerprint)
            # Fractional test size (50/50), still unshuffled.
            dset_dict = dset.train_test_split(test_size=0.5, shuffle=False)
            self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
            dset_train = dset_dict["train"]
            dset_test = dset_dict["test"]
            self.assertEqual(len(dset_train), 15)
            self.assertEqual(len(dset_test), 15)
            self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
            self.assertEqual(dset_train[-1]["filename"], "my_name-train_14")
            self.assertEqual(dset_test[0]["filename"], "my_name-train_15")
            self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
            # Absolute train size instead of test size.
            dset_dict = dset.train_test_split(train_size=10, shuffle=False)
            self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
            dset_train = dset_dict["train"]
            dset_test = dset_dict["test"]
            self.assertEqual(len(dset_train), 10)
            self.assertEqual(len(dset_test), 20)
            self.assertEqual(dset_train[0]["filename"], "my_name-train_0")
            self.assertEqual(dset_train[-1]["filename"], "my_name-train_9")
            self.assertEqual(dset_test[0]["filename"], "my_name-train_10")
            self.assertEqual(dset_test[-1]["filename"], "my_name-train_29")
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
            # Shuffled split with a seed: order changes but the set format is propagated.
            dset.set_format("numpy")
            dset_dict = dset.train_test_split(train_size=10, seed=42)
            self.assertListEqual(list(dset_dict.keys()), ["train", "test"])
            dset_train = dset_dict["train"]
            dset_test = dset_dict["test"]
            self.assertEqual(len(dset_train), 10)
            self.assertEqual(len(dset_test), 20)
            self.assertEqual(dset_train.format["type"], "numpy")
            self.assertEqual(dset_test.format["type"], "numpy")
            self.assertNotEqual(dset_train[0]["filename"].item(), "my_name-train_0")
            self.assertNotEqual(dset_train[-1]["filename"].item(), "my_name-train_9")
            self.assertNotEqual(dset_test[0]["filename"].item(), "my_name-train_10")
            self.assertNotEqual(dset_test[-1]["filename"].item(), "my_name-train_29")
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_train.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_test.features, Features({"filename": Value("string")}))
            del dset, dset_test, dset_train, dset_dict
    def test_shard(self, in_memory):
        """shard() splits rows across num_shards, strided by default or contiguous on request."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            dset = dset.select(range(10), indices_cache_file_name=tmp_file)
            self.assertEqual(len(dset), 10)
            # Shard
            tmp_file_1 = os.path.join(tmp_dir, "test_1.arrow")
            fingerprint = dset._fingerprint
            # Default (strided) sharding: shard 1 of 8 over 10 rows -> rows 1 and 9.
            dset_sharded = dset.shard(num_shards=8, index=1, indices_cache_file_name=tmp_file_1)
            self.assertEqual(2, len(dset_sharded))
            self.assertEqual(["my_name-train_1", "my_name-train_9"], dset_sharded["filename"])
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_sharded.features, Features({"filename": Value("string")}))
            self.assertNotEqual(dset_sharded._fingerprint, fingerprint)
            # Shard contiguous
            tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
            dset_sharded_contiguous = dset.shard(
                num_shards=3, index=0, contiguous=True, indices_cache_file_name=tmp_file_2
            )
            self.assertEqual([f"my_name-train_{i}" for i in (0, 1, 2, 3)], dset_sharded_contiguous["filename"])
            self.assertDictEqual(dset.features, Features({"filename": Value("string")}))
            self.assertDictEqual(dset_sharded_contiguous.features, Features({"filename": Value("string")}))
            # Test lengths of sharded contiguous
            self.assertEqual(
                [4, 3, 3],
                [
                    len(dset.shard(3, index=i, contiguous=True, indices_cache_file_name=tmp_file_2 + str(i)))
                    for i in range(3)
                ],
            )
            # formatted
            dset.set_format("numpy")
            dset_sharded_formatted = dset.shard(num_shards=3, index=0)
            self.assertEqual(dset_sharded_formatted.format["type"], "numpy")
            del dset, dset_sharded, dset_sharded_contiguous, dset_sharded_formatted
    def test_flatten_indices(self, in_memory):
        """flatten_indices() materializes the indices mapping, re-enabling unique() and keeping format."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            self.assertEqual(dset._indices, None)
            tmp_file = os.path.join(tmp_dir, "test.arrow")
            dset = dset.select(range(10), indices_cache_file_name=tmp_file)
            self.assertEqual(len(dset), 10)
            # select() leaves an indirection layer behind.
            self.assertNotEqual(dset._indices, None)
            # Test unique fail
            with self.assertRaises(ValueError):
                dset.unique(dset.column_names[0])
            tmp_file_2 = os.path.join(tmp_dir, "test_2.arrow")
            fingerprint = dset._fingerprint
            dset.set_format("numpy")
            dset = dset.flatten_indices(cache_file_name=tmp_file_2)
            self.assertEqual(len(dset), 10)
            # After flattening, the indices mapping is gone and the fingerprint changed.
            self.assertEqual(dset._indices, None)
            self.assertNotEqual(dset._fingerprint, fingerprint)
            self.assertEqual(dset.format["type"], "numpy")
            # Test unique works
            dset.unique(dset.column_names[0])
            del dset
    @require_tf
    @require_torch
    def test_format_vectors(self, in_memory):
        """Fixed-length vector columns must come back as the right container per output format."""
        import numpy as np
        import tensorflow as tf
        import torch
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Every row gets a length-3 vector, so shapes are rectangular.
            dset = dset.map(lambda ex, i: {"vec": np.ones(3) * i}, with_indices=True)
            columns = dset.column_names
            # Default (python) format: plain strings and lists.
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (str, list))
                self.assertIsInstance(dset[:2][col], list)
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
            )
            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))
            # TF reports the inner dim as None here even though the data is rectangular.
            self.assertEqual(tuple(dset[:2]["vec"].shape), (2, None))
            self.assertEqual(tuple(dset["vec"][:2].shape), (2, None))
            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], np.ndarray)
                self.assertIsInstance(dset[:2][col], np.ndarray)
                self.assertIsInstance(dset[col], np.ndarray)
            self.assertEqual(dset[:2]["vec"].shape, (2, 3))
            self.assertEqual(dset["vec"][:2].shape, (2, 3))
            dset.set_format("torch", columns=["vec"])
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            # torch.Tensor is only for numerical columns
            self.assertIsInstance(dset[0]["vec"], torch.Tensor)
            self.assertIsInstance(dset[:2]["vec"], torch.Tensor)
            self.assertIsInstance(dset["vec"][:2], torch.Tensor)
            self.assertEqual(dset[:2]["vec"].shape, (2, 3))
            self.assertEqual(dset["vec"][:2].shape, (2, 3))
            del dset
    @require_tf
    @require_torch
    def test_format_ragged_vectors(self, in_memory):
        """Variable-length (ragged) vector columns must map to each framework's ragged representation."""
        import numpy as np
        import tensorflow as tf
        import torch
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Row i gets a vector of length 3 + i, so lengths differ across rows.
            dset = dset.map(lambda ex, i: {"vec": np.ones(3 + i) * i}, with_indices=True)
            columns = dset.column_names
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (str, list))
                self.assertIsInstance(dset[:2][col], list)
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "vec": Sequence(Value("float64"))})
            )
            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[:2][col], (tf.Tensor, tf.RaggedTensor))
                self.assertIsInstance(dset[col], (tf.Tensor, tf.RaggedTensor))
            # dim is None for ragged vectors in tensorflow
            self.assertListEqual(dset[:2]["vec"].shape.as_list(), [2, None])
            self.assertListEqual(dset["vec"][:2].shape.as_list(), [2, None])
            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            for col in columns:
                self.assertIsInstance(dset[0][col], np.ndarray)
                self.assertIsInstance(dset[:2][col], np.ndarray)
                self.assertIsInstance(dset[col], np.ndarray)
            # array is flat (dtype=object) for ragged vectors in numpy
            self.assertEqual(dset[:2]["vec"].shape, (2,))
            self.assertEqual(dset["vec"][:2].shape, (2,))
            dset.set_format("torch", columns=["vec"])
            self.assertIsNotNone(dset[0])
            self.assertIsNotNone(dset[:2])
            # torch.Tensor is only for numerical columns
            self.assertIsInstance(dset[0]["vec"], torch.Tensor)
            self.assertIsInstance(dset[:2]["vec"][0], torch.Tensor)
            self.assertIsInstance(dset["vec"][0], torch.Tensor)
            # pytorch doesn't support ragged tensors, so we should have lists
            self.assertIsInstance(dset[:2]["vec"], list)
            self.assertIsInstance(dset["vec"][:2], list)
            del dset
    @require_tf
    @require_torch
    def test_format_nested(self, in_memory):
        """Formats must be applied recursively to values nested inside struct columns."""
        import numpy as np
        import tensorflow as tf
        import torch
        with tempfile.TemporaryDirectory() as tmp_dir:
            dset = self._create_dummy_dataset(in_memory, tmp_dir)
            # Each row gets a nested struct column {"nested": {"foo": <len-3 vector>}}.
            dset = dset.map(lambda ex: {"nested": [{"foo": np.ones(3)}] * len(ex["filename"])}, batched=True)
            self.assertDictEqual(
                dset.features, Features({"filename": Value("string"), "nested": {"foo": Sequence(Value("float64"))}})
            )
            dset.set_format("tensorflow")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], (tf.Tensor, tf.RaggedTensor))
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))
            self.assertIsInstance(dset["nested"][0]["foo"], (tf.Tensor, tf.RaggedTensor))
            dset.set_format("numpy")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], np.ndarray)
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], np.ndarray)
            self.assertIsInstance(dset["nested"][0]["foo"], np.ndarray)
            dset.set_format("torch", columns="nested")
            self.assertIsNotNone(dset[0])
            self.assertIsInstance(dset[0]["nested"]["foo"], torch.Tensor)
            self.assertIsNotNone(dset[:2])
            self.assertIsInstance(dset[:2]["nested"][0]["foo"], torch.Tensor)
            self.assertIsInstance(dset["nested"][0]["foo"], torch.Tensor)
            del dset
def test_format_pandas(self, in_memory):
with tempfile.TemporaryDirectory() as tmp_dir:
dset = self._create_dummy_dataset(in_memory, tmp_dir, multiple_columns=True)
import pandas as pd
dset.set_format("pandas")
self.assertIsInstance(dset[0], pd.DataFrame)
self.assertIsInstance(dset[:2], pd.DataFrame)
self.assertIsInstance(dset["col_1"], pd.Series)
del dset
    def test_transmit_format_single(self, in_memory):
        """@transmit_format must copy the caller's format onto a Dataset returned by the wrapped function."""
        @transmit_format
        def my_single_transform(self, return_factory, *args, **kwargs):
            # Returns a fresh, unformatted dataset; the decorator should re-apply self's format.
            return return_factory()
        with tempfile.TemporaryDirectory() as tmp_dir:
            return_factory = partial(
                self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
            )
            dset = return_factory()
            dset.set_format("numpy", columns=["col_1"])
            prev_format = dset.format
            transformed_dset = my_single_transform(dset, return_factory)
            self.assertDictEqual(transformed_dset.format, prev_format)
            del dset, transformed_dset
    def test_transmit_format_dict(self, in_memory):
        """@transmit_format must also propagate the format into every split of a returned DatasetDict."""
        @transmit_format
        def my_split_transform(self, return_factory, *args, **kwargs):
            return DatasetDict({"train": return_factory()})
        with tempfile.TemporaryDirectory() as tmp_dir:
            return_factory = partial(
                self._create_dummy_dataset, in_memory=in_memory, tmp_dir=tmp_dir, multiple_columns=True
            )
            dset = return_factory()
            dset.set_format("numpy", columns=["col_1"])
            prev_format = dset.format
            transformed_dset = my_split_transform(dset, return_factory)["train"]
            self.assertDictEqual(transformed_dset.format, prev_format)
            del dset, transformed_dset
class MiscellaneousDatasetTest(TestCase):
    """Constructor and concatenation tests that don't need the in_memory parameterization."""

    def _assert_int_str_dataset(self, dset, data):
        """Shared checks for a dataset built from *data*: values, column order, int64/string features."""
        self.assertListEqual(dset["col_1"], data["col_1"])
        self.assertListEqual(dset["col_2"], data["col_2"])
        self.assertListEqual(list(dset.features.keys()), ["col_1", "col_2"])
        self.assertDictEqual(dset.features, Features({"col_1": Value("int64"), "col_2": Value("string")}))

    def test_from_pandas(self):
        """Dataset.from_pandas infers features, honors explicit features/info, and rejects bad types."""
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        df = pd.DataFrame.from_dict(data)
        # Features inferred from the dataframe dtypes.
        dset = Dataset.from_pandas(df)
        self._assert_int_str_dataset(dset, data)
        # Explicit matching features.
        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        dset = Dataset.from_pandas(df, features=features)
        self._assert_int_str_dataset(dset, data)
        # Explicit features passed both directly and through DatasetInfo.
        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        dset = Dataset.from_pandas(df, features=features, info=DatasetInfo(features=features))
        self._assert_int_str_dataset(dset, data)
        # Mismatched features (int column declared as string) must be rejected by Arrow.
        features = Features({"col_1": Value("string"), "col_2": Value("string")})
        self.assertRaises(pa.ArrowTypeError, Dataset.from_pandas, df, features=features)

    def test_from_dict(self):
        """Dataset.from_dict mirrors from_pandas: inference, explicit features/info, type errors."""
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        dset = Dataset.from_dict(data)
        self._assert_int_str_dataset(dset, data)
        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        dset = Dataset.from_dict(data, features=features)
        self._assert_int_str_dataset(dset, data)
        features = Features({"col_1": Value("int64"), "col_2": Value("string")})
        dset = Dataset.from_dict(data, features=features, info=DatasetInfo(features=features))
        self._assert_int_str_dataset(dset, data)
        features = Features({"col_1": Value("string"), "col_2": Value("string")})
        self.assertRaises(pa.ArrowTypeError, Dataset.from_dict, data, features=features)

    def test_concatenate_mixed_memory_and_disk(self):
        """Concatenating on-disk and in-memory datasets must be refused with a ValueError."""
        data1, data2, data3 = {"id": [0, 1, 2]}, {"id": [3, 4, 5]}, {"id": [6, 7]}
        info1 = DatasetInfo(description="Dataset1")
        info2 = DatasetInfo(description="Dataset2")
        with tempfile.TemporaryDirectory() as tmp_dir:
            # dset1/dset2 are cached to disk by map(); dset3 stays purely in memory.
            dset1, dset2, dset3 = (
                Dataset.from_dict(data1, info=info1).map(cache_file_name=os.path.join(tmp_dir, "d1.arrow")),
                Dataset.from_dict(data2, info=info2).map(cache_file_name=os.path.join(tmp_dir, "d2.arrow")),
                Dataset.from_dict(data3),
            )
            with self.assertRaises(ValueError):
                _ = concatenate_datasets([dset1, dset2, dset3])
            del dset1, dset2, dset3
| 49.979167 | 118 | 0.607753 |
2f58cc83cf2c05e08fc62da37866aebc40ef8860 | 2,894 | py | Python | monk/pip_unit_tests/keras/test_initializer_he_uniform.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 542 | 2019-11-10T12:09:31.000Z | 2022-03-28T11:39:07.000Z | monk/pip_unit_tests/keras/test_initializer_he_uniform.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 117 | 2019-11-12T09:39:24.000Z | 2022-03-12T00:20:41.000Z | monk/pip_unit_tests/keras/test_initializer_he_uniform.py | take2rohit/monk_v1 | 9c567bf2c8b571021b120d879ba9edf7751b9f92 | [
"Apache-2.0"
] | 246 | 2019-11-09T21:53:24.000Z | 2022-03-29T00:57:07.000Z | import os
import sys
import psutil
from monk.keras_prototype import prototype
from monk.compare_prototype import compare
from monk.pip_unit_tests.keras.common import print_start
from monk.pip_unit_tests.keras.common import print_status
import tensorflow as tf
if(tf.__version__[0] == '2'):
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
def test_initializer_he_uniform(system_dict):
forward = True;
test = "test_initializer_he_uniform";
system_dict["total_tests"] += 1;
print_start(test, system_dict["total_tests"])
if(forward):
try:
gtf = prototype(verbose=0);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
network.append(gtf.convolution(output_channels=16));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=32));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
network.append(gtf.flatten());
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=1024));
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=2));
network.append(gtf.softmax());
gtf.Compile_Network(network, data_shape=(3, 32, 32), network_initializer="he_uniform");
x = tf.placeholder(tf.float32, shape=(1, 32, 32, 3))
y = gtf.system_dict["local"]["model"](x);
system_dict["successful_tests"] += 1;
print_status("Pass");
except Exception as e:
system_dict["failed_tests_exceptions"].append(e);
system_dict["failed_tests_lists"].append(test);
forward = False;
print_status("Fail");
else:
system_dict["skipped_tests_lists"].append(test);
print_status("Skipped");
return system_dict
| 34.452381 | 99 | 0.636144 |
d5be12b63e44413ab4d31ff16823026c22b9c953 | 4,445 | py | Python | app.py | newlancealot/BellyButtons | d465f50e30c18ff6d7c2da6444561469599e06de | [
"ADSL"
] | null | null | null | app.py | newlancealot/BellyButtons | d465f50e30c18ff6d7c2da6444561469599e06de | [
"ADSL"
] | null | null | null | app.py | newlancealot/BellyButtons | d465f50e30c18ff6d7c2da6444561469599e06de | [
"ADSL"
] | null | null | null | #################################################
# import dependencies
#################################################
import datetime as dt
import numpy as np
import pandas as pd
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine
# Flask (Server)
from flask import Flask, jsonify, render_template, request, flash, redirect
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Database Setup:sqlite
#################################################
#from flask_sqlalchemy import SQLAlchemy
# The database URI
#app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite:///db/belly_button_biodiversity.sqlite"
#db = SQLAlchemy(app)
engine = create_engine("sqlite:///db/belly_button_biodiversity.sqlite")
Base = automap_base() # reflect an existing database into a new model
Base.prepare(engine, reflect=True) # reflect the tables
Base.classes.keys() # Save reference to the table
conn = engine.connect()
OTU = Base.classes.otu
Samples = Base.classes.samples
Samples_Metadata= Base.classes.samples_metadata
session = Session(engine)# Create our session (link) from Python to the DB
#Routes names
# - '/names'
# - '/otu'
# - '/metadata/<samples>'
# - '/wfreq/<sample>'
# - '/samples/<sample>
#################################################
# Flask Routes
#################################################
@app.route("/")
def default():
return render_template("index.html")
@app.route("/names")
def names():
# """Return a list of sample names."""
# Pandas for sql query
stmt = session.query(Samples).statement
df = pd.read_sql_query(stmt, session.bind)
df.set_index('otu_id', inplace=True)
# Returning list of the column names (sample names)
return jsonify(list(df.columns))
@app.route('/otu')
def otu():
#"""List of OTU descriptions."""
otu_desc = session.query(OTU.lowest_taxonomic_unit_found).all()
otu_descriptions = list(np.ravel(otu_desc))
return jsonify(otu_descriptions)
app.route('/metadata/<sample>')
def sample_metadata(sample):
"""Return the MetaData for a given sample."""
sel = [Samples_Metadata.SAMPLEID, Samples_Metadata.ETHNICITY,
Samples_Metadata.GENDER, Samples_Metadata.AGE,
Samples_Metadata.LOCATION, Samples_Metadata.BBTYPE]
# sample[3:] strips the `BB_` prefix from the sample name to match
# the numeric value of `SAMPLEID` from the database
results = session.query(*sel).\
filter(Samples_Metadata.SAMPLEID == sample[3:]).all()
# Create a dictionary entry for each row of metadata information
sample_metadata = {}
for result in results:
sample_metadata['SAMPLEID'] = result[0]
sample_metadata['ETHNICITY'] = result[1]
sample_metadata['GENDER'] = result[2]
sample_metadata['AGE'] = result[3]
sample_metadata['LOCATION'] = result[4]
sample_metadata['BBTYPE'] = result[5]
return jsonify(sample_metadata)
@app.route('/wfreq/<sample>')
def sample_wfreq(sample):
"""Return the Weekly Washing Frequency as a number."""
# "sample[3:]" to strip "BB_"
results = session.query(Samples_Metadata.WFREQ).\
filter(Samples_Metadata.SAMPLEID == sample[3:]).all()
wfreq = np.ravel(results)
# Returning first int for washing frequency
return jsonify(int(wfreq[0]))
# Returning list of dictionaries containing (otu_ids, sample_values)
@app.route('/samples/<sample>')
def samples(sample):
"""Return a list dictionaries containing `otu_ids` and `sample_values`."""
stmt = session.query(Samples).statement
df = pd.read_sql_query(stmt, session.bind)
# testing to ensure samples were found in the columns, otherwise (error)
if sample not in df.columns:
return jsonify(f" Sample: {sample} Not Found!"), 400
df = df[df[sample] > 1]# Returning samples greater than 1
df = df.sort_values(by=sample, ascending=0)# Sort by descending
# sending data as json after formating
data = [{
"otu_ids": df[sample].index.values.tolist(),
"sample_values": df[sample].values.tolist()
}]
return jsonify(data)
if __name__ == '__main__':
app.run(debug=True)
| 29.437086 | 88 | 0.622497 |
7259a07b6c7e9324030759af310508fd5480db8f | 5,301 | py | Python | run_usd.py | ousttrue/usd_cpp_samples | b35af2d26c0708d8977ce1ba2bb85a1a90bf7dcb | [
"MIT"
] | 5 | 2020-10-06T05:18:53.000Z | 2022-03-15T02:18:02.000Z | run_usd.py | ousttrue/usd_cpp_samples | b35af2d26c0708d8977ce1ba2bb85a1a90bf7dcb | [
"MIT"
] | null | null | null | run_usd.py | ousttrue/usd_cpp_samples | b35af2d26c0708d8977ce1ba2bb85a1a90bf7dcb | [
"MIT"
] | null | null | null | import pathlib
import os
import sys
import types
import importlib
from typing import Iterable, Dict, Optional, List
import logging
import pathlib
import ctypes
#
import glglue.glfw
from glglue.gl3.pydearcontroller import PydearController
from glglue.gl3.renderview import RenderView
from glglue.windowconfig import WindowConfig
import pydear as ImGui
from pydear.utils.dockspace import DockView
from glglue.scene.node import Node
from glglue.ctypesmath import TRS, Mat4, Camera
logger = logging.getLogger(__name__)
CONFIG_FILE = pathlib.Path("window.ini")
HERE = pathlib.Path(__file__).absolute().parent
# setup USD
sys.path.insert(0, str(HERE / 'build_release/lib/python'))
os.environ['PATH'] = f'{HERE / "build_release/bin"};{HERE / "build_release/lib"};{os.environ["PATH"]}'
from pxr import Usd # nopep8
from pxr import UsdImagingGL # nopep8
from pxr.Gf import Vec4f, Vec4d, Camera, Matrix4d, Vec3d # nopep8
from pxr.Glf import SimpleMaterial # nopep8
class ImguiDocks:
def __init__(self) -> None:
self.demo = DockView(
'demo', (ctypes.c_bool * 1)(True), ImGui.ShowDemoWindow)
self.metrics = DockView(
'metrics', (ctypes.c_bool * 1)(True), ImGui.ShowMetricsWindow)
self.selected = 'skin'
# logger
from pydear.utils.loghandler import ImGuiLogHandler
log_handle = ImGuiLogHandler()
log_handle.register_root()
self.logger = DockView('log', (ctypes.c_bool * 1)
(True), log_handle.draw)
def __iter__(self) -> Iterable[DockView]:
yield self.demo
yield self.metrics
yield self.logger
class SampleController(PydearController):
def __init__(self, scale: float = 1):
super().__init__(scale=scale)
# load usd
# path = pathlib.Path(os.environ['USERPROFILE']) / \
# "Desktop/Kitchen_set/Kitchen_set.usd"
path = HERE / 'test_sphere.usda'
assert path.exists()
self.engine = None
self.stage: Usd.Stage = Usd.Stage.Open(str(path))
self.params = UsdImagingGL.RenderParams()
self.params.clearColor = Vec4f(1.0, 0.5, 0.1, 1.0)
# params.frame = Usd.TimeCode.Default()
self.params.frame = 1.0
self.params.complexity = 1.0
self.params.forceRefresh = False
self.params.enableLighting = False
# params.enableLighting = True
self.params.enableSceneMaterials = True
self.params.drawMode = UsdImagingGL.DrawMode.DRAW_SHADED_SMOOTH
self.params.highlight = True
self.params.gammaCorrectColors = False
# params.colorCorrectionMode = TfToken("sRGB");
self.params.showGuides = True
self.params.showProxy = True
self.params.showRender = False
self.params.forceRefresh = True
material = SimpleMaterial()
ambient = Vec4f()
self.camera = Camera()
# self.camera.SetPerspectiveFromAspectRatioAndFieldOfView(16.0 / 9.0, 60, Camera.FOVHorizontal)
# self.camera.focus_distance = 100
dist = 100
trans = Matrix4d()
trans.SetTranslate(Vec3d.ZAxis() * dist)
# GfMatrix4d roty(1.0);
# GfMatrix4d rotz(1.0);
# GfMatrix4d rotx(1.0);
# roty.SetRotate(GfRotation(GfVec3d::YAxis(), -rotation[0]));
# rotx.SetRotate(GfRotation(GfVec3d::XAxis(), -rotation[1]));
# rotz.SetRotate(GfRotation(GfVec3d::ZAxis(), -rotation[2]));
# GfMatrix4d toCenter;
# toCenter.SetTranslate(center);
self.camera.transform = trans
self.camera.focus_distance = dist
def imgui_create_docks(self):
return ImguiDocks()
def lazy_init(self):
if self.engine:
return
self.engine = UsdImagingGL.Engine()
def draw_scene(self):
self.lazy_init()
frustum = self.camera.frustum
# engine.SetLightingState(_lights, _material, _ambient);
self.engine.SetRenderViewport(Vec4d(0, 0, *controller.viewport))
# engine.SetWindowPolicy(CameraUtilConformWindowPolicy::CameraUtilMatchHorizontally);
# If using a usd camera, use SetCameraPath renderer.SetCameraPath(sceneCam.GetPath())
# else set camera state
self.engine.SetCameraState(frustum.ComputeViewMatrix(
), frustum.ComputeProjectionMatrix())
self.engine.Render(self.stage.GetPseudoRoot(), self.params)
if __name__ == "__main__":
logging.basicConfig(
format='%(levelname)s:%(name)s:%(message)s', level=logging.DEBUG)
# ImGui
controller = SampleController()
# glfw
loop = glglue.glfw.LoopManager(
controller,
config=WindowConfig.load_json_from_path(CONFIG_FILE),
title="pydear")
# main loop
lastCount = 0
while True:
count = loop.begin_frame()
if not count:
break
d = count - lastCount
lastCount = count
if d > 0:
controller.onUpdate(d)
controller.draw()
loop.end_frame()
# save window config
config = loop.get_config()
config.save_json_to_path(CONFIG_FILE)
| 33.13125 | 104 | 0.632334 |
54b0bf8f0c23b3831fab3791c972fc5f44f16f62 | 1,628 | py | Python | setup.py | pjaos/rpi_stepper_motor | 173e51c45d55b0c9125b1936a4709746973dafac | [
"MIT"
] | null | null | null | setup.py | pjaos/rpi_stepper_motor | 173e51c45d55b0c9125b1936a4709746973dafac | [
"MIT"
] | null | null | null | setup.py | pjaos/rpi_stepper_motor | 173e51c45d55b0c9125b1936a4709746973dafac | [
"MIT"
] | null | null | null | import setuptools
MODULE_NAME = "smotor" # The python module name
VERSION = "1.0" # The version of the application
AUTHOR = "Paul Austen" # The name of the applications author
AUTHOR_EMAIL = "pausten.os@gmail.com" # The email address of the author
DESCRIPTION = "Basic control of a stepper motor on a Raspberry Pi platform." # A short description of the application
LICENSE = "MIT License" # The License that the application is distributed under
REQUIRED_LIBS = ['p3lib>=1.1.29', 'pigpio'] # A python list of required libs (optionally including versions)
STARTUP_SCRIPTS= ['scripts/smotor'] # The command line startup scripts to be installed.
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name=MODULE_NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
description=DESCRIPTION,
long_description="", #This will be read from the README.md file
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"License :: %s" % (LICENSE),
"Operating System :: OS Independent",
],
install_requires=[
REQUIRED_LIBS
],
scripts=STARTUP_SCRIPTS
)
| 49.333333 | 154 | 0.530713 |
ba7021aff648f249b8b11ea136867b122be6a0ef | 51 | py | Python | fixtures/W0702/valid.py | dopustim/pylint-config | 24e62f1d478e2d151c6aec64e732142e844569d0 | [
"0BSD"
] | 1 | 2022-01-07T18:01:11.000Z | 2022-01-07T18:01:11.000Z | fixtures/W0702/valid.py | dopustim/pylint-config | 24e62f1d478e2d151c6aec64e732142e844569d0 | [
"0BSD"
] | null | null | null | fixtures/W0702/valid.py | dopustim/pylint-config | 24e62f1d478e2d151c6aec64e732142e844569d0 | [
"0BSD"
] | null | null | null |
try:
1 / 0
except ZeroDivisionError:
pass
| 8.5 | 25 | 0.627451 |
f8b2f06c5688075a78cbb23f5c99b4b00a9573ff | 270 | py | Python | curso-python/estruturas_controle_projetos/fibonacci_v5.py | gui-hub/Estudos-Python | 0219da2430526c4c3705248e86e65b8c847175b8 | [
"Apache-2.0"
] | null | null | null | curso-python/estruturas_controle_projetos/fibonacci_v5.py | gui-hub/Estudos-Python | 0219da2430526c4c3705248e86e65b8c847175b8 | [
"Apache-2.0"
] | null | null | null | curso-python/estruturas_controle_projetos/fibonacci_v5.py | gui-hub/Estudos-Python | 0219da2430526c4c3705248e86e65b8c847175b8 | [
"Apache-2.0"
] | null | null | null | # 0, 1, 1, 2, 3, 5, 8, 13, 21...
def fibonacci(limite):
resultado = [0, 1]
while resultado[-1] < limite:
resultado.append(sum(resultado[-2:]))
return resultado
if __name__ == '__main__':
for fib in fibonacci(10000):
print(fib, end=',')
| 22.5 | 45 | 0.574074 |
917a806cdbd036c29c31a35e8312cf7acb9660cf | 2,862 | py | Python | lib/train_pg.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | 1 | 2020-11-16T20:11:26.000Z | 2020-11-16T20:11:26.000Z | lib/train_pg.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | null | null | null | lib/train_pg.py | samsafadi/PointRCNN | 761d4cadb3e634dc0994f2e95318240c37fbb485 | [
"MIT"
] | null | null | null | import _init_path
import torch
import tqdm
import json
import sys
import os
sys.path.append('../')
import argparse
from lib.env import PointRCNNEnv
from lib.pg_agent import PG
HOME_DIR = os.path.join(os.getcwd(), '..')
def train(agent, env, config, device):
"""
train pg model
"""
# For logging
batch_obs = []
batch_acts = []
batch_rets = []
batch_lens = []
# Starting state, reward, and trajectories
for _ in tqdm.tqdm(range(len(env.test_loader))):
# loads a new state (image)
state, done = env.reset(), False
ep_rews = []
while True:
# Save state
batch_obs.append(state.copy())
s = state.shape
state = state.reshape(s[0], s[3], s[1], s[2])
state = torch.Tensor(state).cuda()
# get the agent action for this state
act, prob_action, log_prob_action, _ = agent.get_action(state)
# get reward
obs, reward, done, _ = env.step(act.view(act.shape[1:]))
# calculates loss against baseline and steps optimizer
batch_loss = agent.update(obs, act, reward)
batch_obs.append(obs)
batch_acts.append(act)
ep_rews.append(batch_reward)
if done:
# Record info about episode
ep_ret, ep_len = sum(ep_rews), len(ep_rews)
batch_rets.append(ep_ret)
batch_lens.append(ep_len)
# reset episode-specific variables
state, done, ep_rews = env.reset(), False, []
# end experience loop if we have enough of it
if len(batch_obs) > config['batchsize']:
break
return batch_loss, batch_rets, batch_lens
def load_config(config_path):
with open(config_path, 'r') as f:
return json.load(f)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="arg parser")
parser.add_argument("--eval_mode", type=str, default='rpn', required=True, help="specify the evaluation mode")
parser.add_argument("--num_epochs", type=int, default=10, required=False, help="specify the number of epochs")
args = parser.parse_args()
if torch.cuda.is_available():
device = torch.device('cuda')
print('Running on GPU {}'.format(torch.cuda.get_device_name(0)))
else:
device = torch.device('cpu')
print('Running on CPU')
config_path = os.path.join(HOME_DIR, 'tools/configs/pg.json')
config = load_config(config_path)
debug = True
# initialize the PointRCNNEnv and set it networks into eval mode
env = PointRCNNEnv()
# initialize the agent along with the networks inside it
agent = PG(config, env=env)
for i in range(args.num_epochs):
train(agent, env, config, device)
| 28.336634 | 114 | 0.604123 |
12a0f71f5d438e0d5303f11712c61b919f58567d | 67,236 | py | Python | Lib/test/test_zipfile.py | deadsnakes/python3.3 | 4faaf44cd5478410ac3b977351c1965fa054b5e9 | [
"PSF-2.0"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | Lib/test/test_zipfile.py | deadsnakes/python3.3 | 4faaf44cd5478410ac3b977351c1965fa054b5e9 | [
"PSF-2.0"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | check-python33-manual/samples/standard_library_337/Lib/test/test_zipfile.py | DaveKaretnyk/parsing-utils2 | 40085bbd399fa605f2f2a4708d385a64ffc907de | [
"MIT"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | import io
import os
import sys
import imp
import time
import shutil
import struct
import zipfile
import unittest
from tempfile import TemporaryFile
from random import randint, random, getrandbits
from test.support import (TESTFN, findfile, unlink,
requires_zlib, requires_bz2, requires_lzma,
captured_stdout)
# Scratch file / directory names derived from test.support.TESTFN.
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
# Number of generated data lines used by the round-trip tests below.
FIXEDTEST_SIZE = 1000
DATAFILES_DIR = 'zipfile_datafiles'
# (arcname, contents) pairs -- presumably consumed by extraction tests
# later in the file; not referenced in this section.
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
                   ('ziptest2dir/_ziptest2', 'qawsedrftg'),
                   ('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
                   ('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
def get_files(test):
    """Yield archive targets for a test: a file name, a real temporary
    file object, and an in-memory BytesIO object.

    For the two file objects, verify after the caller is done with them
    that the zipfile machinery did not close them behind its back.
    """
    yield TESTFN2
    with TemporaryFile() as real_file:
        yield real_file
        test.assertFalse(real_file.closed)
    with io.BytesIO() as memory_file:
        yield memory_file
        test.assertFalse(memory_file.closed)
class AbstractTestsWithSourceFile:
    """Shared read/write round-trip tests over a generated source file.

    Concrete subclasses mix in ``unittest.TestCase`` and set the
    ``compression`` class attribute to one of the ``zipfile.ZIP_*``
    constants.
    """

    @classmethod
    def setUpClass(cls):
        # Build FIXEDTEST_SIZE ASCII lines of test data once per class.
        cls.line_gen = [bytes("Zipfile test line %d. random float: %f\n" %
                              (i, random()), "ascii")
                        for i in range(FIXEDTEST_SIZE)]
        cls.data = b''.join(cls.line_gen)

    def setUp(self):
        # Make a source file with some lines
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)

    def make_test_archive(self, f, compression):
        """Write the test data into *f* three times under different names."""
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)
            zipfp.writestr("strfile", self.data)

    def zip_test(self, f, compression):
        """Round-trip *f*: read(), printdir(), namelist/infolist/getinfo."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            self.assertEqual(zipfp.read(TESTFN), self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)
            self.assertEqual(zipfp.read("strfile"), self.data)

            # Print the ZIP directory
            fp = io.StringIO()
            zipfp.printdir(file=fp)
            directory = fp.getvalue()
            lines = directory.splitlines()
            self.assertEqual(len(lines), 4)  # Number of files + header

            self.assertIn('File Name', lines[0])
            self.assertIn('Modified', lines[0])
            self.assertIn('Size', lines[0])

            fn, date, time_, size = lines[1].split()
            self.assertEqual(fn, 'another.name')
            self.assertTrue(time.strptime(date, '%Y-%m-%d'))
            self.assertTrue(time.strptime(time_, '%H:%M:%S'))
            self.assertEqual(size, str(len(self.data)))

            # Check the namelist
            names = zipfp.namelist()
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)

            # Check infolist
            infos = zipfp.infolist()
            names = [i.filename for i in infos]
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            for i in infos:
                self.assertEqual(i.file_size, len(self.data))

            # check getinfo
            for nm in (TESTFN, "another.name", "strfile"):
                info = zipfp.getinfo(nm)
                self.assertEqual(info.filename, nm)
                self.assertEqual(info.file_size, len(self.data))

            # Check that testzip doesn't raise an exception
            zipfp.testzip()

    def test_basic(self):
        """Round-trip the archive against every file target."""
        for f in get_files(self):
            self.zip_test(f, self.compression)

    def zip_open_test(self, f, compression):
        """Read two members back in 256-byte chunks via ZipFile.open()."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            zipdata1 = []
            with zipfp.open(TESTFN) as zipopen1:
                while True:
                    read_data = zipopen1.read(256)
                    if not read_data:
                        break
                    zipdata1.append(read_data)

            zipdata2 = []
            with zipfp.open("another.name") as zipopen2:
                while True:
                    read_data = zipopen2.read(256)
                    if not read_data:
                        break
                    zipdata2.append(read_data)

            self.assertEqual(b''.join(zipdata1), self.data)
            self.assertEqual(b''.join(zipdata2), self.data)

    def test_open(self):
        for f in get_files(self):
            self.zip_open_test(f, self.compression)

    def zip_random_open_test(self, f, compression):
        """Read a member back using randomly sized read() calls."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            zipdata1 = []
            with zipfp.open(TESTFN) as zipopen1:
                while True:
                    read_data = zipopen1.read(randint(1, 1024))
                    if not read_data:
                        break
                    zipdata1.append(read_data)

            self.assertEqual(b''.join(zipdata1), self.data)

    def test_random_open(self):
        for f in get_files(self):
            self.zip_random_open_test(f, self.compression)

    def zip_read1_test(self, f, compression):
        """Read a member back using unbounded read1() calls."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp, \
             zipfp.open(TESTFN) as zipopen:
            zipdata = []
            while True:
                read_data = zipopen.read1(-1)
                if not read_data:
                    break
                zipdata.append(read_data)

        self.assertEqual(b''.join(zipdata), self.data)

    def test_read1(self):
        for f in get_files(self):
            self.zip_read1_test(f, self.compression)

    def zip_read1_10_test(self, f, compression):
        """read1(10) must never return more than 10 bytes."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp, \
             zipfp.open(TESTFN) as zipopen:
            zipdata = []
            while True:
                read_data = zipopen.read1(10)
                self.assertLessEqual(len(read_data), 10)
                if not read_data:
                    break
                zipdata.append(read_data)

        self.assertEqual(b''.join(zipdata), self.data)

    def test_read1_10(self):
        for f in get_files(self):
            self.zip_read1_10_test(f, self.compression)

    def zip_readline_read_test(self, f, compression):
        """Interleave readline() and read() calls on one member."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp, \
             zipfp.open(TESTFN) as zipopen:
            data = b''
            while True:
                read = zipopen.readline()
                if not read:
                    break
                data += read

                read = zipopen.read(100)
                if not read:
                    break
                data += read

        self.assertEqual(data, self.data)

    def test_readline_read(self):
        # Issue #7610: calls to readline() interleaved with calls to read().
        for f in get_files(self):
            self.zip_readline_read_test(f, self.compression)

    def zip_readline_test(self, f, compression):
        """readline() must return the original lines in order."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            with zipfp.open(TESTFN) as zipopen:
                for line in self.line_gen:
                    linedata = zipopen.readline()
                    self.assertEqual(linedata, line)

    def test_readline(self):
        for f in get_files(self):
            self.zip_readline_test(f, self.compression)

    def zip_readlines_test(self, f, compression):
        """readlines() must return all original lines."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            with zipfp.open(TESTFN) as zipopen:
                ziplines = zipopen.readlines()
            for line, zipline in zip(self.line_gen, ziplines):
                self.assertEqual(zipline, line)

    def test_readlines(self):
        for f in get_files(self):
            self.zip_readlines_test(f, self.compression)

    def zip_iterlines_test(self, f, compression):
        """Iterating a member file yields the original lines."""
        self.make_test_archive(f, compression)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            with zipfp.open(TESTFN) as zipopen:
                for line, zipline in zip(self.line_gen, zipopen):
                    self.assertEqual(zipline, line)

    def test_iterlines(self):
        for f in get_files(self):
            self.zip_iterlines_test(f, self.compression)

    def test_low_compression(self):
        """Check for cases where compressed data is larger than original."""
        # Create the ZIP archive
        with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipfp:
            zipfp.writestr("strfile", '12')

        # Get an open object for strfile
        with zipfile.ZipFile(TESTFN2, "r", self.compression) as zipfp:
            with zipfp.open("strfile") as openobj:
                self.assertEqual(openobj.read(1), b'1')
                self.assertEqual(openobj.read(1), b'2')

    def test_writestr_compression(self):
        """writestr(compress_type=...) must record the requested method."""
        # Bug fix: the archive was previously opened without ever being
        # closed, leaking the file handle (and keeping TESTFN2 locked on
        # Windows until garbage collection).  Use a context manager.
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            zipfp.writestr("b.txt", "hello world",
                           compress_type=self.compression)
            info = zipfp.getinfo('b.txt')
            self.assertEqual(info.compress_type, self.compression)

    def test_read_return_size(self):
        # Issue #9837: ZipExtFile.read() shouldn't return more bytes
        # than requested.
        for test_size in (1, 4095, 4096, 4097, 16384):
            file_size = test_size + 1
            junk = getrandbits(8 * file_size).to_bytes(file_size, 'little')
            with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
                zipf.writestr('foo', junk)
                with zipf.open('foo', 'r') as fp:
                    buf = fp.read(test_size)
                    self.assertEqual(len(buf), test_size)

    def test_truncated_zipfile(self):
        """Reading past a truncation point must raise EOFError, not hang."""
        fp = io.BytesIO()
        with zipfile.ZipFile(fp, mode='w') as zipf:
            zipf.writestr('strfile', self.data, compress_type=self.compression)
            end_offset = fp.tell()
        zipfiledata = fp.getvalue()

        # Truncate while reading via read().
        fp = io.BytesIO(zipfiledata)
        with zipfile.ZipFile(fp) as zipf:
            with zipf.open('strfile') as zipopen:
                fp.truncate(end_offset - 20)
                with self.assertRaises(EOFError):
                    zipopen.read()

        # Truncate while reading via chunked read().
        fp = io.BytesIO(zipfiledata)
        with zipfile.ZipFile(fp) as zipf:
            with zipf.open('strfile') as zipopen:
                fp.truncate(end_offset - 20)
                with self.assertRaises(EOFError):
                    while zipopen.read(100):
                        pass

        # Truncate while reading via chunked read1().
        fp = io.BytesIO(zipfiledata)
        with zipfile.ZipFile(fp) as zipf:
            with zipf.open('strfile') as zipopen:
                fp.truncate(end_offset - 20)
                with self.assertRaises(EOFError):
                    while zipopen.read1(100):
                        pass

    def tearDown(self):
        # Remove the scratch files created by setUp()/the tests.
        unlink(TESTFN)
        unlink(TESTFN2)
class StoredTestsWithSourceFile(AbstractTestsWithSourceFile,
                                unittest.TestCase):
    """Run the shared tests without compression, plus STORED-only cases."""
    compression = zipfile.ZIP_STORED
    # Disable the inherited test -- presumably it is only meaningful when
    # real compression is in use.
    test_low_compression = None
    def zip_test_writestr_permissions(self, f, compression):
        # Make sure that writestr creates files with mode 0600,
        # when it is passed a name rather than a ZipInfo instance.
        self.make_test_archive(f, compression)
        with zipfile.ZipFile(f, "r") as zipfp:
            zinfo = zipfp.getinfo('strfile')
            self.assertEqual(zinfo.external_attr, 0o600 << 16)
    def test_writestr_permissions(self):
        """writestr() with a plain name should default the mode to 0600."""
        for f in get_files(self):
            self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
    def test_absolute_arcnames(self):
        """A leading slash in an arcname is stripped when writing."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, "/absolute")
        with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
            self.assertEqual(zipfp.namelist(), ["absolute"])
    def test_append_to_zip_file(self):
        """Test appending to an existing zipfile."""
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
            zipfp.writestr("strfile", self.data)
            self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
    def test_append_to_non_zip_file(self):
        """Test appending to an existing file that is not a zipfile."""
        # NOTE: this test fails if len(d) < 22 because of the first
        # line "fpin.seek(-22, 2)" in _EndRecData
        data = b'I am not a ZipFile!'*10
        with open(TESTFN2, 'wb') as f:
            f.write(data)
        with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        # The archive must be readable when starting at the prefix's end.
        with open(TESTFN2, 'rb') as f:
            f.seek(len(data))
            with zipfile.ZipFile(f, "r") as zipfp:
                self.assertEqual(zipfp.namelist(), [TESTFN])
    def test_ignores_newline_at_end(self):
        # Trailing junk (newline plus NUL padding) appended after the
        # archive must not prevent it from being opened.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.write(TESTFN, TESTFN)
        with open(TESTFN2, 'a') as f:
            f.write("\r\n\00\00\00")
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            self.assertIsInstance(zipfp, zipfile.ZipFile)
    def test_ignores_stuff_appended_past_comments(self):
        # Data appended past the archive comment must not corrupt the
        # comment or prevent the archive from being opened.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.comment = b"this is a comment"
            zipfp.write(TESTFN, TESTFN)
        with open(TESTFN2, 'a') as f:
            f.write("abcdef\r\n")
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            self.assertIsInstance(zipfp, zipfile.ZipFile)
            self.assertEqual(zipfp.comment, b"this is a comment")
    def test_write_default_name(self):
        """Check that calling ZipFile.write without arcname specified
        produces the expected result."""
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            zipfp.write(TESTFN)
            with open(TESTFN, "rb") as f:
                self.assertEqual(zipfp.read(TESTFN), f.read())
    def test_write_to_readonly(self):
        """Check that trying to call write() on a readonly ZipFile object
        raises a RuntimeError."""
        with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
            zipfp.writestr("somefile.txt", "bogus")
        with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
            self.assertRaises(RuntimeError, zipfp.write, TESTFN)
    def test_add_file_before_1980(self):
        """write() raises ValueError for files with pre-1980 timestamps."""
        # Set atime and mtime to 1970-01-01
        os.utime(TESTFN, (0, 0))
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            self.assertRaises(ValueError, zipfp.write, TESTFN)
@requires_zlib
class DeflateTestsWithSourceFile(AbstractTestsWithSourceFile,
                                 unittest.TestCase):
    """Run the shared source-file tests with zlib (deflate) compression."""
    compression = zipfile.ZIP_DEFLATED

    def test_per_file_compression(self):
        """Members of one archive may each use a different compression."""
        with zipfile.ZipFile(TESTFN2, "w") as archive:
            archive.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
            archive.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
            stored_info = archive.getinfo('storeme')
            deflated_info = archive.getinfo('deflateme')
            self.assertEqual(stored_info.compress_type, zipfile.ZIP_STORED)
            self.assertEqual(deflated_info.compress_type,
                             zipfile.ZIP_DEFLATED)
@requires_bz2
class Bzip2TestsWithSourceFile(AbstractTestsWithSourceFile,
                               unittest.TestCase):
    # Run the shared source-file tests with bzip2 compression.
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithSourceFile(AbstractTestsWithSourceFile,
                              unittest.TestCase):
    # Run the shared source-file tests with LZMA compression.
    compression = zipfile.ZIP_LZMA
class AbstractTestZip64InSmallFiles:
    # These tests test the ZIP64 functionality without using large files,
    # see test_zipfile64 for proper tests.

    @classmethod
    def setUpClass(cls):
        # Shared payload: FIXEDTEST_SIZE short ASCII lines joined by '\n'.
        line_gen = (bytes("Test of zipfile line %d." % i, "ascii")
                    for i in range(0, FIXEDTEST_SIZE))
        cls.data = b'\n'.join(line_gen)

    def setUp(self):
        # Shrink the global ZIP64 threshold so the small test payload
        # triggers the ZIP64 code paths; restored in tearDown().
        self._limit = zipfile.ZIP64_LIMIT
        zipfile.ZIP64_LIMIT = 5

        # Make a source file with some lines
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)

    def zip_test(self, f, compression):
        """Write a ZIP64-enabled archive to *f* and fully verify it."""
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)
            zipfp.writestr("strfile", self.data)

        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            self.assertEqual(zipfp.read(TESTFN), self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)
            self.assertEqual(zipfp.read("strfile"), self.data)

            # Print the ZIP directory
            fp = io.StringIO()
            zipfp.printdir(fp)
            directory = fp.getvalue()
            lines = directory.splitlines()
            self.assertEqual(len(lines), 4) # Number of files + header

            self.assertIn('File Name', lines[0])
            self.assertIn('Modified', lines[0])
            self.assertIn('Size', lines[0])

            fn, date, time_, size = lines[1].split()
            self.assertEqual(fn, 'another.name')
            self.assertTrue(time.strptime(date, '%Y-%m-%d'))
            self.assertTrue(time.strptime(time_, '%H:%M:%S'))
            self.assertEqual(size, str(len(self.data)))

            # Check the namelist
            names = zipfp.namelist()
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)

            # Check infolist
            infos = zipfp.infolist()
            names = [i.filename for i in infos]
            self.assertEqual(len(names), 3)
            self.assertIn(TESTFN, names)
            self.assertIn("another.name", names)
            self.assertIn("strfile", names)
            for i in infos:
                self.assertEqual(i.file_size, len(self.data))

            # check getinfo
            for nm in (TESTFN, "another.name", "strfile"):
                info = zipfp.getinfo(nm)
                self.assertEqual(info.filename, nm)
                self.assertEqual(info.file_size, len(self.data))

            # Check that testzip doesn't raise an exception
            zipfp.testzip()

    def test_basic(self):
        """Round-trip the ZIP64 archive against every file target."""
        for f in get_files(self):
            self.zip_test(f, self.compression)

    def tearDown(self):
        # Restore the global ZIP64 threshold and remove scratch files.
        zipfile.ZIP64_LIMIT = self._limit
        unlink(TESTFN)
        unlink(TESTFN2)
class StoredTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                  unittest.TestCase):
    # Runs the shared ZIP64 small-file tests uncompressed, and adds checks
    # that "large" files (relative to the lowered ZIP64_LIMIT) are rejected
    # when allowZip64 is off (the default).
    compression = zipfile.ZIP_STORED
    def large_file_exception_test(self, f, compression):
        # write() must raise LargeZipFile once the (patched) limit is exceeded.
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            self.assertRaises(zipfile.LargeZipFile,
                              zipfp.write, TESTFN, "another.name")
    def large_file_exception_test2(self, f, compression):
        # Same as above, but via writestr().
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            self.assertRaises(zipfile.LargeZipFile,
                              zipfp.writestr, "another.name", self.data)
    def test_large_file_exception(self):
        for f in get_files(self):
            self.large_file_exception_test(f, zipfile.ZIP_STORED)
            self.large_file_exception_test2(f, zipfile.ZIP_STORED)
    def test_absolute_arcnames(self):
        # A leading "/" in the arcname must be stripped when writing.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
                             allowZip64=True) as zipfp:
            zipfp.write(TESTFN, "/absolute")
        with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
            self.assertEqual(zipfp.namelist(), ["absolute"])
@requires_zlib
class DeflateTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                   unittest.TestCase):
    # Same ZIP64 small-file tests, but with zlib (deflate) compression.
    compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                 unittest.TestCase):
    # Same ZIP64 small-file tests, but with bzip2 compression.
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
                                unittest.TestCase):
    # Same ZIP64 small-file tests, but with LZMA compression.
    compression = zipfile.ZIP_LZMA
class PyZipFileTests(unittest.TestCase):
    """Tests for zipfile.PyZipFile, which byte-compiles Python source
    files/packages and stores the compiled form in the archive."""
    def assertCompiledIn(self, name, namelist):
        # Accept either the .pyo or the .pyc compiled form of *name*.
        if name + 'o' not in namelist:
            self.assertIn(name + 'c', namelist)
    def test_write_pyfile(self):
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            fn = __file__
            if fn.endswith('.pyc') or fn.endswith('.pyo'):
                # Map a cached bytecode path back to its source file,
                # handling both PEP 3147 __pycache__ layouts and legacy
                # side-by-side .pyc/.pyo files.
                path_split = fn.split(os.sep)
                if os.altsep is not None:
                    path_split.extend(fn.split(os.altsep))
                if '__pycache__' in path_split:
                    fn = imp.source_from_cache(fn)
                else:
                    fn = fn[:-1]
            zipfp.writepy(fn)
            bn = os.path.basename(fn)
            # Only the compiled form should be stored, never the .py itself.
            self.assertNotIn(bn, zipfp.namelist())
            self.assertCompiledIn(bn, zipfp.namelist())
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            fn = __file__
            if fn.endswith(('.pyc', '.pyo')):
                fn = fn[:-1]
            # With a basename argument the module is placed in that package.
            zipfp.writepy(fn, "testpackage")
            bn = "%s/%s" % ("testpackage", os.path.basename(fn))
            self.assertNotIn(bn, zipfp.namelist())
            self.assertCompiledIn(bn, zipfp.namelist())
    def test_write_python_package(self):
        import email
        packagedir = os.path.dirname(email.__file__)
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            zipfp.writepy(packagedir)
            # Check for a couple of modules at different levels of the
            # hierarchy
            names = zipfp.namelist()
            self.assertCompiledIn('email/__init__.py', names)
            self.assertCompiledIn('email/mime/text.py', names)
    def test_write_with_optimization(self):
        import email
        packagedir = os.path.dirname(email.__file__)
        # use .pyc if running test in optimization mode,
        # use .pyo if running test in debug mode
        optlevel = 1 if __debug__ else 0
        ext = '.pyo' if optlevel == 1 else '.pyc'
        with TemporaryFile() as t, \
             zipfile.PyZipFile(t, "w", optimize=optlevel) as zipfp:
            zipfp.writepy(packagedir)
            names = zipfp.namelist()
            self.assertIn('email/__init__' + ext, names)
            self.assertIn('email/mime/text' + ext, names)
    def test_write_python_directory(self):
        os.mkdir(TESTFN2)
        try:
            with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
                fp.write("print(42)\n")
            with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
                fp.write("print(42 * 42)\n")
            with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
                fp.write("bla bla bla\n")
            with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
                zipfp.writepy(TESTFN2)
                names = zipfp.namelist()
                self.assertCompiledIn('mod1.py', names)
                self.assertCompiledIn('mod2.py', names)
                # Non-Python files in the directory must be skipped.
                self.assertNotIn('mod2.txt', names)
        finally:
            shutil.rmtree(TESTFN2)
    def test_write_non_pyfile(self):
        # writepy() must reject a file that is not Python source/bytecode.
        with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
            with open(TESTFN, 'w') as f:
                f.write('most definitely not a python file')
            self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
            os.remove(TESTFN)
    def test_write_pyfile_bad_syntax(self):
        os.mkdir(TESTFN2)
        try:
            with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
                fp.write("Bad syntax in python file\n")
            with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
                # syntax errors are printed to stdout
                with captured_stdout() as s:
                    zipfp.writepy(os.path.join(TESTFN2, "mod1.py"))
                self.assertIn("SyntaxError", s.getvalue())
                # as it will not have compiled the python file, it will
                # include the .py file not .pyc or .pyo
                names = zipfp.namelist()
                self.assertIn('mod1.py', names)
                self.assertNotIn('mod1.pyc', names)
                self.assertNotIn('mod1.pyo', names)
        finally:
            shutil.rmtree(TESTFN2)
class ExtractTests(unittest.TestCase):
    """Tests for ZipFile.extract()/extractall(), including the sanitization
    of hostile ("zip slip"-style) archive member names."""
    def test_extract(self):
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                writtenfile = zipfp.extract(fpath)
                # make sure it was written to the right place
                correctfile = os.path.join(os.getcwd(), fpath)
                correctfile = os.path.normpath(correctfile)
                self.assertEqual(writtenfile, correctfile)
                # make sure correct data is in correct file
                with open(writtenfile, "rb") as f:
                    self.assertEqual(fdata.encode(), f.read())
                os.remove(writtenfile)
        # remove the test file subdirectories
        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
    def test_extract_all(self):
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            zipfp.extractall()
            for fpath, fdata in SMALL_TEST_DATA:
                outfile = os.path.join(os.getcwd(), fpath)
                with open(outfile, "rb") as f:
                    self.assertEqual(fdata.encode(), f.read())
                os.remove(outfile)
        # remove the test file subdirectories
        shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
    def check_file(self, filename, content):
        # Helper: assert *filename* exists and holds exactly *content* bytes.
        self.assertTrue(os.path.isfile(filename))
        with open(filename, 'rb') as f:
            self.assertEqual(f.read(), content)
    def test_sanitize_windows_name(self):
        san = zipfile.ZipFile._sanitize_windows_name
        # Passing pathsep in allows this test to work regardless of platform.
        self.assertEqual(san(r',,?,C:,foo,bar/z', ','), r'_,C_,foo,bar/z')
        self.assertEqual(san(r'a\b,c<d>e|f"g?h*i', ','), r'a\b,c_d_e_f_g_h_i')
        self.assertEqual(san('../../foo../../ba..r', '/'), r'foo/ba..r')
    def test_extract_hackers_arcnames_common_cases(self):
        # (hostile arcname in the archive, path it must be extracted to)
        common_hacknames = [
            ('../foo/bar', 'foo/bar'),
            ('foo/../bar', 'foo/bar'),
            ('foo/../../bar', 'foo/bar'),
            ('foo/bar/..', 'foo/bar'),
            ('./../foo/bar', 'foo/bar'),
            ('/foo/bar', 'foo/bar'),
            ('/foo/../bar', 'foo/bar'),
            ('/foo/../../bar', 'foo/bar'),
        ]
        self._test_extract_hackers_arcnames(common_hacknames)
    @unittest.skipIf(os.path.sep != '\\', 'Requires \\ as path separator.')
    def test_extract_hackers_arcnames_windows_only(self):
        """Test combination of path fixing and windows name sanitization."""
        windows_hacknames = [
            (r'..\foo\bar', 'foo/bar'),
            (r'..\/foo\/bar', 'foo/bar'),
            (r'foo/\..\/bar', 'foo/bar'),
            (r'foo\/../\bar', 'foo/bar'),
            (r'C:foo/bar', 'foo/bar'),
            (r'C:/foo/bar', 'foo/bar'),
            (r'C://foo/bar', 'foo/bar'),
            (r'C:\foo\bar', 'foo/bar'),
            (r'//conky/mountpoint/foo/bar', 'foo/bar'),
            (r'\\conky\mountpoint\foo\bar', 'foo/bar'),
            (r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
            (r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
            (r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
            (r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
            (r'//?/C:/foo/bar', 'foo/bar'),
            (r'\\?\C:\foo\bar', 'foo/bar'),
            (r'C:/../C:/foo/bar', 'C_/foo/bar'),
            (r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
            ('../../foo../../ba..r', 'foo/ba..r'),
        ]
        self._test_extract_hackers_arcnames(windows_hacknames)
    @unittest.skipIf(os.path.sep != '/', r'Requires / as path separator.')
    def test_extract_hackers_arcnames_posix_only(self):
        posix_hacknames = [
            ('//foo/bar', 'foo/bar'),
            ('../../foo../../ba..r', 'foo../ba..r'),
            (r'foo/..\bar', r'foo/..\bar'),
        ]
        self._test_extract_hackers_arcnames(posix_hacknames)
    def _test_extract_hackers_arcnames(self, hacknames):
        # For each (arcname, fixedname) pair, write the hostile arcname into
        # an archive via a raw ZipInfo (so it is stored unmodified), then
        # check that extract()/extractall() confine the output to the
        # expected sanitized path -- both with and without a target dir.
        for arcname, fixedname in hacknames:
            content = b'foobar' + arcname.encode()
            with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
                zinfo = zipfile.ZipInfo()
                # preserve backslashes
                zinfo.filename = arcname
                zinfo.external_attr = 0o600 << 16
                zipfp.writestr(zinfo, content)
            arcname = arcname.replace(os.sep, "/")
            targetpath = os.path.join('target', 'subdir', 'subsub')
            correctfile = os.path.join(targetpath, *fixedname.split('/'))
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                writtenfile = zipfp.extract(arcname, targetpath)
                self.assertEqual(writtenfile, correctfile,
                                 msg='extract %r: %r != %r' %
                                 (arcname, writtenfile, correctfile))
            self.check_file(correctfile, content)
            shutil.rmtree('target')
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                zipfp.extractall(targetpath)
            self.check_file(correctfile, content)
            shutil.rmtree('target')
            correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                writtenfile = zipfp.extract(arcname)
                self.assertEqual(writtenfile, correctfile,
                                 msg="extract %r" % arcname)
            self.check_file(correctfile, content)
            shutil.rmtree(fixedname.split('/')[0])
            with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
                zipfp.extractall()
            self.check_file(correctfile, content)
            shutil.rmtree(fixedname.split('/')[0])
            os.remove(TESTFN2)
class OtherTests(unittest.TestCase):
    """Miscellaneous ZipFile behavior tests: duplicate names, comments,
    error handling, is_zipfile(), and malformed-archive detection."""
    def test_open_via_zip_info(self):
        # Create the ZIP archive: two members with the same name (a warning
        # is expected), then read each back via its ZipInfo object.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
            zipfp.writestr("name", "foo")
            with self.assertWarns(UserWarning):
                zipfp.writestr("name", "bar")
            self.assertEqual(zipfp.namelist(), ["name"] * 2)
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            infos = zipfp.infolist()
            data = b""
            for info in infos:
                with zipfp.open(info) as zipopen:
                    data += zipopen.read()
            self.assertIn(data, {b"foobar", b"barfoo"})
            data = b""
            for info in infos:
                data += zipfp.read(info)
            self.assertIn(data, {b"foobar", b"barfoo"})
    def test_universal_readaheads(self):
        # Universal-newline mode ('rU') must translate \r\n to \n even when
        # iterating line by line (exercises the read-ahead buffering).
        f = io.BytesIO()
        data = b'a\r\n' * 16 * 1024
        with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as zipfp:
            zipfp.writestr(TESTFN, data)
        data2 = b''
        with zipfile.ZipFile(f, 'r') as zipfp, \
             zipfp.open(TESTFN, 'rU') as zipopen:
            for line in zipopen:
                data2 += line
        self.assertEqual(data, data2.replace(b'\n', b'\r\n'))
    def test_writestr_extended_local_header_issue1202(self):
        with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
            for data in 'abcdefghijklmnop':
                zinfo = zipfile.ZipInfo(data)
                zinfo.flag_bits |= 0x08  # Include an extended local header.
                orig_zip.writestr(zinfo, data)
    def test_close(self):
        """Check that the zipfile is closed after the 'with' block."""
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
            self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
        self.assertIsNone(zipfp.fp, 'zipfp is not closed')
        with zipfile.ZipFile(TESTFN2, "r") as zipfp:
            self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
        self.assertIsNone(zipfp.fp, 'zipfp is not closed')
    def test_close_on_exception(self):
        """Check that the zipfile is closed if an exception is raised in the
        'with' block."""
        with zipfile.ZipFile(TESTFN2, "w") as zipfp:
            for fpath, fdata in SMALL_TEST_DATA:
                zipfp.writestr(fpath, fdata)
        try:
            with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
                raise zipfile.BadZipFile()
        except zipfile.BadZipFile:
            self.assertIsNone(zipfp2.fp, 'zipfp is not closed')
    def test_unsupported_version(self):
        # File has an extract_version of 120
        data = (b'PK\x03\x04x\x00\x00\x00\x00\x00!p\xa1@\x00\x00\x00\x00\x00\x00'
                b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00xPK\x01\x02x\x03x\x00\x00\x00\x00'
                b'\x00!p\xa1@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00'
                b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00xPK\x05\x06'
                b'\x00\x00\x00\x00\x01\x00\x01\x00/\x00\x00\x00\x1f\x00\x00\x00\x00\x00')
        self.assertRaises(NotImplementedError, zipfile.ZipFile,
                          io.BytesIO(data), 'r')
    @requires_zlib
    def test_read_unicode_filenames(self):
        # bug #10801
        fname = findfile('zip_cp437_header.zip')
        with zipfile.ZipFile(fname) as zipfp:
            for name in zipfp.namelist():
                zipfp.open(name).close()
    def test_write_unicode_filenames(self):
        with zipfile.ZipFile(TESTFN, "w") as zf:
            zf.writestr("foo.txt", "Test for unicode filename")
            zf.writestr("\xf6.txt", "Test for unicode filename")
            self.assertIsInstance(zf.infolist()[0].filename, str)
        with zipfile.ZipFile(TESTFN, "r") as zf:
            self.assertEqual(zf.filelist[0].filename, "foo.txt")
            self.assertEqual(zf.filelist[1].filename, "\xf6.txt")
    def test_create_non_existent_file_for_append(self):
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
        filename = 'testfile.txt'
        content = b'hello, world. this is some content.'
        try:
            with zipfile.ZipFile(TESTFN, 'a') as zf:
                zf.writestr(filename, content)
        except IOError:
            self.fail('Could not append data to a non-existent zip file.')
        self.assertTrue(os.path.exists(TESTFN))
        with zipfile.ZipFile(TESTFN, 'r') as zf:
            self.assertEqual(zf.read(filename), content)
    def test_close_erroneous_file(self):
        # This test checks that the ZipFile constructor closes the file object
        # it opens if there's an error in the file.  If it doesn't, the
        # traceback holds a reference to the ZipFile object and, indirectly,
        # the file object.
        # On Windows, this causes the os.unlink() call to fail because the
        # underlying file is still open.  This is SF bug #412214.
        #
        with open(TESTFN, "w") as fp:
            fp.write("this is not a legal zip file\n")
        try:
            zf = zipfile.ZipFile(TESTFN)
        except zipfile.BadZipFile:
            pass
    def test_is_zip_erroneous_file(self):
        """Check that is_zipfile() correctly identifies non-zip files."""
        # - passing a filename
        with open(TESTFN, "w") as fp:
            fp.write("this is not a legal zip file\n")
        self.assertFalse(zipfile.is_zipfile(TESTFN))
        # - passing a file object
        with open(TESTFN, "rb") as fp:
            self.assertFalse(zipfile.is_zipfile(fp))
        # - passing a file-like object
        fp = io.BytesIO()
        fp.write(b"this is not a legal zip file\n")
        self.assertFalse(zipfile.is_zipfile(fp))
        fp.seek(0, 0)
        self.assertFalse(zipfile.is_zipfile(fp))
    def test_damaged_zipfile(self):
        """Check that zipfiles with missing bytes at the end raise BadZipFile."""
        # - Create a valid zip file
        fp = io.BytesIO()
        with zipfile.ZipFile(fp, mode="w") as zipf:
            zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
        zipfiledata = fp.getvalue()
        # - Now create copies of it missing the last N bytes and make sure
        #   a BadZipFile exception is raised when we try to open it
        for N in range(len(zipfiledata)):
            fp = io.BytesIO(zipfiledata[:N])
            self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, fp)
    def test_is_zip_valid_file(self):
        """Check that is_zipfile() correctly identifies zip files."""
        # - passing a filename
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
        self.assertTrue(zipfile.is_zipfile(TESTFN))
        # - passing a file object
        with open(TESTFN, "rb") as fp:
            self.assertTrue(zipfile.is_zipfile(fp))
            fp.seek(0, 0)
            zip_contents = fp.read()
        # - passing a file-like object
        fp = io.BytesIO()
        fp.write(zip_contents)
        self.assertTrue(zipfile.is_zipfile(fp))
        fp.seek(0, 0)
        self.assertTrue(zipfile.is_zipfile(fp))
    def test_non_existent_file_raises_IOError(self):
        # make sure we don't raise an AttributeError when a partially-constructed
        # ZipFile instance is finalized; this tests for regression on SF tracker
        # bug #403871.
        # The bug we're testing for caused an AttributeError to be raised
        # when a ZipFile instance was created for a file that did not
        # exist; the .fp member was not initialized but was needed by the
        # __del__() method.  Since the AttributeError is in the __del__(),
        # it is ignored, but the user should be sufficiently annoyed by
        # the message on the output that regression will be noticed
        # quickly.
        self.assertRaises(IOError, zipfile.ZipFile, TESTFN)
    def test_empty_file_raises_BadZipFile(self):
        # Use context managers so the scratch file is always closed.
        with open(TESTFN, 'w'):
            pass
        self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
        with open(TESTFN, 'w') as fp:
            fp.write("short file")
        self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
    def test_closed_zip_raises_RuntimeError(self):
        """Verify that testzip() doesn't swallow inappropriate exceptions."""
        data = io.BytesIO()
        with zipfile.ZipFile(data, mode="w") as zipf:
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        # This is correct; calling .read on a closed ZipFile should raise
        # a RuntimeError, and so should calling .testzip.  An earlier
        # version of .testzip would swallow this exception (and any other)
        # and report that the first file in the archive was corrupt.
        self.assertRaises(RuntimeError, zipf.read, "foo.txt")
        self.assertRaises(RuntimeError, zipf.open, "foo.txt")
        self.assertRaises(RuntimeError, zipf.testzip)
        self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
        with open(TESTFN, 'w') as f:
            f.write('zipfile test data')
        self.assertRaises(RuntimeError, zipf.write, TESTFN)
    def test_bad_constructor_mode(self):
        """Check that bad modes passed to ZipFile constructor are caught."""
        self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
    def test_bad_open_mode(self):
        """Check that bad modes passed to ZipFile.open are caught."""
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="r") as zipf:
            # read the data to make sure the file is there
            zipf.read("foo.txt")
            self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
    def test_read0(self):
        """Check that calling read(0) on a ZipExtFile object returns an empty
        string and doesn't advance file pointer."""
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
            # read the data to make sure the file is there
            with zipf.open("foo.txt") as f:
                for i in range(FIXEDTEST_SIZE):
                    self.assertEqual(f.read(0), b'')
                self.assertEqual(f.read(), b"O, for a Muse of Fire!")
    def test_open_non_existent_item(self):
        """Check that attempting to call open() for an item that doesn't
        exist in the archive raises a RuntimeError."""
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
    def test_bad_compression_mode(self):
        """Check that bad compression methods passed to ZipFile.open are
        caught."""
        self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
    def test_unsupported_compression(self):
        # data is declared as shrunk, but actually deflated
        data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
                b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
                b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
                b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
                b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
                b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
        with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
            self.assertRaises(NotImplementedError, zipf.open, 'x')
    def test_null_byte_in_filename(self):
        """Check that a filename containing a null byte is properly
        terminated."""
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.writestr("foo.txt\x00qqq", b"O, for a Muse of Fire!")
            self.assertEqual(zipf.namelist(), ['foo.txt'])
    def test_struct_sizes(self):
        """Check that ZIP internal structure sizes are calculated correctly."""
        self.assertEqual(zipfile.sizeEndCentDir, 22)
        self.assertEqual(zipfile.sizeCentralDir, 46)
        self.assertEqual(zipfile.sizeEndCentDir64, 56)
        self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
    def test_comments(self):
        """Check that comments on the archive are handled properly."""
        # check default comment is empty
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            self.assertEqual(zipf.comment, b'')
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
            self.assertEqual(zipfr.comment, b'')
        # check a simple short comment
        comment = b'Bravely taking to his feet, he beat a very brave retreat.'
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.comment = comment
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
            # Bug fix: the original asserted zipf.comment (the attribute just
            # set on the *writer*) against itself; check the value actually
            # read back from disk instead.
            self.assertEqual(zipfr.comment, comment)
        # check a comment of max length
        comment2 = ''.join(['%d' % (i**3 % 10) for i in range((1 << 16)-1)])
        comment2 = comment2.encode("ascii")
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.comment = comment2
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
            self.assertEqual(zipfr.comment, comment2)
        # check a comment that is too long is truncated
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            with self.assertWarns(UserWarning):
                zipf.comment = comment2 + b'oops'
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
            self.assertEqual(zipfr.comment, comment2)
        # check that comments are correctly modified in append mode
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.comment = b"original comment"
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="a") as zipf:
            zipf.comment = b"an updated comment"
        with zipfile.ZipFile(TESTFN, mode="r") as zipf:
            self.assertEqual(zipf.comment, b"an updated comment")
        # check that comments are correctly shortened in append mode
        with zipfile.ZipFile(TESTFN, mode="w") as zipf:
            zipf.comment = b"original comment that's longer"
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, mode="a") as zipf:
            zipf.comment = b"shorter comment"
        with zipfile.ZipFile(TESTFN, mode="r") as zipf:
            self.assertEqual(zipf.comment, b"shorter comment")
    def test_unicode_comment(self):
        # The comment attribute only accepts bytes, never str.
        with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
            with self.assertRaises(TypeError):
                zipf.comment = "this is an error"
    def test_change_comment_in_empty_archive(self):
        with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
            self.assertFalse(zipf.filelist)
            zipf.comment = b"this is a comment"
        with zipfile.ZipFile(TESTFN, "r") as zipf:
            self.assertEqual(zipf.comment, b"this is a comment")
    def test_change_comment_in_nonempty_archive(self):
        with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
            zipf.writestr("foo.txt", "O, for a Muse of Fire!")
        with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
            self.assertTrue(zipf.filelist)
            zipf.comment = b"this is a comment"
        with zipfile.ZipFile(TESTFN, "r") as zipf:
            self.assertEqual(zipf.comment, b"this is a comment")
    def test_empty_zipfile(self):
        # Check that creating a file in 'w' or 'a' mode and closing without
        # adding any files to the archives creates a valid empty ZIP file
        zipf = zipfile.ZipFile(TESTFN, mode="w")
        zipf.close()
        try:
            zipf = zipfile.ZipFile(TESTFN, mode="r")
        except zipfile.BadZipFile:
            self.fail("Unable to create empty ZIP file in 'w' mode")
        else:
            # Close the handle so the file can be unlinked on Windows.
            zipf.close()
        zipf = zipfile.ZipFile(TESTFN, mode="a")
        zipf.close()
        try:
            zipf = zipfile.ZipFile(TESTFN, mode="r")
        except zipfile.BadZipFile:
            # Narrowed from a bare "except:" (which would also swallow
            # KeyboardInterrupt/SystemExit) to match the 'w'-mode branch.
            self.fail("Unable to create empty ZIP file in 'a' mode")
        else:
            zipf.close()
    def test_open_empty_file(self):
        # Issue 1710703: Check that opening a file with less than 22 bytes
        # raises a BadZipFile exception (rather than the previously unhelpful
        # IOError)
        with open(TESTFN, 'w'):
            pass
        self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN, 'r')
    def test_create_zipinfo_before_1980(self):
        # The ZIP format cannot represent timestamps before 1980.
        self.assertRaises(ValueError,
                          zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
    def tearDown(self):
        unlink(TESTFN)
        unlink(TESTFN2)
class AbstractBadCrcTests:
    """Shared tests run against pre-built archives (supplied by subclasses
    as ``zip_with_bad_crc``) whose member 'afile' has a corrupted CRC."""
    def test_testzip_with_bad_crc(self):
        """Tests that files with bad CRCs return their name from testzip."""
        zipdata = self.zip_with_bad_crc
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            # testzip returns the name of the first corrupt file, or None
            self.assertEqual('afile', zipf.testzip())
    def test_read_with_bad_crc(self):
        """Tests that files with bad CRCs raise a BadZipFile exception when read."""
        zipdata = self.zip_with_bad_crc
        # Using ZipFile.read()
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            self.assertRaises(zipfile.BadZipFile, zipf.read, 'afile')
        # Using ZipExtFile.read()
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            with zipf.open('afile', 'r') as corrupt_file:
                self.assertRaises(zipfile.BadZipFile, corrupt_file.read)
        # Same with small reads (in order to exercise the buffering logic)
        with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
            with zipf.open('afile', 'r') as corrupt_file:
                corrupt_file.MIN_READ_SIZE = 2
                with self.assertRaises(zipfile.BadZipFile):
                    while corrupt_file.read(2):
                        pass
class StoredBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_STORED
    # Pre-generated stored (uncompressed) archive whose member 'afile'
    # carries a deliberately wrong CRC.  Do not modify these bytes.
    zip_with_bad_crc = (
        b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
        b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
        b'ilehello,AworldP'
        b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
        b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
        b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
        b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
        b'\0\0/\0\0\0\0\0')
@requires_zlib
class DeflateBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_DEFLATED
    # Pre-generated deflate-compressed archive with a corrupt CRC for
    # member 'afile'.  Do not modify these bytes.
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
        b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
        b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
        b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
        b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
        b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00')
@requires_bz2
class Bzip2BadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_BZIP2
    # Pre-generated bzip2-compressed archive with a corrupt CRC for
    # member 'afile'.  Do not modify these bytes.
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x03\x00\x00\x0c\x00nu\x0c=FA'
        b'KE8\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ileBZh91AY&SY\xd4\xa8\xca'
        b'\x7f\x00\x00\x0f\x11\x80@\x00\x06D\x90\x80 \x00 \xa5'
        b'P\xd9!\x03\x03\x13\x13\x13\x89\xa9\xa9\xc2u5:\x9f'
        b'\x8b\xb9"\x9c(HjTe?\x80PK\x01\x02\x14'
        b'\x03\x14\x03\x00\x00\x0c\x00nu\x0c=FAKE8'
        b'\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00'
        b'\x00 \x80\x80\x81\x00\x00\x00\x00afilePK'
        b'\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00[\x00'
        b'\x00\x00\x00\x00')
@requires_lzma
class LzmaBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
    compression = zipfile.ZIP_LZMA
    # Pre-generated LZMA-compressed archive with a corrupt CRC for
    # member 'afile'.  Do not modify these bytes.
    zip_with_bad_crc = (
        b'PK\x03\x04\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
        b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
        b'ile\t\x04\x05\x00]\x00\x00\x00\x04\x004\x19I'
        b'\xee\x8d\xe9\x17\x89:3`\tq!.8\x00PK'
        b'\x01\x02\x14\x03\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
        b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00'
        b'\x00\x00\x00\x00 \x80\x80\x81\x00\x00\x00\x00afil'
        b'ePK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00'
        b'\x00>\x00\x00\x00\x00\x00')
class DecryptionTests(unittest.TestCase):
    """Check that ZIP decryption works. Since the library does not
    support encryption at the moment, we use a pre-generated encrypted
    ZIP file."""
    # Archive containing 'test.txt' encrypted with password b"python".
    data = (
        b'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
        b'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
        b'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
        b'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
        b'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
        b'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
        b'\x00\x00L\x00\x00\x00\x00\x00' )
    # Archive containing 'zero' (512 NUL bytes, deflated) encrypted with
    # password b"12345".
    data2 = (
        b'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
        b'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
        b'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
        b'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
        b'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
        b'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
        b'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
        b'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
    # Expected plaintexts for the two archives above.
    plain = b'zipfile.py encryption test'
    plain2 = b'\x00'*512
    def setUp(self):
        # Materialize both pre-generated archives and open them for reading.
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)
        self.zip = zipfile.ZipFile(TESTFN, "r")
        with open(TESTFN2, "wb") as fp:
            fp.write(self.data2)
        self.zip2 = zipfile.ZipFile(TESTFN2, "r")
    def tearDown(self):
        self.zip.close()
        os.unlink(TESTFN)
        self.zip2.close()
        os.unlink(TESTFN2)
    def test_no_password(self):
        # Reading the encrypted file without password
        # must generate a RunTime exception
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")
    def test_bad_password(self):
        self.zip.setpassword(b"perl")
        self.assertRaises(RuntimeError, self.zip.read, "test.txt")
        self.zip2.setpassword(b"perl")
        self.assertRaises(RuntimeError, self.zip2.read, "zero")
    @requires_zlib
    def test_good_password(self):
        self.zip.setpassword(b"python")
        self.assertEqual(self.zip.read("test.txt"), self.plain)
        self.zip2.setpassword(b"12345")
        self.assertEqual(self.zip2.read("zero"), self.plain2)
    def test_unicode_password(self):
        # Passwords must be bytes, not str, everywhere they can be passed.
        self.assertRaises(TypeError, self.zip.setpassword, "unicode")
        self.assertRaises(TypeError, self.zip.read, "test.txt", "python")
        self.assertRaises(TypeError, self.zip.open, "test.txt", pwd="python")
        self.assertRaises(TypeError, self.zip.extract, "test.txt", pwd="python")
class AbstractTestsWithRandomBinaryFiles:
    """Round-trip tests using a randomly generated binary payload; subclasses
    supply the ``compression`` attribute."""
    @classmethod
    def setUpClass(cls):
        # Random payload: 16-64 KiB of packed random floats plus an odd tail.
        datacount = randint(16, 64)*1024 + randint(1, 1024)
        cls.data = b''.join(struct.pack('<f', random()*randint(-1000, 1000))
                            for i in range(datacount))
    def setUp(self):
        # Make a source file with some lines
        with open(TESTFN, "wb") as fp:
            fp.write(self.data)
    def tearDown(self):
        unlink(TESTFN)
        unlink(TESTFN2)
    def make_test_archive(self, f, compression):
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            zipfp.write(TESTFN, "another.name")
            zipfp.write(TESTFN, TESTFN)
    def zip_test(self, f, compression):
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            testdata = zipfp.read(TESTFN)
            self.assertEqual(len(testdata), len(self.data))
            self.assertEqual(testdata, self.data)
            self.assertEqual(zipfp.read("another.name"), self.data)
    def test_read(self):
        for f in get_files(self):
            self.zip_test(f, self.compression)
    def zip_open_test(self, f, compression):
        # Same round-trip, but streaming through open() in fixed 256-byte
        # chunks instead of a single read().
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            zipdata1 = []
            with zipfp.open(TESTFN) as zipopen1:
                while True:
                    read_data = zipopen1.read(256)
                    if not read_data:
                        break
                    zipdata1.append(read_data)
            zipdata2 = []
            with zipfp.open("another.name") as zipopen2:
                while True:
                    read_data = zipopen2.read(256)
                    if not read_data:
                        break
                    zipdata2.append(read_data)
            testdata1 = b''.join(zipdata1)
            self.assertEqual(len(testdata1), len(self.data))
            self.assertEqual(testdata1, self.data)
            testdata2 = b''.join(zipdata2)
            self.assertEqual(len(testdata2), len(self.data))
            self.assertEqual(testdata2, self.data)
    def test_open(self):
        for f in get_files(self):
            self.zip_open_test(f, self.compression)
    def zip_random_open_test(self, f, compression):
        # Same round-trip, but with randomly sized reads to exercise the
        # decompressor's buffering.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r", compression) as zipfp:
            zipdata1 = []
            with zipfp.open(TESTFN) as zipopen1:
                while True:
                    read_data = zipopen1.read(randint(1, 1024))
                    if not read_data:
                        break
                    zipdata1.append(read_data)
            testdata = b''.join(zipdata1)
            self.assertEqual(len(testdata), len(self.data))
            self.assertEqual(testdata, self.data)
    def test_random_open(self):
        for f in get_files(self):
            self.zip_random_open_test(f, self.compression)
class StoredTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                       unittest.TestCase):
    # Random-binary round-trip tests, uncompressed.
    compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                        unittest.TestCase):
    # Random-binary round-trip tests with zlib (deflate) compression.
    compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                      unittest.TestCase):
    # Random-binary round-trip tests with bzip2 compression.
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
                                     unittest.TestCase):
    # Random-binary round-trip tests with LZMA compression.
    compression = zipfile.ZIP_LZMA
@requires_zlib
class TestsWithMultipleOpens(unittest.TestCase):
    """Verify that several readers opened from one ZipFile do not clobber
    each other's decompression state."""
    def setUp(self):
        # Create the ZIP archive with two distinguishable members.
        with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED) as zipfp:
            zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
            zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)
    def test_same_file(self):
        # Verify that (when the ZipFile is in control of creating file objects)
        # multiple open() calls can be made without interfering with each other.
        with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
            with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2:
                data1 = zopen1.read(500)
                data2 = zopen2.read(500)
                data1 += zopen1.read(500)
                data2 += zopen2.read(500)
            self.assertEqual(data1, data2)
    def test_different_file(self):
        # Two readers over two *different* members must not interfere.
        with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
            with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
                data1 = zopen1.read(500)
                data2 = zopen2.read(500)
                data1 += zopen1.read(500)
                data2 += zopen2.read(500)
            self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
            self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)
    def test_interleaved(self):
        # BUG FIX: this test was a byte-for-byte copy of test_different_file
        # and never actually interleaved the open() calls.  Open the second
        # member while the first is partially read, as the name promises.
        with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
            with zipf.open('ones') as zopen1:
                data1 = zopen1.read(500)
                with zipf.open('twos') as zopen2:
                    data2 = zopen2.read(500)
                    data1 += zopen1.read(500)
                    data2 += zopen2.read(500)
            self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
            self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)
    def tearDown(self):
        unlink(TESTFN2)
class TestWithDirectory(unittest.TestCase):
    """Extraction/creation of archives containing directory entries."""
    def setUp(self):
        os.mkdir(TESTFN2)
    def test_extract_dir(self):
        # Extracting zipdir.zip must recreate its nested directory tree.
        with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
            zipf.extractall(TESTFN2)
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
        self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
        self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
    def test_bug_6050(self):
        # Extraction should succeed if directories already exist
        os.mkdir(os.path.join(TESTFN2, "a"))
        self.test_extract_dir()
    def test_store_dir(self):
        os.mkdir(os.path.join(TESTFN2, "x"))
        # BUG FIX: the ZipFile was previously never closed, leaking the file
        # handle and leaving an unfinalized archive (ResourceWarning).
        with zipfile.ZipFile(TESTFN, "w") as zipf:
            zipf.write(os.path.join(TESTFN2, "x"), "x")
            # Directory entries are stored with a trailing slash.
            self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
    def tearDown(self):
        shutil.rmtree(TESTFN2)
        if os.path.exists(TESTFN):
            unlink(TESTFN)
class AbstractUniversalNewlineTests:
    """Round-trip files written with \r, \r\n and \n line separators and
    check that reading archive members with mode "rU" normalizes every
    separator to b'\n'.

    NOTE(review): the "rU" open mode is legacy universal-newline support —
    confirm it is still accepted by the targeted zipfile version.
    """
    @classmethod
    def setUpClass(cls):
        # One payload per separator: the same lines joined (and terminated)
        # by that separator.
        cls.line_gen = [bytes("Test of zipfile line %d." % i, "ascii")
                        for i in range(FIXEDTEST_SIZE)]
        cls.seps = (b'\r', b'\r\n', b'\n')
        cls.arcdata = {}
        for n, s in enumerate(cls.seps):
            cls.arcdata[s] = s.join(cls.line_gen) + s
    def setUp(self):
        # Write one source file on disk per separator; keyed by separator.
        self.arcfiles = {}
        for n, s in enumerate(self.seps):
            self.arcfiles[s] = '%s-%d' % (TESTFN, n)
            with open(self.arcfiles[s], "wb") as f:
                f.write(self.arcdata[s])
    def make_test_archive(self, f, compression):
        # Create the ZIP archive
        with zipfile.ZipFile(f, "w", compression) as zipfp:
            for fn in self.arcfiles.values():
                zipfp.write(fn, fn)
    def read_test(self, f, compression):
        # read() on an "rU" stream must return newline-normalized data.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    zipdata = fp.read()
                self.assertEqual(self.arcdata[sep], zipdata)
    def test_read(self):
        for f in get_files(self):
            self.read_test(f, self.compression)
    def readline_read_test(self, f, compression):
        # Alternate readline() and short read() calls; the concatenation must
        # equal the \n-separated payload regardless of the source separator.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as zipopen:
                    data = b''
                    while True:
                        read = zipopen.readline()
                        if not read:
                            break
                        data += read
                        read = zipopen.read(5)
                        if not read:
                            break
                        data += read
            self.assertEqual(data, self.arcdata[b'\n'])
    def test_readline_read(self):
        for f in get_files(self):
            self.readline_read_test(f, self.compression)
    def readline_test(self, f, compression):
        # Every readline() must yield the original line plus b'\n'.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as zipopen:
                    for line in self.line_gen:
                        linedata = zipopen.readline()
                        self.assertEqual(linedata, line + b'\n')
    def test_readline(self):
        for f in get_files(self):
            self.readline_test(f, self.compression)
    def readlines_test(self, f, compression):
        # readlines() must produce the same normalized lines in one call.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    ziplines = fp.readlines()
                for line, zipline in zip(self.line_gen, ziplines):
                    self.assertEqual(zipline, line + b'\n')
    def test_readlines(self):
        for f in get_files(self):
            self.readlines_test(f, self.compression)
    def iterlines_test(self, f, compression):
        # Iterating the stream directly must also yield normalized lines.
        self.make_test_archive(f, compression)
        # Read the ZIP archive
        with zipfile.ZipFile(f, "r") as zipfp:
            for sep, fn in self.arcfiles.items():
                with zipfp.open(fn, "rU") as fp:
                    for line, zipline in zip(self.line_gen, fp):
                        self.assertEqual(zipline, line + b'\n')
    def test_iterlines(self):
        for f in get_files(self):
            self.iterlines_test(f, self.compression)
    def tearDown(self):
        # Remove the per-separator source files and both archive paths.
        for sep, fn in self.arcfiles.items():
            os.remove(fn)
        unlink(TESTFN)
        unlink(TESTFN2)
class StoredUniversalNewlineTests(AbstractUniversalNewlineTests,
                                  unittest.TestCase):
    # Universal-newline suite without compression.
    compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateUniversalNewlineTests(AbstractUniversalNewlineTests,
                                   unittest.TestCase):
    # Universal-newline suite with deflate compression.
    compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2UniversalNewlineTests(AbstractUniversalNewlineTests,
                                 unittest.TestCase):
    # Universal-newline suite with bzip2 compression.
    compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaUniversalNewlineTests(AbstractUniversalNewlineTests,
                                unittest.TestCase):
    # Universal-newline suite with LZMA compression.
    compression = zipfile.ZIP_LZMA
if __name__ == "__main__":
    # Allow running this test module directly with the default test runner.
    unittest.main()
| 39.36534 | 84 | 0.59016 |
a837c3aca2060a2d91a4a4b5fc84cb69ecf44c08 | 7,299 | py | Python | saleor/checkout/models.py | saurabh1e/saleor | adf8f17cb7629f177af9f941dd3090376d986f13 | [
"BSD-3-Clause"
] | 1 | 2022-02-28T02:24:34.000Z | 2022-02-28T02:24:34.000Z | saleor/checkout/models.py | saurabh1e/saleor | adf8f17cb7629f177af9f941dd3090376d986f13 | [
"BSD-3-Clause"
] | null | null | null | saleor/checkout/models.py | saurabh1e/saleor | adf8f17cb7629f177af9f941dd3090376d986f13 | [
"BSD-3-Clause"
] | null | null | null | """Checkout-related ORM models."""
from decimal import Decimal
from operator import attrgetter
from uuid import uuid4
from django.conf import settings
from django.contrib.postgres.fields import JSONField
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django_prices.models import MoneyField
from ..account.models import Address
from ..core.models import ModelWithMetadata
from ..core.taxes import zero_money
from ..core.weight import zero_weight
from ..giftcard.models import GiftCard
from ..shipping.models import ShippingMethod
# Quantization step used by CheckoutLine.get_total() to round to whole cents.
CENTS = Decimal("0.01")
class CheckoutQueryset(models.QuerySet):
    """A specialized queryset for dealing with checkouts."""

    def for_display(self):
        """Annotate the queryset for display purposes.

        Prefetches additional data from the database to avoid the n+1 queries
        problem.
        """
        # Prefetch every relation touched when rendering checkout lines:
        # variant/product translations, product images and attribute values.
        return self.prefetch_related(
            "lines__variant__translations",
            "lines__variant__product__translations",
            "lines__variant__product__images",
            "lines__variant__product__product_type__product_attributes__values",
        )  # noqa
class Checkout(ModelWithMetadata):
    """A shopping checkout."""

    created = models.DateTimeField(auto_now_add=True)
    # NOTE(review): ``auto_now_add`` only sets this at creation time — if the
    # field is meant to track modifications it must be updated elsewhere;
    # confirm against the saving code paths.
    last_change = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        blank=True,
        null=True,
        related_name="checkouts",
        on_delete=models.CASCADE,
    )
    email = models.EmailField()
    # The checkout is addressed by a random UUID token, not an integer PK.
    token = models.UUIDField(primary_key=True, default=uuid4, editable=False)
    quantity = models.PositiveIntegerField(default=0)
    billing_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    shipping_address = models.ForeignKey(
        Address, related_name="+", editable=False, null=True, on_delete=models.SET_NULL
    )
    shipping_method = models.ForeignKey(
        ShippingMethod,
        blank=True,
        null=True,
        related_name="checkouts",
        on_delete=models.SET_NULL,
    )
    note = models.TextField(blank=True, default="")
    discount_amount = MoneyField(
        currency=settings.DEFAULT_CURRENCY,
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES,
        default=zero_money,
    )
    discount_name = models.CharField(max_length=255, blank=True, null=True)
    translated_discount_name = models.CharField(max_length=255, blank=True, null=True)
    voucher_code = models.CharField(max_length=12, blank=True, null=True)
    gift_cards = models.ManyToManyField(GiftCard, blank=True, related_name="checkouts")
    objects = CheckoutQueryset.as_manager()

    class Meta:
        ordering = ("-last_change",)

    def __repr__(self):
        return "Checkout(quantity=%s)" % (self.quantity,)

    def __iter__(self):
        # Iterating a checkout yields its lines.
        return iter(self.lines.all())

    def __len__(self):
        # Number of lines (not total item quantity).
        return self.lines.count()

    def get_customer_email(self):
        """Return the account email if logged in, else the guest email."""
        return self.user.email if self.user else self.email

    def is_shipping_required(self):
        """Return `True` if any of the lines requires shipping."""
        return any(line.is_shipping_required() for line in self)

    def get_shipping_price(self):
        """Return the shipping cost, or zero when no shipping is needed."""
        return (
            self.shipping_method.get_total()
            if self.shipping_method and self.is_shipping_required()
            else zero_money()
        )

    def get_subtotal(self, discounts=None):
        """Return the total cost of the checkout prior to shipping."""
        subtotals = (line.get_total(discounts) for line in self)
        return sum(subtotals, zero_money(currency=settings.DEFAULT_CURRENCY))

    def get_total(self, discounts=None):
        """Return the total cost of the checkout."""
        total = (
            self.get_subtotal(discounts)
            + self.get_shipping_price()
            - self.discount_amount
        )
        # Discounts must never push the total below zero.
        return max(total, zero_money(total.currency))

    def get_total_gift_cards_balance(self):
        """Return the total balance of the gift cards assigned to the checkout."""
        balance = self.gift_cards.aggregate(models.Sum("current_balance"))[
            "current_balance__sum"
        ]
        return balance or zero_money(currency=settings.DEFAULT_CURRENCY)

    def get_total_weight(self):
        # Cannot use `sum` as it parses an empty Weight to an int
        weights = zero_weight()
        for line in self:
            weights += line.variant.get_weight() * line.quantity
        return weights

    def get_line(self, variant):
        """Return a line matching the given variant and data if any."""
        matching_lines = (line for line in self if line.variant.pk == variant.pk)
        return next(matching_lines, None)

    def get_last_active_payment(self):
        """Return the active payment with the highest pk, or None."""
        payments = [payment for payment in self.payments.all() if payment.is_active]
        return max(payments, default=None, key=attrgetter("pk"))
class CheckoutShipping(models.Model):
    """Delivery date and time slot attached one-to-one to a checkout."""
    delivery_date = models.DateField(default=now)
    time_slot = models.CharField(max_length=64, null=True)
    # NOTE(review): ``unique=True`` is redundant — a OneToOneField is unique
    # by definition.
    checkout = models.OneToOneField(Checkout, on_delete=models.CASCADE, unique=True)
class CheckoutLine(models.Model):
    """A single checkout line.

    Multiple lines in the same checkout can refer to the same product variant if
    their `data` field is different.
    """

    checkout = models.ForeignKey(
        Checkout, related_name="lines", on_delete=models.CASCADE
    )
    variant = models.ForeignKey(
        "product.ProductVariant", related_name="+", on_delete=models.CASCADE
    )
    quantity = models.PositiveIntegerField(validators=[MinValueValidator(1)])
    data = JSONField(blank=True, default=dict)

    class Meta:
        unique_together = ("checkout", "variant", "data")
        ordering = ("id",)

    def __str__(self):
        return smart_str(self.variant)

    # Keep the default model hash even though __eq__ is overridden below.
    __hash__ = models.Model.__hash__

    def __eq__(self, other):
        # NOTE(review): equality compares only variant and quantity — the
        # ``data`` and ``checkout`` fields are ignored; confirm this is
        # intentional.
        if not isinstance(other, CheckoutLine):
            return NotImplemented
        return self.variant == other.variant and self.quantity == other.quantity

    def __ne__(self, other):
        # NOTE(review): when __eq__ returns NotImplemented this evaluates
        # ``not NotImplemented`` instead of delegating to the other operand;
        # Python 3's default __ne__ would handle that case correctly.
        return not self == other  # pragma: no cover

    def __repr__(self):
        return "CheckoutLine(variant=%r, quantity=%r)" % (self.variant, self.quantity)

    def __getstate__(self):
        # Custom pickling: only variant and quantity are preserved.
        return self.variant, self.quantity

    def __setstate__(self, data):
        self.variant, self.quantity = data

    def get_total(self, discounts=None):
        """Return the total price of this line."""
        amount = self.quantity * self.variant.get_price(discounts)
        # Round to whole cents (see module-level CENTS).
        return amount.quantize(CENTS)

    def is_shipping_required(self):
        """Return `True` if the related product variant requires shipping."""
        return self.variant.is_shipping_required()
class CheckoutDeliverySchedule(models.Model):
    """A delivery date/time-slot entry for a checkout (many per checkout)."""
    delivery_date = models.DateField()
    time_slot = models.CharField(max_length=64)
    checkout = models.ForeignKey(
        Checkout, related_name="checkout_delivery_date", editable=False, null=False, on_delete=models.CASCADE
    )
4ddcdf92c59ee3155948b54e565cfa77ecf05b85 | 4,047 | py | Python | backend/src/models/Inventory.py | ahmedsalahacc/Inventory-Management-System | 192160548bc538a334f7a35041c77013d759e473 | [
"MIT"
] | null | null | null | backend/src/models/Inventory.py | ahmedsalahacc/Inventory-Management-System | 192160548bc538a334f7a35041c77013d759e473 | [
"MIT"
] | null | null | null | backend/src/models/Inventory.py | ahmedsalahacc/Inventory-Management-System | 192160548bc538a334f7a35041c77013d759e473 | [
"MIT"
] | null | null | null | from models import BaseModel, gen_id
class InventoryModel(BaseModel):
    '''
    ORM for Inventory table with the following structure

    inventory(
        id CHARACTER(10) NOT NULL PRIMARY KEY,
        name TEXT NOT NULL,
        desc TEXT NULL,
        warehouse_id CHARACTER(10) NULL,
    )
    '''

    def __init__(self, db_filename: str):
        super(InventoryModel, self).__init__(db_filename)

    def insert(self, data_tuple: tuple):
        '''
        Inserts a new record in inventory table

        Parameters
        ----------
        data_tuple: tuple
            tuple of new values (name, desc, warehouse_id)

        Returns
        -------
        id: str
            generated id of the inserted record
        '''
        # aquiring cursor
        cursor = self.conn.cursor()
        # sql script
        sql_script = '''
        INSERT INTO inventory VALUES (?, ?, ?, ?)
        '''
        # executing script; the id is generated here, not by the caller
        id = gen_id()
        data_tuple = (id, *data_tuple)
        cursor.execute(sql_script, data_tuple)
        self.conn.commit()
        # conceding cursor
        cursor.close()
        return id

    def delete(self, id: str):
        '''
        Deletes a record from inventory table

        Parameters
        ----------
        id: str
            id of the record in the db
        '''
        # aquiring cursor
        cursor = self.conn.cursor()
        # sql script
        sql_script = '''
        DELETE FROM inventory WHERE id = ?
        '''
        # executing script
        cursor.execute(sql_script, (id,))
        self.conn.commit()
        # conceding cursor
        cursor.close()

    def update(self, id: str, new_data: tuple):
        '''
        Updates a record of the inventory table using id

        Parameters
        ----------
        id: str
            id of the record in the db
        new_data: tuple
            tuple of new values (name, desc, warehouse_id)
        '''
        # aquiring cursor
        cursor = self.conn.cursor()
        # sql script
        sql_script = '''
        UPDATE inventory
        SET name = ?,
            desc = ?,
            warehouse_id = ?
        WHERE id=?
        '''
        # executing script
        new_data = (*new_data, id)
        cursor.execute(sql_script, new_data)
        self.conn.commit()
        # conceding cursor
        cursor.close()

    def getByID(self, id: str):
        '''
        gets a record from the inventory table using id

        Parameters
        ----------
        id: str
            id of the record in the db

        Returns
        -------
        query: tuple
            represents the result
        '''
        # aquiring cursor
        cursor = self.conn.cursor()
        # sql script
        sql_script = '''
        SELECT inventory.id, inventory.name,
            inventory.desc, inventory.warehouse_id,
            warehouse.name, warehouse.location
        FROM inventory JOIN warehouse
        ON inventory.warehouse_id = warehouse.id
        WHERE inventory.id = ?
        '''
        # executing script
        cursor.execute(sql_script, (id,))
        query = cursor.fetchone()
        # conceding cursor
        cursor.close()
        return query

    def getAll(self, order: str = 'ASC'):
        '''
        gets all records from the inventory table

        Parameters
        ----------
        order: str Default = 'ASC'
            arrangement of the returned query
            ASC: ascending order
            DESC: descending order

        Returns
        -------
        query: list
            results list

        Raises
        ------
        ValueError
            if order is neither 'ASC' nor 'DESC'
        '''
        # SECURITY FIX: ``order`` was interpolated into the SQL string
        # unchecked (SQL injection); whitelist it before formatting.
        order = order.upper()
        if order not in ('ASC', 'DESC'):
            raise ValueError("order must be 'ASC' or 'DESC'")
        # aquiring cursor
        cursor = self.conn.cursor()
        # sql script
        sql_script = f'''
        SELECT inventory.id, inventory.name,
            inventory.desc, inventory.warehouse_id,
            warehouse.name
        FROM inventory JOIN warehouse
        ON inventory.warehouse_id = warehouse.id
        ORDER BY inventory.name {order}
        '''
        # executing script
        cursor.execute(sql_script)
        query = cursor.fetchall()
        # conceding cursor
        cursor.close()
        return query
| 22.994318 | 58 | 0.522856 |
a87ebd4a14c927451fd7a199531afdcc23370c25 | 1,150 | py | Python | test/rpc/test_run_sql.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 1 | 2020-11-18T21:25:53.000Z | 2020-11-18T21:25:53.000Z | test/rpc/test_run_sql.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 50 | 2021-11-02T06:20:50.000Z | 2022-03-31T06:23:16.000Z | test/rpc/test_run_sql.py | jankytara2/dbt | 3f4069ab6d4d5b3fc34f8fe785761b5617357b0f | [
"Apache-2.0"
] | 1 | 2021-02-01T17:54:24.000Z | 2021-02-01T17:54:24.000Z | import pytest
from .util import (
get_querier,
ProjectDefinition,
)
@pytest.mark.supported('any')
def test_rpc_run_sql_nohang(
    project_root, profiles_root, dbt_profile, unique_schema
):
    # Regression test: a simple `run_sql` over the RPC server must complete
    # (i.e. not hang) on any adapter, using a one-model project.
    project = ProjectDefinition(
        models={'my_model.sql': 'select 1 as id'}
    )
    querier_ctx = get_querier(
        project_def=project,
        project_dir=project_root,
        profiles_dir=profiles_root,
        schema=unique_schema,
        test_kwargs={},
    )
    with querier_ctx as querier:
        # Block until the async RPC result arrives; hanging fails the test.
        querier.async_wait_for_result(querier.run_sql('select 1 as id'))
@pytest.mark.supported('snowflake')
def test_snowflake_rpc_run_sql_keepalive_nohang(
    project_root, profiles_root, dbt_profile, unique_schema
):
    # Same nohang check, but against the Snowflake 'keepalives' target to
    # exercise connections with client session keep-alive enabled.
    project = ProjectDefinition(
        models={'my_model.sql': 'select 1 as id'}
    )
    querier_ctx = get_querier(
        project_def=project,
        project_dir=project_root,
        profiles_dir=profiles_root,
        schema=unique_schema,
        test_kwargs={},
        target='keepalives',
    )
    with querier_ctx as querier:
        querier.async_wait_for_result(querier.run_sql('select 1 as id'))
| 26.136364 | 72 | 0.681739 |
53f5e7b4ee8d2145966b14c0eddc6b62bf6546c1 | 495 | py | Python | 0433 Minimum Distance of Two Words in a Sentence.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 1 | 2020-12-29T21:17:26.000Z | 2020-12-29T21:17:26.000Z | 0433 Minimum Distance of Two Words in a Sentence.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | null | null | null | 0433 Minimum Distance of Two Words in a Sentence.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 4 | 2021-09-09T17:42:43.000Z | 2022-03-18T04:54:03.000Z | class Solution:
def solve(self, text, word0, word1):
words = text.split()
if word0 not in words or word1 not in words: return -1
last0, last1 = -inf,-inf
ans = inf
for i in range(len(words)):
word = words[i]
if word == word0:
last0 = i
ans = min(ans, i-last1-1)
if word == word1:
last1 = i
ans = min(ans, i-last0-1)
return ans
| 24.75 | 62 | 0.444444 |
be9a84fe5e8c7ce1fe44cb757265bae1f88ab899 | 2,237 | py | Python | mod_archive.py | nugycal/SomethingAwesome | cadfef61545df48b08061d5a4d3236c436a44feb | [
"MIT"
] | 1 | 2020-04-13T05:30:20.000Z | 2020-04-13T05:30:20.000Z | mod_archive.py | nugycal/SomethingAwesome | cadfef61545df48b08061d5a4d3236c436a44feb | [
"MIT"
] | 2 | 2021-06-08T21:17:26.000Z | 2022-01-13T02:33:04.000Z | mod_archive.py | nugycal/SomethingAwesome | cadfef61545df48b08061d5a4d3236c436a44feb | [
"MIT"
] | null | null | null | from Module import Module
from zipfile import ZipFile
import tempfile
from os import access, R_OK
# Registry of Module entries exported to the plugin framework
# (populated by the modules.append(...) calls at the bottom of this file).
modules = []
def zip_files(files):
    """Create a new temporary Zip archive containing the given files.

    ``files`` is a '|'-separated list of file paths; entries that cannot be
    read are skipped with a message.  Returns the path of the new archive.
    """
    import os
    # BUG FIX: tempfile.mkstemp() returns an *open* file descriptor which was
    # previously leaked, and the suffix-less temp file was left behind (the
    # archive is written to base + ".zip", not to base itself).
    fd, base = tempfile.mkstemp()
    os.close(fd)
    os.remove(base)
    filename = base + ".zip"
    zip = ZipFile(filename, "w")
    file_list = [f.strip() for f in files.split("|")]
    for f in file_list:
        if access(f, R_OK):
            try:
                zip.write(f)
            except Exception as e:
                print(e)
                continue
        else:
            print(f"Unable to access file {f}.")
            continue
    zip.close()
    return filename
def zip_files_existing(f, options):
    """Append file ``f`` to the existing archive at ``options[0]``.

    ``options[1]`` (optional) is a password.  Note that the zipfile module
    cannot create encrypted entries; setpassword() only affects extraction
    performed through this handle.  Returns ``f``.
    """
    zip = ZipFile(options[0], "a")
    if len(options) > 1 and options[1].strip() != "":
        # BUG FIX: ZipFile() takes no ``pwd`` keyword argument — the old call
        # raised TypeError.  setpassword() is the supported API and requires
        # bytes.
        zip.setpassword(options[1].encode())
    try:
        zip.write(f)
    except Exception as e:
        print(e)
    zip.close()
    return f
def add_comment(zip, comment):
    """Attach ``comment`` (a str) to the archive at path ``zip``; return the path."""
    with ZipFile(zip, "a") as archive:
        archive.comment = comment.encode()
    return zip
def add_comment_flag(flag, zip):
    # Plugin entry point: embed the flag string as the comment of an
    # existing archive (zip[0] holds the archive path from the options).
    return add_comment(zip[0], flag)
def add_comment_generated(zip, text):
    # Plugin entry point: attach the user-supplied comment (text[0]) to the
    # generated archive at path ``zip``.
    return add_comment(zip, text[0])
def add_password(zip, passwd):
    """Set the default extraction password on the archive at path ``zip``.

    NOTE: the zipfile module cannot encrypt written members; setpassword()
    only applies to reads done through this (immediately closed) handle, so
    this is best-effort at most.  Returns the archive path.
    """
    archive = ZipFile(zip, "a")
    if isinstance(passwd, str):
        # BUG FIX: ZipFile.setpassword() requires bytes; passing the str that
        # callers supply raised TypeError.
        passwd = passwd.encode()
    archive.setpassword(passwd)
    archive.close()
    return zip
def add_password_flag(flag, zip):
    # Plugin entry point: use the flag as the password for an existing
    # archive (zip[0] holds the archive path from the options).
    return add_password(zip[0], flag)
def add_password_gen(zip, text):
    # Plugin entry point: set the user-supplied password (text[0]) on the
    # generated archive at path ``zip``.
    return add_password(zip, text[0])
# Register the archive helpers with the plugin framework.
# BUG FIX: the two password entries were swapped — the "existing archive"
# action was wired to the generated-archive handler and vice versa.  Compare
# the handler signatures with the comment entries above, which are wired
# correctly (flag handlers take (flag, options); generated handlers take
# (archive, options)).
modules.append(Module("Add Generated File to new Zip Archive", zip_files, []))
modules.append(Module("Add Generated File to existing Zip Archive", zip_files_existing, ["Absolute Path to Zip Archive (Required)", "Password (leave blank if none)"]))
modules.append(Module("Add flag as comment to existing zip archive", add_comment_flag, ["Absolute Path to Zip Archive (Required)"]))
modules.append(Module("Add comment to generated zip archive", add_comment_generated, ["Comment (cannot excede 65535 bytes, required)"]))
modules.append(Module("Add flag as password to existing zip archive", add_password_flag, ["Absolute Path to Zip Archive (Required)"]))
modules.append(Module("Add password to generated zip archive", add_password_gen, ["Password (Required)"]))
| 30.643836 | 167 | 0.6616 |
fee0038bcad7bec639ce857ac310e519e48bb958 | 7,102 | py | Python | src/archive/train_model.py | leocnj/call2018 | dc815eff13bd8d601098ae26eefbb073b0e9466a | [
"Apache-2.0"
] | null | null | null | src/archive/train_model.py | leocnj/call2018 | dc815eff13bd8d601098ae26eefbb073b0e9466a | [
"Apache-2.0"
] | null | null | null | src/archive/train_model.py | leocnj/call2018 | dc815eff13bd8d601098ae26eefbb073b0e9466a | [
"Apache-2.0"
] | null | null | null | import pickle
import numpy as np
from hyperopt import fmin, tpe, Trials
from hyperopt import hp
from hyperopt import space_eval
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from xgboost.sklearn import XGBClassifier
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import cross_val_score
from mlens.ensemble import BlendEnsemble
from utils import *
"""
- Support RF, XGBoost, and SVM
- Use **kwargs dict to pass parameters
- Use ML ens to stack models
- Use hyperopt for more convenient space definition
"""
# Random seed for StratifiedKFold so CV splits are identical across models.
SEED = 1
def define_model(**params):
    """Build an untrained classifier from a hyper-parameter dict.

    ``params['model']`` selects the family ('RF', 'XGB' or 'SVM'); the
    remaining keys are that family's hyper-parameters.

    Raises:
        ValueError: for an unknown ``params['model']``.  (Previously an
        unknown value fell through an empty ``else`` branch and crashed with
        UnboundLocalError on ``return model``.)
    """
    kind = params['model']
    if kind == 'RF':
        model = RandomForestClassifier(n_estimators=params['n_estimators'],
                                       max_features=params['max_features'])
    elif kind == 'XGB':
        model = XGBClassifier(n_estimators=params['n_estimators'],
                              max_depth=params['max_depth'])
    elif kind == 'SVM':
        # probability=True so the model can be blended by class probability.
        model = SVC(C=params['C'], kernel=params['kernel'], probability=True)
    else:
        raise ValueError("unknown model type: %r" % (kind,))
    return model
@timeit
def find_optm_params(objective, space, max_evals=20):
    """Minimize ``objective`` over ``space`` with hyperopt's TPE search and
    return the best parameter assignment as a concrete dict
    (space_eval resolves hp.choice indices back to actual values)."""
    trials = Trials()
    best = fmin(objective,
                space,
                algo=tpe.suggest,
                max_evals=max_evals,
                trials=trials)
    best_params = space_eval(space, best)
    return best_params
def stack_models(base_estimators, type):
    # Blend the base estimators' class probabilities into a logistic
    # regression meta-learner, then train/evaluate it on the split selected
    # by ``type`` ('lang' or 'meaning').
    # NOTE(review): ``train_eval`` is defined inside the __main__ block
    # below, so this function only works when the file is run as a script.
    blender = BlendEnsemble()
    blender.add(base_estimators, proba=True)
    blender.add_meta(LogisticRegression())
    train_eval(blender, type)
if __name__ == '__main__':
    # Load the pickled year-17 feature matrices / labels: indices 0-3 are the
    # "language" task (train X/y, test X/y), 4-7 the "meaning" task.
    with open('../data/processed/numpy/year17_withHuy.pkl', 'rb') as pf:
        objs = pickle.load(pf)
    year17_lang_train_X = objs[0]
    year17_lang_train_y = objs[1]
    year17_lang_test_X = objs[2]
    year17_lang_test_y = objs[3]
    year17_meaning_train_X = objs[4]
    year17_meaning_train_y = objs[5]
    year17_meaning_test_X = objs[6]
    year17_meaning_test_y = objs[7]
    # Combined (meaning + lang) matrices; built but not used below.
    year17_train_X = np.concatenate((year17_meaning_train_X, year17_lang_train_X), axis=1)
    year17_train_y = np.column_stack((year17_meaning_train_y, year17_lang_train_y))
    year17_test_X = np.concatenate((year17_meaning_test_X, year17_lang_test_X), axis=1)
    year17_test_y = np.column_stack((year17_meaning_test_y, year17_lang_test_y))
    # silence the many sklearn deprecation warnings
    import warnings
    warnings.filterwarnings("ignore")
    # logger = get_logger(__name__, simple=True)
    """
    """
    # Shared CV splitter/metric so every model is scored on the same folds.
    shuffle = StratifiedKFold(n_splits=10, shuffle=True, random_state=SEED)
    scoring_metric = 'roc_auc'
    # The helpers below are defined inside __main__ so they can close over
    # the data arrays loaded above.
    def acc_l_objective(params):
        # hyperopt objective for the "language" task: 1 - mean CV ROC-AUC
        # (hyperopt minimizes, so smaller is better).
        model = define_model(**params)
        acc_score = cross_val_score(model,
                                    year17_lang_train_X, year17_lang_train_y,
                                    cv=shuffle,
                                    scoring=scoring_metric, n_jobs=-1)
        return 1 - acc_score.mean()
    def acc_m_objective(params):
        # hyperopt objective for the "meaning" task: 1 - mean CV ROC-AUC.
        model = define_model(**params)
        acc_score = cross_val_score(model,
                                    year17_meaning_train_X, year17_meaning_train_y,
                                    cv=shuffle,
                                    scoring=scoring_metric, n_jobs=-1)
        return 1 - acc_score.mean()
    def eval_best_model(optm_params, type):
        # Rebuild the model from its tuned parameters and evaluate it.
        best_model = define_model(**optm_params)
        train_eval(best_model, type)
    def train_eval(best_model, type):
        # Fit on the train split selected by ``type`` ('lang' or anything
        # else = 'meaning') and print test-set accuracy.
        if type == 'lang':
            best_model.fit(year17_lang_train_X, year17_lang_train_y)
            y_true, y_pred = year17_lang_test_y, best_model.predict(year17_lang_test_X)
        else:
            best_model.fit(year17_meaning_train_X, year17_meaning_train_y)
            y_true, y_pred = year17_meaning_test_y, best_model.predict(year17_meaning_test_X)
        print('acc: %1.3f' % accuracy_score(y_true, y_pred))
    # Per-family enable flags for the hyper-parameter searches below.
    RF = True
    SVM = True
    XGB = True
    if not RF:
        optm_params_RF_l, optm_params_RF_m = None, None
    else:
        space = {'model': 'RF',
                 'n_estimators': 1 + hp.randint('n_estimators', 40),
                 'max_features': 1 + hp.randint('max_features', 15)}
        optm_params_RF_l = find_optm_params(acc_l_objective, space)
        print(optm_params_RF_l)
        eval_best_model(optm_params_RF_l, 'lang')
        optm_params_RF_m = find_optm_params(acc_m_objective, space)
        print(optm_params_RF_m)
        eval_best_model(optm_params_RF_m, 'meaning')
    if not SVM:
        optm_params_SVM_l, optm_params_SVM_m = None, None
    else:
        space = {
            'model': 'SVM',
            'C': hp.choice('C', [0.1, 0.5, 1.0]),
            'kernel': 'linear'
            # 'kernel': hp.choice('svm_kernel', [
            #     {'ktype': 'linear'},
            #     {'ktype': 'rbf', 'width': hp.lognormal('svm_rbf_width', 0, 1)},
        }
        optm_params_SVM_l = find_optm_params(acc_l_objective, space)
        print(optm_params_SVM_l)
        eval_best_model(optm_params_SVM_l, 'lang')
        optm_params_SVM_m = find_optm_params(acc_m_objective, space)
        print(optm_params_SVM_m)
        eval_best_model(optm_params_SVM_m, 'meaning')
    if not XGB:
        optm_params_XGB_l, optm_params_XGB_m = None, None
    else:
        # space = {'model': 'XGB',
        #          'n_estimators': hp.choice('n_estimators', [10, 15, 20]),
        #          'max_depth': hp.choice('max_depth', [4, 6, 8])}
        space = {'model': 'XGB',
                 'n_estimators': hp.choice('n_estimators', list(range(1,20))),
                 'max_depth': hp.choice('max_depth', [4, 6, 8])}
        optm_params_XGB_l = find_optm_params(acc_l_objective, space)
        print(optm_params_XGB_l)
        eval_best_model(optm_params_XGB_l, 'lang')
        optm_params_XGB_m = find_optm_params(acc_m_objective, space)
        print(optm_params_XGB_m)
        eval_best_model(optm_params_XGB_m, 'meaning')
    # Stack the tuned RF + XGB models per task.
    # NOTE(review): this (and the pickle.dump below) assumes the RF, XGB and
    # SVM flags above are all True — with a flag disabled, the corresponding
    # optm_params_* are None and define_model(**None) would crash.
    base_estimators = [define_model(**optm_params_RF_l),
                       define_model(**optm_params_XGB_l)]
    stack_models(base_estimators, 'lang')
    base_estimators = [define_model(**optm_params_RF_m),
                       define_model(**optm_params_XGB_m)]
    stack_models(base_estimators, 'meaning')
    # Persist the tuned parameter dicts for later reuse.
    with open('../data/processed/numpy/year17_models.pkl', 'wb') as pf:
        pickle.dump([optm_params_RF_l, optm_params_RF_m,
                     optm_params_XGB_l, optm_params_XGB_m,
                     optm_params_SVM_l, optm_params_SVM_m], pf)
        # pickle.dump([optm_params_SVM_l, optm_params_SVM_m], pf)
| 34.643902 | 94 | 0.634469 |
f891466cf01eb1c1e897887f5af166069d783104 | 3,493 | py | Python | taotao-cloud-python/taotao-cloud-oldboy/day30-python-tcp-udp/socket_server_tcp.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-oldboy/day30-python-tcp-udp/socket_server_tcp.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-oldboy/day30-python-tcp-udp/socket_server_tcp.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | # from socket import *
# import subprocess
# ip_port=('127.0.0.1',8080)
# back_log=5
# buffer_size=1024
#
# tcp_server=socket(AF_INET,SOCK_STREAM)
# tcp_server.bind(ip_port)
# tcp_server.listen(back_log)
#
# while True:
# conn,addr=tcp_server.accept()
# print('新的客户端链接',addr)
# while True:
# #收
# try:
# cmd=conn.recv(buffer_size)
# if not cmd:break
# print('收到客户端的命令',cmd)
#
# #执行命令,得到命令的运行结果cmd_res
# res=subprocess.Popen(cmd.decode('utf-8'),shell=True,
# stderr=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stdin=subprocess.PIPE)
# err=res.stderr.read()
# if err:
# cmd_res=err
# else:
# cmd_res=res.stdout.read()
#
# #发
# if not cmd_res:
# cmd_res='执行成功'.encode('gbk')
# conn.send(cmd_res)
# except Exception as e:
# print(e)
# break
#low版解决粘包版本
# from socket import *
# import subprocess
# ip_port=('127.0.0.1',8080)
# back_log=5
# buffer_size=1024
#
# tcp_server=socket(AF_INET,SOCK_STREAM)
# tcp_server.bind(ip_port)
# tcp_server.listen(back_log)
#
# while True:
# conn,addr=tcp_server.accept()
# print('新的客户端链接',addr)
# while True:
# #收
# try:
# cmd=conn.recv(buffer_size)
# if not cmd:break
# print('收到客户端的命令',cmd)
#
# #执行命令,得到命令的运行结果cmd_res
# res=subprocess.Popen(cmd.decode('utf-8'),shell=True,
# stderr=subprocess.PIPE,
# stdout=subprocess.PIPE,
# stdin=subprocess.PIPE)
# err=res.stderr.read()
# if err:
# cmd_res=err
# else:
# cmd_res=res.stdout.read()
#
# #发
# if not cmd_res:
# cmd_res='执行成功'.encode('gbk')
#
# length=len(cmd_res)
# conn.send(str(length).encode('utf-8'))
# client_ready=conn.recv(buffer_size)
# if client_ready == b'ready':
# conn.send(cmd_res)
# except Exception as e:
# print(e)
# break
from socket import *
import subprocess
import struct
# TCP remote-shell server: accepts one client at a time, runs each received
# command through the shell and sends back the output, length-prefixed with a
# struct-packed int so the client can read exactly one response (avoids the
# "sticky packet" / message-framing problem of raw TCP).
# SECURITY: this executes arbitrary client-supplied commands with shell=True;
# teaching/demo code only — never expose beyond localhost.
ip_port=('127.0.0.1',8080)
back_log=5          # listen() backlog
buffer_size=1024    # recv chunk size
tcp_server=socket(AF_INET,SOCK_STREAM)
tcp_server.bind(ip_port)
tcp_server.listen(back_log)
while True:
    conn,addr=tcp_server.accept()
    print('新的客户端链接',addr)
    while True:
        # receive one command from the client
        try:
            cmd=conn.recv(buffer_size)
            if not cmd:break
            print('收到客户端的命令',cmd)
            # run the command in a shell and capture its output as cmd_res
            res=subprocess.Popen(cmd.decode('utf-8'),shell=True,
                                 stderr=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stdin=subprocess.PIPE)
            err=res.stderr.read()
            if err:
                cmd_res=err
            else:
                cmd_res=res.stdout.read()
            # send: 4-byte little int length header, then the payload
            if not cmd_res:
                cmd_res='执行成功'.encode('gbk')
            length=len(cmd_res)
            data_length=struct.pack('i',length)
            conn.send(data_length)
            conn.send(cmd_res)
        except Exception as e:
            print(e)
            break
| 26.462121 | 66 | 0.494417 |
645b1de574dc89525a904e9d2a1fd66244145362 | 12,123 | py | Python | app/Complaint.py | nlohmann/bahn_auftragsnummer | 88f4ac0a0993e6b709909023131eb68738b67db2 | [
"MIT"
] | 2 | 2017-12-20T22:24:20.000Z | 2021-07-07T13:49:15.000Z | app/Complaint.py | nlohmann/bahn_auftragsnummer | 88f4ac0a0993e6b709909023131eb68738b67db2 | [
"MIT"
] | 1 | 2019-07-19T18:33:43.000Z | 2019-07-19T18:33:43.000Z | app/Complaint.py | nlohmann/bahn_auftragsnummer | 88f4ac0a0993e6b709909023131eb68738b67db2 | [
"MIT"
] | 2 | 2017-12-20T22:24:24.000Z | 2020-06-18T15:26:17.000Z | # coding=utf-8
########################################################################
# Licensed under the MIT License <http://opensource.org/licenses/MIT>. #
# Copyright (c) 2017 Niels Lohmann <http://nlohmann.me>. #
########################################################################
from fdfgen import forge_fdf
import os
import tempfile
import aniso8601
import pytz
import datetime
from typing import Optional
class Complaint(object):
    """One Deutsche Bahn passenger-rights (Fahrgastrechte) complaint.

    The object is populated from a JSON payload via :meth:`fill`, mapped to
    the numbered fields of the official PDF form via :meth:`__form_fields`,
    and rendered with pdftk in :meth:`create_pdf`.
    """

    def __init__(self):
        # --- journey data ---
        self.datum_reise = None  # type: Optional[datetime.date]
        self.startbahnhof = None  # type: Optional[str]
        self.startbahnhof_abfahrt_soll = None  # type: Optional[datetime.datetime]
        self.datum_ankunft = None  # type: Optional[datetime.date]
        self.zielbahnhof = None  # type: Optional[str]
        self.zielbahnhof_ankunft_soll = None  # type: Optional[datetime.datetime]
        self.zielbahnhof_ankunft_ist = None  # type: Optional[datetime.datetime]
        self.zug_ankunft = None  # type: Optional[str]
        self.zug_verspaetet = None  # type: Optional[str]
        self.zug_verspaetet_abfahrt_ist = None  # type: Optional[datetime.datetime]
        self.bahnhof_anschlusszug_verpasst = None  # type: Optional[str]
        self.bahnhof_letzter_umstieg = None  # type: Optional[str]
        self.bahnhof_reise_abgebrochen = None  # type: Optional[str]
        self.bahnhof_reise_unterbrochen = None  # type: Optional[str]
        # --- personal data ---
        self.geschlecht = None  # type: Optional[str]
        self.titel = None  # type: Optional[str]
        self.firma = None  # type: Optional[str]
        self.vorname = None  # type: Optional[str]
        self.nachname = None  # type: Optional[str]
        self.co = None  # type: Optional[str]
        self.telefonnumer = None  # type: Optional[str]
        self.strasse = None  # type: Optional[str]
        self.hausnummer = None  # type: Optional[str]
        self.staat = None  # type: Optional[str]
        self.postleitzahl = None  # type: Optional[str]
        self.wohnort = None  # type: Optional[str]
        self.bahncard_100_nummer = None  # type: Optional[str]
        self.zeitkarten_nummer = None  # type: Optional[str]
        self.geburtsdatum = None  # type: Optional[datetime.date]
        self.email = None  # type: Optional[str]
        # --- compensation / payment data ---
        self.marktforschung = False  # type: bool
        self.entschaedigung_ueberweisung = True  # type: bool
        self.kontoinhaber = None  # type: Optional[str]
        self.iban = None  # type: Optional[str]
        self.bic = None  # type: Optional[str]

    def fill(self, payload):
        """Populate the complaint from a JSON-style dict *payload*.

        Missing keys simply leave the corresponding attribute at its default.
        Dates/datetimes are parsed as ISO 8601; datetimes are converted to
        Europe/Berlin where possible.
        """
        def get_date(date_string):
            # ISO 8601 date or None
            if date_string is not None:
                return aniso8601.parse_date(date_string)
            return None

        def get_datetime(datetime_string):
            # ISO 8601 datetime, normalised to Europe/Berlin when the value
            # is timezone-aware (naive values raise ValueError and stay naive)
            if datetime_string is not None:
                result = aniso8601.parse_datetime(datetime_string)
                try:
                    result = result.astimezone(pytz.timezone('Europe/Berlin'))
                except ValueError:
                    pass
                return result
            return None

        def get_train(train_string):
            # "ICE 123" -> ["ICE", "123"] (train type, train number)
            if train_string is not None:
                return train_string.split()
            return None

        self.datum_reise = get_date(payload.get('travelStartDate'))
        self.datum_ankunft = get_date(payload.get('travelEndDate'))
        self.startbahnhof = payload.get('startStation')
        self.startbahnhof_abfahrt_soll = get_datetime(payload.get('startStationPlannedDeparture'))
        self.zielbahnhof = payload.get('endStation')
        # NOTE(review): the destination arrival times are read from
        # 'startStation...' keys — confirm against the API's payload schema.
        self.zielbahnhof_ankunft_soll = get_datetime(payload.get('startStationPlannedArrival'))
        self.zielbahnhof_ankunft_ist = get_datetime(payload.get('startStationActualArrival'))
        self.zug_ankunft = get_train(payload.get('trainArrived'))
        self.zug_verspaetet = get_train(payload.get('trainFirstDelayed'))
        self.zug_verspaetet_abfahrt_ist = get_datetime(payload.get('trainFirstDelayedPlannedDeparture'))
        self.bahnhof_anschlusszug_verpasst = payload.get('stationMissedTrain')
        self.bahnhof_letzter_umstieg = payload.get('stationLastChange')
        self.bahnhof_reise_abgebrochen = payload.get('stationTripAborted')
        self.bahnhof_reise_unterbrochen = payload.get('stationTripSuspended')
        self.geschlecht = payload.get('gender')
        self.titel = payload.get('title')
        self.firma = payload.get('company')
        self.nachname = payload.get('lastName')
        self.vorname = payload.get('firstName')
        self.co = payload.get('co')
        self.telefonnumer = payload.get('phone')
        self.strasse = payload.get('street')
        self.hausnummer = payload.get('houseNumber')
        self.staat = payload.get('state')
        self.postleitzahl = payload.get('postcode')
        self.wohnort = payload.get('city')
        self.bahncard_100_nummer = payload.get('numberBahncard100')
        self.zeitkarten_nummer = payload.get('numberSeasonTicket')
        self.geburtsdatum = get_date(payload.get('dateOfBirth'))
        self.email = payload.get('email')
        if payload.get('marketingResearch') is not None:
            self.marktforschung = payload.get('marketingResearch')
        if payload.get('compensation'):
            # anything other than 'transfer' means a voucher ("Gutschein")
            self.entschaedigung_ueberweisung = payload.get('compensation') == 'transfer'
        self.kontoinhaber = payload.get('accountHolder')
        self.iban = payload.get('iban')
        self.bic = payload.get('bic')

    def __form_fields(self):
        """
        converts stored information to FDF values

        Field names (S1Fx = page 1 journey data, S2Fx = page 2 personal data)
        correspond to the form fields of the Fahrgastrechte PDF template.
        :return: list of pairs to use as FDF for the Fahrgastrechte PDF
        """
        result = []
        # --- page 1: journey ---
        if self.datum_reise is not None:
            result.append(('S1F1', self.datum_reise.strftime('%d')))
            result.append(('S1F2', self.datum_reise.strftime('%m')))
            result.append(('S1F3', self.datum_reise.strftime('%y')))
        if self.startbahnhof is not None:
            result.append(('S1F4', self.startbahnhof))
        if self.startbahnhof_abfahrt_soll is not None:
            result.append(('S1F5', self.startbahnhof_abfahrt_soll.strftime('%H')))
            result.append(('S1F6', self.startbahnhof_abfahrt_soll.strftime('%M')))
        if self.zielbahnhof is not None:
            result.append(('S1F7', self.zielbahnhof))
        if self.zielbahnhof_ankunft_soll is not None:
            result.append(('S1F8', self.zielbahnhof_ankunft_soll.strftime('%H')))
            result.append(('S1F9', self.zielbahnhof_ankunft_soll.strftime('%M')))
        if self.datum_ankunft is not None:
            result.append(('S1F10', self.datum_ankunft.strftime('%d')))
            result.append(('S1F11', self.datum_ankunft.strftime('%m')))
            result.append(('S1F12', self.datum_ankunft.strftime('%y')))
        if self.zug_ankunft is not None:
            # [train type, train number]
            result.append(('S1F13', self.zug_ankunft[0]))
            result.append(('S1F14', self.zug_ankunft[1]))
        if self.zielbahnhof_ankunft_ist is not None:
            result.append(('S1F15', self.zielbahnhof_ankunft_ist.strftime('%H')))
            result.append(('S1F16', self.zielbahnhof_ankunft_ist.strftime('%M')))
        if self.zug_verspaetet is not None:
            result.append(('S1F17', self.zug_verspaetet[0]))
            result.append(('S1F18', self.zug_verspaetet[1]))
        if self.zug_verspaetet_abfahrt_ist is not None:
            result.append(('S1F19', self.zug_verspaetet_abfahrt_ist.strftime('%H')))
            result.append(('S1F20', self.zug_verspaetet_abfahrt_ist.strftime('%M')))
        # checkbox + station name pairs
        if self.bahnhof_anschlusszug_verpasst is not None:
            result.append(('S1F21', 'Ja'))
            result.append(('S1F22', self.bahnhof_anschlusszug_verpasst))
        if self.bahnhof_letzter_umstieg is not None:
            result.append(('S1F23', 'Ja'))
            result.append(('S1F24', self.bahnhof_letzter_umstieg))
        if self.bahnhof_reise_abgebrochen is not None:
            result.append(('S1F25', 'Ja'))
            result.append(('S1F26', self.bahnhof_reise_abgebrochen))
        if self.bahnhof_reise_unterbrochen is not None:
            result.append(('S1F27', 'Ja'))
            result.append(('S1F28', self.bahnhof_reise_unterbrochen))
        if not self.entschaedigung_ueberweisung:
            result.append(('S1F29', 'Gutschein'))
        # --- page 2: personal data ---
        if self.geschlecht is not None:
            # NOTE: on Python 3 the u'' duplicates are redundant ('Herr' == u'Herr')
            if self.geschlecht in ['Herr', u'Herr']:
                result.append(('S2F1', 'Herr'))
            elif self.geschlecht in ['Frau', u'Frau']:
                result.append(('S2F1', 'Frau'))
            else:
                raise ValueError
        if self.titel is not None:
            result.append(('S2F2', self.titel))
        if self.firma is not None:
            result.append(('S2F3', self.firma))
        if self.nachname is not None:
            result.append(('S2F4', self.nachname))
        if self.vorname is not None:
            result.append(('S2F5', self.vorname))
        if self.co is not None:
            result.append(('S2F6', self.co))
        if self.telefonnumer is not None:
            result.append(('S2F7', self.telefonnumer))
        if self.strasse is not None:
            result.append(('S2F8', self.strasse))
        if self.hausnummer is not None:
            result.append(('S2F9', self.hausnummer))
        # country field stays empty for Germany ('D')
        if self.staat is not None and self.staat != 'D':
            result.append(('S2F10', self.staat))
        if self.postleitzahl is not None:
            result.append(('S2F11', self.postleitzahl))
        if self.wohnort is not None:
            result.append(('S2F12', self.wohnort))
        # S2F13/S2F15: card type selector + card number (season ticket wins
        # if both are set, since it is appended last)
        if self.bahncard_100_nummer is not None:
            result.append(('S2F13', 'BahnCard 100-Nr.'))
            result.append(('S2F15', self.bahncard_100_nummer))
        if self.zeitkarten_nummer is not None:
            result.append(('S2F13', 'Zeitkarten-Nr.'))
            result.append(('S2F15', self.zeitkarten_nummer))
        if self.geburtsdatum is not None:
            result.append(('S2F16', self.geburtsdatum.strftime('%d')))
            result.append(('S2F17', self.geburtsdatum.strftime('%m')))
            result.append(('S2F18', self.geburtsdatum.strftime('%Y')))
        if self.email is not None:
            result.append(('S2F19', self.email))
        if self.kontoinhaber is not None:
            result.append(('S2F20', self.kontoinhaber))
        if self.iban is not None:
            result.append(('S2F21', self.iban))
        if self.bic is not None:
            result.append(('S2F22', self.bic))
        if self.marktforschung:
            result.append(('S2F23', 'Ja'))
        return result

    def create_pdf(self):
        """Fill the PDF template with this complaint's data via pdftk.

        :return: path of the generated (temporary) PDF file; the caller is
            responsible for deleting it.
        :raises SystemError: if the pdftk invocation fails.
        """
        fdf = forge_fdf("", self.__form_fields(), [], [], [])
        fdf_file = tempfile.NamedTemporaryFile('wb', delete=False)
        fdf_file.write(fdf)
        fdf_file.close()
        pdf_file = tempfile.NamedTemporaryFile('wb', delete=False)
        pdf_file.close()

        # create absolute paths
        basedir = os.path.dirname(os.path.realpath(__file__))
        form_template = os.path.abspath(os.path.join(basedir, 'static', 'fgr.pdf'))

        # NOTE(review): os.system builds a shell command from unquoted paths;
        # tempfile names are normally safe, but a project path containing
        # spaces would break this. subprocess with a list would be safer.
        pdftk_cmd = "pdftk {form_template} fill_form {fdf_file} output {form_output}".format(
            fdf_file=fdf_file.name, form_template=form_template, form_output=pdf_file.name)
        ret = os.system(pdftk_cmd)
        if ret != 0:
            raise SystemError  # pragma: no cover
        os.remove(fdf_file.name)
        return pdf_file.name
| 46.448276 | 104 | 0.591438 |
ca9fe22880418011914846d5c2650015c17bea4b | 4,668 | py | Python | tensorflow/modules/utils.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 19 | 2018-09-25T01:58:13.000Z | 2021-07-27T09:54:36.000Z | tensorflow/modules/utils.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 18 | 2020-03-24T18:18:56.000Z | 2022-02-10T00:35:13.000Z | tensorflow/modules/utils.py | nicolasrosa/Sparse-to-Continuous | 8664de17d6b6c6cc39bf8fcebfcb829249367f2f | [
"BSD-2-Clause"
] | 4 | 2019-11-14T02:35:34.000Z | 2020-08-27T11:36:02.000Z | # ===========
# Libraries
# ===========
import glob
import os
import time
from collections import deque
from itertools import chain
from sys import getsizeof, stderr
import imageio
from skimage import exposure, img_as_uint
from modules.args import args
# ========= #
# Classes #
# ========= #
class Settings:
    """Run-wide paths and bookkeeping.

    Builds the dated save-directory tree for the current run and clears the
    temporary prediction/ground-truth output folders.
    """

    def __init__(self, app_name, output_dir, output_tmp_dir, output_log_file):
        # NOTE(review): output_log_file is currently stored nowhere / unused.
        self.app_name = app_name
        # Timestamp used to namespace this run's outputs.
        self.datetime = time.strftime("%Y-%m-%d") + '_' + time.strftime("%H-%M-%S")

        # Save path layout:
        #   <cwd>/<output_dir>/<app>/<dataset>/<px>_px/<loss>/<timestamp>/
        px_str = args.px + '_px'
        relative_save_path = output_dir + self.app_name + '/' + args.dataset + '/' + px_str + '/' + args.loss + '/' + self.datetime + '/'
        self.save_path = os.path.join(os.getcwd(), relative_save_path)
        self.save_restore_path = os.path.join(self.save_path, 'restore/')

        self.output_dir = output_dir
        self.output_tmp_dir = output_tmp_dir
        self.output_tmp_pred_dir = output_tmp_dir + 'pred/'
        self.output_tmp_gt_dir = output_tmp_dir + 'gt/'
        self.log_tb = self.save_path + args.log_directory

        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

        # Empty the tmp tree bottom-up (files first, then their directories),
        # leaving output_tmp_dir itself in place.
        for root, dirs, files in os.walk(self.output_tmp_dir, topdown=False):
            for entry in files:
                os.remove(os.path.join(root, entry))
            for entry in dirs:
                os.rmdir(os.path.join(root, entry))

        # Recreate the directories every run needs.
        for required_dir in (self.output_tmp_pred_dir,
                             self.output_tmp_gt_dir,
                             self.save_restore_path):
            if not os.path.exists(required_dir):
                os.makedirs(required_dir)

    def get_save_path(self):
        return self.save_path

    def get_save_restore_path(self):
        return self.save_restore_path
# ===========
# Functions
# ===========
def detect_available_models():
    """Return a model checkpoint path, either from --model_path or interactively.

    When no path is given on the command line, list every saved model found
    under the output directory and let the user pick one by index.
    """
    if args.model_path != '':
        return args.model_path

    found_models = glob.glob(settings.output_dir + "models/*/*/*/*/restore/*.meta")
    found_models.sort()
    for idx, model in enumerate(found_models):
        print(idx, model)
    selected_model_id = input("\nSelect Model: ")
    print()
    # Strip the '.meta' extension from the chosen entry.
    return os.path.splitext(found_models[int(selected_model_id)])[0]
def imsave_as_uint16_png(filename, image_float32):
    """Rescale a float32 image, convert it to uint16 and write it as a PNG."""
    rescaled = exposure.rescale_intensity(image_float32, out_range='float')
    imageio.imsave(filename, img_as_uint(rescaled))
def total_size(o, handlers=None, verbose=False):
    """Returns the approximate memory footprint an object and all of its contents.

    Automatically finds the contents of the following builtin containers and
    their subclasses: tuple, list, deque, dict, set and frozenset.
    To search other containers, add handlers to iterate over their contents:

        handlers = {SomeContainerClass: iter,
                    OtherContainerClass: OtherContainerClass.get_elements}
    """
    user_handlers = {} if handlers is None else handlers

    # dict entries are counted as key AND value objects
    all_handlers = {
        tuple: iter,
        list: iter,
        deque: iter,
        dict: lambda d: chain.from_iterable(d.items()),
        set: iter,
        frozenset: iter,
    }
    all_handlers.update(user_handlers)  # user handlers take precedence

    visited = set()             # object ids already measured (avoid double count)
    default_size = getsizeof(0)  # fallback when __sizeof__ is missing

    def _measure(obj):
        if id(obj) in visited:  # same object reached twice: count it once
            return 0
        visited.add(id(obj))

        size = getsizeof(obj, default_size)
        if verbose:
            print(size, type(obj), repr(obj), file=stderr)

        # Recurse into the first matching container type, if any.
        for container_type, handler in all_handlers.items():
            if isinstance(obj, container_type):
                size += sum(_measure(child) for child in handler(obj))
                break
        return size

    return _measure(o)
# ================== #
# Global Variables #
# ================== #
# Shared Settings instance consumed by the rest of the application.
settings = Settings(
    app_name='fcrn',
    output_dir='output/',
    output_tmp_dir='output/tmp/',
    output_log_file='log.txt',
)
4eeabc62920c222feb714f88e1acdda2c60506f9 | 2,790 | py | Python | dephell/commands/package_purge.py | eli-schwartz/dephell | f3e07b497e3ad7998e563b1a4c5ad9f023834d36 | [
"MIT"
] | null | null | null | dephell/commands/package_purge.py | eli-schwartz/dephell | f3e07b497e3ad7998e563b1a4c5ad9f023834d36 | [
"MIT"
] | null | null | null | dephell/commands/package_purge.py | eli-schwartz/dephell | f3e07b497e3ad7998e563b1a4c5ad9f023834d36 | [
"MIT"
] | null | null | null | # built-in
from argparse import ArgumentParser
from packaging.utils import canonicalize_name
# app
from ..actions import get_python_env
from ..config import builders
from ..controllers import analyze_conflict, Resolver, Mutator, Graph
from ..converters import InstalledConverter
from ..models import Requirement
from ..package_manager import PackageManager
from .base import BaseCommand
class PackagePurgeCommand(BaseCommand):
    """Remove given packages and their dependencies.

    https://dephell.readthedocs.io/en/latest/cmd-package-purge.html
    """
    @classmethod
    def get_parser(cls) -> ArgumentParser:
        """Build the argparse parser for `dephell package purge`."""
        parser = ArgumentParser(
            prog='dephell package purge',
            description=cls.__doc__,
        )
        builders.build_config(parser)
        builders.build_venv(parser)
        builders.build_output(parser)
        builders.build_other(parser)
        parser.add_argument('name', nargs='+', help='names of packages to remove')
        return parser

    def __call__(self) -> bool:
        """Execute the command; return True on success, False otherwise."""
        python = get_python_env(config=self.config)
        manager = PackageManager(executable=python.path)
        converter = InstalledConverter()

        # get installed packages; keep only requested names that are actually
        # installed (canonicalized for PEP 503-style comparison)
        root = converter.load(paths=python.lib_paths)
        names = set(self.args.name) & {canonicalize_name(dep.name) for dep in root.dependencies}
        if not names:
            self.logger.error('packages is not installed', extra=dict(python=python.path))
            return False

        # resolve graph of everything installed, so dependents can be traced
        self.logger.info('build dependencies graph...')
        resolver = Resolver(
            graph=Graph(root),
            mutator=Mutator(),
        )
        resolved = resolver.resolve(silent=self.config['silent'])
        if not resolved:
            conflict = analyze_conflict(resolver=resolver)
            self.logger.warning('conflict was found')
            print(conflict)
            return False

        # get packages to remove: each requested package plus any of its
        # children that nothing outside the removal set still depends on
        reqs = []
        for name in names:
            parent = resolver.graph.get(name=name)
            reqs.append(Requirement(dep=parent, lock=True))
            for dep in resolver.graph.get_children(dep=parent).values():
                if not dep:
                    raise LookupError('cannot find dep in graph')
                # skip children still required by packages we are keeping
                if dep.constraint.sources - {root.name} - names:
                    continue
                reqs.append(Requirement(dep=dep, lock=True))

        # remove installed packages
        self.logger.info('removing packages...', extra=dict(
            python=python.path,
            packages=[req.name for req in reqs],
        ))
        code = manager.remove(reqs=reqs)
        if code != 0:
            return False
        self.logger.info('removed')
        return True
| 33.614458 | 96 | 0.628674 |
88a72a1627fdc2e3f7df5e0e51761544e82fd472 | 1,365 | py | Python | tests/unit/molior/test_configuration.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | [
"Apache-2.0"
] | null | null | null | tests/unit/molior/test_configuration.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | [
"Apache-2.0"
] | null | null | null | tests/unit/molior/test_configuration.py | randombenj/molior | 5f22935a1860c9ab206acfa52ba6206ae1755594 | [
"Apache-2.0"
] | null | null | null | """
Provides test molior configuration class.
"""
from pathlib import Path
from mock import patch, mock_open
from molior.molior.configuration import Configuration
def test_config():
    """Configuration.config() should return the stored config dict."""
    config = Configuration()
    with patch("molior.molior.configuration.Configuration._load_config"):
        config._config = {"test": "config"}
        assert config.config() == {"test": "config"}
def test_load_config_non_existent():
    """_load_config on a missing path should return None."""
    config = Configuration()
    assert config._load_config(Path("/non/existent")) is None
def test_load_config():
    """_load_config should parse the file contents into _config."""
    config = Configuration()
    fake_file = mock_open(read_data="{'test': 'config'}")
    with patch(
        "molior.molior.configuration.open", fake_file
    ):
        config._load_config("/")
        assert config._config == {"test": "config"}
def test_get_config_attr():
    """Attribute access should proxy into the loaded config dict."""
    config = Configuration()
    config._config = {"test": "config"}
    assert config.test == "config"
def test_get_config_attr_no_cfg():
    """Attribute access on an empty config should trigger a (re)load."""
    config = Configuration()
    config._config = {}
    with patch("molior.molior.configuration.Configuration._load_config") as load_mock:
        assert config.test == {}
        assert load_mock.called
| 22.75 | 85 | 0.635165 |
cc2024bdd06ac460ceb18cfb552fde9bcf83d336 | 69 | py | Python | bootstrap.py | seko24/cmpdirs | 04f1b886a27674dc96a4ca39b84a24c2d65b48fb | [
"MIT"
] | null | null | null | bootstrap.py | seko24/cmpdirs | 04f1b886a27674dc96a4ca39b84a24c2d65b48fb | [
"MIT"
] | null | null | null | bootstrap.py | seko24/cmpdirs | 04f1b886a27674dc96a4ca39b84a24c2d65b48fb | [
"MIT"
] | null | null | null | import sys
import cmpdirs.cmpdirs
cmpdirs.cmpdirs.cli(sys.argv[1:]) | 17.25 | 33 | 0.782609 |
6aa3862dc33f23a22bbf8126a4685074edf7a8c9 | 1,203 | py | Python | src/python_eulerian_video_magnification/magnifymotion_riesz.py | shei-pi/PyEVM | 52a44c84f88c27f1ed032e94b426f854faa4198f | [
"BSD-2-Clause"
] | null | null | null | src/python_eulerian_video_magnification/magnifymotion_riesz.py | shei-pi/PyEVM | 52a44c84f88c27f1ed032e94b426f854faa4198f | [
"BSD-2-Clause"
] | null | null | null | src/python_eulerian_video_magnification/magnifymotion_riesz.py | shei-pi/PyEVM | 52a44c84f88c27f1ed032e94b426f854faa4198f | [
"BSD-2-Clause"
] | null | null | null | import cv2
import numpy as np
from python_eulerian_video_magnification.filter import butter_bandpass_filter
from python_eulerian_video_magnification.magnify import Magnify
from python_eulerian_video_magnification.pyramid import laplacian_video
from python_eulerian_video_magnification.riesz_pyramid import riesz_video
class MagnifyMotionRiesz(Magnify):
    """Eulerian motion magnification based on a Riesz pyramid."""

    def _magnify_impl(self, tensor: np.ndarray, fps: int) -> np.ndarray:
        """Band-pass + amplify the video's motion and add it back onto the input."""
        pyramid_per_level = riesz_video(
            tensor,
            amplification_factor=self._amplification,
            low_cutoff=self._low,
            high_cutoff=self._high,
            sampling_rate=fps,
            levels=self._levels,
        )
        recon = self._reconstruct_from_tensor_list(pyramid_per_level)
        return tensor + recon

    def _reconstruct_from_tensor_list(self, filter_tensor_list):
        """Collapse the per-level pyramid tensors back into full-resolution frames."""
        reconstructed = np.zeros(filter_tensor_list[-1].shape)
        frame_count = filter_tensor_list[0].shape[0]
        for frame_idx in range(frame_count):
            # Start at the coarsest level and repeatedly upsample + add the
            # next finer level.
            merged = filter_tensor_list[0][frame_idx]
            for level in range(self._levels - 1):
                merged = cv2.pyrUp(merged) + filter_tensor_list[level + 1][frame_idx]
            reconstructed[frame_idx] = merged
        return reconstructed
| 44.555556 | 90 | 0.680798 |
e1bf6af0d18e2317672d403dd29ec692a9fdee70 | 703 | py | Python | covid_act_now/setup.py | sgsmob/covidcast-indicators | 424ef5fd5361c4ed7b3ed88cf31813349d35240e | [
"MIT"
] | null | null | null | covid_act_now/setup.py | sgsmob/covidcast-indicators | 424ef5fd5361c4ed7b3ed88cf31813349d35240e | [
"MIT"
] | null | null | null | covid_act_now/setup.py | sgsmob/covidcast-indicators | 424ef5fd5361c4ed7b3ed88cf31813349d35240e | [
"MIT"
] | null | null | null | from setuptools import setup
from setuptools import find_packages
# NOTE(review): this list is passed to install_requires below, yet it also
# pins test/lint tooling (pytest, pytest-cov, pylint, pydocstyle); consider
# moving those into extras_require / a dev-requirements file.
required = [
    "numpy",
    "pandas",
    "pydocstyle",
    "pytest",
    "pytest-cov",
    "pylint",
    "delphi-utils",
    "covidcast",
    "pyarrow",
]

setup(
    name="delphi_covid_act_now",
    version="0.1.0",
    description="Indicators from COVID Act Now",
    author="Eu Jing Chua",
    author_email="eujingc@andrew.cmu.edu",
    url="https://github.com/cmu-delphi/covidcast-indicators",
    install_requires=required,  # runtime + (currently) dev dependencies
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3.8",
    ],
    packages=find_packages(),
)
| 22.677419 | 61 | 0.633001 |
93c87edfb64de72d0bc4cf9c683e800d9cddeae8 | 635 | py | Python | tests/test_shops.py | deekshag175/shopping-centre-sim | bc558a1e423ca21afa638d7323b36aeba16e7923 | [
"Apache-2.0"
] | null | null | null | tests/test_shops.py | deekshag175/shopping-centre-sim | bc558a1e423ca21afa638d7323b36aeba16e7923 | [
"Apache-2.0"
] | 8 | 2020-10-25T16:21:10.000Z | 2020-12-11T14:24:17.000Z | tests/test_shops.py | deekshag175/shopping-centre-sim | bc558a1e423ca21afa638d7323b36aeba16e7923 | [
"Apache-2.0"
] | null | null | null | import pytest
from power.shops import shops
def test_get_shopum_firstrow():
    """A shop built from a '1_...' image lands in the first row at (140, 50)."""
    shop = shops(shops_pygame_fake(None, '1_image'))
    shop.draw_to_screen()
    assert shop.shopnum == 1
    assert shop.posx == 140
    assert shop.posy == 50
def test_get_shopum_secondrow():
    """A shop built from a '5_...' image lands in the second row at (140, 678)."""
    shop = shops(shops_pygame_fake(None, '5_image'))
    shop.draw_to_screen()
    assert shop.shopnum == 5
    assert shop.posx == 140
    assert shop.posy == 678
class shops_pygame_fake:
    """Minimal stand-in for the pygame-backed shop dependency used in tests."""

    def __init__(self, surface, image_uri):
        self.surface = surface
        self.image_uri = image_uri
        self.store_logo = None

    def draw_to_screen(self, posx, posy):
        # The real implementation blits to the screen; the fake does nothing.
        pass
2687d5db4a3b3054ae00cbcc1b814fee8763bacb | 1,248 | py | Python | car-controller/src/mainController/Workflows/HomepageServerThread.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | car-controller/src/mainController/Workflows/HomepageServerThread.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | car-controller/src/mainController/Workflows/HomepageServerThread.py | iisys-hof/autonomous-driving | 9f2ab64713b6dbec38f4ca6dcb953729f39a2746 | [
"Apache-2.0"
] | null | null | null | # @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import threading
from CrossCutting.TimeMeasurement.TimeMeasurement import TimeMeasurement
from View.Homepage.VideoServer import VideoServer
from View.Render.RenderImage import RenderImage
class HomepageServerThread(threading.Thread):
    """Background thread that hosts the homepage/video server."""

    def __init__(self, videoStreamQueue, manualMovementQueue, moveControllerCommandQueue, trajectoryPlanningCommandQueue):
        super(HomepageServerThread, self).__init__()
        # The VideoServer owns all queue endpoints; this thread only drives it.
        self.videoServer = VideoServer(
            videoStreamQueue,
            manualMovementQueue,
            moveControllerCommandQueue,
            trajectoryPlanningCommandQueue,
        )

    def run(self):
        """Thread entry point: block inside the server's own loop."""
        self.videoServer.run()
| 40.258065 | 137 | 0.741987 |
67539dbc44560398ae58b659f3249ff4028074c5 | 2,272 | py | Python | app/core/impulse.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | app/core/impulse.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | app/core/impulse.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | from typing import Optional, Type, NamedTuple, Tuple
import math
from .template import DesignationTemplate, Data, Config
from ..head.objects import Survey, Fuel
class ImpulseOutput(NamedTuple):
    # Aggregated results of one thrust-curve integration
    # (built in Impulse.calculate_impulse).
    total_impulse: float  # N*s (integral of thrust over time)
    unit_impulse: float  # N*s per kg of fuel
    smp_time: float  # ms (survey sampling interval, passed through unchanged)
    jet_d: float  # mm (nozzle diameter as recorded in the survey)
    jet_field: float  # mm -- NOTE(review): calculate_impulse passes an area in m^2; verify unit
    fuel_mass: float  # g -- NOTE(review): calculate_impulse passes the kg-converted value; verify unit
    chamber_length: float
    chamber_d: float
    a: Optional[float] = None  # only computed for "pressthru" surveys
class Impulse(DesignationTemplate):
    # Computes total impulse and unit (specific) impulse from recorded
    # thrust/pressure curves of each survey.

    def calculate_impulse(self, survey: Survey)\
            -> Type[ImpulseOutput]:
        """Integrate one survey's thrust curve into an ImpulseOutput record."""
        # Convert the survey's raw units into SI (g->kg, ms->s, mm->m).
        fuel_mass = self.g_to_kg(survey.fuel_mass)
        smp_time = self.ms_to_s(survey.sampling_time)
        jet_d = self.mm_to_m(survey.jet_diameter)
        # Trim each recorded channel to the (t0, tc, tk) time window.
        times = (survey.t0, survey.tc, survey.tk)
        values = tuple(self.cut_values(val, survey.sampling_time,
                                       times, 2)
                       for val in survey.values)
        # "pressthru" surveys carry (pressure, thrust); others thrust only.
        if survey.type == "pressthru":
            thrust_values = self.kN_to_N(values[1])
            press_values = self.MPa_to_Pa(values[0])
        else:
            thrust_values = self.kN_to_N(values[0])
        # Nozzle cross-section area from its diameter (m^2).
        jet_field = math.pi * (jet_d**2) / 4
        total_impulse = self.integrate(
            thrust_values, smp_time)
        # Impulse per kilogram of fuel burned.
        unit_impulse = total_impulse / fuel_mass
        if survey.type == "pressthru":
            a = self.get_a(press_values, thrust_values, jet_field)
        else:
            a = None
        return ImpulseOutput(total_impulse, unit_impulse,
                             survey.sampling_time, survey.jet_diameter, jet_field,
                             fuel_mass, survey.chamber_length, survey.chamber_diameter, a)

    def get_results(self) -> Tuple[ImpulseOutput, ...]:
        """Run calculate_impulse over every survey held by self.data."""
        return tuple(self.calculate_impulse(survey)
                     for survey in self.data.surveys)

    def get_a(self, press_values: Tuple[float, ...], thrust_values: Tuple[float, ...],
              jet_field: float)\
            -> float:
        """Average of a = thrust / (pressure * jet_field) over paired samples
        (presumably a thrust-coefficient estimate — confirm with domain docs)."""
        sum_a = float(0)
        skip = 0
        for press, thrust in zip(press_values, thrust_values):
            if press:
                a = thrust / (press * jet_field)
            else:
                # No pressure reading: this sample contributes 0.
                a = 0
            if not thrust:
                # Zero-thrust samples are excluded from the averaging count.
                skip += 1
            sum_a += a
        # max(..., 1) guards against division by zero when all were skipped.
        return sum_a / max((len(press_values) - skip), 1)
| 32 | 86 | 0.598592 |
65861bd26bb781b4ed0cb661ef97babb601d2efb | 6,073 | py | Python | pywhale/lib/detector.py | stefan2200/pywhale | 7e0ffce7eeaf426236381db75fa83915e716fc17 | [
"Apache-2.0"
] | 1 | 2020-11-27T16:21:01.000Z | 2020-11-27T16:21:01.000Z | pywhale/lib/detector.py | stefan2200/pywhale | 7e0ffce7eeaf426236381db75fa83915e716fc17 | [
"Apache-2.0"
] | null | null | null | pywhale/lib/detector.py | stefan2200/pywhale | 7e0ffce7eeaf426236381db75fa83915e716fc17 | [
"Apache-2.0"
] | null | null | null | import re
import base64
import quopri

import validators
import tldextract
class HeaderChecker:
    """Base class for single-mail-header checks.

    Subclasses set :attr:`header` to the header name they inspect and
    override :meth:`check`; findings are appended to :attr:`reports` as
    dicts with ``header`` / ``indicator`` / ``data`` keys.
    """

    # Name of the header this checker inspects; subclasses override it.
    header = None
    # Class-level defaults (per-instance values are assigned in __init__).
    headers = {
    }
    reports = [
    ]
    opts = {
    }
    # report shape example:
    # {"header": "From", "indicator": "danger", "data": "From name contains uncommon HTML symbols"}

    def __init__(self, headers=None):
        # BUG FIX: the old signature used a mutable default (headers={}),
        # so every checker constructed without arguments shared one dict.
        self.headers = {} if headers is None else headers
        self.reports = []

    def test(self):
        """Run check() if the target header is present; return whether it was."""
        if self.header in self.headers:
            self.check()
            return True
        return False

    def check(self):
        print("You should override this!!")
        return None

    def header_decode(self, header):
        """Decode RFC 2047 encoded-words (=?charset?B|Q?payload?=) in *header*.

        BUG FIX: the old code handled only base64 ("B") words, ignored the
        declared charset, left quoted-printable ("Q") words substituted
        undecoded, and raised on malformed base64. Both encodings are now
        decoded with the declared charset and errors="replace"; undecodable
        or malformed words are left untouched.
        """
        if "=?" not in header:
            return header
        for token in re.findall(r'(=\?.+?\?=)', header):
            parts = token.split('?')
            if len(parts) < 5:
                continue  # not a well-formed encoded-word
            charset = parts[1] or 'utf-8'
            enc_type = parts[2].lower()
            body = parts[3]
            try:
                if enc_type == "b":
                    decoded = base64.b64decode(body).decode(charset, errors="replace")
                elif enc_type == "q":
                    # In Q encoding "_" represents a space (RFC 2047 section 4.2).
                    decoded = quopri.decodestring(body.replace("_", " ")).decode(charset, errors="replace")
                else:
                    continue  # unknown encoding marker: leave the word as-is
            except (ValueError, LookupError):
                continue  # malformed payload or unknown charset
            header = header.replace(token, decoded)
        return header

    def close(self):
        """Drop all accumulated findings."""
        self.reports = []
class ReplyHostChecker(HeaderChecker):
    """Compare the Return-Path domain against the From domain and report mismatches."""

    header = "Return-Path"

    def check(self):
        value = self.headers[self.header]
        return_domain = None
        from_domain = None

        # Reduce "<user@host>" down to the bare host part.
        if "<" in value and ">" in value:
            value = value.replace('<', '').replace('>', '')
        if "@" in value:
            value = value.split("@")[1]

        if validators.domain(value.lower().strip()):
            return_domain = value.lower().strip()
        else:
            self.reports.append({
                "header": self.header,
                "indicator": "warning",
                "data": "Unable to parse return domain: %s" % (value)
            })

        # Pull the address out of the From header, if present.
        if "From" in self.headers:
            bracket_match = re.search(r'<(.+?)>', self.headers['From'])
            if bracket_match:
                email = bracket_match.group(1)
                host = email.split('@')[1]
                if validators.domain(host.lower().strip()):
                    from_domain = host.lower().strip()
                else:
                    self.reports.append({
                        "header": self.header,
                        "indicator": "warning",
                        "data": "Unable to parse FROM address domain: %s" % (host)
                    })

        # Compare registrable domains (registered name + public suffix).
        if from_domain and return_domain:
            from_domain = tldextract.extract(from_domain)
            return_domain = tldextract.extract(return_domain)
            mismatch = (from_domain.domain != return_domain.domain
                        or from_domain.suffix != return_domain.suffix)
            if mismatch:
                self.reports.append({
                    "header": self.header,
                    "indicator": "warning",
                    "data": "From root domain %s is not part of return root domain: %s" % (from_domain, return_domain)
                })
        return True
class ToHostChecker(HeaderChecker):
    """Inspect the To header for a missing delivery address and mass-mail hints."""

    header = "To"

    def check(self):
        value = self.headers[self.header]
        # Normalise every recipient entry to a lower-cased, trimmed string.
        # (split(',') on a comma-free header yields the single whole value.)
        recipients = [entry.lower().strip() for entry in value.split(',')]

        if "delivery_addr" in self.opts and self.opts['delivery_addr'] not in recipients:
            self.reports.append({
                "header": self.header,
                "indicator": "warning",
                "data": "To address %s does not contain: %s" % (value, self.opts['delivery_addr'])
            })

        if len(recipients) > 10:
            self.reports.append({
                "header": self.header,
                "indicator": "warning",
                "data": "To address line contains %d entries, this might indicate mass-mail" % (len(recipients))
            })
class SenderHostChecker(HeaderChecker):
    # Inspects the From header for display-name spoofing tricks: non-ASCII
    # characters, RFC 2047 encoded-word abuse, embedded fake addresses and
    # the U+200E left-to-right mark.
    header = "From"

    def check(self):
        value = self.headers[self.header]
        email = None
        name = None
        # Split "Display Name <addr@host>" into its two parts.
        get_email = re.search(r'(.+?)\s*<(.+?)>', value)
        if get_email:
            email = get_email.group(2)
            name = get_email.group(1)
            # Drop surrounding quotes from the display name.
            if name.startswith('"') and name.endswith('"'):
                name = name[1:-1]
        else:
            # No angle-bracket form: treat the whole header as the address.
            email = value
        if name:
            # ASCII-only projection of the display name (non-ASCII dropped).
            esc_name = name.encode('ascii', errors='ignore').decode()
            if esc_name != name:
                self.reports.append({
                    "header": self.header,
                    "indicator": "warning",
                    "data": "Name %s contains special characters, real: %s" % (name, esc_name)
                })
            # Expand RFC 2047 encoded-words hidden in the display name.
            dec = self.header_decode(esc_name)
            if dec != esc_name:
                self.reports.append({
                    "header": self.header,
                    "indicator": "warning",
                    "data": "Name %s mixed encoding was applied, real: %s" % (esc_name, dec)
                })
                # NOTE(review): indentation reconstructed from a flattened
                # source — these decoded-text checks are assumed to apply
                # only when decoding actually changed the name.
                if "@" in dec and email not in dec:
                    self.reports.append({
                        "header": self.header,
                        "indicator": "danger",
                        "data": "Encoded text: %s contains an email but does not contain the from email: %s" % (
                            dec, email)
                    })
                if '\u200e' in dec:
                    # Strip the mark before reporting so the message is readable.
                    dec = dec.replace('\u200e', '')
                    self.reports.append({
                        "header": self.header,
                        "indicator": "danger",
                        "data": "Text: %s contains ‎ (Left-to-Right) character" % (
                            dec)
                    })
            # A plain-text address in the name that differs from the real one.
            if "@" in esc_name and email not in esc_name:
                self.reports.append({
                    "header": self.header,
                    "indicator": "danger",
                    "data": "Name text: %s contains an email but does not contain the from email: %s" % (esc_name, email)
                })
            # Expose the sanitised names to later checkers via the headers dict.
            if dec:
                self.headers['stripped_from'] = dec
            if esc_name:
                self.headers['unescaped_from'] = esc_name
        return True
| 33.738889 | 121 | 0.483945 |
423b866b85fcc9f3f13795c7cabfe61e0ec8e30d | 1,844 | py | Python | tests/utils/test_uploaders.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_uploaders.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | tests/utils/test_uploaders.py | KaitoRyouga/CTFd | 827a22e8ce9bdfd43ae0689e6cbcf2a6e253e920 | [
"Apache-2.0"
] | null | null | null | import os
from io import BytesIO
import boto3
from moto import mock_s3
from CTFd.utils.uploads import S3Uploader, rmdir
from tests.helpers import create_ctfd, destroy_ctfd
@mock_s3
def test_s3_uploader():
    """S3Uploader should target the configured bucket and serve uploads back."""
    s3 = boto3.resource("s3", region_name="us-east-1")
    s3.create_bucket(Bucket="bucket")
    app = create_ctfd()
    with app.app_context():
        app.config["UPLOAD_PROVIDER"] = "s3"
        app.config["AWS_ACCESS_KEY_ID"] = "AKIAIOSFODNN7EXAMPLE"
        app.config["AWS_SECRET_ACCESS_KEY"] = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
        app.config["AWS_S3_BUCKET"] = "bucket"
        uploader = S3Uploader()
        assert uploader.s3
        assert uploader.bucket == "bucket"
        payload = BytesIO("fakedfile".encode())
        stored_path = uploader.upload(payload, "fake_file.txt")
        assert "fake_file.txt" in uploader.download(stored_path).location
    destroy_ctfd(app)
@mock_s3
def test_s3_sync():
    """S3Uploader.sync should mirror bucket contents into the local upload folder."""
    s3_resource = boto3.resource("s3", region_name="us-east-1")
    s3_resource.create_bucket(Bucket="bucket")
    app = create_ctfd()
    with app.app_context():
        # Point CTFd at the (moto-mocked) S3 endpoint.
        app.config.update(
            {
                "UPLOAD_PROVIDER": "s3",
                "AWS_ACCESS_KEY_ID": "AKIAIOSFODNN7EXAMPLE",
                "AWS_SECRET_ACCESS_KEY": "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
                "AWS_S3_BUCKET": "bucket",
            }
        )
        uploader = S3Uploader()
        # First sync on an empty bucket must be a no-op that does not fail.
        uploader.sync()
        stored_path = uploader.upload(BytesIO(b"fakedfile"), "fake_file.txt")
        local_copy = os.path.join(app.config["UPLOAD_FOLDER"], stored_path)
        try:
            # After syncing, the uploaded object must exist locally with identical content.
            uploader.sync()
            with open(local_copy) as synced:
                assert synced.read() == "fakedfile"
        finally:
            rmdir(os.path.dirname(local_copy))
    destroy_ctfd(app)
| 30.229508 | 89 | 0.632863 |
5e2a0ac2db74b1a38ab5912ae934c682f260a1e6 | 1,218 | py | Python | serialization/test/scenario_set_serializer_test.py | cirrostratus1/benchmark-database | 8f8d956408749a4c2932a68bd425407fe26d4a53 | [
"MIT"
] | null | null | null | serialization/test/scenario_set_serializer_test.py | cirrostratus1/benchmark-database | 8f8d956408749a4c2932a68bd425407fe26d4a53 | [
"MIT"
] | null | null | null | serialization/test/scenario_set_serializer_test.py | cirrostratus1/benchmark-database | 8f8d956408749a4c2932a68bd425407fe26d4a53 | [
"MIT"
] | null | null | null | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import unittest
import os
import matplotlib
from serialization.scenario_set_serializer import ScenarioSetSerializer
from modules.runtime.commons.parameters import ParameterServer
class ScenarioSetSerializerTests(unittest.TestCase):
    """Round-trip (dump/load/test) check for ScenarioSetSerializer on the
    highway-merging scenario set."""

    def test_highway_merging(self):
        params_path = os.path.join(
            "database", "scenario_sets", "highway_merging", "test_1.json")
        params = ParameterServer(filename=params_path)
        # Keep the generated set tiny to reduce test runtime.
        params["Scenario"]["Generation"]["NumScenarios"] = 1
        serializer = ScenarioSetSerializer(params=params)
        serializer.dump(os.path.join("database", "scenario_sets", "highway_merging"))
        serializer.load()
        # Exercise the headless path first, then the visualized one.
        for visualize in (False, True):
            self.assertTrue(
                serializer.test(num_scenarios=1, num_steps=5,
                                visualize_test=visualize))


if __name__ == '__main__':
    unittest.main()
| 45.111111 | 105 | 0.766831 |
9f25bf2c8cbcbab2967bbc9b750fbd47e6df9ffe | 2,200 | py | Python | robosuite/demos/demo_random_action.py | spatric5/robosuite | 9e6b9691eb949fbf33a23fbe8a8c6faea61c50b6 | [
"MIT"
] | 397 | 2020-09-28T02:49:58.000Z | 2022-03-30T18:08:19.000Z | robosuite/demos/demo_random_action.py | spatric5/robosuite | 9e6b9691eb949fbf33a23fbe8a8c6faea61c50b6 | [
"MIT"
] | 169 | 2020-09-28T02:17:59.000Z | 2022-03-29T13:32:43.000Z | robosuite/demos/demo_random_action.py | spatric5/robosuite | 9e6b9691eb949fbf33a23fbe8a8c6faea61c50b6 | [
"MIT"
] | 131 | 2020-09-28T14:50:35.000Z | 2022-03-31T02:27:33.000Z | from robosuite.controllers import load_controller_config
from robosuite.utils.input_utils import *
if __name__ == "__main__":

    # Create dict to hold options that will be passed to env creation call
    options = {}

    # print welcome info
    print("Welcome to robosuite v{}!".format(suite.__version__))
    print(suite.__logo__)

    # Choose environment and add it to options
    options["env_name"] = choose_environment()

    # If a multi-arm environment has been chosen, choose configuration and appropriate robot(s)
    if "TwoArm" in options["env_name"]:

        # Choose env config and add it to options
        options["env_configuration"] = choose_multi_arm_config()

        # If chosen configuration was bimanual, the corresponding robot must be Baxter. Else, have user choose robots
        if options["env_configuration"] == 'bimanual':
            options["robots"] = 'Baxter'
        else:
            options["robots"] = []

            # Have user choose two robots
            print("A multiple single-arm configuration was chosen.\n")

            for i in range(2):
                print("Please choose Robot {}...\n".format(i))
                options["robots"].append(choose_robots(exclude_bimanual=True))

    # Else, we simply choose a single (single-armed) robot to instantiate in the environment
    else:
        options["robots"] = choose_robots(exclude_bimanual=True)

    # Choose controller
    controller_name = choose_controller()

    # Load the desired controller
    options["controller_configs"] = load_controller_config(default_controller=controller_name)

    # Help message to user
    print()
    print("Press \"H\" to show the viewer control panel.")

    # initialize the task
    env = suite.make(
        **options,
        has_renderer=True,
        has_offscreen_renderer=False,
        ignore_done=True,
        use_camera_obs=False,
        control_freq=20,
    )
    env.reset()
    env.viewer.set_camera(camera_id=0)

    # Get action limits
    low, high = env.action_spec

    # do visualization
    # NOTE(review): actions are sampled uniformly at random each step and `done`
    # is never checked, so the loop always runs the full 10000 steps.
    for i in range(10000):
        action = np.random.uniform(low, high)
        obs, reward, done, _ = env.step(action)
        env.render()
| 31.884058 | 117 | 0.655455 |
56d9130b6cfcc674fe4099f00b0d1f6a8b292df4 | 76 | py | Python | stoxapp/__main__.py | RolfBly/stoxsnschulz | 39b33665216401bdaa0c9650de0f7c5352f2a115 | [
"Apache-2.0"
] | null | null | null | stoxapp/__main__.py | RolfBly/stoxsnschulz | 39b33665216401bdaa0c9650de0f7c5352f2a115 | [
"Apache-2.0"
] | null | null | null | stoxapp/__main__.py | RolfBly/stoxsnschulz | 39b33665216401bdaa0c9650de0f7c5352f2a115 | [
"Apache-2.0"
] | null | null | null | from __init__ import *
if __name__ == "__main__":
app.run(debug=True)
| 12.666667 | 26 | 0.671053 |
b9b3cfe03a5fc0852fc2a012cf67618eaa334502 | 732 | py | Python | content-security-policy/unsafe-eval/support/echo-eval-with-policy.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | 1 | 2021-12-12T18:13:24.000Z | 2021-12-12T18:13:24.000Z | content-security-policy/unsafe-eval/support/echo-eval-with-policy.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | 112 | 2021-09-27T14:39:02.000Z | 2022-03-30T14:26:35.000Z | content-security-policy/unsafe-eval/support/echo-eval-with-policy.py | qanat/wpt | 7c61a4594a95682531367b6956d1c37f8b8fd486 | [
"BSD-3-Clause"
] | null | null | null | def main(request, response):
policy = request.GET.first(b"policy")
return [(b"Content-Type", b"text/html"), (b"Content-Security-Policy", policy)], b"""
<!DOCTYPE html>
<html>
<script>
function check_eval(context) {
context.eval_check_variable = 0;
try {
id = context.eval("eval_check_variable + 1");
} catch (e) {
if (e instanceof EvalError) {
if (context.eval_check_variable === 0)
return "blocked";
else
return "EvalError exception, but eval was executed";
} else {
return "Unexpected exception: " + e.message;
}
}
return "allowed";
}
window.parent.postMessage({
evalInIframe: check_eval(window),
evalInParent: check_eval(parent),
});
</script>
</html>
"""
| 23.612903 | 88 | 0.642077 |
29ccedbd8a42f5633f1f04e35c42cc2555b1c09d | 509 | py | Python | setup.py | ibleaman/yiddish_text_tools | cfd6f482542627471e943bceb10566ef4dc7c836 | [
"MIT"
] | 3 | 2022-02-03T15:15:45.000Z | 2022-03-29T20:47:29.000Z | setup.py | ibleaman/yiddish | cfd6f482542627471e943bceb10566ef4dc7c836 | [
"MIT"
] | null | null | null | setup.py | ibleaman/yiddish | cfd6f482542627471e943bceb10566ef4dc7c836 | [
"MIT"
] | null | null | null | from setuptools import setup
# Use the package README as the PyPI long description.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setup(
    name='yiddish',
    version='0.0.3',
    author='Isaac L. Bleaman',
    author_email='bleaman@berkeley.edu',
    description='A Python library for processing Yiddish text',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/ibleaman/yiddish',
    license='MIT',
    packages=['yiddish'],
    test_suite='tests',
)
f800dd045d79557009d442df4c82f194f1c6977d | 392 | py | Python | virtcol/virtcol/wsgi.py | filipgorczynski/django-virtual-column-sort | 02092288d308e0747b1f84163de1896df2135000 | [
"MIT"
] | null | null | null | virtcol/virtcol/wsgi.py | filipgorczynski/django-virtual-column-sort | 02092288d308e0747b1f84163de1896df2135000 | [
"MIT"
] | null | null | null | virtcol/virtcol/wsgi.py | filipgorczynski/django-virtual-column-sort | 02092288d308e0747b1f84163de1896df2135000 | [
"MIT"
] | null | null | null | """
WSGI config for virtcol project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "virtcol.settings")
application = get_wsgi_application()
| 23.058824 | 78 | 0.785714 |
beb6689f520a003417b7583812c0511a38319b9a | 16,549 | py | Python | model_compiler/openvino_2021.2/downloader.py | roboflow-ai/blobconverter | 243555c8da59bfd94509f407c13cdee6b95e7afc | [
"MIT"
] | 3 | 2021-03-19T23:08:39.000Z | 2022-01-14T21:24:24.000Z | model_compiler/openvino_2021.2/downloader.py | roboflow-ai/blobconverter | 243555c8da59bfd94509f407c13cdee6b95e7afc | [
"MIT"
] | 19 | 2021-03-24T20:36:32.000Z | 2022-03-18T17:39:17.000Z | model_compiler/openvino_2021.2/downloader.py | roboflow-ai/blobconverter | 243555c8da59bfd94509f407c13cdee6b95e7afc | [
"MIT"
] | 2 | 2021-07-06T05:17:51.000Z | 2022-03-29T06:41:16.000Z | #!/usr/bin/env python3
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import concurrent.futures
import contextlib
import functools
import hashlib
import os
import re
from urllib.request import url2pathname
import requests
import shlex
import shutil
import ssl
import sys
import tempfile
import threading
import time
import types
from pathlib import Path
import common
# Download/copy chunk size: 32 KiB when stdout is a TTY (smooth progress
# updates), 1 MiB otherwise (fewer, larger writes).
CHUNK_SIZE = 1 << 15 if sys.stdout.isatty() else 1 << 20
def process_download(reporter, chunk_iterable, size, progress, file):
    """Consume *chunk_iterable*, writing chunks to *file* while updating
    *progress* (``.size`` and ``.hasher``) and printing progress via *reporter*.

    *size* is the expected total byte count; reading stops early if more than
    *size* bytes arrive. Always ends the reporter's progress line, even on
    interruption or error.
    """
    start_time = time.monotonic()
    start_size = progress.size

    try:
        for chunk in chunk_iterable:
            reporter.job_context.check_interrupted()

            if chunk:
                duration = time.monotonic() - start_time
                progress.size += len(chunk)
                progress.hasher.update(chunk)

                if duration != 0:
                    speed = int((progress.size - start_size) / (1024 * duration))
                else:
                    # Avoid division by zero on the very first chunk.
                    speed = '?'

                percent = progress.size * 100 // size

                reporter.print_progress('... {}%, {} KB, {} KB/s, {} seconds passed',
                    percent, progress.size // 1024, speed, int(duration))
                reporter.emit_event('model_file_download_progress', size=progress.size)

                file.write(chunk)

                # don't attempt to finish a file if it's bigger than expected
                if progress.size > size:
                    break
    finally:
        reporter.end_progress()
def try_download(reporter, file, num_attempts, start_download, size):
    """Download into *file*, retrying up to *num_attempts* times.

    *start_download(offset=N)* must return ``(chunk_iterable, continue_offset)``
    where *continue_offset* is the byte position the server actually resumed
    from. Returns the raw SHA-256 digest of the received data on success, or
    None if every attempt failed or the remote file's size is wrong.
    """
    progress = types.SimpleNamespace(size=0)

    for attempt in range(num_attempts):
        if attempt != 0:
            retry_delay = 10
            reporter.print("Will retry in {} seconds...", retry_delay, flush=True)
            time.sleep(retry_delay)

        try:
            reporter.job_context.check_interrupted()
            chunk_iterable, continue_offset = start_download(offset=progress.size)

            if continue_offset not in {0, progress.size}:
                # Somehow we neither restarted nor continued from where we left off.
                # Try to restart.
                chunk_iterable, continue_offset = start_download(offset=0)
                if continue_offset != 0:
                    reporter.log_error("Remote server refuses to send whole file, aborting")
                    return None

            if continue_offset == 0:
                # Restarting from scratch: reset the file and the running hash.
                file.seek(0)
                file.truncate()
                progress.size = 0
                progress.hasher = hashlib.sha256()

            process_download(reporter, chunk_iterable, size, progress, file)

            if progress.size > size:
                reporter.log_error("Remote file is longer than expected ({} B), download aborted", size)
                # no sense in retrying - if the file is longer, there's no way it'll fix itself
                return None
            elif progress.size < size:
                reporter.log_error("Downloaded file is shorter ({} B) than expected ({} B)",
                    progress.size, size)
                # it's possible that we got disconnected before receiving the full file,
                # so try again
            else:
                return progress.hasher.digest()
        except (requests.exceptions.RequestException, ssl.SSLError):
            reporter.log_error("Download failed", exc_info=True)

    return None
def verify_hash(reporter, actual_hash, expected_hash, path):
    """Return True iff *actual_hash* (raw digest bytes) matches the hex string
    *expected_hash*; on mismatch, log the discrepancy for *path*."""
    expected_bytes = bytes.fromhex(expected_hash)
    if actual_hash == expected_bytes:
        return True
    reporter.log_error('Hash mismatch for "{}"', path)
    reporter.log_details('Expected: {}', expected_hash)
    reporter.log_details('Actual: {}', actual_hash.hex())
    return False
class NullCache:
    """Cache used when no --cache_dir is given: never hits, never stores."""

    def has(self, hash):
        # Nothing is ever cached.
        return False

    def get(self, model_file, path, reporter):
        # Retrieval always misses.
        return False

    def put(self, hash, path):
        # Storing is a silent no-op.
        pass
class DirCache:
    """Content-addressed on-disk cache: each file is stored under a path derived
    from its SHA-256 hex digest (two-level fan-out: ``<FORMAT>/ab/cdef...``)."""

    _FORMAT = 1 # increment if backwards-incompatible changes to the format are made

    # Length of a SHA-256 digest in hex characters (64).
    _HASH_LEN = hashlib.sha256().digest_size * 2

    def __init__(self, cache_dir):
        self._cache_dir = cache_dir / str(self._FORMAT)
        self._cache_dir.mkdir(parents=True, exist_ok=True)

        # Staging area for not-yet-committed files; lives inside the cache dir
        # so the final rename is on the same filesystem (atomic).
        self._staging_dir = self._cache_dir / 'staging'
        self._staging_dir.mkdir(exist_ok=True)

    def _hash_path(self, hash):
        # Map a hex digest to its cache location; rejects malformed hashes.
        hash = hash.lower()
        assert len(hash) == self._HASH_LEN
        assert re.fullmatch('[0-9a-f]+', hash)
        return self._cache_dir / hash[:2] / hash[2:]

    def has(self, hash):
        return self._hash_path(hash).exists()

    def get(self, model_file, path, reporter):
        # Copy the cached entry for model_file.sha256 to *path*, re-hashing on
        # the way out; returns False on size or hash mismatch.
        cache_path = self._hash_path(model_file.sha256)
        cache_sha256 = hashlib.sha256()
        cache_size = 0

        with open(cache_path, 'rb') as cache_file, open(path, 'wb') as destination_file:
            while True:
                data = cache_file.read(CHUNK_SIZE)
                if not data:
                    break
                cache_size += len(data)
                if cache_size > model_file.size:
                    reporter.log_error("Cached file is longer than expected ({} B), copying aborted", model_file.size)
                    return False
                cache_sha256.update(data)
                destination_file.write(data)

        if cache_size < model_file.size:
            reporter.log_error("Cached file is shorter ({} B) than expected ({} B)", cache_size, model_file.size)
            return False

        return verify_hash(reporter, cache_sha256.digest(), model_file.sha256, path)

    def put(self, hash, path):
        # A file in the cache must have the hash implied by its name. So when we upload a file,
        # we first copy it to a temporary file and then atomically move it to the desired name.
        # This prevents interrupted runs from corrupting the cache.
        with path.open('rb') as src_file:
            with tempfile.NamedTemporaryFile(dir=str(self._staging_dir), delete=False) as staging_file:
                staging_path = Path(staging_file.name)
                shutil.copyfileobj(src_file, staging_file)

        hash_path = self._hash_path(hash)
        hash_path.parent.mkdir(parents=True, exist_ok=True)
        staging_path.replace(self._hash_path(hash))
def try_retrieve_from_cache(reporter, cache, model_file, destination):
    """Attempt to copy *model_file* out of *cache* into *destination*.

    Returns True only on a verified cache hit; any miss, mismatch, or cache
    error returns False so the caller falls back to a normal download.
    """
    try:
        if cache.has(model_file.sha256):
            reporter.job_context.check_interrupted()

            reporter.print_section_heading('Retrieving {} from the cache', destination)
            if not cache.get(model_file, destination, reporter):
                reporter.print('Will retry from the original source.')
                reporter.print()
                return False
            reporter.print()
            return True
    except Exception:
        # Deliberately broad: a broken cache must never break the download.
        reporter.log_warning('Cache retrieval failed; falling back to downloading', exc_info=True)
        reporter.print()

    return False
def try_update_cache(reporter, cache, hash, source):
    """Best-effort store of *source* into *cache* under *hash*; a failing cache
    only produces a warning, never an error."""
    try:
        cache.put(hash, source)
    except Exception:
        reporter.log_warning('Failed to update the cache', exc_info=True)
def try_retrieve(reporter, destination, model_file, cache, num_attempts, start_download):
    """Obtain *model_file* at *destination*: first from *cache*, otherwise by
    downloading (up to *num_attempts* tries) and verifying its SHA-256.

    Successfully downloaded files are stored back into the cache. Returns True
    on success.
    """
    destination.parent.mkdir(parents=True, exist_ok=True)

    if try_retrieve_from_cache(reporter, cache, model_file, destination):
        return True

    reporter.print_section_heading('Downloading {}', destination)

    success = False

    with destination.open('w+b') as f:
        actual_hash = try_download(reporter, f, num_attempts, start_download, model_file.size)

    # actual_hash is None when the download failed outright.
    if actual_hash and verify_hash(reporter, actual_hash, model_file.sha256, destination):
        try_update_cache(reporter, cache, model_file.sha256, destination)
        success = True

    reporter.print()
    return success
def download_model(reporter, args, cache, session_factory, requested_precisions, model):
    """Download every file of *model* into ``args.output_dir``, then run the
    model's postprocessing steps.

    Files living under a known-precision subdirectory that was not requested
    are skipped. Emits progress events throughout; returns True if all files
    were retrieved, False (after deleting the partial file) on the first
    failure.
    """
    session = session_factory()

    reporter.print_group_heading('Downloading {}', model.name)

    reporter.emit_event('model_download_begin', model=model.name, num_files=len(model.files))

    output = args.output_dir / model.subdirectory
    output.mkdir(parents=True, exist_ok=True)

    for model_file in model.files:
        if len(model_file.name.parts) == 2:
            # Files are laid out as <precision>/<file>; skip precisions the
            # user did not ask for.
            p = model_file.name.parts[0]
            if p in common.KNOWN_PRECISIONS and p not in requested_precisions:
                continue

        model_file_reporter = reporter.with_event_context(model=model.name, model_file=model_file.name.as_posix())
        model_file_reporter.emit_event('model_file_download_begin', size=model_file.size)

        destination = output / model_file.name

        if not try_retrieve(model_file_reporter, destination, model_file, cache, args.num_attempts,
                functools.partial(model_file.source.start_download, session, CHUNK_SIZE)):
            # Remove the partial/corrupt file so a later run starts clean.
            try:
                destination.unlink()
            except FileNotFoundError:
                pass

            model_file_reporter.emit_event('model_file_download_end', successful=False)
            reporter.emit_event('model_download_end', model=model.name, successful=False)
            return False

        model_file_reporter.emit_event('model_file_download_end', successful=True)

    reporter.emit_event('model_download_end', model=model.name, successful=True)

    if model.postprocessing:
        reporter.emit_event('model_postprocessing_begin', model=model.name)

        for postproc in model.postprocessing:
            postproc.apply(reporter, output)

        reporter.emit_event('model_postprocessing_end', model=model.name)

    reporter.print()
    return True
class DownloaderArgumentParser(argparse.ArgumentParser):
    """Argument parser that prints the full help text after a usage error."""

    def error(self, message):
        # Report the error, show complete usage help, then exit with the
        # conventional argparse failure status.
        print('error: %s' % message, file=sys.stderr)
        self.print_help()
        sys.exit(2)
def positive_int_arg(value_str):
    """argparse ``type=`` converter: parse *value_str* as a strictly positive
    int, raising ArgumentTypeError for anything else."""
    try:
        parsed = int(value_str)
    except ValueError:
        parsed = None  # not an integer at all; fall through to the error
    if parsed is not None and parsed > 0:
        return parsed
    raise argparse.ArgumentTypeError('must be a positive integer (got {!r})'.format(value_str))
class LocalFileAdapter(requests.adapters.BaseAdapter):
    """requests transport adapter that serves ``file://`` URLs from the local
    filesystem, so local and remote model sources share one download path."""

    @staticmethod
    def _chkpath(method, path):
        """Return an HTTP status for the given filesystem path."""
        if method.lower() in ('put', 'delete'):
            return 501, "Not Implemented" # TODO
        elif method.lower() not in ('get', 'head'):
            return 405, "Method Not Allowed"
        elif os.path.isdir(path):
            return 400, "Path Not A File"
        elif not os.path.isfile(path):
            return 404, "File Not Found"
        elif not os.access(path, os.R_OK):
            return 403, "Access Denied"
        else:
            return 200, "OK"

    def send(self, req, **kwargs): # pylint: disable=unused-argument
        """Return the file specified by the given request
        @type req: C{PreparedRequest}
        @todo: Should I bother filling `response.headers` and processing
               If-Modified-Since and friends using `os.stat`?
        """
        path = os.path.normcase(os.path.normpath(url2pathname(req.path_url)))
        response = requests.Response()

        response.status_code, response.reason = self._chkpath(req.method, path)
        if response.status_code == 200 and req.method.lower() != 'head':
            try:
                # requests takes ownership of the file object via response.raw
                # and closes it when the response is consumed/closed.
                response.raw = open(path, 'rb')
            except (OSError, IOError) as err:
                response.status_code = 500
                response.reason = str(err)

        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url

        response.request = req
        response.connection = self

        return response

    def close(self):
        # No pooled resources to release.
        pass
# There is no evidence that the requests.Session class is thread-safe,
# so for safety, we use one Session per thread. This class ensures that
# each thread gets its own Session.
class ThreadSessionFactory:
    """Callable that hands each thread its own requests.Session (created lazily
    on first use), registering every session with *exit_stack* for cleanup."""

    def __init__(self, exit_stack):
        self._lock = threading.Lock()
        self._thread_local = threading.local()
        self._exit_stack = exit_stack

    def __call__(self):
        try:
            # Fast path: this thread already has a session.
            session = self._thread_local.session
        except AttributeError:
            with self._lock: # ExitStack might not be thread-safe either
                session = self._exit_stack.enter_context(requests.Session())
                session.mount('file://', LocalFileAdapter())
                self._thread_local.session = session
        return session
def main():
    """Entry point: parse CLI options, download the selected models (optionally
    in parallel), and exit non-zero if any model failed."""
    parser = DownloaderArgumentParser()
    parser.add_argument('--name', metavar='PAT[,PAT...]',
        help='download only models whose names match at least one of the specified patterns')
    parser.add_argument('--list', type=Path, metavar='FILE.LST',
        help='download only models whose names match at least one of the patterns in the specified file')
    parser.add_argument('--all', action='store_true', help='download all available models')
    parser.add_argument('--print_all', action='store_true', help='print all available models')
    parser.add_argument('--precisions', metavar='PREC[,PREC...]',
        help='download only models with the specified precisions (actual for DLDT networks)')
    parser.add_argument('-o', '--output_dir', type=Path, metavar='DIR',
        default=Path.cwd(), help='path where to save models')
    parser.add_argument('--cache_dir', type=Path, metavar='DIR',
        help='directory to use as a cache for downloaded files')
    parser.add_argument('--num_attempts', type=positive_int_arg, metavar='N', default=1,
        help='attempt each download up to N times')
    parser.add_argument('--progress_format', choices=('text', 'json'), default='text',
        help='which format to use for progress reporting')
    # unlike Model Converter, -jauto is not supported here, because CPU count has no
    # relation to the optimal number of concurrent downloads
    parser.add_argument('-j', '--jobs', type=positive_int_arg, metavar='N', default=1,
        help='how many downloads to perform concurrently')
    # lux
    parser.add_argument('--model_root', type=Path, default=None, help='path to models folder')

    args = parser.parse_args()

    def make_reporter(context):
        # One reporter per output context; format follows --progress_format.
        return common.Reporter(context,
            enable_human_output=args.progress_format == 'text',
            enable_json_output=args.progress_format == 'json')

    reporter = make_reporter(common.DirectOutputContext())

    cache = NullCache() if args.cache_dir is None else DirCache(args.cache_dir)

    models = common.load_models_from_args(parser, args)

    failed_models = set()

    if args.precisions is None:
        requested_precisions = common.KNOWN_PRECISIONS
    else:
        requested_precisions = set(args.precisions.split(','))
        unknown_precisions = requested_precisions - common.KNOWN_PRECISIONS
        if unknown_precisions:
            sys.exit('Unknown precisions specified: {}.'.format(', '.join(sorted(unknown_precisions))))

    with contextlib.ExitStack() as exit_stack:
        session_factory = ThreadSessionFactory(exit_stack)
        if args.jobs == 1:
            # Sequential path: reuse the top-level reporter directly.
            results = [download_model(reporter, args, cache, session_factory, requested_precisions, model)
                for model in models]
        else:
            results = common.run_in_parallel(args.jobs,
                lambda context, model: download_model(
                    make_reporter(context), args, cache, session_factory, requested_precisions, model),
                models)

    failed_models = {model.name for model, successful in zip(models, results) if not successful}

    if failed_models:
        reporter.print('FAILED:')
        for failed_model_name in failed_models:
            reporter.print(failed_model_name)
        sys.exit(1)

if __name__ == '__main__':
    main()
| 38.131336 | 118 | 0.648075 |
cb6031ec14794d13c158d841f9c40f4978cbcc85 | 27,017 | py | Python | recipes/opencv/3.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 562 | 2019-09-04T12:23:43.000Z | 2022-03-29T16:41:43.000Z | recipes/opencv/3.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 9,799 | 2019-09-04T12:02:11.000Z | 2022-03-31T23:55:45.000Z | recipes/opencv/3.x/conanfile.py | rockandsalt/conan-center-index | d739adcec3e4dd4c250eff559ceb738e420673dd | [
"MIT"
] | 1,126 | 2019-09-04T11:57:46.000Z | 2022-03-31T16:43:38.000Z | from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import os
import textwrap
required_conan_version = ">=1.33.0"
class OpenCVConan(ConanFile):
    """Conan recipe for OpenCV 3.x (conan-center-index)."""

    name = "opencv"
    license = "BSD-3-Clause"
    homepage = "https://opencv.org"
    description = "OpenCV (Open Source Computer Vision Library)"
    url = "https://github.com/conan-io/conan-center-index"
    topics = ("computer-vision", "deep-learning", "image-processing")
    settings = "os", "compiler", "build_type", "arch"
    # Build-time switches: image codecs, math backends, GUI, parallel runtime,
    # the contrib modules, and the patented "nonfree" algorithms.
    options = {"shared": [True, False],
               "fPIC": [True, False],
               "contrib": [True, False],
               "parallel": [False, "tbb", "openmp"],
               "with_jpeg": [False, "libjpeg", "libjpeg-turbo"],
               "with_png": [True, False],
               "with_tiff": [True, False],
               "with_jasper": [True, False],
               "with_openexr": [True, False],
               "with_eigen": [True, False],
               "with_webp": [True, False],
               "with_gtk": [True, False],
               "nonfree": [True, False],
               }
    default_options = {"shared": False,
                       "fPIC": True,
                       "parallel": False,
                       "contrib": False,
                       "with_jpeg": "libjpeg",
                       "with_png": True,
                       "with_tiff": True,
                       "with_jasper": True,
                       "with_openexr": True,
                       "with_eigen": True,
                       "with_webp": True,
                       "with_gtk": True,
                       "nonfree": False,
                       }

    # OpenCV's deeply nested build tree can exceed Windows path limits.
    short_paths = True
    exports_sources = "CMakeLists.txt"
    generators = "cmake", "cmake_find_package"
    # Cached CMake helper built lazily by _configure_cmake().
    _cmake = None
    @property
    def _source_subfolder(self):
        # Where the OpenCV sources are unpacked.
        return "source_subfolder"

    @property
    def _build_subfolder(self):
        # Out-of-source CMake build directory.
        return "build_subfolder"

    @property
    def _contrib_folder(self):
        # Where the opencv_contrib sources are unpacked.
        return "contrib"
    def config_options(self):
        # fPIC is meaningless on Windows; GTK support is only offered on Linux.
        if self.settings.os == "Windows":
            del self.options.fPIC
        if self.settings.os != "Linux":
            del self.options.with_gtk

    def configure(self):
        # OpenEXR headers require at least C++11 when a cppstd is enforced.
        if self.settings.compiler.cppstd and self.options.with_openexr:
            tools.check_min_cppstd(self, 11)
        # fPIC is irrelevant for shared builds.
        if self.options.shared:
            del self.options.fPIC

    def validate(self):
        if self.settings.compiler == "Visual Studio" and \
           "MT" in str(self.settings.compiler.runtime) and self.options.shared:
            raise ConanInvalidConfiguration("Visual Studio and Runtime MT is not supported for shared library.")
        if self.settings.compiler == "clang" and tools.Version(self.settings.compiler.version) < "4":
            raise ConanInvalidConfiguration("Clang 3.x cannot build OpenCV 3.x due an internal bug.")
        # NOTE(review): propagating the chosen JPEG implementation to all
        # dependencies here works, but mutating options in validate() rather
        # than configure() is unusual -- confirm intentional.
        self.options["*"].jpeg = self.options.with_jpeg
        self.options["*"].with_libjpeg = self.options.with_jpeg
        self.options["*"].with_jpeg = self.options.with_jpeg
    def requirements(self):
        """Declare dependencies; all codec/backends are gated by their option."""
        self.requires("zlib/1.2.11")
        if self.options.with_jpeg == "libjpeg":
            self.requires("libjpeg/9d")
        elif self.options.with_jpeg == "libjpeg-turbo":
            self.requires("libjpeg-turbo/2.0.6")
        if self.options.with_png:
            self.requires("libpng/1.6.37")
        if self.options.with_jasper:
            self.requires("jasper/2.0.25")
        if self.options.with_openexr:
            self.requires("openexr/2.5.5")
        if self.options.with_tiff:
            self.requires("libtiff/4.2.0")
        if self.options.with_eigen:
            self.requires("eigen/3.3.9")
        if self.options.parallel == "tbb":
            self.requires("tbb/2020.3")
        if self.options.with_webp:
            self.requires("libwebp/1.1.0")
        if self.options.contrib:
            # Needed by the contrib freetype and sfm modules.
            self.requires("freetype/2.10.4")
            self.requires("harfbuzz/2.7.4")
            self.requires("gflags/2.2.2")
            self.requires("glog/0.4.0")
        if self.options.get_safe("with_gtk"):
            self.requires("gtk/system")
    def source(self):
        # Fetch both the main OpenCV tree and the matching contrib tree, and
        # rename them to the fixed folder names used by the rest of the recipe.
        tools.get(**self.conan_data["sources"][self.version][0])
        os.rename("opencv-{}".format(self.version), self._source_subfolder)
        tools.get(**self.conan_data["sources"][self.version][1])
        os.rename("opencv_contrib-{}".format(self.version), self._contrib_folder)

    def _patch_opencv(self):
        # Drop vendored third-party sources; everything comes from Conan deps.
        tools.rmdir(os.path.join(self._source_subfolder, "3rdparty"))
        if self.options.contrib:
            # Make the contrib freetype module resolve Freetype/HarfBuzz via
            # CMake find_package (Conan generators) instead of pkg-config.
            freetype_cmake = os.path.join(self._contrib_folder, "modules", "freetype", "CMakeLists.txt")
            tools.replace_in_file(freetype_cmake, "ocv_check_modules(FREETYPE freetype2)", "find_package(Freetype REQUIRED)")
            tools.replace_in_file(freetype_cmake, "FREETYPE_", "Freetype_")
            tools.replace_in_file(freetype_cmake, "ocv_check_modules(HARFBUZZ harfbuzz)", "find_package(harfbuzz REQUIRED)")
            tools.replace_in_file(freetype_cmake, "HARFBUZZ_", "harfbuzz_")
        tools.replace_in_file(os.path.join(self._source_subfolder, "CMakeLists.txt"), "ANDROID OR NOT UNIX", "FALSE")
        tools.replace_in_file(os.path.join(self._source_subfolder, "modules", "imgcodecs", "CMakeLists.txt"), "JASPER_", "Jasper_")
    def _configure_cmake(self):
        """Build (once) and cache the CMake configuration for OpenCV.

        Disables every bundled third-party build and every optional backend
        not modeled as a Conan dependency, then maps the recipe options onto
        the corresponding WITH_* switches.
        """
        if self._cmake:
            return self._cmake
        self._cmake = CMake(self)
        # Install layout inside the Conan package.
        self._cmake.definitions["OPENCV_CONFIG_INSTALL_PATH"] = "cmake"
        self._cmake.definitions["OPENCV_BIN_INSTALL_PATH"] = "bin"
        self._cmake.definitions["OPENCV_LIB_INSTALL_PATH"] = "lib"
        self._cmake.definitions["OPENCV_3P_LIB_INSTALL_PATH"] = "lib"
        self._cmake.definitions["OPENCV_OTHER_INSTALL_PATH"] = "res"
        self._cmake.definitions["OPENCV_LICENSES_INSTALL_PATH"] = "licenses"
        # No examples/docs/tests/bindings in the package.
        self._cmake.definitions["BUILD_EXAMPLES"] = False
        self._cmake.definitions["BUILD_DOCS"] = False
        self._cmake.definitions["BUILD_TESTS"] = False
        self._cmake.definitions["BUILD_PACKAGE"] = False
        self._cmake.definitions["BUILD_PERF_TESTS"] = False
        self._cmake.definitions["BUILD_JAVA"] = False
        self._cmake.definitions["BUILD_FAT_JAVA_LIB"] = False
        # NOTE(review): BUILD_PERF_TESTS is set twice (also a few lines above).
        self._cmake.definitions["BUILD_PERF_TESTS"] = False
        # Never build the vendored third-party libraries.
        self._cmake.definitions["BUILD_ZLIB"] = False
        self._cmake.definitions["BUILD_JPEG"] = False
        self._cmake.definitions["BUILD_PNG"] = False
        self._cmake.definitions["BUILD_TIFF"] = False
        self._cmake.definitions["BUILD_JASPER"] = False
        self._cmake.definitions["BUILD_OPENEXR"] = False
        self._cmake.definitions["BUILD_WEBP"] = False
        self._cmake.definitions["BUILD_TBB"] = False
        self._cmake.definitions["BUILD_JPEG_TURBO_DISABLE"] = True
        self._cmake.definitions["BUILD_IPP_IW"] = False
        self._cmake.definitions["BUILD_ITT"] = False
        self._cmake.definitions["BUILD_PROTOBUF"] = False
        self._cmake.definitions["BUILD_USE_SYMLINKS"] = False
        self._cmake.definitions["OPENCV_FORCE_3RDPARTY_BUILD"] = False
        self._cmake.definitions["BUILD_opencv_java_bindings_gen"] = False
        self._cmake.definitions["BUILD_opencv_js"] = False
        self._cmake.definitions["BUILD_opencv_apps"] = False
        self._cmake.definitions["BUILD_opencv_java"] = False
        self._cmake.definitions["BUILD_opencv_python2"] = False
        self._cmake.definitions["BUILD_opencv_python3"] = False
        self._cmake.definitions["BUILD_opencv_python_bindings_g"] = False
        self._cmake.definitions["BUILD_opencv_python_tests"] = False
        self._cmake.definitions["BUILD_opencv_ts"] = False
        # Backends not modeled as Conan dependencies are switched off.
        self._cmake.definitions["WITH_CUFFT"] = False
        self._cmake.definitions["WITH_CUBLAS"] = False
        self._cmake.definitions["WITH_NVCUVID"] = False
        self._cmake.definitions["WITH_FFMPEG"] = False
        self._cmake.definitions["WITH_GSTREAMER"] = False
        self._cmake.definitions["WITH_OPENCL"] = False
        self._cmake.definitions["WITH_CUDA"] = False
        self._cmake.definitions["WITH_1394"] = False
        self._cmake.definitions["WITH_ADE"] = False
        self._cmake.definitions["WITH_ARAVIS"] = False
        self._cmake.definitions["WITH_CLP"] = False
        self._cmake.definitions["WITH_HALIDE"] = False
        self._cmake.definitions["WITH_HPX"] = False
        self._cmake.definitions["WITH_IMGCODEC_HDR"] = False
        self._cmake.definitions["WITH_IMGCODEC_PFM"] = False
        self._cmake.definitions["WITH_IMGCODEC_PXM"] = False
        self._cmake.definitions["WITH_IMGCODEC_SUNRASTER"] = False
        self._cmake.definitions["WITH_INF_ENGINE"] = False
        self._cmake.definitions["WITH_IPP"] = False
        self._cmake.definitions["WITH_ITT"] = False
        self._cmake.definitions["WITH_LIBREALSENSE"] = False
        self._cmake.definitions["WITH_MFX"] = False
        self._cmake.definitions["WITH_NGRAPH"] = False
        self._cmake.definitions["WITH_OPENCLAMDBLAS"] = False
        self._cmake.definitions["WITH_OPENCLAMDFFT"] = False
        self._cmake.definitions["WITH_OPENCL_SVM"] = False
        self._cmake.definitions["WITH_OPENGL"] = False
        self._cmake.definitions["WITH_OPENNI"] = False
        self._cmake.definitions["WITH_OPENNI2"] = False
        self._cmake.definitions["WITH_OPENVX"] = False
        self._cmake.definitions["WITH_PLAIDML"] = False
        self._cmake.definitions["WITH_PROTOBUF"] = False
        self._cmake.definitions["WITH_PTHREADS_PF"] = False
        self._cmake.definitions["WITH_PVAPI"] = False
        self._cmake.definitions["WITH_QT"] = False
        self._cmake.definitions["WITH_QUIRC"] = False
        self._cmake.definitions["WITH_V4L"] = False
        self._cmake.definitions["WITH_VA"] = False
        self._cmake.definitions["WITH_VA_INTEL"] = False
        self._cmake.definitions["WITH_VTK"] = False
        self._cmake.definitions["WITH_VULKAN"] = False
        self._cmake.definitions["WITH_XIMEA"] = False
        self._cmake.definitions["WITH_XINE"] = False
        self._cmake.definitions["WITH_LAPACK"] = False
        self._cmake.definitions["WITH_IPP_IW"] = False
        self._cmake.definitions["WITH_CAROTENE"] = False
        # NOTE(review): WITH_PROTOBUF and WITH_LAPACK are set twice (redundant
        # but harmless duplicates of assignments above).
        self._cmake.definitions["WITH_PROTOBUF"] = False
        self._cmake.definitions["WITH_LAPACK"] = False
        # Map the recipe options onto the corresponding CMake switches.
        self._cmake.definitions["WITH_JPEG"] = self.options.with_jpeg
        self._cmake.definitions["WITH_PNG"] = self.options.with_png
        self._cmake.definitions["WITH_TIFF"] = self.options.with_tiff
        self._cmake.definitions["WITH_JASPER"] = self.options.with_jasper
        self._cmake.definitions["WITH_OPENEXR"] = self.options.with_openexr
        self._cmake.definitions["WITH_EIGEN"] = self.options.with_eigen
        self._cmake.definitions["WITH_WEBP"] = self.options.with_webp
        # Windows-native capture backends only make sense with MSVC.
        self._cmake.definitions["WITH_DSHOW"] = self.settings.compiler == "Visual Studio"
        self._cmake.definitions["WITH_MSMF"] = self.settings.compiler == "Visual Studio"
        self._cmake.definitions["WITH_MSMF_DXVA"] = self.settings.compiler == "Visual Studio"
        self._cmake.definitions["WITH_GTK"] = self.options.get_safe("with_gtk", False)
        self._cmake.definitions["WITH_GTK_2_X"] = self.options.get_safe("with_gtk", False)
        self._cmake.definitions["OPENCV_MODULES_PUBLIC"] = "opencv"
        self._cmake.definitions["OPENCV_ENABLE_NONFREE"] = self.options.nonfree
        if self.options.parallel:
            self._cmake.definitions["WITH_TBB"] = self.options.parallel == "tbb"
            self._cmake.definitions["WITH_OPENMP"] = self.options.parallel == "openmp"
        if self.options.contrib:
            self._cmake.definitions["OPENCV_EXTRA_MODULES_PATH"] = os.path.join(self.build_folder, self._contrib_folder, 'modules')
        if self.settings.compiler == "Visual Studio":
            self._cmake.definitions["BUILD_WITH_STATIC_CRT"] = self.settings.compiler.runtime in ("MT", "MTd")
        if self.options.with_openexr:
            self._cmake.definitions["OPENEXR_ROOT"] = self.deps_cpp_info['openexr'].rootpath.replace("\\", "/")
        self._cmake.definitions["ENABLE_PIC"] = self.options.get_safe("fPIC", True)
        self._cmake.configure(build_folder=self._build_subfolder)
        return self._cmake
def build(self):
self._patch_opencv()
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "cmake"))
if os.path.isfile(os.path.join(self.package_folder, "setup_vars_opencv3.cmd")):
os.rename(os.path.join(self.package_folder, "setup_vars_opencv3.cmd"),
os.path.join(self.package_folder, "res", "setup_vars_opencv3.cmd"))
self._create_cmake_module_alias_targets(
os.path.join(self.package_folder, self._module_subfolder, self._module_file),
{component["target"]:"opencv::{}".format(component["target"]) for component in self._opencv_components}
)
@staticmethod
def _create_cmake_module_alias_targets(module_file, targets):
content = ""
for alias, aliased in targets.items():
content += textwrap.dedent("""\
if(TARGET {aliased} AND NOT TARGET {alias})
add_library({alias} INTERFACE IMPORTED)
set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})
endif()
""".format(alias=alias, aliased=aliased))
tools.save(module_file, content)
    @property
    def _module_subfolder(self):
        # Package-relative folder holding the generated CMake alias module.
        return os.path.join("lib", "cmake")
@property
def _module_file(self):
return "conan-official-{}-targets.cmake".format(self.name)
    @property
    def _opencv_components(self):
        """List of dicts describing every built OpenCV component: its CMake
        target name, library base name, and the internal components /
        external packages it requires (conditional on the enabled options)."""
        # Each helper below returns the extra requirements implied by the
        # corresponding option; an empty list when the option is disabled.
        def imageformats_deps():
            components = []
            if self.options.with_jasper:
                components.append("jasper::jasper")
            if self.options.with_png:
                components.append("libpng::libpng")
            if self.options.with_jpeg:
                # with_jpeg holds the provider name (e.g. libjpeg, libjpeg-turbo).
                components.append("{0}::{0}".format(self.options.with_jpeg))
            if self.options.with_tiff:
                components.append("libtiff::libtiff")
            if self.options.with_openexr:
                components.append("openexr::openexr")
            if self.options.with_webp:
                components.append("libwebp::libwebp")
            return components
        def eigen():
            return ["eigen::eigen"] if self.options.with_eigen else []
        def parallel():
            if self.options.parallel:
                return ["tbb::tbb"] if self.options.parallel == "tbb" else ["openmp"]
            return []
        def xfeatures2d():
            return ["opencv_xfeatures2d"] if self.options.contrib else []
        def freetype():
            return ["freetype::freetype"] if self.options.contrib else []
        def gtk():
            return ["gtk::gtk"] if self.options.get_safe("with_gtk") else []
        # Core modules, always built.
        opencv_components = [
            {"target": "opencv_core",       "lib": "core",       "requires": ["zlib::zlib"] + eigen() + parallel()},
            {"target": "opencv_flann",      "lib": "flann",      "requires": ["opencv_core"] + eigen()},
            {"target": "opencv_imgproc",    "lib": "imgproc",    "requires": ["opencv_core"] + eigen()},
            {"target": "opencv_ml",         "lib": "ml",         "requires": ["opencv_core"] + eigen()},
            {"target": "opencv_photo",      "lib": "photo",      "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
            {"target": "opencv_video",      "lib": "video",      "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
            {"target": "opencv_features2d", "lib": "features2d", "requires": ["opencv_core", "opencv_flann", "opencv_imgproc"] + eigen()},
            {"target": "opencv_imgcodecs",  "lib": "imgcodecs",  "requires": ["opencv_core", "opencv_imgproc"] + eigen() + imageformats_deps()},
            {"target": "opencv_shape",      "lib": "shape",      "requires": ["opencv_core", "opencv_imgproc", "opencv_video"] + eigen()},
            {"target": "opencv_videoio",    "lib": "videoio",    "requires": ["opencv_core", "opencv_imgproc", "opencv_imgcodecs"] + eigen()},
            {"target": "opencv_calib3d",    "lib": "calib3d",    "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d"] + eigen()},
            {"target": "opencv_highgui",    "lib": "highgui",    "requires": ["opencv_core", "opencv_imgproc", "opencv_imgcodecs", "opencv_videoio"] + eigen() + gtk() + freetype()},
            {"target": "opencv_objdetect",  "lib": "objdetect",  "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_calib3d"] + eigen()},
            {"target": "opencv_stitching",  "lib": "stitching",  "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_ml", "opencv_video", "opencv_features2d", "opencv_shape", "opencv_calib3d"] + xfeatures2d() + eigen()},
            {"target": "opencv_superres",   "lib": "superres",   "requires": ["opencv_core", "opencv_imgproc", "opencv_video", "opencv_imgcodecs", "opencv_videoio"] + eigen()},
            {"target": "opencv_videostab",  "lib": "videostab",  "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_photo", "opencv_video", "opencv_features2d", "opencv_imgcodecs", "opencv_videoio", "opencv_calib3d"] + eigen()}
        ]
        # Extra (contrib) modules, only when the contrib option is enabled.
        if self.options.contrib:
            opencv_components.extend([
                {"target": "opencv_phase_unwrapping",   "lib": "phase_unwrapping",  "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_plot",               "lib": "plot",              "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_reg",                "lib": "reg",               "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_surface_matching",   "lib": "surface_matching",  "requires": ["opencv_core", "opencv_flann"] + eigen()},
                {"target": "opencv_xphoto",             "lib": "xphoto",            "requires": ["opencv_core", "opencv_imgproc", "opencv_photo"] + eigen()},
                {"target": "opencv_freetype",           "lib": "freetype",          "requires": ["opencv_core", "opencv_imgproc", "freetype::freetype", "harfbuzz::harfbuzz"] + eigen()},
                {"target": "opencv_fuzzy",              "lib": "fuzzy",             "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_hfs",                "lib": "hfs",               "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_img_hash",           "lib": "img_hash",          "requires": ["opencv_core", "opencv_imgproc"] + eigen()},
                {"target": "opencv_line_descriptor",    "lib": "line_descriptor",   "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d"] + eigen()},
                {"target": "opencv_saliency",           "lib": "saliency",          "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d"] + eigen()},
                {"target": "opencv_datasets",           "lib": "datasets",          "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_ml", "opencv_imgcodecs"] + eigen()},
                {"target": "opencv_rgbd",               "lib": "rgbd",              "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_calib3d"] + eigen()},
                {"target": "opencv_stereo",             "lib": "stereo",            "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_calib3d"] + eigen()},
                {"target": "opencv_structured_light",   "lib": "structured_light",  "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_phase_unwrapping", "opencv_features2d", "opencv_calib3d"] + eigen()},
                {"target": "opencv_tracking",           "lib": "tracking",          "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_ml", "opencv_plot", "opencv_video", "opencv_imgcodecs", "opencv_datasets"] + eigen()},
                {"target": "opencv_xfeatures2d",        "lib": "xfeatures2d",       "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_ml", "opencv_video", "opencv_features2d", "opencv_shape", "opencv_calib3d"] + eigen()},
                {"target": "opencv_ximgproc",           "lib": "ximgproc",          "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_imgcodecs", "opencv_calib3d"] + eigen()},
                {"target": "opencv_xobjdetect",         "lib": "xobjdetect",        "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_imgcodecs", "opencv_calib3d", "opencv_objdetect"] + eigen()},
                {"target": "opencv_aruco",              "lib": "aruco",             "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_calib3d"] + eigen()},
                {"target": "opencv_bgsegm",             "lib": "bgsegm",            "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_video", "opencv_features2d", "opencv_calib3d"] + eigen()},
                {"target": "opencv_bioinspired",        "lib": "bioinspired",       "requires": ["opencv_core", "opencv_imgproc", "opencv_imgcodecs", "opencv_videoio", "opencv_highgui"] + eigen()},
                {"target": "opencv_ccalib",             "lib": "ccalib",            "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_imgcodecs", "opencv_videoio", "opencv_calib3d", "opencv_highgui"] + eigen()},
                {"target": "opencv_dpm",                "lib": "dpm",               "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_features2d", "opencv_imgcodecs", "opencv_videoio", "opencv_calib3d", "opencv_highgui", "opencv_objdetect"] + eigen()},
                {"target": "opencv_face",               "lib": "face",              "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_photo", "opencv_video", "opencv_features2d", "opencv_calib3d", "opencv_objdetect"] + eigen()},
                {"target": "opencv_optflow",            "lib": "optflow",           "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_video", "opencv_features2d", "opencv_imgcodecs", "opencv_calib3d", "opencv_ximgproc"] + eigen()},
                {"target": "opencv_sfm",                "lib": "sfm",               "requires": ["opencv_core", "opencv_flann", "opencv_imgproc", "opencv_ml", "opencv_video", "opencv_features2d", "opencv_imgcodecs", "opencv_shape", "opencv_calib3d", "opencv_xfeatures2d", "correspondence", "multiview", "numeric", "glog::glog", "gflags::gflags"] + eigen()},
                # sfm helper libraries (note: no "opencv_" prefix).
                {"target": "correspondence",            "lib": "correspondence",    "requires": ["glog::glog", "multiview"] + eigen()},
                {"target": "multiview",                 "lib": "multiview",         "requires": ["glog::glog", "numeric"] + eigen()},
                {"target": "numeric",                   "lib": "numeric",           "requires": eigen()},
            ])
        return opencv_components
def package_info(self):
version = self.version.split(".")
version = "".join(version) if self.settings.os == "Windows" else ""
debug = "d" if self.settings.build_type == "Debug" and self.settings.compiler == "Visual Studio" else ""
def get_lib_name(module):
prefix = "" if module in ("correspondence", "multiview", "numeric") else "opencv_"
return "%s%s%s%s" % (prefix, module, version, debug)
def add_components(components):
for component in components:
conan_component = component["target"]
cmake_target = component["target"]
lib_name = get_lib_name(component["lib"])
requires = component["requires"]
self.cpp_info.components[conan_component].names["cmake_find_package"] = cmake_target
self.cpp_info.components[conan_component].names["cmake_find_package_multi"] = cmake_target
self.cpp_info.components[conan_component].builddirs.append(self._module_subfolder)
module_rel_path = os.path.join(self._module_subfolder, self._module_file)
self.cpp_info.components[conan_component].build_modules["cmake_find_package"] = [module_rel_path]
self.cpp_info.components[conan_component].build_modules["cmake_find_package_multi"] = [module_rel_path]
self.cpp_info.components[conan_component].libs = [lib_name]
self.cpp_info.components[conan_component].libs = [lib_name]
self.cpp_info.components[conan_component].requires = requires
if self.settings.os == "Linux":
self.cpp_info.components[conan_component].system_libs = ["dl", "m", "pthread", "rt"]
# CMake components names
cmake_component = component["lib"]
if cmake_component != cmake_target:
conan_component_alias = conan_component + "_alias"
self.cpp_info.components[conan_component_alias].names["cmake_find_package"] = cmake_component
self.cpp_info.components[conan_component_alias].names["cmake_find_package_multi"] = cmake_component
self.cpp_info.components[conan_component_alias].requires = [conan_component]
self.cpp_info.components[conan_component_alias].includedirs = []
self.cpp_info.components[conan_component_alias].libdirs = []
self.cpp_info.components[conan_component_alias].resdirs = []
self.cpp_info.components[conan_component_alias].bindirs = []
self.cpp_info.components[conan_component_alias].frameworkdirs = []
self.cpp_info.filenames["cmake_find_package"] = "OpenCV"
self.cpp_info.filenames["cmake_find_package_multi"] = "OpenCV"
add_components(self._opencv_components)
if self.settings.os == "Windows":
self.cpp_info.components["opencv_imgcodecs"].system_libs = ["comctl32", "gdi32", "ole32", "setupapi", "ws2_32", "vfw32"]
elif self.settings.os == "Macos":
self.cpp_info.components["opencv_imgcodecs"].frameworks = ['OpenCL', 'Accelerate', 'CoreMedia', 'CoreVideo', 'CoreGraphics', 'AVFoundation', 'QuartzCore', 'Cocoa']
| 62.108046 | 354 | 0.617574 |
2e216ae7a07c05717a368bf005e35e1c0e415167 | 1,086 | py | Python | Exercises/Exercise4AbstractClass.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Exercises/Exercise4AbstractClass.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | Exercises/Exercise4AbstractClass.py | davidavg/OOP_Python | ca4e8376a50b9c81b5ac18c466bd8d147bdbe679 | [
"MIT"
] | null | null | null | '''
Created on Aug 13, 2018
@author: david avalos
'''
#import abstract library
from abc import abstractmethod, ABC
class Person(ABC):
    """Abstract base class for a person that can talk and has a hobby.

    Subclasses must implement :meth:`hobby`; attempting to instantiate
    Person directly raises TypeError.
    """

    def __init__(self, name):
        # Display name used by talk() and by subclasses.
        self.name = name

    def talk(self, words):
        """Print a line of dialogue attributed to this person."""
        print(self.name, "says:", words)

    @abstractmethod
    def hobby(self):
        """Report this person's hobby; must be overridden by subclasses."""
class Teacher(Person):
    """A person who teaches a given subject (``signature``)."""

    def __init__(self, name, signature):
        # Delegate name handling to Person instead of duplicating it.
        super().__init__(name)
        self.signature = signature

    def hobby(self):
        """Print this teacher's hobby."""
        print("My hobby is to read")

    def teach(self):
        """Announce that this teacher is giving their class."""
        print(self.name, "is giving", self.signature, "class")
class Engineer(Person):
    """A concrete Person whose hobby is playing video games."""

    def hobby(self):
        """Print this engineer's hobby."""
        message = "My hobby is to play video games"
        print(message)
# Demo: Person is abstract and cannot be instantiated directly, so only the
# concrete subclasses are exercised below.

print()
myTeacher = Teacher("Jorge", "Math")
myTeacher.talk("Ready for my class?")
myTeacher.hobby()
myTeacher.teach()

print()
myEngineer = Engineer("Genaro")
myEngineer.talk("Don't know what to say D:")
myEngineer.hobby()
fb2c5a43464bc9b4e33b5d74f5d9d78e97aa319a | 95,081 | py | Python | theano/tensor/nnet/conv.py | joschu/Theano | a53650487647b25f4258deef9d91ec950902c610 | [
"BSD-3-Clause"
] | 1 | 2020-12-08T02:23:42.000Z | 2020-12-08T02:23:42.000Z | theano/tensor/nnet/conv.py | joschu/Theano | a53650487647b25f4258deef9d91ec950902c610 | [
"BSD-3-Clause"
] | null | null | null | theano/tensor/nnet/conv.py | joschu/Theano | a53650487647b25f4258deef9d91ec950902c610 | [
"BSD-3-Clause"
] | null | null | null | """
Contains an Op for convolving input images with a set of filters. This was
developed especially for Convolutional Neural Networks.
For related ops, including downsampling and subsampling, see
tensor.signal and tensor.signal.downsample.
See especially conv2d().
"""
__docformat__ = "restructuredtext en"
import logging
import numpy
import theano
from theano.tensor import (as_tensor_variable, blas, get_scalar_constant_value,
patternbroadcast, NotScalarConstantError)
from theano import OpenMPOp, config
from theano.gof import Apply
from theano.gof.python25 import any
imported_scipy_signal = False
try:
# TODO: move these back out to global scope when they no longer
# cause an atexit error
from scipy.signal.signaltools import _valfrommode, _bvalfromboundary
from scipy.signal.sigtools import _convolve2d
imported_scipy_signal = True
except ImportError:
pass
_logger = logging.getLogger("theano.tensor.nnet.conv")
def conv2d(input, filters, image_shape=None, filter_shape=None,
border_mode='valid', subsample=(1, 1), **kargs):
"""This function will build the symbolic graph for convolving a stack of
input images with a set of filters. The implementation is modelled after
Convolutional Neural Networks (CNN). It is simply a wrapper to the ConvOp
but provides a much cleaner interface.
:type input: symbolic 4D tensor
:param input: mini-batch of feature map stacks, of shape
(batch size, stack size, nb row, nb col)
see the optional parameter image_shape
:type filters: symbolic 4D tensor
:param filters: set of filters used in CNN layer of shape
(nb filters, stack size, nb row, nb col)
see the optional parameter filter_shape
:param border_mode:
'valid'-- only apply filter to complete patches of the image. Generates
output of shape: image_shape - filter_shape + 1
'full' -- zero-pads image to multiple of filter shape to generate output
of shape: image_shape + filter_shape - 1
:type subsample: tuple of len 2
:param subsample: factor by which to subsample the output.
Also called strides elsewhere.
:type image_shape: None, tuple/list of len 4 of int or Constant variable
:param image_shape: The shape of the input parameter.
Optional, used for optimization like loop unrolling
You can put None for any element of the list
to tell that this element is not constant.
:type filter_shape: None, tuple/list of len 4 of int or Constant variable
:param filter_shape: Optional, used for optimization like loop unrolling
You can put None for any element of the list
to tell that this element is not constant.
:param kwargs: kwargs are passed onto ConvOp.
Can be used to set the following:
unroll_batch, unroll_kern, unroll_patch,
openmp (see ConvOp doc)
openmp: By default have the same value as
config.openmp. For small image, filter,
batch size, nkern and stack size, it can be
faster to disable manually openmp. A fast and
incomplete test show that with image size
6x6, filter size 4x4, batch size==1,
n kern==1 and stack size==1, it is faster
to disable it in valid mode. But if we
grow the batch size to 10, it is faster
with openmp on a core 2 duo.
:rtype: symbolic 4D tensor
:return: set of feature maps generated by convolutional layer. Tensor is
of shape (batch size, nb filters, output row, output col)
"""
#accept Constant value for image_shape and filter_shape.
if image_shape is not None:
image_shape = list(image_shape)
for i in xrange(len(image_shape)):
if image_shape[i] is not None:
try:
image_shape[i] = get_scalar_constant_value(
as_tensor_variable(image_shape[i]))
except NotScalarConstantError, e:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
" %s for the image_shape parameter" %
image_shape[i])
assert str(image_shape[i].dtype).startswith('int')
image_shape[i] = int(image_shape[i])
if filter_shape is not None:
filter_shape = list(filter_shape)
for i in xrange(len(filter_shape)):
if filter_shape[i] is not None:
try:
filter_shape[i] = get_scalar_constant_value(
as_tensor_variable(filter_shape[i]))
except NotScalarConstantError, e:
raise NotScalarConstantError(
"The convolution need that the shape"
" information are constant values. We got"
" %s for the filter_shape "
"parameter" % filter_shape[i])
assert str(filter_shape[i].dtype).startswith('int')
filter_shape[i] = int(filter_shape[i])
if image_shape and filter_shape:
try:
assert image_shape[1] == filter_shape[1]
except Exception:
print 'image ', image_shape, ' filters ', filter_shape
raise
if filter_shape is not None:
nkern = filter_shape[0]
kshp = filter_shape[2:]
else:
nkern, kshp = None, None
if image_shape is not None:
bsize = image_shape[0]
imshp = image_shape[1:]
else:
bsize, imshp = None, None
op = ConvOp(output_mode=border_mode, dx=subsample[0], dy=subsample[1],
imshp=imshp, kshp=kshp, nkern=nkern, bsize=bsize, **kargs)
return op(input, filters)
class ConvOp(OpenMPOp):
"""
This Op serves a dual purpose: it can implement a vanilla 2D convolution
(as taught in any signal processing class) or implement the
convolutional layers found in Convolutional Neural Networks.
In this setting, a set of 3D images is convolved with a set of 3D kernels,
with the particularity that their leading dimensions are of equal length.
Vanilla 2D convolution is treated as a special case of this.
The input parameter represents a mini-batch of multiple images. Its shape is:
batch size x num. input feature maps x image height x image width
The kernel parameter represents a set of 3D kernels. Its shape is:
number of filters x num. input images x filter height x filter width
The output of ConvOp is a 4D tensor, generated as follows:
output[b,k,:,:] = \sum_i input[b,i,:,:] * filter[k,i,:,:] \forall b,k
where b is the mini-batch index, k the filter index and * is the
convolution operator.
"""
__attrnames = ['imshp', 'kshp', 'nkern', 'bsize', 'dx', 'dy', 'out_mode',
'unroll_batch', 'unroll_kern', 'unroll_patch',
'imshp_logical', 'kshp_logical', 'kshp_logical_top_aligned']
"""These attributes uniquely identify the behaviour of this op for
given inputs. Do not set openmp here.
"""
    # The values of speed_unroll_batch_kern, speed_unroll_patch_noshape and
    # speed_unroll_patch_shape were measured on maggie36 when there was only
    # one session logged in and only this benchmark running. It is an
    # Intel(R) Xeon(R) CPU E5430 @ 2.66GHz. They were computed with
    # theano/tensor/nnet/tests/speed_test_conv.py, which took 5 minutes to run.
#TODO: we should compute this table for each computer/os as this can change.
# I saw on one computer that the speed with the shape can be slower than without!
# using the real shape and the same dtype could also help.
#unroll_batch, unroll_kern, valid time, full time
speed_unroll_batch_kern = [(1, 1, 2.4661250114440918, 6.5472931861877441),
(1, 2, 1.5869178771972656, 5.1499760150909424),
(1, 3, 1.4270510673522949, 3.6593470573425293),
(1, 4, 1.3373479843139648, 3.3451821804046631),
(1, 5, 1.2818830013275146, 3.1444568634033203),
(1, 6, 1.2521560192108154, 3.0256359577178955),
(1, 10, 1.2134110927581787, 2.9174180030822754),
(2, 1, 1.657214879989624, 4.5261678695678711),
(2, 2, 1.2123160362243652, 2.9747390747070312),
(2, 3, 1.0758891105651855, 2.5690360069274902),
(2, 4, 1.0683329105377197, 2.4233770370483398),
(2, 5, 1.0955719947814941, 2.3999948501586914),
(2, 6, 1.5935721397399902, 2.6878271102905273),
(2, 10, 1.8511250019073486, 3.2417428493499756),
(3, 1, 1.5948119163513184, 3.631148099899292),
(3, 2, 1.0761330127716064, 2.6011371612548828),
(3, 3, 1.0551531314849854, 2.4200370311737061),
(3, 4, 1.3930759429931641, 2.5211219787597656),
(3, 5, 1.4330689907073975, 2.5704989433288574),
(3, 6, 1.362138032913208, 2.5964410305023193),
(3, 10, 1.6582000255584717, 2.9907989501953125),
(4, 1, 1.4793620109558105, 3.3473429679870605),
(4, 2, 1.0671560764312744, 2.4171769618988037),
(4, 3, 1.2569692134857178, 2.2807950973510742),
(4, 4, 1.3456289768218994, 2.6219108104705811),
(4, 5, 1.4055080413818359, 2.4606490135192871),
(4, 6, 1.372107982635498, 2.551663875579834),
(4, 10, 1.599470853805542, 2.9172940254211426),
(5, 1, 1.4115700721740723, 3.2077109813690186),
(5, 2, 1.0635769367218018, 2.2648060321807861),
(5, 3, 1.3842809200286865, 2.6135518550872803),
(5, 4, 1.3470511436462402, 2.3852400779724121),
(5, 5, 1.3539440631866455, 2.5245928764343262),
(5, 6, 1.4037849903106689, 2.5985310077667236),
(5, 10, 1.6120610237121582, 2.8127608299255371),
(6, 1, 1.3623628616333008, 3.021122932434082),
(6, 2, 1.1697649955749512, 2.6285450458526611),
(6, 3, 1.2980999946594238, 2.4746189117431641),
(6, 4, 1.3739941120147705, 2.5579929351806641),
(6, 5, 1.3967819213867188, 2.5522029399871826),
(6, 6, 1.4279270172119141, 2.6127138137817383),
(6, 10, 1.605496883392334, 2.864037036895752),
(10, 1, 1.6401121616363525, 2.970099925994873),
(10, 2, 1.46710205078125, 2.7231831550598145),
(10, 3, 1.4193780422210693, 2.6087639331817627),
(10, 4, 1.4657118320465088, 2.6246678829193115),
(10, 5, 1.5052611827850342, 2.6542458534240723),
(10, 6, 1.5214400291442871, 2.7243161201477051),
(10, 10, 1.6116268634796143, 2.956165075302124)]
#valid time, full time
speed_unroll_patch_noshape = [2.0109100341796875, 5.8175678253173828]
#valid time, full time
speed_unroll_patch_shape = [1.2967290878295898, 5.5283889770507812]
@staticmethod
def has_all_shape(imshp, kshp, nkern=1, bsize=1):
return (nkern is not None and bsize is not None and
all(shp is not None for shp in imshp) and
all(shp is not None for shp in kshp))
@staticmethod
def getOutputShape(inshp, kshp, stride=(1, 1), mode='valid'):
"""
Computes the output dimensions of convolving an image of shape "inshp"
with kernels of shape "kshp". Accepts symbolic or integer shapes.
Propagates `None`s (for unknown shapes).
:param inshp: (rows,cols) of input image
:param kshp: (rows,cols) of filters
:param mode: 'valid' or 'full' (see 'border_mode' in conv2d's doc)
:return: (rows,cols) of output image
"""
# The formula would be ceil((i + s * k - s * 1) / float(d)),
# with s=1 for mode=='full' and s=-1 for mode=='valid'.
# To support symbolic shapes, we express this with integer arithmetics.
return tuple(None if i is None or k is None
else ((i - k) // d + 1) if mode == 'valid'
else ((i + k + d - 2) // d)
for i, k, d in zip(inshp, kshp, stride))
    def __init__(self, imshp=None, kshp=None, nkern=None, bsize=None,
                 dx=1, dy=1,
                 output_mode='valid',
                 unroll_batch=None,
                 unroll_kern=None,
                 unroll_patch=None,
                 imshp_logical=None,
                 kshp_logical=None,
                 kshp_logical_top_aligned=True,
                 verbose=0,
                 version=-1,
                 direction_hint='forward',
                 openmp=None):
        """
        Initializes a ConvOp with given output_mode (full/valid). All other
        parameters are optional and are only used to generate more optimized c
        code, or to enable graph optimizers to optimally replace the ConvOp.

        NOTES ON OPTIMIZATION:
        There are two types of optimization. The first is the selection of the
        fastest algo when bsize and nkern are provided with imshp and kshp.
        By default we try to select the fastest version. You can specify it
        with the unroll_batch, unroll_kern, and unroll_patch parameters.

        The second type of optimization is hardcoding some dimensions into the
        code when all shapes are known.
        This makes a significant difference for the 'full' output_mode.

        Sometimes, the fastest implementation on x86-64 uses
        {unroll_batch=4, unroll_kern=4, unroll_patch=False}
        with all other shape parameters being provided.

        For optimizing other architectures, see:
        Kazushige Goto and Robert A. Van De Geijn, Anatomy of High-Performance
        Matrix Multiplication, (mr x nr). ACM Transactions on Mathematical
        Software, May 2008.
        Figure 12: (mr x nr). For x86 use 2x4, itanium 8x8, etc.

        :type output_mode: string
        :param output_mode: 'valid' -- gives an output smaller than the image
                            'full' -- gives an output bigger than the image

        Optional parameters: (will generate more optimal c code)

        :type imshp: tuple of len 2 or 3: 2 for 2d image,
                     3 for a stack of 2d images.
        :param imshp: (stacksize, nb image row, nb image col)

        :type kshp: tuple of len 2
        :param kshp: (nb kernel row, nb kernel col)

        :type nkern: int
        :param nkern: the number of kernel

        :type bsize: int
        :param bsize: the size of the minibatch

        :type dx: int
        :param dx: patch stride rows

        :type dy: int
        :param dy: patch stride cols

        Params which select the version of code used:

        :type unroll_patch: bool
        :param unroll_patch: use a version of c_code that unrolls the patch
            loop and does not request all shape information to work; if all
            shape information is present, it will be used to hardcode the
            values in the code, for faster code.

        :type unroll_batch: int
        :param unroll_batch: use a version of c_code that unrolls the batch
            (by unroll_batch) and the nkern (by unroll_kern) loop. The size
            must be a multiple of bsize or nkern respectively.

        :type unroll_kern: int
        :param unroll_kern: use a version of c_code that unrolls the batch
            (by unroll_batch) and the nkern (by unroll_kern) loop. The size
            must be a multiple of bsize or nkern respectively.

        :type verbose: int
        :param verbose: passed to GpuConv

        :type version: int or str
        :param version: passed to GpuConv; if version='no_fft', fft
            optimization will be deactivated at the op level.

        :param direction_hint: 'forward', 'bprop weights' or 'bprop inputs'.
            Passed to GpuConv, used by graph optimizers to aid algorithm
            choice.

        The 3 following parameters are used internally when we generate
        the gradient when dx!=1 or dy!=1.

        :param imshp_logical: Default None. A None value is equivalent to
            the imshp value. When imshp_logical != imshp, it tells us that
            we need to insert 0s in the image before doing the convolution.
            For example, when dx==dy==2 and the image is [[1, 2], [3, 4]],
            we should make as if the image was
            [[1, 0, 2, 0], [0, 0, 0, 0], [3, 0, 4, 0], [0, 0, 0, 0]].
            Our python code inserts the zeros, but the c code optimizes it.
            imshp_logical != imshp when taking the gradient with respect to
            the weights or the image when the output_mode is full and
            `dx != 1` or `dy != 1`.

        :param kshp_logical: idem, but for kshp; used for the gradient with
            respect to the weights when the output_mode is valid and
            `dx != 1` or `dy != 1`.

        :param kshp_logical_top_aligned: used in the same case. Defaults to
            True. Set to False in the gradient with respect to the weights
            when the output_mode is full.

        """
        # Deactivate fft_optimization at the op level if specified
        if version == "no_fft":
            self.fft_opt = False
            version = -1
        else:
            self.fft_opt = True

        # Expand unknown image / kernel shapes into tuples of Nones
        if imshp is None:
            imshp = (None, None, None)
        else:
            imshp = tuple(imshp)
        if kshp is None:
            kshp = (None, None)
        else:
            kshp = tuple(kshp)

        # Check imshp and kshp dimensionality
        if len(imshp) == 2:
            # A 2-element imshp means a single 2d image: stack size is 1.
            imshp = (1,) + imshp
        elif len(imshp) != 3:
            raise ValueError("len(imshp) must be 2 or 3, got %d" % len(imshp))
        if len(kshp) != 2:
            raise ValueError("len(kshp) must be 2, got %d" % len(kshp))

        # We must continue to consider None as 1 for backward compatibility.
        if dx is None:
            dx = 1
        if dy is None:
            dy = 1

        if int(dx) != dx:
            raise TypeError('ConvOp.__init__ param dx must be an int', dx)
        dx = int(dx)

        if int(dy) != dy:
            raise TypeError('ConvOp.__init__ param dy must be an int', dy)
        dy = int(dy)

        # Unrolling batch/kern requires hardcoded shapes, so all of them
        # must be known.
        all_shape = self.has_all_shape(imshp, kshp, nkern, bsize)
        if (unroll_batch or unroll_kern) and not all_shape:
            raise Exception("In ConvOp, when using unroll_batch and"
                            " unroll_nkern, all shape are needed")

        #Init the openmp attribute
        super(ConvOp, self).__init__(openmp=openmp)
        if not all_shape or self.openmp:
            # Only this version is parallelized
            unroll_patch = True

        self.imshp = imshp
        self.kshp = kshp
        self.nkern = nkern
        self.bsize = bsize
        self.dx = dx
        self.dy = dy
        self.verbose = verbose
        self.version = version
        self.direction_hint = direction_hint

        # a triple
        if imshp_logical is None:
            self.imshp_logical = self.imshp
        else:
            imshp_logical = tuple(imshp_logical)
            if len(imshp_logical) != 3:
                raise ValueError("len(imshp_logical) must be 3, got %d" % len(imshp_logical))
            self.imshp_logical = imshp_logical

        # a pair
        if kshp_logical is None:
            self.kshp_logical = self.kshp
        else:
            kshp_logical = tuple(kshp_logical)
            if len(kshp_logical) != 2:
                raise ValueError("len(kshp_logical) must be 2, got %d" % len(kshp_logical))
            self.kshp_logical = kshp_logical

        # a bool
        self.kshp_logical_top_aligned = kshp_logical_top_aligned

        # Batch and kern unrolling go together: when only one is given,
        # the other defaults to 1.
        self.unroll_batch = unroll_batch
        self.unroll_kern = unroll_kern
        self.unroll_patch = unroll_patch
        if self.unroll_batch and not self.unroll_kern:
            self.unroll_kern = 1
        if self.unroll_kern and not self.unroll_batch:
            self.unroll_batch = 1

        # downcast unroll_batch if not a divisor of batch size
        if self.unroll_batch is not None and self.unroll_batch > 0 and self.bsize % self.unroll_batch != 0:
            if self.bsize <= self.unroll_batch:
                self.unroll_batch = self.bsize
            else:
                #find the maximum value under unroll_batch that would work
                new = self.unroll_batch
                assert(new >= 1)
                while self.bsize % new != 0:
                    new -= 1

                warnstr = ("OPTIMISATION WARNING: in ConvOp.__init__() "
                           "unroll_batch(%i) must be 0 or a divisor of"
                           " bsize(%i). We revert it to %i. This"
                           " won't change the result, but may make it slower.")
                _logger.warn(warnstr, self.unroll_batch, self.bsize, new)

                self.unroll_batch = new

        #downcast unroll_kern if not a divisor of nb of kernel
        if self.unroll_kern is not None and self.unroll_kern > 0 and self.nkern % self.unroll_kern != 0:
            if self.nkern <= self.unroll_kern:
                self.unroll_kern = self.nkern
            else:
                #find the maximum value under unroll_kern that would work
                new = self.unroll_kern
                assert(new >= 1)
                while self.nkern % new != 0:
                    new -= 1

                warnstr = ("OPTIMISATION WARNING: in ConvOp.__init__()"
                           " unroll_kern(%i) should be 0 or a divisor of"
                           " nkern(%i). We revert it to %i. This"
                           " won't change the result, but may make it slower.")
                _logger.warn(warnstr, self.unroll_kern, self.nkern, new)
                self.unroll_kern = new

        # Output shape with subsampling (outshp) and without (fulloutshp),
        # computed from the *logical* shapes.
        self.outshp = ConvOp.getOutputShape(self.imshp_logical[1:],
                                            self.kshp_logical, (dx, dy),
                                            output_mode)
        self.fulloutshp = ConvOp.getOutputShape(self.imshp_logical[1:],
                                                self.kshp_logical, (1, 1),
                                                output_mode)

        self.out_mode = output_mode

        if not self.out_mode in ["valid", "full"]:
            raise Exception("Mode %s not implemented" % self.out_mode)

        if any((shp is not None) and (shp <= 0) for shp in self.outshp):
            raise Exception("Bad size for the output shape. Verify that [post-"
                            "supersampling] input shape (%s) and kern"
                            " shape(%s) are ok. (Hint: kerns must fit inside"
                            " image in valid mode)" %
                            (self.imshp_logical, self.kshp_logical))

        if (self.unroll_kern is None and
            self.unroll_batch is None and
            self.unroll_patch is None):
            # No version specified: find the fastest one we have, using the
            # benchmark tables declared above on the class.
            if self.bsize is None and self.nkern is None:
                self.unroll_patch = True
            elif self.bsize is not None and self.nkern is not None:
                bsize = self.bsize
                nkern = self.nkern
                mode_idx = 0
                if self.out_mode != "valid":
                    mode_idx = 1

                if self.has_all_shape(self.imshp, self.kshp):
                    time_unroll_patch = self.speed_unroll_patch_shape[mode_idx]
                else:
                    time_unroll_patch = self.speed_unroll_patch_noshape[
                        mode_idx]
                # Best batch/kern unrolling whose factors divide bsize/nkern.
                time_unroll_batch_kern = 9999999
                for i in xrange(len(self.speed_unroll_batch_kern)):
                    if (bsize % self.speed_unroll_batch_kern[i][0] == 0 and
                        nkern % self.speed_unroll_batch_kern[i][1] == 0):
                        if self.speed_unroll_batch_kern[i][2 + mode_idx] < time_unroll_batch_kern:
                            time_unroll_batch_kern = self.speed_unroll_batch_kern[i][2 + mode_idx]
                            time_unroll_batch_kern_idx = i
                if time_unroll_patch < time_unroll_batch_kern:
                    self.unroll_patch = True
                else:
                    self.unroll_batch = self.speed_unroll_batch_kern[
                        time_unroll_batch_kern_idx][0]
                    self.unroll_kern = self.speed_unroll_batch_kern[
                        time_unroll_batch_kern_idx][1]
                    self.unroll_patch = False

                _logger.debug("AUTO FIND VERSION OF C_CODE OF CONV OP "
                              "%s %s %s %s %s %s %s",
                              self.unroll_batch, self.unroll_kern,
                              self.unroll_patch,
                              self.bsize, self.nkern, time_unroll_patch,
                              time_unroll_batch_kern)

        self._rehash()
def __eq__(self, other):
if type(self) != type(other):
return False
for a in self.__attrnames:
if getattr(self, a) != getattr(other, a):
return False
return True
def __setstate__(self, d):
super(ConvOp, self).__setstate__(d)
self.direction_hint = d.get("direction_hint", None)
self._rehash()
def _rehash(self):
hashval = hash(type(self))
for a in self.__attrnames:
hashval = hashval ^ hash(getattr(self, a))
self.__hashval = hashval
    def __hash__(self):
        # Return the value precomputed by _rehash() (called from __init__ and
        # __setstate__); assumes the attributes in __attrnames never change
        # afterwards — TODO confirm.
        return self.__hashval
def __str__(self):
return "ConvOp{" + ",".join(str((a, getattr(self, a)))
for a in self.__attrnames) + "}"
def flops(self, inputs, outputs):
""" Useful with the hack in profilemode to print the MFlops"""
images, kerns = inputs
out, = outputs
assert images[1] == kerns[1]
flops = 0
if self.out_mode == "valid":
# nb mul and add by output pixel
flops = kerns[2] * kerns[3] * 2
#nb flops by output image
flops *= out[2] * out[3]
# nb patch multiplied
flops *= images[1] * kerns[0] * images[0]
else:
flops = (images[0] * kerns[0] * images[1] *
kerns[2] * kerns[3] *
images[2] * images[3] * 2)
return flops
def make_node(self, inputs, kerns):
# TODO: find a way to make ConvOp work for N-D (after NIPS09)
"""
inputs - 4 dim: batches x stacksize x rows x cols
kerns - 4 dim: nkern x stackidx x rows x cols
"""
outdim = kerns.ndim
_inputs = as_tensor_variable(inputs)
_kerns = as_tensor_variable(kerns)
# TODO: lift this restriction by upcasting either inputs or kerns
if _inputs.ndim != 4:
raise TypeError('ConvOp (make_node) requires input be a 4D tensor;'
' received "%s" (%i dims)' %
(inputs, _inputs.ndim))
if _kerns.ndim != 4:
raise TypeError('make_node requires 4D tensor of kernels')
if _inputs.type.dtype != _kerns.type.dtype:
raise NotImplementedError(
"The image and the kernel must have the same type."
"inputs(%s), kerns(%s)" % (_inputs.dtype, _kerns.dtype))
bcastable23 = [self.outshp[0] == 1, self.outshp[1] == 1]
output = theano.tensor.tensor(dtype=_inputs.type.dtype,
broadcastable=[_inputs.broadcastable[0],
_kerns.broadcastable[0]] +
bcastable23)
return Apply(self, [_inputs, _kerns], [output])
def infer_shape(self, node, input_shapes):
imshp = input_shapes[0] # 4D image shape
kshp = input_shapes[1] # 4D filter shape
bsize, imshp = imshp[0], list(imshp[1:])
nkern, kshp = kshp[0], list(kshp[2:])
# replace symbolic shapes with known shapes
if self.bsize is not None:
bsize = self.bsize
for i in [0, 1, 2]:
if self.imshp_logical[i] is not None:
imshp[i] = self.imshp_logical[i]
if self.nkern is not None:
nkern = self.nkern
for i in [0, 1]:
if self.kshp_logical[i] is not None:
kshp[i] = self.kshp_logical[i]
# infer output shape from what we have
outshp = ConvOp.getOutputShape(imshp[1:], kshp, (self.dx, self.dy),
self.out_mode)
return [(bsize, nkern) + outshp]
def perform(self, node, inp, out):
"""
By default if len(img2d.shape)==3, we
"""
img2d, filtersflipped = inp
z, = out
if not imported_scipy_signal:
raise theano.gof.utils.MethodNotDefined(
"c_headers", type(self), self.__class__.__name__,
"Need the python package for scipy.signal to be installed "
"for the python implementation. You can use the C"
" implementation instead.")
# TODO: move these back out to global scope when they no longer
# cause an atexit error
imshp = self.imshp
if any(x is None for x in imshp):
imshp = tuple(img2d.shape[1:])
kshp = self.kshp
if any(x is None for x in kshp):
kshp = tuple(filtersflipped.shape[2:])
bsize = self.bsize
if bsize is None:
bsize = img2d.shape[0]
nkern = self.nkern
if nkern is None:
nkern = filtersflipped.shape[0]
imshp_logical = self.imshp_logical
if imshp_logical[0] is None:
imshp_logical = (imshp[0],) + imshp_logical[1:]
if imshp_logical[1] is None:
imshp_logical = (imshp_logical[0], imshp[1], imshp_logical[2])
if imshp_logical[2] is None:
imshp_logical = imshp_logical[:2] + (imshp[2],)
assert all(x is not None for x in imshp_logical)
kshp_logical = self.kshp_logical
if kshp_logical[0] is None:
kshp_logical = (kshp[0], kshp_logical[1])
if kshp_logical[1] is None:
kshp_logical = (kshp_logical[0], kshp[1])
assert all(x is not None for x in kshp_logical)
if all(shp is not None for shp in self.fulloutshp):
fulloutshp = tuple(self.fulloutshp)
else:
fulloutshp = tuple(ConvOp.getOutputShape(imshp_logical[
1:], kshp_logical, (1, 1), self.out_mode))
if z[0] is None or z[0].shape != (bsize, nkern,) + fulloutshp:
z[0] = numpy.zeros((bsize, nkern,) + fulloutshp,
dtype=img2d.dtype)
zz = z[0]
stacklen = imshp[0]
img2d = img2d.reshape((bsize,) + imshp)
filtersflipped = filtersflipped.reshape((nkern, stacklen) + kshp)
if self.imshp != self.imshp_logical:
# assuming that to get from imshp to imshp logical we insert zeros in missing spots
rstride = int(numpy.ceil(imshp_logical[1] / float(imshp[1])))
cstride = int(numpy.ceil(imshp_logical[2] / float(imshp[2])))
buf = numpy.zeros((bsize,) + imshp_logical, dtype=img2d.dtype)
buf[:, :, ::rstride, ::cstride] = img2d
img2d = buf
del buf, rstride, cstride
if kshp != kshp_logical:
rstride = int(numpy.ceil(kshp_logical[0] / float(kshp[0])))
cstride = int(numpy.ceil(kshp_logical[1] / float(kshp[1])))
buf = numpy.zeros((nkern, stacklen) +
self.kshp_logical, dtype=filtersflipped.dtype)
if self.kshp_logical_top_aligned:
roffset = coffset = 0
else:
roffset = (kshp_logical[0] - (kshp[0] *
rstride) - 1 + rstride) % rstride
coffset = (kshp_logical[1] - (kshp[1] *
cstride) - 1 + cstride) % cstride
assert roffset >= 0
assert coffset >= 0
buf[:, :, roffset::rstride, coffset::cstride] = filtersflipped
filtersflipped = buf
del buf, rstride, cstride
val = _valfrommode(self.out_mode)
bval = _bvalfromboundary('fill')
for b in xrange(bsize):
for n in xrange(nkern):
zz[b, n, ...].fill(0)
for im0 in xrange(stacklen):
zz[b, n, ...] += _convolve2d(img2d[b, im0, ...],
filtersflipped[n, im0, ...],
1, val, bval, 0)
if False:
if False and self.out_mode == "full":
img2d2 = numpy.zeros((bsize, stacklen,
imshp[1] + 2 * kshp[0] - 2,
imshp[2] + 2 * kshp[1] - 2))
img2d2[:, :, kshp[0] - 1:kshp[0] - 1 + imshp[1],
kshp[1] - 1:kshp[1] - 1 + imshp[2]] = img2d
img2d = img2d2
#N_image_shape = image_data.shape
for b in xrange(bsize):
for n in xrange(nkern):
zz[b, n, ...].fill(0)
for im0 in xrange(stacklen):
for row in xrange(0, zz.shape[2], self.dx):
for col in xrange(0, zz.shape[3], self.dy):
zz[b, n, row, col] += (img2d[b, im0, row:row + kshp[0], col:col + kshp[1]] *
filtersflipped[n, im0, ::-1, ::-1]).sum()
#We copy it to remove the Stride mismatch warning from DEBUG_MODE.
#The copy make that we return an object with the same stride as the c version.
#The copy don't affect the performence during our experience as in that case we
#execute the c version which is much faster.
if self.dx > 1 or self.dy > 1:
zz = zz[:, :, 0::self.dx, 0::self.dy].copy()
z[0] = zz
    def grad(self, inp, grads):
        """Return the gradients wrt (inputs, kerns) given the output
        gradient gz.

        Both gradients are themselves ConvOp applications ('bprop weights'
        and 'bprop inputs'), except the strided valid case, which is routed
        through conv3D.
        """
        inputs, kerns = inp
        gz, = grads
        if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
            raise NotImplementedError('todo')
        if self.out_mode == 'valid' and (self.dx, self.dy) != (1, 1):
            # Use the gradient as defined in conv3D, because the implementation
            # by Conv is slow (about 3x slower than conv3D, and probably 10x
            # slower than it could be), and incorrect when dx or dy > 2.
            # build a "node", that should be equivalent to the one given by
            # self.make_node, but using conv3D instead of self.
            shuffled_inputs = inputs.dimshuffle(0, 2, 3, 'x', 1)
            if inputs.name is not None:
                shuffled_inputs.name = 'shuffle_for_conv3D(%s)' % inputs.name
            flipped_kerns = kerns[:, :, ::-1, ::-1]
            if kerns.name is not None:
                flipped_kerns.name = 'flipped(%s)' % kerns.name
            shuffled_kerns = flipped_kerns.dimshuffle(0, 2, 3, 'x', 1)
            if flipped_kerns.name is not None:
                shuffled_kerns.name = 'shuffled_for_conv3D(%s)' % flipped_kerns.name
            tmp_node = theano.tensor.nnet.conv3D(
                V=shuffled_inputs,
                W=shuffled_kerns,
                b=theano.tensor.alloc(numpy.asarray(0, dtype=kerns.dtype),
                                      kerns.shape[0]),
                d=(self.dx, self.dy, 1))
            node = theano.tensor.addbroadcast(
                tmp_node, 3).dimshuffle(0, 4, 1, 2)
            # mimic what happens inside theano.grad: get the input gradient
            # of the final cost wrt all variables involved.
            return theano.gradient.grad(cost=None,
                    known_grads={node: gz}, wrt=[inputs, kerns])
        if self.dx not in (1, 2) or self.dy not in (1, 2):
            raise NotImplementedError(
                "ERROR: We disable ConvOp.grad now when dx or "
                "dy are different from 1 and 2, as there is a bug in it.")
        all_shape = self.has_all_shape(self.imshp, self.kshp,
                                       self.nkern, self.bsize)
        if not all_shape and (self.dx != 1 or self.dy != 1):
            raise Exception("ConvOp.grad when dx!=1 or dy!=1 we must have all "
                            "the optional shape information")
        ####### Determine gradient on kernels ########
        assert inputs.ndim == 4 and kerns.ndim == 4
        newin = inputs.dimshuffle((1, 0, 2, 3))
        newgz = gz.dimshuffle((1, 0, 2, 3))
        un_p = self.unroll_patch
        # The weight gradient is itself a valid-mode convolution; the roles
        # of image/filter and batch/kernel swap depending on out_mode.
        if self.out_mode == 'valid':
            (img, filters) = (newin, newgz)
            kshp_logical = self.fulloutshp
            kshp_logical_top_aligned = False
            imshp_logical = None
            (bsize, nkern) = (self.imshp[0], self.nkern)
            imshp = (self.bsize, self.imshp[1], self.imshp[2])
            kshp = self.outshp
            un_b = self.unroll_batch
            un_k = self.unroll_kern
        elif self.out_mode == 'full':
            (img, filters) = (newgz, newin)
            kshp_logical = None
            kshp_logical_top_aligned = True
            imshp_logical = (self.bsize,
                             self.fulloutshp[0],
                             self.fulloutshp[1])
            (bsize, nkern) = (self.nkern, self.imshp[0])
            imshp = (self.bsize, self.outshp[0], self.outshp[1])
            kshp = self.imshp[1:]
            un_b = self.unroll_kern
            un_k = self.unroll_batch
        else:
            raise NotImplementedError(
                'Only [full,valid] modes are currently supported.')
        filters = filters[:, :, ::-1, ::-1]  # flip them
        # Disabled branch kept for reference: hand-tuned unroll values.
        if 0: # find good value for the unroll
            if all_shape and un_b != 0 and bsize % un_b != 0:
                if bsize < un_b:
                    un_b = bsize
                else:
                    un_b = 1
                    _logger.warn(
                        "Optimization Warning: in ConvOp.grad() we can't "
                        " determine a good unroll value for the batch."
                        " Maybe you can optimize this!")
            if all_shape and un_k != 0 and nkern % un_k != 0:
                if nkern < un_k:
                    un_k = nkern
                else:
                    un_k = 1
                    _logger.warn(
                        "Optimization Warning: in ConvOp.grad() we can't"
                        " determine a good unroll value for the kernel. Maybe"
                        " you can optimize this!")
            dw = ConvOp(imshp, kshp, nkern, bsize, 1, 1, output_mode='valid',
                        unroll_batch=un_b, unroll_kern=un_k, unroll_patch=un_p,
                        imshp_logical=imshp_logical,
                        kshp_logical=kshp_logical,
                        kshp_logical_top_aligned=kshp_logical_top_aligned,
                        version=self.version,
                        direction_hint='bprop weights',
                        verbose=self.verbose)
        else: # let __init__ choose c params be chosen automatically from shapes
            dw = ConvOp(imshp, kshp, nkern, bsize, 1, 1, output_mode='valid',
                        unroll_batch=None, unroll_kern=None, unroll_patch=None,
                        imshp_logical=imshp_logical,
                        kshp_logical=kshp_logical,
                        kshp_logical_top_aligned=kshp_logical_top_aligned,
                        version=self.version,
                        direction_hint='bprop weights',
                        verbose=self.verbose)
        dw = dw(img, filters)
        if all_shape:
            assert all(o == k for o, k in zip(dw.owner.op.outshp, self.kshp))
        if self.out_mode == 'valid':
            # before DimShuffle, dw is of shape visdim x nkern x kshp[0] x kshp[1]
            dw = dw.dimshuffle((1, 0, 2, 3))
            dw = dw[:, :, ::-1, ::-1]
        ####### Determine gradient on inputs ########
        # The input gradient is a convolution of gz with the flipped,
        # channel-transposed kernels, in the opposite mode.
        mode = 'valid'
        if not self.out_mode == 'full':
            mode = 'full'
        filters = kerns.dimshuffle((1, 0, 2, 3))
        filters = filters[:, :, ::-1, ::-1]
        nkern = self.imshp[0]
        imshp = (self.nkern, self.outshp[0], self.outshp[1])
        imshp_logical = (self.nkern, self.fulloutshp[0],
                         self.fulloutshp[1])
        if 0: # hard-code c generation parameters
            din = ConvOp(imshp, self.kshp, nkern, self.bsize,
                         1, 1, output_mode=mode,
                         unroll_batch=un_b, unroll_kern=un_k,
                         unroll_patch=un_p,
                         imshp_logical=imshp_logical,
                         kshp_logical=None,
                         version=-1,  # when we change the mode, we don't forward the version.
                         direction_hint='bprop inputs',
                         verbose=self.verbose)
        else: # let __init__ figure out the unrolling / patch sizes
            din = ConvOp(imshp, self.kshp, nkern, self.bsize,
                         1, 1, output_mode=mode,
                         unroll_batch=None, unroll_kern=None,
                         unroll_patch=None,
                         imshp_logical=imshp_logical,
                         kshp_logical=None,
                         version=-1,  # when we change the mode, we don't forward the version.
                         direction_hint='bprop inputs',
                         verbose=self.verbose)
        din = din(gz, filters)
        assert all(o is None or o == i
                   for o, i in zip(din.owner.op.outshp, self.imshp[1:]))
        # din and dw should have the same broadcasting pattern as the
        # parameters they are the gradient of (resp. inputs and kerns).
        din = patternbroadcast(din, inputs.broadcastable)
        dw = patternbroadcast(dw, kerns.broadcastable)
        return [din, dw]
def R_op(self, inputs, eval_points):
rval = None
if eval_points[0] is not None:
rval = self.make_node(eval_points[0], inputs[1]).outputs[0]
if eval_points[1] is not None:
if rval is None:
rval = self.make_node(inputs[0], eval_points[1]).outputs[0]
else:
rval += self.make_node(inputs[0], eval_points[1]).outputs[0]
return [rval]
    def c_headers(self):
        # numpy/noprefix.h for the array C-API; <iostream>/<sstream> are used
        # by the generated C++ (std::stringstream/std::cout) for messages.
        return ['<numpy/noprefix.h>', '<iostream>', '<sstream>']
    def c_code_cache_version(self):
        # Bump the leading number whenever the generated C code changes; the
        # compiled-module cache also keys on OpenMP and the BLAS headers.
        return (11, self.openmp, blas.blas_header_version())
    def c_support_code(self):
        # Macros shared by every generated code path.  MOD expands to a
        # literal '%' so the C templates can use modulo despite Python's
        # %-substitution.  BLAS declarations are appended for the gemm path.
        return """
#define STRIDES(arr) (PyArray_STRIDES(arr))
#define FULL 2
#define SAME 1
#define VALID 0
#define MOD %
using namespace std;
""" + blas.blas_header_text()
def use_blas(self):
""" Return True if we will generate code that use gemm.
"""
#the gemm version only support that case
if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:
#We use a faster version in those case.
if (self.imshp != self.imshp_logical or
self.kshp != self.kshp_logical or
self.unroll_patch or
self.unroll_batch > 0 or
self.unroll_kern > 0):
return False
return True
return False
def c_libraries(self):
if self.use_blas():
return blas.ldflags()
return []
def c_no_compile_args(self):
#when the ksph==(1,1) gcc 4.3.0 segfault during the
#compilation with -O3. This don't happen at -O2
if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
self.kshp == (1, 1)):
return ['-O3']
else:
return []
def c_compile_args(self):
ret = []
if self.use_blas():
ret = blas.ldflags(libs=False, flags=True)
if (theano.gof.cmodule.gcc_version() in ['4.3.0'] and
self.kshp == (1, 1)):
ret += ['-O2']
#Add the -fopenmp flags
ret += super(ConvOp, self).c_compile_args()
return ret
def c_lib_dirs(self):
if self.use_blas():
return blas.ldflags(libs=False, libs_dir=True)
return []
def c_header_dirs(self):
if self.use_blas():
return blas.ldflags(libs=False, include_dir=True)
return []
    def c_code(self, node, name, inp, out, sub):
        """Select a C code template and fill in its %-substitution dict.

        Builds `d`, the substitution mapping (shape info, mode, dtype, C
        variable names from `sub`), then returns one of the module-level
        templates: the general version, the unroll-patch version, the
        unrolled batch/kern version, or the gemm version.
        """
        img2d, filtersflipped = inp
        z, = out
        if node.inputs[0].type.dtype != node.inputs[1].type.dtype:
            raise NotImplementedError()
        assert node.inputs[0].type.dtype == node.inputs[1].type.dtype
        # locals() gives the template access to img2d/filtersflipped/z names.
        d = locals()
        d.update(sub)
        all_shape = (self.has_all_shape(self.imshp, self.kshp,
                                        self.nkern, self.bsize) and
                     self.has_all_shape(self.imshp_logical, self.kshp_logical))
        d["self_out_mode"] = self.out_mode
        d["self_dx"] = self.dx
        d["self_dy"] = self.dy
        d["mode"] = self.out_mode.upper()
        d["affectation"] = "="
        # Default values, will be overrided if the shape info is provided
        d["self_bsize"] = "PyArray_DIMS(%(img2d)s)[0]" % d
        d["self_nkern"] = "PyArray_DIMS(%(filtersflipped)s)[0]" % d
        d["self_outshp0"] = "-1"
        d["self_outshp1"] = "-1"
        d["self_imshp0"] = "PyArray_DIMS(%(img2d)s)[1]" % d
        d["self_imshp1"] = "PyArray_DIMS(%(img2d)s)[2]" % d
        d["self_imshp2"] = "PyArray_DIMS(%(img2d)s)[3]" % d
        d["self_kshp0"] = "PyArray_DIMS(%(filtersflipped)s)[2]" % d
        d["self_kshp1"] = "PyArray_DIMS(%(filtersflipped)s)[3]" % d
        # Override the default value if we have it
        if self.kshp[0] is not None:
            d["self_kshp0"] = self.kshp[0]
        if self.kshp[1] is not None:
            d["self_kshp1"] = self.kshp[1]
        if self.outshp[0] is not None:
            d["self_outshp0"] = self.outshp[0]
        if self.outshp[1] is not None:
            d["self_outshp1"] = self.outshp[1]
        if self.imshp[0] is not None:
            d["self_imshp0"] = self.imshp[0]
        if self.imshp[1] is not None:
            d["self_imshp1"] = self.imshp[1]
        if self.imshp[2] is not None:
            d["self_imshp2"] = self.imshp[2]
        if self.bsize is not None:
            d["self_bsize"] = self.bsize
        if self.nkern is not None:
            d["self_nkern"] = self.nkern
        # Other hard coded stuff only if we have all shapes
        if all_shape:
            d["self_kshp_logical_r"] = self.kshp_logical[0]
            d["self_kshp_logical_c"] = self.kshp_logical[1]
            d["self_kshp_logical_stride_r"] = int(numpy.ceil(
                self.kshp_logical[0] / float(self.kshp[0])))
            d["self_kshp_logical_stride_c"] = int(numpy.ceil(
                self.kshp_logical[1] / float(self.kshp[1])))
            d["self_imshp_logical_r"] = self.imshp_logical[1]
            #numpy.B. 1 not 0
            d["self_imshp_logical_c"] = self.imshp_logical[2]
            # numpy.B. 2 not 1
            d["self_imshp_logical_stride_r"] = int(numpy.ceil(
                self.imshp_logical[1] / float(self.imshp[1])))
            d["self_imshp_logical_stride_c"] = int(numpy.ceil(
                self.imshp_logical[2] / float(self.imshp[2])))
            if not self.imshp[0] == 1:
                d["affectation"] = "+="
            d["all_shape"] = "1"
            d["dim_zz_const"] = "const"
            d["dim_zz_affect"] = ""
            # NOTE(review): several checks below compare with "a != b!=0" and
            # others use "a %% b!=0" (modulo); the semantics look inconsistent
            # with the error messages — confirm intended behavior before
            # changing the template.
            d["assert_size"] = """
// Check the batch size and the number of kernels (sometimes constant in the graph)
if(img2d_dim[0] != %(self_bsize)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the batch size in the image (%%ld) at run time is different"
            " than at build time (%%ld) for the ConvOp.",
            (long)img2d_dim[0], (long)%(self_bsize)s);
    %(fail)s;
}
if(kerns_dim[0] != %(self_nkern)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the number of kernels in the filter (%%ld) at run time is"
            " different than at build time (%%ld) for the ConvOp.",
            (long)kerns_dim[0], (long)%(self_nkern)s);
    %(fail)s;
}
// Check the size of the image (sometimes constant in the graph)
if(img2d_dim[1] != %(self_imshp0)s){
    PyErr_Format(PyExc_ValueError,
            "the image stack size (%%ld) at run time is different than"
            " at build time (%%ld) for the ConvOp.",
            (long)img2d_dim[1], (long)%(self_imshp0)s);
    %(fail)s;
}
if(img2d_dim[2] != %(self_imshp1)s){
    PyErr_Format(PyExc_ValueError,
            "the number of rows in the image (%%ld) at run time is different"
            " than at build time (%%ld) for the ConvOp.",
            (long)img2d_dim[2], (long)%(self_imshp1)s);
    %(fail)s;
}
if(img2d_dim[3] != %(self_imshp2)s){
    PyErr_Format(PyExc_ValueError,
            "the number of columns in the image (%%ld) at run time is"
            " different than at build time (%%ld) for the ConvOp.",
            (long)img2d_dim[3], (long)%(self_imshp2)s);
    %(fail)s;
}
// Check the size of the output (sometimes constant in the graph)
if(dim_zz[0] != %(self_outshp0)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the precomputed number of rows in the output (%%ld) at run time"
            " is different than at build time (%%ld) for the ConvOp.",
            (long)dim_zz[0], (long)%(self_outshp0)s);
    %(fail)s;
}
if(dim_zz[1] != %(self_outshp1)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the precomputed number of columns in the output (%%ld) at run"
            " time is different than at build time (%%ld) for the ConvOp.",
            (long)dim_zz[1], (long)%(self_outshp1)s);
    %(fail)s;
}
// Check the size of the filter (sometimes constant in the graph)
if(kerns_dim[1] %% %(self_imshp0)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the filter stack size (%%ld) at run time is different than at"
            " build time (%%ld) for the ConvOp.",
            (long)kerns_dim[1], (long)%(self_imshp0)s);
    %(fail)s;
}
if(kerns_dim[2] %% %(self_kshp0)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the number of rows in the filter (%%ld) at run time is different"
            " than at build time (%%ld) for the ConvOp.",
            (long)kerns_dim[2], (long)%(self_kshp0)s);
    %(fail)s;
}
if(kerns_dim[3] %% %(self_kshp1)s!=0){
    PyErr_Format(PyExc_ValueError,
            "the number of columns in the filter (%%ld) at run time is"
            " different than at build time (%%ld) for the ConvOp.",
            (long)kerns_dim[3], (long)%(self_kshp1)s);
    %(fail)s;
}
""" % (locals())
        else:
            d["affectation"] = "+="
            d["all_shape"] = "0"
            d["dim_zz_const"] = ""
            # Without static shapes the output size is computed at run time.
            d["dim_zz_affect"] = """
  if (mode == FULL) {
    dim_zz[0] = (int)ceil((dim_im[0]+dim_ker0-1)/float(%(self_dx)s));
    dim_zz[1] = (int)ceil((dim_im[1]+dim_ker1-1)/float(%(self_dy)s));
  } else {
    dim_zz[0] = (int)ceil((dim_im[0]-dim_ker0+1)/float(%(self_dx)s));
    dim_zz[1] = (int)ceil((dim_im[1]-dim_ker1+1)/float(%(self_dy)s));
  }
""" % d
            d["assert_size"] = ""
        if self.kshp_logical_top_aligned:
            d["self_kshp_logical_offset_r"] = 0
            d["self_kshp_logical_offset_c"] = 0
        elif all_shape:
            rstride = d["self_kshp_logical_stride_r"]
            cstride = d["self_kshp_logical_stride_c"]
            d["self_kshp_logical_offset_r"] = (self.kshp_logical[0] -
                                               (self.kshp[0] * rstride) -
                                               1 + rstride) % rstride
            d["self_kshp_logical_offset_c"] = (self.kshp_logical[1] -
                                               (self.kshp[1] * cstride) -
                                               1 + cstride) % cstride
            del rstride, cstride
        if node.inputs[0].type.dtype == "float32":
            d["type"] = "float"
        elif node.inputs[0].type.dtype == "float64":
            d["type"] = "double"
        else:
            raise Exception("Type %s not implemented" %
                            node.inputs[0].type.dtype)
        d["gemm"] = 'dgemm_'
        if not d["type"] == "double":
            d["gemm"] = 'sgemm_'
        # Template selection, from most specific to the general fallback.
        if self.imshp != self.imshp_logical or self.kshp != self.kshp_logical:
            if self.verbose:
                _logger.debug("return imshp!=imshp_logical or"
                              " self.kshp != self.kshp_logical shape version")
            return _conv_op_code_a % d
        if self.unroll_patch:
            if self.verbose:
                _logger.debug("return unroll patch version. all_shape=%s",
                              all_shape)
            return _conv_op_code_unroll_patch % d
        if ((self.unroll_batch is not None and self.unroll_batch > 0) or
            (self.unroll_kern is not None and self.unroll_kern > 0)):
            assert self.unroll_batch > 0
            assert self.unroll_kern > 0
            if self.verbose:
                _logger.debug("return unrolled batch (%s) and kern code (%s)",
                              str(self.unroll_batch), str(self.unroll_kern))
            return gen_conv_code_unroll_batch_kern(d, self.unroll_batch,
                                                   self.unroll_kern)
        #TODO: should we choose the unroll size automatically with the bigger divisor under 5?
        if self.out_mode == 'valid' and self.dx == 0 and self.dy == 0:
            if self.verbose:
                _logger.debug("return gemm version")
            return _conv_op_code_valid_gemm % d
        else:
            if self.verbose:
                _logger.debug("return no gemm version")
            return _conv_op_code_a % d
_conv_op_code_a = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL;
PyArrayObject *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;
const %(type)s fill_value = 0;
int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);
npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im_phys[2]={%(self_imshp1)s,%(self_imshp2)s};
npy_intp dim_im_log[2]={%(self_imshp_logical_r)s,%(self_imshp_logical_c)s};
npy_intp dim_ker_phys[2]={%(self_kshp0)s,%(self_kshp1)s};
npy_intp dim_ker_log[2]={%(self_kshp_logical_r)s,%(self_kshp_logical_c)s};
PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;
PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;
if(PyArray_NDIM(%(img2d)s)==2){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
PyErr_SetString(PyExc_ValueError, "img don't have a good shape");
%(fail)s;
}
if(PyArray_NDIM(%(filtersflipped)s)==3){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
std::stringstream temp;
temp << "nddim="<<PyArray_NDIM(%(filtersflipped)s);
std::string param = temp.str();
PyErr_SetString(PyExc_ValueError,
("kernel don't have a good shape. " + param).c_str());
%(fail)s;
}
%(assert_size)s
img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
|| (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
Py_DECREF(img2d);
img2d = contig;
img2d_arr = (PyArrayObject*)img2d;
if (!PyArray_ISCONTIGUOUS(img2d_arr)){
PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
%(fail)s;
}
}
filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))
|| (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){
contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
Py_DECREF(filtersflipped);
filtersflipped = contig;
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
%(fail)s;
}
}
if(mode != VALID && mode != FULL){
PyErr_SetString(PyExc_ValueError,
"invalid mode, only full and valid are supported");
%(fail)s;
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {
PyErr_SetString(PyExc_ValueError, "Input types must match");
%(fail)s;
}
if (!img2d)
{
PyErr_SetString(PyExc_AssertionError, "!img2d");
%(fail)s;
}
if (!filtersflipped)
{
PyErr_SetString(PyExc_AssertionError, "!filtersflipped");
%(fail)s;
}
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])
||!PyArray_ISCONTIGUOUS(%(z)s)
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[4] = {0,0,0,0};
dims[0]=%(self_bsize)s;
dims[1]=%(self_nkern)s;
dims[2]=dim_zz[0];
dims[3]=dim_zz[1];
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
//PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;
int Os[2];
Os[0]=%(self_outshp0)s;
Os[1]=%(self_outshp1)s;
//assertions
if (!PyArray_ISCONTIGUOUS(%(z)s))
{
PyErr_SetString(PyExc_AssertionError, "Output (%(z)s) not contiguous");
%(fail)s;
}
for(int b=0;b< %(self_bsize)s;b++){
for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){
%(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));
for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;
for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){
const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));
const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));
for (int iter_m=0; iter_m < Os[0]; iter_m++) {
// Reposition index into input image based on requested output size
//row position in logical output image
int pos_m = iter_m*%(self_dx)s;
//row anchor in logical input image (we will loop upward from here)
int new_m;
if (mode == FULL) new_m = pos_m ;
else new_m = (pos_m+dim_ker_log[0]-1);
for (int iter_n=0; iter_n < Os[1]; iter_n++) { // loop over columns
// current col position in logical output image
int pos_n=iter_n*%(self_dy)s;
%(type)s sum=0;
// Sum over kernel, if index into image is out of bounds
// fill with the value
// loop over logical rows in kernel
for (int j_log=0; j_log < %(self_kshp_logical_r)s; j_log++) {
// ind0_log: row position in logical input image
int ind0_log = (new_m-j_log);
if ((j_log < %(self_kshp_logical_offset_r)s) ||
(j_log - %(self_kshp_logical_offset_r)s) MOD %(self_kshp_logical_stride_r)s)
continue;
if (ind0_log MOD %(self_imshp_logical_stride_r)s)
continue;
int j_phys = ((j_log- %(self_kshp_logical_offset_r)s) /
%(self_kshp_logical_stride_r)s);
int ind0_phys = (ind0_log / %(self_imshp_logical_stride_r)s);
//std::cerr <<"j_log" << j_log << " j_phys " << j_phys << " " << ind0_phys << "\\n";
if(mode==FULL){
//This is a pointer to the current row of the kernel
const %(type)s * idx_hvals=&hvals[j_phys*dim_ker_phys[1]];
if(ind0_log < 0 || ind0_log >= dim_im_log[0]){
// the current row of the kernel is off the image
}else{
int k = max((int)(pos_n-dim_im_log[1])+1,0);
int max_k=min(pos_n+1,(int)dim_ker_log[1]);
const %(type)s * idx_in=&in[ind0_phys*dim_im_phys[1]];
for (int ind1_log=pos_n-k; k<max_k; k++,ind1_log--) {
if (1)
{
if ((k < %(self_kshp_logical_offset_c)s) ||
(k - %(self_kshp_logical_offset_c)s) MOD
%(self_kshp_logical_stride_c)s)
continue;
if (ind1_log MOD
%(self_imshp_logical_stride_c)s)
continue;
}
sum += idx_hvals[(k-%(self_kshp_logical_offset_c)s) /
%(self_kshp_logical_stride_c)s] *
idx_in[ind1_log / %(self_imshp_logical_stride_c)s];
}
}
}else{ // mode==VALID
//JB: should be dim_im[1] right? (was dim_im[0])
const %(type)s* idx_in=&in[ind0_phys*dim_im_phys[1]];
const %(type)s* idx_hvals=&hvals[j_phys*dim_ker_phys[1]];
int new_n = (pos_n+dim_ker_log[1]-1);
if (%(self_imshp_logical_stride_c)s != 1) // a general loop
{
for (int k=0,last=new_n; k < dim_ker_log[1]; k++,last--) {
if ((k < %(self_kshp_logical_offset_c)s) ||
(k - %(self_kshp_logical_offset_c)s) MOD
%(self_kshp_logical_stride_c)s)
continue;
else if (last MOD %(self_imshp_logical_stride_c)s)
continue;
else
{
sum+=idx_hvals[(k-%(self_kshp_logical_offset_c)s) /
%(self_kshp_logical_stride_c)s] *
idx_in[last/%(self_imshp_logical_stride_c)s];
}
}
}
else // self_imshp_stride_c == 1
{
int offset = %(self_kshp_logical_offset_c)s;
int k_phys=0;
for (int k_log=offset,last=new_n-offset;
k_log < dim_ker_log[1]; ) {
sum += idx_hvals[k_phys]*idx_in[last];
++k_phys;
last -= %(self_kshp_logical_stride_c)s;
k_log += %(self_kshp_logical_stride_c)s;
}
}
}
}//for j_log
out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;
}//for iter_n
}//for iter_m
}//for stack_size
if (0 && (mode==FULL)){
for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i)
std::cout << " " << out[i];
std::cout << "\\n";
}
}//for n_kern
}//for b
Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""
#########
######### ConvOp c_code for valid mode (uses gemm)
#########
_conv_op_code_valid_gemm = """
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *img2d_arr=NULL, *z_arr=NULL;
const int NKERN = %(self_nkern)s;
int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);
npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;
PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;
PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig;
if(PyArray_NDIM(%(img2d)s)==2){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
PyErr_SetString(PyExc_ValueError, "img don't have a good shape");
%(fail)s;
}
if(PyArray_NDIM(%(filtersflipped)s)==3){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
std::stringstream temp;
temp << "nddim="<<PyArray_NDIM(%(filtersflipped)s);
std::string param = temp.str();
PyErr_SetString(PyExc_ValueError,
("kernel don't have a good shape. " + param).c_str());
%(fail)s;
}
if (NKERN != kerns_dim[0])
{
PyErr_SetString(PyExc_NotImplementedError, "nonsense nkern");
%(fail)s;
}
img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
|| (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
Py_DECREF(img2d);
img2d = contig;
img2d_arr = (PyArrayObject*)img2d;
if (!PyArray_ISCONTIGUOUS(img2d_arr)){
PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
%(fail)s;
}
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}
if (!img2d) {
PyErr_SetString(PyExc_ValueError, "Null argument img2d");
%(fail)s;
}
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
|| (PyArray_DIMS(%(z)s)[3] != dim_zz[1])
)
{
{Py_XDECREF(%(z)s);}
npy_intp dims[4] = {0,0,0,0};
dims[0]=%(self_bsize)s;
dims[1]=%(self_nkern)s;
dims[2]=dim_zz[0];
dims[3]=dim_zz[1];
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;
%(assert_size)s
int Os[2];
Os[0] = dim_im[0]-dim_ker0+1;
Os[1] = dim_im[1]-dim_ker1+1;
// allocate a temporary buffer for storing the inner product of each nth kernel row
// with each row of an image
{
%(type)s * kbuf = (%(type)s *)malloc((Os[0] * NKERN + PyArray_Size((PyObject*)%(filtersflipped)s))* (npy_intp)sizeof(%(type)s));
int kbufstride = NKERN;
%(type)s * myfilters = kbuf + Os[0] * NKERN;
//copy out filtersflipped into filters un-flipped format
//std::cerr << "__filling myfilters__\\n";
for(int i=0;i < kerns_dim[0];++i){
for(int j=0;j < kerns_dim[1];++j){
for(int k=0;k < kerns_dim[2];++k){
for(int l=0;l < kerns_dim[3];++l){
%(type)s * ff = ((PyArray_NDIM(%(filtersflipped)s)) == 3)
? (%(type)s *)PyArray_GETPTR3(%(filtersflipped)s, i, kerns_dim[2]-1-k, kerns_dim[3]-1-l)
: (%(type)s *)PyArray_GETPTR4(%(filtersflipped)s, i, j, kerns_dim[2]-1-k, kerns_dim[3]-1-l);
myfilters[i * (kerns_dim[1]*kerns_dim[2]*kerns_dim[3])
+ j * (kerns_dim[2]*kerns_dim[3])
+ k * (kerns_dim[3])
+ l] = ff[0];
//std::cerr << " " << ff[0];
}
//std::cerr << "\\n";
}
//std::cerr << "(end of stack/batch " <<j << "/" << i << " ) \\n";
}
}
//std::cerr << "-----new loop ----\\n";
for(int b=0;b< %(self_bsize)s;b++){
for (int img_col = 0; img_col < Os[1]; ++img_col){
for (int filter_row = 0; filter_row < kerns_dim[2]; ++filter_row){
for (int stackidx = 0; stackidx < %(self_imshp0)s; ++stackidx){
%(type)s * img_colview =
(%(type)s *)(PyArray_GETPTR4(img2d, b, stackidx, filter_row, img_col));
%(type)s * filter_rows = myfilters + stackidx * (kerns_dim[2]*kerns_dim[3]) +
filter_row * kerns_dim[3];
//std::cerr << "filterview offset: " << filter_rows - myfilters << "\\n";
char N = 'N'; char T = 'T';
int Nz0 = Os[0];
int Nz1 = NKERN;
int K = kerns_dim[3];
%(type)s alpha = 1.0;
%(type)s beta = stackidx ? 1.0 : 0.0;
int imgview_stride = dim_im[1];
int filter_rows_stride =kerns_dim[1]*kerns_dim[2]*kerns_dim[3];
//remember, Fortran wants a column-major interpretation
assert(PyArray_STRIDES(img2d)[3] == (npy_intp)sizeof(%(type)s));
if (0){
std::cerr << "b " << b << " img_col " << img_col << " filterrow " << filter_row << " stackidx " <<stackidx << "\\n";
std::cerr << "colview (physical layout) stride: " << imgview_stride << "\\n";
for (int ii = 0; ii < Nz0; ++ii){
for (int jj = 0; jj < K; ++jj){
std::cerr << " " << img_colview[ii * imgview_stride + jj];
}
std::cerr << "\\n";
}
std::cerr << "filterview ("<<filter_row<<"'th rows) stride: " << filter_rows_stride << "\\n";
for (int ii = 0; ii < Nz1; ++ii){
for (int jj = 0; jj < K; ++jj){
std::cerr << " " << filter_rows[ii * filter_rows_stride + jj];
}
std::cerr << "\\n";
}
std::cerr << Nz1 << " " << Nz0 << " " << K << "\\n" ;
}
%(gemm)s(&T, &N,
&Nz1, &Nz0, &K,
&alpha,
filter_rows, &filter_rows_stride,
img_colview, &imgview_stride,
&beta, kbuf, &kbufstride);
if (0){
std::cerr << "z (logical layout) beta" << beta << "\\n";
for (int ii = 0; ii < Nz0; ++ii){
for (int jj = 0; jj < Nz1; ++jj){
std::cerr << " " << kbuf[ii * kbufstride + jj];
}
std::cerr << "\\n";
}
}
}
// now kbuf the sum over the stack, put it into the outbuf
for (int img_row = 0; img_row < Os[0]; ++img_row) {
for (int kernel_idx = 0; kernel_idx < NKERN; ++kernel_idx) {
%(type)s * z_p = (%(type)s *)PyArray_GETPTR4(%(z)s, b, kernel_idx, img_row, img_col);
if (0)
{
if (b >= PyArray_DIMS(%(z)s)[0]) %(fail)s;
if (kernel_idx >= PyArray_DIMS(%(z)s)[1]) %(fail)s;
if (img_row >= PyArray_DIMS(%(z)s)[2]) %(fail)s;
if (img_col >= PyArray_DIMS(%(z)s)[3]) %(fail)s;
}
z_p[0] += kbuf[img_row * kbufstride + kernel_idx];
}
}
}
}
}
free(kbuf);
}
Py_XDECREF(img2d);
"""
def gen_conv_code_unroll_batch_kern(d, unroll_bsize=1, unroll_ksize=1):
    """Build the ConvOp C implementation with batch/kernel loops unrolled.

    :param d: template-substitution dict; a copy is taken and augmented
        with the unroll bookkeeping keys (``unroll_bsize``, ``unroll_ksize``,
        ``unroll_iter``, ``unroll_biter``, ``unroll_kiter``).
    :param unroll_bsize: unroll factor for the batch loop (must be > 0).
    :param unroll_ksize: unroll factor for the kernel loop (must be > 0).
    :returns: the generated C source as a single string.

    NOTE(review): uses ``xrange`` — this module predates Python 3.
    """
    assert unroll_bsize > 0 and unroll_ksize > 0
    # Refuse dicts that already carry the bookkeeping keys we are about to set.
    if "unroll_bsize" in d or "unroll_ksize" in d or "unroll_iter" in d or "unroll_biter" in d or "unroll_kiter" in d:
        raise Exception("We can't use this dictionnary as we will overwrite some of its containt")
    d = d.copy()
    d["unroll_bsize"] = unroll_bsize
    d["unroll_ksize"] = unroll_ksize
    # Emit `size` copies of template `st`, varying %(unroll_iter)s per copy.
    def my_dup(st, size):
        s = ""
        for i in xrange(size):
            d["unroll_iter"] = i
            s += st % d
        return s + "\n"
    # Emit one copy of `st` for every (batch, kernel) unroll pair, exposing
    # %(unroll_biter)s, %(unroll_kiter)s and the flattened %(unroll_iter)s.
    def my_dup2(st):
        s = ""
        iter = 0
        for i in xrange(unroll_bsize):
            d["unroll_biter"] = i
            for j in xrange(unroll_ksize):
                d["unroll_kiter"] = j
                d["unroll_iter"] = iter
                iter += 1
                s += st % d
        return s + "\n"
    # Fixed prologue: argument checking, reshaping to 4d and output allocation.
    ret = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;;
const %(type)s fill_value = 0;
int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);
npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;
PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;
PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;
if(PyArray_NDIM(%(img2d)s)==2){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
std::stringstream temp;
temp << "nddim="<<PyArray_NDIM(%(img2d)s);
std::string param = temp.str();
PyErr_SetString(PyExc_ValueError,
            ("img don't have a good shape. " + param).c_str());
%(fail)s;
}
if(PyArray_NDIM(%(filtersflipped)s)==3){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
PyErr_SetString(PyExc_ValueError, "kernel don't have a good shape");
%(fail)s;
}
%(assert_size)s
img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != (npy_intp)sizeof(%(type)s))
    || (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
    Py_DECREF(img2d);
    img2d = contig;
    img2d_arr = (PyArrayObject*)img2d;
    if (!PyArray_ISCONTIGUOUS(img2d_arr)){
        PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
        %(fail)s;
    }
}
filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != (npy_intp)sizeof(%(type)s))
    || (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*(npy_intp)sizeof(%(type)s))){
    contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
    Py_DECREF(filtersflipped);
    filtersflipped = contig;
    filtersflipped_arr = (PyArrayObject*)filtersflipped;
    if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
        PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
        %(fail)s;
    }
}
if(mode != VALID && mode != FULL){
  PyErr_SetString(PyExc_ValueError, "invalid mode, only full and valid are supported"); %(fail)s;
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}
if (!img2d)
{
    PyErr_SetString(PyExc_AssertionError, "!img2d");
    %(fail)s;
}
if (!filtersflipped)
{
    PyErr_SetString(PyExc_AssertionError, "!filtersflipped");
    %(fail)s;
}
if ((!%(z)s)
  || *PyArray_DIMS(%(z)s)!=4
  ||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
  ||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
  ||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
  ||(PyArray_DIMS(%(z)s)[3] != dim_zz[1])
  ||!PyArray_ISCONTIGUOUS(%(z)s)
  )
{
  {Py_XDECREF(%(z)s);}
  npy_intp dims[4] = {0,0,0,0};
  dims[0]=%(self_bsize)s;
  dims[1]=%(self_nkern)s;
  dims[2]=dim_zz[0];
  dims[3]=dim_zz[1];
  %(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
  //PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;
int Os[2];
Os[0]=%(self_outshp0)s;
Os[1]=%(self_outshp1)s;
//assertions
if (!PyArray_ISCONTIGUOUS(%(z)s))
{
    PyErr_SetString(PyExc_AssertionError, "Output (%(z)s) not contiguous");
    %(fail)s;
}
for(int b=0;b< %(self_bsize)s ;b+=%(unroll_bsize)s){
    for(int n_kern=0;n_kern<%(self_nkern)s;n_kern+=%(unroll_ksize)s){
""" % d
    # Per-unroll output pointers, zeroed before accumulation.
    ret += my_dup2("%(type)s * __restrict__ out%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(z_arr,b+%(unroll_biter)s,n_kern+%(unroll_kiter)s));")
    ret += my_dup("for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out%(unroll_iter)s[i] = 0;", unroll_bsize * unroll_ksize)
    ret += """
        for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){
""" % d
    # Per-unroll input/kernel base pointers for this stack slice.
    ret += my_dup("const %(type)s * __restrict__ in%(unroll_iter)d=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b+%(unroll_iter)s,stack_size));", unroll_bsize)
    ret += my_dup("const %(type)s * __restrict__ hvals%(unroll_iter)s=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern+%(unroll_iter)s,stack_size));", unroll_ksize)
    ret += """
            int new_m;
            for (int iter_m=0; iter_m < Os[0]; iter_m++) {
                // Reposition index into input image based on requested output size
                int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image
                if (mode == FULL) new_m = pos_m ;
                else new_m = (pos_m+dim_ker0-1);
                for (int iter_n=0; iter_n < Os[1]; iter_n++) {  // loop over columns
                    int pos_n=iter_n*%(self_dy)s;
""" % d
    # One accumulator per unrolled (batch, kernel) pair.
    ret += my_dup(
        "%(type)s sum%(unroll_iter)s=0;", unroll_bsize * unroll_ksize)
    ret += """
                    // Sum over kernel, if index into image is out of bounds
                    // fill with the value
                    for (int j=0; j < dim_ker0; j++) {
                        int ind0 = (new_m-j);
                        if(mode==FULL){
""" % d
    ret += my_dup("const %(type)s * idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];", unroll_ksize)
    ret += """
                            if(ind0 < 0 || ind0 >= dim_im[0]){
                                if(fill_value!=0)
                                    for (int k=0; k < dim_ker1; k++) {
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                                    }
                            }else{
                                //do the part where kernel is to the right of the img
                                int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);
                                if(fill_value!=0){
                                    for(k=0;k<max_k;k++){
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                                    }
                                }else {k=max_k;}
                                //do the part where the kernel is on the img
                                max_k=min(pos_n+1,(int)dim_ker1);
""" % d
    ret += my_dup("const %(type)s * idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];", unroll_bsize)
    ret += """
                                for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
""" % d
    ret += my_dup2("sum%(unroll_iter)s+= idx_hvals%(unroll_kiter)s[k] * idx_in%(unroll_biter)s[ind1];")
    ret += """
                                }
                                //do the part to the left of the img
                                if(fill_value!=0)
                                    for(;k<dim_ker1;k++){
""" % d
    ret += my_dup2("sum%(unroll_iter)s += idx_hvals%(unroll_kiter)s[k] * fill_value;")
    ret += """
                                    }
                            }
                        }else{//valid mode
""" % d
    ret += my_dup("const %(type)s* idx_in%(unroll_iter)s=&in%(unroll_iter)s[ind0*dim_im[1]];", unroll_bsize)
    ret += my_dup("const %(type)s* idx_hvals%(unroll_iter)s=&hvals%(unroll_iter)s[j*dim_ker1];", unroll_ksize)
    ret += """
                            int new_n = (pos_n+dim_ker1-1);
                            for (int k=0,last=new_n; k < dim_ker1; k++,last--) {
""" % d
    ret += my_dup2("sum%(unroll_iter)s+=idx_hvals%(unroll_kiter)s[k]*idx_in%(unroll_biter)s[last];")
    ret += """
                            }
                        }
                    }//for j
""" % d
    ret += my_dup("out%(unroll_iter)s[iter_m*dim_zz[1]+iter_n] %(affectation)s sum%(unroll_iter)s;", unroll_bsize * unroll_ksize)
    ret += """
                }//for n
            }//for m
        }//for stack_size
    }//for n_kern
}//for b
Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""
    return ret
_conv_op_code_unroll_patch = """
const int mode=%(mode)s;
int typenum=0, typenum_f=0;
PyArrayObject *ain1=NULL, *ain2=NULL, *filtersflipped_arr=NULL, *img2d_arr=NULL, *z_arr=NULL;
const %(type)s fill_value = 0;//only value of 0 are currently tested and correctly implemented
int type_im=PyArray_TYPE(%(img2d)s);
int type_ker=PyArray_TYPE(%(filtersflipped)s);
const npy_intp dim_im[2]={%(self_imshp1)s,%(self_imshp2)s};
//The following line caused gcc 4.3.0 20080428 (Red Hat 4.3.0-8) to crash
//const npy_intp dim_ker[2]={%(self_kshp0)s,%(self_kshp1)s};
// The next line had gcc don't crash.
const npy_intp dim_ker0=%(self_kshp0)s;
const npy_intp dim_ker1=%(self_kshp1)s;
%(dim_zz_const)s npy_intp dim_zz[2]={%(self_outshp0)s,%(self_outshp1)s};
%(dim_zz_affect)s
PyArray_Dims img2d_shape;
npy_intp img2d_dim[4]={1,1,0,0};
img2d_shape.ptr=img2d_dim;
img2d_shape.len=4;
PyArray_Dims kerns_shape;
npy_intp kerns_dim[4]={1,1,0,0};
kerns_shape.ptr=kerns_dim;
kerns_shape.len=4;
PyObject *img2d=NULL, *contig, *filtersflipped=NULL;
if(PyArray_NDIM(%(img2d)s)==2){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==3){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else if(PyArray_NDIM(%(img2d)s)==4){
img2d_dim[3]=PyArray_DIMS(%(img2d)s)[3];
img2d_dim[2]=PyArray_DIMS(%(img2d)s)[2];
img2d_dim[1]=PyArray_DIMS(%(img2d)s)[1];
img2d_dim[0]=PyArray_DIMS(%(img2d)s)[0];
}else {
PyErr_Format(PyExc_ValueError,
"image don't have a good number of dimensions %%d. ", PyArray_NDIM(%(filtersflipped)s));
%(fail)s;
}
if(PyArray_NDIM(%(filtersflipped)s)==3){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else if(PyArray_NDIM(%(filtersflipped)s)==4){
kerns_dim[3]=PyArray_DIMS(%(filtersflipped)s)[3];
kerns_dim[2]=PyArray_DIMS(%(filtersflipped)s)[2];
kerns_dim[1]=PyArray_DIMS(%(filtersflipped)s)[1];
kerns_dim[0]=PyArray_DIMS(%(filtersflipped)s)[0];
}else{
PyErr_Format(PyExc_ValueError,
"kernel don't have a good number of dimensions %%d. ", PyArray_NDIM(%(filtersflipped)s));
%(fail)s;
}
%(assert_size)s
img2d = PyArray_Newshape(%(img2d)s,&img2d_shape, NPY_CORDER);
img2d_arr = (PyArrayObject*)img2d;
if ((PyArray_STRIDES(img2d_arr)[3] != sizeof(%(type)s))
|| (PyArray_STRIDES(img2d_arr)[2] != PyArray_DIMS(img2d_arr)[3]*sizeof(%(type)s))){
contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)img2d));
Py_DECREF(img2d);
img2d = contig;
img2d_arr = (PyArrayObject*)img2d;
if (!PyArray_ISCONTIGUOUS(img2d_arr)){
PyErr_SetString(PyExc_ValueError, "img2d isn't contiguous");
%(fail)s;
}
}
filtersflipped = PyArray_Newshape(%(filtersflipped)s,&kerns_shape, NPY_CORDER);
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if ((PyArray_STRIDES(filtersflipped_arr)[3] != sizeof(%(type)s))
|| (PyArray_STRIDES(filtersflipped_arr)[2] != PyArray_DIMS(filtersflipped_arr)[3]*sizeof(%(type)s))){
contig = (PyObject*)(PyArray_GETCONTIGUOUS((PyArrayObject*)filtersflipped));
Py_DECREF(filtersflipped);
filtersflipped = contig;
filtersflipped_arr = (PyArrayObject*)filtersflipped;
if (!PyArray_ISCONTIGUOUS(filtersflipped_arr)){
PyErr_SetString(PyExc_ValueError, "filtersflipped isn't contiguous");
%(fail)s;
}
}
if(mode != VALID && mode != FULL){
PyErr_SetString(PyExc_ValueError, "invalid mode, only full and valid are supported"); %(fail)s;
}
if(dim_zz[0]<=0 || dim_zz[1]<=0){
PyErr_Format(PyExc_ValueError,
"Output dimensions are not valid %%ldx%%ld",(long int)dim_zz[0],(long int)dim_zz[1]);
%(fail)s;
}
typenum = PyArray_ObjectType((PyObject*)%(img2d)s, 0);
typenum_f = PyArray_ObjectType((PyObject*)%(filtersflipped)s, 0);
if (typenum < 0) {PyErr_SetString(PyExc_ValueError, "Invalid type"); %(fail)s;}
if (typenum != typenum_f) {PyErr_SetString(PyExc_ValueError, "Input types must match"); %(fail)s;}
if (!img2d) %(fail)s;
if (!filtersflipped) %(fail)s;
if ((!%(z)s)
|| *PyArray_DIMS(%(z)s)!=4
||(PyArray_DIMS(%(z)s)[0] != %(self_bsize)s)
||(PyArray_DIMS(%(z)s)[1] != %(self_nkern)s)
||(PyArray_DIMS(%(z)s)[2] != dim_zz[0])
|| (PyArray_DIMS(%(z)s)[3] != dim_zz[1])
)
{
if (%(z)s) Py_DECREF(%(z)s);
npy_intp dims[4] = {0,0,0,0};
if(!dims) %(fail)s;
dims[0]=%(self_bsize)s;
dims[1]=%(self_nkern)s;
dims[2]=dim_zz[0];
dims[3]=dim_zz[1];
%(z)s = (PyArrayObject*) PyArray_ZEROS(4, dims, typenum,0);
}else{
//PyArray_FILLWBYTE((PyObject*)%(z)s,0);
}
z_arr = (PyArrayObject*) %(z)s;
//assertions
if (PyArray_STRIDES(%(z)s)[0] != PyArray_DIMS(%(z)s)[1] *PyArray_DIMS(%(z)s)[2] *PyArray_DIMS(%(z)s)[3] * sizeof(%(type)s)) %(fail)s;
if (PyArray_STRIDES(%(z)s)[1] != PyArray_DIMS(%(z)s)[2] * PyArray_DIMS(%(z)s)[3] * sizeof(%(type)s)) %(fail)s;
if (PyArray_STRIDES(%(z)s)[2] != PyArray_DIMS(%(z)s)[3] * sizeof(%(type)s)) %(fail)s;
if (PyArray_STRIDES(%(z)s)[3] != sizeof(%(type)s)) %(fail)s;
//The if on the number of loop make a speed up for small array.
//with g++ 4.5.1. The compiler should be smart enough to do this himself!
#pragma omp parallel for schedule(static) if(%(self_bsize)s * %(self_nkern)s > 1)
// We merge the 2 loop into one to make it easier to parallelize on both
// This is the equivalent of those 2 lines.
//for(int b=0;b< %(self_bsize)s;b++){
// for(int n_kern=0;n_kern<%(self_nkern)s;n_kern++){
for(int batch_kern_idx=0;
batch_kern_idx < %(self_bsize)s * %(self_nkern)s;
batch_kern_idx++){
int b = batch_kern_idx / %(self_nkern)s;
int n_kern = batch_kern_idx %% %(self_nkern)s;
%(type)s * __restrict__ out=(%(type)s *)(PyArray_GETPTR2(z_arr,b,n_kern));
for (int i = 0; i < dim_zz[0]*dim_zz[1]; ++i) out[i] = 0;
for(int stack_size=0;stack_size<%(self_imshp0)s;stack_size++){
const %(type)s * __restrict__ in=(%(type)s *)(PyArray_GETPTR2(img2d_arr,b,stack_size));
const %(type)s * __restrict__ hvals=(%(type)s *)(PyArray_GETPTR2(filtersflipped_arr,n_kern,stack_size));
int new_m;
for (int iter_m=0; iter_m < dim_zz[0]; iter_m++) {
// Reposition index into input image based on requested output size
int pos_m = iter_m*%(self_dx)s;//The position of the patch in the image
if (mode == FULL) new_m = pos_m ;
else new_m = (pos_m+dim_ker0-1);
for (int iter_n=0; iter_n < dim_zz[1]; iter_n++) { // loop over columns
int pos_n=iter_n*%(self_dy)s;
%(type)s sum=0;
%(type)s sum2=0;
%(type)s sum3=0;
%(type)s sum4=0;
int nb_sum=0;
// Sum over kernel, if index into image is out of bounds
// fill with the value
for (int j=0; j < dim_ker0; j++) {
int ind0 = (new_m-j);
if(mode==FULL){
const %(type)s * idx_hvals=&hvals[j*dim_ker1];
if(ind0 < 0 || ind0 >= dim_im[0]){
if(fill_value!=0)
for (int k=0; k < dim_ker1; k++) {
sum+= idx_hvals[k] * fill_value;
}
}else{
//do the part where kernel is to the right of the img
int k=0,max_k=max((int)(pos_n-dim_im[1])+1,0);
if(fill_value!=0){
for(k=0;k<max_k;k++){
sum+= idx_hvals[k]*fill_value;
}
}else {k=max_k;}
//do the part where the kernel is on the img
max_k=min(pos_n+1,(int)dim_ker1);
const %(type)s * idx_in=&in[ind0*dim_im[1]];
if(iter_n + 4*%(self_dy)s < dim_zz[1]
&& iter_n>dim_ker1-1
&& iter_n<dim_im[1]-dim_ker1+1-3){
nb_sum=4;
for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
sum+=idx_hvals[k]*idx_in[ind1];
sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];
sum3+=idx_hvals[k]*idx_in[ind1+2*%(self_dy)s];
sum4+=idx_hvals[k]*idx_in[ind1+3*%(self_dy)s];
}
}else if(iter_n + 2*%(self_dy)s < dim_zz[1]
&& iter_n>dim_ker1-1
&& iter_n<dim_im[1]-dim_ker1+1){
nb_sum=2;
for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
sum+=idx_hvals[k]*idx_in[ind1];
sum2+=idx_hvals[k]*idx_in[ind1+%(self_dy)s];
}
}else{
nb_sum=1;
/*
%(type)s sum_=0;
if((k-max_k) & 0x1 != 0){
sum+= idx_hvals[k] * idx_in[pos_n-k];
}
for (int ind1=pos_n-k; k<max_k; k+=2,ind1-=2) {
sum+= idx_hvals[k] * idx_in[ind1];
sum_+= idx_hvals[k+1] * idx_in[ind1-1];
}
sum+=sum_;
*/
for (int ind1=pos_n-k; k<max_k; k++,ind1--) {
sum+=idx_hvals[k]*idx_in[ind1];
}
}
//do the part to the left of the img
if(fill_value!=0)
for(;k<dim_ker1;k++) sum+= idx_hvals[k]*fill_value;
}
}else{//valid mode
const %(type)s* idx_in=&in[ind0*dim_im[1]];
const %(type)s* idx_hvals=&hvals[j*dim_ker1];
if(iter_n + 4*%(self_dy)s < dim_zz[1]){
nb_sum=4;
for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
sum+=idx_hvals[k]*idx_in[im_idx];
sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];
sum3+=idx_hvals[k]*idx_in[im_idx+2*%(self_dy)s];
sum4+=idx_hvals[k]*idx_in[im_idx+3*%(self_dy)s];
}
}else if(iter_n + 2*%(self_dy)s < dim_zz[1]){
nb_sum=2;
for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
sum+=idx_hvals[k]*idx_in[im_idx];
sum2+=idx_hvals[k]*idx_in[im_idx+%(self_dy)s];
}
}else{
nb_sum=1;
for (int k=dim_ker1-1,im_idx=pos_n; k >=0; k--,im_idx++) {
sum+=idx_hvals[k]*idx_in[im_idx];
}
}
}//else valid mode
}//for j
switch(nb_sum){
case 4: out[iter_m*dim_zz[1]+iter_n+3] %(affectation)s sum4;
case 3: out[iter_m*dim_zz[1]+iter_n+2] %(affectation)s sum3;
case 2: out[iter_m*dim_zz[1]+iter_n+1] %(affectation)s sum2;
case 1: out[iter_m*dim_zz[1]+iter_n] %(affectation)s sum;
}
iter_n+=nb_sum-1;
}//for iter_n
}//for iter_m
}//for stack_size
}//for b and n_kern
Py_XDECREF(img2d);
Py_XDECREF(filtersflipped);
"""
| 41.000862 | 170 | 0.559912 |
826d57dfdd439877d0fbcbb0d1e9b962553f1c1d | 30,705 | py | Python | src/itemsapp/models.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | src/itemsapp/models.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | src/itemsapp/models.py | robertsmoto/sodavault | 200e843be7abe6cc447647bba55c7c1309092e5e | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from imagekit.models import ProcessedImageField
from imagekit.processors import ResizeToFill
from ckeditor.fields import RichTextField
from configapp.models import Price
from django.db.models import Sum
from django.db.models import Prefetch
import math
from sodavault.utils_logging import svlog_info
class Group(models.Model):
    """Generic taxonomy term; ``cat_type`` discriminates the proxy subclasses
    (Department / Category / Tag) defined below."""
    # Discriminator values used by the proxy managers and save() overrides.
    CAT_TYPE_CHOICES = [
        ('CAT', 'Category'),
        ('TAG', 'Tag'),
        ('DEP', 'Department'),
    ]
    cat_type = models.CharField(
        max_length=3,
        blank=True,
        choices=CAT_TYPE_CHOICES,
    )
    name = models.CharField(max_length=200, blank=True)
    # NOTE(review): slug max_length (50) is shorter than name (200) — confirm
    # intentional; changing it requires a migration.
    slug = models.SlugField(max_length=50, null=True, blank=True)
    class Meta:
        ordering = ['name', ]
    def __str__(self):
        return '{}'.format(self.name)
class DepartmentManager(models.Manager):
    """Manager that narrows Group rows to department terms."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(cat_type='DEP')
class Department(Group):
    """Proxy of Group whose rows carry ``cat_type == 'DEP'``."""
    objects = DepartmentManager()

    class Meta:
        proxy = True
        verbose_name_plural = "04. Departments"

    def save(self, *args, **kwargs):
        # Stamp the discriminator when it was left blank.
        if self.cat_type == '':
            self.cat_type = 'DEP'
        super().save(*args, **kwargs)
class CategoryManager(models.Manager):
    """Manager that narrows Group rows to category terms."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(cat_type='CAT')
class Category(Group):
    """Proxy of Group whose rows carry ``cat_type == 'CAT'``."""
    objects = CategoryManager()

    class Meta:
        proxy = True
        verbose_name_plural = "05. Categories"

    def save(self, *args, **kwargs):
        # Stamp the discriminator when it was left blank.
        if self.cat_type == '':
            self.cat_type = 'CAT'
        super().save(*args, **kwargs)
class TagManager(models.Manager):
    """Manager that narrows Group rows to tag terms."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(cat_type='TAG')
class Tag(Group):
    """Proxy of Group whose rows carry ``cat_type == 'TAG'``."""
    objects = TagManager()

    class Meta:
        proxy = True
        verbose_name_plural = "06. Tags"

    def save(self, *args, **kwargs):
        # Stamp the discriminator when it was left blank.
        if self.cat_type == '':
            self.cat_type = 'TAG'
        super().save(*args, **kwargs)
class PartManager(models.Manager):
    """Manager restricted to part-type items."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type='PART')
class AllProductManager(models.Manager):
    """Manager restricted to product-type items (any product subtype)."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type='PROD')
class SimpleProductManager(models.Manager):
    """Manager restricted to simple products."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type="PROD", product_type="SIMP")
class DigitalProductManager(models.Manager):
    """Manager restricted to digital products."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type="PROD", product_type="DIGI")
class BundleProductManager(models.Manager):
    """Manager restricted to bundled products."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type="PROD", product_type="BUND")
class VariableProductManager(models.Manager):
    """Manager restricted to variable products."""

    def get_queryset(self):
        base_qs = super().get_queryset()
        return base_qs.filter(item_type="PROD", product_type="VARI")
class Item(models.Model):
    """Inventory item: either a raw part or a sellable product.

    ``item_type`` splits parts from products; ``product_type`` further splits
    products (simple/digital/bundled/variable).  The named managers below
    expose pre-filtered querysets for each flavor.
    """
    # Shared pricing configuration; deleting the Price cascades to its items.
    price_class = models.ForeignKey(
        Price,
        related_name='price_items',
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    # Taxonomy links — all three point at proxies of Group.
    departments = models.ManyToManyField(
        Department,
        related_name='department_items',
        blank=True)
    categories = models.ManyToManyField(
        Category,
        related_name='category_items',
        blank=True)
    tags = models.ManyToManyField(
        Tag,
        related_name='tag_item',
        blank=True)
    ITEM_TYPE_CHOICES = [
        ('PART', 'Part'),
        ('PROD', 'Product'),
    ]
    item_type = models.CharField(
        max_length=4,
        blank=True,
        choices=ITEM_TYPE_CHOICES,
    )
    PRODUCT_TYPE_CHOICES = [
        ('SIMP', 'Simple'),
        ('DIGI', 'Digital'),
        ('BUND', 'Bundled'),
        ('VARI', 'Variable'),
    ]
    product_type = models.CharField(
        max_length=4,
        blank=True,
        choices=PRODUCT_TYPE_CHOICES,
    )
    sku = models.CharField(max_length=100, blank=True)
    name = models.CharField(max_length=100, blank=True)
    description = models.TextField(
        blank=True,
        help_text="For internal and purchasing use.")
    # ecpu = estimated cost per unit; the *_override fields win when set and
    # *_calc_from records provenance of the computed value.
    ecpu = models.DecimalField(
        max_digits=14, decimal_places=4, blank=True, null=True)
    ecpu_override = models.DecimalField(
        max_digits=14, decimal_places=4, blank=True, null=True)
    ecpu_calc_from = models.CharField(
        max_length=100,
        blank=True,
        help_text="how ecpu has been calculated")
    # NOTE(review): help_text typo "singlular" (x2) — fixing requires a
    # migration, so it is only flagged here.
    unit = models.CharField(
        max_length=100,
        blank=True,
        help_text="singlular unit")
    unit_override = models.CharField(
        max_length=100,
        blank=True,
        help_text="singlular unit")
    price = models.DecimalField(
        max_digits=14, decimal_places=2, blank=True, null=True)
    price_override = models.DecimalField(
        max_digits=14, decimal_places=2, blank=True, null=True)
    price_calc_from = models.CharField(
        max_length=100,
        blank=True,
        help_text="how price has been calculated")
    # Default manager first, then the pre-filtered flavors.
    objects = models.Manager()
    parts = PartManager()
    all_products = AllProductManager()
    simple_products = SimpleProductManager()
    digital_products = DigitalProductManager()
    bundle_products = BundleProductManager()
    variable_products = VariableProductManager()
def __str__(self):
return "{} {}".format(self.sku, self.name)
@property
def is_digital(self):
# raising an exception is the default behavior for one-to-one relationships
try:
self.digital_options
return True
except Exception as e:
svlog_info(f"Digital options doe not exist: {e}")
return False
@property
def is_bundle(self):
return True if self.bundle_parents.exists() else False
@property
def is_variable(self):
return True if self.variation_parents.exists() else False
@property
def inv_stats(self):
_le_q = None
if self.item_type == 'PART':
_le_q = self.le_parts.filter(account='IRAW').aggregate(
Sum('debit_amount'),
Sum('credit_amount'),
Sum('debit_quantity'),
Sum('credit_quantity'))
if self.item_type == 'PROD':
_le_q = self.le_products.filter(account='IMER').aggregate(
Sum('debit_amount'),
Sum('credit_amount'),
Sum('debit_quantity'),
Sum('credit_quantity'))
"""
'debit_amount__sum': Decimal('3363.66'),
'credit_amount__sum': None,
'debit_quantity__sum': Decimal('10000.00'),
'credit_quantity__sum': None
"""
def check_for_zero(self, myvariable):
return 0 if myvariable is None else myvariable
_d_amount = 0
_c_amount = 0
_d_quantity = 0
_c_quantity = 0
if _le_q:
_d_amount = check_for_zero(self, _le_q['debit_amount__sum'])
_c_amount = check_for_zero(self, _le_q['credit_amount__sum'])
_d_quantity = check_for_zero(self, _le_q['debit_quantity__sum'])
_c_quantity = check_for_zero(self, _le_q['credit_quantity__sum'])
_cost = _d_amount - _c_amount
_quantity = _d_quantity - _c_quantity
_avg_cpu = _cost / _quantity if _quantity > 0 else 0
inv_stats_dict = {}
if _quantity == 0:
_cost = 0
_quantity = 0
_avg_cpu = 0
else:
inv_stats_dict['cost'] = _cost
inv_stats_dict['quantity'] = _quantity
inv_stats_dict['avg_cpu'] = _avg_cpu
# print("inv_stats_dict", inv_stats_dict)
return inv_stats_dict
# inv_stats['cost'], inv_stats['quantity'], inv_stats['avg_cpu']
@property
def available_inventory(self):
inv_stats = self.inv_stats
# print("here inv_stas", inv_stats)
if inv_stats:
return "{} units available. {} total cost. {} avg cpu".format(
int(inv_stats['quantity']),
round(inv_stats['cost'], 2),
round(inv_stats['avg_cpu'], 4)
)
else:
return "No available inventory"
@property
def calc_max_new_inventory(self):
    """Work out how many new units may be brought into inventory.

    Returns either a plain explanatory string (when no ecpu is assigned
    yet — NOTE(review): callers that unpack a tuple should beware) or a
    ``(message, limiting_part_dict)`` tuple where the dict carries
    ``is_limited`` plus the limiting part's id/name/quantities.
    """
    # checks there is an ecpu assigned to the item
    if self.ecpu == 0:
        return """Before you can create new inventory, you must assign a cost.
        You can assign costs by creating winning bids, assembling products
        from parts, or by entering an estimated cost override (ecpu override).
        """
    # for products assembled from parts, output may be limited by parts inventory
    _pid = None
    _ppj_q = None
    _part_inv = []
    _part_dict = {}
    # query is_unlimited = False,
    # because an unlimited Part does not limit the creation of a Product
    if self.item_type == "PROD":
        _pid = self.id
        print("self.id, self.name", self.id, self.name)
        _ppj_q = ProductPartJoin.objects.filter(
            products_id=_pid,
            is_unlimited=False
        ).prefetch_related(
            Prefetch('parts'))
    if _ppj_q:
        # Collect per-part availability; a part with no inventory caps the
        # buildable quantity at zero.
        for ppj in _ppj_q:
            _part_dict = {}
            if ppj.parts.inv_stats:
                _part_dict['id'] = ppj.parts.id
                _part_dict['name'] = ppj.parts.name
                _part_dict['total_quantity'] = math.floor(
                    ppj.parts.inv_stats['quantity'])
                _part_dict['max_quantity'] = math.floor(
                    ppj.parts.inv_stats['quantity'] / ppj.quantity)
            else:
                _part_dict['id'] = ppj.parts.id
                _part_dict['name'] = ppj.parts.name
                _part_dict['total_quantity'] = 0
                _part_dict['max_quantity'] = 0
            _part_inv.append(_part_dict)

        # Ascending sort puts the most limiting part first.
        def by_max_quantity(p_list):
            return p_list['max_quantity']
        _part_inv.sort(key=by_max_quantity)
        print("_part_inv", _part_inv)
        # change the return to include meaningful information
        if len(_part_inv) > 0:
            _part_inv[0]['is_limited'] = True
            return """This product is assembled from parts. You can bring a maximum
            number of {} new pcs into inventory. New inventory is currently
            limted by {} which currently has {} pcs in stock.
            """.format(
                _part_inv[0]['max_quantity'],
                _part_inv[0]['name'],
                _part_inv[0]['total_quantity']
            ), _part_inv[0]
        else:
            _part_inv.append(_part_dict)
            _part_inv[0]['is_limited'] = False
            _part_inv[0]['max_quantity'] = None
            return """This product is assembled from parts, however,
            it is not limited by the inventory of those parts.""", _part_inv[0]
    # no limiting parts: may create unlimited inventory
    else:
        _part_inv.append(_part_dict)
        _part_inv[0]['is_limited'] = False
        _part_inv[0]['max_quantity'] = None
        return "You may create unlimited new inventory for this item.", _part_inv[0]
@property
def max_new_inventory(self):
    """First element of calc_max_new_inventory (the message text)."""
    result = self.calc_max_new_inventory
    # NOTE(review): when ecpu == 0, calc_max_new_inventory returns a plain
    # string, so [0] yields only its first character — confirm intended.
    return result[0]
@property
def calc_ecpu(self):
    """Estimated cost per unit (ecpu) with its source designator.

    Resolution order (later sources win):
      1. winning bid (cost + shipping spread over the bid quantity),
      2. sum of assembled parts (products only),
      3. explicit ecpu/unit overrides on the item.

    Returns a dict with keys ``ecpu``, ``unit`` and ``designator``.
    """
    _ecpu = 0
    _unit = ""
    _designator = ""
    # Look up this item's winning bid, if any.
    _w_bid = None
    if self.item_type == 'PART':
        _w_bid = self.bid_parts.filter(is_winning_bid=True).first()
    if self.item_type == 'PROD':
        _w_bid = self.bid_products.filter(is_winning_bid=True).first()
    # ecpu based on winning bid: landed cost divided by quantity.
    if _w_bid:
        _cost = _w_bid.cost if _w_bid.cost is not None else 0
        _shipping = _w_bid.shipping if _w_bid.shipping is not None else 0
        _quantity = _w_bid.quantity
        _unit = _w_bid.units
        _unit = _unit.rstrip('s')  # singularize, e.g. "pcs" -> "pc"
        _ecpu = (_cost + _shipping) / _quantity if _quantity > 0 else 0
        _designator = "ecpu based on winning bid"
    # ecpu based on assembled parts (products only) supersedes the bid.
    _items_q = None
    if self.item_type == 'PROD':
        _items_q = self.ppj_products.all().prefetch_related('parts')
    if _items_q:
        _total_ecpu = 0
        for it in _items_q:
            _quantity = it.quantity if it.quantity is not None else 0
            # Renamed from `_ecpu` so the per-part value is not confused
            # with the running result.
            _part_ecpu = it.parts.ecpu if it.parts.ecpu is not None else 0
            _total_ecpu = _total_ecpu + (_quantity * _part_ecpu)
        _ecpu = round(_total_ecpu, 4)
        _unit = "pc"
        _designator = "ecpu based on assembled parts"
    # Explicit overrides take precedence over everything above.
    _ecpu = self.ecpu_override if self.ecpu_override is not None else _ecpu
    _unit = self.unit_override if self.unit_override != "" else _unit
    if self.ecpu_override:
        _designator = "ecpu based on override"
    # Fix: removed leftover debug print(); this property runs on every save.
    calc_ecpu = {}
    calc_ecpu['ecpu'] = _ecpu
    calc_ecpu['unit'] = _unit
    calc_ecpu['designator'] = _designator
    return calc_ecpu
@property
def calc_price(self):
    """Sale price for the item plus the cost basis it was derived from.

    Cost-basis priority: ecpu override, then average cost of available
    inventory, then estimated cost per unit.  Price is then derived from
    the assigned price class (flat / markup / margin) unless a price
    override is set.

    Returns a dict with keys ``cost``, ``price`` and ``designator``.
    """
    _cost = 0
    _designator = ""
    _price = 0
    # Pick the cost basis.
    if self.ecpu_override is not None:
        _cost = self.ecpu_override
        _designator = "ecpu override"
    elif self.inv_stats:
        # inv_stats is an empty (falsy) dict when nothing is on hand.
        _cost = self.inv_stats['avg_cpu']
        _designator = "avg cpu of available inventory"
    elif self.ecpu is not None:
        _cost = self.ecpu
        _designator = "estimated cost per unit (ecpu)"
    else:
        # Fix: dropped the dangling `pass` that followed these assignments.
        _cost = 0
        _designator = "not able to calculate a cost per unit"
    # Calculate price based on cost and the assigned price class.
    if self.price_class is not None:
        if self.price_class.is_flat is True:
            _price = _cost + self.price_class.amount
        elif self.price_class.is_markup is True:
            _price = _cost + (_cost * (self.price_class.amount / 100))
        elif self.price_class.is_margin is True:
            _price = _cost / (1 - (self.price_class.amount / 100))
    else:
        _price = 0
        _designator = "please assign a price class"
    if self.price_override is not None:
        _price = self.price_override
        _designator = "price override"
    calc_price = {}
    calc_price['cost'] = _cost
    calc_price['price'] = _price
    calc_price['designator'] = _designator
    return calc_price
def save(self, *args, **kwargs):
    """Recompute derived pricing fields before persisting.

    calc_ecpu must run first: it sets self.ecpu, which calc_price reads
    as a fallback cost basis.
    """
    _calc_ecpu = self.calc_ecpu
    self.ecpu = _calc_ecpu['ecpu']
    self.unit = _calc_ecpu['unit']
    self.ecpu_calc_from = _calc_ecpu['designator']
    _calc_price = self.calc_price
    self.price = _calc_price['price']
    self.price_calc_from = _calc_price['designator']
    super(Item, self).save(*args, **kwargs)
class Part(Item):
    """Proxy of Item restricted to parts (item_type == "PART")."""
    objects = PartManager()
    class Meta:
        proxy = True
        verbose_name_plural = "01. Parts"
    def save(self, *args, **kwargs):
        # Force the discriminator so a Part can never be saved as a product.
        self.item_type="PART"
        super(Part, self).save(*args, **kwargs)
class Product(Item):
    """Proxy of Item covering all products (item_type == "PROD")."""
    objects = AllProductManager()
    class Meta:
        proxy = True
        verbose_name_plural = "02a. All Products"
    def save(self, *args, **kwargs):
        # Force the discriminator so a Product can never be saved as a part.
        self.item_type = "PROD"
        super(Product, self).save(*args, **kwargs)
class SimpleProduct(Item):
    """Proxy of Item for simple products (product_type "SIMP")."""
    objects = SimpleProductManager()
    class Meta:
        proxy = True
        verbose_name_plural = "02b. Simple Products"
    def save(self, *args, **kwargs):
        """Stamp the type discriminators, then persist via Item.save."""
        self.item_type = "PROD"
        if self.product_type == "":
            self.product_type = "SIMP"
        # Bug fix: was super(Product, self) — SimpleProduct is not a subclass
        # of Product, so the two-argument super() raised TypeError at runtime.
        super(SimpleProduct, self).save(*args, **kwargs)
class DigitalProduct(Item):
    """Proxy of Item for digital products (product_type "DIGI")."""
    objects = DigitalProductManager()
    class Meta:
        proxy = True
        verbose_name_plural = "02c. Digital Products"
    def save(self, *args, **kwargs):
        """Stamp the type discriminators, then persist via Item.save."""
        self.item_type = "PROD"
        if self.product_type == "":
            self.product_type = "DIGI"
        # Bug fix: was super(Product, self) — DigitalProduct is not a subclass
        # of Product, so the two-argument super() raised TypeError at runtime.
        super(DigitalProduct, self).save(*args, **kwargs)
class BundleProduct(Item):
    """Proxy of Item for bundle products (product_type "BUND")."""
    objects = BundleProductManager()
    class Meta:
        proxy = True
        verbose_name_plural = "02d. Bundle Products"
    def save(self, *args, **kwargs):
        """Stamp the type discriminators, then persist via Item.save."""
        self.item_type = "PROD"
        if self.product_type == "":
            self.product_type = "BUND"
        # Bug fix: was super(Product, self) — BundleProduct is not a subclass
        # of Product, so the two-argument super() raised TypeError at runtime.
        super(BundleProduct, self).save(*args, **kwargs)
class VariableProduct(Item):
    """Proxy of Item for variable products (product_type "VARI")."""
    objects = VariableProductManager()
    class Meta:
        proxy = True
        # Typo fix: was "Varialbe Products" (admin display string only).
        verbose_name_plural = "02e. Variable Products"
    def save(self, *args, **kwargs):
        """Stamp the type discriminators, then persist via Item.save."""
        self.item_type = "PROD"
        if self.product_type == "":
            self.product_type = "VARI"
        # Bug fix: was super(Product, self) — VariableProduct is not a subclass
        # of Product, so the two-argument super() raised TypeError at runtime.
        super(VariableProduct, self).save(*args, **kwargs)
class Identifier(models.Model):
    """One-to-one holder of external identifiers for an Item."""
    item = models.OneToOneField(
        Item,
        related_name='identifiers',
        null=True,
        on_delete=models.CASCADE)
    # pid_i / pid_c: presumably numeric and character product ids — confirm.
    pid_i = models.BigIntegerField(null=True, blank=True)
    pid_c = models.CharField(max_length=100, blank=True)
    gtin = models.BigIntegerField(null=True, blank=True)
    isbn = models.BigIntegerField(null=True, blank=True)
    def __str__(self):
        return '{}'.format(self.item.name)
class Measurement(models.Model):
    """One-to-one physical measurements of an Item.

    Units are not specified in this file — confirm against callers.
    """
    item = models.OneToOneField(
        Item,
        related_name='measurements',
        null=True,
        on_delete=models.CASCADE,)
    weight = models.DecimalField(max_digits=8, decimal_places=2, null=True)
    length = models.DecimalField(max_digits=8, decimal_places=2, null=True)
    width = models.DecimalField(max_digits=8, decimal_places=2, null=True)
    height = models.DecimalField(max_digits=8, decimal_places=2, null=True)
    def __str__(self):
        return '{}'.format(self.item.name)
class Attribute(models.Model):
    """A named product attribute (e.g. a variation axis); terms hang off it."""
    name = models.CharField(max_length=200, blank=True)
    slug = models.SlugField(max_length=50)
    class Meta:
        verbose_name_plural = "03. Attributes"
    def __str__(self):
        return '{}'.format(self.name)
class Term(models.Model):
    """A concrete value of an Attribute, optionally illustrated by an image."""
    attribute = models.ForeignKey(
        Attribute,
        related_name='terms',
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    name = models.CharField(max_length=200, blank=True)
    slug = models.SlugField(max_length=50)
    img = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        # processors=[ResizeToFill(1000, 1000)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="converts image to .WebP")
    def __str__(self):
        return '{}'.format(self.name)
class ProductAttributeJoin(models.Model):
    """Used for product attributes."""
    items = models.ForeignKey(
        Item,
        related_name='product_att_join',
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    attribute = models.ForeignKey(
        Attribute,
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    # ManyToMany for product-attributes: the subset of the attribute's
    # terms that apply to this item.
    term = models.ManyToManyField(
        Term,
        blank=True)
    def __str__(self):
        return '{}'.format(self.attribute.name)
class Variation(models.Model):
    """Links a variable parent Item to one of its variation Items."""
    parent = models.ForeignKey(
        Item,
        related_name='variation_parents',
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    items = models.ForeignKey(
        Item,
        related_name='variation_products',
        null=True,
        on_delete=models.CASCADE)
    def __str__(self):
        return '{} : {}'.format(self.parent.sku, self.parent.name)
class VariationAttribute(models.Model):
    """Joins a Variation to the attribute/term pair it varies on."""
    items = models.ForeignKey(
        Item,
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    variations = models.ForeignKey(
        Variation,
        related_name='variation_attributes',
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    attributes = models.ForeignKey(
        Attribute,
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    terms = models.ForeignKey(
        Term,
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    def __str__(self):
        # Bug fix: Variation has no "product" field (only parent/items), so
        # self.variations.product.sku raised AttributeError at runtime.
        return '{}'.format(self.variations.parent.sku)
class Bundle(models.Model):
    """Links a bundle parent Item to a component Item with quantity bounds."""
    parent = models.ForeignKey(
        Item,
        related_name='bundle_parents',
        on_delete=models.CASCADE)
    items = models.ForeignKey(
        Item,
        related_name='bundle_products',
        null=True,
        on_delete=models.CASCADE)
    quantity_min = models.PositiveSmallIntegerField(default=1)
    quantity_max = models.PositiveSmallIntegerField(blank=True, null=True)
    # is_optional: the component may be omitted from the bundle.
    is_optional = models.BooleanField(default=False)
    def __str__(self):
        return '{} : {}'.format(self.parent.sku, self.parent.name)
class DigitalOption(models.Model):
    """One-to-one digital-delivery options for an Item."""
    item = models.OneToOneField(
        Item,
        related_name='digital_options',
        null=True,
        on_delete=models.CASCADE)
    name = models.CharField(max_length=200, blank=True)
    # other things like a download key, file, expiration date etc ...
    def __str__(self):
        return '{}'.format(self.item.name)
class Promotion(models.Model):
    """A dated discount applied to one or more Items.

    Exactly which of the discount fields (percentage / fixed / price /
    free shipping / bogx) is honored is decided elsewhere — confirm.
    """
    items = models.ManyToManyField(
        Item,
        related_name='promotions')
    # Self-reference: presumably another Promotion that supersedes this
    # one — TODO confirm semantics.
    promotion_override = models.ForeignKey(
        'self',
        related_name='overrides',
        null=True,
        on_delete=models.CASCADE)
    name = models.CharField(max_length=200, blank=True)
    slug = models.SlugField(max_length=50)
    begins = models.DateField(null=True)
    ends = models.DateField(null=True)
    percentage = models.DecimalField(
        max_digits=4, decimal_places=2, null=True,
        help_text="Percentage discount eg. 25% off")
    fixed = models.DecimalField(
        max_digits=8, decimal_places=2, null=True,
        help_text="Fixed discount eg. $5.00 off")
    price = models.DecimalField(
        max_digits=8, decimal_places=2, null=True,
        help_text="Fixed price eg. Sale Price $25.00")
    is_free_shipping = models.BooleanField(default=False)
    bogx = models.PositiveSmallIntegerField(
        null=True,
        help_text="Buy One Get x Free")
    def __str__(self):
        return '{}'.format(self.name)
class Marketing(models.Model):
    """One-to-one marketing copy (short/medium/long descriptions) for an Item.

    NOTE(review): description_md and description_lg share the same
    1000-character limit — possible copy/paste; confirm whether "lg"
    should be larger.
    """
    item = models.OneToOneField(
        Item,
        related_name='marketing_options',
        null=True,
        on_delete=models.CASCADE)
    description_sm = RichTextField(
        blank=True,
        null=True,
        max_length=500, help_text="500 characters max.")
    description_md = RichTextField(
        blank=True,
        null=True,
        max_length=1000, help_text="1000 characters max.")
    description_lg = RichTextField(
        blank=True,
        null=True,
        max_length=1000, help_text="1000 characters max.")
    def __str__(self):
        return '{}'.format(self.item.name)
class Image(models.Model):
    """Product image in several pre-rendered aspect ratios and sizes.

    All fields convert uploads to WebP at quality 80; the un-suffixed
    img_lg/md/sm variants keep the client-determined size (no resize).
    """
    item = models.ForeignKey(
        Item,
        related_name='images',
        null=True,
        blank=True,
        on_delete=models.CASCADE)
    name = models.CharField(max_length=100, blank=True)
    # possibly make this a calculated field as well as gross margin?
    order = models.IntegerField(
        blank=True, null=True,
        help_text='integer used to order images')
    # images client-determined size
    img_lg = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="converts to WebP format")
    img_md = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="converts to WebP format")
    img_sm = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="converts to WebP format")
    # images square 1:1
    img_1x1_lg = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(1000, 1000)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="1000px x 1000px")
    img_1x1_md = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(500, 500)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="500px x 500px")
    img_1x1_sm = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(250, 250)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="250px x 250px")
    # images 2:1
    img_2x1_lg = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(1000, 500)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="1000px x 500px")
    img_2x1_md = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(500, 250)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="500px x 250px")
    img_2x1_sm = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(250, 125)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="250px x 125px")
    # images 1:2
    img_1x2_lg = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(500, 1000)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="500px x 1000px")
    img_1x2_md = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(250, 500)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="250px x 500px")
    img_1x2_sm = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(125, 250)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="125px x 250px")
    # 16:9 (e.g. video thumbnails / wide banners)
    img_16x9 = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(1200, 675)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="16:9 1200px x 675px")
    # 1.91:1 (common social-sharing ratio)
    img_191x1 = ProcessedImageField(
        upload_to='product_images/%Y/%m/%d',
        processors=[ResizeToFill(1200, 628)],
        format='WebP',
        options={'quality': 80},
        blank=True,
        null=True,
        help_text="1.91:1 1200px x 628px")
    class Meta:
        ordering = ('order', )
    def __str__(self):
        return '{}'.format(self.name)
class ProductPartJoin(models.Model):
    """Bill-of-materials join: how many of a Part go into a Product."""
    parts = models.ForeignKey(
        Part,
        related_name='ppj_parts',
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    products = models.ForeignKey(
        Product,
        related_name='ppj_products',
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    simple_products = models.ForeignKey(
        SimpleProduct,
        related_name="ppj_simple_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    digital_products = models.ForeignKey(
        DigitalProduct,
        related_name="ppj_digital_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    bundle_products = models.ForeignKey(
        BundleProduct,
        related_name="ppj_bundle_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    variable_products = models.ForeignKey(
        VariableProduct,
        related_name="ppj_variable_products",
        blank=True,
        null=True,
        on_delete=models.CASCADE)
    quantity = models.IntegerField(
        default=1,
        help_text="How many parts per product?")
    is_unlimited = models.BooleanField(
        default=False,
        help_text="Denotes if a part should be considered unlimited.")
    use_all = models.BooleanField(
        default=False,
        help_text=(
            "Use all part inventory when the related product is "
            "brought into inventory."))
    @property
    def _ecpu(self):
        # Estimated cost per unit of the joined part; 0 when unset.
        ecpu = self.parts.ecpu if self.parts.ecpu is not None else 0
        return round(ecpu, 4)
    @property
    def _unit(self):
        # NOTE(review): delegates to Part._unit, which is not defined on
        # Item/Part in this file — possibly should be self.parts.unit; confirm.
        return self.parts._unit
| 30.890342 | 88 | 0.585703 |
0f6b684fa8726abd4734d5aa3ee861d6c842a376 | 7,632 | py | Python | examples/pwr_run/checkpointing/throughput/final2_inverse/job21.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/final2_inverse/job21.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | examples/pwr_run/checkpointing/throughput/final2_inverse/job21.py | boringlee24/keras_old | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | [
"MIT"
] | null | null | null | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
# Command-line interface: test-case name, resume flag, GPU index and the
# scheduler host node that receives status messages over port 10002.
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
# Pin training to the GPU selected on the command line.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.0008
args_model = 'vgg16'
epoch_begin_time = 0  # wall-clock start of the current epoch (0 = none yet)
job_name = sys.argv[0].split('.')[0]  # e.g. "job21" from "job21.py"
save_files = '/scratch/li.baol/checkpoint_final2_inverse/' + job_name + '*'
total_epochs = 49
starting_epoch = 0
# first step is to update the PID with the scheduler
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
    # Recover the epoch to resume from out of the checkpoint filename
    # (".../<job>_<epoch>.h5"); assumes exactly one checkpoint exists.
    save_file = glob.glob(save_files)[0]
#    epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
    starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data to [0, 1].
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
    # Center both splits on the per-pixel training-set mean.
    x_train_mean = np.mean(x_train, axis=0)
    x_train -= x_train_mean
    x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
    print('resume from checkpoint')
    # b_end / c_end messages bracket the checkpoint-load phase so the
    # scheduler can time it.
    message = job_name + ' b_end'
    send_signal.send(args.node, 10002, message)
    model = keras.models.load_model(save_file)
    message = job_name + ' c_end'
    send_signal.send(args.node, 10002, message)
else:
    print('train from start')
    # Fresh VGG backbone (untrained) plus a small dense classifier head.
    model = models.Sequential()
    if '16' in args_model:
        base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    elif '19' in args_model:
        base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
    #base_model.summary()
    #pdb.set_trace()
    model.add(base_model)
    model.add(layers.Flatten())
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
    #model.add(layers.Dropout(0.2))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=Adam(lr=args_lr),
                  metrics=['accuracy'])
    #model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: report wasted epoch time, checkpoint, and exit."""
    # first record the wasted epoch time (time spent in the aborted epoch)
    global epoch_begin_time
    if epoch_begin_time == 0:
        epoch_waste_time = 0
    else:
        epoch_waste_time = int(time.time() - epoch_begin_time)
    message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
    if epoch_waste_time > 0:
        send_signal.send(args.node, 10002, message)
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint already exists so only one remains
    for f in glob.glob(save_files):
        os.remove(f)
    model.save('/scratch/li.baol/checkpoint_final2_inverse/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    message = job_name + ' checkpoint'
    send_signal.send(args.node, 10002, message)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
# TensorBoard logs go under a test-case/job-specific directory.
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
    """Reports per-epoch timing and progress to the scheduler node."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch, first_epoch_start
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
        global epoch_begin_time
        epoch_begin_time = time.time()
        if epoch == starting_epoch and args.resume:
            # d_end marks the end of the resume overhead window.
            first_epoch_start = time.time()
            message = job_name + ' d_end'
            send_signal.send(args.node, 10002, message)
        elif epoch == starting_epoch:
            first_epoch_start = time.time()
        if epoch == starting_epoch:
            # send signal to indicate the checkpoint is qualified
            message = job_name + ' ckpt_qual'
            send_signal.send(args.node, 10002, message)
    def on_epoch_end(self, epoch, logs=None):
        if epoch == starting_epoch:
            first_epoch_time = int(time.time() - first_epoch_start)
            message = job_name + ' 1st_epoch ' + str(first_epoch_time)
            send_signal.send(args.node, 10002, message)
        # Progress is measured against half the nominal epoch budget.
        progress = round((epoch+1) / round(total_epochs/2), 2)
        message = job_name + ' completion ' + str(progress)
        send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training (half the nominal epoch budget; resumes mid-run if asked).
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=round(total_epochs/2),
          validation_data=(x_test, y_test),
          shuffle=True,
          callbacks=callbacks,
          initial_epoch=starting_epoch,
          verbose=1
          )
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate the job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| 32.755365 | 118 | 0.693658 |
765c5ea1f49a0a359498736525ce6afea0867e55 | 437 | py | Python | SumOfDigits.py | pcdoyle/Python-Challenges | 91eb3fcd4a197a1e96e00e8a0cb110251f2ab335 | [
"Unlicense"
] | null | null | null | SumOfDigits.py | pcdoyle/Python-Challenges | 91eb3fcd4a197a1e96e00e8a0cb110251f2ab335 | [
"Unlicense"
] | null | null | null | SumOfDigits.py | pcdoyle/Python-Challenges | 91eb3fcd4a197a1e96e00e8a0cb110251f2ab335 | [
"Unlicense"
] | null | null | null | """
__author__ = "Patrick Doyle"
__license__ = "The Unlicense"
__email__ = "me@pcdoyle.com"
Calculating the sum of the digits of a integer.
"""
def sumOfDigits(number):
    '''
    Return the sum of the decimal digits of a non-negative int.

    Negative inputs fall straight through the loop and return 0,
    preserving the original behaviour.
    '''
    numberSum = 0
    while number > 0:
        # divmod peels off the last digit and shrinks the number in one
        # step; the old `number -= number % 10` before `//= 10` was
        # redundant work (floor division already discards the remainder).
        number, digit = divmod(number, 10)
        numberSum += digit
    return numberSum
if __name__ == "__main__":
    # Quick manual check: the digits of 123456 sum to 21.
    print(sumOfDigits(123456))
| 19 | 47 | 0.615561 |
aaf5cab3daa5e2b4f203edfa02effe9aa7e494a3 | 19,101 | py | Python | dp/cloud/python/magma/test_runner/tests/test_dp_with_orc8r.py | sreedharkumartn/magma | 648289fb336ffbca0fceededb6eb33c263a48703 | [
"BSD-3-Clause"
] | null | null | null | dp/cloud/python/magma/test_runner/tests/test_dp_with_orc8r.py | sreedharkumartn/magma | 648289fb336ffbca0fceededb6eb33c263a48703 | [
"BSD-3-Clause"
] | null | null | null | dp/cloud/python/magma/test_runner/tests/test_dp_with_orc8r.py | sreedharkumartn/magma | 648289fb336ffbca0fceededb6eb33c263a48703 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright 2022 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
import operator
from contextlib import contextmanager
from datetime import datetime, timezone
from http import HTTPStatus
from threading import Event, Thread
from time import sleep
from typing import Any, Dict, List, Optional
from uuid import uuid4
import pytest
import requests
from dp.protos.enodebd_dp_pb2 import CBSDRequest, CBSDStateResult, LteChannel
from magma.test_runner.config import TestConfig
from magma.test_runner.tests.integration_testcase import (
DomainProxyIntegrationTestCase,
)
from retrying import retry
config = TestConfig()
DP_HTTP_PREFIX = 'magma/v1/dp'
NETWORK = 'some_network'
SOME_FCC_ID = "some_fcc_id"
OTHER_FCC_ID = "other_fcc_id"
USER_ID = "some_user_id"
# Endpoint names used in the "from"/"to" message-log filters.
SAS = 'SAS'
DP = 'DP'
# Timestamp far in the past, used as an always-matching "begin" filter.
DATETIME_WAY_BACK = '2010-03-28T09:13:25.407877399+00:00'
@pytest.mark.orc8r
class DomainProxyOrc8rTestCase(DomainProxyIntegrationTestCase):
def setUp(self) -> None:
    # Unique serial number per test so concurrent/repeated runs don't
    # collide on backend state.
    self.serial_number = self._testMethodName + '_' + str(uuid4())
def test_cbsd_sas_flow(self):
    """Provisioned CBSD exchanges the full SAS message sequence, then deletes."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    cbsd_id = self.given_cbsd_provisioned(builder)
    with self.while_cbsd_is_active():
        when_elastic_indexes_data()
        # SAS->DP provision messages must appear in the message log.
        logs = self.when_logs_are_fetched(get_current_sas_filters(self.serial_number))
        self.then_logs_are(logs, self.get_sas_provision_messages())
        # Heartbeats should keep flowing while the CBSD is active.
        filters = get_filters_for_request_type('heartbeat', self.serial_number)
        self.then_message_is_eventually_sent(filters)
    self.delete_cbsd(cbsd_id)
def test_cbsd_unregistered_when_requested_by_desired_state(self):
    """Setting desired_state=unregistered triggers a SAS deregistration."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    cbsd_id = self.given_cbsd_provisioned(builder)
    with self.while_cbsd_is_active():
        filters = get_filters_for_request_type('deregistration', self.serial_number)
        builder = builder.with_desired_state('unregistered')
        self.when_cbsd_is_updated(cbsd_id, builder.build_post_data())
        # TODO maybe asking for state (cbsd api instead of log api) would be better
        self.then_message_is_eventually_sent(filters)
def test_sas_flow_restarted_when_user_requested_deregistration(self):
    """Deregistering restarts the SAS flow until a grant is re-acquired."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    cbsd_id = self.given_cbsd_provisioned(builder)
    with self.while_cbsd_is_active():
        filters = get_filters_for_request_type('deregistration', self.serial_number)
        self.when_cbsd_is_deregistered(cbsd_id)
        self.then_message_is_eventually_sent(filters)
        # The CBSD should re-register and regain its grant automatically.
        self.then_state_is_eventually(builder.build_grant_state_data())
def test_sas_flow_restarted_for_updated_cbsd(self):
    """Changing the FCC id forces deregistration and a fresh SAS flow."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    cbsd_id = self.given_cbsd_provisioned(builder)
    with self.while_cbsd_is_active():
        builder = builder.with_fcc_id(OTHER_FCC_ID)
        self.when_cbsd_is_updated(cbsd_id, builder.build_post_data())
        filters = get_filters_for_request_type('deregistration', self.serial_number)
        self.then_message_is_eventually_sent(filters)
        # Flow restarts and converges back to a granted, active CBSD.
        self.then_state_is_eventually(builder.build_grant_state_data())
        cbsd = self.when_cbsd_is_fetched(builder.serial_number)
        self.then_cbsd_is(cbsd, builder.build_registered_active_data())
    self.delete_cbsd(cbsd_id)
def test_activity_status(self):
    """CBSD reads active right after provisioning and inactive after a quiet period."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    # Fix: the returned cbsd id was bound to an unused local.
    self.given_cbsd_provisioned(builder)
    cbsd = self.when_cbsd_is_fetched(builder.serial_number)
    self.then_cbsd_is(cbsd, builder.build_registered_active_data())
    self.when_cbsd_is_inactive()
    cbsd = self.when_cbsd_is_fetched(builder.serial_number)
    self.then_cbsd_is(cbsd, builder.build_registered_inactive_data())
def test_frequency_preferences(self):
    """Preferred bandwidth/frequency is honored in the resulting grant."""
    builder = CbsdAPIDataBuilder(). \
        with_serial_number(self.serial_number). \
        with_frequency_preferences(5, [3625]). \
        with_expected_grant(5, 3625, 31)
    cbsd_id = self.given_cbsd_provisioned(builder)
    self.delete_cbsd(cbsd_id)
def test_creating_cbsd_with_the_same_unique_fields_returns_409(self):
    """Duplicate creation with identical unique fields is rejected with 409."""
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    self.when_cbsd_is_created(builder.build_post_data())
    self.when_cbsd_is_created(builder.build_post_data(), expected_status=HTTPStatus.CONFLICT)
def test_create_cbsd_with_single_step_fields(self):
    """Single-step registration payload round-trips through create/fetch."""
    # TODO extend the test to check if the registration actually works
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    self.when_cbsd_is_created(builder.build_unregistered_single_step_data())
    cbsd = self.when_cbsd_is_fetched(builder.serial_number)
    self.then_cbsd_is(cbsd, builder.build_unregistered_single_step_data())
def test_updating_cbsd_returns_409_when_setting_existing_serial_num(self):
    """Updating one CBSD to another's serial number is rejected with 409."""
    builder = CbsdAPIDataBuilder()
    cbsd1_serial = self.serial_number + "_foo"
    cbsd2_serial = self.serial_number + "_bar"
    cbsd1_payload = builder.with_serial_number(cbsd1_serial).build_post_data()
    cbsd2_payload = builder.with_serial_number(cbsd2_serial).build_post_data()
    self.when_cbsd_is_created(cbsd1_payload)
    self.when_cbsd_is_created(cbsd2_payload)
    cbsd2 = self.when_cbsd_is_fetched(serial_number=cbsd2_serial)
    # Attempt to steal cbsd1's serial number via an update of cbsd2.
    self.when_cbsd_is_updated(
        cbsd_id=cbsd2.get("id"),
        data=cbsd1_payload,
        expected_status=HTTPStatus.CONFLICT,
    )
def test_fetch_cbsds_filtered_by_serial_number(self):
    """Fetching by serial number returns exactly the matching CBSD."""
    cbsd1_serial = self.serial_number + "_foo"
    cbsd2_serial = self.serial_number + "_bar"
    builder1 = CbsdAPIDataBuilder().with_serial_number(cbsd1_serial)
    builder2 = CbsdAPIDataBuilder().with_serial_number(cbsd2_serial)
    self.when_cbsd_is_created(builder1.build_post_data())
    self.when_cbsd_is_created(builder2.build_post_data())
    cbsd1 = self.when_cbsd_is_fetched(serial_number=cbsd1_serial)
    cbsd2 = self.when_cbsd_is_fetched(serial_number=cbsd2_serial)
    self.then_cbsd_is(cbsd1, builder1.build_unregistered_data())
    self.then_cbsd_is(cbsd2, builder2.build_unregistered_data())
def test_fetching_logs_with_custom_filters(self):
    """Exercise the log endpoint's filter combinations against a live cbsd.

    Each scenario is a (filters, comparison-operator, reference-count)
    tuple checked by _verify_logs_count.
    """
    builder = CbsdAPIDataBuilder().with_serial_number(self.serial_number)
    # 'end' is captured before provisioning, so no SAS->DP traffic can
    # exist yet in that window: expect 0 results.
    sas_to_dp_end_date_only = {
        'serial_number': self.serial_number,
        'from': SAS,
        'to': DP,
        'end': now(),
    }
    sas_to_dp_begin_date_only = {
        'serial_number': self.serial_number,
        'from': SAS,
        'to': DP,
        'begin': DATETIME_WAY_BACK,
    }
    sas_to_dp_end_date_too_early = {
        'serial_number': self.serial_number,
        'from': SAS,
        'to': DP,
        'end': DATETIME_WAY_BACK,
    }
    dp_to_sas = {
        'serial_number': self.serial_number,
        'from': DP,
        'to': SAS,
    }
    dp_to_sas_incorrect_serial_number = {
        'serial_number': 'incorrect_serial_number',
        'from': DP,
        'to': SAS,
    }
    sas_to_dp_with_limit = {
        'limit': '100',
        'from': SAS,
        'to': DP,
    }
    sas_to_dp_with_limit_and_too_large_offset = {
        'limit': '100',
        'offset': '100',
        'from': SAS,
        'to': DP,
    }
    # (filters, operator applied as operator(actual, reference), reference)
    scenarios = [
        (sas_to_dp_end_date_only, operator.eq, 0),
        (sas_to_dp_begin_date_only, operator.gt, 3),
        (sas_to_dp_end_date_too_early, operator.eq, 0),
        (dp_to_sas, operator.gt, 0),
        (dp_to_sas_incorrect_serial_number, operator.eq, 0),
        (sas_to_dp_with_limit, operator.gt, 3),
        (sas_to_dp_with_limit_and_too_large_offset, operator.eq, 0),
    ]
    self.given_cbsd_provisioned(builder)
    with self.while_cbsd_is_active():
        # Elasticsearch indexing is asynchronous; wait before querying.
        when_elastic_indexes_data()
        for params in scenarios:
            self._verify_logs_count(params)
def given_cbsd_provisioned(self, builder: CbsdAPIDataBuilder) -> int:
    """Create a cbsd and drive it through provisioning to an authorized grant.

    Returns the backend id of the provisioned cbsd.
    """
    self.when_cbsd_is_created(builder.build_post_data())
    cbsd = self.when_cbsd_is_fetched(builder.serial_number)
    self.then_cbsd_is(cbsd, builder.build_unregistered_data())
    # Before provisioning completes the radio must report the empty state.
    state = self.when_cbsd_asks_for_state()
    self.then_state_is(state, get_empty_state())
    # Retry until the domain proxy reports the expected authorized grant.
    self.then_state_is_eventually(builder.build_grant_state_data())
    cbsd = self.when_cbsd_is_fetched(builder.serial_number)
    self.then_cbsd_is(cbsd, builder.build_registered_active_data())
    return cbsd['id']
def when_cbsd_is_created(self, data: Dict[str, Any], expected_status: int = HTTPStatus.CREATED):
    """POST *data* to the cbsds collection and assert the response status."""
    r = send_request_to_backend('post', 'cbsds', json=data)
    self.assertEqual(r.status_code, expected_status)
def when_cbsd_is_fetched(self, serial_number: Optional[str] = None) -> Dict[str, Any]:
    """Fetch and return the single cbsd registered under *serial_number*.

    Asserts (via _check_for_cbsd) that exactly one match exists.
    """
    return self._check_for_cbsd(serial_number=serial_number)
def when_logs_are_fetched(self, params: Dict[str, Any]) -> Dict[str, Any]:
    """GET the logs endpoint with *params* and return the decoded JSON body."""
    response = send_request_to_backend('get', 'logs', params=params)
    self.assertEqual(response.status_code, HTTPStatus.OK)
    return response.json()
def when_cbsd_is_deleted(self, cbsd_id: int):
    """DELETE the cbsd with *cbsd_id* and assert a 204 NO CONTENT reply."""
    r = send_request_to_backend('delete', f'cbsds/{cbsd_id}')
    self.assertEqual(r.status_code, HTTPStatus.NO_CONTENT)
def when_cbsd_is_updated(self, cbsd_id: int, data: Dict[str, Any], expected_status: int = HTTPStatus.NO_CONTENT):
    """PUT *data* to the cbsd with *cbsd_id* and assert the response status."""
    r = send_request_to_backend('put', f'cbsds/{cbsd_id}', json=data)
    self.assertEqual(r.status_code, expected_status)
def when_cbsd_is_deregistered(self, cbsd_id: int):
    """POST to the cbsd's deregister action and assert a 204 NO CONTENT reply."""
    r = send_request_to_backend('post', f'cbsds/{cbsd_id}/deregister')
    self.assertEqual(r.status_code, HTTPStatus.NO_CONTENT)
def when_cbsd_asks_for_state(self) -> CBSDStateResult:
    """Query the domain proxy (gRPC) for this test's cbsd state.

    The call doubles as a keep-alive: the backend tracks the request
    time to decide whether the cbsd is active.
    """
    return self.dp_client.GetCBSDState(get_cbsd_request(self.serial_number))
@staticmethod
def when_cbsd_is_inactive():
    """Sleep long enough for the backend to mark the cbsd inactive.

    Wait = inactivity timeout + two polling intervals + safety margin,
    all in seconds.
    """
    inactivity = 3
    polling = 1
    delta = 3
    total_wait_time = inactivity + 2 * polling + delta
    sleep(total_wait_time)
@contextmanager
def while_cbsd_is_active(self):
    """Keep the cbsd 'active' for the duration of the managed block.

    A background thread polls the cbsd state once per second (each poll
    counts as activity) until the context exits; the thread is always
    stopped and joined, even if the body raises.
    """
    done = Event()

    def keep_asking_for_state():
        # wait(timeout=1) doubles as the 1 s polling interval and the
        # shutdown signal check.
        while not done.wait(timeout=1):
            self.when_cbsd_asks_for_state()

    t = Thread(target=keep_asking_for_state)
    try:
        t.start()
        yield
    finally:
        done.set()
        t.join()
def then_cbsd_is(self, actual: Dict[str, Any], expected: Dict[str, Any]):
    """Assert a fetched cbsd matches *expected*, ignoring volatile fields.

    The server-assigned ``id`` and the grant expiry timestamps are
    stripped from copies before comparison. The original shallow
    ``actual.copy()`` still shared the nested ``grant`` dict, so the
    ``del`` statements mutated the caller's argument; the nested dict is
    now copied as well, leaving the input untouched.
    """
    actual = dict(actual)
    del actual['id']
    grant = actual.get('grant')
    if grant:
        # Copy the nested dict too so the caller's data is not mutated.
        grant = dict(grant)
        actual['grant'] = grant
        del grant['grant_expire_time']
        del grant['transmit_expire_time']
    self.assertEqual(actual, expected)
def then_cbsd_is_deleted(self, serial_number: str):
    """Assert that no cbsd with *serial_number* exists anymore."""
    self._check_for_cbsd(serial_number=serial_number, should_exist=False)
def then_state_is(self, actual: CBSDStateResult, expected: CBSDStateResult):
    """Assert two gRPC state results are equal."""
    self.assertEqual(actual, expected)
@retry(stop_max_attempt_number=30, wait_fixed=1000)
def then_state_is_eventually(self, expected: CBSDStateResult):
    """Retry (up to 30 x 1 s) until the cbsd state equals *expected*."""
    actual = self.when_cbsd_asks_for_state()
    self.then_state_is(actual, expected)
def then_logs_are(self, actual: Dict[str, Any], expected: List[str]):
    """Assert that the fetched log entry types equal *expected*, in order."""
    observed_types = [entry['type'] for entry in actual['logs']]
    self.assertEqual(observed_types, expected)
@retry(stop_max_attempt_number=60, wait_fixed=1000)
def then_message_is_eventually_sent(self, filters: Dict[str, Any]):
    """Retry (up to 60 x 1 s) until exactly one log entry matches *filters*."""
    logs = self.when_logs_are_fetched(filters)
    self.assertEqual(logs["total_count"], 1)
def delete_cbsd(self, cbsd_id: int):
    """Delete a cbsd and verify the deregistration is propagated to SAS."""
    # Build the filters first so their 'begin' timestamp precedes the
    # deletion: the deregistration message is searched from now onward.
    filters = get_filters_for_request_type('deregistration', self.serial_number)
    self.when_cbsd_is_deleted(cbsd_id)
    self.then_cbsd_is_deleted(self.serial_number)
    state = self.when_cbsd_asks_for_state()
    self.then_state_is(state, get_empty_state())
    self.then_message_is_eventually_sent(filters)
@staticmethod
def get_sas_provision_messages() -> List[str]:
names = ['heartbeat', 'grant', 'spectrumInquiry', 'registration']
return [f'{x}Response' for x in names]
def _verify_logs_count(self, params):
using_filters, _operator, expected_count = params
logs = self.when_logs_are_fetched(using_filters)
logs_len = len(logs["logs"])
comparison = _operator(logs_len, expected_count)
self.assertTrue(comparison)
def _check_for_cbsd(self, serial_number: str, should_exist: bool = True) -> Optional[Dict[str, Any]]:
    """Query cbsds by serial number and assert presence or absence.

    Returns the single matching cbsd when should_exist is True,
    otherwise returns None (after asserting there is no match).
    """
    params = {'serial_number': serial_number}
    expected_count = 1 if should_exist else 0
    r = send_request_to_backend('get', 'cbsds', params=params)
    self.assertEqual(r.status_code, HTTPStatus.OK)
    data = r.json()
    # Both the reported total and the returned page must agree.
    total_count = data.get('total_count')
    self.assertEqual(total_count, expected_count)
    cbsds = data.get('cbsds', [])
    self.assertEqual(len(cbsds), expected_count)
    if should_exist:
        return cbsds[0]
def get_current_sas_filters(serial_number: str) -> Dict[str, Any]:
    """Filters selecting SAS->DP log entries for *serial_number* up to now."""
    filters = {
        'serial_number': serial_number,
        'from': SAS,
        'to': DP,
    }
    filters['end'] = now()
    return filters
def get_filters_for_request_type(request_type: str, serial_number: str) -> Dict[str, Any]:
    """Filters selecting *request_type* responses for *serial_number* from now on."""
    response_type = request_type + 'Response'
    return {
        'serial_number': serial_number,
        'type': response_type,
        'begin': now(),
    }
def get_empty_state() -> CBSDStateResult:
    """State result for an unprovisioned cbsd: radio disabled, no channel."""
    return CBSDStateResult(radio_enabled=False)
def get_cbsd_request(serial_number: str) -> CBSDRequest:
    """Build a gRPC state request identifying the cbsd by serial number."""
    return CBSDRequest(serial_number=serial_number)
def now() -> str:
    """Current UTC time as an ISO-8601 formatted string."""
    current_utc = datetime.now(timezone.utc)
    return current_utc.isoformat()
@retry(stop_max_attempt_number=30, wait_fixed=1000)
def wait_for_elastic_to_start() -> None:
    """Retry (up to 30 x 1 s) until Elasticsearch accepts HTTP connections.

    NOTE(review): only connection errors trigger a retry; any HTTP
    response, including an error status, ends the wait -- presumably
    "accepting connections" is considered started. Verify if a stricter
    readiness check is needed.
    """
    requests.get(f'{config.ELASTICSEARCH_URL}/_status')
def when_elastic_indexes_data():
    """Give Elasticsearch time to index recently produced log documents."""
    # TODO use retrying instead
    sleep(15)
def _delete_dp_elasticsearch_indices() -> None:
    """Drop all domain-proxy Elasticsearch indices (prefix wildcard match)."""
    requests.delete(f"{config.ELASTICSEARCH_URL}/{config.ELASTICSEARCH_INDEX}*")
def send_request_to_backend(
    method: str, url_suffix: str, params: Optional[Dict[str, Any]] = None,
    json: Optional[Dict[str, Any]] = None,
) -> requests.Response:
    """Issue an authenticated HTTP request against the DP backend API.

    The full URL is assembled from the configured server, the DP prefix
    and the test network; client TLS certificates authenticate the call.
    """
    return requests.request(
        method,
        f'{config.HTTP_SERVER}/{DP_HTTP_PREFIX}/{NETWORK}/{url_suffix}',
        cert=(config.DP_CERT_PATH, config.DP_SSL_KEY_PATH),
        # Test environment uses a self-signed server certificate.
        verify=False,  # noqa: S501
        params=params,
        json=json,
    )
class CbsdAPIDataBuilder:
    """Fluent builder for cbsd API payloads and expected API/gRPC responses.

    Defaults describe a category-B multi-step cbsd with a random serial
    number, expecting a 10 MHz authorized grant at 3625 MHz.
    """

    def __init__(self):
        self.serial_number = str(uuid4())
        self.fcc_id = SOME_FCC_ID
        # Frequency preferences sent to the backend.
        self.preferred_bandwidth_mhz = 20
        self.preferred_frequencies_mhz = []
        # Expected grant parameters (see build_grant_state_data).
        self.frequency_mhz = 3625
        self.bandwidth_mhz = 10
        self.max_eirp = 28
        self.desired_state = 'registered'

    def with_serial_number(self, serial_number: str) -> CbsdAPIDataBuilder:
        """Set the cbsd serial number; returns self for chaining."""
        self.serial_number = serial_number
        return self

    def with_fcc_id(self, fcc_id: str) -> CbsdAPIDataBuilder:
        """Set the FCC id; returns self for chaining."""
        self.fcc_id = fcc_id
        return self

    def with_frequency_preferences(self, bandwidth_mhz: int, frequencies_mhz: List[int]) -> CbsdAPIDataBuilder:
        """Set the requested bandwidth and candidate frequencies; returns self."""
        self.preferred_bandwidth_mhz = bandwidth_mhz
        self.preferred_frequencies_mhz = frequencies_mhz
        return self

    def with_desired_state(self, desired_state: str) -> CbsdAPIDataBuilder:
        """Set the desired cbsd state (e.g. 'registered'); returns self."""
        self.desired_state = desired_state
        return self

    def with_expected_grant(self, bandwidth_mhz: int, frequency_mhz: int, max_eirp: int) -> CbsdAPIDataBuilder:
        """Set the grant parameters expected after provisioning; returns self."""
        self.bandwidth_mhz = bandwidth_mhz
        self.frequency_mhz = frequency_mhz
        self.max_eirp = max_eirp
        return self

    def build_post_data(self) -> Dict[str, Any]:
        """Payload for creating a multi-step, category-B cbsd."""
        return {
            'capabilities': {
                'antenna_gain': 15,
                'max_power': 20,
                'min_power': 0,
                'number_of_antennas': 2,
            },
            'frequency_preferences': {
                'bandwidth_mhz': self.preferred_bandwidth_mhz,
                'frequencies_mhz': self.preferred_frequencies_mhz,
            },
            'fcc_id': self.fcc_id,
            'serial_number': self.serial_number,
            'user_id': USER_ID,
            'desired_state': self.desired_state,
            "single_step_enabled": False,
            "cbsd_category": "b",
        }

    def build_unregistered_single_step_data(self):
        """Expected API view of an unregistered single-step category-A cbsd."""
        data = self.build_unregistered_data()
        data.update({
            'single_step_enabled': True,
            'cbsd_category': 'a',
        })
        return data

    def build_unregistered_data(self) -> Dict[str, Any]:
        """Expected API view of a freshly created (unregistered) cbsd."""
        data = self.build_post_data()
        data.update({
            'is_active': False,
            'state': 'unregistered',
        })
        return data

    def build_registered_inactive_data(self) -> Dict[str, Any]:
        """Expected API view of a registered cbsd that is not phoning home."""
        data = self.build_post_data()
        data.update({
            # SAS-assigned id is '<fcc_id>/<serial_number>'.
            'cbsd_id': f'{self.fcc_id}/{self.serial_number}',
            'is_active': False,
            'state': 'registered',
        })
        return data

    def build_registered_active_data(self) -> Dict[str, Any]:
        """Expected API view of an active, registered cbsd with its grant."""
        data = self.build_registered_inactive_data()
        data.update({
            'is_active': True,
            'grant': {
                'bandwidth_mhz': self.bandwidth_mhz,
                'frequency_mhz': self.frequency_mhz,
                'max_eirp': self.max_eirp,
                'state': 'authorized',
            },
        })
        return data

    def build_grant_state_data(self) -> CBSDStateResult:
        """Expected gRPC state once the grant is authorized.

        The LTE channel is centered on frequency_mhz and spans
        bandwidth_mhz, converted to Hz.
        """
        frequency_hz = int(1e6) * self.frequency_mhz
        half_bandwidth_hz = int(5e5) * self.bandwidth_mhz
        return CBSDStateResult(
            radio_enabled=True,
            channel=LteChannel(
                low_frequency_hz=frequency_hz - half_bandwidth_hz,
                high_frequency_hz=frequency_hz + half_bandwidth_hz,
                max_eirp_dbm_mhz=self.max_eirp,
            ),
        )
| 36.244782 | 117 | 0.670384 |
cf2646683fa5e67f8aee98227fe4cfbfdc70da35 | 2,904 | py | Python | examples/pybullet/gym/pybullet_envs/bullet/minitaur_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 9,136 | 2015-01-02T00:41:45.000Z | 2022-03-31T15:30:02.000Z | examples/pybullet/gym/pybullet_envs/bullet/minitaur_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,424 | 2015-01-05T08:55:58.000Z | 2022-03-30T19:34:55.000Z | examples/pybullet/gym/pybullet_envs/bullet/minitaur_env_randomizer.py | felipeek/bullet3 | 6a59241074720e9df119f2f86bc01765917feb1e | [
"Zlib"
] | 2,921 | 2015-01-02T10:19:30.000Z | 2022-03-31T02:48:42.000Z | """Randomize the minitaur_gym_env when reset() is called."""
import random
import numpy as np
from . import env_randomizer_base
# Relative range.
MINITAUR_BASE_MASS_ERROR_RANGE = (-0.2, 0.2) # 0.2 means 20%
MINITAUR_LEG_MASS_ERROR_RANGE = (-0.2, 0.2) # 0.2 means 20%
# Absolute range.
BATTERY_VOLTAGE_RANGE = (14.8, 16.8) # Unit: Volt
MOTOR_VISCOUS_DAMPING_RANGE = (0, 0.01) # Unit: N*m*s/rad (torque/angular vel)
MINITAUR_LEG_FRICTION = (0.8, 1.5) # Unit: dimensionless
class MinitaurEnvRandomizer(env_randomizer_base.EnvRandomizerBase):
    """A randomizer that change the minitaur_gym_env during every reset."""

    def __init__(self,
                 minitaur_base_mass_err_range=MINITAUR_BASE_MASS_ERROR_RANGE,
                 minitaur_leg_mass_err_range=MINITAUR_LEG_MASS_ERROR_RANGE,
                 battery_voltage_range=BATTERY_VOLTAGE_RANGE,
                 motor_viscous_damping_range=MOTOR_VISCOUS_DAMPING_RANGE,
                 minitaur_leg_friction_range=MINITAUR_LEG_FRICTION):
        """Store the randomization ranges.

        Args:
          minitaur_base_mass_err_range: relative base mass error, e.g. (-0.2, 0.2).
          minitaur_leg_mass_err_range: relative leg mass error.
          battery_voltage_range: absolute battery voltage range in Volt.
          motor_viscous_damping_range: absolute damping range in N*m*s/rad.
          minitaur_leg_friction_range: dimensionless foot friction range
            (new parameter; defaults to the previous hard-coded constant).
        """
        self._minitaur_base_mass_err_range = minitaur_base_mass_err_range
        self._minitaur_leg_mass_err_range = minitaur_leg_mass_err_range
        self._battery_voltage_range = battery_voltage_range
        self._motor_viscous_damping_range = motor_viscous_damping_range
        self._minitaur_leg_friction_range = minitaur_leg_friction_range

    def randomize_env(self, env):
        """Randomize the minitaur inside *env* (called by the gym env on reset)."""
        self._randomize_minitaur(env.minitaur)

    def _randomize_minitaur(self, minitaur):
        """Randomize various physical properties of minitaur.

        It randomizes the mass/inertia of the base, mass/inertia of the legs,
        friction coefficient of the feet, the battery voltage and the motor damping
        at each reset() of the environment.

        Bug fix: the battery voltage, motor damping and foot friction now use
        the ranges configured in __init__ instead of always reading the module
        constants, so constructor arguments actually take effect.

        Args:
          minitaur: the Minitaur instance in minitaur_gym_env environment.
        """
        base_mass = minitaur.GetBaseMassFromURDF()
        randomized_base_mass = random.uniform(
            base_mass * (1.0 + self._minitaur_base_mass_err_range[0]),
            base_mass * (1.0 + self._minitaur_base_mass_err_range[1]))
        minitaur.SetBaseMass(randomized_base_mass)
        leg_masses = minitaur.GetLegMassesFromURDF()
        leg_masses_lower_bound = np.array(leg_masses) * (1.0 + self._minitaur_leg_mass_err_range[0])
        leg_masses_upper_bound = np.array(leg_masses) * (1.0 + self._minitaur_leg_mass_err_range[1])
        randomized_leg_masses = [
            np.random.uniform(leg_masses_lower_bound[i], leg_masses_upper_bound[i])
            for i in range(len(leg_masses))
        ]
        minitaur.SetLegMasses(randomized_leg_masses)
        randomized_battery_voltage = random.uniform(self._battery_voltage_range[0],
                                                    self._battery_voltage_range[1])
        minitaur.SetBatteryVoltage(randomized_battery_voltage)
        randomized_motor_damping = random.uniform(self._motor_viscous_damping_range[0],
                                                  self._motor_viscous_damping_range[1])
        minitaur.SetMotorViscousDamping(randomized_motor_damping)
        randomized_foot_friction = random.uniform(self._minitaur_leg_friction_range[0],
                                                  self._minitaur_leg_friction_range[1])
        minitaur.SetFootFriction(randomized_foot_friction)
f7e0f830a9f5f2091aab43d4ad34a4ecd86885a4 | 187 | py | Python | jammy_app.py | ContinuumBridge/jammy_app | 51d0b5290b135dbf3aa03d60f4d12343fb7de83f | [
"MIT"
] | null | null | null | jammy_app.py | ContinuumBridge/jammy_app | 51d0b5290b135dbf3aa03d60f4d12343fb7de83f | [
"MIT"
] | null | null | null | jammy_app.py | ContinuumBridge/jammy_app | 51d0b5290b135dbf3aa03d60f4d12343fb7de83f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# jammy_app.py
# Copyright (C) ContinuumBridge Limited, 2015 - All Rights Reserved
# Written by Peter Claydon
#
import sys
from jammy_app_a import App
# Entry point: hand the full argv to the app, which parses its own options.
App(sys.argv)
| 20.777778 | 67 | 0.764706 |
83a109999d15b1d164525e6825ac6dc06b73446b | 4,724 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/show_product_request.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/show_product_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/show_product_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class ShowProductRequest:
    """Request model for querying a single IoTDA product.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    # Attributes whose values must be masked in to_dict() output.
    sensitive_list = []

    openapi_types = {
        'instance_id': 'str',
        'product_id': 'str',
        'app_id': 'str'
    }

    attribute_map = {
        'instance_id': 'Instance-Id',
        'product_id': 'product_id',
        'app_id': 'app_id'
    }

    def __init__(self, instance_id=None, product_id=None, app_id=None):
        """ShowProductRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self._product_id = None
        self._app_id = None
        self.discriminator = None
        if instance_id is not None:
            self.instance_id = instance_id
        self.product_id = product_id
        if app_id is not None:
            self.app_id = app_id

    @property
    def instance_id(self):
        """Gets the instance_id of this ShowProductRequest.

        Instance ID. Unique identifier of an instance under physical
        multi-tenancy. Regular Huawei Cloud tenants do not need to carry
        this parameter; it is only required when accessing the API from
        the management plane in physical multi-tenant scenarios.

        :return: The instance_id of this ShowProductRequest.
        :rtype: str
        """
        return self._instance_id

    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this ShowProductRequest.

        Instance ID. Unique identifier of an instance under physical
        multi-tenancy; see the getter for when it is required.

        :param instance_id: The instance_id of this ShowProductRequest.
        :type: str
        """
        self._instance_id = instance_id

    @property
    def product_id(self):
        """Gets the product_id of this ShowProductRequest.

        Product ID, uniquely identifying a product; assigned by the IoT
        platform when the product is created. Value range: at most 36
        characters; only letters, digits, underscores (_) and hyphens (-).

        :return: The product_id of this ShowProductRequest.
        :rtype: str
        """
        return self._product_id

    @product_id.setter
    def product_id(self, product_id):
        """Sets the product_id of this ShowProductRequest.

        Product ID; see the getter for format constraints.

        :param product_id: The product_id of this ShowProductRequest.
        :type: str
        """
        self._product_id = product_id

    @property
    def app_id(self):
        """Gets the app_id of this ShowProductRequest.

        Resource space ID. Optional; users who own multiple resource
        spaces must carry this parameter to specify which resource space
        the queried product belongs to, otherwise the API reports an
        error. Value range: at most 36 characters; only letters, digits,
        underscores (_) and hyphens (-).

        :return: The app_id of this ShowProductRequest.
        :rtype: str
        """
        return self._app_id

    @app_id.setter
    def app_id(self, app_id):
        """Sets the app_id of this ShowProductRequest.

        Resource space ID; see the getter for when it is required.

        :param app_id: The app_id of this ShowProductRequest.
        :type: str
        """
        self._app_id = app_id

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ShowProductRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.804878 | 177 | 0.587003 |
32a00b28e3b38edeb4b04c5b369de2247487de93 | 1,196 | py | Python | scripts/servo.py | EqualExperts/eesa | 7efe5e086ae2e3593ebf215929d91984761327ca | [
"Apache-2.0"
] | 8 | 2017-10-18T12:55:27.000Z | 2019-12-03T22:59:41.000Z | scripts/servo.py | EqualExperts/eesa | 7efe5e086ae2e3593ebf215929d91984761327ca | [
"Apache-2.0"
] | 3 | 2018-12-12T08:51:00.000Z | 2018-12-14T15:30:26.000Z | scripts/servo.py | EqualExperts/eesa | 7efe5e086ae2e3593ebf215929d91984761327ca | [
"Apache-2.0"
] | 3 | 2019-11-22T11:29:27.000Z | 2019-12-03T22:59:55.000Z | from __future__ import print_function
from threading import Timer
import sys
sys.path.append("/home/apsync/dronekit-python/")
print(sys.path)
import time
from dronekit import connect, VehicleMode, mavutil
# MAVLink endpoint exposed locally (e.g. by APSync/mavlink-router).
connection_string = '0.0.0.0:9000'
# Target PWM value taken verbatim from the command line (first argument).
pwm = sys.argv[1]
def set_servo(vehicle, servo_number, pwm_value):
    """Send a MAV_CMD_DO_SET_SERVO command moving *servo_number* to *pwm_value*.

    pwm_value is coerced to int before being packed into the command.
    """
    pwm_value_int = int(pwm_value)
    msg = vehicle.message_factory.command_long_encode(
        0, 0,  # target system, target component
        mavutil.mavlink.MAV_CMD_DO_SET_SERVO,
        0,  # confirmation
        servo_number,  # param 1: servo output number
        pwm_value_int,  # param 2: PWM value
        0, 0, 0, 0, 0  # params 3-7: unused
    )
    vehicle.send_mavlink(msg)
# vehicle.on_message('*')
# def listener(self, name, message):
# print('message: %s' % message)
def servo_test(vehicle, pwm):
    """Drive servo output 9 to *pwm*, then close the vehicle connection."""
    print("servo to %s" % pwm)
    set_servo(vehicle, 9, pwm)
    print("finished, closing connection")
    vehicle.close()
# Connect to the autopilot and print some basic telemetry before the test.
print("Connecting to plane on %s" % (connection_string,))
vehicle = connect(connection_string)
print(" GPS: %s" % vehicle.gps_0)
print(" Battery: %s" % vehicle.battery)
print(" Last Heartbeat: %s" % vehicle.last_heartbeat)
print(" Is Armable?: %s" % vehicle.is_armable)
print(" System status: %s" % vehicle.system_status.state)
print(" Mode: %s" % vehicle.mode.name)
# Run the servo test with the PWM value from the command line.
servo_test(vehicle, pwm)
| 24.916667 | 57 | 0.731605 |
204cfcf799a7d3d0e35bfac8225dd0307ba0851f | 10,018 | py | Python | CMC/AchillesEncoder_train.py | wtwt5237/Benisse | 2c7e569ff7f1d15d883576dd9487612e5ed1077f | [
"MIT"
] | null | null | null | CMC/AchillesEncoder_train.py | wtwt5237/Benisse | 2c7e569ff7f1d15d883576dd9487612e5ed1077f | [
"MIT"
] | null | null | null | CMC/AchillesEncoder_train.py | wtwt5237/Benisse | 2c7e569ff7f1d15d883576dd9487612e5ed1077f | [
"MIT"
] | null | null | null | #!usr/bin/env python3 in conda env torch_scBCR
#Adapted from CMC: https://github.com/HobbitLong/CMC
#Example code:
#python3 AchillesEncoder.py --input_data cleaned_BCRmltrain \
#--atchley_factors Atchley_factors.csv \
#--resume model_BCRmltrain \
#--break_point model_BCRmltrain/epoch_25.pt \
#--encode_dim 40 --pad_length 130
from __future__ import print_function
import sys
import os
import time
import torch
import torch.backends.cudnn as cudnn
import argparse
import socket
import pandas as pd
import csv
import numpy as np
import pickle
import re
from model_util import MyAlexNetCMC
from contrast_util import NCEAverage,AverageMeter,NCESoftmaxLoss
from torch.utils.data.sampler import SubsetRandomSampler
from data_pre import load_BCRdata,aamapping,datasetMap_nt,ntmapping
from data_util import Dataset
from random import seed,sample
from sklearn.metrics import roc_curve,auc
def parse_option():
    """Parse command-line options for training.

    Returns:
        argparse.Namespace with input_data, atchley_factors, resume,
        break_point, encode_dim and pad_length.
    """
    parser = argparse.ArgumentParser('Arguments for training')
    parser.add_argument('--input_data', type=str, help='Folder that saved data used for training')
    parser.add_argument('--atchley_factors', type=str, help='File that saved the Atchely factors')
    parser.add_argument('--resume', default='', metavar='PATH', help='Path to save the latest checkpoint (default: none)')
    parser.add_argument('--break_point', default=None, type=str, help='The latest checkpoint file to load (default: none)')
    # Fixed: the help text previously claimed "default: 80" while the actual default is 40.
    parser.add_argument('--encode_dim', type=int, default=40, help='Columns of padded atchley matrix (default: 40)')
    parser.add_argument('--pad_length', type=int, default=130, help='Length of padded nucleotide sequence (default: 130)')
    opt = parser.parse_args()
    return opt
# Load data
opt = parse_option()
full = load_BCRdata(opt)
# Map each amino acid letter to its 5 Atchley factors.
aa_dict = dict()
with open(opt.atchley_factors, 'r') as aa:
    aa_reader = csv.reader(aa)
    next(aa_reader, None)  # skip header row
    for rows in aa_reader:
        aa_name = rows[0]
        aa_factor = rows[1:len(rows)]
        aa_dict[aa_name] = np.asarray(aa_factor, dtype='float')
# Encode CDR3 (Atchley) and nucleotide (one-hot-style) representations.
cdr_full, vdj_full, cdr3_seq_full = datasetMap_nt(full, aa_dict, opt.encode_dim, opt.pad_length)
# Generator
batch_size = 512
indices = list(set(vdj_full.keys()))
# Hold out ~1% of clones as a fixed test split (seeded shuffle).
test_split = 0.01
random_seed = 123
split = int(np.floor(test_split * len(indices)))
shuffle_dataset = True
if shuffle_dataset:
    np.random.seed(random_seed)
    np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
# NCE average parameter
device = "cuda: 0"
cdr_shape = cdr_full[list(cdr_full.keys())[0]].shape[0]
n_data = len(train_indices)
n_data_test = len(test_indices)
n_out_features = 20  # dimension of the contrastive embedding
train_set = Dataset(train_indices, cdr_full, vdj_full, cdr3_seq_full)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                           shuffle=True, num_workers=1)
test_set = Dataset(test_indices, cdr_full, vdj_full, cdr3_seq_full)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size,
                                          shuffle=True, num_workers=1)
# NCE parameters
nce_k = 1      # number of negatives per positive
nce_t = 0.2    # softmax temperature
nce_m = 0.9    # memory-bank momentum
feat_dim = n_out_features
in_feature = vdj_full[list(vdj_full.keys())[0]].size()[0]
# Training parameters
lr = 0.001
momentum = 0.9
weight_decay = 0.0001
gradient_clip = 5
# Set model
model = MyAlexNetCMC(in_feature=in_feature, feat_dim=feat_dim).cuda()
contrast = NCEAverage(n_out_features, n_data, nce_k, nce_t, nce_m).cuda()
criterion_cdr = NCESoftmaxLoss().cuda()
criterion_vdj = NCESoftmaxLoss().cuda()
# Set optimizer
optimizer = torch.optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=momentum,
                            weight_decay=weight_decay)
# Optional: resume training from the latest break point
model_file = opt.break_point
if model_file is not None:
    print("=> loading checkpoint '{}'".format(model_file))
    checkpoint = torch.load(model_file, map_location=device)
    # Resume from the epoch after the saved one.
    start_epoch = checkpoint['epoch'] + 1
    model.load_state_dict(checkpoint['model'])
    optimizer.load_state_dict(checkpoint['optimizer'])
    contrast.load_state_dict(checkpoint['contrast'])
    del checkpoint  # free GPU/CPU memory held by the checkpoint dict
else:
    start_epoch = 0
#Training function
def train(epoch, train_loader, model, contrast, criterion_cdr, criterion_vdj, optimizer,
gradient_clip=10, print_freq=1):
"""
One epoch training
"""
model.train()
contrast.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
cdr_loss_meter = AverageMeter()
vdj_loss_meter = AverageMeter()
cdr_prob_meter = AverageMeter()
vdj_prob_meter = AverageMeter()
end = time.time()
for idx, (data, index) in enumerate(train_loader):
data_time.update(time.time() - end)
batch_size = data['cdr'].size(0)
index = index.cuda()
for _ in data.keys():
data[_] = data[_].float().cuda()
# ===================forward=====================
feat_cdr, feat_vdj,cdr3_seq = model(data)
out_cdr, out_vdj = contrast(feat_cdr, feat_vdj, index)
cdr_loss = criterion_cdr(out_cdr)
vdj_loss = criterion_vdj(out_vdj)
cdr_prob = out_cdr[:, 0].mean()
vdj_prob = out_vdj[:, 0].mean()
loss = cdr_loss+vdj_loss
# ===================backward=====================
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
torch.nn.utils.clip_grad_norm_(contrast.parameters(), gradient_clip)
optimizer.step()
# ===================meters=====================
losses.update(loss.item(), batch_size)
cdr_loss_meter.update(cdr_loss.item(), batch_size)
cdr_prob_meter.update(cdr_prob.item(), batch_size)
vdj_loss_meter.update(vdj_loss.item(), batch_size)
vdj_prob_meter.update(vdj_prob.item(), batch_size)
torch.cuda.synchronize()
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'
'cdr_p {cdr_probs.val:.3f} ({cdr_probs.avg:.3f})\t'
'vdj_p {vdj_probs.val:.3f} ({vdj_probs.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, cdr_probs=cdr_prob_meter,
vdj_probs=vdj_prob_meter))
sys.stdout.flush()
return cdr_loss_meter.avg, cdr_prob_meter.avg, vdj_loss_meter.avg, vdj_prob_meter.avg
#Acc and roc_score
def predict(test_loader, model, contrast,criterion_cdr,criterion_vdj):
acc=dict()
roc_score=dict()
model.eval()
contrast.eval()
with torch.no_grad():
for idx, (data, index) in enumerate(test_loader):
index = index.to(device)
for _ in list(data.keys())[0:2]:
data[_] = data[_].float().to(device)
feat_cdr,feat_vdj,cdr3_seq = model(data)
out_cdr, out_vdj = contrast(feat_cdr, feat_vdj, index)
loss_cdr=criterion_cdr(out_cdr)
loss_vdj=criterion_vdj(out_vdj)
loss=loss_cdr+loss_vdj
print('Batch {0}: test loss {1:.3f}'.format(idx,loss))
out_cdr=out_cdr.squeeze()
out_vdj=out_vdj.squeeze()
acc_cdr=torch.argmax(out_cdr,dim=1)
acc_vdj=torch.argmax(out_vdj,dim=1)
acc_vdj=acc_vdj.squeeze()
if idx==0:
acc['cdr']=acc_cdr
acc['vdj']=acc_vdj
roc_score['cdr']=out_cdr.flatten()
roc_score['vdj']=out_vdj.flatten()
else:
acc['cdr']=torch.cat((acc['cdr'],acc_cdr),0)
acc['vdj']=torch.cat((acc['vdj'],acc_vdj),0)
roc_score['cdr']=torch.cat((roc_score['cdr'],out_cdr.flatten()),0)
roc_score['vdj']=torch.cat((roc_score['vdj'],out_vdj.flatten()),0)
return acc,roc_score,loss
# Per-epoch history of training metrics and test loss.
hist = dict()
hist['cdr_loss'] = []
hist['cdr_prob'] = []
hist['vdj_loss'] = []
hist['vdj_prob'] = []
hist['test_loss'] = []
save_freq = 5  # checkpoint every 5 epochs
for epoch in range(start_epoch, 300):
    cdr_loss, cdr_prob, vdj_loss, vdj_prob = train(epoch, train_loader, model, contrast, criterion_cdr, criterion_vdj, optimizer,
                                                   gradient_clip=gradient_clip, print_freq=1)
    acc, roc_score, test_loss = predict(test_loader, model, contrast, criterion_cdr, criterion_vdj)
    hist['cdr_loss'].append(cdr_loss)
    hist['cdr_prob'].append(cdr_prob)
    hist['vdj_loss'].append(vdj_loss)
    hist['vdj_prob'].append(vdj_prob)
    hist['test_loss'].append(test_loss)
    acc['cdr'] = acc['cdr'].cpu().numpy()
    acc['vdj'] = acc['vdj'].cpu().numpy()
    roc_score['cdr'] = roc_score['cdr'].cpu().numpy()
    roc_score['vdj'] = roc_score['vdj'].cpu().numpy()
    # With nce_k=1 each sample contributes [positive, negative] scores, so
    # flattened scores alternate positive/negative: label even positions 1.
    predict_label = np.zeros(len(roc_score['cdr']))
    predict_label[::2] = 1
    fpr, tpr, _ = roc_curve(predict_label, roc_score['cdr'])
    roc_auc = auc(fpr, tpr)
    # argmax==0 means the positive pair ranked first.
    print('cdr accuracy: ')
    print(len(np.where(acc['cdr'] == 0)[0]) / len(acc['cdr']))
    print('nt accuracy: ')
    print(len(np.where(acc['vdj'] == 0)[0]) / len(acc['vdj']))
    print('cdr AUC: ')
    print(roc_auc)
    # Save model
    if epoch % save_freq == 0 and epoch != 0:
        print("Saving model...")
        state = {'model': model.state_dict(),
                 'contrast': contrast.state_dict(),
                 'optimizer': optimizer.state_dict(),
                 'epoch': epoch}
        torch.save(state, opt.resume + "/epoch_{}.pt".format(epoch))
        torch.save(hist, opt.resume + "/hist.pt")
#post check functions
import matplotlib.pyplot as plt
def plot_loss(hist):
    """Plot the training-history curves stored in *hist*, one figure each."""
    for figure_title, hist_key in (('CDR loss', 'cdr_loss'),
                                   ('NT loss', 'vdj_loss'),
                                   ('Test loss', 'test_loss')):
        plt.title(figure_title)
        plt.plot(hist[hist_key])
        plt.show()
#hist=torch.load(opt.resume+"/hist.pt")
#plot_loss(hist)
| 35.90681 | 130 | 0.650829 |
293de98ecdf9ba806d2744cb15b551ed1b28b368 | 14,458 | py | Python | test/functional/feature_dbcrash.py | blakelapierre/bitcoin-cash-node | a61fc457449817bef5e280ba25bd9bcb6c921421 | [
"MIT"
] | 61 | 2020-02-23T01:19:16.000Z | 2022-03-04T15:22:00.000Z | test/functional/feature_dbcrash.py | blakelapierre/bitcoin-cash-node | a61fc457449817bef5e280ba25bd9bcb6c921421 | [
"MIT"
] | null | null | null | test/functional/feature_dbcrash.py | blakelapierre/bitcoin-cash-node | a61fc457449817bef5e280ba25bd9bcb6c921421 | [
"MIT"
] | 20 | 2020-03-01T02:35:17.000Z | 2021-12-28T12:04:34.000Z | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test recovery from a crash during chainstate writing.
- 4 nodes
* node0, node1, and node2 will have different dbcrash ratios, and different
dbcache sizes
* node3 will be a regular node, with no crashing.
* The nodes will not connect to each other.
- use default test framework starting chain. initialize starting_tip_height to
tip height.
- Main loop:
* generate lots of transactions on node3, enough to fill up a block.
* uniformly randomly pick a tip height from starting_tip_height to
tip_height; with probability 1/(height_difference+4), invalidate this block.
* mine enough blocks to overtake tip_height at start of loop.
* for each node in [node0,node1,node2]:
- for each mined block:
* submit block to node
* if node crashed on/after submitting:
- restart until recovery succeeds
- check that utxo matches node3 using gettxoutsetinfo"""
import errno
import http.client
import random
import time
from test_framework.blocktools import create_confirmed_utxos
from test_framework.cdefs import DEFAULT_EXCESSIVE_BLOCK_SIZE
from test_framework.messages import (
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
ToHex,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
hex_str_to_bytes,
)
class ChainstateWriteCrashTest(BitcoinTestFramework):
def set_test_params(self):
    """Configure 4 unconnected nodes: three crashing nodes and one miner."""
    self.num_nodes = 4
    self.setup_clean_chain = False
    self.rpc_timeout = 480

    # Set -maxmempool=0 to turn off mempool memory sharing with dbcache
    # Set -rpcservertimeout=900 to reduce socket disconnects in this
    # long-running test
    self.base_args = ["-maxmempool=0",
                      "-rpcservertimeout=900", "-dbbatchsize=200000",
                      "-noparkdeepreorg"]

    # Set different crash ratios and cache sizes. Note that not all of
    # -dbcache goes to pcoinsTip.
    self.node0_args = ["-dbcrashratio=8", "-dbcache=4"] + self.base_args
    self.node1_args = ["-dbcrashratio=16", "-dbcache=8"] + self.base_args
    self.node2_args = ["-dbcrashratio=24", "-dbcache=16"] + self.base_args

    # Node3 is a normal node with default args, except will mine full blocks
    # and non-standard txs (e.g. txs with "dust" outputs)
    self.node3_args = [
        "-blockmaxsize={}".format(DEFAULT_EXCESSIVE_BLOCK_SIZE), "-acceptnonstdtxn"]
    self.extra_args = [self.node0_args, self.node1_args,
                       self.node2_args, self.node3_args]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
# Leave them unconnected, we'll use submitblock directly in this test
def restart_node(self, node_index, expected_tip):
"""Start up a given node id, wait for the tip to reach the given block hash, and calculate the utxo hash.
Exceptions on startup should indicate node crash (due to -dbcrashratio), in which case we try again. Give up
after 60 seconds. Returns the utxo hash of the given node."""
time_start = time.time()
while time.time() - time_start < 120:
try:
# Any of these RPC calls could throw due to node crash
self.start_node(node_index)
self.nodes[node_index].waitforblock(expected_tip)
utxo_hash = self.nodes[node_index].gettxoutsetinfo()[
'hash_serialized']
return utxo_hash
except Exception:
# An exception here should mean the node is about to crash.
# If bitcoind exits, then try again. wait_for_node_exit()
# should raise an exception if bitcoind doesn't exit.
self.wait_for_node_exit(node_index, timeout=15)
self.crashed_on_restart += 1
time.sleep(1)
# If we got here, bitcoind isn't coming back up on restart. Could be a
# bug in bitcoind, or we've gotten unlucky with our dbcrash ratio --
# perhaps we generated a test case that blew up our cache?
# TODO: If this happens a lot, we should try to restart without -dbcrashratio
# and make sure that recovery happens.
raise AssertionError(
"Unable to successfully restart node {} in allotted time".format(node_index))
def submit_block_catch_error(self, node_index, block):
"""Try submitting a block to the given node.
Catch any exceptions that indicate the node has crashed.
Returns true if the block was submitted successfully; false otherwise."""
try:
self.nodes[node_index].submitblock(block)
return True
except (http.client.CannotSendRequest, http.client.RemoteDisconnected) as e:
self.log.debug(
"node {} submitblock raised exception: {}".format(node_index, e))
return False
except OSError as e:
self.log.debug(
"node {} submitblock raised OSError exception: errno={}".format(node_index, e.errno))
if e.errno in [errno.EPIPE, errno.ECONNREFUSED, errno.ECONNRESET]:
# The node has likely crashed
return False
else:
# Unexpected exception, raise
raise
def sync_node3blocks(self, block_hashes):
"""Use submitblock to sync node3's chain with the other nodes
If submitblock fails, restart the node and get the new utxo hash.
If any nodes crash while updating, we'll compare utxo hashes to
ensure recovery was successful."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized']
# Retrieve all the blocks from node3
blocks = []
for block_hash in block_hashes:
blocks.append(
[block_hash, self.nodes[3].getblock(block_hash, False)])
# Deliver each block to each other node
for i in range(3):
nodei_utxo_hash = None
self.log.debug("Syncing blocks to node {}".format(i))
for (block_hash, block) in blocks:
# Get the block from node3, and submit to node_i
self.log.debug("submitting block {}".format(block_hash))
if not self.submit_block_catch_error(i, block):
# TODO: more carefully check that the crash is due to -dbcrashratio
# (change the exit code perhaps, and check that here?)
self.wait_for_node_exit(i, timeout=30)
self.log.debug(
"Restarting node {} after block hash {}".format(i, block_hash))
nodei_utxo_hash = self.restart_node(i, block_hash)
assert nodei_utxo_hash is not None
self.restart_counts[i] += 1
else:
# Clear it out after successful submitblock calls -- the cached
# utxo hash will no longer be correct
nodei_utxo_hash = None
# Check that the utxo hash matches node3's utxo set
# NOTE: we only check the utxo set if we had to restart the node
# after the last block submitted:
# - checking the utxo hash causes a cache flush, which we don't
# want to do every time; so
# - we only update the utxo cache after a node restart, since flushing
# the cache is a no-op at that point
if nodei_utxo_hash is not None:
self.log.debug(
"Checking txoutsetinfo matches for node {}".format(i))
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def verify_utxo_hash(self):
"""Verify that the utxo hash of each node matches node3.
Restart any nodes that crash while querying."""
node3_utxo_hash = self.nodes[3].gettxoutsetinfo()['hash_serialized']
self.log.info("Verifying utxo hash matches for all nodes")
for i in range(3):
try:
nodei_utxo_hash = self.nodes[i].gettxoutsetinfo()[
'hash_serialized']
except OSError:
# probably a crash on db flushing
nodei_utxo_hash = self.restart_node(
i, self.nodes[3].getbestblockhash())
assert_equal(nodei_utxo_hash, node3_utxo_hash)
def generate_small_transactions(self, node, count, utxo_list):
FEE = 1000 # TODO: replace this with node relay fee based calculation
num_transactions = 0
random.shuffle(utxo_list)
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
for i in range(2):
utxo = utxo_list.pop()
tx.vin.append(
CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
output_amount = (input_amount - FEE) // 3
if output_amount <= 0:
# Sanity check -- if we chose inputs that are too small, skip
continue
for i in range(3):
tx.vout.append(
CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex']
node.sendrawtransaction(tx_signed_hex)
num_transactions += 1
def run_test(self):
# Track test coverage statistics
self.restart_counts = [0, 0, 0] # Track the restarts for nodes 0-2
self.crashed_on_restart = 0 # Track count of crashes during recovery
# Start by creating a lot of utxos on node3
initial_height = self.nodes[3].getblockcount()
utxo_list = create_confirmed_utxos(self.nodes[3], 5000)
self.log.info("Prepped {} utxo entries".format(len(utxo_list)))
# Sync these blocks with the other nodes
block_hashes_to_sync = []
for height in range(initial_height + 1,
self.nodes[3].getblockcount() + 1):
block_hashes_to_sync.append(self.nodes[3].getblockhash(height))
self.log.debug("Syncing {} blocks with other nodes".format(
len(block_hashes_to_sync)))
# Syncing the blocks could cause nodes to crash, so the test begins
# here.
self.sync_node3blocks(block_hashes_to_sync)
starting_tip_height = self.nodes[3].getblockcount()
# Set mock time to the last block time. This will allow us to increase
# the time at each loop so the block hash will always differ for the
# same block height, and avoid duplication.
# Note that the current time can be behind the block time due to the
# way the miner sets the block time.
tip = self.nodes[3].getbestblockhash()
block_time = self.nodes[3].getblockheader(tip)['time']
self.nodes[3].setmocktime(block_time)
# Main test loop:
# each time through the loop, generate a bunch of transactions,
# and then either mine a single new block on the tip, or some-sized
# reorg.
for i in range(40):
block_time += 10
self.nodes[3].setmocktime(block_time)
self.log.info(
"Iteration {}, generating 2500 transactions {}".format(
i, self.restart_counts))
# Generate a bunch of small-ish transactions
self.generate_small_transactions(self.nodes[3], 2500, utxo_list)
# Pick a random block between current tip, and starting tip
current_height = self.nodes[3].getblockcount()
random_height = random.randint(starting_tip_height, current_height)
self.log.debug("At height {}, considering height {}".format(
current_height, random_height))
if random_height > starting_tip_height:
# Randomly reorg from this point with some probability (1/4 for
# tip, 1/5 for tip-1, ...)
if random.random() < 1.0 / (current_height + 4 - random_height):
self.log.debug(
"Invalidating block at height {}".format(random_height))
self.nodes[3].invalidateblock(
self.nodes[3].getblockhash(random_height))
# Now generate new blocks until we pass the old tip height
self.log.debug("Mining longer tip")
block_hashes = []
while current_height + 1 > self.nodes[3].getblockcount():
block_hashes.extend(self.nodes[3].generate(
min(10, current_height + 1 - self.nodes[3].getblockcount())))
self.log.debug(
"Syncing {} new blocks...".format(len(block_hashes)))
self.sync_node3blocks(block_hashes)
utxo_list = self.nodes[3].listunspent()
self.log.debug("Node3 utxo count: {}".format(len(utxo_list)))
# Check that the utxo hashes agree with node3
# Useful side effect: each utxo cache gets flushed here, so that we
# won't get crashes on shutdown at the end of the test.
self.verify_utxo_hash()
# Check the test coverage
self.log.info("Restarted nodes: {}; crashes on restart: {}".format(
self.restart_counts, self.crashed_on_restart))
# If no nodes were restarted, we didn't test anything.
assert self.restart_counts != [0, 0, 0]
# Make sure we tested the case of crash-during-recovery.
assert self.crashed_on_restart > 0
# Warn if any of the nodes escaped restart.
for i in range(3):
if self.restart_counts[i] == 0:
self.log.warning(
"Node {} never crashed during utxo flush!".format(i))
if __name__ == "__main__":
    # Entry point: the framework's main() handles setup, run_test and teardown.
    ChainstateWriteCrashTest().main()
| 44.349693 | 116 | 0.617374 |
7914ebcff5d164616703d216cb64c4c47ddcd627 | 4,547 | py | Python | omnium/run_control/mpi_control.py | markmuetz/omnium | 183acd1729e1234aa103ed53a41429c9d4c1d341 | [
"Apache-2.0"
] | 3 | 2016-08-29T20:31:55.000Z | 2019-05-15T09:32:53.000Z | omnium/run_control/mpi_control.py | markmuetz/omnium | 183acd1729e1234aa103ed53a41429c9d4c1d341 | [
"Apache-2.0"
] | 71 | 2016-08-29T20:49:00.000Z | 2019-01-11T14:30:49.000Z | omnium/run_control/mpi_control.py | markmuetz/omnium | 183acd1729e1234aa103ed53a41429c9d4c1d341 | [
"Apache-2.0"
] | null | null | null | from logging import getLogger
from mpi4py import MPI
# Module-level logger (the 'om.' prefix is this project's logger namespace).
logger = getLogger('om.mpi_ctrl')
# MPI message tags: WORKTAG marks task payloads, DIETAG tells a slave to exit.
WORKTAG = 0
DIETAG = 1
class MpiMaster(object):
    """Rank-0 coordinator: farms tasks out to MPI slaves and collects results.

    NOTE(review): log calls pass {}-style placeholders as extra args
    (e.g. logger.info('... {}/{}', rank, size)); stdlib logging formats with
    %-style, so this presumably relies on a brace-style logger configured
    elsewhere in the project -- verify.
    """
    def __init__(self, run_control, comm, rank, size):
        # comm is the MPI communicator; rank/size identify this process.
        self.run_control = run_control
        self.comm = comm
        self.rank = rank
        self.size = size
        logger.info('Initialized MPI master: {}/{}', rank, size)
    def run(self):
        """Dispatch pending tasks to slaves until exhausted, then drain the
        outstanding results and tell every slave to die."""
        task_master = self.run_control._task_master
        status = MPI.Status()
        # Launch all tasks initially.
        if self.size > len(task_master.pending_tasks):
            logger.warning('MPI size > # of pending tasks, not sure what will happen')
        # Slave ranks 1..size-1, reversed so .pop() hands out rank 1 first.
        waiting_dests = list(range(1, self.size)[::-1])
        # TODO: should handle exception in slave by consuming all data and issuing dies.
        # Farm out rest of work when a worker reports back that it's done.
        while True:
            try:
                task = task_master.get_next_pending()
                if not task:
                    # There are tasks with unmet dependencies.
                    # NOTE(review): 'dest' may be unbound here if this branch
                    # is hit before any task was dispatched -- confirm
                    # get_next_pending() cannot return falsy on the first call.
                    waiting_dests.append(dest)
                    logger.debug('appended waiting dests: {}', waiting_dests)
            except StopIteration:
                logger.debug('All tasks sent')
                break
            need_to_block = not waiting_dests or not task
            if need_to_block:
                # Block until notified of completion.
                rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
                logger.info('Data received from {}', status.Get_source())
                logger.debug('data: {}', rdata)
                if rdata['command'] == 'error':
                    logger.error('Rank {} raised error', status.Get_source())
                    logger.error(rdata['msg'])
                    raise Exception('Unrecoverable error')
                received_task = rdata['task'] # reconstituted via pickle.
                task_master.update_task(received_task.index, received_task.status)
            if task:
                if waiting_dests:
                    # Clear backlog of waiting dests.
                    logger.debug('pop waiting dests: {}', waiting_dests)
                    dest = waiting_dests.pop()
                else:
                    # Reuse the rank that just reported back.
                    dest = status.Get_source()
                data = {'command': 'run_task', 'task': task}
                logger.info('Sending data to {}', dest)
                logger.debug('data: {}', data)
                self.comm.send(data, dest=dest, tag=WORKTAG)
        # We are done! Listen for final data responses.
        for dest in range(1, self.size - len(waiting_dests)):
            rdata = self.comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
            if rdata['command'] == 'error':
                logger.error('Rank {} raised error', status.Get_source())
                logger.error(rdata['msg'])
                raise Exception('Unrecoverable error')
            received_task = rdata['task'] # reconstituted via pickle.
            task_master.update_task(received_task.index, received_task.status)
            logger.info('Final data received from {}', status.Get_source())
            logger.debug('data: {}', rdata)
        # Send all slaves a die command.
        for dest in range(1, self.size):
            data = {'command': 'die'}
            logger.info('Sending die to {}', dest)
            self.comm.send(data, dest=dest, tag=DIETAG)
        logger.info('Finished')
class MpiSlave(object):
    """MPI worker: runs tasks sent by :class:`MpiMaster` until a DIETAG arrives."""
    def __init__(self, run_control, comm, rank, size):
        # comm is the MPI communicator; rank/size identify this process.
        self.run_control = run_control
        self.comm = comm
        self.rank = rank
        self.size = size
        logger.info('Initialized MPI slave: {}/{}', rank, size)
    def listen(self):
        """Receive/execute loop.

        Receives payloads from rank 0, runs each task and reports it back as
        'done'; exits on DIETAG.  Any exception is reported to the master as
        an 'error' payload instead of crashing the MPI job silently.
        """
        try:
            status = MPI.Status()
            while True:
                logger.debug('Waiting for data')
                data = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
                logger.debug('Received data: {}', data)
                if status.Get_tag() == DIETAG:
                    break
                else:
                    self.run_control.run_task(data['task'])
                    data['task'].status = 'done'
                    # Echo the payload back so the master can update task state.
                    self.comm.send(data, dest=0, tag=WORKTAG)
            logger.debug('Finished')
        except Exception as e:
            logger.error(e)
            data = {'command': 'error', 'msg': str(e)}
            self.comm.send(data, dest=0, tag=WORKTAG)
            return
| 39.53913 | 93 | 0.548933 |
f5224e2ffba5c5043cbccaf0a15854e7fc128fed | 5,362 | py | Python | tests/benchmark/milvus_benchmark/runners/chaos.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 10,504 | 2019-09-16T12:20:11.000Z | 2022-03-31T15:07:56.000Z | tests/benchmark/milvus_benchmark/runners/chaos.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 13,389 | 2019-09-16T06:49:53.000Z | 2022-03-31T18:01:24.000Z | tests/benchmark/milvus_benchmark/runners/chaos.py | CyberFlameGO/milvus | c6ebae89598c4198fa44ea02f8a60219b21fbffd | [
"Apache-2.0"
] | 1,792 | 2019-09-18T04:27:42.000Z | 2022-03-31T14:37:20.000Z | import copy
import logging
import pdb
import time
from operator import methodcaller
from yaml import full_load, dump
import threading
from milvus_benchmark import utils
from milvus_benchmark.runners import utils as runner_utils
from milvus_benchmark.chaos import utils as chaos_utils
from milvus_benchmark.runners.base import BaseRunner
from chaos.chaos_opt import ChaosOpt
from milvus_benchmark import config
from milvus_benchmark.chaos.chaos_mesh import PodChaos, NetworkChaos
logger = logging.getLogger("milvus_benchmark.runners.chaos")
# Maps a chaos-mesh experiment 'kind' string to the class that builds its config.
kind_chaos_mapping = {
    "PodChaos": PodChaos,
    "NetworkChaos": NetworkChaos
}
# Maps an assertion label from the test spec to the helper that enforces it.
assert_func_mapping = {
    "fail": chaos_utils.assert_fail,
    "pass": chaos_utils.assert_pass
}
class SimpleChaosRunner(BaseRunner):
    """Runner that injects a chaos-mesh experiment while exercising Milvus,
    then asserts the expected behaviour during and after the fault."""
    name = "simple_chaos"
    def __init__(self, env, metric):
        super(SimpleChaosRunner, self).__init__(env, metric)
    async def async_call(self, func, **kwargs):
        # Invoke the named Milvus client method with the given kwargs.
        # NOTE(review): the result is bound to 'future' but never used/awaited.
        future = methodcaller(func, **kwargs)(self.milvus)
    def run_step(self, interface_name, interface_params):
        """Execute one setup step (create_collection / insert / create_index /
        flush) against the Milvus client, using params from the test spec."""
        if interface_name == "create_collection":
            collection_name = utils.get_unique_name("chaos")
            self.data_type = interface_params["data_type"]
            self.dimension = interface_params["dimension"]
            self.milvus.set_collection(collection_name)
            vector_type = runner_utils.get_vector_type(self.data_type)
            self.milvus.create_collection(self.dimension, data_type=vector_type)
        elif interface_name == "insert":
            batch_size = interface_params["batch_size"]
            collection_size = interface_params["collection_size"]
            self.insert(self.milvus, self.milvus.collection_name, self.data_type, self.dimension, collection_size,
                        batch_size)
        elif interface_name == "create_index":
            metric_type = interface_params["metric_type"]
            index_type = interface_params["index_type"]
            index_param = interface_params["index_param"]
            vector_type = runner_utils.get_vector_type(self.data_type)
            field_name = runner_utils.get_default_field_name(vector_type)
            self.milvus.create_index(field_name, index_type, metric_type, index_param=index_param)
        elif interface_name == "flush":
            self.milvus.flush()
    def extract_cases(self, collection):
        """Build (case_params, case_metrics) from one collection spec with
        'before' steps, a 'processing' phase and an optional 'after' phase."""
        before_steps = collection["before"]
        after = collection["after"] if "after" in collection else None
        processing = collection["processing"]
        case_metrics = []
        case_params = [{
            "before_steps": before_steps,
            "after": after,
            "processing": processing
        }]
        self.init_metric(self.name, {}, {}, None)
        case_metric = copy.deepcopy(self.metric)
        case_metric.set_case_metric_type()
        case_metrics.append(case_metric)
        return case_params, case_metrics
    def prepare(self, **case_param):
        """Run all 'before' steps to set up collection/data/index state."""
        steps = case_param["before_steps"]
        for step in steps:
            interface_name = step["interface_name"]
            params = step["params"]
            self.run_step(interface_name, params)
    def run_case(self, case_metric, **case_param):
        """Inject the chaos experiment while running the processing call in a
        thread, then (on success) delete the experiment and run the 'after'
        assertion.  All chaos objects are cleaned up in the finally block.

        NOTE(review): 'after' may be None when the spec has no 'after' key
        (see extract_cases), yet after["assertion"] is read unconditionally
        here -- confirm specs always provide 'after'.
        """
        processing = case_param["processing"]
        after = case_param["after"]
        user_chaos = processing["chaos"]
        kind = user_chaos["kind"]
        spec = user_chaos["spec"]
        metadata_name = config.NAMESPACE + "-" + kind.lower()
        metadata = {"name": metadata_name}
        process_assertion = processing["assertion"]
        after_assertion = after["assertion"]
        # load yaml from default template to generate stand chaos dict
        chaos_mesh = kind_chaos_mapping[kind](config.DEFAULT_API_VERSION, kind, metadata, spec)
        experiment_config = chaos_mesh.gen_experiment_config()
        process_func = processing["interface_name"]
        process_params = processing["params"] if "params" in processing else {}
        after_func = after["interface_name"]
        after_params = after["params"] if "params" in after else {}
        logger.debug(chaos_mesh.kind)
        chaos_opt = ChaosOpt(chaos_mesh.kind)
        # Remove any leftover experiment of the same kind from a previous run.
        chaos_objects = chaos_opt.list_chaos_object()
        if len(chaos_objects["items"]) != 0:
            logger.debug(chaos_objects["items"])
            chaos_opt.delete_chaos_object(chaos_mesh.metadata["name"])
        # with open('./pod-newq.yaml', "w") as f:
        #     dump(experiment_config, f)
        # f.close()
        # concurrent inject chaos and run func
        # logger.debug(experiment_config)
        t_milvus = threading.Thread(target=assert_func_mapping[process_assertion], args=(process_func, self.milvus,), kwargs=process_params)
        try:
            t_milvus.start()
            chaos_opt.create_chaos_object(experiment_config)
            # processing assert exception
        except Exception as e:
            logger.info("exception {}".format(str(e)))
        else:
            chaos_opt.delete_chaos_object(chaos_mesh.metadata["name"])
            # TODO retry connect milvus
            time.sleep(15)
            assert_func_mapping[after_assertion](after_func, self.milvus, **after_params)
        finally:
            chaos_opt.delete_all_chaos_object()
            logger.info(chaos_opt.list_chaos_object())
| 41.890625 | 140 | 0.669526 |
ef53170811203b5275316968782b196dfd7ab11d | 291 | py | Python | torchx/__init__.py | aivanou/torchx-1 | aaeee71b20d6c6feca48395cd1496a4b6ad1fca1 | [
"BSD-3-Clause"
] | null | null | null | torchx/__init__.py | aivanou/torchx-1 | aaeee71b20d6c6feca48395cd1496a4b6ad1fca1 | [
"BSD-3-Clause"
] | null | null | null | torchx/__init__.py | aivanou/torchx-1 | aaeee71b20d6c6feca48395cd1496a4b6ad1fca1 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from .version import __version__ as __version__ # noqa F401
| 32.333333 | 71 | 0.75945 |
43fa8347ae693404d4c176f691009ac18f242dc5 | 2,008 | py | Python | src/cirrus/core/components/files/readmes.py | sjwoodr/cirrus-geo | 8f1190325ba6f6c7fc82e5d3ddb16d34c1b8478d | [
"Apache-2.0"
] | 26 | 2020-08-18T14:34:47.000Z | 2021-07-29T14:41:14.000Z | src/cirrus/core/components/files/readmes.py | sjwoodr/cirrus-geo | 8f1190325ba6f6c7fc82e5d3ddb16d34c1b8478d | [
"Apache-2.0"
] | 47 | 2020-08-26T04:56:48.000Z | 2021-10-04T16:37:38.000Z | src/cirrus/core/components/files/readmes.py | sjwoodr/cirrus-geo | 8f1190325ba6f6c7fc82e5d3ddb16d34c1b8478d | [
"Apache-2.0"
] | 6 | 2020-08-18T14:37:32.000Z | 2021-07-15T21:59:06.000Z | import logging
from rich.markdown import Markdown
from .base import ComponentFile
logger = logging.getLogger(__name__)
default_readme = '''#{name}
Fill in this README with details for this {type}
## Description
It is often best to tell people what this {type}
does. And perhaps why they might choose to use it.
## Configuration Parameters
It's not uncommon to list out the parameters so people can better
understand how to use this {type} once they have chosen to do so.
Don't just say what they are, but where they go.
Configuration parameters are passed in `payload['process']['tasks']['copy-metadata']`:
- Name: `mappings`
Type: `dict`
Required: True
Default: None
An array of mapping dicts that define source item,
destination item, and metadata fields to copy
Providing an example is often best.
Example:
```
"copy-metadata": {{
"mappings":[
{{
"source": "GEO",
"destination": "SLC",
"metadata": {{
"assets": ["preview", "thumbnail"]
}}
}}
]
}}
```
## Detail any other options
It's possible your {type} uses more fields to define options.
Maybe your {type} also REQUIRES the following parameters
supplied in `payload['process']['item-queries']`:
```
"item-queries": {{
"GEO": {{
"sar:product_type": "GEO"
}},
"SLC": {{
"sar:product_type": "SLC"
}},
"SICD": {{
"sar:product_type": "SICD"
}}
}}
```
'''.format
class Readme(ComponentFile):
    """Component file representing a README.md with a templated default body."""
    def __init__(self, *args, name='README.md', **kwargs):
        # Same as ComponentFile, but the file name defaults to 'README.md'.
        super().__init__(*args, name=name, **kwargs)
    @staticmethod
    def content_fn(component) -> str:
        """Render the default README template for the given component."""
        return default_readme(
            name=component.name,
            type=component.type,
        )
    def show(self):
        """Pretty-print the README as Markdown; log an error if none exists."""
        if self.content is None:
            logger.error(
                "%s '%s' has no README.",
                self.parent.type.capitalize(),
                self.parent.name
            )
            return
        self.console.print(Markdown(self.content))
| 20.916667 | 86 | 0.62251 |
be426056c24c81265e2f07d90334c4944a353541 | 1,158 | py | Python | examples/classification_demo.py | Fuminides/deep-belief-network | 969797fa5c091f343f25980440ddeef127e6e1f5 | [
"MIT"
] | 451 | 2015-12-18T01:54:16.000Z | 2022-03-28T13:34:57.000Z | examples/classification_demo.py | Fuminides/deep-belief-network | 969797fa5c091f343f25980440ddeef127e6e1f5 | [
"MIT"
] | 55 | 2016-09-01T17:11:14.000Z | 2022-03-19T14:24:10.000Z | examples/classification_demo.py | Fuminides/deep-belief-network | 969797fa5c091f343f25980440ddeef127e6e1f5 | [
"MIT"
] | 224 | 2015-12-18T14:16:20.000Z | 2022-03-26T13:19:15.000Z | import numpy as np
# Seed NumPy's global RNG before any data shuffling for reproducible runs.
np.random.seed(1337)  # for reproducibility
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
# FIX: 'sklearn.metrics.classification' was a private module, deprecated in
# scikit-learn 0.22 and removed in 0.24; import from the public location.
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification
# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target
# Data scaling: digits pixels lie in [0, 16]; normalize to [0, 1] as float32.
X = (X / 16).astype(np.float32)
# Splitting data (80/20 train/test, fixed seed for reproducibility)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Training: two 256-unit RBM layers pre-trained, then fine-tuned by backprop.
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.05,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)
# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
| 33.085714 | 88 | 0.598446 |
7a58c88f675ac72e611971f5cffccd86ce164afe | 877 | py | Python | src/nodes/rgb_ros.py | ericchen321/ros_x_habitat | f256b62fe8dda059baaf9bad87cf53f7d769f2f9 | [
"CC-BY-4.0"
] | 24 | 2021-09-10T23:35:53.000Z | 2022-03-31T18:12:20.000Z | src/nodes/rgb_ros.py | ericchen321/ros_x_habitat | f256b62fe8dda059baaf9bad87cf53f7d769f2f9 | [
"CC-BY-4.0"
] | 4 | 2021-12-11T06:56:58.000Z | 2022-02-23T03:05:00.000Z | src/nodes/rgb_ros.py | ericchen321/ros_x_habitat | f256b62fe8dda059baaf9bad87cf53f7d769f2f9 | [
"CC-BY-4.0"
] | 7 | 2021-12-17T14:13:27.000Z | 2022-03-31T16:39:28.000Z | #!/usr/bin/env python
# note need to run viewer with python2!!!
import numpy as np
import rospy
import std_msgs.msg
from cv_bridge import CvBridge
from rospy.numpy_msg import numpy_msg
from rospy_tutorials.msg import Floats
from sensor_msgs.msg import Image
# Initialize this process as a ROS node and publish converted frames on
# the 'ros_img_rgb' topic.
rospy.init_node("nprgb2ros_rgb", anonymous=False)
pub = rospy.Publisher("ros_img_rgb", Image, queue_size=10)
def callback(data):
    """Convert an incoming flat Floats message into a sensor_msgs Image.

    The message layout is: all pixel values first, then the image height and
    width as the last two elements.
    """
    img_raveled = data.data[0:-2]
    img_size = data.data[-2:].astype(int)
    # Stamp the outgoing image with the current ROS time.
    h = std_msgs.msg.Header()
    h.stamp = rospy.Time.now()
    # Reshape to (height, width, 3) and convert to 8-bit RGB.
    img = (np.reshape(img_raveled, (img_size[0], img_size[1], 3))).astype(np.uint8)
    image_message = CvBridge().cv2_to_imgmsg(img, encoding="rgb8")
    image_message.header = h
    pub.publish(image_message)
def listener():
    """Subscribe to the 'rgb' topic and block, processing messages until shutdown."""
    rospy.Subscriber("rgb", numpy_msg(Floats), callback)
    rospy.spin()
if __name__ == "__main__":
    listener()
| 24.361111 | 83 | 0.717218 |
3fa5c12316ac26fd84f82cad6a47a6329cef47d9 | 7,604 | py | Python | src/onegov/election_day/layouts/vote.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/election_day/layouts/vote.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | src/onegov/election_day/layouts/vote.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | from cached_property import cached_property
from onegov.core.utils import normalize_for_url
from onegov.election_day import _
from onegov.election_day.layouts.detail import DetailLayout
from onegov.election_day.utils import pdf_filename
from onegov.election_day.utils import svg_filename
class VoteLayout(DetailLayout):
    """Layout for displaying a vote.

    A vote is shown as a set of tabs; simple votes have entities/districts
    tabs, complex votes (with counter proposal and tie-breaker) show one
    entities/districts pair per ballot, plus a downloads ('data') tab.
    """
    def __init__(self, model, request, tab='entities'):
        super().__init__(model, request)
        # Currently active tab name; drives which ballot and links are used.
        self.tab = tab
    # Tabs whose result table is embedded directly in the page.
    tabs_with_embedded_tables = ('entities', 'districts')
    @cached_property
    def all_tabs(self):
        """Return all tabs. Ordering is important for the main view."""
        return (
            'entities',
            'districts',
            'proposal-entities',
            'proposal-districts',
            'counter-proposal-entities',
            'counter-proposal-districts',
            'tie-breaker-entities',
            'tie-breaker-districts',
            'data'
        )
    def title(self, tab=None):
        """Translated title for the given tab (defaults to the active tab).

        NOTE(review): 'entities' resolves via self.principal while 'districts'
        uses self.app.principal -- confirm both reach the same principal.
        """
        tab = (self.tab if tab is None else tab) or ''
        if tab == 'entities':
            return self.principal.label('entities')
        if tab == 'districts':
            return self.app.principal.label('districts')
        if tab.startswith('proposal'):
            return _("Proposal")
        if tab.startswith('counter-proposal'):
            return _("Counter Proposal")
        if tab.startswith('tie-breaker'):
            return _("Tie-Breaker")
        if tab == 'data':
            return _("Downloads")
        return ''
    def subtitle(self, tab=None):
        """Entities/districts label shown beneath the ballot title, if any."""
        tab = (self.tab if tab is None else tab) or ''
        if tab.endswith('-entities') and self.has_districts:
            return self.principal.label('entities')
        if tab.endswith('-districts'):
            return self.app.principal.label('districts')
        return ''
    def tab_visible(self, tab):
        """True if the tab applies to this vote's type and available data."""
        if self.hide_tab(tab):
            return False
        if not self.has_results:
            return False
        if tab == 'entities':
            return self.type == 'simple'
        if tab == 'proposal-entities':
            return self.type == 'complex'
        if tab == 'counter-proposal-entities':
            return self.type == 'complex'
        if tab == 'tie-breaker-entities':
            return self.type == 'complex'
        if tab == 'districts':
            return self.has_districts and self.type == 'simple'
        if tab == 'proposal-districts':
            return self.has_districts and self.type == 'complex'
        if tab == 'counter-proposal-districts':
            return self.has_districts and self.type == 'complex'
        if tab == 'tie-breaker-districts':
            return self.has_districts and self.type == 'complex'
        return True
    @cached_property
    def visible(self):
        # Visibility of the currently active tab.
        return self.tab_visible(self.tab)
    @cached_property
    def type(self):
        # 'simple' or 'complex', taken from the vote model.
        return self.model.type
    @cached_property
    def ballot(self):
        """The ballot matching the active tab (counter proposal / tie-breaker
        on complex votes, otherwise the proposal)."""
        if self.type == 'complex' and 'counter' in self.tab:
            return self.model.counter_proposal
        if self.type == 'complex' and 'tie-breaker' in self.tab:
            return self.model.tie_breaker
        return self.model.proposal
    @cached_property
    def entities_map_link(self):
        return self.request.link(
            self.model, f'{self.ballot.type}-by-entities-map'
        )
    @cached_property
    def districts_map_link(self):
        return self.request.link(
            self.model, f'{self.ballot.type}-by-districts-map'
        )
    @cached_property
    def table_link(self):
        """Link to the embedded result table view, or None on the data tab."""
        if self.tab == 'data':
            return None
        scope = 'entities'
        if 'district' in self.tab:
            scope = 'districts'
        return self.request.link(
            self.model, f'{self.ballot.type}-by-{scope}-table'
        )
    @cached_property
    def widget_link(self):
        return self.request.link(
            self.model, name='vote-header-widget'
        )
    @cached_property
    def summarize(self):
        # Summaries are shown only when more than one result row exists.
        return self.ballot.results.count() != 1
    @cached_property
    def main_view(self):
        """Link to the default view: the proposal for complex votes,
        otherwise the first non-hidden tab."""
        if self.type == 'complex':
            return self.request.link(self.model, 'proposal-entities')
        for tab in self.all_tabs:
            if not self.hide_tab(tab):
                return self.request.link(self.model, tab)
        return self.request.link(self.model, 'entities')
    @cached_property
    def answer(self):
        return self.model.answer
    @cached_property
    def menu(self):
        """Menu entries as (title, link, active, children) tuples; complex
        votes with districts get a nested menu grouped per ballot."""
        # NOTE(review): this local helper is never called below -- candidate
        # for removal (confirm no template relies on it).
        def entry(tab, use_subtitle=False):
            return (
                self.subtitle(tab) if use_subtitle else self.title(tab),
                self.request.link(self.model, tab),
                self.tab == tab,
                []
            )
        if self.type == 'complex' and self.has_districts:
            result = []
            for title, prefix in (
                (_("Proposal"), 'proposal'),
                (_("Counter Proposal"), 'counter-proposal'),
                (_("Tie-Breaker"), 'tie-breaker')
            ):
                result.append((
                    title, '', self.tab.startswith(prefix), [(
                        self.subtitle(tab),
                        self.request.link(self.model, tab),
                        self.tab == tab,
                        []
                    ) for tab in (f'{prefix}-entities', f'{prefix}-districts')]
                ))
            result.append((
                self.title('data'),
                self.request.link(self.model, 'data'),
                self.tab == 'data',
                []
            ))
            return result
        return [
            (
                self.title(tab),
                self.request.link(self.model, tab),
                self.tab == tab,
                []
            ) for tab in self.all_tabs if self.tab_visible(tab)
        ]
    @cached_property
    def pdf_path(self):
        """ Returns the path to the PDF file or None, if it is not available.
        """
        path = 'pdf/{}'.format(
            pdf_filename(
                self.model,
                self.request.locale,
                last_modified=self.last_modified
            )
        )
        if self.request.app.filestorage.exists(path):
            return path
        return None
    @cached_property
    def svg_prefix(self):
        # Which map SVG belongs to the active tab.
        return 'districts-map' if 'districts' in self.tab else 'entities-map'
    @cached_property
    def svg_path(self):
        """ Returns the path to the SVG file or None, if it is not available.
        """
        if not self.ballot:
            return None
        path = 'svg/{}'.format(
            svg_filename(
                self.ballot,
                self.svg_prefix,
                self.request.locale,
                last_modified=self.last_modified
            )
        )
        if self.request.app.filestorage.exists(path):
            return path
        return None
    @cached_property
    def svg_link(self):
        """ Returns a link to the SVG download view. """
        return self.request.link(
            self.ballot, name='{}-svg'.format(self.svg_prefix)
        )
    @cached_property
    def svg_name(self):
        """ Returns a nice to read SVG filename. """
        return '{}.svg'.format(
            normalize_for_url(
                '{}-{}-{}'.format(
                    self.model.id,
                    self.request.translate(self.title() or ''),
                    self.request.translate(self.subtitle() or '')
                ).rstrip('-')
            )
        )
| 29.703125 | 79 | 0.537875 |
5bd4df83f53628b1fa05a6ff7d22c7df75231da0 | 8,955 | py | Python | pyalgotrade/eventprofiler.py | berlm/pyalgotrade | 3b04efec563d7fa47b482b15347b0fa1aa4e11ec | [
"Apache-2.0"
] | null | null | null | pyalgotrade/eventprofiler.py | berlm/pyalgotrade | 3b04efec563d7fa47b482b15347b0fa1aa4e11ec | [
"Apache-2.0"
] | null | null | null | pyalgotrade/eventprofiler.py | berlm/pyalgotrade | 3b04efec563d7fa47b482b15347b0fa1aa4e11ec | [
"Apache-2.0"
] | 1 | 2019-10-20T15:34:34.000Z | 2019-10-20T15:34:34.000Z | # PyAlgoTrade
#
# Copyright 2011-2015 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import numpy as np
import matplotlib.pyplot as plt
from pyalgotrade.technical import roc
from pyalgotrade import dispatcher
class Results(object):
    """Results from the profiler.

    Collects, for every offset t in [-lookBack, lookForward], the cumulative
    returns observed around each complete event, rebased to 1 at event time.
    """
    def __init__(self, eventsDict, lookBack, lookForward):
        assert lookBack > 0
        assert lookForward > 0
        self.__lookBack = lookBack
        self.__lookForward = lookForward
        # One bucket per offset; bucket 0 corresponds to t == -lookBack.
        self.__values = [[] for _ in range(lookBack + lookForward + 1)]
        self.__eventCount = 0
        for events in eventsDict.values():
            for evt in events:
                # Events on the boundary (incomplete) are skipped entirely.
                if not evt.isComplete():
                    continue
                self.__eventCount += 1
                # Cumulative returns: (1 + R1)*(1 + R2)*...*(1 + Rn).
                cumulative = np.cumprod(evt.getValues() + 1)
                # Rebase so the value at the event time equals 1.
                cumulative = cumulative / cumulative[evt.getLookBack()]
                offset = evt.getLookBack()
                for t in range(-offset, evt.getLookForward() + 1):
                    self.setValue(t, cumulative[t + offset])
    def __mapPos(self, t):
        # Translate a relative offset into an index into self.__values.
        assert -self.__lookBack <= t <= self.__lookForward
        return t + self.__lookBack
    def setValue(self, t, value):
        """Record one observation for offset t; None values are rejected."""
        if value is None:
            raise Exception("Invalid value at time %d" % (t))
        self.__values[self.__mapPos(t)].append(value)
    def getValues(self, t):
        """Return the list of observations recorded for offset t."""
        return self.__values[self.__mapPos(t)]
    def getLookBack(self):
        return self.__lookBack
    def getLookForward(self):
        return self.__lookForward
    def getEventCount(self):
        """Returns the number of events occurred. Events that are on the boundary are skipped."""
        return self.__eventCount
class Predicate(object):
    """Base class for event identification.

    Subclass this and implement :meth:`eventOccurred` with the actual
    event-detection logic.
    """

    def eventOccurred(self, instrument, bards):
        """Override (**mandatory**) to determine if an event took place in the last bar (bards[-1]).

        :param instrument: Instrument identifier.
        :type instrument: string.
        :param bards: The BarDataSeries for the given instrument.
        :type bards: :class:`pyalgotrade.dataseries.bards.BarDataSeries`.
        :rtype: boolean.
        """
        raise NotImplementedError()
class Event(object):
    """Window of returns around one event: lookBack bars before, the event
    bar (t == 0), and lookForward bars after. Slots start as NaN and are
    filled via :meth:`setValue`.
    """

    def __init__(self, lookBack, lookForward):
        assert(lookBack > 0)
        assert(lookForward > 0)
        self.__lookBack = lookBack
        self.__lookForward = lookForward
        self.__values = np.empty((lookBack + lookForward + 1))
        # np.nan: the np.NAN alias was removed in NumPy 2.0.
        self.__values[:] = np.nan

    def __mapPos(self, t):
        # Map relative time t in [-lookBack, lookForward] to an array index.
        assert(t >= -1*self.__lookBack and t <= self.__lookForward)
        return t + self.__lookBack

    def isComplete(self):
        """True once every slot in the window has been filled (no NaN left)."""
        return not any(np.isnan(self.__values))

    def getLookBack(self):
        return self.__lookBack

    def getLookForward(self):
        return self.__lookForward

    def setValue(self, t, value):
        """Store value at relative time t; None values are silently ignored."""
        if value is not None:
            pos = self.__mapPos(t)
            self.__values[pos] = value

    def getValue(self, t):
        """Return the value at relative time t (NaN if never set)."""
        pos = self.__mapPos(t)
        return self.__values[pos]

    def getValues(self):
        """Return the full numpy array of window values."""
        return self.__values
class Profiler(object):
    """This class is responsible for scanning over historical data and analyzing returns before
    and after the events.

    :param predicate: A :class:`Predicate` subclass responsible for identifying events.
    :type predicate: :class:`Predicate`.
    :param lookBack: The number of bars before the event to analyze. Must be > 0.
    :type lookBack: int.
    :param lookForward: The number of bars after the event to analyze. Must be > 0.
    :type lookForward: int.
    """

    def __init__(self, predicate, lookBack, lookForward):
        assert(lookBack > 0)
        assert(lookForward > 0)
        self.__predicate = predicate
        self.__lookBack = lookBack
        self.__lookForward = lookForward
        self.__feed = None
        # instrument -> RateOfChange data series (1-bar returns).
        self.__rets = {}
        # instrument -> list of (event, next relative time step to fill).
        self.__futureRets = {}
        # instrument -> list of detected Event objects.
        self.__events = {}

    def __addPastReturns(self, instrument, event):
        # Copy the already-known returns into the event's look-back slots.
        # IndexError means not enough history yet; those slots stay NaN and
        # the event will be reported incomplete.
        begin = (event.getLookBack() + 1) * -1
        for t in range(begin, 0):
            try:
                ret = self.__rets[instrument][t]
                if ret is not None:
                    event.setValue(t+1, ret)
            except IndexError:
                pass

    def __addCurrentReturns(self, instrument):
        # Feed the latest return to every event still waiting for forward
        # bars; keep only events whose window is not yet filled.
        nextTs = []
        for event, t in self.__futureRets[instrument]:
            event.setValue(t, self.__rets[instrument][-1])
            if t < event.getLookForward():
                t += 1
                nextTs.append((event, t))
        self.__futureRets[instrument] = nextTs

    def __onBars(self, dateTime, bars):
        # Called by the feed for every new set of bars.
        for instrument in bars.getInstruments():
            self.__addCurrentReturns(instrument)
            eventOccurred = self.__predicate.eventOccurred(instrument, self.__feed[instrument])
            if eventOccurred:
                event = Event(self.__lookBack, self.__lookForward)
                self.__events[instrument].append(event)
                self.__addPastReturns(instrument, event)
                # Add next return for this instrument at t=1.
                self.__futureRets[instrument].append((event, 1))

    def getResults(self):
        """Returns the results of the analysis.

        :rtype: :class:`Results`.
        """
        return Results(self.__events, self.__lookBack, self.__lookForward)

    def run(self, feed, useAdjustedCloseForReturns=True):
        """Runs the analysis using the bars supplied by the feed.

        :param feed: The bar feed to use to run the analysis.
        :type feed: :class:`pyalgotrade.barfeed.BarFeed`.
        :param useAdjustedCloseForReturns: True if adjusted close values should be used to calculate returns.
        :type useAdjustedCloseForReturns: boolean.
        """
        if useAdjustedCloseForReturns:
            assert feed.barsHaveAdjClose(), "Feed doesn't have adjusted close values"
        try:
            self.__feed = feed
            self.__rets = {}
            self.__futureRets = {}
            # One 1-bar rate-of-change series per registered instrument.
            for instrument in feed.getRegisteredInstruments():
                self.__events.setdefault(instrument, [])
                self.__futureRets[instrument] = []
                if useAdjustedCloseForReturns:
                    ds = feed[instrument].getAdjCloseDataSeries()
                else:
                    ds = feed[instrument].getCloseDataSeries()
                self.__rets[instrument] = roc.RateOfChange(ds, 1)
            feed.getNewValuesEvent().subscribe(self.__onBars)
            disp = dispatcher.Dispatcher()
            disp.addSubject(feed)
            disp.run()
        finally:
            # Always unsubscribe so the profiler can be re-run on the feed.
            feed.getNewValuesEvent().unsubscribe(self.__onBars)
def build_plot(profilerResults):
    """Populate the current matplotlib figure with the mean cumulative return
    per relative time step, std-dev error bars after the event, and a
    horizontal reference line at the event-time value.

    :param profilerResults: the analysis output.
    :type profilerResults: :class:`Results`.
    """
    # Calculate each value.
    x = []
    y = []
    std = []
    for t in range(profilerResults.getLookBack()*-1, profilerResults.getLookForward()+1):
        x.append(t)
        values = np.asarray(profilerResults.getValues(t))
        y.append(values.mean())
        std.append(values.std())

    # Plot
    plt.clf()
    plt.plot(x, y, color='#0000FF')
    eventT = profilerResults.getLookBack()
    # Error bars only after the event (t >= 1).
    # stdBegin = eventT + 1
    # plt.errorbar(x[stdBegin:], y[stdBegin:], std[stdBegin:], alpha=0, ecolor='#AAAAFF')
    plt.errorbar(x[eventT+1:], y[eventT+1:], std[eventT+1:], alpha=0, ecolor='#AAAAFF')
    # plt.errorbar(x, y, std, alpha=0, ecolor='#AAAAFF')
    plt.axhline(y=y[eventT], xmin=-1*profilerResults.getLookBack(), xmax=profilerResults.getLookForward(), color='#000000')
    plt.xlim(profilerResults.getLookBack()*-1-0.5, profilerResults.getLookForward()+0.5)
    plt.xlabel('Time')
    plt.ylabel('Cumulative returns')
def plot(profilerResults):
    """Plots the result of the analysis.

    Builds the figure via :func:`build_plot` and shows it interactively.

    :param profilerResults: The result of the analysis
    :type profilerResults: :class:`Results`.
    """
    build_plot(profilerResults)
    plt.show()
| 35.255906 | 123 | 0.626912 |
20470990ac60e392f50c42ca9fb2ed83485ea9ca | 4,844 | py | Python | models/utils/resource_util.py | kolbytn/alfred | ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049 | [
"MIT"
] | null | null | null | models/utils/resource_util.py | kolbytn/alfred | ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049 | [
"MIT"
] | null | null | null | models/utils/resource_util.py | kolbytn/alfred | ef6fd8d61ecc59e1b4c805fce1b1c595af8e1049 | [
"MIT"
] | null | null | null | import os
import psutil
import nvsmi
import nvidia_smi
import time
import datetime
import inspect
import multiprocessing as mp
import threading
def resource_util(pid, interval):
    '''
    Collect one CPU/memory/GPU usage snapshot for a process.

    arg:
        pid: process id (int)
        interval: sampling window in seconds for cpu_percent (the call
            blocks for this long)
    example return:
        {
            'pid': 24832,
            'cpu': 0.0,
            'mem_total': 3371,
            'mem_shared': 502,
            'mem_data': 3039,
            'gpu_id': 0,
            'gpu_mem': 5985.0,
            'gpu_usage': 100,
            'result': [24832, 0.0, 3371, 502, 3039, 0, 5985.0, 100]
        }
    '''
    nvidia_smi.nvmlInit()
    # Get resources used by process
    p = psutil.Process(pid)
    usage = {'pid': pid}
    # 'result' mirrors the dict values as a flat list, in insertion order.
    result = [pid]
    # cpu usage of current PID
    usage['cpu'] = p.cpu_percent(interval=interval)
    result.append(usage['cpu'])
    # Memory usage current PID; >> 20 converts bytes to MiB.
    mem = p.memory_info()
    # print(mem, type(mem))
    usage['mem_total'] = mem.rss >> 20
    result.append(usage['mem_total'])
    usage['mem_shared'] = mem.shared >> 20
    result.append(usage['mem_shared'])
    usage['mem_data'] = mem.data >> 20
    result.append(usage['mem_data'])
    # Look for this PID among GPU processes; the for-else fills in Nones
    # when the process does not appear on any GPU.
    for process in (nvsmi.get_gpu_processes()):
        # print(process.pid, process.gpu_id, process.used_memory)
        if process.pid == pid:
            usage['gpu_id'] = int(process.gpu_id)
            result.append(usage['gpu_id'])
            # NOTE(review): presumably MiB as reported by nvsmi -- confirm.
            usage['gpu_mem'] = process.used_memory
            result.append(usage['gpu_mem'])
            handle = nvidia_smi.nvmlDeviceGetHandleByIndex(int(process.gpu_id))
            res = nvidia_smi.nvmlDeviceGetUtilizationRates(handle)
            usage['gpu_usage'] = res.gpu  # gpu utilization, may not only by this process
            result.append(usage['gpu_usage'])
            break
    else:
        usage['gpu_id'] = None
        result.append(usage['gpu_id'])
        usage['gpu_mem'] = None
        result.append(usage['gpu_mem'])
        usage['gpu_usage'] = None  # gpu utilization, may not only by this process
        result.append(usage['gpu_usage'])
    usage['result'] = result
    return usage
def start_monitor(path, note, pid=None, interval=0.1):
    """
    starts monitoring the resource usage of process

    args:
        path (str): the path of log directory
        note (str): a custom note that is added to log result
        pid (int): process id, defaults to the process that called this function
        interval (float): measurement interval of cpu usage (must be >0)
    return:
        monitor (tuple): pass this object to stop_monitor to finish monitoring

    example usage: measuring the resource usage of func
        monitor = start_monitor(args.dout, "rollout step=10")
        func()
        stop_monitor(monitor)
    stop_monitor appends a CSV row (resource snapshot + timestamp, elapsed
    time, caller name, note) to <path>/resource_monitor.csv.
    """
    t0 = time.time()
    print(f"start monitor pid={pid}, path={path}, note={note}")
    if pid is None:  # fixed: identity comparison instead of `pid == None`
        pid = os.getpid()
    res = []

    def monitor_process(res):
        # Sample once and extend the shared list in place so the caller's
        # reference observes the values after join().
        for i in (resource_util(pid, interval)['result']):
            res.append(i)

    p = threading.Thread(target=monitor_process, args=(res,))
    p.start()
    return (res, p, t0, path, note)
def stop_monitor(measurement):
    """
    Finish a measurement started with start_monitor.

    Joins the sampling thread and appends one CSV row to
    <path>/resource_monitor.csv: the resource snapshot values, a timestamp,
    elapsed seconds, the calling function's name, and the note.

    :param measurement: the (res, thread, t0, path, note) tuple returned by
        start_monitor
    """
    res, p, t0, path, note = measurement
    print(f"stop monitor path={path}, note={note}")
    p.join()
    result = list(res) + [str(datetime.datetime.now()), str(time.time()-t0), inspect.stack()[1][3], note]
    # os.path.join instead of manually re-joining os.path.split pieces.
    outpath = os.path.join(path, 'resource_monitor.csv')
    with open(outpath, 'a') as f:
        f.write(','.join([str(i) for i in result])+'\n')
    # print(','.join([str(i) for i in result])+'\n')
if __name__ == '__main__':
    # Smoke test. The original called start_measure/stop_measure with a
    # multiprocessing.Manager -- neither name exists in this module
    # (NameError); use the actual start_monitor/stop_monitor API instead.
    print("=========")
    measure = start_monitor('.', 'self-test', pid=os.getpid(), interval=0.1)
    stop_monitor(measure)
8e2193567c8a43259157de61c905f4e79ddf5045 | 515 | py | Python | desafiosCursoEmVideo/ex055.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | 1 | 2019-09-02T12:14:58.000Z | 2019-09-02T12:14:58.000Z | desafiosCursoEmVideo/ex055.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | null | null | null | desafiosCursoEmVideo/ex055.py | gomesGabriel/Pythonicos | b491cefbb0479dd83fee267304d0fa30b99786a5 | [
"MIT"
] | null | null | null | print('\033[33m-=-\033[m' * 20)
print('\033[33m************* Maior e menor da sequência *************\033[m')
print('\033[33m-=-\033[m' * 20)
maior = 0
menor = 0
for c in range(1, 6):
p = float(input('Insira o peso da {}ª pessoa: ' .format(c)))
if c == 1:
maior = p
menor = p
else:
if p > maior:
maior = p
if p < menor:
menor = p
print('O maior peso lido foi de: {:.2f}kg' .format(maior))
print('O menor peso lido foi de: {:.2f}kg' .format(menor)) | 30.294118 | 77 | 0.504854 |
c79323d341e851b1ae53b4758a208d572b16af1d | 1,976 | py | Python | service/serving/cls_model.py | hasty-ai/docker-inference-example | f5e8bcccff8011b783c25c9795771be1fd4f732d | [
"MIT"
] | 1 | 2021-11-04T06:50:30.000Z | 2021-11-04T06:50:30.000Z | service/serving/cls_model.py | hasty-ai/docker-inference-example | f5e8bcccff8011b783c25c9795771be1fd4f732d | [
"MIT"
] | null | null | null | service/serving/cls_model.py | hasty-ai/docker-inference-example | f5e8bcccff8011b783c25c9795771be1fd4f732d | [
"MIT"
] | null | null | null | from typing import List, Union
import json
import logging
import os
from torch.utils.data import DataLoader, TensorDataset
import albumentations as A
import numpy as np
import torch
import torchvision
class CLSModel:
    """Image-classification wrapper around a TorchScript export.

    Loads from ``model_path``:
      * ``transforms.json``    -- albumentations pipeline applied per image
      * ``class_mapping.json`` -- list of {'model_idx', 'class_name'} entries
      * ``model.pt``           -- TorchScript module
    """

    def __init__(self, model_path):
        self.model_path = model_path
        self.transforms = A.load(os.path.join(model_path, 'transforms.json'))
        with open(os.path.join(model_path, 'class_mapping.json')) as data:
            mappings = json.load(data)
        # model output index -> human readable class name
        self.class_mapping = {item['model_idx']: item['class_name'] for item in mappings}
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if not torch.cuda.is_available():
            logging.warning("GPU not found")
        model = torch.jit.load(os.path.join(model_path, 'model.pt'))
        self.model = model.to(self.device)
        logging.info(f"Model {model_path} loaded")

    def predict(self, images: List[np.array], batch_size=32):
        """Classify a list of HWC numpy images.

        :param images: list of numpy arrays of shape (height, width, channels)
        :param batch_size: mini-batch size used during inference
        :returns: one dict per image with 'cls_score' (top softmax
            probability), 'class_idx' and 'class_name'
        """
        images_tensor = []
        for i in images:
            tr_img = self.transforms(image=i)['image']
            # HWC -> CHW, then add a leading batch dimension.
            tr_img = torch.from_numpy(tr_img).permute(2, 0, 1)
            images_tensor.append(tr_img.unsqueeze(dim=0))
        images_tensor = torch.cat(images_tensor).float()
        images_dataset = TensorDataset(images_tensor)
        images_loader = DataLoader(images_dataset, batch_size=batch_size, shuffle=False)
        with torch.no_grad():
            preds = []
            for (batch,) in images_loader:
                batch = batch.to(self.device)
                # extend() instead of repeated list concatenation, which
                # rebuilt the whole list per batch (quadratic).
                preds.extend(torch.softmax(self.model(batch), dim=1).cpu().numpy())
        scores = np.max(np.array(preds), axis=1)
        indices = np.argmax(preds, 1)
        results = []
        for idx, score in zip(indices, scores):
            results.append({"cls_score": score,
                            "class_idx": idx,
                            "class_name": self.class_mapping[idx]})
        return results
aefd18b8938b254129db4ccc2f7f0c8b6167b4f1 | 28,307 | py | Python | lib/rucio/web/ui/flask/common/utils.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | lib/rucio/web/ui/flask/common/utils.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | lib/rucio/web/ui/flask/common/utils.py | nimishbongale/rucio | 21c93bd7d02dbc70bc3127ad77fb1a1981b83058 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
from json import dumps, load
from os.path import dirname, join
from time import time
from flask import request, render_template, redirect, make_response
from six.moves.urllib.parse import quote, unquote
from rucio.api import authentication as auth, identity
from rucio.api.account import account_exists, get_account_info, list_account_attributes
from rucio.common.config import config_get, config_get_bool
from rucio.common.extra import import_extras
from rucio.core import identity as identity_core, vo as vo_core
from rucio.db.sqla.constants import AccountType, IdentityType
# Python 2/3 compatibility shims: alias py2 names onto py3 builtins and
# select the right HTML-escaping helper for the running interpreter.
if sys.version_info > (3, 0):
    long = int
    unicode = str

if sys.version_info > (3, 2):
    import html

    def html_escape(s, quote=True):
        # py3: html.escape replaces the removed cgi.escape.
        return html.escape(s, quote)
else:
    import cgi

    def html_escape(s, quote=True):
        return cgi.escape(s, quote)  # pylint: disable-msg=E1101

# SAML support is optional: enabled only when the onelogin extra is installed.
EXTRA_MODULES = import_extras(['onelogin'])

if EXTRA_MODULES['onelogin']:
    from onelogin.saml2.auth import OneLogin_Saml2_Auth  # pylint: disable=import-error
    SAML_SUPPORT = True
else:
    SAML_SUPPORT = False

# check if there is preferred server side config for webui authentication
AUTH_TYPE = config_get('webui', 'auth_type', False, None)
if AUTH_TYPE == 'oidc':
    try:
        AUTH_ISSUER_WEBUI = config_get('webui', 'auth_issuer')
    except:
        # NOTE(review): this renders a template at import time and discards
        # the result -- presumably meant to raise or return; confirm intent.
        render_template("problem.html", msg="Please specify auth_issuer in the [webui] section of the Rucio configuration.")

# if no specific config on the server side - we collect information
# about all authentication options, in particular OIDC
AUTH_ISSUERS = []
if not AUTH_TYPE:
    IDPSECRETS = config_get('oidc', 'idpsecrets', False, None)
    try:
        with open(IDPSECRETS) as client_secret_file:
            client_secrets = load(client_secret_file)
            for iss in client_secrets:
                AUTH_ISSUERS.append(iss.upper())
    except:
        AUTH_ISSUERS = []

MULTI_VO = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)

# Additional error message that can have VO specific information for the user, e.g., support mailing list.
ADDITIONAL_ERROR_MSG = config_get("webui", "additional_error_msg", raise_exception=False, default="")

# excluded characters for injected JavaScript variables
VARIABLE_VALUE_REGEX = re.compile(r"^[\w\- /=,.+*#()\[\]]*$", re.UNICODE)
# TO-DO !!! Remove passing data with account and other params to the functions
# catch these from the webpy input() storage object
# will allow to remove also lines around each use of select_account_name
def prepare_saml_request(environ, data):
    """
    TODO: Validate for Flask
    Prepare a webpy request for SAML.

    :param environ: Flask request.environ object
    :param data: GET or POST data
    :returns: request dict for OneLogin_Saml2_Auth, or None when the request
        did not come in over https
    """
    if environ.get('mod_wsgi.url_scheme') == 'https':
        ret = {
            # BUGFIX: this lookup used the misspelled key 'modwsgi.url_scheme'
            # (no underscore), so 'https' was always reported as 'off' even
            # though the guard above already established the https scheme.
            'https': 'on' if environ.get('mod_wsgi.url_scheme') == 'https' else 'off',
            'http_host': environ.get('HTTP_HOST'),
            'server_port': environ.get('SERVER_PORT'),
            'script_name': environ.get('SCRIPT_NAME'),
            # Uncomment if using ADFS as IdP
            # 'lowercase_urlencoding': True,
        }
        if data:
            ret['get_data'] = data
            ret['post_data'] = data
        return ret
    return None
def add_cookies(response, cookie=None):
    """
    Set every cookie in the iterable on the response.

    :param response: Flask response object (must expose set_cookie(**kwargs))
    :param cookie: iterable of dicts of set_cookie keyword arguments
        (default changed from a mutable ``{}`` literal to None; behavior for
        callers is unchanged)
    :returns: the same response object
    """
    for int_cookie in (cookie or []):
        response.set_cookie(**int_cookie)
    return response
def redirect_to_last_known_url(cookie):
    """
    Checks if there is preferred path in cookie and redirects to it.

    :param cookie: list of cookie dicts to attach to the redirect response
    :returns: redirect to last known path
    """
    # Prefer the path remembered by the UI; fall back to the raw request URI.
    requested_path = request.cookies.get('rucio-requested-path')
    if not requested_path:
        requested_path = request.environ.get('REQUEST_URI')
    # 303 See Other so the browser re-issues the request as a GET.
    resp = add_cookies(make_response(redirect(requested_path, code=303)), cookie)
    return resp
def select_account_name(identitystr, identity_type, vo=None):
    """
    Looks for account (and VO if not known) corresponding to the provided identity.

    :param identitystr: identity string
    :param identity_type: identity_type e.g. x509, saml, oidc, userpass
    :param vo: VO string if already known, else None
    :returns: Tuple of None or account string, None or VO string or list of VO strings
    """
    ui_account = None
    if not MULTI_VO:
        # Single-VO deployment: everything lives in the default VO.
        vo = 'def'
    if vo is not None:
        accounts = identity.list_accounts_for_identity(identitystr, identity_type)
    else:
        # VO unknown: resolve accounts across all VOs via the core layer.
        internal_accounts = identity_core.list_accounts_for_identity(identitystr, IdentityType[identity_type])
        accounts = [account.external for account in internal_accounts]
        vos = [account.vo for account in internal_accounts]
        if vos:
            vos = list(set(vos))
            # If we only have 1 VO that matches the identity use that, otherwise return all possible VOs so the user can choose
            if len(vos) == 1:
                vo = vos[0]
            else:
                return None, vos

    if len(accounts) == 0:
        return None, vo

    # check if ui_account param is set
    ui_account = request.args.get('ui_account', default=None)
    # if yes check if the accounts provided for users identity include this account
    if not ui_account:
        ui_account = request.args.get('account', default=None)
    if ui_account:
        if ui_account not in accounts:
            # Requested account is not one of the identity's accounts.
            return None, vo
        else:
            return ui_account, vo
    else:
        # try to set the default account to the user account, if not available take the first account.
        def_account = accounts[0]
        for account in accounts:
            account_info = get_account_info(account, vo=vo)
            if account_info.account_type == AccountType.USER:
                def_account = account
                break
        # A previously selected account (cookie) overrides the default.
        selected_account = request.cookies.get('rucio-selected-account')
        if (selected_account):
            def_account = selected_account
        ui_account = def_account
    return ui_account, vo
def get_token(token_method, acc=None, vo=None, idt=None, pwd=None):
    """
    Gets a token with the token_method provided.

    :param token_method: the method to get the token
    :param acc: Rucio account string (falls back to X-Rucio-Account header)
    :param vo: VO string (falls back to X-Rucio-VO header)
    :param idt: Rucio identity string (falls back to the client certificate DN)
    :param pwd: Rucio password string (in case of userpass auth_type)
    :returns: None or token string
    """
    if not acc:
        acc = request.environ.get('HTTP_X_RUCIO_ACCOUNT')
    if not vo:
        vo = request.environ.get('HTTP_X_RUCIO_VO')
    if not idt:
        idt = request.environ.get('SSL_CLIENT_S_DN')
        # Guard against a missing DN header: previously this crashed with
        # AttributeError on None.startswith; now it falls through to the
        # (acc and vo and idt) check and returns None.
        if idt and not idt.startswith('/'):
            # Normalize a comma-separated RFC2253 DN to slash notation.
            idt = '/%s' % '/'.join(idt.split(',')[::-1])
    if not (acc and vo and idt):
        return None
    try:
        if pwd:
            token = token_method(acc, idt, pwd, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).get('token')
        else:
            token = token_method(acc, idt, 'webui', request.environ.get('REMOTE_ADDR'), vo=vo).get('token')
        return token
    except Exception:
        # Narrowed from a bare except: (which also swallowed SystemExit /
        # KeyboardInterrupt); any auth failure still yields None.
        return None
def validate_webui_token(from_cookie=True, session_token=None):
    """
    Validates token and returns token validation dictionary.

    :param from_cookie: Token is looked up in cookies if True, otherwise session_token must be provided
    :param session_token: token string
    :returns: None or token validation dictionary
    """
    if from_cookie:
        session_token = request.cookies.get('x-rucio-auth-token')
        if session_token:
            # Cookie values are URL-quoted when set; undo that here.
            session_token = unquote(session_token)
    valid_token_dict = auth.validate_auth_token(session_token)
    if not valid_token_dict or not session_token:
        return None
    else:
        # Keep the raw token alongside the validation payload for later reuse.
        valid_token_dict['token'] = session_token  # pylint: disable=E1137
        return valid_token_dict
def access_granted(valid_token_dict, template, title):
    """
    Assuming validated token dictionary is provided, renders required template page.

    :param valid_token_dict: token validation dictionary
    :param template: the template name that should be rendered
    :param title: page title passed through to the template
    :returns: rendered base template with template content
    """
    # Permission policy name is injected so templates can adapt per policy.
    policy = config_get('policy', 'permission')
    return render_template(template, token=valid_token_dict['token'], account=valid_token_dict['account'], vo=valid_token_dict['vo'], policy=policy, title=title)
def finalize_auth(token, identity_type, cookie_dict_extra=None):
    """
    Finalises login. Validates provided token, sets cookies
    and redirects to the final page.

    :param token: token string
    :param identity_type: identity_type e.g. x509, userpass, oidc, saml
    :param cookie_dict_extra: extra cookies to set, dictionary expected
    :returns: redirects to the final page or renders a page with an error message.
    """
    cookie = []
    valid_token_dict = validate_webui_token(from_cookie=False, session_token=token)
    if not valid_token_dict:
        return render_template("problem.html", msg="It was not possible to validate and finalize your login with the provided token.")
    try:
        attribs = list_account_attributes(valid_token_dict['account'], valid_token_dict['vo'])
        accounts = identity.list_accounts_for_identity(valid_token_dict['identity'], identity_type)
        # Build a space-separated account list for the cookie value.
        accvalues = ""
        for acc in accounts:
            accvalues += acc + " "
        accounts = accvalues[:-1]

        # All values are URL-quoted; validate_webui_token unquotes on read.
        cookie.extend([{'key': 'x-rucio-auth-token', 'value': quote(token)},
                       {'key': 'x-rucio-auth-type', 'value': quote(identity_type)},
                       {'key': 'rucio-auth-token-created-at', 'value': str(long(time()))},
                       {'key': 'rucio-available-accounts', 'value': quote(accounts)},
                       {'key': 'rucio-account-attr', 'value': quote(dumps(attribs))},
                       {'key': 'rucio-selected-account', 'value': quote(valid_token_dict['account'])},
                       {'key': 'rucio-selected-vo', 'value': quote(valid_token_dict['vo'])}])
        if cookie_dict_extra:
            for key, value in cookie_dict_extra.items():
                cookie.append({'key': key, 'value': value})
        return redirect_to_last_known_url(cookie)
    except Exception:
        # Boundary handler: any failure during cookie construction falls back
        # to a generic error page.
        return render_template("problem.html", msg="It was not possible to validate and finalize your login with the provided token.")
def get_vo_descriptions(vos):
    """
    Gets the description for each VO in the list.

    :param vos: List of 3 character VO strings
    :returns: List of tuples containing VO string, VO description
    """
    wanted = set(vos)
    return [(entry['vo'], entry['description'])
            for entry in vo_core.list_vos()
            if entry['vo'] in wanted]
# AUTH_TYPE SPECIFIC METHODS FOLLOW:
def x509token_auth(data=None):
    """
    Manages login via X509 certificate.

    :param data: data object containing account string can be provided
    :returns: final page or a page with an error message
    """
    # checking if Rucio auth server succeeded to verify the certificate
    if request.environ.get('SSL_CLIENT_VERIFY') != 'SUCCESS':
        return render_template("problem.html", msg="No certificate provided. Please authenticate with a certificate registered in Rucio.")
    dn = request.environ.get('SSL_CLIENT_S_DN')
    if not dn.startswith('/'):
        # Normalize a comma-separated RFC2253 DN to slash notation.
        dn = '/%s' % '/'.join(dn.split(',')[::-1])
    if not MULTI_VO:
        # Single-VO deployment: everything lives in the default VO.
        ui_vo = 'def'
    elif hasattr(data, 'vo') and data.vo:
        ui_vo = data.vo
    else:
        ui_vo = None
    if hasattr(data, 'account') and data.account:
        ui_account = data.account
    else:
        ui_account = None

    # Resolve whichever of account/VO is still unknown from the DN.
    if ui_account is None and ui_vo is None:
        ui_account, ui_vo = select_account_name(dn, 'x509', ui_vo)
    elif ui_account is None:
        ui_account, _ = select_account_name(dn, 'x509', ui_vo)
    elif ui_vo is None:
        _, ui_vo = select_account_name(dn, 'x509', ui_vo)

    # Try to eliminate VOs based on the account name (if we have one), if we still have multiple options let the user select one
    if type(ui_vo) is list:
        if ui_account:
            valid_vos = []
            for vo in ui_vo:
                if account_exists(ui_account, vo):
                    valid_vos.append(vo)
            if len(valid_vos) == 0:
                return render_template('problem.html', msg=('<br><br>Your certificate (%s) is not mapped to (possibly any) rucio account: %s at any VO.' % (html_escape(dn), html_escape(ui_account))))
            elif len(valid_vos) == 1:
                ui_vo = valid_vos[0]
            else:
                vos_with_desc = get_vo_descriptions(valid_vos)
                return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))
        else:
            vos_with_desc = get_vo_descriptions(ui_vo)
            return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))

    # Build the (possibly needed) error message up front; it is used by both
    # the missing-account case and the failed-token case below.
    if not ui_account:
        if MULTI_VO:
            msg = "<br><br>Your certificate (%s) is not mapped to (possibly any) rucio account at VO: %s." % (html_escape(dn), html_escape(ui_vo))
        else:
            msg = "<br><br>Your certificate (%s) is not mapped to (possibly any) rucio account." % (html_escape(dn))
    else:
        if MULTI_VO:
            msg = "<br><br>Your certificate (%s) is not mapped to (possibly any) rucio account: %s at VO: %s." % (html_escape(dn), html_escape(ui_account), html_escape(ui_vo))
        else:
            msg = "<br><br>Your certificate (%s) is not mapped to (possibly any) rucio account: %s." % (html_escape(dn), html_escape(ui_account))

    if ADDITIONAL_ERROR_MSG:
        msg += ADDITIONAL_ERROR_MSG
    if not ui_account:
        return render_template("problem.html", msg=msg)
    token = get_token(auth.get_auth_token_x509, acc=ui_account, vo=ui_vo, idt=dn)
    if not token:
        return render_template("problem.html", msg=msg)
    return finalize_auth(token, 'x509')
def userpass_auth():
    """
    Manages login via Rucio USERPASS method.

    :returns: final page or a page with an error message
    """
    ui_account = request.args.get('account')
    ui_vo = request.args.get('vo')
    username = request.form.get('username')
    password = request.form.get('password')
    if not username and not password:
        return render_template('problem.html', msg="No input credentials were provided.")
    if not MULTI_VO:
        # Single-VO deployment: everything lives in the default VO.
        ui_vo = 'def'
    # Resolve whichever of account/VO is still unknown from the identity.
    if ui_account is None and ui_vo is None:
        ui_account, ui_vo = select_account_name(username, 'userpass', ui_vo)
    elif ui_account is None:
        ui_account, _ = select_account_name(username, 'userpass', ui_vo)
    elif ui_vo is None:
        _, ui_vo = select_account_name(username, 'userpass', ui_vo)

    # Multiple candidate VOs: narrow down via the account if possible,
    # otherwise let the user pick one on the login page.
    if type(ui_vo) is list:
        if ui_account:
            valid_vos = []
            for vo in ui_vo:
                if account_exists(ui_account, vo):
                    valid_vos.append(vo)
            if len(valid_vos) == 0:
                return render_template('problem.html', msg='Cannot find any Rucio account %s associated with identity %s at any VO.' % (html_escape(ui_account), html_escape(username)))
            elif len(valid_vos) == 1:
                ui_vo = valid_vos[0]
            else:
                vos_with_desc = get_vo_descriptions(valid_vos)
                return render_template('login.html', account=ui_account, vo=None, possible_vos=vos_with_desc)
        else:
            vos_with_desc = get_vo_descriptions(ui_vo)
            return render_template('login.html', account=None, vo=None, possible_vos=vos_with_desc)

    if not ui_account:
        if MULTI_VO:
            return render_template('problem.html', msg='Cannot get find any account associated with %s identity at VO %s.' % (html_escape(username), html_escape(ui_vo)))
        else:
            return render_template('problem.html', msg='Cannot get find any account associated with %s identity.' % (html_escape(username)))
    token = get_token(auth.get_auth_token_user_pass, acc=ui_account, vo=ui_vo, idt=username, pwd=password)
    if not token:
        # BUGFIX: the '%' operator was applied to render_template()'s return
        # value instead of the msg format string (TypeError at runtime);
        # format the message before passing it to the template.
        if MULTI_VO:
            return render_template('problem.html', msg='Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s at VO %s.' % (html_escape(username), html_escape(ui_account), html_escape(ui_vo)))
        else:
            return render_template('problem.html', msg='Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s.' % (html_escape(username), html_escape(ui_account)))
    return finalize_auth(token, 'userpass')
def saml_auth(method, data=None):
    """
    # TODO: Validate for Flask
    Login with SAML.

    :param method: HTTP method type, "GET" or "POST"
    :param data: optional object carrying ``account`` and/or ``vo`` attributes
    :returns: rendered final page or a page with error message
    """
    SAML_PATH = join(dirname(__file__), 'saml/')
    req = prepare_saml_request(request.environ, data)
    samlauth = OneLogin_Saml2_Auth(req, custom_base_path=SAML_PATH)
    saml_user_data = request.cookies.get('saml-user-data')
    # Resolve the VO: fixed 'def' on single-VO deployments, otherwise take it
    # from the request data when given, else deduce it below.
    if not MULTI_VO:
        ui_vo = 'def'
    elif hasattr(data, 'vo') and data.vo:
        ui_vo = data.vo
    else:
        ui_vo = None
    if hasattr(data, 'account') and data.account:
        ui_account = data.account
    else:
        ui_account = None

    if method == "GET":
        # If user data is not present, redirect to IdP for authentication
        if not saml_user_data:
            return redirect(samlauth.login(), code=303)

        # If user data is present but token is not valid, create a new one
        saml_nameid = request.cookies.get('saml-nameid')
        # Fill in whichever of (account, vo) is still unknown from the SAML name id.
        if ui_account is None and ui_vo is None:
            ui_account, ui_vo = select_account_name(saml_nameid, 'saml', ui_vo)
        elif ui_account is None:
            ui_account, _ = select_account_name(saml_nameid, 'saml', ui_vo)
        elif ui_vo is None:
            _, ui_vo = select_account_name(saml_nameid, 'saml', ui_vo)

        # Try to eliminate VOs based on the account name (if we have one), if we still have multiple options let the user select one
        if type(ui_vo) is list:
            if ui_account:
                valid_vos = []
                for vo in ui_vo:
                    if account_exists(ui_account, vo):
                        valid_vos.append(vo)

                if len(valid_vos) == 0:
                    return render_template("problem.html", msg=('Cannot find any Rucio account %s associated with identity %s at any VO.' % (html_escape(ui_account), html_escape(saml_nameid))))
                elif len(valid_vos) == 1:
                    ui_vo = valid_vos[0]
                else:
                    vos_with_desc = get_vo_descriptions(valid_vos)
                    return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))
            else:
                vos_with_desc = get_vo_descriptions(ui_vo)
                return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))

        if not ui_account:
            if MULTI_VO:
                return render_template("problem.html", msg='Cannot get find any account associated with %s identity at VO %s.' % (html_escape(saml_nameid), html_escape(ui_vo)))
            else:
                return render_template("problem.html", msg='Cannot get find any account associated with %s identity.' % (html_escape(saml_nameid)))
        token = get_token(auth.get_auth_token_saml, acc=ui_account, vo=ui_vo, idt=saml_nameid)
        if not token:
            if MULTI_VO:
                return render_template("problem.html", msg=('Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s at VO %s.') % (html_escape(saml_nameid), html_escape(ui_account), html_escape(ui_vo)))
            else:
                return render_template("problem.html", msg=('Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s.') % (html_escape(saml_nameid), html_escape(ui_account)))
        return finalize_auth(token, 'saml')

    # If method is POST, check the received SAML response and redirect to home if valid
    samlauth.process_response()
    errors = samlauth.get_errors()
    if not errors:
        if samlauth.is_authenticated():
            saml_nameid = samlauth.get_nameid()
            cookie_extra = {'saml-nameid': saml_nameid}
            cookie_extra['saml-user-data'] = samlauth.get_attributes()
            cookie_extra['saml-session-index'] = samlauth.get_session_index()
            # WHY THIS ATTEMPTS TO GET A NEW TOKEN ?
            # WE SHOULD HAVE IT/GET IT FROM COOKIE OR DB AND JUST REDIRECT, NO ?
            if ui_account is None and ui_vo is None:
                ui_account, ui_vo = select_account_name(saml_nameid, 'saml', ui_vo)
            elif ui_account is None:
                ui_account, _ = select_account_name(saml_nameid, 'saml', ui_vo)
            elif ui_vo is None:
                _, ui_vo = select_account_name(saml_nameid, 'saml', ui_vo)

            # Try to eliminate VOs based on the account name (if we have one), if we still have multiple options let the user select one
            if type(ui_vo) is list:
                if ui_account:
                    valid_vos = []
                    for vo in ui_vo:
                        if account_exists(ui_account, vo):
                            valid_vos.append(vo)

                    if len(valid_vos) == 0:
                        return render_template("problem.html", msg=('Cannot find any Rucio account %s associated with identity %s at any VO.' % (html_escape(ui_account), html_escape(saml_nameid))))
                    elif len(valid_vos) == 1:
                        ui_vo = valid_vos[0]
                    else:
                        vos_with_desc = get_vo_descriptions(valid_vos)
                        return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))
                else:
                    vos_with_desc = get_vo_descriptions(ui_vo)
                    return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))

            if not ui_account:
                if MULTI_VO:
                    return render_template("problem.html", msg='Cannot get find any account associated with %s identity at VO %s.' % (html_escape(saml_nameid), html_escape(ui_vo)))
                else:
                    return render_template("problem.html", msg='Cannot get find any account associated with %s identity.' % (html_escape(saml_nameid)))
            token = get_token(auth.get_auth_token_saml, acc=ui_account, vo=ui_vo, idt=saml_nameid)
            if not token:
                if MULTI_VO:
                    return render_template("problem.html", msg=('Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s at VO %s.') % (html_escape(saml_nameid), html_escape(ui_account), html_escape(ui_vo)))
                else:
                    return render_template("problem.html", msg=('Cannot get auth token. It is possible that the presented identity %s is not mapped to any Rucio account %s.') % (html_escape(saml_nameid), html_escape(ui_account)))
            return finalize_auth(token, 'saml', cookie_extra)
        return render_template("problem.html", msg="Not authenticated")
    return render_template("problem.html", msg="Error while processing SAML")
def oidc_auth(account, issuer, ui_vo=None):
    """
    # TODO: Validate for Flask
    Open ID Connect Login

    :param account: Rucio account string
    :param issuer: issuer key (e.g. xdc, wlcg) as in the idpsecrets.json file
    :param ui_vo: 3 character string to identify the VO, if None will attempt to deduce it from `account`
    :returns: rendered final page or a page with error message
    """
    if not account:
        account = 'webui'
    if ui_vo is None:
        # Find every VO where this account exists; ask the user to pick one
        # when the choice is ambiguous.
        all_vos = [vo['vo'] for vo in vo_core.list_vos()]
        valid_vos = []
        for vo in all_vos:
            if account_exists(account, vo):
                valid_vos.append(vo)
        if len(valid_vos) == 0:
            return render_template("problem.html", msg=('Cannot find any Rucio account %s at any VO.' % html_escape(account)))
        elif len(valid_vos) == 1:
            ui_vo = valid_vos[0]
        else:
            vos_with_desc = get_vo_descriptions(valid_vos)
            return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT, possible_vos=vos_with_desc)))

    if not issuer:
        return render_template("problem.html", msg="Please provide IdP issuer.")
    # Rebuild this app's base URL so the auth server can redirect back to us.
    realhome = request.environ.get('REQUEST_SCHEME') + '://' + request.environ.get('HTTP_HOST') + request.environ.get('SCRIPT_NAME')
    kwargs = {'audience': None,
              'auth_scope': None,
              'issuer': issuer.lower(),
              'auto': True,
              'polling': False,
              'refresh_lifetime': None,
              'ip': None,
              'webhome': realhome + '/oidc_final'}
    auth_url = auth.get_auth_oidc(account, vo=ui_vo, **kwargs)
    if not auth_url:
        return render_template("problem.html", msg=("It was not possible to get the OIDC authentication url "
                                                    "from the Rucio auth server. "
                                                    "In case you provided your account name, make sure it is "
                                                    "known to Rucio."))
    return redirect(auth_url, code=303)
def authenticate(template, title):
    """
    Authentication management method.

    :param template: the template name that should be rendered
    :param title: page title forwarded to ``access_granted``
    :returns: rendered final page or a page with error message
    """
    global AUTH_ISSUERS, SAML_SUPPORT, AUTH_TYPE
    cookie = []
    valid_token_dict = validate_webui_token()
    if not valid_token_dict:
        # Remember where the user wanted to go so we can redirect after login.
        cookie.append({'key': 'rucio-requested-path', 'value': request.environ.get('REQUEST_URI')})
    else:
        return access_granted(valid_token_dict, template, title)

    # login without any known server config
    if not AUTH_TYPE:
        return add_cookies(make_response(render_template("select_login_method.html", oidc_issuers=AUTH_ISSUERS, saml_support=SAML_SUPPORT)), cookie)
    # for AUTH_TYPE predefined by the server continue
    else:
        if AUTH_TYPE == 'userpass':
            return redirect('/login', code=303)
        elif AUTH_TYPE == 'x509':
            return x509token_auth(None)
        elif AUTH_TYPE == 'x509_userpass':
            if request.environ.get('SSL_CLIENT_VERIFY') == 'SUCCESS':
                return x509token_auth(None)
            return render_template("no_certificate.html")
        elif AUTH_TYPE == 'oidc':
            return oidc_auth(None, AUTH_ISSUER_WEBUI)
        elif AUTH_TYPE == 'saml':
            return saml_auth("GET")
        return render_template('problem.html', msg='Invalid auth type')
| 45.436597 | 258 | 0.652771 |
730f4a033ffc7d49982903670dab0f13f31321be | 2,011 | py | Python | test/test_paradigms.py | sanskrit/padmini | 8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0 | [
"MIT"
] | 1 | 2022-03-01T05:05:04.000Z | 2022-03-01T05:05:04.000Z | test/test_paradigms.py | sanskrit/padmini | 8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0 | [
"MIT"
] | null | null | null | test/test_paradigms.py | sanskrit/padmini | 8e7e8946a7d2df9c941f689ea4bc7b6ebb7ca1d0 | [
"MIT"
] | null | null | null | import itertools
import json
import pytest
from padmini.dhatupatha import load_dhatus
from padmini.prakarana import tin_pratyaya as tin
from .utils import run_all_permutations
dhatus = {f"{x.gana}.{x.number}": x for x in load_dhatus()}
def iter_paradigm_tests(filename):
    """Yield ``(dhatu_code, la, tags, padas)`` tuples from a paradigm JSON file.

    Top-level string entries in the JSON list are treated as comments and
    skipped.  Each remaining entry must provide ``"dhatu"``, ``"la"`` and
    ``"padas"`` (a list of comma-separated form strings); ``"tags"`` is
    optional and defaults to ``[]``.
    """
    # Read everything up front so the file handle is closed before the first
    # yield (the generator may be iterated long after this point).
    with open(filename) as f:
        entries = json.load(f)
    for datum in entries:
        if isinstance(datum, str):
            continue
        padas = set()
        for item in datum["padas"]:
            padas.update(item.split(","))
        yield (datum["dhatu"], datum["la"], datum.get("tags", []), padas)
sarvadhatuka = iter_paradigm_tests("test/data/sarvadhatuka.json")


@pytest.mark.parametrize("dhatu_code, la, tags, padas", sarvadhatuka)
def test_sarvadhatuka(dhatu_code, la, tags, padas):
    """Check that the generated sarvadhatuka forms match the expected padas."""
    prakriya_map = {}
    for purusha, vacana in itertools.product(tin.PURUSHA, tin.VACANA):
        # Combine the person/number pair with any extra tags from the data
        # file.  (Previously the parametrized ``tags`` argument was shadowed
        # by a local ``tags = {purusha, vacana}`` and silently ignored,
        # unlike the sibling test_ardhadhatuka.)
        p_tags = {purusha, vacana} | set(tags)
        prakriyas = run_all_permutations(dhatu_code, la, p_tags)
        for p in prakriyas:
            prakriya_map[p.text] = p
    actual = set(prakriya_map.keys())

    # Debug print only for errors: show the derivations of unexpected forms.
    if padas != actual:
        for form in actual:
            if form not in padas:
                prakriya_map[form].debug_print()

    assert padas == actual
ardhadhatuka = iter_paradigm_tests("test/data/ardhadhatuka.json")


@pytest.mark.parametrize("dhatu_code, la, tags, padas", ardhadhatuka)
def test_ardhadhatuka(dhatu_code, la, tags, padas):
    """Check the generated ardhadhatuka forms, forwarding extra data-file tags."""
    prakriya_map = {}
    for purusha, vacana in itertools.product(tin.PURUSHA, tin.VACANA):
        p_tags = {purusha, vacana} | set(tags)
        prakriyas = run_all_permutations(dhatu_code, la, p_tags)
        for p in prakriyas:
            prakriya_map[p.text] = p
    actual = set(prakriya_map.keys())

    # Debug print only for errors: show just the unexpected forms, matching
    # test_sarvadhatuka (previously every generated form was printed).
    if padas != actual:
        for form in sorted(actual):
            if form not in padas:
                prakriya_map[form].debug_print()

    assert padas == actual
70a55be3a95f49c8b2ddfe940e6d5f5e745f61d7 | 10,964 | py | Python | certbot-nginx/certbot_nginx/_internal/nginxparser.py | infinite-skx/certbot | b751112eec8c355163ab80d1091e1ce6f5b3c352 | [
"Apache-2.0"
] | null | null | null | certbot-nginx/certbot_nginx/_internal/nginxparser.py | infinite-skx/certbot | b751112eec8c355163ab80d1091e1ce6f5b3c352 | [
"Apache-2.0"
] | 1 | 2022-01-24T20:51:52.000Z | 2022-01-24T20:51:52.000Z | certbot-nginx/certbot_nginx/_internal/nginxparser.py | infinite-skx/certbot | b751112eec8c355163ab80d1091e1ce6f5b3c352 | [
"Apache-2.0"
] | null | null | null | """Very low-level nginx config parser based on pyparsing."""
# Forked from https://github.com/fatiherikli/nginxparser (MIT Licensed)
import copy
import logging
import typing
from typing import Any
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import overload
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
from pyparsing import Combine
from pyparsing import Forward
from pyparsing import Group
from pyparsing import Literal
from pyparsing import Optional
from pyparsing import ParseResults
from pyparsing import QuotedString
from pyparsing import Regex
from pyparsing import restOfLine
from pyparsing import stringEnd
from pyparsing import White
from pyparsing import ZeroOrMore
if TYPE_CHECKING:
from typing_extensions import SupportsIndex # typing.SupportsIndex not supported on Python 3.6
logger = logging.getLogger(__name__)
class RawNginxParser:
# pylint: disable=pointless-statement
"""A class that parses nginx configuration with pyparsing."""
# constants
space = Optional(White()).leaveWhitespace()
required_space = White().leaveWhitespace()
left_bracket = Literal("{").suppress()
right_bracket = space + Literal("}").suppress()
semicolon = Literal(";").suppress()
dquoted = QuotedString('"', multiline=True, unquoteResults=False, escChar='\\')
squoted = QuotedString("'", multiline=True, unquoteResults=False, escChar='\\')
quoted = dquoted | squoted
head_tokenchars = Regex(r"(\$\{)|[^{};\s'\"]") # if (last_space)
tail_tokenchars = Regex(r"(\$\{)|[^{;\s]") # else
tokenchars = Combine(head_tokenchars + ZeroOrMore(tail_tokenchars))
paren_quote_extend = Combine(quoted + Literal(')') + ZeroOrMore(tail_tokenchars))
# note: ')' allows extension, but then we fall into else, not last_space.
token = paren_quote_extend | tokenchars | quoted
whitespace_token_group = space + token + ZeroOrMore(required_space + token) + space
assignment = whitespace_token_group + semicolon
comment = space + Literal('#') + restOfLine
block = Forward()
# order matters! see issue 518, and also http { # server { \n}
contents = Group(comment) | Group(block) | Group(assignment)
block_begin = Group(whitespace_token_group)
block_innards = Group(ZeroOrMore(contents) + space).leaveWhitespace()
block << block_begin + left_bracket + block_innards + right_bracket
script = ZeroOrMore(contents) + space + stringEnd
script.parseWithTabs().leaveWhitespace()
def __init__(self, source: str) -> None:
self.source = source
def parse(self) -> ParseResults:
"""Returns the parsed tree."""
return self.script.parseString(self.source)
def as_list(self) -> List[Any]:
"""Returns the parsed tree as a list."""
return self.parse().asList()
class RawNginxDumper:
    """A class that dumps nginx configuration from the provided tree."""

    def __init__(self, blocks: List[Any]) -> None:
        # blocks: the *spaced* parse tree (whitespace entries included)
        self.blocks = blocks

    def __iter__(self, blocks: typing.Optional[List[Any]] = None) -> Iterator[str]:
        """Iterates the dumped nginx content."""
        blocks = blocks or self.blocks
        for b0 in blocks:
            if isinstance(b0, str):
                yield b0
                continue
            # Work on a copy so pop() below does not mutate the tree.
            item = copy.deepcopy(b0)
            if spacey(item[0]):
                yield item.pop(0)  # indentation
                if not item:
                    continue

            if isinstance(item[0], list):  # block
                yield "".join(item.pop(0)) + '{'
                # Recurse into the block body, one entry at a time.
                for parameter in item.pop(0):
                    for line in self.__iter__([parameter]):  # negate "for b0 in blocks"
                        yield line
                yield '}'
            else:  # not a block - list of strings
                semicolon = ";"
                if isinstance(item[0], str) and item[0].strip() == '#':  # comment
                    semicolon = ""
                yield "".join(item) + semicolon

    def __str__(self) -> str:
        """Return the parsed block as a string."""
        return ''.join(self)
spacey = lambda x: (isinstance(x, str) and x.isspace()) or x == ''
class UnspacedList(List[Any]):
    """Wrap a list [of lists], making any whitespace entries magically invisible"""

    def __init__(self, list_source: Iterable[Any]) -> None:
        # ensure our argument is not a generator, and duplicate any sublists
        # ``spaced`` keeps the original (whitespace-preserving) tree;
        # ``dirty`` records whether this node has been modified since load.
        self.spaced = copy.deepcopy(list(list_source))
        self.dirty = False

        # Turn self into a version of the source list that has spaces removed
        # and all sub-lists also UnspacedList()ed
        super().__init__(list_source)
        for i, entry in reversed(list(enumerate(self))):
            if isinstance(entry, list):
                sublist = UnspacedList(entry)
                super().__setitem__(i, sublist)
                self.spaced[i] = sublist.spaced
            elif spacey(entry):
                # don't delete comments
                if "#" not in self[:i]:
                    super().__delitem__(i)

    @overload
    def _coerce(self, inbound: None) -> Tuple[None, None]: ...

    @overload
    def _coerce(self, inbound: str) -> Tuple[str, str]: ...

    @overload
    def _coerce(self, inbound: List[Any]) -> Tuple["UnspacedList", List[Any]]: ...

    def _coerce(self, inbound: Any) -> Tuple[Any, Any]:
        """
        Coerce some inbound object to be appropriately usable in this object

        :param inbound: string or None or list or UnspacedList

        :returns: (coerced UnspacedList or string or None, spaced equivalent)
        :rtype: tuple

        """
        if not isinstance(inbound, list):  # str or None
            return inbound, inbound
        else:
            if not hasattr(inbound, "spaced"):
                inbound = UnspacedList(inbound)
            return inbound, inbound.spaced

    def insert(self, i: int, x: Any) -> None:
        """Insert object before index."""
        item, spaced_item = self._coerce(x)
        # Mirror the insertion into the spaced tree at the equivalent position.
        slicepos = self._spaced_position(i) if i < len(self) else len(self.spaced)
        self.spaced.insert(slicepos, spaced_item)
        if not spacey(item):
            super().insert(i, item)
        self.dirty = True

    def append(self, x: Any) -> None:
        """Append object to the end of the list."""
        item, spaced_item = self._coerce(x)
        self.spaced.append(spaced_item)
        if not spacey(item):
            super().append(item)
        self.dirty = True

    def extend(self, x: Any) -> None:
        """Extend list by appending elements from the iterable."""
        item, spaced_item = self._coerce(x)
        self.spaced.extend(spaced_item)
        super().extend(item)
        self.dirty = True

    def __add__(self, other: List[Any]) -> "UnspacedList":
        # Concatenation returns a new, already-dirty copy.
        new_list = copy.deepcopy(self)
        new_list.extend(other)
        new_list.dirty = True
        return new_list

    def pop(self, *args: Any, **kwargs: Any) -> None:
        """Function pop() is not implemented for UnspacedList"""
        raise NotImplementedError("UnspacedList.pop() not yet implemented")

    def remove(self, *args: Any, **kwargs: Any) -> None:
        """Function remove() is not implemented for UnspacedList"""
        raise NotImplementedError("UnspacedList.remove() not yet implemented")

    def reverse(self) -> None:
        """Function reverse() is not implemented for UnspacedList"""
        raise NotImplementedError("UnspacedList.reverse() not yet implemented")

    def sort(self, *_args: Any, **_kwargs: Any) -> None:
        """Function sort() is not implemented for UnspacedList"""
        raise NotImplementedError("UnspacedList.sort() not yet implemented")

    def __setslice__(self, *args: Any, **kwargs: Any) -> None:
        raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")

    def __setitem__(self, i: Union["SupportsIndex", slice], value: Any) -> None:
        # Keep the spaced tree in sync with the unspaced assignment.
        if isinstance(i, slice):
            raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
        item, spaced_item = self._coerce(value)
        self.spaced.__setitem__(self._spaced_position(i), spaced_item)
        if not spacey(item):
            super().__setitem__(i, item)
        self.dirty = True

    def __delitem__(self, i: Union["SupportsIndex", slice]) -> None:
        # Deletion must remove the entry from both representations.
        if isinstance(i, slice):
            raise NotImplementedError("Slice operations on UnspacedLists not yet implemented")
        self.spaced.__delitem__(self._spaced_position(i))
        super().__delitem__(i)
        self.dirty = True

    def __deepcopy__(self, memo: Any) -> "UnspacedList":
        # Rebuild from the spaced tree so both views are copied consistently.
        new_spaced = copy.deepcopy(self.spaced, memo=memo)
        new_list = UnspacedList(new_spaced)
        new_list.dirty = self.dirty
        return new_list

    def is_dirty(self) -> bool:
        """Recurse through the parse tree to figure out if any sublists are dirty"""
        if self.dirty:
            return True
        return any((isinstance(x, UnspacedList) and x.is_dirty() for x in self))

    def _spaced_position(self, idx: "SupportsIndex") -> int:
        """Convert from indexes in the unspaced list to positions in the spaced one"""
        int_idx = idx.__index__()
        pos = spaces = 0
        # Normalize indexes like list[-1] etc, and save the result
        if int_idx < 0:
            int_idx = len(self) + int_idx
        if not 0 <= int_idx < len(self):
            raise IndexError("list index out of range")
        int_idx0 = int_idx
        # Count the number of spaces in the spaced list before int_idx in the unspaced one
        while int_idx != -1:
            if spacey(self.spaced[pos]):
                spaces += 1
            else:
                int_idx -= 1
            pos += 1
        return int_idx0 + spaces
# Shortcut functions to respect Python's serialization interface
# (like pyyaml, picker or json)
def loads(source: str) -> UnspacedList:
    """Parse an nginx configuration string into an :class:`UnspacedList`.

    :param str source: The string to parse

    :returns: The parsed tree
    :rtype: list

    """
    raw_tree = RawNginxParser(source).as_list()
    return UnspacedList(raw_tree)
def load(file_: IO[Any]) -> UnspacedList:
    """Parse an nginx configuration from an open file object.

    :param file file_: The file to parse

    :returns: The parsed tree
    :rtype: list

    """
    contents = file_.read()
    return loads(contents)
def dumps(blocks: UnspacedList) -> str:
    """Serialize a parsed tree back to an nginx configuration string.

    :param UnspacedList blocks: The parsed tree

    :rtype: six.text_type

    """
    dumper = RawNginxDumper(blocks.spaced)
    return str(dumper)
def dump(blocks: UnspacedList, file_: IO[Any]) -> None:
    """Serialize a parsed tree and write it to a file.

    :param UnspacedList blocks: The parsed tree
    :param IO[Any] file_: The file stream to dump to. It must be opened with
                          Unicode encoding.

    :rtype: None

    """
    text = dumps(blocks)
    file_.write(text)
| 35.141026 | 99 | 0.630974 |
78cbb66f1f6a3658a39425ba99f10827b6517ed3 | 1,326 | py | Python | authentik/providers/oauth2/migrations/0009_oauth2provider_verification_keys_and_more.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/providers/oauth2/migrations/0009_oauth2provider_verification_keys_and_more.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/providers/oauth2/migrations/0009_oauth2provider_verification_keys_and_more.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | # Generated by Django 4.0.3 on 2022-03-29 19:37
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("authentik_crypto", "0003_certificatekeypair_managed"),
("authentik_providers_oauth2", "0008_rename_rsa_key_oauth2provider_signing_key_and_more"),
]
operations = [
migrations.AddField(
model_name="oauth2provider",
name="verification_keys",
field=models.ManyToManyField(
help_text="DEPRECATED. JWTs created with the configured certificates can authenticate with this provider.",
related_name="+",
to="authentik_crypto.certificatekeypair",
verbose_name="Allowed certificates for JWT-based client_credentials",
),
),
migrations.AlterField(
model_name="oauth2provider",
name="signing_key",
field=models.ForeignKey(
help_text="Key used to sign the tokens. Only required when JWT Algorithm is set to RS256.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="authentik_crypto.certificatekeypair",
verbose_name="Signing Key",
),
),
]
| 35.837838 | 123 | 0.61991 |
a86cb3825a08ee1a45872adc3c64fb55e6e653a8 | 1,390 | py | Python | datasets/multiview_dataset.py | marcbadger/avian-mesh | 71a884153e8970de6ee1d934949914790e40e80e | [
"MIT"
] | 39 | 2020-08-17T04:09:17.000Z | 2022-01-11T06:21:20.000Z | datasets/multiview_dataset.py | marcbadger/avian-mesh | 71a884153e8970de6ee1d934949914790e40e80e | [
"MIT"
] | 5 | 2020-08-06T11:16:43.000Z | 2022-02-07T01:48:46.000Z | datasets/multiview_dataset.py | marcbadger/avian-mesh | 71a884153e8970de6ee1d934949914790e40e80e | [
"MIT"
] | 7 | 2020-08-04T23:03:35.000Z | 2021-11-04T15:16:56.000Z | import os
import torch
class Multiview_Dataset(torch.utils.data.Dataset):
    """Dataset of bird instances that have multiview matches.

    Indexing yields one instance together with all of its multiview
    annotations: image ids/paths, frames, bounding boxes, keypoints and
    full-size segmentation masks.
    """

    def __init__(self, root='data/cowbird/images',
                 annfile='data/cowbird/annotations/multiview_instance.pth'):
        self.root = root
        self.anns = torch.load(annfile)

    def __getitem__(self, index):
        ann = self.anns[index]
        full_masks = self.get_fullsize_masks(ann['masks'], ann['bboxes'])
        image_paths = [os.path.join(self.root, name) for name in ann['img_filenames']]
        return {
            'img_ids': ann['img_ids'],
            'imgpaths': image_paths,
            'frames': ann['frames'],
            'bboxes': ann['bboxes'],
            'keypoints': ann['keypoints'].float(),
            'masks': full_masks,
        }

    def __len__(self):
        return len(self.anns)

    def get_fullsize_masks(self, masks, bboxes, h=1200, w=1920):
        """Paste each cropped mask into an (h, w) canvas at its bbox location.

        Each ``bboxes[i]`` is (x, y, width, height); crop ``masks[i]`` must
        have shape (height + 1, width + 1).  Returns a bool tensor of shape
        (len(masks), h, w).
        """
        canvases = []
        for i, crop in enumerate(masks):
            x0, y0, bw, bh = bboxes[i][0], bboxes[i][1], bboxes[i][2], bboxes[i][3]
            canvas = torch.zeros([h, w], dtype=torch.bool)
            canvas[y0:y0 + bh + 1, x0:x0 + bw + 1] = crop
            canvases.append(canvas)
        return torch.stack(canvases)
| 33.095238 | 89 | 0.568345 |
a5387c797eeb6800d2303d048e2f44ad769b68ee | 5,692 | py | Python | pyVers/neuralnet.py | m0baxter/tic-tac-toe-AI | 3529f1e1dcbb2c4edba6ffaabe6f7db3178b7de5 | [
"MIT"
] | null | null | null | pyVers/neuralnet.py | m0baxter/tic-tac-toe-AI | 3529f1e1dcbb2c4edba6ffaabe6f7db3178b7de5 | [
"MIT"
] | null | null | null | pyVers/neuralnet.py | m0baxter/tic-tac-toe-AI | 3529f1e1dcbb2c4edba6ffaabe6f7db3178b7de5 | [
"MIT"
] | null | null | null |
import numpy as np
import scipy.io as sio
from scipy.optimize import minimize
class NeuralNet(object):
    """A fully-connected feed-forward neural network with sigmoid activations.

    The architecture is described by ``shape`` = [inputs, hidden..., outputs];
    ``weights`` holds one matrix per layer transition, each with a leading
    bias column.  Training minimizes the regularized cross-entropy cost with
    conjugate gradients (scipy.optimize.minimize).
    """

    def __init__(self, inputs, outputs, *hiddens):
        """Create a randomly initialized network with the given layer sizes."""
        self.shape = [inputs] + list(hiddens) + [outputs]
        self.layers = len(self.shape)
        self.weights = self.__randInitialize()

    def toFile(self, fileName):
        """Saves the NeuralNet to filename.mat."""
        nnDict = dict(shape=self.shape, weights=self.weights)
        sio.savemat(fileName, nnDict, True)
        return

    @classmethod
    def fromFile(cls, path):
        """Creates a NeuralNet from a .mat file written by ``toFile``."""
        net = cls(1, 1, 1)  # placeholder sizes; overwritten from the file below
        data = sio.loadmat(path)
        net.shape = list(data['shape'][0])
        net.layers = len(net.shape)
        net.weights = list(data['weights'][0])
        return net

    def numLabels(self):
        """Returns the number labels in the output of the network."""
        return self.shape[-1]

    def __randInitialize(self):
        """Randomly initializes the weight matrices (Glorot-style range)."""
        weights = []
        for i in range(self.layers - 1):
            Nin = self.shape[i]
            Nout = self.shape[i + 1]
            eps = np.sqrt(6) / np.sqrt(Nin + Nout)
            weights.append(randMatrix((Nout, Nin + 1), eps))
        return weights

    def __unflatten(self, flat):
        """Used by the cost function to unflatten weight matrices."""
        matrices = []
        start = 0
        for i in range(self.layers - 1):
            Nin = self.shape[i] + 1  # +1 for the bias column
            Nout = self.shape[i + 1]
            end = Nout * Nin + start
            arr = flat[start:end].reshape((Nout, Nin))
            matrices.append(arr)
            start = end
        return matrices

    def __labelMatrix(self, y):
        """Converts the label data into a (m, numLabels) one-hot matrix."""
        m, n = y.shape
        nl = self.shape[-1]
        Y = np.zeros((m, self.numLabels()))
        for i in range(m):
            Y[i, :] = np.eye(nl)[y[i], :]
        return Y

    def __nnCost(self, theta, X, Y, l):
        """Computes the cost and its gradient on data X, Y with regularizer l.

        ``theta`` is the flattened weight vector.  Returns (cost, gradient).
        """
        m, n = X.shape
        Ws = self.__unflatten(theta)
        Grads = [np.zeros(w.shape) for w in Ws]

        # Forward propagation, keeping every activation for backprop.
        As = [np.append(np.ones((m, 1)), X, 1)]
        Zs = []
        # NOTE: this loop variable used to be named ``l``, shadowing the
        # regularizer parameter and silently disabling regularization.
        for layer in range(self.layers - 2):
            z = As[layer].dot(Ws[layer].T)
            Zs.append(z)
            mz, nz = z.shape
            a = np.append(np.ones((mz, 1)), sigmoid(z), 1)
            As.append(a)

        # activation of output:
        Zs.append(As[-1].dot(Ws[-1].T))
        As.append(sigmoid(Zs[-1]))

        # compute cost (the epsilon keeps log() away from zero; bias columns
        # are excluded from the regularization term):
        reg = l * np.sum([np.sum(w[:, 1:] ** 2) for w in Ws]) / (2.0 * m)
        cost = -np.sum(np.log(As[-1] + 1.0e-15) * Y
                       + np.log(1 - As[-1] + 1.0e-15) * (1 - Y)) / m + reg

        # gradient via backpropagation:
        d = As[-1] - Y
        Delta = d.T.dot(As[-2])
        mw, _ = Ws[-1].shape
        Grads[-1] = (Delta + l * np.append(np.zeros((mw, 1)), Ws[-1][:, 1:], 1)) / m

        for i in range(2, self.layers):
            z = sigGrad(Zs[-i])
            mz, _ = z.shape
            d = (d.dot(Ws[-i + 1]) * np.append(np.ones((mz, 1)), z, 1))[:, 1:]
            Delta = d.T.dot(As[-i - 1])
            mw, _ = Ws[-i].shape
            Grads[-i] = (Delta + l * np.append(np.zeros((mw, 1)), Ws[-i][:, 1:], 1)) / m

        GRAD = np.concatenate([g.flatten() for g in Grads])
        # float(): np.asscalar was removed in NumPy 1.23.
        return (float(cost), GRAD)

    def trainNetwork(self, X, y, l):
        """Trains the neural network from the data X with labels y and regularizer l."""
        Y = self.__labelMatrix(y)
        theta = np.concatenate([w.flatten() for w in self.weights])

        res = minimize(self.__nnCost, theta, args=(X, Y, l), method='CG',
                       jac=True, options={'disp': True})
        self.weights = self.__unflatten(res.x)
        return

    def evaluate(self, x):
        """Evaluates the neural network on a single example x, a 1xn vector.

        Returns the label indices ordered by decreasing certainty.
        """
        As = [np.append(np.ones((1, 1)), x, 1)]
        Zs = []
        for layer in range(self.layers - 2):
            z = As[layer].dot(self.weights[layer].T)
            Zs.append(z)
            mz, nz = z.shape
            a = np.append(np.ones((mz, 1)), sigmoid(z), 1)
            As.append(a)

        Zs.append(As[-1].dot(self.weights[-1].T))
        As.append(sigmoid(Zs[-1]))
        return self.__indexDecending(As[-1])

    def __indexDecending(self, arr):
        """Given the output of NeuralNet.evaluate return a list of decreasing certainty
           of classification."""
        lst = list(arr[0])
        indices = []
        for _ in range(len(lst)):
            j = np.argmax(lst)
            indices.append(j)
            # -inf (not None) so np.argmax keeps working on numeric data
            # under Python 3, where None cannot be compared to floats.
            lst[j] = -np.inf
        return indices

    def laplaceWeights(self, loc, scale):
        """Sets the weights randomly drawing from a laplace distribution"""
        for i in range(len(self.weights)):
            s = self.weights[i].shape
            self.weights[i] = np.random.laplace(loc, scale, s)
        return


def randMatrix(size, eps):
    """Returns random matrix with shape = size whose values range in [-eps, eps)."""
    return 2 * eps * np.random.random_sample(size) - eps


def sigmoid(z):
    """Returns the sigmoid function evaluated on z (z can be any numpy array or scalar)."""
    return 1 / (1 + np.exp(-z))


def sigGrad(z):
    """Returns the gradient of the sigmoid at z (scalar or numpy array)."""
    s = sigmoid(z)
    return s * (1 - s)
| 26.976303 | 104 | 0.518271 |
b7e040aa2726b281254e66b145ab5d78fecb09d4 | 11,560 | py | Python | tests/test_pdf_fftconv.py | nsahoo/zfit | fcad2578f31138f5383f7fa5de6c0f8c6b1dbaa4 | [
"BSD-3-Clause"
] | 129 | 2018-03-24T22:27:55.000Z | 2022-03-19T21:04:07.000Z | tests/test_pdf_fftconv.py | nsahoo/zfit | fcad2578f31138f5383f7fa5de6c0f8c6b1dbaa4 | [
"BSD-3-Clause"
] | 195 | 2018-03-22T11:31:47.000Z | 2022-03-29T17:01:45.000Z | tests/test_pdf_fftconv.py | nsahoo/zfit | fcad2578f31138f5383f7fa5de6c0f8c6b1dbaa4 | [
"BSD-3-Clause"
] | 45 | 2018-03-22T10:12:31.000Z | 2022-02-01T10:45:27.000Z | """Example test for a pdf or function."""
# Copyright (c) 2021 zfit
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scipy.signal
import scipy.stats
import tensorflow as tf
import zfit
from zfit import z
from zfit.util.exception import WorkInProgressError
param1_true = 0.3
param2_true = 1.2
class FFTConvPDFV1NoSampling(zfit.pdf.FFTConvPDFV1):
@zfit.supports()
def _sample(self, n, limits):
raise zfit.exception.SpecificFunctionNotImplemented
interpolation_methods = (
'linear',
'spline',
'spline:5',
'spline:3'
)
@pytest.mark.parametrize('interpolation', interpolation_methods)
def test_conv_simple(interpolation):
n_points = 2432
obs = zfit.Space("obs1", limits=(-5, 5))
param1 = zfit.Parameter('param1', -3)
param2 = zfit.Parameter('param2', 0.3)
gauss = zfit.pdf.Gauss(0., param2, obs=obs)
func1 = zfit.pdf.Uniform(param1, param2, obs=obs)
func2 = zfit.pdf.Uniform(-1.2, -1, obs=obs)
func = zfit.pdf.SumPDF([func1, func2], 0.5)
conv = zfit.pdf.FFTConvPDFV1(func=func, kernel=gauss, n=100, interpolation=interpolation)
if interpolation == 'spline:5':
assert conv._conv_spline_order == 5
elif interpolation == 'spline:3':
assert conv._conv_spline_order == 3
x = tf.linspace(-5., 5., n_points)
probs = conv.pdf(x=x)
# true convolution
true_conv = true_conv_np(func, gauss, obs, x, xkernel=tf.linspace(*obs.limit1d, num=n_points))
integral = conv.integrate(limits=obs)
probs_np = probs.numpy()
np.testing.assert_allclose(probs, true_conv, rtol=0.01, atol=0.01)
assert pytest.approx(1, rel=1e-3) == integral.numpy()
assert len(probs_np) == n_points
plt.figure()
plt.title(f"Conv FFT 1Dim, interpolation={interpolation}")
plt.plot(x, probs_np, label='zfit')
plt.plot(x, true_conv, label='numpy')
plt.legend()
# pytest.zfit_savefig()
@pytest.mark.parametrize('interpolation', interpolation_methods)
def test_asymetric_limits(interpolation):
    # Convolutions built with different (including asymmetric) kernel limits
    # should agree as long as the limits fully contain the kernel support.
    from numpy import linspace

    import zfit
    from zfit.models.convolution import FFTConvPDFV1

    ## Space
    low_obs = -30
    high_obs = 30
    obs = zfit.Space('space', limits=[low_obs, high_obs])

    ## PDFs
    uniform1 = zfit.pdf.Uniform(low=-10, high=10, obs=obs)
    uniform2 = zfit.pdf.Uniform(low=-10, high=10, obs=obs)
    conv_uniforms_1 = FFTConvPDFV1(
        func=uniform1,
        kernel=uniform2,
        limits_kernel=(-18, 18),
        interpolation=interpolation,
    )
    conv_uniforms_2 = FFTConvPDFV1(
        func=uniform1,
        kernel=uniform2,
        limits_kernel=(-12, 12),
        interpolation=interpolation,
    )
    # Asymmetric kernel limits — must still match the symmetric cases.
    conv_uniforms_3 = FFTConvPDFV1(
        func=uniform1,
        kernel=uniform2,
        limits_kernel=(-25, 12),
        interpolation=interpolation,
    )
    x = linspace(low_obs, high_obs, 300)
    tol = 5e-3
    # If this fails, we're too sensitive
    np.testing.assert_allclose(conv_uniforms_1.pdf(x), conv_uniforms_2.pdf(x), rtol=tol, atol=tol)
    # this is the "actual" test
    np.testing.assert_allclose(conv_uniforms_1.pdf(x), conv_uniforms_3.pdf(x), rtol=tol, atol=tol)
@pytest.mark.parametrize('interpolation', interpolation_methods)
def test_conv_1d_shifted(interpolation):
    """Convolution where the kernel space is shifted relative to the function space.

    NOTE(review): the ``interpolation`` parameter is not forwarded to
    FFTConvPDFV1 here, so all parametrizations run the default mode —
    confirm whether that is intended.
    """
    kerlim = (-3, 3)  # symmetric to make the np conv comparison simple
    obs_kernel = zfit.Space("obs1", limits=kerlim)
    obs = zfit.Space("obs1", limits=(5, 15))
    func1 = zfit.pdf.GaussianKDE1DimV1(obs=obs, data=np.random.uniform(6, 12, size=100))
    # func1 = zfit.pdf.Uniform(6, 12, obs=obs)
    func2 = zfit.pdf.Uniform(11, 11.5, obs=obs)
    func = zfit.pdf.SumPDF([func1, func2], 0.5)
    func1k = zfit.pdf.Gauss(0., 1, obs=obs_kernel)
    func2k = zfit.pdf.Gauss(1., 0.4, obs=obs_kernel)
    funck = zfit.pdf.SumPDF([func1k, func2k], 0.5)
    conv = zfit.pdf.FFTConvPDFV1(func=func, kernel=funck, n=200)
    xnp = tf.linspace(obs_kernel.rect_lower, obs.rect_upper, 4023)
    # Reference convolution: evaluate on points filtered to each space.
    kernel_points = obs_kernel.filter(xnp)
    x = obs.filter(xnp)
    probs = conv.pdf(x=x)
    true_conv = true_conv_np(func, funck, obs, x=x, xkernel=kernel_points)
    integral = conv.integrate(limits=obs, )
    probs_np = probs.numpy()
    np.testing.assert_allclose(probs_np, true_conv, rtol=0.01, atol=0.01)
    assert pytest.approx(1, rel=1e-3) == integral.numpy()
    plt.figure()
    plt.title("Conv FFT 1Dim shift testing")
    plt.plot(x, probs_np, label='zfit')
    plt.plot(x, true_conv, label='numpy')
    plt.legend()
    pytest.zfit_savefig()
@pytest.mark.parametrize('interpolation', interpolation_methods)
@pytest.mark.flaky(reruns=3)  # statistical KS test, allow reruns
def test_onedim_sampling(interpolation):
    """Compare the custom sampling shortcut against the generic fallback.

    Samples from the normal FFTConvPDFV1 and from the no-shortcut subclass
    must come from the same distribution (two-sample KS test).
    """
    # there is a sampling shortcut, so we test if it also works without the shortcut
    obs_kernel = zfit.Space("obs1", limits=(-3, 1))
    obs = zfit.Space("obs1", limits=(5, 15))
    func1 = zfit.pdf.Uniform(6, 12, obs=obs)
    func2 = zfit.pdf.Uniform(11, 11.5, obs=obs)
    func = zfit.pdf.SumPDF([func1, func2], 0.5)
    func1k = zfit.pdf.Uniform(-2, 1, obs=obs_kernel)
    func2k = zfit.pdf.Uniform(-0.5, 1., obs=obs_kernel)
    funck = zfit.pdf.SumPDF([func1k, func2k], 0.5)
    conv = zfit.pdf.FFTConvPDFV1(func=func, kernel=funck, n=200, interpolation=interpolation)
    conv_nosample = FFTConvPDFV1NoSampling(func=func, kernel=funck, n=200, interpolation=interpolation)
    npoints_sample = 10000
    sample = conv.sample(npoints_sample)
    sample_nosample = conv_nosample.sample(npoints_sample)
    x = z.unstack_x(sample)
    xns = z.unstack_x(sample_nosample)
    assert scipy.stats.ks_2samp(x, xns).pvalue > 1e-3  # can vary a lot, but still means close
def true_conv_np(func, gauss1, obs, x, xkernel):
    """Reference 1D convolution of two pdfs computed with scipy.

    Evaluates ``func`` on ``x`` and ``gauss1`` on ``xkernel``, convolves the
    sampled values via FFT and rescales so that the result has unit mean
    times the rectangular area of ``obs`` (i.e. integrates to ~1 on a
    uniform grid).
    """
    func_vals = func.pdf(x)
    kernel_vals = gauss1.pdf(xkernel)
    convolved = scipy.signal.fftconvolve(func_vals, kernel_vals, mode='same')
    normalization = np.mean(convolved) * obs.rect_area()
    return convolved / normalization
def true_conv_2d_np(func, gauss1, obsfunc, xfunc, xkernel):
    """Reference 2D convolution of two pdfs computed with scipy.

    ``xfunc`` and ``xkernel`` are flattened point lists assumed to lie on
    square grids; the pdf values are reshaped to 2D, convolved, normalized
    by mean * area of ``obsfunc`` and returned flattened again.
    """
    side_func = int(np.sqrt(xfunc.shape[0]))
    side_kernel = int(np.sqrt(xkernel.shape[0]))
    func_grid = tf.reshape(func.pdf(xfunc), (side_func, side_func))
    kernel_grid = tf.reshape(gauss1.pdf(xkernel), (side_kernel, side_kernel))
    convolved = scipy.signal.convolve(func_grid, kernel_grid, mode='same')
    convolved = convolved / (np.mean(convolved) * obsfunc.rect_area())
    return tf.reshape(convolved, xfunc.shape[0])
def test_max_1dim():
    """Multi-dimensional convolution is not implemented yet.

    Building an FFTConvPDFV1 from 2D products must raise
    WorkInProgressError until >1-dim support lands.
    """
    obs1 = zfit.Space("obs1", limits=(-2, 4))
    obs2 = zfit.Space("obs2", limits=(-6, 4))
    param2 = zfit.Parameter('param2', 0.4)
    gauss1 = zfit.pdf.Gauss(1., 0.5, obs=obs1)
    gauss22 = zfit.pdf.CrystalBall(0.0, param2, -0.2, 3, obs=obs2)
    obs1func = zfit.Space("obs1", limits=(4, 10))
    obs2func = zfit.Space("obs2", limits=(-6, 4))
    gauss21 = zfit.pdf.Gauss(-0.5, param2, obs=obs2func)
    func1 = zfit.pdf.Uniform(5, 8, obs=obs1func)
    func2 = zfit.pdf.Uniform(6, 7, obs=obs1func)
    func = zfit.pdf.SumPDF([func1, func2], 0.5)
    func = func * gauss21  # 2D product pdf
    gauss = gauss1 * gauss22  # 2D product kernel
    with pytest.raises(WorkInProgressError):
        _ = zfit.pdf.FFTConvPDFV1(func=func, kernel=gauss)
@pytest.mark.skip  # not yet implemented WIP
def test_conv_2D_simple():
    """WIP test for 2D FFT convolution; skipped until the feature exists."""
    # zfit.run.set_graph_mode(False) # TODO: remove, just for debugging
    # raise WorkInProgressError("2D convolution not yet implemented, re-activate if so")
    n_points = 1000
    # obs1 = zfit.Space("obs1", limits=(-2, 4))
    # obs2 = zfit.Space("obs2", limits=(-6, 4))
    obs1 = zfit.Space("obs1", limits=(-5, 5))
    obs2 = zfit.Space("obs2", limits=(-5, 5))
    obskernel = obs1 * obs2
    param2 = zfit.Parameter('param2', 0.4)
    # NOTE(review): the Gauss/CrystalBall kernels below are immediately
    # overwritten by Uniforms — looks like leftover experimentation.
    gauss1 = zfit.pdf.Gauss(1., 0.5, obs=obs1)
    gauss22 = zfit.pdf.CrystalBall(0.0, param2, -0.2, 3, obs=obs2)
    gauss1 = zfit.pdf.Uniform(-1, 1, obs=obs1)
    gauss22 = zfit.pdf.Uniform(-2, 2, obs=obs2)
    obs1func = zfit.Space("obs1", limits=(-10, 10))
    obs2func = zfit.Space("obs2", limits=(-26, 26))
    obs_func = obs1func * obs2func
    gauss21 = zfit.pdf.Gauss(-0.5, param2, obs=obs2func)
    func1 = zfit.pdf.Uniform(2, 8, obs=obs1func)
    func2 = zfit.pdf.Uniform(6, 7, obs=obs1func)
    func = zfit.pdf.SumPDF([func1, func2], 0.5)
    func = func * gauss21
    gauss = gauss1 * gauss22
    conv = zfit.pdf.FFTConvPDFV1(func=func, kernel=gauss)
    start = obs_func.rect_lower
    stop = obs_func.rect_upper
    # Random evaluation points plus a regular 2D grid for comparison.
    x_tensor = tf.random.uniform((n_points, 2), start, stop)
    x_tensor = tf.reshape(x_tensor, (-1, 2))
    linspace = tf.linspace(start, stop, num=n_points)
    linspace = tf.transpose(tf.meshgrid(*tf.unstack(linspace, axis=-1)))
    linspace_func = tf.reshape(linspace, (-1, 2))
    # linspace_full = tf.linspace((-8, -8), (12, 12), num=n_points)
    # linspace_full = tf.transpose(tf.meshgrid(*tf.unstack(linspace_full, axis=-1)))
    # linspace_full = tf.reshape(linspace_full, (-1, 2))
    linspace_kernel = tf.linspace(obskernel.rect_lower,
                                  obskernel.rect_upper, num=n_points)
    linspace_kernel = tf.transpose(tf.meshgrid(*tf.unstack(linspace_kernel, axis=-1)))
    linspace_kernel = tf.reshape(linspace_kernel, (-1, 2))
    # linspace_kernel = obskernel.filter(linspace_full)
    # linspace_func = obs_func.filter(linspace_full)
    x = zfit.Data.from_tensor(obs=obs_func, tensor=x_tensor)
    linspace_data = zfit.Data.from_tensor(obs=obs_func, tensor=linspace)
    probs_rnd = conv.pdf(x=x)
    probs = conv.pdf(x=linspace_data)
    # Numpy doesn't support ndim convolution?
    true_probs = true_conv_2d_np(func, gauss, obsfunc=obs_func,
                                 xfunc=linspace_func, xkernel=linspace_kernel)
    import matplotlib.pyplot as plt
    # np.testing.assert_allclose(probs, true_probs, rtol=0.2, atol=0.1)
    integral = conv.integrate(limits=obs_func, )
    assert pytest.approx(1, rel=1e-3) == integral.numpy()
    probs_np = probs_rnd.numpy()
    assert len(probs_np) == n_points
    # probs_plot = np.reshape(probs_np, (-1, n_points))
    # x_plot = linspace[0:, ]
    # probs_plot_projx = np.sum(probs_plot, axis=0)
    # plt.plot(x_plot, probs_np)
    # probs_plot = np.reshape(probs_np, (n_points, n_points))
    # plt.imshow(probs_plot)
    # plt.show()
    true_probsr = tf.reshape(true_probs, (n_points, n_points))
    probsr = tf.reshape(probs, (n_points, n_points))
    plt.figure()
    plt.imshow(true_probsr, label='true probs')
    plt.title('true probs')
    plt.figure()
    plt.imshow(probsr, label='zfit conv')
    plt.title('zfit conv')
    # test the sampling: custom shortcut vs. accept-reject fallback
    conv_nosample = FFTConvPDFV1NoSampling(func=func, kernel=gauss)
    npoints_sample = 10000
    sample = conv.sample(npoints_sample)
    sample_nosample = conv_nosample.sample(npoints_sample)
    x, y = z.unstack_x(sample)
    xns, yns = z.unstack_x(sample_nosample)
    plt.figure()
    plt.title('FFT conv, custom sampling, addition')
    plt.hist2d(x, y, bins=30)
    # pytest.zfit_savefig()
    plt.figure()
    plt.title('FFT conv, fallback sampling, accept-reject')
    plt.hist2d(xns, yns, bins=30)
    # pytest.zfit_savefig()
    plt.figure()
    plt.title('FFT conv x projection')
    plt.hist(x.numpy(), bins=50, label='custom', alpha=0.5)
    plt.hist(xns.numpy(), bins=50, label='fallback', alpha=0.5)
    plt.legend()
    # pytest.zfit_savefig()
    plt.figure()
    plt.title('FFT conv y projection')
    plt.hist(y.numpy(), bins=50, label='custom', alpha=0.5)
    plt.hist(yns.numpy(), bins=50, label='fallback', alpha=0.5)
    plt.legend()
    # pytest.zfit_savefig()
| 34.610778 | 103 | 0.669637 |
bb4e66ab04db9a5cd5f7cda6456adfaef3a0349a | 13,972 | py | Python | union_find_drawing_demo/union_find_drawing_demo.py | LPRowe/miscellaneous | 98c6c57a88627b90e87c553d9235e0349feb971d | [
"MIT"
] | null | null | null | union_find_drawing_demo/union_find_drawing_demo.py | LPRowe/miscellaneous | 98c6c57a88627b90e87c553d9235e0349feb971d | [
"MIT"
] | null | null | null | union_find_drawing_demo/union_find_drawing_demo.py | LPRowe/miscellaneous | 98c6c57a88627b90e87c553d9235e0349feb971d | [
"MIT"
] | null | null | null | import pygame
import time
import math
import matplotlib.pyplot as plt
import numpy as np
# Add removal (erase and mark all groups affected by erase)
# Delete all nodes in those groups, and re-add them back in one by one
# TODO:
# 1. add mouse tracking, 1 click and 2 click -> create vertices
# 2. add shape selection by keyboard input
# add shape icon based on current shape in top right corner
# 3. add union find data structure to keep track of which groups are connected
# 4. add color based on union find data structure group id
# 5. add eraser tool to shape selection and have union find update all related nodes
# [SOLVED] 1. why is union find running so slow? unnecessary cycle? numpy?
# [CHECK ARR UPDATE] 2. why is union find not updating surface every action? seemingly random
# add a paint fill button (right click)
# add color brightness adjust with mouse scroll
class UnionFind():
    """
    Non-standard implementation of union find.
    Each shape's nodes are connected like a network.
    When shapes overlap, their two networks merge together into a larger group (network) of nodes.
    Serves a second function to create a pygame surface to display the current shapes
    where shapes of the same color belong to the same group.

    params:
        surface_shape    (num_rows, num_cols) of the drawing plane
        brightness       int [0, 255] controls how bright the shapes are
        color_wheel      tuple of (R, G, B) tuples where R, G, B are integers [0, 255]
    """

    def __init__(self, surface_shape, brightness, color_wheel):
        self.group_id = 0  # next id handed out by create()
        self.group = {}    # group id -> set of member nodes (x, y)
        self.id = {}       # node (x, y) -> id of the group it belongs to
        self.colors = color_wheel
        self.R, self.C = surface_shape
        # (R, C, 3) RGB array backing the pygame surface; starts all black.
        self.arr = np.array([[(0,)*3 for _ in range(self.C)] for _ in range(self.R)])
        self.brightness = brightness
        self.surface = pygame.surfarray.make_surface(self.arr)

    def update_arr(self):
        """
        Recolor every node of every group (color keyed on group id mod the
        color wheel length) and refresh the display surface.
        Fixed: removed leftover debug print statements that spammed stdout
        on every redraw.
        """
        for node_id in self.group:
            color = self.colors[node_id % len(self.colors)]
            for x, y in self.group[node_id]:
                self.arr[x][y] = color
        self.update_surface()

    def update_surface(self):
        """
        Creates a pygame surface of the array.
        """
        self.surface = pygame.surfarray.make_surface(self.arr)

    def normalize_brightness(self):
        """
        Converts all pixels that are on (non-black) to the same intensity
        by rescaling each RGB vector to length ``self.brightness``.
        """
        for i in range(self.R):
            for j in range(self.C):
                if sum(self.arr[i][j]) != 0:
                    c = self.brightness / math.sqrt(sum(self.arr[i][j][k]**2 for k in range(3)))
                    for k in range(3):
                        self.arr[i][j][k] *= c

    def union(self, a, b):
        """Union nodes a and b; returns the id of the resulting group."""
        A, B = a in self.id, b in self.id
        if A and B and self.id[a] != self.id[b]:
            self.merge(a, b)
        elif A or B:
            self.add(a, b)
        else:
            self.create(a, b)
        return self.id[a] if a in self.id else self.id[b]

    def merge(self, a, b):
        """Nodes a and b both belong to a group, merge the smaller group with the larger group."""
        obs, targ = sorted((self.id[a], self.id[b]), key = lambda i: len(self.group[i]))
        for node in self.group[obs]:
            self.id[node] = targ
        self.group[targ] |= self.group[obs]
        del self.group[obs]

    def add(self, a, b):
        """Node a or node b does not have a group. Add the new node to the existing group."""
        a, b = (a, b) if a in self.id else (b, a)
        targ = self.id[a]
        self.id[b] = targ
        self.group[targ] |= {b}

    def create(self, a, b):
        """Neither node a nor b belong to a group. Create a new group {a, b}."""
        self.group[self.group_id] = {a, b}
        self.id[a] = self.id[b] = self.group_id
        self.group_id += 1
class Shape():
    """A drawable shape: ordered vertices, the rasterized outline (edges),
    and optionally the filled interior (nodes)."""

    def __init__(self, vertices):
        self.vertices = vertices          # List of vertices of the shape (order matters)
        self.edges = self.get_edges()     # Set of integer points outlining the shape
        # Fixed: was ``self.nodes = self.edges`` which aliased the two sets,
        # so filling the shape silently mutated the outline as well.
        self.nodes = set(self.edges)      # Outline plus (after fill_shape) interior points

    @staticmethod
    def get_line(x0, y0, x1, y1):
        """Recursively finds all integer points that connect (x0, y0) to (x1, y1)"""
        def helper(x0, y0, x1, y1):
            nonlocal seen, points
            a, b, c, d = int(round(x0, 0)), int(round(y0, 0)), int(round(x1, 0)), int(round(y1, 0))
            h = (a, b, c, d)
            if h not in seen:
                seen.add(h)
                points |= {(a, b), (c, d)}
                if a == c and b == d:
                    return None
                # Bisect the segment until rounded endpoints coincide.
                xm, ym = (x0 + x1) / 2, (y0 + y1) / 2
                helper(x0, y0, xm, ym)
                helper(xm, ym, x1, y1)
        seen = set()
        points = {(x0, y0), (x1, y1)}
        helper(x0, y0, x1, y1)
        return points

    @staticmethod
    def get_centroid(vertices):
        """Return the arithmetic mean (X, Y) of the vertices."""
        X = Y = 0
        for v in vertices:
            X += v[0]
            Y += v[1]
        return (X / len(vertices), Y / len(vertices))

    @staticmethod
    def get_neighbors(x, y):
        """
        y: int row
        x: int column
        returns 4-directionally adjacent neighbors to (x, y)
        """
        return ((x+1, y), (x-1, y), (x, y+1), (x, y-1))

    def get_edges(self):
        """Returns all points that connect the vertices (including the vertices themselves)"""
        if not self.vertices:
            print("Shape must have vertices before edges can be drawn")
            return [(0, 0)]
        edges = set()
        for (x0, y0), (x1, y1) in zip(self.vertices, self.vertices[1:] + [self.vertices[0]]):
            edges |= self.get_line(x0, y0, x1, y1)
        return edges

    def fill_shape(self):
        """
        Shapes such as stars, squares, triangles can be filled in with points.
        fill_shape should not be called on a line or freehand shape.

        Performs BFS from the central point of the shape (average of all vertices).
        Adds all visited points to ``self.nodes``, never going outside of the
        edge boundary. ``self.edges`` is left untouched (outline only).
        """
        if len(self.edges) < 3:
            raise Exception("A shape must have at least 3 edges in order to be filled")
        elif len(self.vertices) < 3:
            raise Exception(f"Shape has {len(self.vertices)} vertices, must have at least 3 to be filled.")
        X, Y = self.get_centroid(self.vertices)
        seed = (int(X), int(Y))
        # Fixed: original tested ``self.get_centroid in self.edges`` which
        # compared the bound method object against coordinate tuples and
        # therefore could never trigger; compare the actual seed point.
        if seed in self.edges:
            raise Exception(f"Shape is too small or thin to fill.")
        q = [seed]
        # Copy so BFS bookkeeping does not mutate the outline set.
        visited = set(self.edges)
        visited.add(seed)
        while q:
            next_level = []
            for node in q:
                for neighbor in self.get_neighbors(*node):
                    if neighbor not in visited:
                        visited.add(neighbor)
                        next_level.append(neighbor)
            q = next_level
        self.nodes |= visited
def create_vertices(x0, y0, x1, y1, name = "rectangle"):
    """Return the ordered vertex list for the named shape spanned by two
    corner points: the first click (x0, y0) and the current mouse position
    or second click (x1, y1)."""
    if name == "free":
        # Freehand: just the raw endpoints, untouched.
        return [(x0, y0), (x1, y1)]
    # Normalize the bounding box so x0 <= x1 and y0 <= y1, then nudge
    # degenerate (zero-width / zero-height) boxes by one pixel.
    x0, x1 = min(x0, x1), max(x0, x1)
    y0, y1 = min(y0, y1), max(y0, y1)
    if x0 == x1:
        x1 = x1 + 1
    if y0 == y1:
        y0 = y0 + 1
    top_left = (x0, y0)
    top_right = (x1, y0)
    bottom_left = (x0, y1)
    bottom_right = (x1, y1)
    mid_x = (x0 + x1) / 2
    mid_y = (y0 + y1) / 2
    if name == "rectangle":
        return [top_left, top_right, bottom_right, bottom_left]
    if name == "triangle1":
        return [(mid_x, y0), bottom_left, bottom_right]
    if name == "triangle2":
        return [(mid_x, y1), top_left, top_right]
    if name == "triangle3":
        return [(x0, mid_y), top_right, bottom_right]
    if name == "triangle4":
        return [(x1, mid_y), top_left, bottom_left]
    if name in ("pentagon", "star"):
        angle = 36 * math.pi / 180
        # Vertical/horizontal offsets placing the five regular-pentagon points
        # inside the bounding box (screen coordinates: y grows downward).
        dy = (mid_x - x0) * math.tan(angle) * (y0 - y1) / (x1 - x0)
        dx = (mid_y - y1) * math.tan(angle / 2) * (x1 - x0) / (y0 - y1)
        apex = (mid_x, y0)
        left = (x0, y0 - dy)
        right = (x1, y0 - dy)
        lower_left = (x0 + dx, y1)
        lower_right = (x1 - dx, y1)
        if name == "pentagon":
            return [apex, right, lower_right, lower_left, left]
        return [lower_left, apex, lower_right, left, right]
    # Unknown shape name: fall back to the bare diagonal endpoints.
    return [(x0, y0), (x1, y1)]
class Game():
    """Main pygame application: tracks mouse/keyboard input, lets the user
    drag out shapes, and records drawn shapes in a UnionFind structure so
    overlapping shapes merge into one colored group.

    All keys of ``kwargs`` (WIDTH, HEIGHT, SLEEP_TIME, LOCK_TIME,
    COLOR_WHEEL, BRIGHTNESS, ...) become instance attributes.
    """

    def __init__(self, **kwargs):
        pygame.init()
        # Promote every settings entry to an attribute (self.WIDTH, ...).
        for key in kwargs:
            self.__dict__[key] = kwargs[key]
        self.SURFACE = pygame.display.set_mode((self.WIDTH, self.HEIGHT))
        self.active = True                   # True when the game is running
        self.left_click_down = False         # monitor status of left click
        # Cycle through shape to draw
        self.shapes = ["rectangle", "triangle1", "triangle2", "triangle3", "triangle4",
                       "pentagon", "star", "free"]
        self.shape_id = 0
        # Record drawn shapes in a Union Find data structure
        self.uf = UnionFind(surface_shape = (self.WIDTH, self.HEIGHT),
                            brightness = self.BRIGHTNESS,
                            color_wheel = self.COLOR_WHEEL)
        # Store temporary shapes (outlined but not drawn here)
        self.shape_outline = set()
        # Inputs are locked until time > input_lock
        self.input_lock = -1

    def temporary_lock(self):
        # Debounce: ignore further inputs for LOCK_TIME seconds.
        self.input_lock = time.time() + self.LOCK_TIME

    def run(self):
        """Main event loop: poll input, handle drag/release, redraw."""
        while self.active:
            time.sleep(self.SLEEP_TIME)
            self.get_events()
            keys = pygame.key.get_pressed()
            mouse = pygame.mouse.get_pressed()
            mouse_pos = pygame.mouse.get_pos()
            t = time.time()
            # Change mode (shape) by up or down arrow
            if t >= self.input_lock:
                if keys[pygame.K_UP]:
                    self.shape_id = (self.shape_id + 1) % len(self.shapes)
                    self.temporary_lock()
                    print(self.shapes[self.shape_id])
                    # TODO: update icon
                elif keys[pygame.K_DOWN]:
                    self.shape_id = (self.shape_id - 1) % len(self.shapes)
                    self.temporary_lock()
                    print(self.shapes[self.shape_id])
                    # TODO: update shape icon
            # Drag state machine: press -> anchor, hold -> preview, release -> commit.
            # NOTE(review): x0/y0 are plain locals; if a release is seen without a
            # preceding press (shouldn't happen in practice) 'shape' is unbound.
            if not self.left_click_down and mouse[0]:
                print('a')  # NOTE(review): leftover debug print
                self.left_click_down = True
                self.temporary_lock()
                x0, y0 = mouse_pos
            elif self.left_click_down and mouse[0]:
                x1, y1 = mouse_pos
                # Clamp the drag point inside the window.
                x1 = max(0, min(self.WIDTH - 2, x1))
                y1 = max(0, min(self.HEIGHT - 2, y1))
                vertices = create_vertices(x0, y0, x1, y1, name = self.shapes[self.shape_id])
                self.shape_outline = vertices[:]
                shape = Shape(vertices)
            elif self.left_click_down and not mouse[0]:
                print('c')  # NOTE(review): leftover debug print
                # release to draw shape; use try and catch to handle errors from too small of shape
                self.left_click_down = False
                self.shape_outline = set()
                # NOTE(review): shape_id is always >= 0, so this fill branch can
                # never run — fill appears to have been disabled for debugging;
                # confirm the intended condition.
                if self.shape_id <= -1: # only fill shapes do not fill star or free hand drawings
                    try: shape.fill_shape()
                    except: pass # Shape is too small/thin do not fill
                print('c1')  # NOTE(review): leftover debug print
                # add the shape's nodes to the union find data structure
                for node in shape.nodes:
                    node = (int(node[0]), int(node[1]))
                    self.uf.union(node, node)
                    ids = set()
                    for neighbor in Shape.get_neighbors(*node):
                        ids.add(self.uf.union(node, neighbor))
                self.uf.update_arr()
                print(np.sum(self.uf.arr))  # NOTE(review): leftover debug print
            self.draw()
        pygame.quit()

    def get_events(self):
        """Gets key and mouse inputs. Deactivates game if input action was quit."""
        self.events = pygame.event.poll()
        if self.events.type == pygame.QUIT:
            self.active = False
        self.keys_press = pygame.key.get_pressed()
        self.mouse_press = pygame.mouse.get_pressed()
        self.mouse_pos = pygame.mouse.get_pos()

    def draw(self):
        # blit shapes already made and merged
        self.SURFACE.blit(self.uf.surface, (0, 0))
        # blit outline of shape being considered (use pygame.draw)
        if self.shape_outline:
            pygame.draw.lines(self.SURFACE, (200, 200, 200), True,
                              self.shape_outline, 5)
        pygame.display.flip()
if __name__ == "__main__":
    # Build a 13-entry color wheel cycling hue with fixed component levels.
    a, b, c = 51, 153, 255
    color_wheel = ((c, a, a), (c, b, a), (c, c, a), (b, c, a), (a, c, a),
                   (a, c, b), (a, c, c), (a, b, c), (a, a, c),
                   (b, a, c), (c, a, c), (c, a, b), (b, b, b))
    # Game settings; every key becomes an attribute on the Game instance.
    settings = {"WIDTH": 800,
                "HEIGHT": 800,
                "HEADER_RATIO": 0.15,
                "SLEEP_TIME": 0,
                "LOCK_TIME": 0.2,
                "COLOR_WHEEL": color_wheel,
                "BRIGHTNESS": 200 # intensity of shapes colors [0, 255]
                }
    g = Game(**settings)
    g.run()
| 37.762162 | 107 | 0.530561 |
97f54c30d652166133bc8137c162e92d2cd4dcbe | 32 | py | Python | src/__init__.py | zuniverse/python-starter-kit | e699481dcfe339ff26a0768c7ce26991d81365bf | [
"MIT"
] | null | null | null | src/__init__.py | zuniverse/python-starter-kit | e699481dcfe339ff26a0768c7ce26991d81365bf | [
"MIT"
] | null | null | null | src/__init__.py | zuniverse/python-starter-kit | e699481dcfe339ff26a0768c7ce26991d81365bf | [
"MIT"
] | null | null | null | '''
__init__.py for package.
''' | 10.666667 | 24 | 0.625 |
e8a47787be33aec7b622437e2035c5445202ef1d | 9,060 | py | Python | imutils/ml/aug/image/images.py | JacobARose/image-utils | aa0e005c0b4df5198d188b074f4e21f8d8f97962 | [
"MIT"
] | null | null | null | imutils/ml/aug/image/images.py | JacobARose/image-utils | aa0e005c0b4df5198d188b074f4e21f8d8f97962 | [
"MIT"
] | null | null | null | imutils/ml/aug/image/images.py | JacobARose/image-utils | aa0e005c0b4df5198d188b074f4e21f8d8f97962 | [
"MIT"
] | null | null | null | """
imutils/ml/aug/image/images.py
Created on: Wednesday March 16th, 2022
Created by: Jacob Alexander Rose
"""
import argparse
import cv2
from rich import print as pp
import numpy as np
from omegaconf import OmegaConf, DictConfig, ListConfig
import os
from torch import nn
import torch
from typing import *
from torchvision import transforms as T
import albumentations as A
from albumentations.augmentations import transforms as AT
import hydra
DEFAULT_CFG_PATH = os.path.join(os.path.dirname(__file__), "default_image_transform_config.yaml")
DEFAULT_CFG = OmegaConf.load(DEFAULT_CFG_PATH)
to_tensor = T.ToTensor()
__all__ = ["instantiate_transforms", "Preprocess", "BatchTransform", "get_default_transforms"]
def functional_to_grayscale(img: np.ndarray, num_output_channels: int=3):
    """Return a grayscale version of an RGB image.

    Args:
        img: RGB image as a numpy array.
        num_output_channels: 1 for a single-channel result, 3 for a
            three-channel result with r == g == b.

    Returns:
        np.ndarray: grayscale image.

    Raises:
        ValueError: if ``num_output_channels`` is neither 1 nor 3.
    """
    if num_output_channels not in (1, 3):
        raise ValueError('num_output_channels should be either 1 or 3')
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    if num_output_channels == 3:
        # Replicate the single gray channel back into three channels.
        gray = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
    return gray
class Grayscale(AT.ImageOnlyTransform):
    """Albumentations image-only transform converting the image to grayscale.

    Args:
        num_output_channels (int): (1 or 3) number of channels desired for output image.
        always_apply (bool): apply on every call regardless of ``p``.
        p (float): probability of applying the transform.

    Returns:
        Grayscale version of the input:
        - If num_output_channels == 1 : returned image is single channel
        - If num_output_channels == 3 : returned image is 3 channel with r == g == b
    """
    def __init__(self, num_output_channels=3, always_apply: bool = True, p: float = 1.0):
        super().__init__(always_apply=always_apply, p=p)
        self.num_output_channels = num_output_channels
        # Replay-mode bookkeeping expected by albumentations' ReplayCompose.
        # NOTE(review): these mirror BasicTransform internals — confirm they
        # are still required with the pinned albumentations version.
        self.deterministic = False
        self.save_key = "replay"
        self.params: Dict[Any, Any] = {}
        self.replay_mode = False
        self.applied_in_replay = False
    def get_transform_init_args_names(self):
        # Names serialized when the transform is saved/replayed.
        return (
            "num_output_channels",
        )
    def apply(self, img=None, **kwargs):
        """
        Args:
            img (np.ndarray): Image to be converted to grayscale; may also be
                passed via the ``image`` keyword (takes precedence).
        Returns:
            np.ndarray: grayscaled image.
        """
        img = img if "image" not in kwargs else kwargs["image"]
        return functional_to_grayscale(img, num_output_channels=self.num_output_channels)
def adjust_gamma(img, gamma, gain=1):
    """Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

        I_out = 255 * gain * ((I_in / 255) ** gamma)

    See https://en.wikipedia.org/wiki/Gamma_correction for more details.

    Args:
        img (np.ndarray): Image to be adjusted.
        gamma (float): Non negative real number. gamma larger than 1 makes the
            shadows darker, while gamma smaller than 1 makes dark regions
            lighter.
        gain (float): The constant multiplier.

    Returns:
        np.ndarray: gamma-corrected image with the same dtype as ``img``.

    Raises:
        TypeError: if ``img`` is not a numpy array.
        ValueError: if ``gamma`` is negative.
    """
    # Fixed: the original called the undefined helper ``_is_numpy_image``,
    # which raised NameError on every invocation; test the type directly.
    if not isinstance(img, np.ndarray):
        raise TypeError('img should be CV Image. Got {}'.format(type(img)))
    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')
    im = img.astype(np.float32)
    im = 255. * gain * np.power(im / 255., gamma)
    # Clamp to the valid intensity range before casting back.
    im = im.clip(min=0., max=255.)
    return im.astype(img.dtype)
def instantiate_transforms(cfg: DictConfig,
                           to_grayscale: bool=False,
                           num_output_channels: int=3,
                           verbose: bool=False) -> A.Compose:
    """
    Compose a series of albumentations image transformations specified entirely within a config.
    Each augmentation's python class is specified with a _target_ key, followed by any kwargs.

    Args:
        cfg: mapping of step-name -> hydra-instantiable transform config.
        to_grayscale: when True, prepend a Grayscale transform.
        num_output_channels: channels for the Grayscale step (1 or 3).
        verbose: print each step name and the final transform list.

    Returns:
        An ``albumentations.Compose`` of the instantiated transforms.
        (Annotations corrected: takes a DictConfig, returns A.Compose.)
    """
    transforms = []
    if to_grayscale:
        transforms.append(Grayscale(num_output_channels=num_output_channels))
    for name, transform_step in cfg.items():
        if verbose: print(name)
        # Hydra builds the transform object from its _target_ + kwargs.
        transforms.append(
            hydra.utils.instantiate(transform_step)
        )
    if verbose:
        pp(transforms)
    return A.Compose(transforms)
class Preprocess(nn.Module):
    """Per-sample preprocessing stage: optional tensor conversion and resize.

    Args:
        mode: split name ("train"/"val"/"test"); stored for reference only.
        resize: target size passed to ``torchvision.transforms.Resize``;
            ``None`` (or any falsy value) disables resizing.
        to_tensor: when True, convert the incoming PIL image / HxWxC array
            to a CxHxW float tensor first (uses the module-level to_tensor).
    """
    def __init__(self, mode="train", resize=None, to_tensor: bool=True):
        super().__init__()
        self.mode = mode
        self.resize = resize
        self.to_tensor = to_tensor
        # Fixed: only build the resize op when a target size was given —
        # T.Resize(None) raises in recent torchvision, so the original
        # unconditional construction broke resize-free configurations.
        self.resize_func = T.Resize(self.resize) if self.resize else None
    @torch.no_grad()  # disable gradients for efficiency
    def forward(self, x) -> torch.Tensor:
        if self.to_tensor:
            x = to_tensor(x)  # HxWxC input -> CxHxW tensor
        if self.resize:
            x = self.resize_func(x)
        return x
class BatchTransform(nn.Module):
    """Module performing batch-level data augmentation on torch tensors.

    NOTE(review): the original docstring claimed Kornia, but the transforms
    built below come from torchvision (``T``) plus one albumentations
    ColorJitter — confirm which library is intended.

    Args:
        mode: "train" enables augmentations; "val"/"test" use center crop only.
        random_resize_crop: int crop size for training RandomResizedCrop.
        center_crop: int crop size for eval CenterCrop.
        apply_color_jitter: apply the albumentations ColorJitter in forward.
        random_flips: add random horizontal/vertical flips in training.
        normalize: (mean, std) sequences passed to T.Normalize.
        skip_augmentations: disable train-time augmentation (center crop only).
    """
    def __init__(self,
                 mode: str="train",
                 random_resize_crop=None,
                 center_crop=None,
                 apply_color_jitter: bool = False,
                 random_flips: bool=True,
                 normalize = (
                     [0,0,0],
                     [1,1,1]
                 ),
                 skip_augmentations: bool=False
                 ) -> None:
        super().__init__()
        self.mode = mode
        self.random_resize_crop = random_resize_crop
        self.center_crop = center_crop
        self._apply_color_jitter = apply_color_jitter
        self.normalize = normalize
        self.random_flips = random_flips
        self.skip_augmentations = skip_augmentations
        self.build_transforms(mode=mode)
    def add_train_transforms(self, transforms=None):
        # Append training-time augmentations to the (possibly empty) list.
        transforms = transforms or []
        if self.skip_augmentations:
            # Augmentation disabled: deterministic center crop only.
            if self.random_resize_crop:
                transforms.append(T.CenterCrop(self.random_resize_crop))
        else:
            transforms.append(T.RandomPerspective())
            if type(self.random_resize_crop) == int:
                transforms.append(T.RandomResizedCrop(self.random_resize_crop))
            if self.random_flips:
                transforms.extend([
                    T.RandomHorizontalFlip(),
                    T.RandomVerticalFlip()
                ])
        return transforms
    def add_test_transforms(self, transforms=None):
        # Eval-time pipeline: deterministic center crop only.
        transforms = transforms or []
        if type(self.center_crop) == int:
            transforms.append(T.CenterCrop(self.center_crop))
        return transforms
    def build_transforms(self,
                         mode: str = "train"):
        """Assemble self.transforms (nn.Sequential) and self.jitter for ``mode``."""
        transforms = []
        if mode == "train":
            transforms = self.add_train_transforms(transforms=transforms)
        elif mode in ["val", "test"]:
            transforms = self.add_test_transforms(transforms=transforms)
        print(f"self.normalize: {self.normalize}")
        transforms.extend([
            # T.ToTensor(),
            T.Normalize(*self.normalize)
        ])
        self.transforms = nn.Sequential(*transforms)
        # NOTE(review): albumentations ColorJitter expects numpy images and a
        # dict-style call; applying it to a torch tensor batch in forward()
        # looks suspect — verify before enabling apply_color_jitter.
        self.jitter = AT.ColorJitter(brightness=0.2,
                                     contrast=0.2,
                                     saturation=0.2,
                                     hue=0.2,
                                     always_apply=False,
                                     p=0.5)
    @torch.no_grad()  # disable gradients for efficiency
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x_out = self.transforms(x)  # BxCxHxW
        if self._apply_color_jitter:
            x_out = self.jitter(image=x_out)
        return x_out
def get_default_transforms(
        mode: str="train",
        compose: bool=True,
        # NOTE(review): mutable default argument; safe only because
        # OmegaConf.merge below returns a new config rather than mutating it.
        config = dict(
            preprocess={
                'train': {'resize': 512},
                'val': {'resize': 256},
                'test': {'resize': 256}},
            batch_transform={
                'train': {'random_resize_crop': 224},
                'val': {'center_crop': 224},
                'test': {'center_crop': 224}},
            normalize=(
                [0.485, 0.456, 0.406],
                [0.229, 0.224, 0.225]
            ),
            apply_color_transform=False,
            random_flips=True,
            skip_augmentations=False
        )
) -> Tuple[Callable]:
    """Build (preprocess, batch_transform) callables for the given split.

    ``config`` is merged over the file-level DEFAULT_CFG; when ``compose``
    is True the pair is wrapped in a single T.Compose.
    """
    config = OmegaConf.merge(DEFAULT_CFG, config)
    # Per-sample stage: resize (if configured) or plain ToTensor.
    if config["preprocess"][mode].get("resize", None):
        preprocess_transforms = Preprocess(mode=mode,
                                           resize=config["preprocess"][mode]["resize"])
    else:
        preprocess_transforms = T.ToTensor()
    # Batch stage: random crop for training, center crop for eval.
    if mode == "train":
        random_resize_crop = config["batch_transform"]["train"]["random_resize_crop"]
        center_crop = None
    else:
        random_resize_crop = None
        center_crop = config["batch_transform"][mode]["center_crop"]
    apply_color_jitter = config.get("apply_color_transform", False)
    random_flips = config.get("random_flips", True)
    skip_augmentations = config.get("skip_augmentations", False)
    normalize = config.get("normalize",
                           (
                               [0,0,0],
                               [1,1,1]
                           )
                           )
    batch_transforms = BatchTransform(mode=mode,
                                      random_resize_crop=random_resize_crop,
                                      center_crop=center_crop,
                                      apply_color_jitter = apply_color_jitter,
                                      random_flips = random_flips,
                                      normalize = normalize,
                                      skip_augmentations=skip_augmentations)
    transforms = (preprocess_transforms,
                  batch_transforms)
    if compose:
        transforms = T.Compose(transforms)
    return transforms
if __name__=="__main_":
config = DEFAULT_CFG
train_preprocess_transforms, train_batch_transforms = get_default_transforms(mode="train",
**config)
val_preprocess_transforms, val_batch_transforms = get_default_transforms(mode="val",
**config) | 25.521127 | 97 | 0.702318 |
fef60d6e866379c10fbec50fbdc98c07f5f48311 | 9,565 | py | Python | database/compiled_templates/display_common.mako.py | bopopescu/galaxy-pipelines | 109059d32a0a176a42f9d8949e72802677b8133d | [
"CC-BY-3.0"
] | null | null | null | database/compiled_templates/display_common.mako.py | bopopescu/galaxy-pipelines | 109059d32a0a176a42f9d8949e72802677b8133d | [
"CC-BY-3.0"
] | null | null | null | database/compiled_templates/display_common.mako.py | bopopescu/galaxy-pipelines | 109059d32a0a176a42f9d8949e72802677b8133d | [
"CC-BY-3.0"
] | 1 | 2020-07-25T21:10:26.000Z | 2020-07-25T21:10:26.000Z | # -*- encoding:ascii -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 6
_modified_time = 1382462618.14644
_template_filename=u'templates/display_common.mako'
_template_uri=u'/display_common.mako'
_template_cache=cache.Cache(__name__, _modified_time)
_source_encoding='ascii'
_exports = ['get_class_plural_display_name', 'get_item_user', 'get_item_plural', 'render_message', 'get_class_plural', 'get_item_name', 'get_class_display_name', 'get_controller_name', 'get_item_slug', 'get_history_link']
# SOURCE LINE 8
from galaxy import model
def render_body(context,**pageargs):
    """Render the template body (whitespace between <%def> blocks only).

    AUTO-GENERATED by Mako from display_common.mako — do not edit by hand;
    regenerate from the template instead.
    """
    context.caller_stack._push_frame()
    try:
        __M_locals = __M_dict_builtin(pageargs=pageargs)
        __M_writer = context.writer()
        # SOURCE LINE 7
        __M_writer(u'\n')
        # SOURCE LINE 8
        __M_writer(u'\n\n')
        # SOURCE LINE 19
        __M_writer(u'\n\n')
        # SOURCE LINE 38
        __M_writer(u'\n\n')
        # SOURCE LINE 51
        __M_writer(u'\n\n')
        # SOURCE LINE 64
        __M_writer(u'\n\n')
        # SOURCE LINE 69
        __M_writer(u'\n\n')
        # SOURCE LINE 96
        __M_writer(u'\n\n')
        # SOURCE LINE 112
        __M_writer(u'\n\n')
        # SOURCE LINE 123
        __M_writer(u'\n\n')
        # SOURCE LINE 134
        __M_writer(u'\n\n')
        # SOURCE LINE 143
        __M_writer(u'\n\n')
        # SOURCE LINE 153
        __M_writer(u'\n\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_get_class_plural_display_name(context,a_class):
    """Compiled <%def get_class_plural_display_name>: pluralized display
    name for a model class (irregular plurals special-cased first).

    AUTO-GENERATED by Mako — do not edit by hand.
    """
    context.caller_stack._push_frame()
    try:
        def get_class_display_name(a_class):
            return render_get_class_display_name(context,a_class)
        __M_writer = context.writer()
        # SOURCE LINE 41
        __M_writer(u'\n')
        # SOURCE LINE 42
        # Start with exceptions, end with default.
        if a_class is model.History:
            return "Histories"
        elif a_class is model.FormDefinitionCurrent:
            return "Forms"
        else:
            return get_class_display_name( a_class ) + "s"
        # SOURCE LINE 50
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_get_item_user(context,item):
    """Compiled <%def get_item_user>: owning user of a model item
    (HistoryDatasetAssociation resolves through its history).

    AUTO-GENERATED by Mako — do not edit by hand.
    """
    context.caller_stack._push_frame()
    try:
        isinstance = context.get('isinstance', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 115
        __M_writer(u'\n ')
        # SOURCE LINE 116
        # Exceptions first, default last.
        if isinstance( item, model.HistoryDatasetAssociation ):
            return item.history.user
        else:
            return item.user
        # SOURCE LINE 122
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
def render_get_item_plural(context,item):
    """Compiled <%def get_item_plural>: plural word for an item's class.

    AUTO-GENERATED by Mako — do not edit by hand.
    """
    context.caller_stack._push_frame()
    try:
        def get_class_plural(a_class):
            return render_get_class_plural(context,a_class)
        __M_writer = context.writer()
        # SOURCE LINE 67
        __M_writer(u'\n ')
        # SOURCE LINE 68
        return get_class_plural( item.__class__ )
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: emit the transient status-message <div> (class is prefixed
# with the status, e.g. "done"/"error") when a message is present.
def render_render_message(context,message,status):
    context.caller_stack._push_frame()
    try:
        util = context.get('util', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 146
        __M_writer(u'\n')
        # SOURCE LINE 147
        if message:
            # SOURCE LINE 148
            __M_writer(u'    <p>\n        <div class="')
            # SOURCE LINE 149
            __M_writer(unicode(status))
            __M_writer(u'message transient-message">')
            __M_writer(unicode(util.restore_text( message )))
            __M_writer(u'</div>\n        <div style="clear: both"></div>\n    </p>\n')
            pass
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: lookup table mapping a model class to its plural label,
# falling back to the generic "items".
def render_get_class_plural(context,a_class):
    context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        # SOURCE LINE 72
        __M_writer(u'\n')
        # SOURCE LINE 73
        if a_class == model.History:
            class_plural = "Histories"
        elif a_class == model.StoredWorkflow:
            class_plural = "Workflows"
        elif a_class == model.Page:
            class_plural = "Pages"
        elif a_class == model.Library:
            class_plural = "Libraries"
        elif a_class == model.HistoryDatasetAssociation:
            class_plural = "Datasets"
        elif a_class == model.SampleDataset:
            class_plural = "Sample Datasets"
        elif a_class == model.FormDefinitionCurrent:
            class_plural = "Forms"
        elif a_class == model.RequestType:
            class_plural = "request types"
        elif a_class == model.UserOpenID:
            class_plural = "OpenIDs"
        else:
            class_plural = "items"
        return class_plural
        # SOURCE LINE 95
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: return an item's display name as unicode.  Pages and
# Visualizations use .title; anything with get_display_name() uses that;
# otherwise .name.  (Python 2 code: str results are decoded from UTF-8.)
def render_get_item_name(context,item):
    context.caller_stack._push_frame()
    try:
        hasattr = context.get('hasattr', UNDEFINED)
        type = context.get('type', UNDEFINED)
        unicode = context.get('unicode', UNDEFINED)
        str = context.get('str', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 21
        __M_writer(u'\n    ')
        # SOURCE LINE 22
        # Start with exceptions, end with default.
        if type( item ) is model.Page:
            item_name = item.title
        elif type( item ) is model.Visualization:
            item_name = item.title
        elif hasattr( item, 'get_display_name'):
            item_name = item.get_display_name()
        else:
            item_name = item.name
        # Encode in unicode.
        if type( item_name ) is str:
            item_name = unicode( item_name, 'utf-8' )
        return item_name
        # SOURCE LINE 37
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: singular display name of a model class; defaults to the
# class's own __name__ except for the two special cases below.
def render_get_class_display_name(context,a_class):
    context.caller_stack._push_frame()
    try:
        __M_writer = context.writer()
        # SOURCE LINE 54
        __M_writer(u'\n')
        # SOURCE LINE 55
        ## Start with exceptions, end with default.
        if a_class is model.StoredWorkflow:
            return "Workflow"
        elif a_class is model.HistoryDatasetAssociation:
            return "Dataset"
        else:
            return a_class.__name__
        # SOURCE LINE 63
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: map an item instance to the name of the web controller that
# serves it.  Falls through (returns Mako's default '') for unknown types.
def render_get_controller_name(context,item):
    context.caller_stack._push_frame()
    try:
        isinstance = context.get('isinstance', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 99
        __M_writer(u'\n    ')
        # SOURCE LINE 100
        if isinstance( item, model.History ):
            return "history"
        elif isinstance( item, model.StoredWorkflow ):
            return "workflow"
        elif isinstance( item, model.HistoryDatasetAssociation ):
            return "dataset"
        elif isinstance( item, model.Page ):
            return "page"
        elif isinstance( item, model.Visualization ):
            return "visualization"
        # SOURCE LINE 111
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: URL slug for an item.  Datasets have no slug attribute, so
# their encoded database id is used instead.
def render_get_item_slug(context,item):
    context.caller_stack._push_frame()
    try:
        trans = context.get('trans', UNDEFINED)
        isinstance = context.get('isinstance', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 126
        __M_writer(u'\n    ')
        # SOURCE LINE 127
        # Exceptions first, default last.
        if isinstance( item, model.HistoryDatasetAssociation ):
            return trans.security.encode_id( item.id )
        else:
            return item.slug
        # SOURCE LINE 133
        __M_writer(u'\n')
        return ''
    finally:
        context.caller_stack._pop_frame()
# Template def: URL for a history.  A published history (it has a slug and
# its owner has a username) gets the pretty by-username-and-slug URL;
# otherwise the encoded-id "view" URL is built.
def render_get_history_link(context,history,qualify=False):
    context.caller_stack._push_frame()
    try:
        h = context.get('h', UNDEFINED)
        trans = context.get('trans', UNDEFINED)
        __M_writer = context.writer()
        # SOURCE LINE 137
        __M_writer(u'\n')
        # SOURCE LINE 138
        if history.slug and history.user.username:
            # SOURCE LINE 139
            __M_writer(u'    ')
            return h.url_for( controller='/history', action='display_by_username_and_slug', username=history.user.username, slug=history.slug, qualified=qualify )
            __M_writer(u'\n')
        # SOURCE LINE 140
        else:
            # SOURCE LINE 141
            __M_writer(u'    ')
            return h.url_for( controller='/history', action='view', id=trans.security.encode_id( history.id ), qualified=qualify, use_panels=context.get('use_panels', True) )
            __M_writer(u'\n')
        pass
        return ''
    finally:
        context.caller_stack._pop_frame()
| 29.984326 | 221 | 0.586618 |
c13c337f36b0125035a1ae3601c62c6bd5f602a5 | 2,182 | py | Python | PSet2/P6/modules/graphics.py | alpha-leo/ComputationalPhysics-Fall2020 | 737769d4a046b4ecea885cafeaf26e26075f7320 | [
"MIT"
] | 1 | 2021-08-10T14:33:35.000Z | 2021-08-10T14:33:35.000Z | PSet2/P6/modules/graphics.py | alpha-leo/ComputationalPhysics-Fall2020 | 737769d4a046b4ecea885cafeaf26e26075f7320 | [
"MIT"
] | null | null | null | PSet2/P6/modules/graphics.py | alpha-leo/ComputationalPhysics-Fall2020 | 737769d4a046b4ecea885cafeaf26e26075f7320 | [
"MIT"
] | null | null | null | """
Here lie methods for ouputing as images--aka Graphics. R.I.P
"""
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
from .generate import error
def draw_canvas(canvas, max_height):
    """Render the deposition canvas as a color map and save it to disk.

    Args:
        canvas: 2-D array-like of deposited cells, drawn with pcolor.
        max_height: heights of the columns (array or scalar); the y-axis
            is clipped a little above its maximum so the surface is visible.
    """
    figure, axis = plt.subplots(1, 1)
    axis.pcolor(canvas, cmap="RdBu")
    axis.set_ylim(0, np.max(max_height) + 10)
    axis.set_title("Ballistic Deposition")
    # Write the plot out; bbox_inches='tight' trims surrounding whitespace.
    plt.savefig("canvas.jpg", dpi=500, bbox_inches='tight')
def draw_variance(x_axis, variance):
    """Plot the roughness w(t) with error bars on log-log axes and fit it.

    A straight line is fitted to (log10 t, log10 w) with np.polyfit, so the
    slope popt[0] is the growth exponent of the roughness (hence the saved
    file name ``plot_for_beta.jpg``).

    Args:
        x_axis: deposition times (number of particles deposited).
        variance: 2-D array whose column j holds the ensemble of roughness
            samples at time x_axis[j] -- assumed from the variance[:, j]
            indexing below; confirm against the caller.

    Returns:
        (popt, stdev): the [slope, intercept] of the log-log fit and the
        standard deviations of those two parameters.
    """
    # Per-time-step mean roughness and its error bar; error() is supplied by
    # the sibling .generate module.
    yerr = []
    means = []
    for _ in range(variance.shape[1]):
        yerr.append(error(variance[:, _]))
        means.append(np.mean(variance[:, _]))
    fig, ax = plt.subplots(1, 1)
    # Scatter with error bars (default errorbar color, red star markers).
    ax.errorbar(x_axis, means, yerr=yerr, ls='', marker='*',
                markersize=5, markerfacecolor='red', markeredgecolor='black',
                markeredgewidth=0.2, label='scatter data')
    # Degree-1 fit in log-log space; cov=True also returns the covariance
    # matrix used for the parameter error estimate at the end.
    popt, pcov = np.polyfit(np.log10(x_axis), np.log10(means), 1,
                            full=False, cov=True)
    print("Values for popt:\n", popt)
    # Log scale for x- and y-axis
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.set_xlabel("Time (unit = number of particles depositted)")
    ax.set_ylabel("Roughness -- w(t)")
    plt.legend()
    plt.savefig("plot_for_beta.jpg", dpi=500, bbox_inches='tight')
    # Parameter standard deviations = sqrt of the covariance diagonal.
    stdev = np.sqrt(np.diag(pcov))
    return popt, stdev
| 32.567164 | 77 | 0.617324 |
c3d7eeb5781aff3ea273ecae991d9c559b6a0794 | 3,631 | py | Python | setup.py | mzwiessele/cellSLAM | c94c52885ed5e78a7b5d659c9510033647633076 | [
"BSD-3-Clause"
] | 15 | 2016-06-01T00:47:19.000Z | 2022-02-02T10:34:23.000Z | setup.py | mzwiessele/cellSLAM | c94c52885ed5e78a7b5d659c9510033647633076 | [
"BSD-3-Clause"
] | 1 | 2019-09-24T05:08:43.000Z | 2019-09-24T05:08:43.000Z | setup.py | mzwiessele/cellSLAM | c94c52885ed5e78a7b5d659c9510033647633076 | [
"BSD-3-Clause"
] | 5 | 2016-10-19T08:18:39.000Z | 2020-03-01T15:50:43.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#===============================================================================
# Copyright (c) 2015, Max Zwiessele
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of paramax nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
from __future__ import print_function
import os
from setuptools import setup
import codecs
def read(fname):
    """Return the full text of *fname*, decoded as latin-1."""
    with codecs.open(fname, 'r', 'latin') as handle:
        contents = handle.read()
    return contents
def read_to_rst(fname):
    """Return the contents of Markdown file *fname* converted to reST.

    When pypandoc is available the converted text is written next to the
    source file with an ``.rst`` extension and read back; when pypandoc is
    not installed (e.g. on end-user machines) the raw Markdown is returned
    unchanged so setup() still gets a usable long description.
    """
    try:
        import pypandoc
        rstname = "{}.{}".format(os.path.splitext(fname)[0], 'rst')
        pypandoc.convert(read(fname), 'rst', format='md', outputfile=rstname)
        # Read the converted file back through the same latin-1 helper used
        # everywhere else, instead of the platform-default text encoding.
        return read(rstname)
    except ImportError:
        return read(fname)
# Long description for PyPI comes straight from the README (Markdown).
desc = read('README.md')
# Execute topslam/__version__.py in a scratch namespace to pick up
# __version__ without importing the package itself.
version_dummy = {}
exec(read('topslam/__version__.py'), version_dummy)
__version__ = version_dummy['__version__']
del version_dummy
# Package metadata handed to setuptools; install_requires lists the runtime
# dependencies resolved at install time.
setup(name = 'topslam',
      version = __version__,
      author = "Max Zwiessele",
      author_email = "ibinbei@gmail.com",
      description = ("topslam metric and correction techniques for (Bayesian) GPLVM"),
      long_description=desc,
      license = "BSD 3-clause",
      keywords = "machine-learning gaussian-processes kernels",
      url = "https://github.com/mzwiessele/topslam",
      #packages = ["cellSLAM",
      #            "cellSLAM/tests"
      #            "cellSLAM/simulation"
      #            ],
      package_dir={'topslam': 'topslam'},
      py_modules = ['topslam.__init__'],
      #test_suite = 'cellSLAM.tests',
      install_requires=['GPy>=1', 'scikit-learn', 'pandas', 'pods', 'seaborn', 'adjustText'],
      classifiers=['License :: OSI Approved :: BSD License',
                   'Natural Language :: English',
                   'Operating System :: MacOS :: MacOS X',
                   'Operating System :: Microsoft :: Windows',
                   'Operating System :: POSIX :: Linux',
                   'Programming Language :: Python :: 2.7',
                   'Topic :: Scientific/Engineering :: Artificial Intelligence']
      )
| 41.261364 | 93 | 0.646103 |
3cb5f334625151e9a796918ee61c665d1a4c1673 | 6,685 | py | Python | src/config_web.py | biothings/mygene.info | 907f87b4c3dc5f3a59bd3cfc31ecd81a2c3f5136 | [
"Apache-2.0"
] | 78 | 2017-05-26T08:38:25.000Z | 2022-02-25T08:55:31.000Z | src/config_web.py | biothings/mygene.info | 907f87b4c3dc5f3a59bd3cfc31ecd81a2c3f5136 | [
"Apache-2.0"
] | 105 | 2017-05-18T21:57:13.000Z | 2022-03-18T21:41:47.000Z | src/config_web.py | biothings/mygene.info | 907f87b4c3dc5f3a59bd3cfc31ecd81a2c3f5136 | [
"Apache-2.0"
] | 19 | 2017-06-12T18:31:54.000Z | 2021-11-10T00:04:43.000Z | """
Mygene.info API v3
https://mygene.info/v3/
"""
import re
import copy
from biothings.web.settings.default import APP_LIST, ANNOTATION_KWARGS, QUERY_KWARGS
# *****************************************************************************
# Elasticsearch Settings
# *****************************************************************************
# Elasticsearch backend: cluster address plus the index/doc type this API
# queries.
ES_HOST = 'localhost:9200'
ES_INDEX = 'mygene_current'
ES_DOC_TYPE = 'gene'
# *****************************************************************************
# Web Application
# *****************************************************************************
API_VERSION = 'v3'
# Species/taxon lookups are redirected to the BioThings taxonomy service.
TAX_REDIRECT = "http://t.biothings.io/v1/taxon/{0}?include_children=1"
# Extra tornado routes added on top of the biothings defaults.
APP_LIST += [
    (r"/{ver}/species/(\d+)/?", "tornado.web.RedirectHandler", {"url": TAX_REDIRECT}),
    (r"/{ver}/taxon/(\d+)/?", "tornado.web.RedirectHandler", {"url": TAX_REDIRECT}),
    (r"/{ver}/query/?", "web.handlers.MygeneQueryHandler"),
    (r"/{ver}/metadata/?", "web.handlers.MygeneSourceHandler"),
    (r"/metadata/?", "web.handlers.MygeneSourceHandler"),
]
# html header image
HTML_OUT_HEADER_IMG = "/static/favicon.ico"
# for title line on format=html
HTML_OUT_TITLE = """<p style="font-family:'Open Sans',sans-serif;font-weight:bold; font-size:16px;"><a href="http://mygene.info" target="_blank" style="text-decoration: none; color: black">MyGene.info - Gene Annotation as a Service</a></p>"""
# Hosted documentation URLs for the three services.
METADATA_DOCS_URL = "http://docs.mygene.info/en/latest/doc/data.html"
QUERY_DOCS_URL = "http://docs.mygene.info/en/latest/doc/query_service.html"
ANNOTATION_DOCS_URL = "http://docs.mygene.info/en/latest/doc/annotation_service.html"
# *****************************************************************************
# User Input Control
# *****************************************************************************
# Fields returned by default (wired into QUERY_KWARGS['*']['_source'] below).
DEFAULT_FIELDS = ['name', 'symbol', 'taxid', 'entrezgene']
# Common-name aliases accepted wherever a species/taxon id is expected.
TAXONOMY = {
    "human": {"tax_id": "9606", "assembly": "hg38"},
    "mouse": {"tax_id": "10090", "assembly": "mm10"},
    "rat": {"tax_id": "10116", "assembly": "rn6"},
    "fruitfly": {"tax_id": "7227", "assembly": "dm6"},
    "nematode": {"tax_id": "6239", "assembly": "ce11"},
    "zebrafish": {"tax_id": "7955", "assembly": "danRer11"},
    "thale-cress": {"tax_id": "3702", "assembly": "araTha1"},
    "frog": {"tax_id": "8364", "assembly": "xenTro9"},
    "pig": {"tax_id": "9823", "assembly": "susScr11"}
}
# User-facing query prefixes rewritten to the actual indexed field names
# (keys are regex patterns, values regex replacements; compiled below).
DATASOURCE_TRANSLATIONS = {
    "refseq:": r"refseq_agg:",
    "accession:": r"accession_agg:",
    "reporter:": r"reporter.\*:",
    "interpro:": r"interpro.id:",
    # "GO:<id>" looks like a raw ES query, so instead search the term as a
    # string against the GO id fields only (searching every key would fail:
    # e.g. the pubmed key holds ints and cannot take a string term).
    "GO:": r"go.\*.id:go\:",
    # "GO:": r"go.\*:go.",
    "homologene:": r"homologene.id:",
    "reagent:": r"reagent.\*.id:",
    "uniprot:": r"uniprot.\*:",
    "wikipedia:": r"wikipedia.\*:",
    "ensemblgene:": "ensembl.gene:",
    "ensembltranscript:": "ensembl.transcript:",
    "ensemblprotein:": "ensembl.protein:",
    # Lower-cased user prefixes mapped to their canonical mixed-case forms
    # for sources whose ids are case-sensitive.
    "hgnc:": r"HGNC:",
    "hprd:": r"HPRD:",
    "mim:": r"MIM:",
    "mgi:": r"MGI:",
    "ratmap:": r"RATMAP:",
    "rgd:": r"RGD:",
    "flybase:": r"FLYBASE:",
    "wormbase:": r"WormBase:",
    "tair:": r"TAIR:",
    "zfin:": r"ZFIN:",
    "xenbase:": r"Xenbase:",
    "mirbase:": r"miRBase:",
}
# Typedefs for the "species" / "species_facet_filter" parameters: accept up
# to 1000 values and translate common names (TAXONOMY keys) to NCBI tax ids.
SPECIES_TYPEDEF = {
    'species': {
        'type': list,
        'default': ['all'],
        'strict': False,
        'max': 1000,
        'translations': [
            (re.compile(pattern, re.I), translation['tax_id'])
            for (pattern, translation) in TAXONOMY.items()
        ]
    },
    'species_facet_filter': {
        'type': list,
        'default': None,
        'strict': False,
        'max': 1000,
        'translations': [
            (re.compile(pattern, re.I), translation['tax_id']) for
            (pattern, translation) in TAXONOMY.items()
        ]
    }
}
# Boolean/list filters accepted by the query endpoint.
FIELD_FILTERS = {
    'entrezonly': {'type': bool, 'default': False},
    'ensemblonly': {'type': bool, 'default': False},
    'exists': {'type': list, 'default': None, 'max': 1000, 'strict': False},
    'missing': {'type': list, 'default': None, 'max': 1000, 'strict': False},
}
# Pre-compiled (pattern, replacement) pairs applied to incoming GET queries.
DATASOURCE_TRANSLATION_TYPEDEF = [
    (re.compile(pattern, re.I), translation) for
    (pattern, translation) in DATASOURCE_TRANSLATIONS.items()
]
# Same pairs with the trailing ":<field>" part stripped -- applied to the
# POST "scopes" values, which are bare field names rather than queries.
TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF = [
    (re.compile(re.sub(r':.*', '', pattern).replace('\\', '') + '(?!\\.)', re.I),
     re.sub(r':.*', '', translation).replace('\\', ''))
    for(pattern, translation) in DATASOURCE_TRANSLATIONS.items()
]
# Extend the biothings default kwargs; deep-copied first so the shared
# defaults imported from biothings stay untouched.
ANNOTATION_KWARGS = copy.deepcopy(ANNOTATION_KWARGS)
ANNOTATION_KWARGS['*'].update(SPECIES_TYPEDEF)
ANNOTATION_KWARGS['*']['_source']['strict'] = False
QUERY_KWARGS = copy.deepcopy(QUERY_KWARGS)
QUERY_KWARGS['*'].update(SPECIES_TYPEDEF)
QUERY_KWARGS['*'].update(FIELD_FILTERS)
QUERY_KWARGS['*']['_source']['default'] = DEFAULT_FIELDS
QUERY_KWARGS['*']['_source']['strict'] = False
QUERY_KWARGS['GET']['q']['translations'] = DATASOURCE_TRANSLATION_TYPEDEF
QUERY_KWARGS['POST']['scopes']['translations'] = TRIMMED_DATASOURCE_TRANSLATION_TYPEDEF
QUERY_KWARGS['GET']['include_tax_tree'] = {'type': bool, 'default': False}
QUERY_KWARGS['POST']['scopes']['default'] = ["_id", "entrezgene", "ensembl.gene", "retired"]
QUERY_KWARGS['POST']['q']['jsoninput'] = True
# *****************************************************************************
# Elasticsearch Query Pipeline
# *****************************************************************************
# Query builder class (dotted path) used by the ES query pipeline.
ES_QUERY_BUILDER = "web.pipeline.MygeneQueryBuilder"
# Aggregate/meta fields hidden from the available-fields listing.
AVAILABLE_FIELDS_EXCLUDED = ['all', 'accession_agg', 'refseq_agg']
# *****************************************************************************
# Endpoints Specifics & Others
# *****************************************************************************
# kwargs for status check
STATUS_CHECK = {
    'id': '1017',
    'index': 'mygene_current'
}
# Annotation id handling: a purely numeric id is matched against the
# entrezgene/retired fields (bypassing the plain es.get fallback used in
# myvariant); all other annotation lookups multi-match the default scopes.
ANNOTATION_ID_REGEX_LIST = [(re.compile(r'^\d+$'), ['entrezgene', 'retired'])]
ANNOTATION_DEFAULT_SCOPES = ["_id", "entrezgene", "ensembl.gene", "retired"]
# for docs
INCLUDE_DOCS = False
DOCS_STATIC_PATH = 'docs/_build/html'
# url template to redirect for 'include_tax_tree' parameter
INCLUDE_TAX_TREE_REDIRECT_ENDPOINT = 'http://t.biothings.io/v1/taxon'
| 38.641618 | 242 | 0.567091 |
77ec48c543562ca1ea278c5c12ad722b4ba695c2 | 7,897 | py | Python | homeassistant/components/notify/__init__.py | paoloantinori/core | 4bbc737954325e84e42572e8ce7e40116d1a271e | [
"Apache-2.0"
] | 1 | 2020-09-07T17:15:34.000Z | 2020-09-07T17:15:34.000Z | homeassistant/components/notify/__init__.py | paoloantinori/core | 4bbc737954325e84e42572e8ce7e40116d1a271e | [
"Apache-2.0"
] | 45 | 2020-07-23T07:13:34.000Z | 2022-03-31T06:01:55.000Z | homeassistant/components/notify/__init__.py | ajschmidt8/home-assistant | 75153dd4a3061f27674f4adbd9283e6c46534e66 | [
"Apache-2.0"
] | null | null | null | """Provides functionality to notify people."""
import asyncio
from functools import partial
import logging
from typing import Optional
import voluptuous as vol
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.util import slugify
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
# Platform specific data
ATTR_DATA = "data"
# Text to notify user of
ATTR_MESSAGE = "message"
# Target of the notification (user, device, etc)
ATTR_TARGET = "target"
# Title of notification
ATTR_TITLE = "title"
ATTR_TITLE_DEFAULT = "Home Assistant"
DOMAIN = "notify"
SERVICE_NOTIFY = "notify"
# Key under hass.data where the per-integration notify service records live.
NOTIFY_SERVICES = "notify_services"
# Keys of the per-service record dict built in async_setup.
SERVICE = "service"
TARGETS = "targets"
FRIENDLY_NAME = "friendly_name"
TARGET_FRIENDLY_NAME = "target_friendly_name"
# Minimal schema each notify platform's configuration entry must satisfy.
PLATFORM_SCHEMA = vol.Schema(
    {vol.Required(CONF_PLATFORM): cv.string, vol.Optional(CONF_NAME): cv.string},
    extra=vol.ALLOW_EXTRA,
)
# Schema for the data payload of a notify.* service call; message and title
# are templates rendered at call time.
NOTIFY_SERVICE_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_MESSAGE): cv.template,
        vol.Optional(ATTR_TITLE): cv.template,
        vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(ATTR_DATA): dict,
    }
)
@bind_hass
async def async_reload(hass, integration_name):
    """Re-register the notify services of one integration."""
    registry = hass.data.get(NOTIFY_SERVICES)
    if not registry or integration_name not in registry:
        # Nothing has been set up for this integration yet.
        return
    await asyncio.gather(
        *(
            _async_setup_notify_services(hass, data)
            for data in registry[integration_name]
        )
    )
async def _async_setup_notify_services(hass, data):
    """Create or remove the notify services."""
    notify_service = data[SERVICE]
    friendly_name = data[FRIENDLY_NAME]
    targets = data[TARGETS]
    async def _async_notify_message(service):
        """Handle sending notification message service calls."""
        await _async_notify_message_service(hass, service, notify_service, targets)
    if hasattr(notify_service, "targets"):
        target_friendly_name = data[TARGET_FRIENDLY_NAME]
        # Start from everything registered previously; whatever remains in
        # this set after the loop no longer exists on the platform and is
        # unregistered below.
        stale_targets = set(targets)
        for name, target in notify_service.targets.items():
            target_name = slugify(f"{target_friendly_name}_{name}")
            if target_name in stale_targets:
                stale_targets.remove(target_name)
            if target_name in targets:
                # Already registered; keep the existing per-target service.
                continue
            targets[target_name] = target
            hass.services.async_register(
                DOMAIN,
                target_name,
                _async_notify_message,
                schema=NOTIFY_SERVICE_SCHEMA,
            )
        # Drop services for targets the platform no longer reports.
        for stale_target_name in stale_targets:
            del targets[stale_target_name]
            hass.services.async_remove(
                DOMAIN,
                stale_target_name,
            )
    friendly_name_slug = slugify(friendly_name)
    if hass.services.has_service(DOMAIN, friendly_name_slug):
        return
    # Register the integration's base notify service exactly once.
    hass.services.async_register(
        DOMAIN,
        friendly_name_slug,
        _async_notify_message,
        schema=NOTIFY_SERVICE_SCHEMA,
    )
async def _async_notify_message_service(hass, service, notify_service, targets):
    """Render the templated call data and forward it to the notify service."""
    send_kwargs = {}
    title_template = service.data.get(ATTR_TITLE)
    if title_template:
        title_template.hass = hass
        send_kwargs[ATTR_TITLE] = title_template.async_render()

    # A target bound to this specific service name takes precedence over an
    # explicit "target" in the call data.
    pinned_target = targets.get(service.service)
    if pinned_target is not None:
        send_kwargs[ATTR_TARGET] = [pinned_target]
    else:
        requested_target = service.data.get(ATTR_TARGET)
        if requested_target is not None:
            send_kwargs[ATTR_TARGET] = requested_target

    message_template = service.data[ATTR_MESSAGE]
    message_template.hass = hass
    send_kwargs[ATTR_MESSAGE] = message_template.async_render()
    send_kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
    await notify_service.async_send_message(**send_kwargs)
async def async_setup(hass, config):
    """Set up the notify services."""
    hass.data.setdefault(NOTIFY_SERVICES, {})
    async def async_setup_platform(
        integration_name, p_config=None, discovery_info=None
    ):
        """Set up a notify platform."""
        if p_config is None:
            p_config = {}
        platform = await async_prepare_setup_platform(
            hass, config, DOMAIN, integration_name
        )
        if platform is None:
            _LOGGER.error("Unknown notification service specified")
            return
        _LOGGER.info("Setting up %s.%s", DOMAIN, integration_name)
        notify_service = None
        try:
            # A platform provides either the async or the sync factory; sync
            # factories are run via hass's job scheduler.
            if hasattr(platform, "async_get_service"):
                notify_service = await platform.async_get_service(
                    hass, p_config, discovery_info
                )
            elif hasattr(platform, "get_service"):
                notify_service = await hass.async_add_job(
                    platform.get_service, hass, p_config, discovery_info
                )
            else:
                raise HomeAssistantError("Invalid notify platform.")
            if notify_service is None:
                # Platforms can decide not to create a service based
                # on discovery data.
                if discovery_info is None:
                    _LOGGER.error(
                        "Failed to initialize notification service %s", integration_name
                    )
                return
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform %s", integration_name)
            return
        notify_service.hass = hass
        if discovery_info is None:
            discovery_info = {}
        target_friendly_name = (
            p_config.get(CONF_NAME) or discovery_info.get(CONF_NAME) or integration_name
        )
        friendly_name = (
            p_config.get(CONF_NAME) or discovery_info.get(CONF_NAME) or SERVICE_NOTIFY
        )
        data = {
            FRIENDLY_NAME: friendly_name,
            # The targets use a slightly different friendly name
            # selection pattern than the base service
            TARGET_FRIENDLY_NAME: target_friendly_name,
            SERVICE: notify_service,
            TARGETS: {},
        }
        # Record the service so async_reload can re-register it later.
        hass.data[NOTIFY_SERVICES].setdefault(integration_name, [])
        hass.data[NOTIFY_SERVICES][integration_name].append(data)
        await _async_setup_notify_services(hass, data)
        hass.config.components.add(f"{DOMAIN}.{integration_name}")
        return True
    # Set up every notify platform found in the configuration concurrently.
    setup_tasks = [
        async_setup_platform(integration_name, p_config)
        for integration_name, p_config in config_per_platform(config, DOMAIN)
    ]
    if setup_tasks:
        await asyncio.wait(setup_tasks)
    async def async_platform_discovered(platform, info):
        """Handle for discovered platform."""
        await async_setup_platform(platform, discovery_info=info)
    # Also set up platforms announced later through discovery.
    discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
    return True
class BaseNotificationService:
    """Abstract base class for notify platform service objects.

    Concrete platforms implement send_message (or override
    async_send_message directly).
    """

    # Assigned by the setup code after the platform service is created.
    hass: Optional[HomeAssistantType] = None

    def send_message(self, message, **kwargs):
        """Send a message.

        kwargs can contain ATTR_TITLE to specify a title.
        """
        raise NotImplementedError()

    async def async_send_message(self, message, **kwargs):
        """Send a message.

        Delegates to the synchronous send_message through
        hass.async_add_job; kwargs can contain ATTR_TITLE for a title.
        """
        job = partial(self.send_message, message, **kwargs)
        await self.hass.async_add_job(job)
| 30.968627 | 88 | 0.662783 |
c0331dd7361403b63a82b75969238b33d2726b62 | 69,293 | py | Python | tensorflow/python/ops/variable_scope.py | conqer/tensorflow | 100552f943c78cbf90aad521f9981df9b5e3c738 | [
"Apache-2.0"
] | 1 | 2018-01-19T10:51:11.000Z | 2018-01-19T10:51:11.000Z | tensorflow/python/ops/variable_scope.py | conqer/tensorflow | 100552f943c78cbf90aad521f9981df9b5e3c738 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/variable_scope.py | conqer/tensorflow | 100552f943c78cbf90aad521f9981df9b5e3c738 | [
"Apache-2.0"
] | 1 | 2019-04-21T16:12:56.000Z | 2019-04-21T16:12:56.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as collections_lib
import contextlib
import copy
import functools
import traceback
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
# Public API of this module; underscore-prefixed helpers such as
# _PartitionInfo and _VariableStore are internal.
__all__ = ["VariableScope", "get_variable_scope",
           "get_variable", "get_local_variable", "variable_scope",
           "variable_op_scope", "no_regularizer"]
class _PartitionInfo(object):
  """Holds partition info used by initializer functions.
  """
  def __init__(self, full_shape, var_offset):
    """Constructor.
    Args:
      full_shape: Tuple or list of `int` indicating the full combined shape
        of the partitioned variables.
      var_offset: Tuple or list of `int` specifying offset of this partition
        with respect to the full variable for each dimension.
    Raises:
      TypeError: If `full_shape` or `var_offset` is not a sequence.
      ValueError: If `full_shape` or `var_offset` differ in length. If
        `var_offset` exceeds `full_shape` in any dimension.
    """
    # A str is also a Sequence, so strings must be ruled out explicitly.
    if not isinstance(full_shape, collections_lib.Sequence) or isinstance(
        full_shape, six.string_types):
      raise TypeError(
          "`full_shape` must be a sequence (like tuple or list) instead of " +
          type(full_shape).__name__)
    if not isinstance(var_offset, collections_lib.Sequence) or isinstance(
        var_offset, six.string_types):
      raise TypeError(
          "`var_offset` must be a sequence (like tuple or list) instead of " +
          type(var_offset).__name__)
    if len(var_offset) != len(full_shape):
      raise ValueError(
          "Expected equal length, but `var_offset` is of length {} while "
          "full_shape is of length {}.".format(
              len(var_offset), len(full_shape)))
    # Each per-dimension offset must fall inside the full variable.
    for i in xrange(len(full_shape)):
      offset = var_offset[i]
      shape = full_shape[i]
      if offset < 0 or offset >= shape:
        raise ValueError(
            "Expected 0 <= offset < shape but found offset={}, shape={} for "
            "var_offset={}, full_shape={}".format(offset, shape, var_offset,
                                                  full_shape))
    self._full_shape = full_shape
    self._var_offset = var_offset
  # Read-only accessors for the validated constructor arguments.
  @property
  def full_shape(self):
    return self._full_shape
  @property
  def var_offset(self):
    return self._var_offset
  def single_offset(self, shape):
    """Returns the offset when the variable is partitioned in at most one dim.
    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.
    Returns:
      `int` representing the offset in the dimension along which the variable is
      partitioned. Returns 0 if the variable is not being partitioned.
    Raises:
      ValueError: Depending on self.single_slice_dim().
    """
    single_slice_dim = self.single_slice_dim(shape)
    # If this variable is not being partitioned at all, single_slice_dim() could
    # return None.
    if single_slice_dim is None:
      return 0
    return self.var_offset[single_slice_dim]
  def single_slice_dim(self, shape):
    """Returns the slice dim when the variable is partitioned only in one dim.
    Args:
      shape: Tuple or list of `int` indicating the shape of one specific
        variable partition.
    Returns:
      `int` representing the dimension that the variable is partitioned in, or
      `None` if the variable doesn't seem to be partitioned at all.
    Raises:
      TypeError: If `shape` is not a sequence.
      ValueError: If `shape` is not the same length as `self.full_shape`. If
        the variable is partitioned in more than one dimension.
    """
    if not isinstance(shape, collections_lib.Sequence) or isinstance(
        shape, six.string_types):
      raise TypeError(
          "`shape` must be a sequence (like tuple or list) instead of " +
          type(shape).__name__)
    if len(shape) != len(self.full_shape):
      raise ValueError(
          "Expected equal length, but received shape={} of length {} while "
          "self.full_shape={} is of length {}.".format(shape, len(
              shape), self.full_shape, len(self.full_shape)))
    # First pass: the partition must fit inside the full shape in every dim.
    for i in xrange(len(shape)):
      if self.var_offset[i] + shape[i] > self.full_shape[i]:
        raise ValueError(
            "With self.var_offset={}, a partition of shape={} would exceed "
            "self.full_shape={} in dimension {}.".format(
                self.var_offset, shape, self.full_shape, i))
    # Second pass: the partitioned dimension is the single one whose extent
    # differs from the full shape; more than one such dim is an error.
    slice_dim = None
    for i in xrange(len(shape)):
      if shape[i] == self.full_shape[i]:
        continue
      if slice_dim is not None:
        raise ValueError(
            "Cannot use single_slice_dim() with shape={} and "
            "self.full_shape={} since slice dim could be either dimension {} "
            "or {}.".format(shape, self.full_shape, i, slice_dim))
      slice_dim = i
    return slice_dim
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
  def __init__(self):
    """Create a variable store.

    All three mappings are keyed by name strings.
    """
    self._vars = {}  # A dictionary of the stored TensorFlow variables.
    self._partitioned_vars = {}  # A dict of the stored PartitionedVariables.
    self.variable_scopes_count = {}  # Count re-used variable scopes.
def open_variable_scope(self, scope_name):
if scope_name in self.variable_scopes_count:
self.variable_scopes_count[scope_name] += 1
else:
self.variable_scopes_count[scope_name] = 1
def close_variable_subscopes(self, scope_name):
for k in self.variable_scopes_count:
if not scope_name or k.startswith(scope_name + "/"):
self.variable_scopes_count[k] = 0
def variable_scope_count(self, scope_name):
return self.variable_scopes_count.get(scope_name, 0)
def get_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None,
custom_getter=None):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean or `None`. Controls reuse or creation of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the `Variable` to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the `Variable` reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and dtype of the `Variable` to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates
instead an experimental ResourceVariable which has well-defined
semantics. Defaults to False (will later change to True).
custom_getter: Callable that takes as a first argument the true getter,
and allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
"""
if custom_getter is not None and not callable(custom_getter):
raise ValueError(
"Passed a custom_getter which is not callable: %s" % custom_getter)
# This is the main logic of get_variable. However, custom_getter
# may override this logic. So we save it as a callable and pass
# it to custom_getter.
# Note: the parameters of _true_getter, and their documentation, match
# *exactly* item-for-item with the docstring of this method.
def _true_getter(name, shape=None, dtype=dtypes.float32, # pylint: disable=missing-docstring
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
partitioner=None, validate_shape=True, use_resource=None):
is_scalar = shape is not None and not shape
# Partitioned variable case
if partitioner is not None and not is_scalar:
if not callable(partitioner):
raise ValueError(
"Partitioner must be callable, but received: %s" % partitioner)
with ops.name_scope(None):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=partitioner,
validate_shape=validate_shape,
use_resource=use_resource)
# Special case for partitioned variable to allow reuse without having to
# specify partitioner.
if (reuse is True and partitioner is None
and name in self._partitioned_vars):
return self._get_partitioned_variable(name=name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
partitioner=None,
validate_shape=validate_shape,
use_resource=use_resource)
# Single variable case
if "%s/part_0" % name in self._vars:
raise ValueError(
"No partitioner was provided, but a partitioned version of the "
"variable was found: %s/part_0. Perhaps a variable of the same "
"name was already created with partitioning?" % name)
return self._get_single_variable(
name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, reuse=reuse,
trainable=trainable, collections=collections,
caching_device=caching_device, validate_shape=validate_shape,
use_resource=use_resource)
if custom_getter is not None:
return custom_getter(
getter=_true_getter, name=name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource)
else:
return _true_getter(
name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
reuse=reuse, trainable=trainable, collections=collections,
caching_device=caching_device, partitioner=partitioner,
validate_shape=validate_shape, use_resource=use_resource)
def _get_partitioned_variable(
self, name, partitioner, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None,
validate_shape=True, use_resource=None):
"""Gets or creates a sharded variable list with these parameters.
The `partitioner` must be a callable that accepts a fully defined
`TensorShape` and returns a sequence of integers (the `partitions`).
These integers describe how to partition the given sharded `Variable`
along the given dimension. That is, `partitions[1] = 3` means split
the `Variable` into 3 shards along dimension 1. Currently, sharding along
only one axis is supported.
If the list of variables with the given name (prefix) is already stored,
we return the stored variables. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`glorot_uniform_initializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
If the initializer is a callable, then it will be called for each
shard. Otherwise the initializer should match the shape of the entire
sharded Variable, and it will be sliced accordingly for each shard.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: the name of the new or existing sharded variable.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
shape: shape of the new or existing sharded variable.
dtype: type of the new or existing sharded variable
(defaults to `DT_FLOAT`).
initializer: initializer for the sharded variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean or `None`. Controls reuse or creation of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If True, creates an
experimental ResourceVariable which has well-defined semantics. Defaults
to False (will later change to True).
Returns:
A `PartitionedVariable` object.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
when violating reuse during variable creation, or if an existing
sharded variable exists for the given name but with different sharding.
"""
initializing_from_value = initializer is not None and isinstance(
initializer, ops.Tensor)
reuse_without_partition = reuse is True and partitioner is None
if name in self._vars:
raise ValueError(
"A partitioner was provided, but an unpartitioned version of the "
"variable was found: %s. Perhaps a variable of the same name was "
"already created without partitioning?" % name)
shape = tensor_shape.as_shape(shape)
if initializing_from_value:
shape = shape.merge_with(initializer.get_shape())
if not reuse_without_partition:
if not shape.is_fully_defined():
raise ValueError("Shape of a new partitioned variable (%s) must be "
"fully defined, but instead was %s." % (name, shape))
if shape.ndims < 1:
raise ValueError("A partitioned Variable must have rank at least 1, "
"shape: %s" % shape)
partitions = partitioner(shape=shape, dtype=dtype)
if not isinstance(partitions, collections_lib.Sequence):
raise ValueError("Partitioner must return a sequence, but saw: %s"
% partitions)
if len(partitions) != shape.ndims:
raise ValueError(
"Partitioner returned a partition list that does not match the "
"Variable's rank: %s vs. %s" % (partitions, shape))
if any([p < 1 for p in partitions]):
raise ValueError(
"Partitioner returned zero partitions for some axes: %s" %
partitions)
should_check = reuse is not None
if name in self._partitioned_vars:
if should_check and not reuse:
raise ValueError(
"Partitioned variable with name %s already exists. Did you mean to "
"set reuse=True in VarScope?"
% name)
existing_var = self._partitioned_vars[name]
if not shape.is_compatible_with(existing_var.get_shape()):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified shape %s "
"and found shape %s."
% (name, shape, existing_var.get_shape()))
if not dtype.is_compatible_with(existing_var.dtype):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified dtype %s "
"and found dtype %s."
% (name, dtype.name, existing_var.dtype.name))
# pylint: disable=protected-access
if (not reuse_without_partition and
existing_var._get_partitions() != partitions):
raise ValueError(
"Trying to reuse partitioned variable %s, but specified partitions "
"%s and found partitions %s." %
(name, partitions, existing_var._get_partitions()))
# pylint: enable=protected-access
return existing_var
if should_check and reuse:
raise ValueError("PartitionedVariable %s does not exist, or was not "
"created with tf.get_variable(). Did you mean to set "
"reuse=None in VarScope?" % name)
slice_dim, slice_shape = _compute_slice_dim_and_shape(
shape.as_list(), partitions)
vs = []
num_slices = partitions[slice_dim]
num_slices_with_excess = shape[slice_dim].value % num_slices
slice_offset = [0] * shape.ndims
if "%s/part_0" % name in self._vars:
if "%s/part_%d" % (name, num_slices - 1) not in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but %s/part_%d was not."
% (num_slices, name, name, num_slices - 1))
if "%s/part_%d" % (name, num_slices) in self._vars:
raise ValueError(
"Partitioner returned a different partitioning than what was "
"already found. Partitioner returned %d shards, and shard "
"%s/part_0 was found, but so was the extra shard %s/part_%d."
% (num_slices, name, name, num_slices))
for i in xrange(num_slices):
var_shape = slice_shape[:]
var_offset = slice_offset[:]
partition_info = _PartitionInfo(
full_shape=shape.as_list(), var_offset=var_offset)
if i < num_slices_with_excess:
var_shape[slice_dim] += 1
slice_offset[slice_dim] += var_shape[slice_dim]
var_full_name = "%s/part_%d" % (name, i)
with ops.name_scope(var_full_name + "/PartitionedInitializer"):
# Create the tensor to initialize the variable with default value.
if initializer is None:
init, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
if initializing_from_value:
init_shape = None
else:
init_shape = var_shape
elif callable(initializer):
init = initializer
init_shape = var_shape
elif isinstance(initializer, ops.Tensor):
init = array_ops.slice(initializer, var_offset, var_shape)
# Use the dtype of the given tensor.
dtype = init.dtype.base_dtype
init_shape = None
else:
init = ops.convert_to_tensor(initializer, dtype=dtype)
init = array_ops.slice(init, var_offset, var_shape)
init_shape = None
with ops.name_scope(None):
var = self._get_single_variable(
name=var_full_name,
shape=init_shape,
dtype=dtype,
initializer=init,
partition_info=partition_info,
regularizer=regularizer,
reuse=reuse,
trainable=trainable,
collections=collections,
caching_device=caching_device,
validate_shape=validate_shape,
use_resource=use_resource)
# pylint: disable=protected-access
var._set_save_slice_info(variables.Variable.SaveSliceInfo(
name, shape.as_list(), var_offset, var_shape))
vs.append(var)
# pylint: enable=protected-access
# pylint: disable=protected-access
partitioned_var = variables.PartitionedVariable(name=name,
shape=shape,
dtype=dtype,
variable_list=vs,
partitions=partitions)
# pylint: enable=protected-access
self._partitioned_vars[name] = partitioned_var
return partitioned_var
def _get_single_variable(self,
name,
shape=None,
dtype=dtypes.float32,
initializer=None,
regularizer=None,
partition_info=None,
reuse=None,
trainable=True,
collections=None,
caching_device=None,
validate_shape=True,
use_resource=None,):
"""Get or create a single Variable (e.g. a shard or entire variable).
See the documentation of get_variable above (ignore partitioning components)
for details.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
initializer: see get_variable.
regularizer: see get_variable.
partition_info: _PartitionInfo object.
reuse: see get_variable.
trainable: see get_variable.
collections: see get_variable.
caching_device: see get_variable.
validate_shape: see get_variable.
use_resource: see get_variable.
Returns:
A Variable. See documentation of get_variable above.
Raises:
ValueError: See documentation of get_variable above.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and not callable(initializer):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
should_check = reuse is not None
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if should_check and not reuse:
tb = self._vars[name].op.traceback[::-1]
# Throw away internal tf entries and only take a few lines.
tb = [x for x in tb if "tensorflow/python" not in x[0]][:3]
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True in VarScope? "
"Originally defined at:\n\n%s" % (
name, "".join(traceback.format_list(tb))))
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if should_check and reuse:
raise ValueError("Variable %s does not exist, or was not created with "
"tf.get_variable(). Did you mean to set reuse=None in "
"VarScope?" % name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
# Create the tensor to initialize the variable with default value.
if initializer is None:
initializer, initializing_from_value = self._get_default_initializer(
name=name, shape=shape, dtype=dtype)
# Clear control dependencies while creating the initializer.
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
variable_dtype = None
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer=initializer(dtype=dtype)
init_val = lambda: initializer( # pylint: disable=g-long-lambda
shape.as_list(), dtype=dtype, partition_info=partition_info)
variable_dtype = dtype.base_dtype
# Create the variable.
if use_resource is None:
# Set the default value if unspecified.
use_resource = False
if use_resource:
v = resource_variable_ops.ResourceVariable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape)
else:
v = variables.Variable(
initial_value=init_val,
name=name,
trainable=trainable,
collections=collections,
caching_device=caching_device,
dtype=variable_dtype,
validate_shape=validate_shape)
self._vars[name] = v
logging.vlog(1, "Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.colocate_with(v.op):
with ops.name_scope(name + "/Regularizer/"):
loss = regularizer(v)
if loss is not None:
logging.vlog(1, "Applied regularizer to %s and added the result %s "
"to REGULARIZATION_LOSSES.", v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# Initialize variable when no initializer provided
def _get_default_initializer(self, name, shape=None, dtype=dtypes.float32):
"""Provide a default initializer and a corresponding value.
Args:
name: see get_variable.
shape: see get_variable.
dtype: see get_variable.
Returns:
initializer and initializing_from_value. See get_variable above.
Raises:
ValueError: When giving unsupported dtype.
"""
# If dtype is DT_FLOAT, provide a uniform unit scaling initializer
if dtype.is_floating:
initializer = init_ops.glorot_uniform_initializer()
initializing_from_value = False
# If dtype is DT_INT/DT_UINT, provide a default value `zero`
# If dtype is DT_BOOL, provide a default value `FALSE`
elif dtype.is_integer or dtype.is_unsigned or dtype.is_bool:
initializer = init_ops.zeros_initializer()(
shape=shape, dtype=dtype.base_dtype)
initializing_from_value = True
# NOTES:Do we need to support for handling DT_STRING and DT_COMPLEX here?
else:
raise ValueError("An initializer for variable %s of %s is required"
% (name, dtype.base_dtype))
return initializer, initializing_from_value
# To stop regularization, use this regularizer
def no_regularizer(_):
  """Regularizer that disables regularization: always yields no loss.

  Returning `None` signals the variable store that there is nothing to add
  to `GraphKeys.REGULARIZATION_LOSSES` for the variable.
  """
  return None
class VariableScope(object):
  """Variable scope object to carry defaults to provide to `get_variable`.
  Many of the arguments we need for `get_variable` in a variable store are most
  easily handled with a context. This object is used for the defaults.
  Attributes:
    name: name of the current scope, used as prefix in get_variable.
    initializer: default initializer passed to get_variable.
    regularizer: default regularizer passed to get_variable.
    reuse: Boolean or None, setting the reuse in get_variable.
    caching_device: string, callable, or None: the caching device passed to
      get_variable.
    partitioner: callable or `None`: the partitioner passed to `get_variable`.
    custom_getter: default custom getter passed to get_variable.
    name_scope: The name passed to `tf.name_scope`.
    dtype: default type passed to get_variable (defaults to DT_FLOAT).
    use_resource: if False, create a normal Variable; if True create an
      experimental ResourceVariable with well-defined semantics. Defaults
      to False (will later change to True).
  """
  def __init__(self,
               reuse,
               name="",
               initializer=None,
               regularizer=None,
               caching_device=None,
               partitioner=None,
               custom_getter=None,
               name_scope="",
               dtype=dtypes.float32,
               use_resource=None):
    """Creates a new VariableScope with the given properties."""
    self._name = name
    self._initializer = initializer
    self._regularizer = regularizer
    self._reuse = reuse
    self._caching_device = caching_device
    self._partitioner = partitioner
    self._custom_getter = custom_getter
    self._name_scope = name_scope
    self._dtype = dtype
    self._use_resource = use_resource
  # Read-only views of the scope's defaults; mutate via the set_* methods.
  @property
  def name(self):
    return self._name
  @property
  def original_name_scope(self):
    return self._name_scope
  @property
  def reuse(self):
    return self._reuse
  @property
  def initializer(self):
    return self._initializer
  @property
  def dtype(self):
    return self._dtype
  @property
  def use_resource(self):
    return self._use_resource
  @property
  def regularizer(self):
    return self._regularizer
  @property
  def caching_device(self):
    return self._caching_device
  @property
  def partitioner(self):
    return self._partitioner
  @property
  def custom_getter(self):
    return self._custom_getter
  def reuse_variables(self):
    """Reuse variables in this scope."""
    self._reuse = True
  def set_initializer(self, initializer):
    """Set initializer for this scope."""
    self._initializer = initializer
  def set_dtype(self, dtype):
    """Set data type for this scope."""
    self._dtype = dtype
  def set_use_resource(self, use_resource):
    """Sets whether to use ResourceVariables for this scope."""
    self._use_resource = use_resource
  def set_regularizer(self, regularizer):
    """Set regularizer for this scope."""
    self._regularizer = regularizer
  def set_caching_device(self, caching_device):
    """Set caching_device for this scope."""
    self._caching_device = caching_device
  def set_partitioner(self, partitioner):
    """Set partitioner for this scope."""
    self._partitioner = partitioner
  def set_custom_getter(self, custom_getter):
    """Set custom getter for this scope."""
    self._custom_getter = custom_getter
  def get_collection(self, name):
    """Get this scope's variables."""
    # A trailing "/" restricts the collection query to this scope's subtree.
    scope = self._name + '/' if self._name else ""
    return ops.get_collection(name, scope)
  def trainable_variables(self):
    """Get this scope's trainable variables."""
    return self.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES)
  def global_variables(self):
    """Get this scope's global variables."""
    return self.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
  def get_variable(self,
                   var_store,
                   name,
                   shape=None,
                   dtype=None,
                   initializer=None,
                   regularizer=None,
                   trainable=True,
                   collections=None,
                   caching_device=None,
                   partitioner=None,
                   validate_shape=True,
                   use_resource=None,
                   custom_getter=None,):
    """Gets an existing variable with this name or create a new one.

    Fills in any argument the caller left as `None` with this scope's
    default, prefixes `name` with the scope name, and delegates to
    `var_store.get_variable`.
    """
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if custom_getter is None:
      custom_getter = self._custom_getter
    full_name = self.name + "/" + name if self.name else name
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # Check that `initializer` dtype and `dtype` are consistent before
      # replacing them with defaults.
      if (dtype is not None and initializer is not None and
          not callable(initializer)):
        init_dtype = ops.convert_to_tensor(initializer).dtype.base_dtype
        if init_dtype != dtype:
          raise ValueError("Initializer type '%s' and explicit dtype '%s' "
                           "don't match." % (init_dtype, dtype))
      if initializer is None:
        initializer = self._initializer
      if dtype is None:
        dtype = self._dtype
      if use_resource is None:
        use_resource = self._use_resource
      return var_store.get_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource, custom_getter=custom_getter)
  def _get_partitioned_variable(self,
                                var_store,
                                name,
                                shape=None,
                                dtype=None,
                                initializer=None,
                                regularizer=None,
                                trainable=True,
                                collections=None,
                                caching_device=None,
                                partitioner=None,
                                validate_shape=True,
                                use_resource=None):
    """Gets an existing variable with this name or create a new one.

    Same default-filling behavior as `get_variable`, but delegates to the
    store's partitioned-variable path. Not usable with a custom getter.
    """
    if initializer is None:
      initializer = self._initializer
    if regularizer is None:
      regularizer = self._regularizer
    if caching_device is None:
      caching_device = self._caching_device
    if partitioner is None:
      partitioner = self._partitioner
    if dtype is None:
      dtype = self._dtype
    if self._custom_getter is not None:
      raise ValueError(
          "Private access to _get_partitioned_variable is not allowed when "
          "a custom getter is set. Current custom getter: %s. "
          "It is likely that you're using create_partitioned_variables. "
          "If so, consider instead using get_variable with a non-empty "
          "partitioner parameter instead." % self._custom_getter)
    if partitioner is None:
      raise ValueError("No partitioner was specified")
    # This allows the variable scope name to be used as the variable name if
    # this function is invoked with an empty name arg, for backward
    # compatibility with create_partitioned_variables().
    full_name_list = []
    if self.name:
      full_name_list.append(self.name)
    if name:
      full_name_list.append(name)
    full_name = "/".join(full_name_list)
    # Variable names only depend on variable_scope (full_name here),
    # not name_scope, so we reset it below for the time of variable creation.
    with ops.name_scope(None):
      # pylint: disable=protected-access
      return var_store._get_partitioned_variable(
          full_name, shape=shape, dtype=dtype, initializer=initializer,
          regularizer=regularizer, reuse=self.reuse, trainable=trainable,
          collections=collections, caching_device=caching_device,
          partitioner=partitioner, validate_shape=validate_shape,
          use_resource=use_resource)
      # pylint: enable=protected-access
# Graph-collection keys under which the singleton default variable store and
# the current VariableScope are stashed. Tuples are used so these internal
# keys cannot collide with ordinary string-named collections.
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPE_KEY = ("__varscope",)
def get_variable_scope():
  """Returns the current variable scope."""
  existing = ops.get_collection(_VARSCOPE_KEY)
  if not existing:
    # Lazily install a default root scope (reuse=False) on first access.
    default_scope = VariableScope(False)
    ops.add_to_collection(_VARSCOPE_KEY, default_scope)
    return default_scope
  # The collection holds at most one element: the current scope at index 0.
  return existing[0]
def _get_default_variable_store():
  """Returns the graph's singleton `_VariableStore`, creating it if needed."""
  stores = ops.get_collection(_VARSTORE_KEY)
  if not stores:
    # First access: register a fresh store in the graph collection.
    new_store = _VariableStore()
    ops.add_to_collection(_VARSTORE_KEY, new_store)
    return new_store
  return stores[0]
def get_variable(name,
                 shape=None,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=True,
                 collections=None,
                 caching_device=None,
                 partitioner=None,
                 validate_shape=True,
                 use_resource=None,
                 custom_getter=None):
  # Public entry point: delegate to the current VariableScope, which applies
  # scope defaults and name prefixing, backed by the graph's default
  # _VariableStore. The docstring is attached separately via a later
  # `get_variable.__doc__` assignment, so none is defined here.
  return get_variable_scope().get_variable(
      _get_default_variable_store(), name, shape=shape, dtype=dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=use_resource, custom_getter=custom_getter)
get_variable_or_local_docstring = (
"""%s
%sThis function prefixes the name with the current variable scope
and performs reuse checks. See the
@{$variable_scope$Variable Scope How To}
for an extensive description of how reusing works. Here is a basic example:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1]) # v.name == "foo/v:0"
w = tf.get_variable("w", [1]) # w.name == "foo/w:0"
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v") # The same as v above.
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`glorot_uniform_initializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
If a partitioner is provided, a `PartitionedVariable` is returned.
Accessing this object as a `Tensor` returns the shards concatenated along
the partition axis.
Some useful partitioners are available. See, e.g.,
`variable_axis_size_partitioner` and `min_max_variable_partitioner`.
Args:
name: The name of the new or existing variable.
shape: Shape of the new or existing variable.
dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: Initializer for the variable if one is created.
regularizer: A (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
@{tf.GraphKeys.REGULARIZATION_LOSSES} and can be used for regularization.
%scollections: List of graph collections keys to add the Variable to.
Defaults to `[%s]` (see `tf.Variable`).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
partitioner: Optional callable that accepts a fully defined `TensorShape`
and `dtype` of the Variable to be created, and returns a list of
partitions for each axis (currently only one axis can be partitioned).
validate_shape: If False, allows the variable to be initialized with a
value of unknown shape. If True, the default, the shape of initial_value
must be known.
use_resource: If False, creates a regular Variable. If true, creates an
experimental ResourceVariable instead with well-defined semantics.
Defaults to False (will later change to True).
custom_getter: Callable that takes as a first argument the true getter, and
allows overwriting the internal get_variable method.
The signature of `custom_getter` should match that of this method,
but the most future-proof version will allow for changes:
`def custom_getter(getter, *args, **kwargs)`. Direct access to
all `get_variable` parameters is also allowed:
`def custom_getter(getter, name, *args, **kwargs)`. A simple identity
custom getter that simply creates variables with modified names is:
```python
def custom_getter(getter, name, *args, **kwargs):
return getter(name + '_suffix', *args, **kwargs)
```
Returns:
The created or existing `Variable` (or `PartitionedVariable`, if a
partitioner was used).
Raises:
ValueError: when creating a new variable and shape is not declared,
when violating reuse during variable creation, or when `initializer` dtype
and `dtype` don't match. Reuse is set inside `variable_scope`.
""")
get_variable.__doc__ = get_variable_or_local_docstring % (
"Gets an existing variable with these parameters or create a new one.",
"",
"trainable: If `True` also add the variable to the graph collection\n"
" `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).\n",
"GraphKeys.GLOBAL_VARIABLES")
@functools.wraps(get_variable)
def get_local_variable(*args, **kwargs):
  # A local variable is never trainable and always belongs to the
  # LOCAL_VARIABLES collection, in addition to any caller-supplied ones.
  kwargs["trainable"] = False
  kwargs.setdefault("collections", [])
  kwargs["collections"] += [ops.GraphKeys.LOCAL_VARIABLES]
  return get_variable(*args, **kwargs)
# Fill the shared docstring template for `get_local_variable`: no
# `trainable` argument (always False) and LOCAL_VARIABLES as the default
# collection.
get_local_variable.__doc__ = get_variable_or_local_docstring % (
    "Gets an existing *local* variable or creates a new one.",
    "Behavior is the same as in `get_variable`, except that variables are\n"
    "added to the `LOCAL_VARIABLES` collection and `trainable` is set to\n"
    "`False`.\n",
    "",
    "GraphKeys.LOCAL_VARIABLES")
def _get_partitioned_variable(name,
                              shape=None,
                              dtype=None,
                              initializer=None,
                              regularizer=None,
                              trainable=True,
                              collections=None,
                              caching_device=None,
                              partitioner=None,
                              validate_shape=True,
                              use_resource=None):
  """Gets or creates a sharded variable list with these parameters.

  The `partitioner` must be a callable that accepts a fully defined
  `TensorShape` and returns a sequence of integers (the `partitions`).
  These integers describe how to partition the given sharded `Variable`
  along the given dimension. That is, `partitions[1] = 3` means split
  the `Variable` into 3 shards along dimension 1. Currently, sharding along
  only one axis is supported.

  If the list of variables with the given name (prefix) is already stored,
  we return the stored variables. Otherwise, we create a new one.

  Set `reuse` to `True` when you only want to reuse existing Variables.
  Set `reuse` to `False` when you only want to create new Variables.
  If `reuse` is `None` (the default), both new and existing variables are
  returned.

  If initializer is `None` (the default), the default initializer passed in
  the constructor is used. If that one is `None` too, we use a new
  `glorot_uniform_initializer`. If initializer is a Tensor, we use
  it as a value and derive the shape from the initializer.

  If the initializer is a callable, then it will be called for each
  shard. Otherwise the initializer should match the shape of the entire
  sharded Variable, and it will be sliced accordingly for each shard.

  Some useful partitioners are available. See, e.g.,
  `variable_axis_size_partitioner` and `min_max_variable_partitioner`.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    dtype: Type of the new or existing variable (defaults to `DT_FLOAT`).
    initializer: Initializer for the variable if one is created.
    regularizer: A (Tensor -> Tensor or None) function; the result of
      applying it on a newly created variable will be added to the collection
      GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
    collections: List of graph collections keys to add the Variable to.
      Defaults to `[GraphKeys.GLOBAL_VARIABLES]` (see `tf.Variable`).
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    partitioner: Optional callable that accepts a fully defined `TensorShape`
      and `dtype` of the Variable to be created, and returns a list of
      partitions for each axis (currently only one axis can be partitioned).
    validate_shape: If False, allows the variable to be initialized with a
      value of unknown shape. If True, the default, the shape of initial_value
      must be known.
    use_resource: If False, creates a regular Variable. If True, creates an
      experimental ResourceVariable instead which has well-defined semantics.
      Defaults to False (will later change to True).

  Returns:
    A tuple `(shards, partitions)` where `shards` is the list of `Variable`
    shards and `partitions` is the output of the partitioner on the input
    shape.

  Raises:
    ValueError: when creating a new variable and shape is not declared,
      or when violating reuse during variable creation. Reuse is set inside
      `variable_scope`.
  """
  # pylint: disable=protected-access
  scope = get_variable_scope()
  # This private entry point talks to the variable store directly and would
  # silently bypass any custom getter installed on the scope, so refuse to
  # proceed in that case.
  if scope.custom_getter is not None:
    raise ValueError(
        "Private access to _get_partitioned_variable is not allowed when "
        "a custom getter is set. Current custom getter: %s. "
        "It is likely that you're using create_partitioned_variables. "
        "If so, consider instead using get_variable with a non-empty "
        "partitioner parameter instead." % scope.custom_getter)
  return scope._get_partitioned_variable(
      _get_default_variable_store(), name, shape=shape, dtype=dtype,
      initializer=initializer, regularizer=regularizer, trainable=trainable,
      collections=collections, caching_device=caching_device,
      partitioner=partitioner, validate_shape=validate_shape,
      use_resource=use_resource)
  # pylint: enable=protected-access
@contextlib.contextmanager
def _pure_variable_scope(name_or_scope,
                         reuse=None,
                         initializer=None,
                         regularizer=None,
                         caching_device=None,
                         partitioner=None,
                         custom_getter=None,
                         old_name_scope=None,
                         dtype=dtypes.float32,
                         use_resource=None):
  """Creates a context for the variable_scope, see `variable_scope` for docs.

  Note: this does not create a name scope.

  Args:
    name_or_scope: `string` or `VariableScope`: the scope to open.
    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
    initializer: default initializer for variables within this scope.
    regularizer: default regularizer for variables within this scope.
    caching_device: default caching device for variables within this scope.
    partitioner: default partitioner for variables within this scope.
    custom_getter: default custom getter for variables within this scope.
    old_name_scope: the original name scope when re-entering a variable scope.
    dtype: type of the variables within this scope (defaults to `DT_FLOAT`).
    use_resource: If False, variables in this scope will be regular Variables.
      If True, experimental ResourceVariables will be created instead, with
      well-defined semantics. Defaults to False (will later change to True).

  Yields:
    A scope that can be captured and reused.

  Raises:
    ValueError: when trying to reuse within a create scope, or create within
      a reuse scope, or if reuse is not `None` or `True`.
    TypeError: when the types of some arguments are not appropriate.
  """
  get_variable_scope()  # Ensure that a default exists, then get a pointer.
  # Get the reference to the collection as we want to modify it in place.
  default_varscope = ops.get_collection_ref(_VARSCOPE_KEY)
  old = default_varscope[0]
  var_store = _get_default_variable_store()
  if isinstance(name_or_scope, VariableScope):
    new_name = name_or_scope.name
  else:
    new_name = old.name + "/" + name_or_scope if old.name else name_or_scope
  try:
    var_store.open_variable_scope(new_name)
    if isinstance(name_or_scope, VariableScope):
      old_subscopes = copy.copy(var_store.variable_scopes_count)
      name_scope = name_or_scope._name_scope  # pylint: disable=protected-access
      # Handler for the case when we jump to a shared scope.
      # We create a new VariableScope (default_varscope[0]) that contains
      # a copy of the provided shared scope, possibly with changed reuse
      # and initializer, if the user requested this.
      default_varscope[0] = VariableScope(
          name_or_scope.reuse if reuse is None else reuse,
          name=new_name,
          initializer=name_or_scope.initializer,
          regularizer=name_or_scope.regularizer,
          caching_device=name_or_scope.caching_device,
          partitioner=name_or_scope.partitioner,
          dtype=name_or_scope.dtype,
          custom_getter=name_or_scope.custom_getter,
          name_scope=name_scope,
          use_resource=name_or_scope.use_resource)
      # Each explicit override below replaces the value copied from the
      # shared scope; `None` means "inherit".
      if initializer is not None:
        default_varscope[0].set_initializer(initializer)
      if regularizer is not None:
        default_varscope[0].set_regularizer(regularizer)
      if caching_device is not None:
        default_varscope[0].set_caching_device(caching_device)
      if partitioner is not None:
        default_varscope[0].set_partitioner(partitioner)
      if custom_getter is not None:
        # Chain the new custom getter on top of the scope's existing one.
        default_varscope[0].set_custom_getter(
            _maybe_wrap_custom_getter(
                custom_getter, name_or_scope.custom_getter))
      if dtype is not None:
        default_varscope[0].set_dtype(dtype)
      if use_resource is not None:
        default_varscope[0].set_use_resource(use_resource)
      yield default_varscope[0]
    else:
      # Handler for the case when we just prolong current variable scope.
      # VariableScope with name extended by the provided one, and inherited
      # reuse and initializer (except if the user provided values to set).
      reuse = reuse or old.reuse  # Re-using is inherited by sub-scopes.
      default_varscope[0] = VariableScope(
          reuse,
          name=new_name,
          initializer=old.initializer,
          regularizer=old.regularizer,
          caching_device=old.caching_device,
          partitioner=old.partitioner,
          dtype=old.dtype,
          use_resource=old.use_resource,
          custom_getter=old.custom_getter,
          name_scope=old_name_scope or name_or_scope)
      if initializer is not None:
        default_varscope[0].set_initializer(initializer)
      if regularizer is not None:
        default_varscope[0].set_regularizer(regularizer)
      if caching_device is not None:
        default_varscope[0].set_caching_device(caching_device)
      if partitioner is not None:
        default_varscope[0].set_partitioner(partitioner)
      if custom_getter is not None:
        default_varscope[0].set_custom_getter(
            _maybe_wrap_custom_getter(custom_getter, old.custom_getter))
      if dtype is not None:
        default_varscope[0].set_dtype(dtype)
      if use_resource is not None:
        default_varscope[0].set_use_resource(use_resource)
      yield default_varscope[0]
  finally:
    var_store.close_variable_subscopes(new_name)
    # If jumping out from a non-prolonged scope, restore counts.
    if isinstance(name_or_scope, VariableScope):
      var_store.variable_scopes_count = old_subscopes
    # Always restore the previously-active scope on exit.
    default_varscope[0] = old
def _maybe_wrap_custom_getter(custom_getter, old_getter):
"""Wrap a call to a custom_getter to use the old_getter internally."""
if old_getter is None:
return custom_getter
# The new custom_getter should call the old one
def wrapped_custom_getter(getter, *args, **kwargs):
# Call:
# custom_getter(
# lambda: old_getter(true_getter, ...), *args, **kwargs)
# which means custom_getter will call old_getter, which
# will call the true_getter, perform any intermediate
# processing, and return the results to the current
# getter, which will also perform additional processing.
return custom_getter(
functools.partial(old_getter, getter),
*args, **kwargs)
return wrapped_custom_getter
def _get_unique_variable_scope(prefix):
  """Get a name with the given prefix unique in the current variable scope."""
  store = _get_default_variable_store()
  scope = get_variable_scope()
  # Uniqueness is checked against the fully-qualified name, but only the
  # (possibly suffixed) prefix is returned to the caller.
  full_name = scope.name + "/" + prefix if scope.name else prefix
  if not store.variable_scope_count(full_name):
    return prefix
  suffix = 1
  while store.variable_scope_count("%s_%d" % (full_name, suffix)):
    suffix += 1
  return "%s_%d" % (prefix, suffix)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def variable_scope(name_or_scope,
                   default_name=None,
                   values=None,
                   initializer=None,
                   regularizer=None,
                   caching_device=None,
                   partitioner=None,
                   custom_getter=None,
                   reuse=None,
                   dtype=None,
                   use_resource=None):
  """Returns a context manager for defining ops that creates variables (layers).

  This context manager validates that the (optional) `values` are from
  the same graph, ensures that graph is the default graph, and pushes a
  name scope and a variable scope.

  If `name_or_scope` is not None, it is used as is. If `scope` is None, then
  `default_name` is used. In that case, if the same name has been previously
  used in the same scope, it will be made unique by appending `_N` to it.

  Variable scope allows you to create new variables and to share already
  created ones while providing checks to not create or share by accident.
  For details, see the @{$variable_scope$Variable Scope How To},
  here we present only a few basic examples.

  Simple example of how to create a new variable:

  ```python
  with tf.variable_scope("foo"):
      with tf.variable_scope("bar"):
          v = tf.get_variable("v", [1])
          assert v.name == "foo/bar/v:0"
  ```

  Basic example of sharing a variable:

  ```python
  with tf.variable_scope("foo"):
      v = tf.get_variable("v", [1])
  with tf.variable_scope("foo", reuse=True):
      v1 = tf.get_variable("v", [1])
  assert v1 == v
  ```

  Sharing a variable by capturing a scope and setting reuse:

  ```python
  with tf.variable_scope("foo") as scope:
      v = tf.get_variable("v", [1])
      scope.reuse_variables()
      v1 = tf.get_variable("v", [1])
  assert v1 == v
  ```

  To prevent accidental sharing of variables, we raise an exception when
  getting an existing variable in a non-reusing scope.

  ```python
  with tf.variable_scope("foo"):
      v = tf.get_variable("v", [1])
      v1 = tf.get_variable("v", [1])
      #  Raises ValueError("... v already exists ...").
  ```

  Similarly, we raise an exception when trying to get a variable that
  does not exist in reuse mode.

  ```python
  with tf.variable_scope("foo", reuse=True):
      v = tf.get_variable("v", [1])
      #  Raises ValueError("... v does not exists ...").
  ```

  Note that the `reuse` flag is inherited: if we open a reusing scope,
  then all its sub-scopes become reusing as well.

  Args:
    name_or_scope: `string` or `VariableScope`: the scope to open.
    default_name: The default name to use if the `name_or_scope` argument is
      `None`, this name will be uniquified. If name_or_scope is provided it
      won't be used and therefore it is not required and can be None.
    values: The list of `Tensor` arguments that are passed to the op function.
    initializer: default initializer for variables within this scope.
    regularizer: default regularizer for variables within this scope.
    caching_device: default caching device for variables within this scope.
    partitioner: default partitioner for variables within this scope.
    custom_getter: default custom getter for variables within this scope.
    reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
      well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
    dtype: type of variables created in this scope (defaults to the type
      in the passed scope, or inherited from parent scope).
    use_resource: If False, all variables will be regular Variables. If True,
      experimental ResourceVariables with well-defined semantics will be used
      instead. Defaults to False (will later change to True).

  Returns:
    A scope that can be captured and reused.

  Raises:
    ValueError: when trying to reuse within a create scope, or create within
      a reuse scope.
    TypeError: when the types of some arguments are not appropriate.
  """
  if default_name is None and name_or_scope is None:
    raise TypeError("If default_name is None then name_or_scope is required")
  if not (reuse is True or reuse is False or reuse is None):
    raise ValueError("The reuse parameter must be True or False or None.")
  if reuse is False:  # We don't allow non-inheriting scopes, False = None here.
    reuse = None
  if values is None:
    values = []
  g = ops._get_graph_from_inputs(values)  # pylint: disable=protected-access
  with g.as_default():
    if name_or_scope is not None:
      if not isinstance(name_or_scope, (VariableScope,) + six.string_types):
        raise TypeError("VariableScope: name_or_scope must be a string or "
                        "VariableScope.")
      if isinstance(name_or_scope, six.string_types):
        name_scope = name_or_scope
      else:
        # Re-entering a captured scope: reuse only its last name component
        # for the name scope.
        name_scope = name_or_scope.name.split("/")[-1]
      if name_scope:
        with ops.name_scope(name_scope) as cur_name_scope:
          if isinstance(name_or_scope, six.string_types):
            old_name_scope = cur_name_scope
          else:
            old_name_scope = name_or_scope.original_name_scope
          with _pure_variable_scope(
              name_or_scope,
              reuse=reuse,
              initializer=initializer,
              regularizer=regularizer,
              caching_device=caching_device,
              partitioner=partitioner,
              custom_getter=custom_getter,
              old_name_scope=old_name_scope,
              dtype=dtype,
              use_resource=use_resource) as vs:
            yield vs
      else:
        # This can only happen if someone is entering the root variable scope.
        with _pure_variable_scope(
            name_or_scope,
            reuse=reuse,
            initializer=initializer,
            regularizer=regularizer,
            caching_device=caching_device,
            partitioner=partitioner,
            custom_getter=custom_getter,
            dtype=dtype,
            use_resource=use_resource) as vs:
          yield vs
    else:  # Here name_or_scope is None. Using default name, but made unique.
      if reuse:
        raise ValueError("reuse=True cannot be used without a name_or_scope")
      with ops.name_scope(default_name) as scope:
        unique_default_name = _get_unique_variable_scope(default_name)
        with _pure_variable_scope(
            unique_default_name,
            initializer=initializer,
            regularizer=regularizer,
            caching_device=caching_device,
            partitioner=partitioner,
            custom_getter=custom_getter,
            old_name_scope=scope,
            dtype=dtype,
            use_resource=use_resource) as vs:
          yield vs
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def variable_op_scope(values,
                      name_or_scope,
                      default_name=None,
                      initializer=None,
                      regularizer=None,
                      caching_device=None,
                      partitioner=None,
                      custom_getter=None,
                      reuse=None,
                      dtype=None,
                      use_resource=None):
  """Deprecated: context manager for defining an op that creates variables."""
  logging.warn("tf.variable_op_scope(values, name, default_name) is deprecated,"
               " use tf.variable_scope(name, default_name, values)")
  # Simply forward everything to variable_scope with the arguments reordered.
  scope_kwargs = dict(default_name=default_name,
                      values=values,
                      initializer=initializer,
                      regularizer=regularizer,
                      caching_device=caching_device,
                      partitioner=partitioner,
                      custom_getter=custom_getter,
                      reuse=reuse,
                      dtype=dtype,
                      use_resource=use_resource)
  with variable_scope(name_or_scope, **scope_kwargs) as scope:
    yield scope
def _compute_slice_dim_and_shape(full_shape, slicing):
"""Computes which dimension is being sliced and the typical slice shape."""
slice_shape = [0] * len(full_shape)
slice_dim = None
for dim, num_slices in enumerate(slicing):
dim_size = full_shape[dim]
if num_slices <= 0 or dim_size < num_slices:
raise ValueError("Cannot create %d slices for size %d. shape: %s, "
"slicing: %s" %
(num_slices, full_shape[dim], full_shape, slicing))
if num_slices == 1:
# Not slicing in this dimension.
slice_shape[dim] = dim_size
elif slice_dim is not None:
# We only support slicing along one of the dimensions.
raise ValueError("Can only slice a variable along one dimension: "
"shape: %s, slicing: %s" % (full_shape, slicing))
else:
# Note: We will add any extras onto the last slice, later.
slice_dim = dim
slice_shape[dim] = dim_size // num_slices
# Degenerate case: If "slicing" was all ones, pretend we are slicing along
# the first dimension.
if slice_dim is None:
slice_dim = 0
return slice_dim, slice_shape
| 42.329261 | 97 | 0.661005 |
cda2e0abba92851a397f7667e9f705874637347f | 15,462 | py | Python | perceptual/plotting_tools.py | LIMUNIMI/PerceptualEvaluation | 6e1fcdf65ae5cb86997443607bb2050163b64720 | [
"MIT"
] | null | null | null | perceptual/plotting_tools.py | LIMUNIMI/PerceptualEvaluation | 6e1fcdf65ae5cb86997443607bb2050163b64720 | [
"MIT"
] | null | null | null | perceptual/plotting_tools.py | LIMUNIMI/PerceptualEvaluation | 6e1fcdf65ae5cb86997443607bb2050163b64720 | [
"MIT"
] | null | null | null | import os
from collections import OrderedDict
import pandas as pd
import streamlit as st
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
from joblib import Parallel, delayed
import scipy
from scipy.stats import (f_oneway, kendalltau, kruskal, pearsonr, spearmanr,
wilcoxon, ttest_rel, shapiro)
from statsmodels.stats.multitest import multipletests
from tqdm import tqdm
# Maps the short codes stored in the experiment dataframe's `var1` column to
# the human-readable labels used in plot titles and saved image names.
var1_map = {
    'rest': 'restoration',
    'resynth': 'resynthesis',
    'transcr': 'transcription',
    'hr': 'hr',
    'nr': 'nr',
    'si': 'si',
    'o&f': 'o&f'
}
def plot(df,
         obj_eval,
         measure_name,
         var1,
         var2,
         excerpts_mean=True,
         variable=None):
    """
    Render the full analysis (violin plots, correlations, statistical tests)
    for every value of `var1`, delegating to `_plot_data`.

    Arguments
    ---------
    `df` : pd.DataFrame
        the dataframe built from the `saves` dir
    `obj_eval` : np.ndarray
        objective evaluation scores; indexed as
        [excerpt, var2 value, velocity flag, measure] — TODO confirm axis
        meaning against the caller
    `measure_name` : str
        the name used for the measure in the plots
    `var1` : str
        a column of `df` that is iterated first (e.g. `'task'` or `'method'`)
    `var2` : str
        a column of `df` that is iterated next (e.g. `'task'` or `'method'`)
    `excerpts_mean` : bool
        if True, plots the average over all the excerpts per each var1_val
        type; otherwise one plot per excerpt
    `variable` : str or None
        the variable to be observed: each value of the variable will be a
        different line and the Wilcoxon rank test will be computed for all the
        combinations of the variable value

    Returns
    -------
    None :
        output is produced as side effects: plots written to `imgs/` and
        tables/figures emitted via streamlit
    """
    var1_vals = df[var1].unique()
    excerpts = df['excerpt_num'].unique()
    # selecting data
    print("Plotting")

    def process(var1_val):
        # Render either the over-excerpt aggregate or one plot per excerpt.
        sort_by = [var2] + ([variable] if variable is not None else [])
        if excerpts_mean:
            excerpt = 'mean'
            selected_data = df.loc[df[var1] == var1_val].sort_values(sort_by)
            # plotting means of the excerpts
            _plot_data(var2, selected_data, var1_val, excerpt, variable,
                       obj_eval, measure_name)
        else:
            for excerpt in sorted(excerpts):
                selected_data = df.loc[df[var1] == var1_val].loc[
                    df['excerpt_num'] == excerpt].sort_values(sort_by)
                # plotting each excerpt
                _plot_data(var2, selected_data, var1_val, excerpt, variable,
                           obj_eval, measure_name)

    # NOTE(review): n_jobs=1 makes this sequential; presumably kept for the
    # progress bar / easy switch to parallelism — confirm before changing.
    Parallel(n_jobs=1)(delayed(process)(var1_val)
                       for var1_val in tqdm(var1_vals))
def compute_correlations(groupby, obj_eval, excerpt):
    """Correlate objective scores with subjective rating means/medians.

    Args:
        groupby: a pandas GroupBy over the 'rating' column, one group per
            var2 value (4 groups expected, matching ``obj_eval``'s 2nd axis).
        obj_eval: ndarray indexed as [excerpt, var2, velocity, measure].
        excerpt: integer index into the first axis of ``obj_eval``.

    Returns:
        A DataFrame (transposed) with rows (Mean/Median, correlation
        function, Correlation/p-value) and columns (velocity, measure).
    """
    funcs = ['Pearsons', 'Spearman', 'Kendall']
    measures = ['Precision', 'Recall', 'F-Measure']
    vel = ['With Velocity', 'Without Velocity']
    type_ = ['Mean', 'Median']
    correlations = pd.DataFrame(
        index=pd.MultiIndex.from_product([vel, measures]),
        columns=pd.MultiIndex.from_product(
            [type_, funcs, ['Correlation', 'p-value']]))
    # Hoist the aggregations out of the loop; they are loop-invariant.
    mean_ratings = groupby.mean()
    median_ratings = groupby.median()
    for i in range(2):
        for j in range(3):
            for k, correl_func in enumerate([pearsonr, spearmanr, kendalltau]):
                mean_val = correl_func(obj_eval[excerpt, :, i, j],
                                       mean_ratings)
                median_val = correl_func(obj_eval[excerpt, :, i, j],
                                         median_ratings)
                # BUG FIX: the previous chained assignment
                # `correlations.loc[a, b][c, d] = ...` wrote into a row copy
                # and could silently leave the frame untouched; assign each
                # cell through a single fully-qualified .loc call instead.
                row = (vel[i], measures[j])
                for stat_name, res_tuple in (('Mean', mean_val),
                                             ('Median', median_val)):
                    correlations.loc[row, (stat_name, funcs[k],
                                           'Correlation')] = res_tuple[0]
                    correlations.loc[row, (stat_name, funcs[k],
                                           'p-value')] = res_tuple[1]
    return correlations.T
def _plot_data(var2, selected_data, var1_val, excerpt, variable, obj_eval,
               measure_name):
    """Render the full analysis for one (var1 value, excerpt) pair.

    Emits (via streamlit) a violin plot per var2 value, correlation tables,
    error margins, normality tests and pairwise significance tests; also
    saves the figure under ``imgs/``. `excerpt` is either an int or the
    string 'mean' (average over excerpts).
    """
    st.write(f"""
    ## Var1: _{var1_map[var1_val]}_
    ## Excerpt: _{excerpt}_
    ## Controlled variable: _{variable}_
    """)
    # computing the data related to this excerpt
    if excerpt == 'mean':
        # Average the objective scores over excerpts; keep a leading axis so
        # the [excerpt_num, ...] indexing below still works.
        obj_eval = np.mean(obj_eval[:], axis=0)[np.newaxis]
        groupby = selected_data.groupby(var2)['rating']
        excerpt_num = 0
    else:
        groupby = selected_data.loc[selected_data['excerpt_num'] ==
                                    excerpt].groupby(var2)['rating']
        excerpt_num = excerpt
    # creating plot
    fig_plot = px.violin(
        selected_data,
        x=var2,
        y='rating',
        box=True,
        # points="all",
        color=variable,
        violinmode='group',
        title=
        f'Var1: {var1_map[var1_val]} - Excerpt: {excerpt} - Controlled variable: {variable}'
    )
    # customizing plot: ratings live in [0, 1], so clamp the violin span
    fig_plot.update_traces(spanmode='manual', span=[0, 1])
    # changing color of boxplots and adding mean line
    for data in fig_plot.data:
        data.meanline = dict(visible=True, color='white', width=1)
        data.box.line.color = 'white'
        data.box.line.width = 1
    var2_vals = selected_data[var2].unique()
    # adding measure line to the plot
    # NOTE(review): [excerpt_num, :, 1, 2] presumably selects
    # without-velocity F-measure — confirm against compute_correlations.
    if len(var2_vals) == 4:
        fig_plot.add_trace(
            go.Scatter(x=var2_vals,
                       y=obj_eval[excerpt_num, :, 1, 2],
                       name=measure_name))
    # saving plot and output to streamlit
    if not os.path.exists('imgs'):
        os.mkdir('imgs')
    fig_plot.write_image(f"imgs/{var1_map[var1_val]}_{excerpt}_{variable}.svg")
    st.write(fig_plot)
    # computing correlations
    st.write("### Correlations and error margins")
    if groupby.sum().shape[0] == 4:
        correlations = compute_correlations(groupby, obj_eval, excerpt_num)
        st.write("Correlations for all the data:")
        st.table(correlations.style.format('{:.2e}'))

    # a function to compute error margins
    def _compute_error_margins(groupby, selected_data_variable, var):
        # Per var2 value: sample size, 95% Gaussian margin, and a
        # 1000-resample bootstrap margin on the mean.
        # error_margin_text.append(f"var {var}: ")
        error_margins = pd.DataFrame(index=var2_vals,
                                     columns=[
                                         'Sample Size',
                                         'Error Margin Gaussian',
                                         'Error Margin Bootstrap'
                                     ])
        for var2_val in var2_vals:
            # computing std, and error margin
            samples = selected_data.loc[selected_data_variable]
            samples = samples.loc[samples[var2] == var2_val]['rating']
            sample_size = samples.count()
            std = samples.std()
            gauss_err = 1.96 * std / np.sqrt(sample_size)
            bootstrap = [
                samples.sample(frac=1., replace=True) for _ in range(1000)
            ]
            means = np.mean(bootstrap, axis=1)
            alpha_2 = 0.05 / 2
            bootstrap_err = (np.quantile(means, q=1 - alpha_2) -
                             np.quantile(means, q=alpha_2)) / 2
            error_margins.loc[var2_val] = [
                sample_size, gauss_err, bootstrap_err
            ]
        st.write(f"Error margins for control group **{var}**")
        st.write(error_margins)

    if type(excerpt) is not int:
        groupby = selected_data
    else:
        groupby = selected_data.loc[selected_data['excerpt_num'] == excerpt]
    if variable:
        # the values available for this variable
        variable_vals = selected_data[variable].unique()
        distributions = {}
        for var in variable_vals:
            # computing the distributions for each value of the variable and
            # each var2_val
            for var2_val in var2_vals:
                # boolean-mask product acts as an element-wise AND
                distributions[f"{var2_val}, group {var}"] = selected_data[
                    (selected_data[variable] == var).values *
                    (selected_data[var2] == var2_val).values]['rating'].values
            # computing correlations and error margins for each variable value
            groupby_variable = groupby[variable] == var
            # NOTE(review): leftover debug print — consider removing.
            print(groupby_variable)
            selected_data_variable = selected_data[variable] == var
            this_groupby = groupby.loc[groupby_variable].groupby(
                var2)['rating']
            if this_groupby.sum().shape[0] == 4:
                correlations = compute_correlations(this_groupby, obj_eval,
                                                    excerpt_num)
                st.write(f"Correlations for control group **{var}**")
                st.table(correlations.style.format('{:.2e}'))
            _compute_error_margins(this_groupby, selected_data_variable, var)
    else:
        # no variable provided, using all the data
        # computing error margins for all the data
        # ratings are >= 0, so `> -1` is an all-True mask selecting everything
        groupby_variable = groupby['rating'] > -1
        groupby = groupby.loc[groupby_variable].groupby(var2)['rating']
        _compute_error_margins(groupby, selected_data['rating'] > -1, 'all')
        distributions = {
            var2_val:
            selected_data[selected_data[var2] == var2_val]['rating'].values
            for var2_val in var2_vals
        }
    st.write("### Statistical significance analysis")
    # normality test
    shapiro_pvals(distributions, var2_vals)
    # computing wilcoxon, and t-test test
    st.write("#### Wilcoxon test")
    pval1, var2_pval1 = _compute_pvals(var2, selected_data, excerpt, variable,
                                       wilcoxon)
    st.write("#### t-test (related variables)")
    pval2, var2_pval2 = _compute_pvals(var2, selected_data, excerpt, variable,
                                       ttest_rel)
    if variable:
        if not np.all((pval1 > 0.05) == (pval2 > 0.05)):
            st.write(
                "**Wilcoxon and Student's t tests _differ_ for alpha = 0.05 in variable-wise tests**"
            )
        else:
            st.write(
                "**Wilcoxon and Student's t tests have _identical_ outcomes for alpha = 0.05 in variable-wise tests**"
            )
    if not np.all((var2_pval1 > 0.05) == (var2_pval2 > 0.05)):
        st.write(
            "**Wilcoxon and Student's t tests _differ_ for alpha = 0.05 in var2-wise tests**"
        )
    else:
        st.write(
            "**Wilcoxon and Student's t tests have _identical_ outcomes for alpha = 0.05 in var2-wise tests**"
        )
    st.write("---")
def omnibus(distributions):
    """Run omnibus tests (Kruskal-Wallis and one-way ANOVA) and report them."""
    st.write("###### Omnibus tests")
    stat, p_value = kruskal(*distributions)
    st.write(
        f"Kruskal-Wallis (p-value, h-statistics): {p_value:.2e}, {stat:.2f}")
    stat, p_value = f_oneway(*distributions)
    st.write(f"ANOVA (p-value, h-statistics): {p_value:.2e}, {stat:.2f}")
def shapiro_pvals(distributions, var2_vals):
    """Report Shapiro-Wilk normality p-values for each distribution.

    With more than two distributions the p-values are adjusted with the
    Bonferroni-Holm method; otherwise the raw p-values are reported.
    ``var2_vals`` is unused but kept for interface compatibility.
    """
    st.write("#### Shapiro-Wilk tests:")
    shapiro_pval = OrderedDict(
        {k: shapiro(d)[1]
         for k, d in distributions.items()})
    pvals = list(shapiro_pval.values())
    if len(distributions) > 2:
        st.write("###### Using Bonferroni-Holm correction!")
        # multipletests returns (reject, pvals_corrected, alphacSidak,
        # alphacBonf); only the corrected p-values are needed here.
        _, pvals, _, _ = multipletests(pvals, method='holm')
    # BUG FIX: previously the corrected p-values were referenced even when the
    # correction branch above was skipped, raising NameError for <= 2 groups.
    st.write({
        k: f"{pvals[i]:.2e}"
        for i, k in enumerate(shapiro_pval.keys())
    })
def correct_pvalues(pval):
    """Apply Bonferroni-Holm correction in place to the non-NaN entries."""
    valid = ~np.isnan(pval)
    _, adjusted, _, _ = multipletests(pval[valid].flatten(), method='holm')
    pval[valid] = adjusted
def _compute_pvals(var2, selected_data, excerpt, variable, statistics_func):
    """Compute pairwise p-values with `statistics_func` (e.g. wilcoxon).

    If `variable` is set, returns (pval, var2_pval): a stack of pairwise
    matrices between variable values (one per var2 value) and a stack of
    pairwise matrices between var2 values (one per variable value).
    Otherwise pval is None and var2_pval is a single matrix over all data.
    Only the lower triangle (i > j) of each matrix is filled; the rest is NaN.
    """
    var2_vals = selected_data[var2].unique()
    if variable:
        variable_vals = selected_data[variable].unique()
        pvals, var2_pvals = [], []
        # computing pval for each var2_val between the variable values
        for var2_val in var2_vals:
            samples = selected_data.loc[selected_data[var2] == var2_val]
            pval = np.full((len(variable_vals), len(variable_vals)), np.nan)
            distributions = []
            for i, expi in enumerate(variable_vals):
                for j, expj in enumerate(variable_vals):
                    if i > j:
                        try:
                            # Paired tests need equal lengths: truncate the
                            # longer group to the shorter one.
                            datai = samples.loc[samples[variable] == expi]
                            dataj = samples.loc[samples[variable] == expj]
                            maxlen = min(len(datai), len(dataj))
                            x = datai['rating'][:maxlen]
                            y = dataj['rating'][:maxlen]
                            _, pval[i, j] = statistics_func(x, y)
                            distributions += [x, y]
                        except Exception as e:
                            # Best-effort: a failed pair stays NaN.
                            print(
                                f"\nError while computing pvals with {statistics_func} test!:"
                            )
                            print(e)
                            print()
            if pval.shape[0] > 2:
                omnibus(distributions)
                correct_pvalues(pval)
                st.write(
                    f"p-values for var2_val **{var2_val}** and variable **{variable}**"
                )
                st.write("###### using Bonferroni-Holm correction!")
                st.table(
                    pd.DataFrame(pval,
                                 columns=variable_vals,
                                 index=variable_vals).style.format('{:.2e}'))
            else:
                st.write(
                    f"p-values for var2_val **{var2_val}** and variable **{variable}**: {pval[1, 0]:.2e}"
                )
            pvals.append(pval)
        # computing pval for each variable
        for var in variable_vals:
            samples = selected_data.loc[selected_data[variable] == var]
            var2_pval = _pval_on_var2_vals(var2, var2_vals, samples, var,
                                           statistics_func)
            var2_pvals.append(var2_pval)
        var2_pval = np.stack(var2_pvals)
        pval = np.stack(pvals)
    else:
        samples = selected_data
        pval = None
        var2_pval = _pval_on_var2_vals(var2, var2_vals, samples, 'all',
                                       statistics_func)
    return pval, var2_pval
def _pval_on_var2_vals(var2, var2_vals, samples, var, statistics_func):
    """Pairwise p-value matrix between var2 values within `samples`.

    Only the lower triangle (i > j) is filled; failed pairs stay NaN. With
    more than two var2 values, omnibus tests are run and the p-values are
    Bonferroni-Holm-corrected in place before being displayed.
    """
    pval = np.full((len(var2_vals), len(var2_vals)), np.nan)
    distributions = []
    for i, expi in enumerate(var2_vals):
        for j, expj in enumerate(var2_vals):
            if i > j:
                try:
                    # Paired tests need equal lengths: truncate the longer
                    # group to the shorter one.
                    datai = samples.loc[samples[var2] == expi]
                    dataj = samples.loc[samples[var2] == expj]
                    maxlen = min(len(datai), len(dataj))
                    x = datai['rating'][:maxlen]
                    y = dataj['rating'][:maxlen]
                    _, pval[i, j] = statistics_func(x, y)
                    distributions += [x, y]
                except Exception as e:
                    # Best-effort: a failed pair stays NaN.
                    print(
                        f"\nError while computing pvals with {statistics_func} test!:"
                    )
                    print(e)
                    print()
    st.write(f"##### p-values for variable value **{var}**")
    if pval.shape[0] > 2:
        omnibus(distributions)
        correct_pvalues(pval)
        st.write("###### using Bonferroni-Holm correction!")
        st.table(
            pd.DataFrame(pval, columns=var2_vals,
                         index=var2_vals).style.format('{:.2e}'))
    return pval
| 37.804401 | 118 | 0.546307 |
b00fcc6a45ec5362d4e2d154badfdf7399b3ed4d | 27 | py | Python | exercises/isbn-verifier/isbn_verifier.py | wonhyeongseo/python | ccd399510a58ad42d03420e43de67893f55dd411 | [
"MIT"
] | 2 | 2019-07-25T04:40:24.000Z | 2020-12-18T21:29:02.000Z | exercises/isbn-verifier/isbn_verifier.py | toroad/python | ce085c81a82ae5fb460fe166323dbbaa5a2588c5 | [
"MIT"
] | null | null | null | exercises/isbn-verifier/isbn_verifier.py | toroad/python | ce085c81a82ae5fb460fe166323dbbaa5a2588c5 | [
"MIT"
] | null | null | null | def verify(isbn):
pass
| 9 | 17 | 0.62963 |
45f0c273c71e8c0355aa5cf5cc661438b953419a | 974 | py | Python | src/sst/elements/simpleElementExample/tests/subcomponent_tests/test_sc_ua.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 2 | 2019-06-10T15:32:03.000Z | 2019-06-11T14:17:32.000Z | src/sst/elements/simpleElementExample/tests/subcomponent_tests/test_sc_ua.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 39 | 2016-01-06T15:08:15.000Z | 2020-06-03T18:12:31.000Z | src/sst/elements/simpleElementExample/tests/subcomponent_tests/test_sc_ua.py | feldergast/sst-elements | a7abc015aed709feb05821d269d233110569fd72 | [
"BSD-3-Clause"
] | 1 | 2017-06-13T13:50:58.000Z | 2017-06-13T13:50:58.000Z | import sst
# Define SST core options
sst.setProgramOption("stopAtCycle", "10us")
# Set up sender using slot and anonymous subcomponent
loader0 = sst.Component("Loader0", "simpleElementExample.SubComponentLoader")
loader0.addParam("clock", "1.5GHz")
sub0 = loader0.setSubComponent("mySubComp", "simpleElementExample.SubCompSlot",0)
sub0.addParam("sendCount", 15)
sub0.addParam("unnamed_subcomponent", "simpleElementExample.SubCompSender")
sub0.enableAllStatistics()
# Set up receiver using slot andanonymous subcomponent
loader1 = sst.Component("Loader1", "simpleElementExample.SubComponentLoader")
loader1.addParam("clock", "1.0GHz")
sub1 = loader1.setSubComponent("mySubComp", "simpleElementExample.SubCompSlot",0)
sub1.addParam("unnamed_subcomponent", "simpleElementExample.SubCompReceiver")
sub1.enableAllStatistics()
# Set up links
link = sst.Link("myLink1")
link.connect((sub0, "slot_port0", "5ns"), (sub1, "slot_port0", "5ns"))
sst.setStatisticLoadLevel(1)
| 30.4375 | 81 | 0.784394 |
f7d3d15035f74ba6045d2da33f1be72acf85714e | 1,581 | py | Python | tests/test_random_signal.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 17 | 2020-05-28T08:20:11.000Z | 2022-03-25T07:40:48.000Z | tests/test_random_signal.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 7 | 2021-07-21T02:15:52.000Z | 2021-11-14T10:46:41.000Z | tests/test_random_signal.py | CNES/swot_simulator | 92d0bb4a274ec9923265567968beea3be4283e61 | [
"BSD-3-Clause"
] | 8 | 2020-05-17T13:53:43.000Z | 2022-03-25T07:40:58.000Z | import os
import pickle
import numpy as np
import xarray as xr
import swot_simulator
import swot_simulator.random_signal as random_signal
ROOT = os.path.dirname(os.path.abspath(__file__))
def test_gen_signal_1d():
    """Smoke-test gen_signal_1d on pickled reference inputs.

    Only a loose bound on the mean is asserted; the pickled ``_expected``
    array is intentionally unused here.
    """
    with open(os.path.join(ROOT, "data", "gen_signal_1d.bin"), "rb") as stream:
        (fi, psi, x, nseed, fmin, fmax, alpha, lf_extpl, hf_extpl,
         _expected) = pickle.load(stream)

    # Seed the generator so the produced signal is reproducible.
    rng = np.random.default_rng(seed=nseed)
    result = random_signal.gen_signal_1d(fi, psi, x, rng, fmin, fmax, alpha,
                                         lf_extpl, hf_extpl)
    assert result.mean() < 1
def test_gen_signal_2d_rectangle():
    """Smoke-test gen_ps2d + gen_signal_2d_rectangle on pickled inputs.

    Only a loose bound on the mean is asserted; the pickled ``_expected``
    array is intentionally unused here.
    """
    with open(os.path.join(ROOT, "data", "gen_signal_2d_rectangle.bin"),
              "rb") as stream:
        (fi, psi, x, y, fminx, fminy, fmax, alpha, nseed, lf_extpl, hf_extpl,
         _expected) = pickle.load(stream)

    # Build the 2D power spectrum first, then draw the random field from it.
    ps2d, f = random_signal.gen_ps2d(fi, psi, fminx, fminy, fmax, alpha,
                                     lf_extpl, hf_extpl)
    rng = np.random.default_rng(seed=nseed)
    result = random_signal.gen_signal_2d_rectangle(ps2d, f, x, y, rng, fminx,
                                                   fminy, fmax, alpha)
    assert result.mean() < 1
def test_read_file_karin():
    """Smoke test: the bundled KaRIn noise file loads without raising.

    The returned values are deliberately unused; this only checks parsing.
    """
    height_sdt, cross_track, swh = random_signal.read_file_karin(
        str(swot_simulator.DATA.joinpath("karin_noise_v2.nc")))
def test_read_file_instr():
    """Check that the instrument error spectrum loads as an xarray Dataset."""
    dataset = random_signal.read_file_instr(
        str(swot_simulator.DATA.joinpath("error_spectrum.nc")), 2.0)
    assert isinstance(dataset, xr.Dataset)
| 34.369565 | 79 | 0.645161 |
d5c1febad0e8a33005bcd01789d255680d079190 | 385 | py | Python | pyfcm/__meta__.py | smart-learning/PyFCM | 85370b578daafc463812be09d385cf50e5fd3b29 | [
"MIT",
"Unlicense"
] | 2 | 2018-10-24T05:00:11.000Z | 2018-11-19T04:12:56.000Z | pyfcm/__meta__.py | smart-learning/PyFCM | 85370b578daafc463812be09d385cf50e5fd3b29 | [
"MIT",
"Unlicense"
] | null | null | null | pyfcm/__meta__.py | smart-learning/PyFCM | 85370b578daafc463812be09d385cf50e5fd3b29 | [
"MIT",
"Unlicense"
] | 2 | 2019-01-22T03:00:27.000Z | 2019-06-24T12:35:39.000Z | # -*- coding: utf-8 -*-
"""Define project metadata
"""
__title__ = 'pyfcm'
__summary__ = 'Python client for FCM - Firebase Cloud Messaging (Android & iOS)..'
__url__ = 'https://github.com/olucurious/pyfcm'
__version__ = '1.4.5'
__install_requires__ = ['requests', 'requests-toolbelt']
__author__ = 'Emmanuel Adegbite'
__email__ = 'olucurious@gmail.com'
__license__ = 'MIT License'
| 24.0625 | 82 | 0.711688 |
e652e890f53d4a152c68e0552c8548318ce89b50 | 7,897 | py | Python | Tests/Methods/Slot/test_SlotW27_meth.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Slot/test_SlotW27_meth.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Slot/test_SlotW27_meth.py | IrakozeFD/pyleecan | 5a93bd98755d880176c1ce8ac90f36ca1b907055 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.SlotW27 import SlotW27
from numpy import ndarray, arcsin, exp
from pyleecan.Classes.LamSlot import LamSlot
from pyleecan.Classes.Segment import Segment
from pyleecan.Classes.Slot import Slot
from pyleecan.Methods.Slot.SlotW27 import S27_W03CheckError
# For AlmostEqual
DELTA = 1e-4
slotW27_test = list()
# Internal Slot
lam = LamSlot(is_internal=True, Rext=0.1)
lam.slot = SlotW27(
Zs=12, H0=10e-3, W0=10e-3, H1=0.03, W1=0.02, H2=0.02, W2=0.03, W3=0.02
)
slotW27_test.append(
{
"test_obj": lam,
"S_exp": 1.3508e-3,
"Ao": 0.10004,
"Aw": 0.3853569,
"SW_exp": 1.25e-3,
"H_exp": 6.0125e-2,
}
)
# External Slot
lam = LamSlot(is_internal=False, Rint=0.1)
lam.slot = SlotW27(
Zs=12, H0=10e-3, W0=10e-3, H1=0.03, W1=0.02, H2=0.02, W2=0.03, W3=0.02
)
slotW27_test.append(
{
"test_obj": lam,
"S_exp": 1.34916e-3,
"Ao": 0.10004,
"Aw": 0.184928,
"SW_exp": 1.25e-3,
"H_exp": 6.0187e-2,
}
)
@pytest.mark.METHODS
class Test_SlotW27_meth(object):
    """pytest for SlotW27 methods"""

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_schematics(self, test_dict):
        """Check that the schematics is correct.

        Verifies that the distances between the computed slot points match
        the W0..W3 widths and H0..H2 heights of the slot definition.
        """
        test_obj = test_dict["test_obj"]
        point_dict = test_obj.slot._comp_point_coordinate()

        # Check width
        assert abs(point_dict["Z1"] - point_dict["Z10"]) == pytest.approx(
            test_obj.slot.W0
        )
        assert abs(point_dict["Z2"] - point_dict["Z9"]) == pytest.approx(
            test_obj.slot.W0
        )
        assert abs(point_dict["Z3"] - point_dict["Z8"]) == pytest.approx(
            test_obj.slot.W1
        )
        assert abs(point_dict["Z4"] - point_dict["Z7"]) == pytest.approx(
            test_obj.slot.W2
        )
        assert abs(point_dict["Z5"] - point_dict["Z6"]) == pytest.approx(
            test_obj.slot.W3
        )
        # Check height
        assert abs(point_dict["Z1"] - point_dict["Z2"]) == pytest.approx(
            test_obj.slot.H0
        )
        assert abs(point_dict["Z2"].real - point_dict["Z4"].real) == pytest.approx(
            test_obj.slot.H1
        )
        assert abs(point_dict["Z3"].real - point_dict["Z4"].real) == pytest.approx(
            test_obj.slot.H1
        )
        assert abs(point_dict["Z4"].real - point_dict["Z5"].real) == pytest.approx(
            test_obj.slot.H2
        )
        assert abs(point_dict["Z10"] - point_dict["Z9"]) == pytest.approx(
            test_obj.slot.H0
        )
        assert abs(point_dict["Z9"].real - point_dict["Z7"].real) == pytest.approx(
            test_obj.slot.H1
        )
        assert abs(point_dict["Z8"].real - point_dict["Z7"].real) == pytest.approx(
            test_obj.slot.H1
        )
        assert abs(point_dict["Z7"].real - point_dict["Z6"].real) == pytest.approx(
            test_obj.slot.H2
        )

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_comp_surface(self, test_dict):
        """Check that the computation of the surface is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_surface()

        a = result
        b = test_dict["S_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

        # Check that the analytical method returns the same result as the numerical one
        b = Slot.comp_surface(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_comp_surface_active(self, test_dict):
        """Check that the computation of the winding surface is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_surface_active()

        a = result
        b = test_dict["SW_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

        # Check that the analytical method returns the same result as the numerical one
        b = Slot.comp_surface_active(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_comp_height(self, test_dict):
        """Check that the computation of the height is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_height()

        a = result
        b = test_dict["H_exp"]
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

        # Check that the analytical method returns the same result as the numerical one
        b = Slot.comp_height(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_comp_angle_opening(self, test_dict):
        """Check that the computation of the average opening angle is correct"""
        test_obj = test_dict["test_obj"]
        a = test_obj.slot.comp_angle_opening()
        b = test_dict["Ao"]
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

        # Check that the generic Slot implementation agrees
        b = Slot.comp_angle_opening(test_obj.slot)
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

    @pytest.mark.parametrize("test_dict", slotW27_test)
    def test_comp_angle_active_eq(self, test_dict):
        """Check that the computation of the average angle is correct"""
        test_obj = test_dict["test_obj"]
        result = test_obj.slot.comp_angle_active_eq()

        a = result
        b = test_dict["Aw"]
        msg = "Return " + str(a) + " expected " + str(b)
        assert abs((a - b) / a - 0) < DELTA, msg

    def test_build_geometry(self):
        """Check if the curve_list is correct.

        Rebuilds the expected ten boundary points by hand and compares each
        generated segment's endpoints against them.
        """
        test_obj = SlotW27(
            Zs=6, H0=0.05, W0=30e-3, H1=0.125, W1=0.06, H2=0.05, W2=0.09, W3=0.04
        )
        lam = LamSlot(is_internal=False, slot=test_obj, Rint=1)

        # Expected points, built from the slot dimensions
        Z1 = exp(1j * float(arcsin(30e-3 / 2.0)))
        Z2 = Z1 + 0.05
        Z3 = Z2 + ((0.06 - 30e-3) / 2.0) * 1j
        Z4 = Z3 + 0.125 + ((0.09 - 0.06) / 2.0) * 1j
        Z5 = Z4 + 0.05 + ((0.04 - 0.09) / 2.0) * 1j
        Z6 = Z5.conjugate()
        Z7 = Z4.conjugate()
        Z8 = Z3.conjugate()
        Z9 = Z2.conjugate()
        Z10 = Z1.conjugate()
        # Reverse the point order to match build_geometry's traversal
        [Z1, Z2, Z3, Z4, Z5, Z6, Z7, Z8, Z9, Z10] = [
            Z10,
            Z9,
            Z8,
            Z7,
            Z6,
            Z5,
            Z4,
            Z3,
            Z2,
            Z1,
        ]
        curve_list = list()
        curve_list.append(Segment(Z1, Z2))
        curve_list.append(Segment(Z2, Z3))
        curve_list.append(Segment(Z3, Z4))
        curve_list.append(Segment(Z4, Z5))
        curve_list.append(Segment(Z5, Z6))
        curve_list.append(Segment(Z6, Z7))
        curve_list.append(Segment(Z7, Z8))
        curve_list.append(Segment(Z8, Z9))
        curve_list.append(Segment(Z9, Z10))

        result = test_obj.build_geometry()
        assert len(result) == len(curve_list)
        for i in range(0, len(result)):
            a = result[i].begin
            b = curve_list[i].begin
            assert abs((a - b) / a - 0) < DELTA

            a = result[i].end
            b = curve_list[i].end
            assert abs((a - b) / a - 0) < DELTA

    def test_check(self):
        """Check that the check function is raising error"""
        # W3 is deliberately too small to trigger the W3 check error
        test_obj = SlotW27(
            Zs=6, H0=0.05, W0=0.01, H1=0.125, W1=0.04, H2=0.05, W2=0.05, W3=0.00015
        )

        with pytest.raises(S27_W03CheckError) as context:
            test_obj.check()
| 33.892704 | 87 | 0.561479 |
8207f83f660c4b7e05a59b5ce7de8fea71946c60 | 16,490 | py | Python | test/run_device_tests.py | coreboot/chrome-ec | 61044db105bc854167efe83815acb3fcb55deb85 | [
"BSD-3-Clause"
] | 46 | 2017-02-12T20:48:45.000Z | 2022-03-01T15:53:39.000Z | test/run_device_tests.py | coreboot/chrome-ec | 61044db105bc854167efe83815acb3fcb55deb85 | [
"BSD-3-Clause"
] | 1 | 2022-01-08T23:28:01.000Z | 2022-01-09T00:43:16.000Z | test/run_device_tests.py | coreboot/chrome-ec | 61044db105bc854167efe83815acb3fcb55deb85 | [
"BSD-3-Clause"
] | 46 | 2016-02-07T18:43:27.000Z | 2022-01-03T02:30:51.000Z | #!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs unit tests on device and displays the results.
This script assumes you have a ~/.servodrc config file with a line that
corresponds to the board being tested.
See https://chromium.googlesource.com/chromiumos/third_party/hdctools/+/HEAD/docs/servo.md#servodrc
"""
import argparse
import concurrent
import io
import logging
import os
import re
import subprocess
import sys
import time
from concurrent.futures.thread import ThreadPoolExecutor
from enum import Enum
from pathlib import Path
from typing import Optional, BinaryIO, List
import colorama # type: ignore[import]
# Root of the EC checkout: parent of the directory containing this script.
EC_DIR = Path(os.path.dirname(os.path.realpath(__file__))).parent
JTRACE_FLASH_SCRIPT = os.path.join(EC_DIR, 'util/flash_jlink.py')
SERVO_MICRO_FLASH_SCRIPT = os.path.join(EC_DIR, 'util/flash_ec')

# Patterns matched against console output to detect overall test completion.
ALL_TESTS_PASSED_REGEX = re.compile(r'Pass!\r\n')
ALL_TESTS_FAILED_REGEX = re.compile(r'Fail! \(\d+ tests\)\r\n')

# Patterns matched per line to count individual check results.
SINGLE_CHECK_PASSED_REGEX = re.compile(r'Pass: .*')
SINGLE_CHECK_FAILED_REGEX = re.compile(r'.*failed:.*')

ASSERTION_FAILURE_REGEX = re.compile(r'ASSERTION FAILURE.*')

# Expected "Data access violation" messages; the mfar address identifies
# which region was touched, which differs per board (see BoardConfig).
DATA_ACCESS_VIOLATION_8020000_REGEX = re.compile(
    r'Data access violation, mfar = 8020000\r\n')
DATA_ACCESS_VIOLATION_8040000_REGEX = re.compile(
    r'Data access violation, mfar = 8040000\r\n')
DATA_ACCESS_VIOLATION_80C0000_REGEX = re.compile(
    r'Data access violation, mfar = 80c0000\r\n')
DATA_ACCESS_VIOLATION_80E0000_REGEX = re.compile(
    r'Data access violation, mfar = 80e0000\r\n')
DATA_ACCESS_VIOLATION_20000000_REGEX = re.compile(
    r'Data access violation, mfar = 20000000\r\n')
DATA_ACCESS_VIOLATION_24000000_REGEX = re.compile(
    r'Data access violation, mfar = 24000000\r\n')

# Board and tool name constants, also used as CLI argument choices.
BLOONCHIPPER = 'bloonchipper'
DARTMONKEY = 'dartmonkey'

JTRACE = 'jtrace'
SERVO_MICRO = 'servo_micro'

GCC = 'gcc'
CLANG = 'clang'
class ImageType(Enum):
    """EC Image type to use for the test."""
    RO = 1  # read-only image
    RW = 2  # read-write image
class BoardConfig:
    """Board-specific configuration.

    Holds the dut-control names for the board's console UART and power
    enable, plus the board-specific fault regexes expected by the
    rollback/MPU tests.
    """

    def __init__(self, name, servo_uart_name, servo_power_enable,
                 rollback_region0_regex, rollback_region1_regex, mpu_regex):
        # Board name, e.g. 'bloonchipper' or 'dartmonkey'.
        self.name = name
        # dut-control name of the console UART pty.
        self.servo_uart_name = servo_uart_name
        # dut-control name of the power-enable control.
        self.servo_power_enable = servo_power_enable
        # Expected fault message for each rollback region / MPU test.
        self.rollback_region0_regex = rollback_region0_regex
        self.rollback_region1_regex = rollback_region1_regex
        self.mpu_regex = mpu_regex
class TestConfig:
    """Configuration for a given test."""

    def __init__(self, name, image_to_use=ImageType.RW, finish_regexes=None,
                 toggle_power=False, test_args=None, num_flash_attempts=2,
                 timeout_secs=10, enable_hw_write_protect=False):
        # Default mutable arguments are created here rather than in the
        # signature, so instances never share the same list object.
        if test_args is None:
            test_args = []
        if finish_regexes is None:
            finish_regexes = [ALL_TESTS_PASSED_REGEX, ALL_TESTS_FAILED_REGEX]

        self.name = name
        self.image_to_use = image_to_use
        # Regexes whose match on a console line marks the test as finished.
        self.finish_regexes = finish_regexes
        self.test_args = test_args
        self.toggle_power = toggle_power
        self.num_flash_attempts = num_flash_attempts
        self.timeout_secs = timeout_secs
        self.enable_hw_write_protect = enable_hw_write_protect
        # Mutable result state filled in while the test runs.
        self.logs = []
        self.passed = False
        self.num_fails = 0
        self.num_passes = 0
# All possible tests.
class AllTests:
    """All possible tests."""

    @staticmethod
    def get(board_config: BoardConfig):
        """Return the dict of test name -> TestConfig for the given board."""
        tests = {
            'aes':
                TestConfig(name='aes'),
            'cec':
                TestConfig(name='cec'),
            'crc':
                TestConfig(name='crc'),
            'flash_physical':
                TestConfig(name='flash_physical', image_to_use=ImageType.RO,
                           toggle_power=True),
            'flash_write_protect':
                TestConfig(name='flash_write_protect',
                           image_to_use=ImageType.RO,
                           toggle_power=True, enable_hw_write_protect=True),
            'fpsensor_hw':
                TestConfig(name='fpsensor_hw'),
            'fpsensor_spi_ro':
                TestConfig(name='fpsensor', image_to_use=ImageType.RO,
                           test_args=['spi']),
            'fpsensor_spi_rw':
                TestConfig(name='fpsensor', test_args=['spi']),
            'fpsensor_uart_ro':
                TestConfig(name='fpsensor', image_to_use=ImageType.RO,
                           test_args=['uart']),
            'fpsensor_uart_rw':
                TestConfig(name='fpsensor', test_args=['uart']),
            # The MPU tests end in an expected (board-specific) fault rather
            # than the normal Pass!/Fail! banner.
            'mpu_ro':
                TestConfig(name='mpu',
                           image_to_use=ImageType.RO,
                           finish_regexes=[board_config.mpu_regex]),
            'mpu_rw':
                TestConfig(name='mpu',
                           finish_regexes=[board_config.mpu_regex]),
            'mutex':
                TestConfig(name='mutex'),
            'pingpong':
                TestConfig(name='pingpong'),
            'printf':
                TestConfig(name='printf'),
            'queue':
                TestConfig(name='queue'),
            'rollback_region0':
                TestConfig(name='rollback', finish_regexes=[
                    board_config.rollback_region0_regex],
                           test_args=['region0']),
            'rollback_region1':
                TestConfig(name='rollback', finish_regexes=[
                    board_config.rollback_region1_regex],
                           test_args=['region1']),
            'rollback_entropy':
                TestConfig(name='rollback_entropy', image_to_use=ImageType.RO),
            'rtc':
                TestConfig(name='rtc'),
            'sha256':
                TestConfig(name='sha256'),
            'sha256_unrolled':
                TestConfig(name='sha256_unrolled'),
            'static_if':
                TestConfig(name='static_if'),
            'timer_dos':
                TestConfig(name='timer_dos'),
            'utils':
                TestConfig(name='utils', timeout_secs=20),
            'utils_str':
                TestConfig(name='utils_str'),
        }

        if board_config.name == BLOONCHIPPER:
            tests['stm32f_rtc'] = TestConfig(name='stm32f_rtc')

        return tests
# Concrete configurations for the supported FPMCU boards.
BLOONCHIPPER_CONFIG = BoardConfig(
    name=BLOONCHIPPER,
    servo_uart_name='raw_fpmcu_console_uart_pty',
    servo_power_enable='fpmcu_pp3300',
    rollback_region0_regex=DATA_ACCESS_VIOLATION_8020000_REGEX,
    rollback_region1_regex=DATA_ACCESS_VIOLATION_8040000_REGEX,
    mpu_regex=DATA_ACCESS_VIOLATION_20000000_REGEX,
)

DARTMONKEY_CONFIG = BoardConfig(
    name=DARTMONKEY,
    servo_uart_name='raw_fpmcu_console_uart_pty',
    servo_power_enable='fpmcu_pp3300',
    rollback_region0_regex=DATA_ACCESS_VIOLATION_80C0000_REGEX,
    rollback_region1_regex=DATA_ACCESS_VIOLATION_80E0000_REGEX,
    mpu_regex=DATA_ACCESS_VIOLATION_24000000_REGEX,
)

# Lookup table from --board argument to its configuration.
BOARD_CONFIGS = {
    'bloonchipper': BLOONCHIPPER_CONFIG,
    'dartmonkey': DARTMONKEY_CONFIG,
}
def get_console(board_config: BoardConfig) -> Optional[str]:
    """Return the pty path of the board's console, or None if not reported.

    Runs dut-control and scans its output for a "<uart_name>:<pty>" line.
    """
    cmd = ['dut-control', board_config.servo_uart_name]
    logging.debug('Running command: "%s"', ' '.join(cmd))

    with subprocess.Popen(cmd, stdout=subprocess.PIPE) as proc:
        for line in io.TextIOWrapper(proc.stdout):  # type: ignore[arg-type]
            logging.debug(line)
            fields = line.split(':')
            if len(fields) == 2 and fields[0] == board_config.servo_uart_name:
                return fields[1].strip()

    return None
def power(board_config: BoardConfig, on: bool) -> None:
    """Turn power to board on/off.

    Args:
        board_config: Board whose servo power-enable control is driven.
        on: True powers the board from pp3300; False cuts power.

    Raises:
        subprocess.CalledProcessError: if dut-control exits non-zero.
    """
    state = 'pp3300' if on else 'off'

    cmd = [
        'dut-control',
        board_config.servo_power_enable + ':' + state,
    ]
    logging.debug('Running command: "%s"', ' '.join(cmd))
    # check=True raises CalledProcessError on failure, replacing the
    # previous run(...).check_returncode() two-step with the idiomatic form.
    subprocess.run(cmd, check=True)
def hw_write_protect(enable: bool) -> None:
    """Enable/disable hardware write protect.

    Args:
        enable: True forces write protect on; False forces it off.

    Raises:
        subprocess.CalledProcessError: if dut-control exits non-zero.
    """
    state = 'force_on' if enable else 'force_off'

    cmd = [
        'dut-control',
        'fw_wp_state:' + state,
    ]
    logging.debug('Running command: "%s"', ' '.join(cmd))
    # check=True raises CalledProcessError on failure, replacing the
    # previous run(...).check_returncode() two-step with the idiomatic form.
    subprocess.run(cmd, check=True)
def build(test_name: str, board_name: str, compiler: str) -> None:
    """Build specified test for specified board.

    Args:
        test_name: Name of the EC unit test to build (make target test-<name>).
        board_name: EC board to build for.
        compiler: GCC or CLANG; CLANG switches the cross-compiler.

    Raises:
        subprocess.CalledProcessError: if make fails.
    """
    cmd = ['make']

    if compiler == CLANG:
        cmd.append('CC=arm-none-eabi-clang')

    cmd.extend([
        'BOARD=' + board_name,
        'test-' + test_name,
        '-j',
    ])

    logging.debug('Running command: "%s"', ' '.join(cmd))
    # check=True raises CalledProcessError on failure, replacing the
    # previous run(...).check_returncode() two-step with the idiomatic form.
    subprocess.run(cmd, check=True)
def flash(test_name: str, board: str, flasher: str, remote: str) -> bool:
    """Flash the specified test binary; return True on success."""
    logging.info("Flashing test")

    if flasher == JTRACE:
        cmd = [JTRACE_FLASH_SCRIPT]
        if remote:
            cmd.extend(['--remote', remote])
    elif flasher == SERVO_MICRO:
        cmd = [SERVO_MICRO_FLASH_SCRIPT]
    else:
        logging.error('Unknown flasher: "%s"', flasher)
        return False

    image_path = os.path.join(EC_DIR, 'build', board, test_name,
                              test_name + '.bin')
    cmd.extend([
        '--board', board,
        '--image', image_path,
    ])
    logging.debug('Running command: "%s"', ' '.join(cmd))

    return subprocess.run(cmd).returncode == 0
def readline(executor: ThreadPoolExecutor, f: BinaryIO, timeout_secs: int) -> \
        Optional[bytes]:
    """Read one line from f, giving up after timeout_secs.

    The blocking read is dispatched to the executor so it can be abandoned;
    returns None when no line arrives within the timeout.
    """
    pending = executor.submit(f.readline)
    try:
        return pending.result(timeout_secs)
    except concurrent.futures.TimeoutError:
        return None
def readlines_until_timeout(executor, f: BinaryIO, timeout_secs: int) -> \
        List[bytes]:
    """Collect lines from f until a single read times out (or hits EOF)."""
    collected: List[bytes] = []
    while True:
        next_line = readline(executor, f, timeout_secs)
        if not next_line:
            break
        collected.append(next_line)
    return collected
def process_console_output_line(line: bytes, test: TestConfig) -> Optional[str]:
    """Decode one console line and update the test's pass/fail counters.

    Returns the decoded string, or None if the bytes are not valid text.
    """
    try:
        line_str = line.decode()

        if SINGLE_CHECK_PASSED_REGEX.match(line_str):
            test.num_passes += 1

        if SINGLE_CHECK_FAILED_REGEX.match(line_str):
            test.num_fails += 1

        if ALL_TESTS_FAILED_REGEX.match(line_str):
            test.num_fails += 1

        if ASSERTION_FAILURE_REGEX.match(line_str):
            test.num_fails += 1

        return line_str
    except UnicodeDecodeError:
        # Sometimes we get non-unicode from the console (e.g., when the
        # board reboots.) Not much we can do in this case, so we'll just
        # ignore it.
        return None
def run_test(test: TestConfig, console: str, executor: ThreadPoolExecutor) ->\
        bool:
    """Run specified test.

    Opens the console pty, optionally reboots to RO, issues "runtest", then
    reads console output until a finish regex matches or the timeout expires.
    Returns True if the test produced no failures.
    """
    start = time.time()

    with open(console, "wb+", buffering=0) as c:
        # Wait for boot to finish
        time.sleep(1)
        c.write('\n'.encode())

        if test.image_to_use == ImageType.RO:
            c.write('reboot ro\n'.encode())
            time.sleep(1)

        test_cmd = 'runtest ' + ' '.join(test.test_args) + '\n'
        c.write(test_cmd.encode())

        while True:
            c.flush()
            line = readline(executor, c, 1)
            if not line:
                now = time.time()
                if now - start > test.timeout_secs:
                    logging.debug("Test timed out")
                    return False
                continue

            logging.debug(line)
            test.logs.append(line)
            # Look for test_print_result() output (success or failure)
            line_str = process_console_output_line(line, test)
            if line_str is None:
                # Sometimes we get non-unicode from the console (e.g., when the
                # board reboots.) Not much we can do in this case, so we'll just
                # ignore it.
                continue

            for r in test.finish_regexes:
                if r.match(line_str):
                    # flush read the remaining
                    lines = readlines_until_timeout(executor, c, 1)
                    logging.debug(lines)
                    test.logs.append(lines)

                    for line in lines:
                        process_console_output_line(line, test)

                    return test.num_fails == 0
def get_test_list(config: BoardConfig, test_args) -> List[TestConfig]:
    """Get a list of tests to run.

    Args:
        config: Board configuration used to resolve board-specific tests.
        test_args: Either the literal string 'all' (the argparse default) or
            a list of test names from the command line.

    Returns:
        The TestConfig objects to run. Exits the process with status 1 on
        an unknown test name.
    """
    # argparse (nargs='+') yields the plain string 'all' for the default,
    # but a list ['all'] when the user explicitly passes "-t all".
    # The original code only handled the string form, so "-t all" failed
    # with "Unable to find test config"; accept both spellings here.
    if test_args == 'all' or list(test_args) == ['all']:
        return list(AllTests.get(config).values())

    test_list = []
    for t in test_args:
        logging.debug('test: %s', t)
        test_config = AllTests.get(config).get(t)
        if test_config is None:
            logging.error('Unable to find test config for "%s"', t)
            sys.exit(1)
        test_list.append(test_config)

    return test_list
def main():
    """Parse CLI args, then build, flash, and run each requested test.

    Exits with status 0 if every test passed, 1 otherwise.
    """
    parser = argparse.ArgumentParser()

    default_board = 'bloonchipper'
    parser.add_argument(
        '--board', '-b',
        help='Board (default: ' + default_board + ')',
        default=default_board)

    default_tests = 'all'
    parser.add_argument(
        '--tests', '-t',
        nargs='+',
        help='Tests (default: ' + default_tests + ')',
        default=default_tests)

    log_level_choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
    parser.add_argument(
        '--log_level', '-l',
        choices=log_level_choices,
        default='DEBUG'
    )

    flasher_choices = [SERVO_MICRO, JTRACE]
    parser.add_argument(
        '--flasher', '-f',
        choices=flasher_choices,
        default=JTRACE
    )

    compiler_options = [GCC, CLANG]
    parser.add_argument('--compiler', '-c',
                        choices=compiler_options,
                        default=GCC)

    # This might be expanded to serve as a "remote" for flash_ec also, so
    # we will leave it generic.
    parser.add_argument(
        '--remote', '-n',
        help='The remote host:ip to connect to J-Link. '
             'This is passed to flash_jlink.py.',
    )

    args = parser.parse_args()
    logging.basicConfig(level=args.log_level)

    if args.board not in BOARD_CONFIGS:
        logging.error('Unable to find a config for board: "%s"', args.board)
        sys.exit(1)

    board_config = BOARD_CONFIGS[args.board]

    # Single worker: readline() submits one blocking console read at a time.
    e = ThreadPoolExecutor(max_workers=1)

    test_list = get_test_list(board_config, args.tests)
    logging.debug('Running tests: %s', [t.name for t in test_list])

    for test in test_list:
        # build test binary
        build(test.name, args.board, args.compiler)

        # flash test binary
        # TODO(b/158327221): First attempt to flash fails after
        # flash_write_protect test is run; works after second attempt.
        flash_succeeded = False
        for i in range(0, test.num_flash_attempts):
            logging.debug('Flash attempt %d', i + 1)
            if flash(test.name, args.board, args.flasher, args.remote):
                flash_succeeded = True
                break
            time.sleep(1)

        if not flash_succeeded:
            logging.debug('Flashing failed after max attempts: %d',
                          test.num_flash_attempts)
            test.passed = False
            continue

        if test.toggle_power:
            power(board_config, on=False)
            time.sleep(1)
            power(board_config, on=True)

        hw_write_protect(test.enable_hw_write_protect)

        # run the test
        logging.info('Running test: "%s"', test.name)
        console = get_console(board_config)
        test.passed = run_test(test, console, executor=e)

    colorama.init()
    exit_code = 0
    for test in test_list:
        # print results
        print('Test "' + test.name + '": ', end='')
        if test.passed:
            print(colorama.Fore.GREEN + 'PASSED')
        else:
            print(colorama.Fore.RED + 'FAILED')
            exit_code = 1

        print(colorama.Style.RESET_ALL)

    e.shutdown(wait=False)

    sys.exit(exit_code)
| 31.711538 | 99 | 0.60091 |
504dcb4305d800a670c0dc22d9243d9cb7b24a4b | 87 | py | Python | tests/unit/test_clutch.py | wyleung/clutch | 2564735245a97a4fc0a1bb923c2a515dc2ae61a1 | [
"MIT"
] | null | null | null | tests/unit/test_clutch.py | wyleung/clutch | 2564735245a97a4fc0a1bb923c2a515dc2ae61a1 | [
"MIT"
] | null | null | null | tests/unit/test_clutch.py | wyleung/clutch | 2564735245a97a4fc0a1bb923c2a515dc2ae61a1 | [
"MIT"
] | null | null | null | from clutch import __version__
def test_version():
    """The package reports the expected release version."""
    assert __version__ == "2.0.0"
| 14.5 | 33 | 0.712644 |
a95e44df603596d90027a9b5fd64b7f0d130c450 | 1,460 | py | Python | recipes/Python/577119_3d_Surface_fitting_N_random/recipe-577119.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/577119_3d_Surface_fitting_N_random/recipe-577119.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/577119_3d_Surface_fitting_N_random/recipe-577119.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | # 3D surface fitting to N random points
# using inverse distance weighted averages.
# FB - 201003162
from PIL import Image
import random
import math
# image size
imgx = 512
imgy = 512
image = Image.new("RGB", (imgx, imgy))
# random color palette coefficients
kr = random.randint(1, 7)
kg = random.randint(1, 7)
kb = random.randint(1, 7)
ir = 2**kr
ig = 2**kg
ib = 2**kb
jr = 2**(8-kr)
jg = 2**(8-kg)
jb = 2**(8-kb)
# select n random points
n=random.randint(5, 50)
arx=[]
ary=[]
arz=[]
for i in range(n):
arx.append(random.randint(0, imgx-1))
ary.append(random.randint(0, imgy-1))
arz.append(random.randint(0, 255))
for y in range(imgy):
for x in range(imgx):
flag=False
sumv=0.0
sumw=0.0
for i in range(n):
dx=x-arx[i]
dy=y-ary[i]
if(dx==0 and dy==0):
flag=True
z=arz[i]
break
else:
# wgh=1.0/math.pow(math.sqrt(dx*dx+dy*dy),1.0) # linear
wgh=1.0/math.pow(math.sqrt(dx*dx+dy*dy),2.0) # quadratic
# wgh=1.0/math.pow(math.sqrt(dx*dx+dy*dy),3.0) # cubic
sumw+=wgh
sumv+=(wgh*arz[i])
if flag==False:
z=int(sumv/sumw)
# z to RGB
r = z % ir * jr
g = z % ig * jg
b = z % ib * jb
image.putpixel((x, y), b * 65536 + g * 256 + r)
image.save("rndSurface.png", "PNG")
| 22.8125 | 72 | 0.516438 |
8fbfad2df712e30b5b231cb7e2186aca7c8eb08f | 405 | py | Python | astr-119-session-4/using_numpy_continued.py | Aukau/Astr-119 | da56326c84ad6755aee0182d87c607b4c321c45d | [
"MIT"
] | null | null | null | astr-119-session-4/using_numpy_continued.py | Aukau/Astr-119 | da56326c84ad6755aee0182d87c607b4c321c45d | [
"MIT"
] | 12 | 2021-09-27T18:42:44.000Z | 2021-12-09T18:01:31.000Z | astr-119-session-4/using_numpy_continued.py | Aukau/Astr-119 | da56326c84ad6755aee0182d87c607b4c321c45d | [
"MIT"
] | null | null | null | import numpy as np
x = 1.0
y = 2.0
#exponenets and logarithms
print(np.exp(x))
print(np.log(x))
print(np.log10(x))
print(np.log2(x))
#min.max.misc
print(np.fabs(x))
print(np.fmin(x, y))
print(np.fmax(x, y))
#populate arrays
n = 1200
z = np.arange(n, dtype = float)
z *= 2.0*np.pi / float(n-1)
sin_z = np.sin(z)
#interpolate
print(np.interp(0.75, z, sin_z))
print(np.sin(0.75)) | 16.2 | 33 | 0.614815 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.