hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7959f2b20cb87e5a7794dafbb97222d684aaba8b | 1,316 | py | Python | var/spack/repos/builtin/packages/angsd/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/angsd/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/angsd/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Angsd(MakefilePackage):
    """Angsd is a program for analysing NGS data. The software can handle a
    number of different input types from mapped reads to imputed genotype
    probabilities. Most methods take genotype uncertainty into account
    instead of basing the analysis on called genotypes. This is especially
    useful for low and medium depth data."""

    homepage = "https://github.com/ANGSD/angsd"
    url = "https://github.com/ANGSD/angsd/archive/0.919.tar.gz"

    version('0.921', sha256='8892d279ce1804f9e17fe2fc65a47e5498e78fc1c1cb84d2ca2527fd5c198772')
    version('0.919', sha256='c2ea718ca5a5427109f4c3415e963dcb4da9afa1b856034e25c59c003d21822a')

    depends_on('htslib')
    # Per the conflict below, angsd 0.919 cannot build against htslib 1.6+.
    conflicts('^htslib@1.6:', when='@0.919')

    def setup_environment(self, spack_env, run_env):
        # Expose the R libraries installed under <prefix>/R to R at run time.
        # BUGFIX: 'prefix' is not a name in scope in this method (unlike in
        # install(), where it is a parameter); it must be self.prefix, else
        # this raises NameError when the environment is set up.
        run_env.set('R_LIBS', self.prefix.R)

    def install(self, spec, prefix):
        """Copy the built binary plus its R/RES/scripts support trees into prefix."""
        mkdirp(prefix.bin)
        install('angsd', join_path(prefix.bin))
        install_tree('R', prefix.R)
        install_tree('RES', prefix.RES)
        install_tree('scripts', prefix.scripts)
| 38.705882 | 95 | 0.715046 |
7959f31fd40cac1e14f726eb7be54ea312f8e647 | 12,587 | py | Python | electronics_abstract_parts/AbstractPowerConverters.py | lab11/PolymorphicBlocks | 52e5ee332fddc9a9f583ebabfca863365e873bf7 | [
"BSD-3-Clause"
] | null | null | null | electronics_abstract_parts/AbstractPowerConverters.py | lab11/PolymorphicBlocks | 52e5ee332fddc9a9f583ebabfca863365e873bf7 | [
"BSD-3-Clause"
] | null | null | null | electronics_abstract_parts/AbstractPowerConverters.py | lab11/PolymorphicBlocks | 52e5ee332fddc9a9f583ebabfca863365e873bf7 | [
"BSD-3-Clause"
] | null | null | null | from electronics_model import *
from .Categories import *
from .AbstractPassives import Inductor, DecouplingCapacitor
@abstract_block
class DcDcConverter(PowerConditioner):
  """Base class for all DC-DC converters with shared ground (non-isolated)."""
  @init_in_parent
  def __init__(self, output_voltage: RangeExpr = RangeExpr()) -> None:
    super().__init__()
    # Output voltage range requested by the instantiating design; the actual
    # solved output voltage is constrained to lie within this below.
    self.spec_output_voltage = self.Parameter(RangeExpr(output_voltage))
    self.pwr_in = self.Port(VoltageSink(
      voltage_limits=RangeExpr(),
      current_draw=RangeExpr()
    ), [Power, Input]) # TODO mark as future-connected here?
    self.pwr_out = self.Port(VoltageSource(
      voltage_out=RangeExpr(),
      current_limits=RangeExpr()
    ), [Output]) # TODO mark as future-connected here?
    self.gnd = self.Port(Ground(), [Common]) # TODO mark as future-connected?
    # Concrete subclasses must refine pwr_out.voltage_out to satisfy this.
    self.require(self.pwr_out.voltage_out.within(self.spec_output_voltage),
                 "Output voltage must be within spec")
@abstract_block
class LinearRegulator(DcDcConverter):
  """DC-DC converter modeled as a linear regulator: input current tracks output
  current (plus quiescent draw), and the input must exceed the output by the
  dropout voltage."""
  @init_in_parent
  def __init__(self, output_voltage: RangeExpr = RangeExpr()) -> None:
    super().__init__(output_voltage)
    self.dropout = self.Parameter(RangeExpr())  # minimum input-output differential for regulation
    self.quiescent_current = self.Parameter(RangeExpr())  # regulator self-consumption
    # TODO these constraints establish a theoretical bound, but allows (and demands) subtypes refine to exact values
    self.require(self.pwr_in.current_draw.within(self.pwr_out.link().current_drawn + self.quiescent_current + (0, 0.01))) # TODO avoid fudge factor
    self.require(self.pwr_in.link().voltage.lower() >= self.pwr_out.link().voltage.upper() + self.dropout.upper()) # TODO more elegant?
@abstract_block
class DcDcSwitchingConverter(DcDcConverter):
  """https://www.ti.com/lit/an/slta055/slta055.pdf: recommends 75mV for maximum peak-peak ripple voltage
  """
  @init_in_parent
  def __init__(self, output_voltage: RangeExpr = RangeExpr(), ripple_current_factor: RangeLike = RangeExpr(),
               input_ripple_limit: FloatExpr = 75 * mVolt, output_ripple_limit: FloatExpr = 25 * mVolt) -> None:
    # TODO can this be integrated with some kind of AbstractDcDcConverter?
    super().__init__(output_voltage)
    # Allowed inductor ripple current, as a fraction of output current.
    self.ripple_current_factor = self.Parameter(RangeExpr(ripple_current_factor))
    self.input_ripple_limit = self.Parameter(FloatExpr(input_ripple_limit))  # max peak-peak input ripple voltage
    self.output_ripple_limit = self.Parameter(FloatExpr(output_ripple_limit))  # max peak-peak output ripple voltage
    self.efficiency = self.Parameter(RangeExpr())  # presumably refined by concrete subclasses — TODO confirm
    # Power balance: Iin ~= Iout * Vout / (Vin * efficiency), plus a fudge term
    # for converter overhead draw.
    self.require(self.pwr_in.current_draw.within((
      self.pwr_out.link().current_drawn * self.pwr_out.voltage_out / self.pwr_in.link().voltage / self.efficiency + (0, 0.01) # TODO avoid fudge factor
    )))
@abstract_block
class BuckConverter(DcDcSwitchingConverter):
  """Step-down switching converter"""
  def __init__(self, ripple_current_factor: RangeLike = (0.2, 0.5), **kwargs) -> None:
    # TODO default ripple is very heuristic, intended 0.3-0.4, loosely adjusted for inductor tolerance
    # TODO can this be integrated with some kind of AbstractDcDcConverter?
    super().__init__(ripple_current_factor=ripple_current_factor, **kwargs)
    self.frequency = self.Parameter(RangeExpr())  # switching frequency range
@abstract_block
class DiscreteBuckConverter(BuckConverter):
  """Provides a helper function to generate the power path for a switching buck converter.
  TODO: support non-synchronous topologies and non-integrated FETs
  Useful resources:
  https://www.ti.com/lit/an/slva477b/slva477b.pdf
    Component sizing in continuous mode
    Listed references go into more detail
  http://www.onmyphd.com/?p=voltage.regulators.buck.step.down.converter
    Very detailed analysis including component sizing, operating modes, calculating losses
  """
  DUTYCYCLE_MIN_LIMIT = 0.1  # lower bound on controllable duty cycle
  DUTYCYCLE_MAX_LIMIT = 0.9  # upper bound on controllable duty cycle
  WORST_EFFICIENCY_ESTIMATE = 0.9 # from TI reference
  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.dutycycle_limit = self.Parameter(RangeExpr((self.DUTYCYCLE_MIN_LIMIT, self.DUTYCYCLE_MAX_LIMIT)))
    self.dutycycle = self.Parameter(RangeExpr()) # calculated duty cycle
  def _generate_converter(self, switch_node: VoltageSource, rated_max_current_amps: float,
                          input_voltage: Range, output_voltage: Range,
                          output_current_max: float, frequency: Range,
                          spec_output_ripple: float, spec_input_ripple: float, ripple_factor: Range,
                          dutycycle_limit: Range
                          ) -> Passive:
    """
    Given the switch node, generates the passive (in and out filter caps, inductor) components,
    connects them, and returns the output of the inductor as a PassivePort.
    The caller must connect the PassivePort.
    Main assumptions in component sizing
    - Operating only in continuous mode TODO: also consider boundary and discontinuous mode
    TODO support capacitor ESR calculation
    TODO unify rated max current with something else, perhaps a block param?
    """
    # Buck duty cycle D = Vout / (Vin * efficiency); the worst-case efficiency
    # estimate widens the computed range.
    dutycycle = output_voltage / input_voltage / Range(self.WORST_EFFICIENCY_ESTIMATE, 1)
    self.assign(self.dutycycle, dutycycle)
    # if these are violated, these generally mean that the converter will start tracking the input
    # these can (maybe?) be waived if tracking (plus losses) is acceptable
    self.require(self.dutycycle.within(dutycycle_limit), f"dutycycle {dutycycle} outside limit {dutycycle_limit}")
    # these are actual numbers to be used in calculations
    effective_dutycycle = dutycycle.bound_to(dutycycle_limit)
    # Target inductor ripple current; floored by the device rating case below.
    ripple_current = (output_current_max * ripple_factor).extend_upper_to(
      rated_max_current_amps * ripple_factor.lower # see LMR33630 datasheet, use rating if current draw much lower
    )
    # Calculate minimum inductance based on worst case values (operating range corners producing maximum inductance)
    # This range must be constructed manually to not double-count the tolerance stackup of the voltages
    # L = Vout * (Vin - Vout) / (dI * f * Vin), evaluated at the corner values.
    inductance_min = (output_voltage.lower * (input_voltage.upper - output_voltage.lower) /
                      (ripple_current.upper * frequency.lower * input_voltage.upper))
    inductance_max = (output_voltage.lower * (input_voltage.upper - output_voltage.lower) /
                      (ripple_current.lower * frequency.lower * input_voltage.upper))
    inductance = Range(inductance_min, inductance_max)
    # TODO size based on transient response, add to voltage tolerance stackups
    # Cout >= dI / (8 * f * Vripple): standard buck output-capacitor sizing.
    output_capacitance = Range.from_lower(ripple_current.upper / (8 * frequency.lower * spec_output_ripple))
    # TODO pick a single worst-case DC
    input_capacitance = Range.from_lower(output_current_max * effective_dutycycle.upper * (1 - effective_dutycycle.lower) /
                                         (frequency.lower * spec_input_ripple))
    # Peak switch/inductor current: DC load plus half the peak-peak ripple.
    sw_current_max = output_current_max + ripple_current.upper / 2
    self.inductor = self.Block(Inductor(
      inductance=inductance*Henry,
      current=(0, sw_current_max)*Amp,
      frequency=frequency*Hertz
    ))
    # TODO: DC derating
    # Note, implicit connect is not great here because of the different power in / power out rails
    # But maybe something can be done with ground?
    self.in_cap = self.Block(DecouplingCapacitor(
      capacitance=input_capacitance*Farad,
    ))
    self.out_cap = self.Block(DecouplingCapacitor(
      capacitance=output_capacitance*Farad,
    ))
    self._pwr_in_net = self.connect(self.pwr_in, self.in_cap.pwr)
    self._pwr_out_net = self.connect(self.pwr_out, self.out_cap.pwr)
    self._gnd_net = self.connect(self.gnd, self.in_cap.gnd, self.out_cap.gnd)
    self._switch_net = self.connect(switch_node, self.inductor.a.as_voltage_sink(
      voltage_limits=RangeExpr.ALL,
      current_draw=(0, sw_current_max)*Amp))
    return self.inductor.b
@abstract_block
class BoostConverter(DcDcSwitchingConverter):
  """Step-up switching converter"""
  def __init__(self, ripple_current_factor: RangeLike = (0.2, 0.5), **kwargs) -> None:
    # TODO default ripple is very heuristic, intended 0.3-0.4, loosely adjusted for inductor tolerance
    # TODO can this be integrated with some kind of AbstractDcDcConverter?
    super().__init__(ripple_current_factor=ripple_current_factor, **kwargs)
    self.frequency = self.Parameter(RangeExpr())  # switching frequency range
@abstract_block
class DiscreteBoostConverter(BoostConverter):
  """Step-up switching converter, with provisions for passives sizing.
  TODO: support non-integrated FETs
  Useful resources:
  https://www.ti.com/lit/an/slva372c/slva372c.pdf
    Component sizing in continuous mode
    Listed references go into more detail
  http://www.simonbramble.co.uk/dc_dc_converter_design/boost_converter/boost_converter_design.htm
    Detailed analysis of converter with discrete FET and diode
  """
  DUTYCYCLE_MIN_LIMIT = 0.2 # inferred from Figure 9
  DUTYCYCLE_MAX_LIMIT = 0.85 # by datasheet
  WORST_EFFICIENCY_ESTIMATE = 0.8 # from TI reference
  def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.dutycycle_limit = self.Parameter(RangeExpr((self.DUTYCYCLE_MIN_LIMIT, self.DUTYCYCLE_MAX_LIMIT)))
    self.dutycycle = self.Parameter(RangeExpr()) # calculated duty cycle
  def _generate_converter(self, switch_node: VoltageSource, rated_max_current_amps: float,
                          input_voltage: Range, output_voltage: Range,
                          output_current_max: float, frequency: Range,
                          spec_output_ripple: float, spec_input_ripple: float, ripple_factor: Range,
                          dutycycle_limit: Range
                          ) -> None:
    """
    - diode needs to be fast, consider forward voltage drop, forward current (> peak inductor current), reverse volts (> Vout)
    Main assumptions in component sizing
    - Operating only in continuous mode TODO: also consider boundary and discontinuous mode
    TODO support capacitor ESR calculation
    """
    # Boost duty cycle D = 1 - Vin * efficiency / Vout; worst-case efficiency
    # estimate widens the computed range.
    dutycycle = 1 - input_voltage / output_voltage * Range(self.WORST_EFFICIENCY_ESTIMATE, 1)
    self.assign(self.dutycycle, dutycycle)
    # if these are violated, these generally mean that the converter will start tracking the input
    # these can (maybe?) be waived if tracking (plus losses) is acceptable
    self.require(self.dutycycle.within(dutycycle_limit), f"dutycycle {dutycycle} outside limit {dutycycle_limit}")
    # these are actual numbers to be used in calculations
    effective_dutycycle = dutycycle.bound_to(dutycycle_limit)
    # Target inductor ripple current; floored by the device rating case below.
    ripple_current = (output_current_max * ripple_factor).extend_upper_to(
      rated_max_current_amps * ripple_factor.lower # see LMR33630 datasheet, use rating if current draw much lower
    )
    # Calculate minimum inductance based on worst case values (operating range corners producing maximum inductance)
    # This range must be constructed manually to not double-count the tolerance stackup of the voltages
    # L = Vin * (Vout - Vin) / (dI * f * Vout), evaluated at the corner values.
    inductance_min = (input_voltage.lower * (output_voltage.upper - input_voltage.lower) /
                      (ripple_current.upper * frequency.lower * output_voltage.lower))
    inductance_max = (input_voltage.lower * (output_voltage.upper - input_voltage.lower) /
                      (ripple_current.lower * frequency.lower * output_voltage.lower))
    inductance = Range(inductance_min, inductance_max)
    # Cout >= Iout * D / (f * Vripple): standard boost output-capacitor sizing.
    output_capacitance = Range.from_lower(output_current_max * effective_dutycycle.upper /
                                          (frequency.lower * spec_output_ripple))
    input_capacitance = Range.from_lower((output_current_max / effective_dutycycle.lower) * (1 - effective_dutycycle.lower) /
                                         (frequency.lower * spec_input_ripple))
    # Peak switch/inductor current: half the ripple plus the reflected load
    # current Iout / (1 - D).
    sw_current_max = ripple_current.upper / 2 + output_current_max / (1 - effective_dutycycle.upper)
    self.inductor = self.Block(Inductor(
      inductance=inductance*Henry,
      current=(0, sw_current_max)*Amp,
      frequency=frequency*Hertz
    ))
    # TODO: DC derating
    # Note, implicit connect is not great here because of the different power in / power out rails
    # But maybe something can be done with ground?
    self.in_cap = self.Block(DecouplingCapacitor(
      capacitance=input_capacitance*Farad,
    ))
    self.out_cap = self.Block(DecouplingCapacitor(
      capacitance=output_capacitance*Farad,
    ))
    self.connect(self.pwr_in, self.in_cap.pwr, self.inductor.a.as_voltage_sink())
    self.connect(self.pwr_out, self.out_cap.pwr)
    self.connect(self.gnd, self.in_cap.gnd, self.out_cap.gnd)
    self.connect(switch_node, self.inductor.b.as_voltage_sink())
| 48.976654 | 152 | 0.726384 |
7959f3783ba03583b1f6c773e0051b374b030d39 | 13,965 | py | Python | nova/tests/rpc/test_qpid.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | nova/tests/rpc/test_qpid.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | nova/tests/rpc/test_qpid.py | bopopescu/openstack-12 | 2c7e0d1e63cae7aaa38095439843c9a2abb0382b | [
"Apache-2.0"
] | null | null | null | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using qpid
"""
import mox
from nova import context
from nova import flags
from nova import log as logging
from nova.rpc import amqp as rpc_amqp
from nova import test
try:
import qpid
from nova.rpc import impl_qpid
except ImportError:
qpid = None
impl_qpid = None
FLAGS = flags.FLAGS
LOG = logging.getLogger(__name__)
class RpcQpidTestCase(test.TestCase):
    """
    Exercise the public API of impl_qpid utilizing mox.
    This set of tests utilizes mox to replace the Qpid objects and ensures
    that the right operations happen on them when the various public rpc API
    calls are exercised. The API calls tested here include:
        nova.rpc.create_connection()
        nova.rpc.common.Connection.create_consumer()
        nova.rpc.common.Connection.close()
        nova.rpc.cast()
        nova.rpc.fanout_cast()
        nova.rpc.call()
        nova.rpc.multicall()
    """
    def setUp(self):
        super(RpcQpidTestCase, self).setUp()
        self.mock_connection = None
        self.mock_session = None
        self.mock_sender = None
        self.mock_receiver = None
        if qpid:
            impl_qpid.register_opts(FLAGS)
            # Save the real qpid.messaging classes, then replace them with
            # factories returning the per-test mox mocks.  tearDown restores
            # the originals.
            self.orig_connection = qpid.messaging.Connection
            self.orig_session = qpid.messaging.Session
            self.orig_sender = qpid.messaging.Sender
            self.orig_receiver = qpid.messaging.Receiver
            qpid.messaging.Connection = lambda *_x, **_y: self.mock_connection
            qpid.messaging.Session = lambda *_x, **_y: self.mock_session
            qpid.messaging.Sender = lambda *_x, **_y: self.mock_sender
            qpid.messaging.Receiver = lambda *_x, **_y: self.mock_receiver
    def tearDown(self):
        # Undo the monkey-patching done in setUp.
        if qpid:
            qpid.messaging.Connection = self.orig_connection
            qpid.messaging.Session = self.orig_session
            qpid.messaging.Sender = self.orig_sender
            qpid.messaging.Receiver = self.orig_receiver
        if impl_qpid:
            # Need to reset this in case we changed the connection_cls
            # in self._setup_to_server_tests()
            impl_qpid.Connection.pool.connection_cls = impl_qpid.Connection
        super(RpcQpidTestCase, self).tearDown()
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_create_connection(self):
        # Record phase: expected operations on connection open/close.
        self.mock_connection = self.mox.CreateMock(self.orig_connection)
        self.mock_session = self.mox.CreateMock(self.orig_session)
        self.mock_connection.opened().AndReturn(False)
        self.mock_connection.open()
        self.mock_connection.session().AndReturn(self.mock_session)
        self.mock_connection.close()
        self.mox.ReplayAll()
        connection = impl_qpid.create_connection(FLAGS)
        connection.close()
    def _test_create_consumer(self, fanout):
        # Shared body for the topic- and fanout-consumer tests: the difference
        # is the qpid address string the consumer is expected to subscribe to.
        self.mock_connection = self.mox.CreateMock(self.orig_connection)
        self.mock_session = self.mox.CreateMock(self.orig_session)
        self.mock_receiver = self.mox.CreateMock(self.orig_receiver)
        self.mock_connection.opened().AndReturn(False)
        self.mock_connection.open()
        self.mock_connection.session().AndReturn(self.mock_session)
        if fanout:
            # The link name includes a UUID, so match it with a regex.
            expected_address = mox.Regex(r'^impl_qpid_test_fanout ; '
                '{"node": {"x-declare": {"auto-delete": true, "durable": '
                'false, "type": "fanout"}, "type": "topic"}, "create": '
                '"always", "link": {"x-declare": {"auto-delete": true, '
                '"exclusive": true, "durable": false}, "durable": true, '
                '"name": "impl_qpid_test_fanout_.*"}}$')
        else:
            expected_address = ('nova/impl_qpid_test ; {"node": {"x-declare": '
                '{"auto-delete": true, "durable": true}, "type": "topic"}, '
                '"create": "always", "link": {"x-declare": {"auto-delete": '
                'true, "exclusive": false, "durable": false}, "durable": '
                'true, "name": "impl_qpid_test"}}')
        self.mock_session.receiver(expected_address).AndReturn(
            self.mock_receiver)
        self.mock_receiver.capacity = 1
        self.mock_connection.close()
        self.mox.ReplayAll()
        connection = impl_qpid.create_connection(FLAGS)
        connection.create_consumer("impl_qpid_test",
                                   lambda *_x, **_y: None,
                                   fanout)
        connection.close()
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_create_consumer(self):
        self._test_create_consumer(fanout=False)
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_create_consumer_fanout(self):
        self._test_create_consumer(fanout=True)
    def _test_cast(self, fanout, server_params=None):
        # Shared body for cast / fanout_cast and their *_to_server variants.
        self.mock_connection = self.mox.CreateMock(self.orig_connection)
        self.mock_session = self.mox.CreateMock(self.orig_session)
        self.mock_sender = self.mox.CreateMock(self.orig_sender)
        self.mock_connection.opened().AndReturn(False)
        self.mock_connection.open()
        self.mock_connection.session().AndReturn(self.mock_session)
        if fanout:
            expected_address = ('impl_qpid_test_fanout ; '
                '{"node": {"x-declare": {"auto-delete": true, '
                '"durable": false, "type": "fanout"}, '
                '"type": "topic"}, "create": "always"}')
        else:
            expected_address = ('nova/impl_qpid_test ; {"node": {"x-declare": '
                '{"auto-delete": true, "durable": false}, "type": "topic"}, '
                '"create": "always"}')
        self.mock_session.sender(expected_address).AndReturn(self.mock_sender)
        self.mock_sender.send(mox.IgnoreArg())
        if not server_params:
            # This is a pooled connection, so instead of closing it, it
            # gets reset, which is just creating a new session on the
            # connection.
            self.mock_session.close()
            self.mock_connection.session().AndReturn(self.mock_session)
        self.mox.ReplayAll()
        try:
            ctx = context.RequestContext("user", "project")
            args = [FLAGS, ctx, "impl_qpid_test",
                    {"method": "test_method", "args": {}}]
            if server_params:
                args.insert(2, server_params)
                if fanout:
                    method = impl_qpid.fanout_cast_to_server
                else:
                    method = impl_qpid.cast_to_server
            else:
                if fanout:
                    method = impl_qpid.fanout_cast
                else:
                    method = impl_qpid.cast
            method(*args)
        finally:
            while impl_qpid.Connection.pool.free_items:
                # Pull the mock connection object out of the connection pool so
                # that it doesn't mess up other test cases.
                impl_qpid.Connection.pool.get()
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_cast(self):
        self._test_cast(fanout=False)
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_fanout_cast(self):
        self._test_cast(fanout=True)
    def _setup_to_server_tests(self, server_params):
        # Replace impl_qpid.Connection with a subclass that asserts the
        # explicit server_params (credentials/broker) were honored.
        class MyConnection(impl_qpid.Connection):
            def __init__(myself, *args, **kwargs):
                super(MyConnection, myself).__init__(*args, **kwargs)
                self.assertEqual(myself.connection.username,
                                 server_params['username'])
                self.assertEqual(myself.connection.password,
                                 server_params['password'])
                self.assertEqual(myself.broker,
                                 server_params['hostname'] + ':' +
                                 str(server_params['port']))
        MyConnection.pool = rpc_amqp.Pool(FLAGS, MyConnection)
        self.stubs.Set(impl_qpid, 'Connection', MyConnection)
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_cast_to_server(self):
        server_params = {'username': 'fake_username',
                         'password': 'fake_password',
                         'hostname': 'fake_hostname',
                         'port': 31337}
        self._setup_to_server_tests(server_params)
        self._test_cast(fanout=False, server_params=server_params)
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_fanout_cast_to_server(self):
        server_params = {'username': 'fake_username',
                         'password': 'fake_password',
                         'hostname': 'fake_hostname',
                         'port': 31337}
        self._setup_to_server_tests(server_params)
        self._test_cast(fanout=True, server_params=server_params)
    def _test_call(self, multi):
        # Shared body for call / multicall: records the request send plus one
        # (or, for multicall, three) reply fetches and the final "ending" one.
        self.mock_connection = self.mox.CreateMock(self.orig_connection)
        self.mock_session = self.mox.CreateMock(self.orig_session)
        self.mock_sender = self.mox.CreateMock(self.orig_sender)
        self.mock_receiver = self.mox.CreateMock(self.orig_receiver)
        self.mock_connection.opened().AndReturn(False)
        self.mock_connection.open()
        self.mock_connection.session().AndReturn(self.mock_session)
        # Reply queue address contains a generated msg_id, hence the regex.
        rcv_addr = mox.Regex(r'^.*/.* ; {"node": {"x-declare": {"auto-delete":'
                   ' true, "durable": true, "type": "direct"}, "type": '
                   '"topic"}, "create": "always", "link": {"x-declare": '
                   '{"auto-delete": true, "exclusive": true, "durable": '
                   'false}, "durable": true, "name": ".*"}}')
        self.mock_session.receiver(rcv_addr).AndReturn(self.mock_receiver)
        self.mock_receiver.capacity = 1
        send_addr = ('nova/impl_qpid_test ; {"node": {"x-declare": '
            '{"auto-delete": true, "durable": false}, "type": "topic"}, '
            '"create": "always"}')
        self.mock_session.sender(send_addr).AndReturn(self.mock_sender)
        self.mock_sender.send(mox.IgnoreArg())
        self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
            self.mock_receiver)
        self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
            {"result": "foo", "failure": False, "ending": False}))
        if multi:
            self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
                self.mock_receiver)
            self.mock_receiver.fetch().AndReturn(
                qpid.messaging.Message(
                    {"result": "bar", "failure": False,
                     "ending": False}))
            self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
                self.mock_receiver)
            self.mock_receiver.fetch().AndReturn(
                qpid.messaging.Message(
                    {"result": "baz", "failure": False,
                     "ending": False}))
        self.mock_session.next_receiver(timeout=mox.IsA(int)).AndReturn(
            self.mock_receiver)
        self.mock_receiver.fetch().AndReturn(qpid.messaging.Message(
            {"failure": False, "ending": True}))
        self.mock_session.close()
        self.mock_connection.session().AndReturn(self.mock_session)
        self.mox.ReplayAll()
        try:
            ctx = context.RequestContext("user", "project")
            if multi:
                method = impl_qpid.multicall
            else:
                method = impl_qpid.call
            res = method(FLAGS, ctx, "impl_qpid_test",
                         {"method": "test_method", "args": {}})
            if multi:
                self.assertEquals(list(res), ["foo", "bar", "baz"])
            else:
                self.assertEquals(res, "foo")
        finally:
            while impl_qpid.Connection.pool.free_items:
                # Pull the mock connection object out of the connection pool so
                # that it doesn't mess up other test cases.
                impl_qpid.Connection.pool.get()
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_call(self):
        self._test_call(multi=False)
    @test.skip_if(qpid is None, "Test requires qpid")
    def test_multicall(self):
        self._test_call(multi=True)
#
#from nova.tests.rpc import common
#
# Qpid does not have a handy in-memory transport like kombu, so it's not
# terribly straight forward to take advantage of the common unit tests.
# However, at least at the time of this writing, the common unit tests all pass
# with qpidd running.
#
# class RpcQpidCommonTestCase(common._BaseRpcTestCase):
# def setUp(self):
# self.rpc = impl_qpid
# super(RpcQpidCommonTestCase, self).setUp()
#
# def tearDown(self):
# super(RpcQpidCommonTestCase, self).tearDown()
#
| 41.19469 | 79 | 0.59642 |
7959f44b320e8af507eed230b3646ff203621324 | 5,361 | py | Python | AddNewUser.py | najla88/SaedRobot | 6c7e00d6572715f2fdaff96a3ca4b221b2c0af9e | [
"MIT"
] | null | null | null | AddNewUser.py | najla88/SaedRobot | 6c7e00d6572715f2fdaff96a3ca4b221b2c0af9e | [
"MIT"
] | null | null | null | AddNewUser.py | najla88/SaedRobot | 6c7e00d6572715f2fdaff96a3ca4b221b2c0af9e | [
"MIT"
] | null | null | null | #!/usr/bin/python
########################################################################
# written by : Reem AlJunaid, CS, #
# Imam Abdulrahman AlFaisal University #
#----------------------------------------------------------------------#
# #
# This interface manages the admin interface where the admin #
# can add new users to the database #
# #
########################################################################
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from common import id_generator,send_email
import string
import sqlite3
from validate_email import validate_email
import re
import login
import ManageUsersAccounts
import subprocess
class AddNewUser():
    """Admin screen for registering a new (non-admin) user account.

    Validates the entered username and email, stores the account with a
    randomly generated password in the SaedRobot.db SQLite database, and
    emails the credentials to the new user.
    """
    builder = None
    window = None
    box = None
    username = None    # Gtk entry widget for the new user's username
    email = None       # Gtk entry widget for the new user's email
    MyUsername = None  # username of the currently logged-in admin
    userType = None    # account type of the currently logged-in admin

    #starting function
    def __init__(self, Username, kind):
        """Build the window from AdminHome.glade and wire up its signals."""
        self.MyUsername = Username
        self.userType = kind
        self.builder = Gtk.Builder()
        self.builder.add_from_file("AdminHome.glade")
        self.window = self.builder.get_object("window3")
        self.username = self.builder.get_object("username")
        self.email = self.builder.get_object("email")
        addBtn = self.builder.get_object("addBtn1")
        logoutBtn = self.builder.get_object("logoutBtn3")
        addBtn.connect("clicked", self.onAddNewUserButtonPressed)
        logoutBtn.connect("clicked", self.onLogoutButtonPressed)
        backbox = self.builder.get_object("backbox3")
        backbox.connect("button-release-event", self.onBackToManageUsersButtonPressed)
        image = self.builder.get_object("image3")
        image.set_visible(1)
        backbox.set_sensitive(1)
        self.window.show_all()
        # Show/hide the on-screen keyboard as the text fields gain/lose focus.
        self.username.connect("focus-in-event", self.focus_in)
        #self.username.connect("focus-out-event",self.focus_out)
        self.email.connect("focus-in-event", self.focus_in)
        self.email.connect("focus-out-event", self.focus_out)

    #show keyboard when the field is in focus
    def focus_in(self, entry, event):
        subprocess.Popen(["onboard", "20*10"])

    #hide keyboard when the field loses focus
    def focus_out(self, entry, event):
        subprocess.Popen(["pkill", "onboard"])

    def _show_error(self, message):
        """Display a modal error dialog with the given message.

        Extracted helper: the original repeated this dialog boilerplate six
        times, once per validation branch.
        """
        dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.ERROR, Gtk.ButtonsType.OK, message)
        dialog.set_title("Error message")
        dialog.run()
        dialog.close()

    def onAddNewUserButtonPressed(self, button):
        """Validate the form; on success create the account and email the password."""
        db = sqlite3.connect('SaedRobot.db')
        # BUGFIX: the connection was previously never closed, leaking one
        # SQLite handle per click; try/finally guarantees cleanup.
        try:
            c = db.cursor()
            c1 = db.cursor()
            c.execute('SELECT * from users WHERE username= ? ', (str(self.username.get_text()),))
            data = c.fetchall()
            c1.execute('SELECT * from users WHERE email= ? ', (str(self.email.get_text()),))
            data1 = c1.fetchall()
            if len(str(self.username.get_text())) == 0:
                self._show_error("No username entered, please enter a username")
            elif len(str(self.email.get_text())) == 0:
                self._show_error("No email entered, please enter an email")
            elif len(data) > 0:
                self._show_error("This username is already exist!")
            elif len(data1) > 0:
                self._show_error("This email is already exist!")
            elif not re.match("^[a-zA-Z0-9_]+$", str(self.username.get_text())):
                self._show_error("Invalid username address, please enter a valid username.")
            elif not validate_email(str(self.email.get_text())):
                self._show_error("Invalid email address, please enter a valid address.")
            else:
                # All checks passed: create the account with a generated
                # password and email the credentials to the new user.
                password = id_generator()
                c.execute('INSERT INTO users(USERNAME,PASSWORD,EMAIL,ADMIN) VALUES (?,?,?,0)',
                          (str(self.username.get_text()), str(password), str(self.email.get_text())))
                db.commit()
                send_email(password, self.username.get_text(), "Saed Robot - Registration Confirmation", str(self.email.get_text()))
                dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "The user has been added")
                dialog.set_title("Confirmation message")
                dialog.run()
                dialog.close()
                # NOTE(review): assumes the screen transition below belongs to
                # the success branch only — confirm against original layout.
                self.window.destroy()
                self.window = ManageUsersAccounts.ManageUsersAccounts(self.MyUsername, self.userType)
        finally:
            db.close()

    def onBackToManageUsersButtonPressed(self, button, a):
        """Return to the manage-users screen."""
        self.window.destroy()
        self.window = ManageUsersAccounts.ManageUsersAccounts(self.MyUsername, self.userType)

    def onLogoutButtonPressed(self, button):
        """Log out and return to the login screen."""
        self.window.destroy()
        self.window = login.loginClass()
#window = AddNewUser()
#Gtk.main()
| 38.847826 | 156 | 0.626003 |
7959f45c36ca3723dc1e68771c51f05cade94ec6 | 631 | py | Python | scripts/state.py | mimno/mallet_state_tools | 5bf47b74e4ce875b65c4919227d7c9f8f7839be5 | [
"Apache-2.0"
] | null | null | null | scripts/state.py | mimno/mallet_state_tools | 5bf47b74e4ce875b65c4919227d7c9f8f7839be5 | [
"Apache-2.0"
] | null | null | null | scripts/state.py | mimno/mallet_state_tools | 5bf47b74e4ce875b65c4919227d7c9f8f7839be5 | [
"Apache-2.0"
] | null | null | null | import regex
import gzip
import re
import regex
# Pattern used to split state-file lines on runs of whitespace.
# Raw string avoids the invalid "\s" string escape (a DeprecationWarning in
# modern CPython), and stdlib `re` fully supports this pattern, so the
# third-party `regex` module is not required for it.
whitespace_pattern = re.compile(r"\s+")
def read_state(filename):
    """Read a gzipped topic-model state file and return parse_line() results
    for every non-comment line (lines starting with '#' are skipped)."""
    with gzip.open(filename, "rt", encoding="UTF8") as reader:
        return [parse_line(line) for line in reader if not line.startswith("#")]
def parse_line(line):
    """Parse one state-file line into a (doc, word, topic) string triple.

    Two layouts are recognized:
      * 6 fields -- MALLET state format; fields 0, 4 and 5 are returned;
      * 3 fields -- already (doc, word, topic); returned as-is.
    Any other field count prints a warning and returns ``None`` (the
    original best-effort behaviour is preserved).
    """
    # \s+ collapses whitespace runs; stdlib `re` covers this pattern, so the
    # function no longer depends on the third-party `regex` module or on a
    # module-level compiled pattern (re caches compiled patterns internally).
    fields = re.split(r"\s+", line.rstrip())
    if len(fields) == 6:  # mallet format
        return (fields[0], fields[4], fields[5])
    elif len(fields) == 3:  # doc, word, topic
        return (fields[0], fields[1], fields[2])
    else:
        print("unrecognized line format")
| 26.291667 | 62 | 0.616482 |
7959f47d2adae58aaf73689e9e1c9f8fa5660f9d | 4,698 | py | Python | tools/fetch_ztf_matchfiles.py | dannygoldstein/kowalski | db955f85f8e2737534c40266a7ca500c96cef80d | [
"MIT"
] | null | null | null | tools/fetch_ztf_matchfiles.py | dannygoldstein/kowalski | db955f85f8e2737534c40266a7ca500c96cef80d | [
"MIT"
] | 2 | 2021-02-02T19:42:19.000Z | 2021-03-16T00:10:25.000Z | tools/fetch_ztf_matchfiles.py | dannygoldstein/kowalski | db955f85f8e2737534c40266a7ca500c96cef80d | [
"MIT"
] | null | null | null | import argparse
from bs4 import BeautifulSoup
import multiprocessing as mp
from multiprocessing.pool import ThreadPool
import os
import pandas as pd
import pathlib
import requests
import subprocess
from tqdm.auto import tqdm
from utils import load_config
''' load config and secrets '''
# config = load_config(path='../', config_file='config.yaml')['kowalski']
# Load the 'kowalski' section of config.yaml; the 'ztf_depot' credentials
# used by collect_urls()/fetch_url() below come from here.
config = load_config(config_file='config.yaml')['kowalski']
def collect_urls(rc):
    """Scrape the IPAC depot listing for readout channel *rc* and append one
    {'rc', 'name', 'url'} record per ``.pytable`` matchfile to the
    module-level ``urls`` list."""
    auth = (config['ztf_depot']['username'], config['ztf_depot']['password'])
    rc_url = os.path.join(base_url, f'rc{rc:02d}')
    rc_listing = BeautifulSoup(requests.get(rc_url, auth=auth).text, 'html.parser')
    for anchor in rc_listing.findAll('a'):
        label = anchor.getText()
        # Only field-level subdirectories (their names contain 'fr').
        if 'fr' not in label:
            continue
        field_url = os.path.join(rc_url, label)
        field_listing = BeautifulSoup(
            requests.get(field_url, auth=auth).text, 'html.parser'
        )
        for field_anchor in field_listing.findAll('a'):
            entry = field_anchor.getText()
            if entry.endswith('.pytable'):
                urls.append({'rc': rc, 'name': entry, 'url': os.path.join(field_url, entry)})
def fetch_url(urlrc, source='ipac'):
    """Download one matchfile into ``path/<rc>/``.

    *urlrc* is a ``(url, rc)`` tuple. Files already present locally are
    skipped. ``source='ipac'`` fetches over HTTP with wget using the depot
    credentials; ``source='supernova'`` copies over scp instead.
    """
    url, _rc = urlrc
    local_file = os.path.join(str(path), str(_rc), os.path.basename(url))
    if os.path.exists(local_file):
        return
    if source == 'ipac':
        subprocess.run([
            'wget',
            f"--http-user={config['ztf_depot']['username']}",
            f"--http-passwd={config['ztf_depot']['password']}",
            '-q', '--timeout=600', '--waitretry=10',
            '--tries=5', '-O', local_file, url,
        ])
    elif source == 'supernova':
        # Map the public URL onto the path layout of the supernova host.
        remote = url.replace('https://', '/media/Data2/Matchfiles/')
        subprocess.run(['scp', f'duev@supernova.caltech.edu:{remote}', path])
def gunzip(f):
    """Decompress *f* in place by shelling out to the system ``gunzip``."""
    command = ['gunzip', f]
    subprocess.run(command)
if __name__ == '__main__':
    # Pipeline: scrape/load matchfile URL manifest -> diff against what is
    # already on GCS -> download missing files -> move them to the bucket.
    parser = argparse.ArgumentParser()
    parser.add_argument('--tag', type=str, default='20200401', help='matchfile release time tag')
    args = parser.parse_args()
    t_tag = args.tag
    # Local staging area: one subdirectory per readout channel (rc 0..63).
    path_base = pathlib.Path('./')
    # path_base = pathlib.Path('/_tmp/')
    path = path_base / f'ztf_matchfiles_{t_tag}/'
    if not path.exists():
        path.mkdir(exist_ok=True, parents=True)
    for rc in range(0, 64):
        path_rc = path / str(rc)
        if not path_rc.exists():
            path_rc.mkdir(exist_ok=True, parents=True)
    # Cached manifest of matchfile URLs; rebuilt by scraping IPAC if absent.
    path_urls = path_base / f'ztf_matchfiles_{t_tag}.csv'
    # n_rc = 1
    n_rc = 64
    if not path_urls.exists():
        base_url = 'https://ztfweb.ipac.caltech.edu/ztf/ops/srcmatch/'
        # store urls
        urls = []
        print('Collecting urls of matchfiles to download:')
        # collect urls of matchfiles to download
        with ThreadPool(processes=20) as pool:
            list(tqdm(pool.imap(collect_urls, range(0, n_rc)), total=n_rc))
        df_mf = pd.DataFrame.from_records(urls)
        print(df_mf)
        df_mf.to_csv(path_urls, index=False)
    else:
        df_mf = pd.read_csv(path_urls)
        print(df_mf)
    # check what's (already) on GCS:
    ongs = []
    for rc in tqdm(range(0, n_rc), total=n_rc):
        ongs_rc = subprocess.check_output([
            'gsutil', 'ls',
            f'gs://ztf-matchfiles-{t_tag}/{rc}/',
        ]).decode('utf-8').strip().split('\n')
        ongs += [pathlib.Path(ong).name for ong in ongs_rc if ong.endswith('pytable')]
    # print(ongs)
    # matchfiles that are not on GCS:
    # print(df_mf['name'].isin(ongs))
    w = ~(df_mf['name'].isin(ongs))
    # print(pd.unique(df_mf.loc[w, 'rc']))
    print(f'Downloading {w.sum()} matchfiles:')
    url_list = [(r.url, r.rc) for r in df_mf.loc[w].itertuples()]
    # download the missing files in parallel (4 worker processes)
    with mp.Pool(processes=4) as pool:
        list(tqdm(pool.imap(fetch_url, url_list), total=len(url_list)))
    # move to GCS:
    for rc in tqdm(range(0, n_rc), total=n_rc):
        # move to gs
        # NOTE(review): the '*.pytable' glob is passed through unexpanded and
        # relies on gsutil's own wildcard handling -- confirm.
        subprocess.run(["/usr/local/bin/gsutil",
                        "-m", "mv",
                        str(path / f"{rc}/*.pytable"),
                        f"gs://ztf-matchfiles-{t_tag}/{rc}/"])
        # remove locally
        # subprocess.run(["rm", "rf", f"/_tmp/ztf_matchfiles_{t_tag}/{rc}/"])
7959f4f37cd95c3c025120b6b4a4d392f342b7af | 2,391 | py | Python | examples/ffchangeglyphnames.py | simoncozens/pysilfont | bb8a9fc58a83e074bbcc466ba058841845b9107e | [
"MIT"
] | 41 | 2015-05-21T21:12:26.000Z | 2022-02-17T17:23:14.000Z | examples/ffchangeglyphnames.py | simoncozens/pysilfont | bb8a9fc58a83e074bbcc466ba058841845b9107e | [
"MIT"
] | 63 | 2015-05-15T10:25:55.000Z | 2021-02-23T04:51:17.000Z | examples/ffchangeglyphnames.py | simoncozens/pysilfont | bb8a9fc58a83e074bbcc466ba058841845b9107e | [
"MIT"
] | 12 | 2015-06-12T11:52:08.000Z | 2020-09-23T10:40:59.000Z | #!/usr/bin/env python
from __future__ import unicode_literals
'''Update glyph names in a font based on csv file
- Using FontForge rather than UFOlib so it can work with ttf (or sfd) files'''
__url__ = 'http://github.com/silnrsi/pysilfont'
__copyright__ = 'Copyright (c) 2016 SIL International (http://www.sil.org)'
__license__ = 'Released under the MIT License (http://opensource.org/licenses/MIT)'
__author__ = 'David Raymond'
from silfont.core import execute
''' This will need updating, since FontForge is no longer supported as a tool by execute() So:
- ifont and ofont will need to be changed to have type 'filename'
- ifont will then need to be opened using fontforge.open
- The font will need to be saved with font.save
- execute will need to be called with the tool set to None instead of "FF"
'''
# Argument specification consumed by silfont.core.execute(): each tuple is
# (name/flags, argparse-style options, framework options -- note the
# parameter 'type' and the default under the 'def' key).
argspec = [
    ('ifont',{'help': 'Input ttf font file'}, {'type': 'infont'}),
    ('ofont',{'help': 'Output font file','nargs': '?' }, {'type': 'outfont'}),
    ('-i','--input',{'help': 'Mapping csv file'}, {'type': 'incsv', 'def': 'psnames.csv'}),
    ('-l','--log',{'help': 'Log file'}, {'type': 'outfile', 'def': '_setPostNames.log'}),
    ('--reverse',{'help': 'Change names in reverse', 'action': 'store_true', 'default': False},{})]
def doit(args):
    """Rename glyphs in ``args.ifont`` using the two-column csv ``args.input``.

    Column 0 maps to column 1 (swapped when ``args.reverse`` is set).  Names
    present on only one side are logged as warnings, with a summary error per
    side.  Returns the (mutated) font.
    """
    logger = args.paramsobj.logger
    font = args.ifont

    # Build old-name -> new-name mapping from the csv.
    mapping_csv = args.input
    mapping_csv.numfields = 2
    rename_map = {}
    unmatched_csv_names = []
    for row in mapping_csv:
        old, new = (row[1], row[0]) if args.reverse else (row[0], row[1])
        rename_map[old] = new
        unmatched_csv_names.append(old)

    # Apply the mapping, tracking names seen in the font but absent from csv.
    any_font_only = False
    for glyph in font.glyphs():
        current = glyph.glyphname
        if current in rename_map:
            unmatched_csv_names.remove(current)
            glyph.glyphname = rename_map[current]
        else:
            any_font_only = True
            logger.log(current + " in font but not csv file", "W")
    if any_font_only:
        logger.log("Font glyph names missing from csv - see log for details", "E")

    # Whatever is left was listed in the csv but never found in the font.
    for leftover in unmatched_csv_names:
        logger.log(leftover + " in csv but not in font", "W")
    if unmatched_csv_names != []:
        logger.log("csv file names missing from font - see log for details", "E")

    return font
def cmd():
    """Console-script entry point: run doit() through the pysilfont framework."""
    execute("FF", doit, argspec)


if __name__ == "__main__":
    cmd()
| 36.227273 | 99 | 0.630698 |
7959f59ee9c899e24361ec50118dfe6f66211ea8 | 2,907 | py | Python | electrum/gui/qt/fee_slider.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 8 | 2020-03-18T21:55:38.000Z | 2021-03-01T12:54:47.000Z | electrum/gui/qt/fee_slider.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 6 | 2020-07-10T13:17:21.000Z | 2021-04-26T11:47:22.000Z | electrum/gui/qt/fee_slider.py | mgrychow/electrum-vault | a15b0fc5db4e83801cd7f1ba3defd56daa0b058a | [
"MIT"
] | 8 | 2020-05-10T11:04:15.000Z | 2021-05-06T14:51:46.000Z | import threading
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QSlider, QToolTip
from electrum.i18n import _
class FeeSlider(QSlider):
    """Horizontal slider for choosing a transaction fee.

    Slider positions are translated into fee rates through the ``config``
    object -- dynamically via mempool depth or ETA when ``config.is_dynfee()``
    is true, otherwise via fixed static rates.  ``callback(dyn, pos, fee_rate)``
    fires on every movement.
    """

    def __init__(self, window, config, callback):
        QSlider.__init__(self, Qt.Horizontal)
        self.config = config
        # NOTE(review): `window` is stored but never read inside this class --
        # presumably kept for external callers; confirm before removing.
        self.window = window
        self.callback = callback
        self.dyn = False
        # Serializes moved()/update(), both of which read config state.
        self.lock = threading.RLock()
        self.update()
        self.valueChanged.connect(self.moved)
        self._active = True

    def reinitialize(self):
        """Re-read fee settings from config and re-emit the callback for the
        freshly loaded slider position."""
        self.update()
        _, pos, _ = self.config.get_fee_slider(self.dyn, self.config.use_mempool_fees())
        self.moved(pos)

    def moved(self, pos):
        """valueChanged slot: convert *pos* to a fee rate, refresh the
        tooltip, and notify the registered callback."""
        with self.lock:
            if self.dyn:
                fee_rate = self.config.depth_to_fee(pos) if self.config.use_mempool_fees() else self.config.eta_to_fee(pos)
            else:
                fee_rate = self.config.static_fee(pos)
            tooltip = self.get_tooltip(pos, fee_rate)
            QToolTip.showText(QCursor.pos(), tooltip, self)
            self.setToolTip(tooltip)
            self.callback(self.dyn, pos, fee_rate)

    def get_tooltip(self, pos, fee_rate):
        """Return the translated two-line tooltip text for *pos*/*fee_rate*."""
        mempool = self.config.use_mempool_fees()
        target, estimate = self.config.get_fee_text(pos, self.dyn, mempool, fee_rate)
        if self.dyn:
            return _('Target') + ': ' + target + '\n' + _('Current rate') + ': ' + estimate
        else:
            return _('Fixed rate') + ': ' + target + '\n' + _('Estimate') + ': ' + estimate

    def update(self):
        """Sync range, position and tooltip from the current config.

        NOTE(review): this shadows QWidget.update() (the repaint slot) --
        presumably intentional, but confirm no caller relies on the Qt
        meaning.
        """
        with self.lock:
            self.dyn = self.config.is_dynfee()
            mempool = self.config.use_mempool_fees()
            maxp, pos, fee_rate = self.config.get_fee_slider(self.dyn, mempool)
            self.setRange(0, maxp)
            self.setValue(pos)
            tooltip = self.get_tooltip(pos, fee_rate)
            self.setToolTip(tooltip)

    def activate(self):
        """Mark the slider active and restore the default platform look."""
        self._active = True
        self.setStyleSheet('')

    def deactivate(self):
        """Mark the slider inactive and gray it out with a custom stylesheet."""
        self._active = False
        # TODO it would be nice to find a platform-independent solution
        # that makes the slider look as if it was disabled
        self.setStyleSheet(
            """
            QSlider::groove:horizontal {
                border: 1px solid #999999;
                height: 8px;
                background: qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #B1B1B1, stop:1 #B1B1B1);
                margin: 2px 0;
            }
            QSlider::handle:horizontal {
                background: qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #b4b4b4, stop:1 #8f8f8f);
                border: 1px solid #5c5c5c;
                width: 12px;
                margin: -2px 0;
                border-radius: 3px;
            }
            """
        )

    def is_active(self):
        # Reflects activate()/deactivate(), not Qt's own enabled state.
        return self._active
| 33.802326 | 123 | 0.569659 |
7959f7e28b6acf69cd9c0a164c4e381afc32c4b7 | 10,308 | py | Python | plot_utils.py | wangkua1/Glow-PyTorch | efec083a4bb0cee971868f49b80a17276ef2cab0 | [
"MIT"
] | null | null | null | plot_utils.py | wangkua1/Glow-PyTorch | efec083a4bb0cee971868f49b80a17276ef2cab0 | [
"MIT"
] | null | null | null | plot_utils.py | wangkua1/Glow-PyTorch | efec083a4bb0cee971868f49b80a17276ef2cab0 | [
"MIT"
] | null | null | null | """Plotting utilities.
"""
import os
import csv
import ipdb
import argparse
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import seaborn as sns
sns.set(style='white')  # apply seaborn's plain white theme to all figures below
def load_epoch_log(exp_dir):
    """Read ``exp_dir/epoch_log.csv`` into a dict: column name -> list of
    floats (one entry per row, in file order)."""
    columns = defaultdict(list)
    log_path = os.path.join(exp_dir, 'epoch_log.csv')
    with open(log_path, newline='') as csvfile:
        for row in csv.DictReader(csvfile):
            for field, cell in row.items():
                columns[field].append(float(cell))
    return columns
def load_log(exp_dir, filename='every_N_log.csv'):
    """Read ``exp_dir/filename`` into a dict: column name -> list of floats.

    Empty cells are skipped, so columns that are only logged occasionally
    (e.g. the singular-value statistics) yield shorter lists.
    """
    columns = defaultdict(list)
    log_path = os.path.join(exp_dir, filename)
    with open(log_path, newline='') as csvfile:
        for row in csv.DictReader(csvfile):
            for field, cell in row.items():
                if cell != '':
                    columns[field].append(float(cell))
    return columns
def load_iteration_log(exp_dir):
    """Read ``exp_dir/iteration_log.csv`` into
    {'global_iteration': [int, ...], 'loss': [float, ...]}."""
    records = defaultdict(list)
    log_path = os.path.join(exp_dir, 'iteration_log.csv')
    with open(log_path, newline='') as csvfile:
        for row in csv.DictReader(csvfile):
            records['global_iteration'].append(int(row['global_iteration']))
            records['loss'].append(float(row['loss']))
    return records
def plot_stability_stats(exp_dir, filename='svd_log.csv'):
    """Plot reconstruction-error and singular-value statistics from a csv log.

    Writes ``exp_dir/stability_stats.png`` (recons error + condition number +
    max/min singular value, all on log-y axes) and, when inverse-Jacobian
    statistics are present in the log, ``stability_stats_inverse.png``.
    """
    result_dict = load_log(exp_dir, filename)
    fig, ax = plt.subplots(nrows=1, ncols=4, figsize=(14,3))
    ax[0].plot(result_dict['global_iteration'], result_dict['mean_recons_error'], linewidth=2)
    ax[0].set_yscale('log')
    ax[0].set_xlabel('Iteration', fontsize=18)
    ax[0].set_ylabel('Recons Error', fontsize=18)
    # NOTE(review): condition_num/max_sv/min_sv are logged together (see the
    # commented-out parsing in load_log), so the guard presumably covers all
    # three panels -- original indentation was ambiguous; confirm.
    if 'condition_num' in result_dict:
        ax[1].plot(result_dict['global_iteration'], result_dict['condition_num'], linewidth=2)
        ax[1].set_yscale('log')
        ax[1].set_xlabel('Iteration', fontsize=18)
        ax[1].set_ylabel('Condition Num', fontsize=18)
        ax[2].plot(result_dict['global_iteration'], result_dict['max_sv'], linewidth=2)
        ax[2].set_yscale('log')
        ax[2].set_xlabel('Iteration', fontsize=18)
        ax[2].set_ylabel('Max singular Value', fontsize=18)
        ax[3].plot(result_dict['global_iteration'], result_dict['min_sv'], linewidth=2)
        ax[3].set_yscale('log')
        ax[3].set_xlabel('Iteration', fontsize=18)
        ax[3].set_ylabel('Min singular Value', fontsize=18)
    plt.tight_layout()
    plt.savefig(os.path.join(exp_dir, 'stability_stats.png'), bbox_inches='tight', pad_inches=0)
    plt.close(fig)
    # Second figure only when inverse statistics were logged.
    if 'inverse_condition_num' in result_dict:
        fig, ax = plt.subplots(nrows=1, ncols=3, figsize=(12,3))
        ax[0].plot(result_dict['global_iteration'], result_dict['inverse_condition_num'], linewidth=2)
        ax[0].set_yscale('log')
        ax[0].set_xlabel('Iteration', fontsize=18)
        ax[0].set_ylabel('Inv Condition Num', fontsize=18)
        ax[1].plot(result_dict['global_iteration'], result_dict['inverse_max_sv'], linewidth=2)
        ax[1].set_yscale('log')
        ax[1].set_xlabel('Iteration', fontsize=18)
        ax[1].set_ylabel('Inv max singular Value', fontsize=18)
        ax[2].plot(result_dict['global_iteration'], result_dict['inverse_min_sv'], linewidth=2)
        ax[2].set_yscale('log')
        ax[2].set_xlabel('Iteration', fontsize=18)
        ax[2].set_ylabel('Inv min singular Value', fontsize=18)
        plt.tight_layout()
        plt.savefig(os.path.join(exp_dir, 'stability_stats_inverse.png'), bbox_inches='tight', pad_inches=0)
        plt.close(fig)
def plot_item(result_dict,
              xkey,
              ykey,
              xlabel='',
              ylabel='',
              xlabel_fontsize=22,
              ylabel_fontsize=22,
              xtick_fontsize=18,
              ytick_fontsize=18,
              yscale='linear',
              linewidth=2,
              save_to=None):
    """Plot ``result_dict[ykey]`` against ``result_dict[xkey]`` as a single
    5x4-inch line figure; save to *save_to* when given, then close the figure.

    The commented-out lines below are leftover per-experiment tweaks
    (iteration cut-offs, tick presets, scientific-notation formatting) kept
    for reference.
    """
    fig = plt.figure(figsize=(5,4))
    # max_iter = 180000
    # max_iter = 160000
    # max_iter = 120000
    # max_iter = 6500
    # num_values = min(len(result_dict[xkey]), len(result_dict[ykey]))
    # num_values = min(num_values, max([i for (i, value) in enumerate(result_dict['global_iteration']) if value < max_iter]))
    # plt.plot(result_dict[xkey][:num_values], result_dict[ykey][:num_values], linewidth=linewidth)
    plt.plot(result_dict[xkey], result_dict[ykey], linewidth=linewidth)
    plt.xlabel(xlabel, fontsize=xlabel_fontsize)
    plt.ylabel(ylabel, fontsize=ylabel_fontsize)
    # plt.xticks([0, 60000, 120000, 180000], fontsize=xtick_fontsize)
    # plt.xticks([0, 40000, 80000, 120000], fontsize=xtick_fontsize)
    # plt.xticks([0, 40000, 80000, 120000, 160000], fontsize=xtick_fontsize)
    # plt.xticks([0, 500, 1000], fontsize=xtick_fontsize)
    plt.xticks(fontsize=xtick_fontsize)
    plt.yticks(fontsize=ytick_fontsize)
    plt.yscale(yscale)
    # plt.yscale('linear')
    # plt.gca().yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0e'))
    # plt.ticklabel_format(axis='y', style='sci', scilimits=(-2,2))
    # Uncomment this part for xticks like '5k'
    # ----------------------------------------
    # if max(result_dict[xkey]) > 3000:
    #     plt.gca().xaxis.set_major_formatter(mtick.FuncFormatter(lambda x, pos: '{:,.0f}'.format(x/1000) + 'k'))
    # ----------------------------------------
    plt.tight_layout()
    if save_to:
        plt.savefig(save_to, bbox_inches='tight', pad_inches=0)
    # NOTE(review): close kept unconditional so figures don't accumulate --
    # original indentation relative to the save guard was ambiguous; confirm.
    plt.close(fig)
def plot_loss(exp_dir):
    """Render the training-loss curve from iteration_log.csv to
    ``exp_dir/figures/loss-plot.pdf``."""
    figure_dir = os.path.join(exp_dir, 'figures')
    if not os.path.exists(figure_dir):
        os.makedirs(figure_dir)
    iteration_log = load_iteration_log(exp_dir)
    plot_item(
        iteration_log,
        xkey='global_iteration',
        ykey='loss',
        xlabel='Iteration',
        ylabel='Loss',
        linewidth=3,
        yscale='linear',
        save_to=os.path.join(figure_dir, 'loss-plot.pdf'),
    )
def plot_individual_figures(exp_dir, filename='every_N_log.csv'):
    """Render one log-scale figure per stability statistic found in the log
    into ``exp_dir/figures/``; statistics absent from the log are skipped."""
    figure_dir = os.path.join(exp_dir, 'figures')
    if not os.path.exists(figure_dir):
        os.makedirs(figure_dir)
    result_dict = load_log(exp_dir, filename)
    # (column key, y-axis label, output file name), in plotting order.
    figure_specs = [
        ('mean_recons_error', 'Reconstruction Error', 'recons-plot.png'),
        ('condition_num', 'Condition Num', 'condition-plot.png'),
        ('min_sv', 'Minimum SV', 'min-sv-plot.png'),
        ('max_sv', 'Maximum SV', 'max-sv-plot.png'),
        ('inverse_condition_num', 'Inverse Cond. Num', 'inv-condition-plot.png'),
        ('inverse_max_sv', 'Inverse Max SV', 'inv-max-sv-plot.png'),
        ('inverse_min_sv', 'Inverse Min SV', 'inv-min-sv-plot.png'),
    ]
    for ykey, ylabel, outfile in figure_specs:
        if ykey in result_dict:
            plot_item(result_dict,
                      xkey='global_iteration',
                      ykey=ykey,
                      xlabel='Iteration',
                      ylabel=ylabel,
                      linewidth=2,
                      yscale='log',
                      save_to=os.path.join(figure_dir, outfile))
if __name__ == '__main__':
    # CLI entry point: regenerate all figures for one experiment directory.
    parser = argparse.ArgumentParser()
    parser.add_argument('--exp_dir', type=str,
                        help='Path to the experiment directory')
    args = parser.parse_args()
    plot_individual_figures(args.exp_dir)
    plot_loss(args.exp_dir)
7959f92b4634be2f84ceeda26f7a1088a73a7d82 | 13,192 | py | Python | numpy/array_api/tests/test_array_object.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 20,453 | 2015-01-02T09:00:47.000Z | 2022-03-31T23:35:56.000Z | numpy/array_api/tests/test_array_object.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 14,862 | 2015-01-01T01:28:34.000Z | 2022-03-31T23:48:52.000Z | numpy/array_api/tests/test_array_object.py | iam-abbas/numpy | 2fb5e969fded3cd468f2ca01d5b954c953545dd9 | [
"BSD-3-Clause"
] | 9,362 | 2015-01-01T15:49:43.000Z | 2022-03-31T21:26:51.000Z | import operator
from numpy.testing import assert_raises
import numpy as np
from .. import ones, asarray, result_type
from .._dtypes import (
_all_dtypes,
_boolean_dtypes,
_floating_dtypes,
_integer_dtypes,
_integer_or_boolean_dtypes,
_numeric_dtypes,
int8,
int16,
int32,
int64,
uint64,
)
def test_validate_index():
    """Indices outside the array API spec must be rejected with IndexError."""
    # The indexing tests in the official array API test suite test that the
    # array object correctly handles the subset of indices that are required
    # by the spec. But the NumPy array API implementation specifically
    # disallows any index not required by the spec, via Array._validate_index.
    # This test focuses on testing that non-valid indices are correctly
    # rejected. See
    # https://data-apis.org/array-api/latest/API_specification/indexing.html
    # and the docstring of Array._validate_index for the exact indexing
    # behavior that should be allowed. This does not test indices that are
    # already invalid in NumPy itself because Array will generally just pass
    # such indices directly to the underlying np.ndarray.

    a = ones((3, 4))

    # Out of bounds slices are not allowed
    assert_raises(IndexError, lambda: a[:4])
    assert_raises(IndexError, lambda: a[:-4])
    assert_raises(IndexError, lambda: a[:3:-1])
    assert_raises(IndexError, lambda: a[:-5:-1])
    assert_raises(IndexError, lambda: a[3:])
    assert_raises(IndexError, lambda: a[-4:])
    assert_raises(IndexError, lambda: a[3::-1])
    assert_raises(IndexError, lambda: a[-4::-1])
    # Same checks applied to the last axis via an ellipsis prefix.
    assert_raises(IndexError, lambda: a[...,:5])
    assert_raises(IndexError, lambda: a[...,:-5])
    assert_raises(IndexError, lambda: a[...,:4:-1])
    assert_raises(IndexError, lambda: a[...,:-6:-1])
    assert_raises(IndexError, lambda: a[...,4:])
    assert_raises(IndexError, lambda: a[...,-5:])
    assert_raises(IndexError, lambda: a[...,4::-1])
    assert_raises(IndexError, lambda: a[...,-5::-1])

    # Boolean indices cannot be part of a larger tuple index
    assert_raises(IndexError, lambda: a[a[:,0]==1,0])
    assert_raises(IndexError, lambda: a[a[:,0]==1,...])
    assert_raises(IndexError, lambda: a[..., a[0]==1])
    assert_raises(IndexError, lambda: a[[True, True, True]])
    assert_raises(IndexError, lambda: a[(True, True, True),])

    # Integer array indices are not allowed (except for 0-D)
    idx = asarray([[0, 1]])
    assert_raises(IndexError, lambda: a[idx])
    assert_raises(IndexError, lambda: a[idx,])
    assert_raises(IndexError, lambda: a[[0, 1]])
    assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
    assert_raises(IndexError, lambda: a[[0, 1]])
    assert_raises(IndexError, lambda: a[np.array([[0, 1]])])

    # np.newaxis is not allowed
    assert_raises(IndexError, lambda: a[None])
    assert_raises(IndexError, lambda: a[None, ...])
    assert_raises(IndexError, lambda: a[..., None])
def test_operators():
    """Exhaustively check dtype acceptance for every array operator.

    For each operator (and its reflected/in-place variants) every required
    dtype combination must succeed and every other combination must raise
    TypeError; matmul is handled separately because its shape rules also
    produce ValueError.
    """
    # For every operator, we test that it works for the required type
    # combinations and raises TypeError otherwise
    # Maps dunder name -> dtype category the spec requires it to accept.
    binary_op_dtypes = {
        "__add__": "numeric",
        "__and__": "integer_or_boolean",
        "__eq__": "all",
        "__floordiv__": "numeric",
        "__ge__": "numeric",
        "__gt__": "numeric",
        "__le__": "numeric",
        "__lshift__": "integer",
        "__lt__": "numeric",
        "__mod__": "numeric",
        "__mul__": "numeric",
        "__ne__": "all",
        "__or__": "integer_or_boolean",
        "__pow__": "floating",
        "__rshift__": "integer",
        "__sub__": "numeric",
        "__truediv__": "floating",
        "__xor__": "integer_or_boolean",
    }

    # Recompute each time because of in-place ops
    def _array_vals():
        # One 0-D array per dtype: 1, False, or 1.0 depending on the kind.
        for d in _integer_dtypes:
            yield asarray(1, dtype=d)
        for d in _boolean_dtypes:
            yield asarray(False, dtype=d)
        for d in _floating_dtypes:
            yield asarray(1.0, dtype=d)

    for op, dtypes in binary_op_dtypes.items():
        ops = [op]
        # Comparisons have no reflected/in-place forms; everything else does.
        if op not in ["__eq__", "__ne__", "__le__", "__ge__", "__lt__", "__gt__"]:
            rop = "__r" + op[2:]
            iop = "__i" + op[2:]
            ops += [rop, iop]
        for s in [1, 1.0, False]:
            for _op in ops:
                for a in _array_vals():
                    # Test array op scalar. From the spec, the following combinations
                    # are supported:
                    # - Python bool for a bool array dtype,
                    # - a Python int within the bounds of the given dtype for integer array dtypes,
                    # - a Python int or float for floating-point array dtypes
                    # We do not do bounds checking for int scalars, but rather use the default
                    # NumPy behavior for casting in that case.
                    if ((dtypes == "all"
                         or dtypes == "numeric" and a.dtype in _numeric_dtypes
                         or dtypes == "integer" and a.dtype in _integer_dtypes
                         or dtypes == "integer_or_boolean" and a.dtype in _integer_or_boolean_dtypes
                         or dtypes == "boolean" and a.dtype in _boolean_dtypes
                         or dtypes == "floating" and a.dtype in _floating_dtypes
                         )
                        # bool is a subtype of int, which is why we avoid
                        # isinstance here.
                        and (a.dtype in _boolean_dtypes and type(s) == bool
                             or a.dtype in _integer_dtypes and type(s) == int
                             or a.dtype in _floating_dtypes and type(s) in [float, int]
                             )):
                        # Only test for no error
                        getattr(a, _op)(s)
                    else:
                        assert_raises(TypeError, lambda: getattr(a, _op)(s))

        # Test array op array.
        for _op in ops:
            for x in _array_vals():
                for y in _array_vals():
                    # See the promotion table in NEP 47 or the array
                    # API spec page on type promotion. Mixed kind
                    # promotion is not defined.
                    if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
                        or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
                        or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
                        or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
                        or x.dtype in _boolean_dtypes and y.dtype not in _boolean_dtypes
                        or y.dtype in _boolean_dtypes and x.dtype not in _boolean_dtypes
                        or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
                        or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
                        ):
                        assert_raises(TypeError, lambda: getattr(x, _op)(y))
                    # Ensure in-place operators only promote to the same dtype as the left operand.
                    elif (
                        _op.startswith("__i")
                        and result_type(x.dtype, y.dtype) != x.dtype
                    ):
                        assert_raises(TypeError, lambda: getattr(x, _op)(y))
                    # Ensure only those dtypes that are required for every operator are allowed.
                    elif (dtypes == "all" and (x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
                                               or x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
                          or (dtypes == "numeric" and x.dtype in _numeric_dtypes and y.dtype in _numeric_dtypes)
                          or dtypes == "integer" and x.dtype in _integer_dtypes and y.dtype in _numeric_dtypes
                          or dtypes == "integer_or_boolean" and (x.dtype in _integer_dtypes and y.dtype in _integer_dtypes
                                                                 or x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes)
                          or dtypes == "boolean" and x.dtype in _boolean_dtypes and y.dtype in _boolean_dtypes
                          or dtypes == "floating" and x.dtype in _floating_dtypes and y.dtype in _floating_dtypes
                          ):
                        getattr(x, _op)(y)
                    else:
                        assert_raises(TypeError, lambda: getattr(x, _op)(y))

    unary_op_dtypes = {
        "__abs__": "numeric",
        "__invert__": "integer_or_boolean",
        "__neg__": "numeric",
        "__pos__": "numeric",
    }
    for op, dtypes in unary_op_dtypes.items():
        for a in _array_vals():
            if (
                dtypes == "numeric"
                and a.dtype in _numeric_dtypes
                or dtypes == "integer_or_boolean"
                and a.dtype in _integer_or_boolean_dtypes
            ):
                # Only test for no error
                getattr(a, op)()
            else:
                assert_raises(TypeError, lambda: getattr(a, op)())

    # Finally, matmul() must be tested separately, because it works a bit
    # different from the other operations.
    def _matmul_array_vals():
        # 0-D values plus compatible/incompatible 2-D shapes for every dtype.
        for a in _array_vals():
            yield a
        for d in _all_dtypes:
            yield ones((3, 4), dtype=d)
            yield ones((4, 2), dtype=d)
            yield ones((4, 4), dtype=d)

    # Scalars always error
    for _op in ["__matmul__", "__rmatmul__", "__imatmul__"]:
        for s in [1, 1.0, False]:
            for a in _matmul_array_vals():
                if (type(s) in [float, int] and a.dtype in _floating_dtypes
                    or type(s) == int and a.dtype in _integer_dtypes):
                    # Type promotion is valid, but @ is not allowed on 0-D
                    # inputs, so the error is a ValueError
                    assert_raises(ValueError, lambda: getattr(a, _op)(s))
                else:
                    assert_raises(TypeError, lambda: getattr(a, _op)(s))

    for x in _matmul_array_vals():
        for y in _matmul_array_vals():
            if (x.dtype == uint64 and y.dtype in [int8, int16, int32, int64]
                or y.dtype == uint64 and x.dtype in [int8, int16, int32, int64]
                or x.dtype in _integer_dtypes and y.dtype not in _integer_dtypes
                or y.dtype in _integer_dtypes and x.dtype not in _integer_dtypes
                or x.dtype in _floating_dtypes and y.dtype not in _floating_dtypes
                or y.dtype in _floating_dtypes and x.dtype not in _floating_dtypes
                or x.dtype in _boolean_dtypes
                or y.dtype in _boolean_dtypes
                ):
                assert_raises(TypeError, lambda: x.__matmul__(y))
                assert_raises(TypeError, lambda: y.__rmatmul__(x))
                assert_raises(TypeError, lambda: x.__imatmul__(y))
            elif x.shape == () or y.shape == () or x.shape[1] != y.shape[0]:
                assert_raises(ValueError, lambda: x.__matmul__(y))
                assert_raises(ValueError, lambda: y.__rmatmul__(x))
                if result_type(x.dtype, y.dtype) != x.dtype:
                    assert_raises(TypeError, lambda: x.__imatmul__(y))
                else:
                    assert_raises(ValueError, lambda: x.__imatmul__(y))
            else:
                x.__matmul__(y)
                y.__rmatmul__(x)
                if result_type(x.dtype, y.dtype) != x.dtype:
                    assert_raises(TypeError, lambda: x.__imatmul__(y))
                elif y.shape[0] != y.shape[1]:
                    # This one fails because x @ y has a different shape from x
                    assert_raises(ValueError, lambda: x.__imatmul__(y))
                else:
                    x.__imatmul__(y)
def test_python_scalar_construtors():
    """bool()/int()/float()/operator.index() succeed only on 0-D arrays of
    the matching dtype.  (The "construtors" typo in the name is kept so the
    test ID stays stable.)"""
    b = asarray(False)
    i = asarray(0)
    f = asarray(0.0)

    assert bool(b) == False
    assert int(i) == 0
    assert float(f) == 0.0
    assert operator.index(i) == 0

    # bool/int/float should only be allowed on 0-D arrays.
    assert_raises(TypeError, lambda: bool(asarray([False])))
    assert_raises(TypeError, lambda: int(asarray([0])))
    assert_raises(TypeError, lambda: float(asarray([0.0])))
    assert_raises(TypeError, lambda: operator.index(asarray([0])))

    # bool/int/float should only be allowed on arrays of the corresponding
    # dtype
    assert_raises(ValueError, lambda: bool(i))
    assert_raises(ValueError, lambda: bool(f))
    assert_raises(ValueError, lambda: int(b))
    assert_raises(ValueError, lambda: int(f))
    assert_raises(ValueError, lambda: float(b))
    assert_raises(ValueError, lambda: float(i))
    assert_raises(TypeError, lambda: operator.index(b))
    assert_raises(TypeError, lambda: operator.index(f))
7959f960e48a7fa830f76a55219700c9a6ed3991 | 1,304 | py | Python | custom/icds_reports/ucr/tests/test_beneficiary_form.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | 1 | 2020-07-14T13:00:23.000Z | 2020-07-14T13:00:23.000Z | custom/icds_reports/ucr/tests/test_beneficiary_form.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | custom/icds_reports/ucr/tests/test_beneficiary_form.py | rochakchauhan/commcare-hq | aa7ab3c2d0c51fe10f2b51b08101bb4b5a376236 | [
"BSD-3-Clause"
] | null | null | null | from mock import patch
from custom.icds_reports.ucr.tests.test_base_form_ucr import BaseFormsTest
# Replace the location lookups with fixed values for the whole test class so
# the data source resolves deterministically without location fixtures.
@patch('custom.icds_reports.ucr.expressions._get_user_location_id',
       lambda user_id: 'qwe56poiuytr4xcvbnmkjfghwerffdaa')
@patch('corehq.apps.locations.ucr_expressions._get_location_type_name',
       lambda loc_id, context: 'supervisor')
class TestAWCMgtForms(BaseFormsTest):
    """Rows emitted by the LS home-visit data source for beneficiary forms,
    with and without an explicit visit type."""

    # UCR data source under test.
    ucr_name = "static-icds-cas-static-ls_home_visit_forms_filled"

    def test_awc_visit_form_with_location_entered(self):
        # Form recorded a visit type: expect it in visit_type_entered.
        self._test_data_source_results(
            'beneficiary_form_with_type_of_visit',
            [{'user_id': 'cee18a35ce4fac591eba966c0d15d599',
              'location_id': 'qwe56poiuytr4xcvbnmkjfghwerffdaa',
              'doc_id': None,
              'visit_type_entered': 'vhnd_day',
              'home_visit_count': 1,
              'submitted_on': None}])

    def test_awc_visit_form_without_location_entered(self):
        # No visit type recorded: visit_type_entered comes through empty.
        self._test_data_source_results(
            'beneficiary_form_without_type_of_visit',
            [{'user_id': 'cee18a35ce4fac591eba966c0d15d599',
              'location_id': 'qwe56poiuytr4xcvbnmkjfghwerffdaa',
              'doc_id': None,
              'visit_type_entered': '',
              'home_visit_count': 1,
              'submitted_on': None}])
7959f9a2765abe135f1a785b808240745bc29024 | 5,429 | py | Python | project/apps/main_site/migrations/0005_auto__chg_field_gutterbumper_work_hrs__chg_field_gutterbumper_friend_h.py | skoczen/the-steven-manual | 20599347ed1618886290c596332738013ac45500 | [
"BSD-2-Clause"
] | null | null | null | project/apps/main_site/migrations/0005_auto__chg_field_gutterbumper_work_hrs__chg_field_gutterbumper_friend_h.py | skoczen/the-steven-manual | 20599347ed1618886290c596332738013ac45500 | [
"BSD-2-Clause"
] | 4 | 2020-02-12T01:09:50.000Z | 2021-06-10T20:34:59.000Z | project/apps/main_site/migrations/0005_auto__chg_field_gutterbumper_work_hrs__chg_field_gutterbumper_friend_h.py | skoczen/the-steven-manual | 20599347ed1618886290c596332738013ac45500 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'GutterBumper.work_hrs'
db.alter_column('main_site_gutterbumper', 'work_hrs', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'GutterBumper.friend_hrs'
db.alter_column('main_site_gutterbumper', 'friend_hrs', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'GutterBumper.sleep_hrs'
db.alter_column('main_site_gutterbumper', 'sleep_hrs', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'GutterBumper.relationship_hrs'
db.alter_column('main_site_gutterbumper', 'relationship_hrs', self.gf('django.db.models.fields.FloatField')(null=True))
# Changing field 'GutterBumper.alone_hrs'
db.alter_column('main_site_gutterbumper', 'alone_hrs', self.gf('django.db.models.fields.FloatField')(null=True))
    def backwards(self, orm):
        """Revert forwards(): narrow the hour columns back to nullable
        integers (fractional hours will be truncated by the database)."""

        # Changing field 'GutterBumper.work_hrs'
        db.alter_column('main_site_gutterbumper', 'work_hrs', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'GutterBumper.friend_hrs'
        db.alter_column('main_site_gutterbumper', 'friend_hrs', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'GutterBumper.sleep_hrs'
        db.alter_column('main_site_gutterbumper', 'sleep_hrs', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'GutterBumper.relationship_hrs'
        db.alter_column('main_site_gutterbumper', 'relationship_hrs', self.gf('django.db.models.fields.IntegerField')(null=True))

        # Changing field 'GutterBumper.alone_hrs'
        db.alter_column('main_site_gutterbumper', 'alone_hrs', self.gf('django.db.models.fields.IntegerField')(null=True))
models = {
'main_site.emotion': {
'Meta': {'object_name': 'Emotion'},
'cause': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'helpful': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'symptoms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'main_site.emotionevent': {
'Meta': {'object_name': 'EmotionEvent'},
'date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'emotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main_site.Emotion']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
},
'main_site.gutterbumper': {
'Meta': {'object_name': 'GutterBumper'},
'alone_hrs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'creativity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 8, 5, 0, 0)'}),
'friend_hrs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'happiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mediated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'morning_mood': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_beers': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'off': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'presence': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'relationship_hrs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'sleep_hrs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'work_hrs': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'worked_out': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main_site.value': {
'Meta': {'object_name': 'Value'},
'explanation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['main_site'] | 61 | 129 | 0.60361 |
7959fa019c0eddbd5ba62ca8fb644fc2ab163018 | 3,411 | py | Python | inst/python/gpt2/interactive_conditional_examples.py | JBX028/gpt2 | 875a97b11e35a6d8b5de8023b071b0fb5a3776d2 | [
"MIT"
] | 25 | 2019-10-13T17:44:22.000Z | 2022-02-16T11:27:06.000Z | inst/python/gpt2/interactive_conditional_examples.py | JBX028/gpt2 | 875a97b11e35a6d8b5de8023b071b0fb5a3776d2 | [
"MIT"
] | 5 | 2019-10-14T08:05:31.000Z | 2021-04-13T14:17:07.000Z | inst/python/gpt2/interactive_conditional_examples.py | JBX028/gpt2 | 875a97b11e35a6d8b5de8023b071b0fb5a3776d2 | [
"MIT"
] | 6 | 2019-10-24T12:06:53.000Z | 2021-11-30T02:34:14.000Z | #!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
    model_name='124M',
    seed=None,
    nsamples=1,
    batch_size=1,
    length=None,
    temperature=1,
    top_k=0,
    top_p=1,
    models_dir='models',
):
    """
    Interactively run the model
    :model_name=124M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=1 : Number of samples to return total
    :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
    :length=None : Number of tokens in generated text, if None (default), is
     determined by model hyperparameters
    :temperature=1 : Float value controlling randomness in boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=1 : Float value passed through to sample.sample_sequence as the
     nucleus (top-p) sampling threshold. 1 (default) means no restriction.
    :models_dir : path to parent folder containing model subfolders
     (i.e. contains the <model_name> folder)
    """
    models_dir = os.path.expanduser(os.path.expandvars(models_dir))
    if batch_size is None:
        batch_size = 1
    # Samples are produced batch_size at a time, so the total must divide evenly.
    assert nsamples % batch_size == 0

    enc = encoder.get_encoder(model_name, models_dir)
    hparams = model.default_hparams()
    with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
        hparams.override_from_dict(json.load(f))

    # Default to half the model's context window; never exceed the window.
    if length is None:
        length = hparams.n_ctx // 2
    elif length > hparams.n_ctx:
        raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)

    with tf.Session(graph=tf.Graph()) as sess:
        context = tf.placeholder(tf.int32, [batch_size, None])
        np.random.seed(seed)
        tf.set_random_seed(seed)
        output = sample.sample_sequence(
            hparams=hparams, length=length,
            context=context,
            batch_size=batch_size,
            temperature=temperature, top_k=top_k, top_p=top_p
        )

        # Restore the latest checkpoint for the chosen model.
        saver = tf.train.Saver()
        ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
        saver.restore(sess, ckpt)

        # REPL: prompt the user, generate nsamples continuations, repeat forever.
        while True:
            raw_text = input("Model prompt >>> ")
            while not raw_text:
                print('Prompt should not be empty!')
                raw_text = input("Model prompt >>> ")
            context_tokens = enc.encode(raw_text)
            generated = 0
            for _ in range(nsamples // batch_size):
                # Slice off the echoed prompt tokens; keep only the continuation.
                out = sess.run(output, feed_dict={
                    context: [context_tokens for _ in range(batch_size)]
                })[:, len(context_tokens):]
                for i in range(batch_size):
                    generated += 1
                    text = enc.decode(out[i])
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
                    print(text)
            print("=" * 80)
if __name__ == '__main__':
    # python-fire exposes interact_model's keyword arguments as CLI flags.
    fire.Fire(interact_model)
| 37.076087 | 89 | 0.635004 |
7959fa096a719e75d0939df7f80f0bc80df08108 | 2,705 | py | Python | tests/unit/test_views.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 3 | 2016-05-03T20:07:25.000Z | 2020-12-22T07:16:11.000Z | tests/unit/test_views.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 17 | 2016-04-26T09:35:42.000Z | 2016-08-18T10:07:40.000Z | tests/unit/test_views.py | OpenPermissions/perch | 36d78994133918f3c52c187f19e50132960a0156 | [
"Apache-2.0"
] | 1 | 2019-05-20T01:40:56.000Z | 2019-05-20T01:40:56.000Z | # -*- coding: utf-8 -*- Copyright 2016 Open Permissions Platform Coalition
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
from perch import views
def test_reference_links_view():
    """An approved organisation emits one (id_type, link-info) row per link."""
    doc = {
        '_id': 'org1',
        'type': 'organisation',
        'state': 'approved',
        'reference_links': {
            'links': {
                'source_id_type1': 'https://example.com',
                'source_id_type2': 'https://example2.com'
            }
        }
    }

    # sorted() accepts any iterable directly; no need to materialize a list first.
    results = sorted(views.reference_links(doc))
    expected = [
        ('source_id_type1', {'organisation_id': 'org1', 'link': 'https://example.com'}),
        ('source_id_type2', {'organisation_id': 'org1', 'link': 'https://example2.com'}),
    ]

    assert results == expected
def test_reference_links_deactivated_view():
    """A deactivated organisation emits no reference-link rows."""
    doc = {
        '_id': 'org1',
        'type': 'organisation',
        'state': 'deactivated',
        'reference_links': {
            'source_id_type1': 'https://example.com',
            'source_id_type2': 'https://example2.com'
        }
    }

    # sorted() accepts any iterable directly; no need to materialize a list first.
    results = sorted(views.reference_links(doc))
    expected = []

    assert results == expected
def test_service_and_repository_view():
    """Each service and repository yields a row keyed by its id."""
    doc = {
        '_id': 'org1',
        'type': 'organisation',
        'services': {
            's1': {
                'name': 'service1'
            },
            's2': {
                'name': 'service2'
            },
        },
        'repositories': {
            'r1': {
                'name': 'repo1'
            },
            'r2': {
                'name': 'repo2'
            }
        }
    }

    # sorted() accepts any iterable directly; no need to materialize a list first.
    results = sorted(views.service_and_repository(doc))
    expected = [
        ('r1', {'id': 'r1', 'organisation_id': 'org1', 'name': 'repo1'}),
        ('r2', {'id': 'r2', 'organisation_id': 'org1', 'name': 'repo2'}),
        ('s1', {'id': 's1', 'organisation_id': 'org1', 'name': 'service1'}),
        ('s2', {'id': 's2', 'organisation_id': 'org1', 'name': 'service2'}),
    ]

    assert results == expected
def test_service_and_repository_view_type():
    """Non-organisation documents emit no service/repository rows."""
    # list() consumes the iterable directly; the identity comprehension was redundant.
    results = list(views.service_and_repository({'type': 'user'}))
    assert results == []
| 30.393258 | 89 | 0.555268 |
7959fa6da59685f6c3d4230a07ec27c118d3f8eb | 805 | py | Python | libact/query_strategies/tests/utils.py | afedyukova/libact | 947f92d42690bfae758180e8809ccd5252e656df | [
"BSD-2-Clause"
] | 788 | 2015-12-17T08:34:50.000Z | 2022-03-30T15:29:29.000Z | libact/query_strategies/tests/utils.py | afedyukova/libact | 947f92d42690bfae758180e8809ccd5252e656df | [
"BSD-2-Clause"
] | 173 | 2015-12-15T17:46:39.000Z | 2021-08-22T17:50:38.000Z | libact/query_strategies/tests/utils.py | afedyukova/libact | 947f92d42690bfae758180e8809ccd5252e656df | [
"BSD-2-Clause"
] | 192 | 2015-12-31T14:37:33.000Z | 2022-03-06T07:45:24.000Z | """This module includes some functions to be reused in query strategy testing.
"""
import numpy as np
def run_qs(trn_ds, qs, truth, quota):
    """Run a query strategy on a dataset and return the querying sequence.

    Parameters
    ----------
    trn_ds : Dataset object
        The dataset to be run on.
    qs : QueryStrategy instance
        The active learning algorithm to be run.
    truth : array-like
        The true labels, indexable by entry id.
    quota : int
        Number of query iterations to run.

    Returns
    -------
    qseq : numpy array, shape (quota,)
        The entry ids in the order they were queried.
    """
    queried_ids = []
    remaining = quota
    while remaining > 0:
        entry_id = qs.make_query()
        trn_ds.update(entry_id, truth[entry_id])
        queried_ids.append(entry_id)
        remaining -= 1
    return np.asarray(queried_ids)
| 22.361111 | 78 | 0.623602 |
7959fa6f97308f69b9b39e22aaf13a016312fbf8 | 5,004 | py | Python | backtest/backtest.py | lorne-luo/qsforex | 87976aef91a3f4dd4ef1d3de17db1b014b46e4d8 | [
"Unlicense",
"MIT"
] | null | null | null | backtest/backtest.py | lorne-luo/qsforex | 87976aef91a3f4dd4ef1d3de17db1b014b46e4d8 | [
"Unlicense",
"MIT"
] | 7 | 2020-02-11T23:56:08.000Z | 2022-02-10T07:35:07.000Z | backtest/backtest.py | lorne-luo/qsforex | 87976aef91a3f4dd4ef1d3de17db1b014b46e4d8 | [
"Unlicense",
"MIT"
] | null | null | null | import logging
from datetime import datetime
import settings
try:
import Queue as queue
except ImportError:
import queue
import time
from strategy import PortfolioAllocation, RSIStrategy
from portfolio import Portfolio
from price import HistoricCSVPriceHandler
class Backtest(object):
"""
Enscapsulates the settings and components for carrying out
an event-driven backtest on the foreign exchange markets.
"""
    def __init__(
        self, pairs, period, token, data_handler, strategy,
        strategy_params, portfolio, equity=10000.0,
        heartbeat=0.0, max_iters=10000000000
    ):
        """
        Initialises the backtest.

        pairs - list of currency pair strings, e.g. ["EUR/USD"].
        period - candle period string forwarded to the price handler.
        token - FXCM API access token forwarded to the price handler.
        data_handler - price handler class; instantiated here with the
            shared event queue.
        strategy - strategy class; instantiated with **strategy_params.
        strategy_params - dict of keyword arguments for the strategy.
        portfolio - portfolio class; instantiated with backtest=True.
        equity - starting account equity (also kept as self.initial
            for the final profit report).
        heartbeat - seconds to sleep after processing each event.
        max_iters - hard cap on event-loop iterations.
        """
        self.pairs = pairs
        self.time = period
        self.token = token
        # Single queue shared by ticker, strategy and portfolio.
        self.events = queue.Queue()
        self.ticker = data_handler(self.pairs, self.time, self.token, self.events)
        self.strategy_params = strategy_params
        self.strategy = strategy(
            self.pairs, self.events, **self.strategy_params
        )
        self.initial = equity
        self.equity = equity
        self.heartbeat = heartbeat
        self.max_iters = max_iters
        self.portfolio = portfolio(self.ticker, self.events, backtest=True, equity=self.equity)
        print('Equity=%s' % self.portfolio.equity)
    def _run_backtest(self):
        """
        Polls the event queue and dispatches each event to the strategy
        (TICK) or the portfolio (TICK/SIGNAL); when the queue is empty it
        advances the simulated price feed instead. Pauses "heartbeat"
        seconds after each processed event and stops once max_iters is
        reached or the ticker signals end of data, then closes all open
        positions and prints a summary.
        """
        print("Running Backtest...")
        iters = 0
        signal_count = 0
        while iters < self.max_iters and self.ticker.continue_backtest:
            try:
                event = self.events.get(False)
            except queue.Empty:
                # No pending events: advance the simulated price feed.
                self.ticker.stream_next_tick()
            else:
                if event is not None:
                    if event.type == 'TICK':
                        # print(event)
                        self.strategy.calculate_signals(event)
                        self.portfolio.update_portfolio(event)
                    elif event.type == 'SIGNAL':
                        # print('[%s] %s' % (
                        #     datetime.utcfromtimestamp(event.time).strftime('%Y-%m-%d %H:%M'), event.__dict__))
                        signal_count = signal_count + 1
                        self.portfolio.execute_signal(event)
                    elif event.type == 'ORDER':
                        # print(event.__dict__)
                        pass
                # Heartbeat and iteration count apply only when an event was polled,
                # not when the queue was empty.
                time.sleep(self.heartbeat)
                iters += 1
        else:
            # while-else: runs when the loop condition goes false (there is no
            # break in the loop body, so this always executes). Flatten any
            # remaining open positions.
            for pair in self.pairs:
                self.portfolio.close_position(pair)

        # print("Signal Sent: %d" % signal_count)
        print("Closed Trades: %d" % self.portfolio.trade_count)
        print("win Executed: %d" % self.portfolio.win_count)
        print("loss Executed: %d" % self.portfolio.loss_count)
        print("Win rate: %0.2f%% vs %0.2f%%" % (self.portfolio.get_win_rate(), self.portfolio.get_loss_rate()))
        print('Initial Equity: %s' % self.initial)
        print("Portfolio Balance: %0.2f" % self.portfolio.balance)
        print("Profit: %0.2f" % (self.portfolio.balance - self.initial))
        percentage = (self.portfolio.balance / self.initial - 1) * 100
        print("Portfolio Profit: %0.2f" % percentage + '%')
if __name__ == "__main__":
    # Set up logging
    logging.basicConfig(filename='backtest.log', level=logging.INFO, filemode='w+')
    logger = logging.getLogger('RestMeanReversion.backtest.log')

    fxcm_token = settings.FXCM_ACCESS_TOKEN

    # Enter the pair(s) you would like to trade as a list
    # Pick from {'EUR/USD','USD/JPY','GBP/USD','USD/CHF','EUR/CHF','AUD/USD','USD/CAD','NZD/USD',
    #            'EUR/GBP','EUR/JPY','GBP/JPY','CHF/JPY','GBP/CHF','EUR/AUD','EUR/CAD','AUD/CAD',
    #            'AUD/JPY','CAD/JPY','NZD/JPY','GBP/CAD','GBP/NZD','GBP/AUD','AUD/NZD','USD/SEK',
    #            'EUR/SEK','EUR/NOK','USD/NOK','USD/MXN','AUD/CHF','EUR/NZD','USD/ZAR','USD/HKD',
    #            'ZAR/JPY','USD/TRY','EUR/TRY','NZD/CHF','CAD/CHF','TRY/JPY','USD/CNH','USDOLLAR'}
    pairs = ["EUR/USD"]

    # Enter the time period you would like
    # Pick from {'m1','m5','m15','m30','H1','H2','H3','H4','H6','H8','D1','W1','M1'}
    period = 'H1'

    # Create the strategy parameters if necessary
    # NOTE: strategy_params (allocation targets) is defined but unused below;
    # the Backtest is constructed with strategy_params2 for RSIStrategy.
    strategy_params = {
        "equity": 10000,
        "pair_targets": [40, 30]
    }
    strategy_params2 = {
        'window': 14
    }

    # Create and execute the backtest
    backtest = Backtest(
        pairs, period, fxcm_token, HistoricCSVPriceHandler, RSIStrategy,
        strategy_params2, Portfolio
    )
    backtest._run_backtest()
| 39.714286 | 112 | 0.571343 |
7959fa72e1ee66bafe7f471dda1c67f02ead8a62 | 3,596 | py | Python | sdk/python/pulumi_azure_native/devspaces/v20190401/outputs.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devspaces/v20190401/outputs.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/devspaces/v20190401/outputs.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ControllerConnectionDetailsResponse',
'KubernetesConnectionDetailsResponse',
'SkuResponse',
]
@pulumi.output_type
class ControllerConnectionDetailsResponse(dict):
    """Connection details output for an Azure Dev Spaces controller,
    wrapping the orchestrator-specific connection information."""
    def __init__(__self__, *,
                 orchestrator_specific_connection_details: Optional['outputs.KubernetesConnectionDetailsResponse'] = None):
        """
        :param 'KubernetesConnectionDetailsResponse' orchestrator_specific_connection_details: Base class for types that supply values used to connect to container orchestrators
        """
        # Omit the key entirely when no details were provided.
        if orchestrator_specific_connection_details is not None:
            pulumi.set(__self__, "orchestrator_specific_connection_details", orchestrator_specific_connection_details)

    @property
    @pulumi.getter(name="orchestratorSpecificConnectionDetails")
    def orchestrator_specific_connection_details(self) -> Optional['outputs.KubernetesConnectionDetailsResponse']:
        """
        Base class for types that supply values used to connect to container orchestrators
        """
        return pulumi.get(self, "orchestrator_specific_connection_details")
@pulumi.output_type
class KubernetesConnectionDetailsResponse(dict):
    """
    Contains information used to connect to a Kubernetes cluster
    """
    def __init__(__self__, *,
                 instance_type: str,
                 kube_config: Optional[str] = None):
        """
        Contains information used to connect to a Kubernetes cluster
        :param str instance_type: Gets the Instance type.
               Expected value is 'Kubernetes'.
        :param str kube_config: Gets the kubeconfig for the cluster.
        """
        # instance_type is a discriminator; this type always stores 'Kubernetes'
        # regardless of the value passed in.
        pulumi.set(__self__, "instance_type", 'Kubernetes')
        if kube_config is not None:
            pulumi.set(__self__, "kube_config", kube_config)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the Instance type.
        Expected value is 'Kubernetes'.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="kubeConfig")
    def kube_config(self) -> Optional[str]:
        """
        Gets the kubeconfig for the cluster.
        """
        return pulumi.get(self, "kube_config")
@pulumi.output_type
class SkuResponse(dict):
    """
    Model representing SKU for Azure Dev Spaces Controller.
    """
    def __init__(__self__, *,
                 name: str,
                 tier: Optional[str] = None):
        """
        Model representing SKU for Azure Dev Spaces Controller.
        :param str name: The name of the SKU for Azure Dev Spaces Controller.
        :param str tier: The tier of the SKU for Azure Dev Spaces Controller.
        """
        pulumi.set(__self__, "name", name)
        # tier is optional; the key is omitted when not supplied.
        if tier is not None:
            pulumi.set(__self__, "tier", tier)

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the SKU for Azure Dev Spaces Controller.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def tier(self) -> Optional[str]:
        """
        The tier of the SKU for Azure Dev Spaces Controller.
        """
        return pulumi.get(self, "tier")
| 33.296296 | 177 | 0.658788 |
7959fac14bd6b2459061e432a84c13dc75c931fa | 1,469 | py | Python | exchange_api.py | jeremyhahn/altcoin-autosell | 72afea4378cb45a5110de015a933d6a8ec24fd7b | [
"Apache-2.0"
] | 1 | 2017-09-29T03:24:29.000Z | 2017-09-29T03:24:29.000Z | exchange_api.py | jeremyhahn/altcoin-autosell | 72afea4378cb45a5110de015a933d6a8ec24fd7b | [
"Apache-2.0"
] | null | null | null | exchange_api.py | jeremyhahn/altcoin-autosell | 72afea4378cb45a5110de015a933d6a8ec24fd7b | [
"Apache-2.0"
] | null | null | null | # An exception that any methods in exchange may raise.
class ExchangeException(Exception):
    """Exception that any Exchange method may raise.

    The previous __init__(self, message) merely forwarded to
    Exception.__init__, which is exactly what the base class already does,
    so the redundant override is removed. Behavior is unchanged:
    str(ExchangeException(msg)) == msg and .args == (msg,).
    """
# An available market.
class Market(object):
    """A tradable currency pair on an exchange.

    Instances carry the exchange-assigned ids of the source and target
    currencies, the market id, and the minimum tradable amount.
    """
    # Class-level defaults (always overwritten per instance in __init__).
    source_currency_id = None
    target_currency_id = None
    market_id = None
    # NOTE(review): this class-level default (1e-8) disagrees with the
    # __init__ default below (1e-7) — confirm which minimum is intended.
    trade_minimum = 0.00000001

    def __init__(self, source_currency_id, target_currency_id, market_id,
                 trade_minimum=0.0000001):
        self.source_currency_id = source_currency_id
        self.target_currency_id = target_currency_id
        self.market_id = market_id
        self.trade_minimum = trade_minimum
# A base class for Exchanges.
class Exchange(object):
    """Abstract interface for a cryptocurrency exchange.

    Subclasses implement each method against a concrete exchange API;
    every method here raises NotImplementedError.
    """
    # The name of the exchange.
    name = ''

    # Returns a dict of currency_name to currency_id, e.g.
    # {
    #   'BTC' : 1,
    #   'LTC' : 2,
    #   'DOGE': 12,
    #   '42': 15,
    # }
    def GetCurrencies(self):
        raise NotImplementedError

    # Returns a dict of currency_id to balance, e.g.
    # {
    #   12: 173.23,
    #   13: 19,347,
    # }
    def GetBalances(self):
        raise NotImplementedError

    # Returns an array of Markets.
    def GetMarkets(self):
        raise NotImplementedError

    # Creates an order (market order if price is 0).
    # If 'bid' is True, this is a bid/buy order, otherwise an ask/sell order.
    # Returns an order_id.
    def CreateOrder(self, market_id, amount, bid=True, price=0):
        raise NotImplementedError
| 27.203704 | 77 | 0.646018 |
7959fb20bcb51b5a49a89fc495c310bf8a9be5ae | 224 | py | Python | Data Scientist Career Path/5. Data Manipulation with Pandas/4. Multiple Tables in Pandas/8. outer.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/5. Data Manipulation with Pandas/4. Multiple Tables in Pandas/8. outer.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/5. Data Manipulation with Pandas/4. Multiple Tables in Pandas/8. outer.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | import codecademylib
import pandas as pd
# Load the two store inventories from CSV.
store_a = pd.read_csv('store_a.csv')
print(store_a)
store_b = pd.read_csv('store_b.csv')
print(store_b)

# Outer merge keeps rows present in either store, filling gaps with NaN.
store_a_b_outer = store_a.merge(store_b, how='outer')
print(store_a_b_outer) | 20.363636 | 53 | 0.78125 |
7959fcbf7e78b34dfc97dee347500831c8fe486f | 262 | py | Python | venv/lib/python3.8/site-packages/nbclient/tests/conftest.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 92 | 2020-01-28T19:50:40.000Z | 2022-03-29T07:31:13.000Z | venv/lib/python3.8/site-packages/nbclient/tests/conftest.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 175 | 2020-01-27T01:09:27.000Z | 2022-03-31T15:51:32.000Z | venv/lib/python3.8/site-packages/nbclient/tests/conftest.py | johncollinsai/post-high-frequency-data | 88533b0e0afc7e7f82fee1d3ca4b68abc30aaeb4 | [
"MIT"
] | 49 | 2020-01-26T22:17:59.000Z | 2022-03-31T16:02:43.000Z | import os
# This is important for ipykernel to show the same string
# instead of randomly generated file names in outputs.
# See: https://github.com/ipython/ipykernel/blob/360685c6/ipykernel/compiler.py#L50-L55
# Set at import time so it is in place before any kernel is started by the tests.
os.environ["IPYKERNEL_CELL_NAME"] = "<IPY-INPUT>"
| 37.428571 | 87 | 0.778626 |
7959fd472fc134ded7fd78efce0e5a057b2796bd | 4,894 | py | Python | pyapprox/tests/test_indexing.py | QianWanghhu/pyapprox | 14754f987868de827a968d07486e733cde4276bf | [
"MIT"
] | null | null | null | pyapprox/tests/test_indexing.py | QianWanghhu/pyapprox | 14754f987868de827a968d07486e733cde4276bf | [
"MIT"
] | null | null | null | pyapprox/tests/test_indexing.py | QianWanghhu/pyapprox | 14754f987868de827a968d07486e733cde4276bf | [
"MIT"
] | 1 | 2020-04-01T05:26:29.000Z | 2020-04-01T05:26:29.000Z | import unittest
from pyapprox.indexing import *
from functools import partial
from scipy.special import binom
class TestIndexing(unittest.TestCase):
    def test_nchoosek(self):
        """nchoosek agrees with scipy's binomial coefficient."""
        assert nchoosek(3, 2) == binom(3, 2)
    def test_compute_hyperbolic_indices(self):
        """Check index-set sizes/bounds for p=1 (total degree) and p<1."""
        # p = 1: total-degree space has C(num_vars+level, num_vars) indices.
        num_vars = 3
        level = 3
        p = 1.0
        indices = compute_hyperbolic_indices(num_vars, level, p)
        assert indices.shape[1] == nchoosek(num_vars+level, num_vars)

        # p < 1: every index satisfies the hyperbolic-cross p-norm bound.
        num_vars = 4
        level = 3
        p = 0.5
        indices = compute_hyperbolic_indices(num_vars, level, p)
        assert np.all(np.sum(indices**p, axis=0)**(1.0/float(p)) <= level)
def test_set_difference_1d_array(self):
num_vars = 2
level1 = 1
p = 1.0
indices1 = np.arange(0, 10, 2)
indices2 = np.arange(10)
indices = set_difference(indices1, indices2)
true_indices = np.arange(1, 10, 2)
assert np.allclose(indices, true_indices)
    def test_set_difference_2d_array(self):
        """set_difference on 2d index sets: degree-2 minus degree-1 leaves
        exactly the three total-degree-2 multi-indices."""
        num_vars = 2
        level1 = 1
        p = 1.0
        indices1 = compute_hyperbolic_indices(num_vars, level1, p)
        level2 = 2
        indices2 = compute_hyperbolic_indices(num_vars, level2, p)
        indices = set_difference(indices1, indices2)
        true_indices = np.asarray([[2, 0], [0, 2], [1, 1]]).T
        assert np.allclose(indices, true_indices)
    def test_argsort_indices_leixographically(self):
        """Sorting permutation orders multi-indices by total degree then
        lexicographic order, checked for 2 and 3 variables."""
        num_vars = 2
        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        sorted_idx = argsort_indices_leixographically(indices)
        sorted_indices = indices[:, sorted_idx]
        true_sorted_indices = np.array(
            [[0, 0], [0, 1], [1, 0], [0, 2], [1, 1], [2, 0]]).T
        assert np.allclose(sorted_indices, true_sorted_indices)

        num_vars = 3
        degree = 2
        indices = compute_hyperbolic_indices(num_vars, degree, 1.0)
        sorted_idx = argsort_indices_leixographically(indices)
        sorted_indices = indices[:, sorted_idx]
        true_sorted_indices = np.array(
            [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 2],
             [0, 1, 1], [0, 2, 0], [1, 0, 1], [1, 1, 0], [2, 0, 0]]).T
        assert np.allclose(sorted_indices, true_sorted_indices)
    def test_compute_downward_closed_indices(self):
        """Downward-closed construction matches direct hyperbolic/anisotropic
        index generation for three admissibility criteria."""
        # Total-degree criterion reproduces the p=1 hyperbolic set.
        num_vars, degree = [2, 5]
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, partial(total_degree_admissibility_criteria, degree))
        total_degree_indices = compute_hyperbolic_indices(num_vars, degree, 1)
        assert np.allclose(sort_indices_lexiographically(total_degree_indices),
                           sort_indices_lexiographically(downward_closed_indices))

        # p-norm criterion reproduces the p=0.4 hyperbolic set.
        num_vars, degree = [5, 5]
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, partial(pnorm_admissibility_criteria, degree, 0.4))
        pnorm_indices = compute_hyperbolic_indices(num_vars, degree, 0.4)
        assert np.allclose(sort_indices_lexiographically(pnorm_indices),
                           sort_indices_lexiographically(downward_closed_indices))

        # Anisotropic criterion reproduces the weighted index set.
        num_vars, degree = [2, 5]
        anisotropic_weights = np.asarray([1, 2])
        min_weight = np.asarray(anisotropic_weights).min()
        admissibility_criteria = partial(
            anisotropic_admissibility_criteria, anisotropic_weights, min_weight,
            degree)
        downward_closed_indices = compute_downward_closed_indices(
            num_vars, admissibility_criteria)
        anisotropic_indices = compute_anisotropic_indices(
            num_vars, degree, anisotropic_weights)
        assert np.allclose(sort_indices_lexiographically(anisotropic_indices),
                           sort_indices_lexiographically(downward_closed_indices))
def test_get_upper_triangular_matrix_scalar_index(self):
ii, jj, nn = 0, 1, 3
index = get_upper_triangular_matrix_scalar_index(ii, jj, nn)
assert index == 0
ii, jj, nn = 0, 2, 3
index = get_upper_triangular_matrix_scalar_index(ii, jj, nn)
assert index == 1
ii, jj, nn = 1, 2, 3
index = get_upper_triangular_matrix_scalar_index(ii, jj, nn)
assert index == 2
def test_get_upper_triangular_matrix_indices(self):
kk, nn = 0, 3
ii, jj = get_upper_triangular_matrix_indices(kk, nn)
assert (ii, jj) == (0, 1)
kk, nn = 1, 3
ii, jj = get_upper_triangular_matrix_indices(kk, nn)
assert (ii, jj) == (0, 2)
kk, nn = 2, 3
ii, jj = get_upper_triangular_matrix_indices(kk, nn)
assert (ii, jj) == (1, 2)
if __name__ == '__main__':
    # Allow running this module directly with verbose unittest output.
    indexing_test_suite = unittest.TestLoader().loadTestsFromTestCase(
        TestIndexing)
    unittest.TextTestRunner(verbosity=2).run(indexing_test_suite)
| 37.646154 | 82 | 0.641398 |
7959fddf87caa036fb94f58fc809cbba7ce43962 | 29,558 | py | Python | python/examples/beacon/beacon_record.py | w2naf/digital_rf | 482608dcc5608b9d9a0aacf77e75f83edbec1f0e | [
"BSD-3-Clause"
] | null | null | null | python/examples/beacon/beacon_record.py | w2naf/digital_rf | 482608dcc5608b9d9a0aacf77e75f83edbec1f0e | [
"BSD-3-Clause"
] | null | null | null | python/examples/beacon/beacon_record.py | w2naf/digital_rf | 482608dcc5608b9d9a0aacf77e75f83edbec1f0e | [
"BSD-3-Clause"
] | 1 | 2020-08-03T07:49:22.000Z | 2020-08-03T07:49:22.000Z | #!python
# ----------------------------------------------------------------------------
# Copyright (c) 2017 Massachusetts Institute of Technology (MIT)
# All rights reserved.
#
# Distributed under the terms of the BSD 3-clause license.
#
# The full license is in the LICENSE file, distributed with this software.
# ----------------------------------------------------------------------------
"""Record beacon signals of specified satellites in Digital RF.
Satellite and recording parameters are specified in .ini configuration files.
Example configurations are included along with this script.
"""
from __future__ import absolute_import, division, print_function
import datetime
import math
import optparse
import os
import string
import subprocess
import sys
import time
import traceback
import dateutil.parser
import ephem
import numpy
import pytz
from digital_rf import DigitalMetadataWriter
from six.moves import configparser
class ExceptionString(Exception):
    """ Simple exception handling string.

    str() yields the repr of the first constructor argument. Guarded so an
    argument-less instance stringifies to '' instead of raising IndexError
    (the original indexed self.args[0] unconditionally).
    """
    def __str__(self):
        return repr(self.args[0]) if self.args else ""
def doppler_shift(frequency, relativeVelocity, speedOfLight=3e8):
    """
    DESCRIPTION:
        Calculates the Doppler-shifted frequency f' = f - f*(v/c) for a
        signal of the given frequency and relative velocity.
    INPUTS:
        frequency (float) = satellite's beacon frequency in Hz
        relativeVelocity (float) = velocity at which the satellite is moving
                                   towards or away from the observer in m/s
        speedOfLight (float) = propagation speed in m/s; defaults to 3e8,
                               the value the original implementation
                               hard-coded (backward compatible)
    RETURNS:
        Param1 (float) = The frequency experienced due to doppler shift in Hz
    AFFECTS:
        None
    EXCEPTIONS:
        None
    DEPENDENCIES:
        None

    Note: relativeVelocity is positive when moving away from the observer
          and negative when moving towards
    """
    return frequency - frequency * (relativeVelocity / speedOfLight)
def satellite_rise_and_set(opt, obsLat, obsLong, obsElev, objName, tle1, tle2, startDate):
    """Finds the next rise, transit, and set times after startDate.

    Args:
        opt: Parsed command line options (only opt.debug is used here).
        obsLat: Observer latitude in degrees, as a string.
        obsLong: Observer longitude in degrees, as a string.
        obsElev: Observer elevation in meters.
        objName: Name of the satellite.
        tle1: First line of the TLE.
        tle2: Second line of the TLE.
        startDate: Date (string or ephem.date) to search forward from.

    Returns:
        Tuple (rise time, transit time, set time) as ephem dates.
    """
    observer = ephem.Observer()
    observer.lat = obsLat
    observer.long = obsLong
    observer.elev = obsElev
    observer.date = startDate

    if opt.debug:
        print('dbg location: ', observer)
        print('dbg tle1: ', tle1)
        print('dbg tle2: ', tle2)

    satellite = ephem.readtle(objName, tle1, tle2)
    if opt.debug:
        print('dbg object: ', satellite)

    # Compute the satellite position relative to the observer, then ask
    # ephem for the next pass after observer.date.
    satellite.compute(observer)
    pass_info = observer.next_pass(satellite)

    # next_pass returns (rise time, rise azimuth, transit time, transit
    # altitude, set time, set azimuth); keep only the three times.
    return (pass_info[0], pass_info[2], pass_info[4])
def satellite_values_at_time(opt, obsLat, obsLong, obsElev, objName, tle1, tle2, date):
    """Computes the satellite's geometry relative to the observer at a date.

    Args:
        opt: Parsed command line options (only opt.debug is used here).
        obsLat: Observer latitude in degrees, as a string.
        obsLong: Observer longitude in degrees, as a string.
        obsElev: Observer elevation in meters.
        objName: Name of the satellite.
        tle1: First line of the TLE.
        tle2: Second line of the TLE.
        date: Date (string or ephem.date) at which to evaluate.

    Returns:
        Tuple (sub-latitude, sub-longitude, range in m, range velocity in
        m/s, azimuth, altitude angle, right ascension, declination,
        elevation above sea level in m) as reported by ephem.
    """
    observer = ephem.Observer()
    observer.lat = obsLat
    observer.long = obsLong
    observer.elev = obsElev
    observer.date = date

    satellite = ephem.readtle(objName, tle1, tle2)
    satellite.compute(observer)

    if opt.debug:
        print("\tLatitude: %s, Longitude %s, Range: %gm, Range Velocity: %gm/s" % (satellite.sublat, satellite.sublong, satellite.range, satellite.range_velocity))
        print("\tAzimuth: %s, Altitude: %s, Elevation: %gm" % (satellite.az, satellite.alt, satellite.elevation))
        print("\tRight Ascention: %s, Declination: %s" % (satellite.ra, satellite.dec))

    return (satellite.sublat, satellite.sublong, satellite.range,
            satellite.range_velocity, satellite.az, satellite.alt,
            satellite.ra, satellite.dec, satellite.elevation)
def max_satellite_bandwidth(opt, obsLat, obsLong, obsElev, objName, tle1, tle2, startDate, endDate, interval, beaconFreq):
    """Finds the largest Doppler excursion over all passes in a date range.

    Walks every rise/set cycle between startDate and endDate and keeps the
    maximum absolute Doppler offset seen in any pass.

    Args:
        opt: Parsed command line options.
        obsLat: Observer latitude in degrees, as a string.
        obsLong: Observer longitude in degrees, as a string.
        obsElev: Observer elevation in meters.
        objName: Name of the satellite.
        tle1: First line of the TLE.
        tle2: Second line of the TLE.
        startDate: Date/time at which to find the first cycle.
        endDate: Date/time at which to stop looking for cycles.
        interval: Sampling interval in seconds within each pass.
        beaconFreq: Beacon frequency in Hz.

    Returns:
        The maximum absolute Doppler bandwidth in Hz, or 0 if there is no
        valid pass in the range.
    """
    maxBandwidth = 0
    (satRise, satTransit, satSet) = satellite_rise_and_set(
        opt, obsLat, obsLong, obsElev, objName, tle1, tle2, startDate)
    if satRise == satTransit == satSet:
        return 0

    while satRise < endDate:
        (objBandwidth, shiftedFrequencies) = satellite_bandwidth(
            opt, obsLat, obsLong, obsElev, objName, tle1, tle2, satRise,
            satSet, interval, beaconFreq)
        # Bug fix: satellite_bandwidth returns a numpy array of per-sample
        # Doppler offsets; the original compared the raw array against a
        # scalar inside `if`, which raises ValueError for arrays with more
        # than one element.  Reduce to the pass's peak absolute excursion.
        if len(objBandwidth):
            peak = float(numpy.max(numpy.abs(objBandwidth)))
            if peak > maxBandwidth:
                maxBandwidth = peak
        # Look for the next pass a little after this set time.
        (satRise, satTransit, satSet) = satellite_rise_and_set(
            opt, obsLat, obsLong, obsElev, objName, tle1, tle2,
            satSet + ephem.minute*5.0)

    return maxBandwidth
def satellite_bandwidth(opt, obsLat, obsLong, obsElev, objName, tle1, tle2, satRise, satSet, interval, beaconFreq):
    """Samples the Doppler shift of a beacon across one rise/set pass.

    Args:
        opt: Parsed command line options (only opt.debug is used here).
        obsLat: Observer latitude in degrees, as a string.
        obsLong: Observer longitude in degrees, as a string.
        obsElev: Observer elevation in meters.
        objName: Name of the satellite.
        tle1: First line of the TLE.
        tle2: Second line of the TLE.
        satRise: Rise time of the pass (ephem.date).
        satSet: Set time of the pass (ephem.date).
        interval: Sampling interval in seconds.
        beaconFreq: Beacon frequency in Hz (scalar or numpy array).

    Returns:
        Tuple of (numpy array of Doppler offsets from the beacon frequency,
        numpy array of absolute Doppler-shifted frequencies), one entry per
        sample.
    """
    currTime = satRise
    dopplerFrequencies = []
    dopplerBandwidth = []

    if opt.debug:
        print('satellite_bandwidth ', currTime, satSet, interval)

    # Bug fix: compare the full ephem dates (days since the ephem epoch)
    # instead of only the fractional-day field of triple(); the original
    # comparison broke for passes that straddle UTC midnight.
    while currTime < satSet:
        try:
            (sublat, sublong, range_val, range_velocity, az, alt, ra, dec,
             elevation) = satellite_values_at_time(
                 opt, obsLat, obsLong, obsElev, objName, tle1, tle2,
                 currTime)
            dopplerFreq = doppler_shift(beaconFreq, range_velocity)
            dopplerFrequencies.append(dopplerFreq)
            dopplerBandwidth.append(dopplerFreq - beaconFreq)
        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)
        # Bug fix: advance the clock outside the try block so a persistent
        # ephem failure cannot stall the loop forever.
        currTime = ephem.date(currTime + ephem.second*interval)

    if opt.debug:
        print('# DF:', numpy.array(dopplerFrequencies) / 1e6, ' MHz')
        print('# OB:', numpy.array(dopplerBandwidth) / 1e3, ' kHz')

    return (numpy.array(dopplerBandwidth), numpy.array(dopplerFrequencies))
def __read_config__(inifile):
    """Parses an .ini configuration file into nested dictionaries.

    Args:
        inifile: Path of the configuration file to read.

    Returns:
        A dict keyed by [Section] name; each value is a dict mapping the
        section's option names to their raw string values.  For object
        configs the options include ObsLat, ObsLong, ObsElev, ObjName,
        TLE1, TLE2, BeaconFreq, StartDate and Interval; quoted values
        (e.g. TLE lines) are returned with their quotes intact.

    Example object section::

        [PROPCUBE_MERRY]
        ObsLat=42.623108
        ObsLong=-71.489069
        ObsElev=150.0
        ObjName="PROPCUBE_MERRY"
        TLE1="1 90735U 16074.41055570 ..."
        TLE2="2 90735 064.7823 174.3149 ..."
        BeaconFreq=[380.0e6,2.38e9]
    """
    print("# loading config ", inifile)
    parser = configparser.ConfigParser()
    parser.read(inifile)
    # Flatten the parser into plain dictionaries, one per section.
    return {section: {option: parser.get(section, option)
                      for option in parser.options(section)}
            for section in parser.sections()}
def get_next_object(opt, site, objects, ctime):
    """Picks the configured object whose next pass rises soonest.

    Not too efficient but works for now: computes the next rise/set for
    every object and returns the key of the earliest riser.

    Args:
        opt: Parsed command line options.
        site: Site configuration dict from __read_config__.
        objects: Object configuration dict from __read_config__.
        ctime: Unix timestamp to search forward from.

    Returns:
        The key in `objects` of the object with the earliest valid rise.
    """
    rise_list = {}
    for obj in objects:
        obj_id = obj
        obj_info = objects[obj]

        if opt.debug:
            print("# object ", obj_id, " @ ", ctime)
            print("# obj_info", obj_info)

        obs_lat = site['site']['latitude']
        obs_long = site['site']['longitude']
        obs_elev = float(site['site']['elevation'])
        obj_name = obj_info['name']
        # TLE values are stored quoted in the .ini file; strip the quotes.
        obj_tle1 = obj_info['tle1'][1:-1]
        obj_tle2 = obj_info['tle2'][1:-1]
        # Note: the original also parsed obj_info['frequencies'] here via
        # the Python-2-only string.split(); the result was unused, so the
        # parse has been dropped (fixes Python 3 compatibility).

        c_dtime = datetime.datetime.utcfromtimestamp(ctime)
        c_ephem_time = ephem.Date(c_dtime)

        (sat_rise, sat_transit, sat_set) = satellite_rise_and_set(
            opt, obs_lat, obs_long, obs_elev, obj_name, obj_tle1, obj_tle2,
            c_ephem_time)
        # Skip degenerate passes (rise/transit/set out of order).
        if sat_set <= sat_rise or sat_transit <= sat_rise or sat_set <= sat_transit:
            continue
        rise_list[sat_rise] = obj

    if opt.debug:
        print(' rise list : ', rise_list)
    keys = list(rise_list.keys())
    if opt.debug:
        print(' rise keys : ', keys)
    keys.sort()
    if opt.debug:
        print(' sorted : ', keys)
        print(' selected : ', rise_list[keys[0]])
    # NOTE(review): raises IndexError if no object has a valid upcoming
    # pass; callers currently assume at least one configured object rises.
    return rise_list[keys[0]]
def ephemeris_passes(opt, st0, et0):
    """Finds and records satellite passes between two times.

    For each upcoming pass, prints the pass geometry, and (with
    opt.schedule) builds the recorder command lines; with opt.foreground it
    also waits for the pass, writes Digital Metadata, and executes the
    recorder command.

    Fixes relative to the original:
      * all uses of the global `op` now use the `opt` parameter,
      * Python-2-only string.split() calls replaced by str.split(),
      * deprecated numpy.float replaced by the builtin float,
      * a degenerate pass now advances ctime before continuing so it
        cannot stall the loop.

    Args:
        opt: Parsed command line options.
        st0: Unix time start time.
        et0: Unix time end time.

    Returns:
        None.  Prints all the passes as a side effect.
    """
    passes = {}

    objects = __read_config__(opt.config)
    site = __read_config__(opt.site)

    if opt.verbose:
        print("# got objects ", objects)
        print("# got radio site ", site)
        print("\n")

    ctime = st0
    etime = et0
    last_sat_rise = ctime

    while ctime < etime:
        obj = get_next_object(opt, site, objects, ctime)
        obj_id = obj
        obj_info = objects[obj]

        if opt.debug:
            print("# object ", obj_id, " @ ", ctime)
            print("# obj_info", obj_info)

        site_name = site['site']['name']
        site_tag = site['site']['tag']
        obs_lat = site['site']['latitude']
        obs_long = site['site']['longitude']
        obs_elev = float(site['site']['elevation'])
        obj_name = obj_info['name']
        # TLE strings are stored quoted in the .ini file; strip the quotes.
        obj_tle1 = obj_info['tle1'][1:-1]
        obj_tle2 = obj_info['tle2'][1:-1]
        # Python 3 fix: str.split instead of the removed string.split.
        obj_freqs = numpy.array(obj_info['frequencies'].split(','),
                                numpy.float32)

        c_dtime = datetime.datetime.utcfromtimestamp(ctime)
        c_ephem_time = ephem.Date(c_dtime)

        try:
            (sat_rise, sat_transit, sat_set) = satellite_rise_and_set(
                opt, obs_lat, obs_long, obs_elev, obj_name, obj_tle1,
                obj_tle2, c_ephem_time)

            if (sat_set <= sat_rise or sat_transit <= sat_rise or
                    sat_set <= sat_transit):
                # Degenerate pass: advance the clock before continuing so a
                # bad object cannot stall the loop forever (the original
                # continued without advancing ctime).
                ctime = ctime + 60 * opt.interval
                continue

            if not last_sat_rise == sat_rise:
                (sub_lat, sub_long, sat_range, sat_velocity, az, el, ra, dec,
                 alt) = satellite_values_at_time(
                     opt, obs_lat, obs_long, obs_elev, obj_name, obj_tle1,
                     obj_tle2, sat_transit)
                # Bug fix: the sampling interval lives on 'opt' (the
                # function argument), not the global 'op'.
                (obj_bandwidth, obj_doppler) = satellite_bandwidth(
                    opt, obs_lat, obs_long, obs_elev, obj_name, obj_tle1,
                    obj_tle2, sat_rise, sat_set, opt.interval, obj_freqs)
                last_sat_rise = sat_rise

                if opt.debug:
                    print("time : ", c_ephem_time, sat_set,
                          (sat_set - c_ephem_time)*60*60*24)

                # Jump past this pass (ephem date differences are in days).
                ctime = ctime + (sat_set - c_ephem_time)*60*60*24

                if opt.el_mask:
                    el_val = numpy.rad2deg(el)
                    # numpy.float is a deprecated alias of float.
                    el_mask = float(opt.el_mask)

                    if opt.debug:
                        print('# el_val ', el_val, ' el_mask ', el_mask)

                    if el_val < el_mask:  # check mask here!
                        continue

                # This should really go out as digital metadata into the
                # recording location
                print("# Site : %s " % (site_name))
                print("# Site tag : %s " % (site_tag))
                print("# Object Name: %s" % (obj_name))
                print("# observer @ latitude : %s, longitude : %s, elevation : %s m" % (obs_lat, obs_long, obs_elev))
                print("# GMT -- Rise Time: %s, Transit Time: %s, Set Time: %s" % (sat_rise, sat_transit, sat_set))
                print("# Azimuth: %f deg, Elevation: %f deg, Altitude: %g km" % (numpy.rad2deg(az), numpy.rad2deg(el), alt/1000.0))
                print("# Frequencies: %s MHz, Bandwidth: %s kHz" % ( obj_freqs / 1.0e6, obj_bandwidth[numpy.argmax(obj_bandwidth)] / 1.0e3))

                pass_md = {'obj_id': obj_id,
                           'rise_time': sat_rise,
                           'transit_time': sat_transit,
                           'set_time': sat_set,
                           'azimuth': numpy.rad2deg(az),
                           'elevation': numpy.rad2deg(el),
                           'altitude': alt,
                           'doppler_frequency': obj_doppler,
                           'doppler_bandwidth': obj_bandwidth}

                if opt.schedule:
                    d = sat_rise.tuple()
                    rise_time = "%04d%02d%02d_%02d%02d" % (d[0], d[1], d[2], d[3], d[4])
                    # Pad the recording window by one minute on each side.
                    offset_rise = ephem.date(sat_rise - ephem.minute)
                    d = offset_rise.tuple()
                    offset_rise_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (d[0], d[1], d[2], d[3], d[4], int(d[5]))
                    offset_set = ephem.date(sat_set + ephem.minute)
                    d = offset_set.tuple()
                    offset_set_time = "%04d-%02d-%02dT%02d:%02d:%02dZ" % (d[0], d[1], d[2], d[3], d[4], int(d[5]))

                    cmd_lines = []
                    # Python 3 fix: str.split instead of string.split; the
                    # values are stored quoted, so strip the quotes first.
                    radio_channel = site['radio']['channel'][1:-1].split(',')
                    radio_gain = site['radio']['gain'][1:-1].split(',')
                    radio_address = site['radio']['address'][1:-1].split(',')
                    recorder_channels = site['recorder']['channels'][1:-1].split(',')

                    radio_sample_rate = site['radio']['sample_rate']

                    cmd_line0 = "%s " % (site['recorder']['command'])

                    if site['radio']['type'] == 'b210':
                        # just record a fixed frequency, needs a dual radio
                        # Thor3 script. This can be done!
                        idx = 0
                        freq = obj_freqs[1]
                        cmd_line1 = "-r %s -d \"%s\" -s %s -e %s -c %s -f %4.3f " % (radio_sample_rate, radio_channel[idx], offset_rise_time, offset_set_time, recorder_channels[idx], freq)
                        log_file_name = "%s_%s_%s_%dMHz.log" % (site_tag, obj_id, offset_rise_time, int(freq/1.0e6))
                        cmd_fname = '%s_%s_%s_%dMHz' % (site_tag, obj_id, rise_time, int(freq/1.0e6))
                        cmd_line2 = " -g %s -m %s --devargs num_recv_frames=1024 --devargs master_clock_rate=24.0e6 -o %s/%s" % (radio_gain[idx], radio_address[idx], site['recorder']['data_path'], cmd_fname)
                        cmd_line2 += ' {0}'.format(site['radio'].get('extra_args', '')).rstrip()

                        if not opt.foreground:
                            # Detach the recorder when not run in the
                            # foreground.
                            cmd_line0 = 'nohup ' + cmd_line0
                            cmd_line2 = cmd_line2 + ' 2>&1 &'
                        else:
                            cmd_line2 = cmd_line2

                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)

                        cmd_lines.append((cmd_line0 + cmd_line1 + cmd_line2, cmd_fname, pass_md, obj_info))
                        print("\n")
                    elif site['radio']['type'] == 'n200_tvrx2':
                        cmd_line1 = " -r %s -d \"%s %s\" -s %s -e %s -c %s,%s -f %4.3f,%4.3f " % (radio_sample_rate, radio_channel[0], radio_channel[1], offset_rise_time, offset_set_time, recorder_channels[0], recorder_channels[1], obj_freqs[0], obj_freqs[1])
                        log_file_name = "%s_%s_%s_combined.log" % (site_tag, obj_id, offset_rise_time)
                        cmd_fname = '%s_%s_%s_combined' % (site_tag, obj_id, rise_time)
                        cmd_line2 = " -g %s,%s -m %s -o %s/%s" % (radio_gain[0], radio_gain[1], radio_address[0], site['recorder']['data_path'], cmd_fname)
                        cmd_line2 += ' {0}'.format(site['radio'].get('extra_args', '')).rstrip()

                        if not opt.foreground:
                            cmd_line0 = 'nohup ' + cmd_line0
                            cmd_line2 = cmd_line2 + ' 2>&1 &'
                        else:
                            cmd_line2 = cmd_line2

                        if opt.debug:
                            print(cmd_line0, cmd_line1, cmd_line2, cmd_fname)

                        cmd_lines.append((cmd_line0 + cmd_line1 + cmd_line2, cmd_fname, pass_md, obj_info))
                        print("\n")

                    if opt.foreground:
                        dtstart0 = dateutil.parser.parse(offset_rise_time)
                        dtstop0 = dateutil.parser.parse(offset_set_time)
                        start0 = int((dtstart0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds())
                        stop0 = int((dtstop0 - datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)).total_seconds())

                        if opt.verbose:
                            print("# waiting for %s @ %s " % (obj_id, offset_rise_time))

                        # Bug fix: poll using opt.interval, not the global
                        # 'op'.
                        while time.time() < start0 - 30:
                            time.sleep(opt.interval)
                            if opt.verbose:
                                print("# %d sec" % (start0 - time.time()))

                        for cmd_tuple in cmd_lines:
                            cmd, cmd_fname, pass_md, info_md = cmd_tuple
                            print("# Executing command %s " % (cmd))

                            # write the digital metadata
                            start_idx = int(start0)
                            mdata_dir = site['recorder']['metadata_path'] + '/' + cmd_fname + '/metadata'

                            # site metadata
                            # note we use directory structure for the dictionary here
                            # eventually we will add this feature to digital metadata
                            for k in site:
                                try:
                                    os.makedirs(mdata_dir + '/config/%s' % (k))
                                except:
                                    pass

                                md_site_obj = DigitalMetadataWriter(mdata_dir + '/config/%s' % (k), 3600, 60, 1, 1, k)
                                if opt.debug:
                                    print(site[k])
                                if opt.verbose:
                                    print("# writing metadata config / %s " % (k))
                                md_site_obj.write(start_idx, site[k])

                            # info metadata
                            try:
                                os.makedirs(mdata_dir + '/info')
                            except:
                                pass

                            md_info_obj = DigitalMetadataWriter(mdata_dir + '/info', 3600, 60, 1, 1, 'info')
                            if opt.verbose:
                                print("# writing metadata info")
                            if opt.debug:
                                print(info_md)
                            md_info_obj.write(start_idx, info_md)

                            # pass metadata
                            try:
                                os.makedirs(mdata_dir + '/pass')
                            except:
                                pass

                            md_pass_obj = DigitalMetadataWriter(mdata_dir + '/pass', 3600, 60, 1, 1, 'pass')
                            if opt.verbose:
                                print("# writing metadata pass")
                            if opt.debug:
                                print(pass_md)
                            md_pass_obj.write(start_idx, pass_md)

                            # call the command
                            try:
                                subprocess.call(cmd, shell=True)
                            except Exception as eobj:
                                exp_str = str(ExceptionString(eobj))
                                print("exception: %s." % (exp_str))
                                exc_type, exc_value, exc_traceback = sys.exc_info()
                                lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
                                print(lines)

                        print("# wait...")
                        # Bug fix: opt.interval, not the global 'op'.
                        while time.time() < stop0 + 1:
                            time.sleep(opt.interval)
                        if opt.verbose:
                            print("# complete in %d sec" % (stop0 - time.time()))

        except Exception as eobj:
            exp_str = str(ExceptionString(eobj))
            print("exception: %s." % (exp_str))
            exc_type, exc_value, exc_traceback = sys.exc_info()
            lines = traceback.format_exception(exc_type, exc_value, exc_traceback)
            print(lines)

        # advance 10 minutes
        # Bug fix: opt.interval, not the global 'op'.
        ctime = ctime + 60 * opt.interval
        continue
def parse_command_line():
    """Builds the option parser and parses the command line.

    Returns:
        The (options, args) tuple produced by optparse.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        "-v", "--verbose", action="store_true", dest="verbose", default=False,
        help="prints debug output and additional detail.")
    parser.add_option(
        "-d", "--debug", action="store_true", dest="debug", default=False,
        help="run in debug mode and not service context.")
    parser.add_option(
        "-b", "--bash", action="store_true", dest="schedule", default=False,
        help="create schedule file for bash shell based command / control.")
    parser.add_option(
        "-m", "--mask", dest="el_mask", type=float, default=0.0,
        help="mask all passes below the provided elevation.")
    parser.add_option(
        "-c", "--config", dest="config", default='config/beacons.ini',
        help="Use configuration file <config>.")
    parser.add_option(
        "-f", "--foreground", action="store_true", dest="foreground",
        help="Execute schedule in foreground.")
    parser.add_option(
        "-s", "--starttime", dest="starttime",
        help="Start time in ISO8601 format, e.g. 2016-01-01T15:24:00Z")
    parser.add_option(
        "-e", "--endtime", dest="endtime",
        help="End time in ISO8601 format, e.g. 2016-01-01T16:24:00Z")
    parser.add_option(
        "-i", "--interval", dest="interval", type=float, default=10.0,
        help="Sampling interval for ephemeris predictions, default is 10 seconds.")
    parser.add_option(
        "-r", "--radio", dest="site", default='config/site.ini',
        help="Radio site configuration file.")
    return parser.parse_args()
if __name__ == "__main__":
    # Parse command line options.  The name 'op' is module-global and is
    # referenced elsewhere in this file.
    op, args = parse_command_line()

    # Unix epoch as an aware datetime, for converting parsed ISO8601 times.
    epoch = datetime.datetime(1970, 1, 1, tzinfo=pytz.utc)

    if op.starttime is None:
        # Default: start a few seconds from now.
        st0 = int(math.ceil(time.time())) + 10
    else:
        dtst0 = dateutil.parser.parse(op.starttime)
        st0 = int((dtst0 - epoch).total_seconds())
        print('Start time: %s (%d)' % (dtst0.strftime('%a %b %d %H:%M:%S %Y'), st0))

    if op.endtime is None:
        # default to the next 24 hours
        et0 = st0 + 60*60*24.0
    else:
        dtet0 = dateutil.parser.parse(op.endtime)
        et0 = int((dtet0 - epoch).total_seconds())
        print('End time: %s (%d)' % (dtet0.strftime('%a %b %d %H:%M:%S %Y'), et0))

    ephemeris_passes(op, st0, et0)
| 41.630986 | 252 | 0.556567 |
7959ffb948d5d5621ea47e68b3e48191384f32a4 | 24,336 | py | Python | node_modules/shaka-player/build/stats.py | CocoaZX/RNVideo | 4989b82ec17d83a3499d1a941314cb79fbdf7877 | [
"MIT"
] | null | null | null | node_modules/shaka-player/build/stats.py | CocoaZX/RNVideo | 4989b82ec17d83a3499d1a941314cb79fbdf7877 | [
"MIT"
] | 1 | 2018-11-09T17:18:20.000Z | 2018-11-09T17:18:20.000Z | build/stats.py | xrtube/shaka-player | c53786e71fd542e45dcb000613d367e522df027d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A program for analyzing the Shaka compiled sources.
This can be used to find areas that can be removed if not needed. This uses
the source map (i.e. shaka-player.compiled.debug.map) to find the compiled code
size, see:
https://github.com/mattrobenolt/python-sourcemap
http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/
This script can output four different stats in two different formats:
- The size of functions and namespaces.
- The dependencies between types (in plain or DOT format).
- The dependencies between functions (in plain or DOT format).
- All tokens in the source map.
The dependencies can be outputted in DOT format which can be used with graph
programs to display a visual layout of the dependencies. For example, using
graphviz:
./stats.py -c -d | fdb -Goverlap=prism | neato -n2 -Tsvg > out.svg
"""
from __future__ import print_function
import argparse
import json
import logging
import math
import os
import string
import sys
import shakaBuildHelpers
# A Base64 VLQ digit can represent 5 bits, so it is Base32.
VLQ_BASE_SHIFT = 5
VLQ_BASE = 1 << VLQ_BASE_SHIFT
# A mask of bits for a VLQ digit (11111), 31 decimal
VLQ_BASE_MASK = VLQ_BASE - 1
# The continuation bit is the 6th bit
VLQ_CONTINUATION_BIT = VLQ_BASE
# Don't use Base64 lib since it is not a real Base64 string; it simply
# decodes each character to a single Base64 number.
B64 = dict((c, i) for i, c in
enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
'0123456789+/'))
def from_vlq_signed(value):
  """Converts a VLQ number to a normal signed number.

  The least-significant bit of a VLQ value is the sign flag; the remaining
  bits hold the magnitude.

  Args:
    value: A number decoded from a VLQ string.

  Returns:
    an integer.
  """
  is_negative = bool(value & 1)
  magnitude = value >> 1
  return -magnitude if is_negative else magnitude
class Segment(object):
  """One decoded segment of a source-map mappings line.

  Members:
    dst_col_offset - The offset of the destination column from the previous
        segment.
    name_offset - If not None, the offset of the name index from the
        previous segment.
  """

  def __init__(self, data):
    self.dst_col_offset = data[0]
    # Only 5-field segments carry a name index (field 5); shorter segments
    # have no name.
    if len(data) > 4:
      self.name_offset = data[4]
    else:
      self.name_offset = None
def decode_segment(segment):
  """Decodes VLQ values from the given segment.

  Args:
    segment: A string containing the encoded segment text.

  Returns:
    the parsed Segment.
  """
  values = []
  current = 0
  shift = 0

  for char in segment:
    digit = B64[char]
    has_continuation = bool(digit & VLQ_CONTINUATION_BIT)
    # Accumulate the low 5 bits of each digit, least-significant first.
    current += (digit & VLQ_BASE_MASK) << shift
    shift += VLQ_BASE_SHIFT
    if not has_continuation:
      values.append(from_vlq_signed(current))
      current = 0
      shift = 0

  # A valid VLQ string should not have dangling bits.
  assert current == 0
  assert shift == 0

  return Segment(values)
class Token(object):
  """A Token represents one JavaScript symbol.

  For example, this can be a variable or an equals sign.  If this is a
  variable or the keyword 'function' it will usually have a name which
  indicates what it originally was defined.  But there are also tokens such
  as ; and ( which appear as tokens in the map but do not have an explicit
  name (see is_function).

  Members:
    dst_line - Line index in compiled code
    dst_col - Column index in compiled code
    name - Name of the token; or None
  """

  def __init__(self, dst_line, dst_col, name=None):
    self.name = name
    self.dst_line = dst_line
    self.dst_col = dst_col

  def __str__(self):
    return str(self.name)
def decode_mappings(line_data, names):
  """Decodes a mappings line of text.

  Args:
    line_data: A string containing the mapping line.
    names: An array of strings containing the names of the objects.

  Returns:
    a list of Tokens
  """
  tokens = []
  name_id = 0

  # Lines are separated by ';', segments within a line by ','.
  for dst_line, line in enumerate(line_data.split(';')):
    dst_col = 0
    for encoded in line.split(','):
      if not encoded:
        continue

      seg = decode_segment(encoded)
      dst_col += seg.dst_col_offset
      # Column offsets can be negative (relative to the prior segment), but
      # the running column must remain non-negative.
      assert dst_col >= 0

      name = None
      if seg.name_offset is not None:
        # Name indexes are also delta-encoded across segments.
        name_id += seg.name_offset
        assert name_id >= 0
        name = names[name_id]

      tokens.append(Token(dst_line, dst_col, name))

  return tokens
def is_function(token, lines):
"""Determines if the given token is the start of a function.
All function definitions are assumed to have a name field and the token in
the compiled source is the keyword 'function'. Sometimes the function is
defined on the previous semicolon and sometimes that semicolon appears on
the previous line.
Args:
token: The Token to check.
lines: An array of compiled code lines.
Returns:
whether the token is a function.
"""
# All functions have a name.
if not token.name:
return False
# Sometimes a function token starts with the previous ;
# Also sometimes the token starts on the ; that is on the previous
# line.
partial_line = lines[token.dst_line][token.dst_col:]
if partial_line == ';\n':
if len(lines) == token.dst_line + 1:
return False
else:
return lines[token.dst_line + 1].startswith('function')
else:
return (partial_line.startswith('function') or
partial_line.startswith(';function'))
def read_function(token_iter, prev, prev_index, lines, callback):
  """Reads a function from the token stream.

  The function token should already be consumed.  Consumes tokens from the
  shared iterator until the function's braces balance, recursing for any
  nested function definitions it encounters.

  Args:
    token_iter: An iterator of the tokens.
    prev: The token containing the function definition.
    prev_index: The index of the previous token.
    lines: An array of compiled code lines.
    callback: A callback type used to create the data.  See traverse_tokens.

  Returns:
    an array of State objects in a format controlled by the callback (see
    traverse_tokens).
  """
  # brackets: current depth of unclosed '{'; read: whether we have seen the
  # function body open at all (so the loop doesn't exit before the body).
  brackets = 0
  read = False
  ret = []

  partial_line = lines[prev.dst_line][prev.dst_col:]
  state = callback(prev, prev_index)

  try:
    while not read or brackets > 0:
      index, token = next(token_iter)
      # The compiled text at this token, used to track brace depth below.
      partial_line = lines[token.dst_line][token.dst_col:]

      # Recursively read functions. Sometimes functions are defined nested.
      # This doesn't happen that often, and never for Shaka methods, so it does
      # not count it twice since the size of this method includes the nested
      # function.
      if is_function(token, lines):
        ret += read_function(token_iter, token, index, lines, callback)
      else:
        state.add(token, index)

      # '{}' is an empty body: mark the body as seen without changing the
      # bracket depth.
      if partial_line.startswith('{}'):
        read = True
      elif partial_line[0] == '{':
        brackets += 1
        read = True
      elif partial_line[0] == '}':
        brackets -= 1

  # When we run out of tokens, simply ignore it. A parent call will not see
  # this error; but it will continue and the next call to 'next' will fail
  # with another StopIteration. This ensures that the last State object
  # is included for invalid content.
  except StopIteration:
    pass

  temp = state.build()
  if temp:
    ret.append(temp)
  return ret
def traverse_tokens(tokens, lines, callback):
  """Traverses a list of tokens to identify functions.

  Then uses a callback to perform some work on the functions.  Each
  function seen gets a new State object created from the given callback
  method; there is a single State for global code which is given None in
  the constructor.  Then, each token seen is passed to the 'add' method of
  the State.  This is used by the State to either calculate sizes, print
  tokens, or detect dependencies.  The 'build' method is called at the end
  of the function to create a result object that is returned as an array
  at the end.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    callback: A constructor that returns a state object.  It takes a start
      token or None if outside a function.  It has two member functions
        add - accepts the current token and the token's index.
        build - returns an object to be added to the results.

  Returns:
    an array of State objects in a format controlled by the callback.
  """
  results = []
  global_state = callback(None, None)

  # The iterator is shared with read_function, which consumes the tokens of
  # each function body; this loop then resumes right after that body.
  token_iter = enumerate(tokens)
  for index, token in token_iter:
    if is_function(token, lines):
      results += read_function(token_iter, token, index, lines, callback)
    else:
      global_state.add(token, index)

  leftover = global_state.build()
  if leftover:
    results.append(leftover)
  return results
class FunctionSize(object):
  """Associates a function name with its compiled size.

  Members:
    name - Fully-qualified function name.
    size - Compiled size attributed to the function.
  """

  def __init__(self, name, size):
    self.name = name
    self.size = size
def print_tokens(tokens, lines, funcs):
  """Prints the given tokens.

  Each token is printed as a prefixed row of destination line/column, a
  snippet of the compiled text, and the token's original name.  Prefix
  legend: '>' function start, '!' global code, '+' open brace, '-' close
  brace, ' ' other, 'X' function end (with its compiled size).

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    funcs: An array of FunctionSize.
  """

  class State(object):
    """Defines the current parser state."""

    def __init__(self, token, index):
      # The start of a function, or the global start.
      self.name = token.name if token else None
      if token:
        self._print_token('>', token, index)

    def _print_token(self, prefix, token, index):
      # The compiled text this token maps to runs up to the next token on
      # the same line (or to end of line otherwise).
      partial_line = lines[token.dst_line][token.dst_col:]
      if len(tokens) > index + 1:
        next_ = tokens[index + 1]
        if next_.dst_line == token.dst_line:
          partial_line = lines[token.dst_line][token.dst_col:next_.dst_col]
      token_text = partial_line[:10].replace('\n', '').rjust(12)
      print('%s %4d %4d %12s %s' % (prefix, token.dst_line, token.dst_col,
                                    token_text, token.name))

    def add(self, token, index):
      """Parses the given token.

      Args:
        token: The token to add.
        index: The index of the token in the original array.
      """
      # Choose a display prefix from the compiled text at the token.
      prefix = None
      if not self.name:
        prefix = '!'
      elif lines[token.dst_line][token.dst_col:token.dst_col+2] == '{}':
        prefix = ' '
      elif lines[token.dst_line][token.dst_col] == '{':
        prefix = '+'
      elif lines[token.dst_line][token.dst_col] == '}':
        prefix = '-'
      else:
        prefix = ' '
      self._print_token(prefix, token, index)

    def build(self):
      if not self.name:
        return
      # The end of a function.  Print the size of this function, looked up
      # by name in the precomputed funcs list (0 if not found).
      size = 0
      this_func = [t for t in funcs if t.name == self.name]
      if this_func:
        size = this_func[0].size
      print('X %s %d' % (self.name, size))

  traverse_tokens(tokens, lines, State)
class FunctionDependencies(object):
  """Pairs a function (or class) name with the names it references.

  Members:
    name - Fully-qualified function or class name.
    deps - List of names this function or class depends on.
  """

  def __init__(self, name, deps):
    self.name = name
    self.deps = deps
def process_deps(tokens, lines, is_class):
  """Processes the tokens into function or class dependencies.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.
    is_class: Whether to create a class graph instead of a function graph.

  Returns:
    an array of FunctionDependencies, sorted by name with duplicates
    merged.
  """

  class State(object):
    """Defines the current parser state."""

    def __init__(self, token, _):
      self.deps = []
      self.name, self.parts = self._create_parts(token)

    def _create_parts(self, token):
      """Creates an array of name parts.

      Args:
        token: The token to create the name from.

      Returns:
        A tuple of the name and the array of name parts.
      """
      if not token or not token.name:
        return (None, None)

      parts = token.name.split('.')
      name = token.name
      # Instance methods are the same as static methods.
      if len(parts) > 2 and parts[-2] == 'prototype':
        del parts[-2]
      # Strip function names if class graph; also remove it from the name.
      # Heuristic: a lowercase first character marks a function name rather
      # than a class name.
      if is_class:
        if parts[-1][0] in string.ascii_lowercase:
          del parts[-1]
          name = '.'.join(parts)
      return (name, parts)

    def add(self, token, _):
      """Parses the given token.

      Args:
        token: The token to parse.
      """
      # Ignore symbols outside a function.  Only care about function
      # references and only those that reference our code.
      if not self.name or not token.name or not token.name.startswith('shaka.'):
        return

      name, other_parts = self._create_parts(token)

      # Get the index of the first different namespace.
      # After the loop, i is the length of the common prefix of the two
      # dotted names.
      count = min(len(self.parts), len(other_parts))
      i = 0
      while i < count and self.parts[i] == other_parts[i]:
        i += 1

      # Ignore use of members of the same object:
      # OfflineVideoSource.configure and OfflineVideoSource.store
      if (i == count - 1 or i == count) and len(self.parts) == len(other_parts):
        return
      # Ignore use of the constructor of the same type:
      # OfflineVideoSource and OfflineVideoSource.store
      if i == count and abs(len(self.parts) - len(other_parts)) == 1:
        return

      # Add the dependency.
      if name not in self.deps:
        self.deps.append(name)

    def build(self):
      return FunctionDependencies(self.name, self.deps) if self.name else None

  ret = traverse_tokens(tokens, lines, State)
  assert ret
  ret = sorted(ret, key=lambda key: key.name)

  # We need to collapse duplicates (same name can appear multiple times
  # after sorting); merge the deps of adjacent entries with equal names.
  i = 0
  while i + 1 < len(ret):
    if ret[i].name == ret[i + 1].name:
      for dep in ret[i + 1].deps:
        if dep not in ret[i].deps:
          ret[i].deps.append(dep)
      del ret[i + 1]
    else:
      i += 1

  return ret
def process_sizes(tokens, lines):
  """Processes an array of tokens into function lengths.

  Args:
    tokens: An array of Tokens.
    lines: An array of compiled code lines.

  Returns:
    an array of FunctionSizes sorted on name.
  """

  class State(object):
    """Defines the current parser state.

    Counts the characters of compiled code attributed to the current
    function, advancing from token to token.
    """

    def __init__(self, token, _):
      # Function name, or None when outside a function.
      self.name = token.name if token else None
      # Accumulated size in characters.
      self.size = 0
      # Column/line of the last position already accounted for.
      self.start = token.dst_col if token else None
      self.line = token.dst_line if token else None

    def add(self, token, _):
      """Parses the given token.

      Args:
        token: The token to parse.
        _: The (unused) index of the token.
      """
      # Ignore outside a function
      if not self.name:
        return

      # If we skipped to the next line, include the code to the end of the line.
      # If we skipped multiple lines, include the whole line.  This will most
      # likely never happen since the compiled code usually has new lines on
      # function boundaries.
      assert token.dst_line >= self.line
      while token.dst_line != self.line:
        self.size += len(lines[self.line]) - self.start
        self.line += 1
        self.start = 0

      # Keep increasing the size.  We can't simply keep the start and measure
      # at the end since we are not given the end token in build().
      self.size += token.dst_col - self.start
      self.start = token.dst_col

    def build(self):
      # Emits the measured size; None when outside a function.
      return FunctionSize(self.name, self.size) if self.name else None

  ret = traverse_tokens(tokens, lines, State)
  assert ret
  # Only keep our own symbols plus Closure's goog.* helpers.
  ret = [k for k in ret if k.name and
         (k.name.startswith('shaka.') or k.name.startswith('goog.'))]
  ret = sorted(ret, key=lambda key: key.name)

  # We need to collapse duplicates.
  i = 0
  while i + 1 < len(ret):
    if ret[i].name == ret[i + 1].name:
      ret[i].size += ret[i + 1].size
      del ret[i + 1]
    else:
      i += 1
  return ret
def print_tree(results, indent, callback, end_callback):
  """Prints the results in an indented format.

  Args:
    results: An array of FunctionSizes sorted on name.
    indent: A number to indent.
    callback: A callback function to print the data.  Accepts a title, an
      indentation, and a sublist of the items in that group.
    end_callback: A callback function called after a group; can be None.
  """
  # This is used both when printing sizes and when printing dependencies in
  # DOT format.  This recursively creates groups of items with the same prefix.
  # e.g.
  #   shaka
  #     shaka.util
  #       shaka.util.FailoverUri
  #       shaka.util.TypedBind
  #     shaka.player
  #     ...
  if len(results) <= 1:
    callback(None, indent, results)
    return

  # We want to group-by prefixes for the elements.  Since it is sorted, we
  # can find the overall prefix length by comparing the first and last names.
  first = results[0].name.split('.')
  last = results[-1].name.split('.')
  prefix = 0
  while (prefix < len(first) and prefix < len(last)
         and first[prefix] == last[prefix]):
    prefix += 1

  group = 0
  group_items = first
  if prefix == len(first):
    # This happens when the group has a first element of a class name and the
    # remaining are member functions.  Remove the first element from this
    # group.
    group_items = results[1].name.split('.')
    group = 1

  # Start with second element, and go one more so we make sure to process the
  # last group.
  for i in range(1, len(results) + 1):
    if i == len(results):
      # Sentinel of empty parts: it can never equal a real name component,
      # so the final group is always flushed.
      items = [''] * (prefix + 1)
    else:
      items = results[i].name.split('.')
    if items[prefix] != group_items[prefix]:
      title = '.'.join(group_items[:(prefix + 1)])
      callback(title, indent, results[group:i])
      print_tree(results[group:i], indent + 1, callback, end_callback)

      # Set the start of the next group to the current element.
      group = i
      group_items = items

  if end_callback:
    end_callback(indent)
def print_sizes(sizes):
  """Prints the sizes in an indented format.

  Args:
    sizes: An array of FunctionSizes sorted on name.
  """
  # Every group line is padded to the width of the grand total so the size
  # column lines up, producing a tree-like view of where the bytes go.
  total = sum(k.size for k in sizes)
  padding = int(math.ceil(math.log10(total)))

  def callback_factory(width):
    # Factory so the column width is captured by the callback.
    def callback(title, indent, results):
      if title:
        group_total = sum(k.size for k in results)
        print('%s %*d %s' % (indent * ' ', width, group_total, title))
    return callback

  print('%*d %s' % (padding, total, 'TOTAL'))
  print_tree(sizes, 0, callback_factory(padding), None)
def print_deps(results, in_dot):
  """Prints the dependencies.

  Arguments:
    results: A sorted array of FunctionDependencies.
    in_dot: Whether to print in DOT format.
  """
  if not in_dot:
    # Plain-text output: each function name followed by its dependencies.
    for func in results:
      name, deps = func.name, func.deps

      # Ignore items with no dependencies.
      if deps:
        print(name)
        for dep in deps:
          print(' ' + dep)
    return

  # Maps a function name to its numeric DOT node ID.
  dep_map = dict()

  # Use the printTree to produce clusters for each namespace and type.  This
  # will print boxes around each class and show dependencies between types.
  print('digraph {')

  def callback_factory(dep_map, temp):
    """Creates a callback function."""
    def callback(title, indent, results):
      if title:
        if len(results) > 1:
          # Several members share this prefix: open a DOT cluster.  `temp`
          # only counts opened clusters so cluster IDs stay unique.
          print('%s subgraph cluster%d {' % ('\t' * indent, len(temp)))
          temp.append(1)
        else:
          # A lone item becomes a node, registered in dep_map.
          print('%s %d [label="%s"];' % ('\t' * indent, len(dep_map),
                                         results[0].name))
          dep_map[results[0].name] = len(dep_map)
    return callback

  def end_callback(indent):
    # Closes the cluster opened in the callback above.
    if indent > 1:
      print('\t' * (indent - 1), '}')

  print_tree(results, 1, callback_factory(dep_map, []), end_callback)

  # Emit the edges; nodes not yet seen by print_tree get defined on demand.
  for func in results:
    name, deps = func.name, func.deps

    # Ignore items with no dependencies.
    if deps:
      if name not in dep_map:
        dep_map[name] = len(dep_map)
        print('\t%s [label="%s"];' % (dep_map[name], name))
      for dep in deps:
        if dep not in dep_map:
          dep_map[dep] = len(dep_map)
          print('\t%s [label="%s"];' % (dep_map[dep], dep))
        print('\t%s -> %s;' % (dep_map[name], dep_map[dep]))
  print('}')
def process(text, options):
  """Decodes a JSON string containing source map data.

  Args:
    text: A JSON string containing source map data.
    options: An object containing the command-line options.
  """
  # The spec allows a map file to start with )]} to prevent javascript from
  # including it.
  if text.startswith(')]}\'\n') or text.startswith(')]}\n'):
    _, text = text.split('\n', 1)

  # Decode the JSON data and get the parts we need.
  data = json.loads(text)

  # Paths are relative to the output directory.
  base = os.path.join(shakaBuildHelpers.get_source_base(), 'dist')
  with open(os.path.join(base, data['file']), 'r') as f:
    file_lines = f.readlines()
  names = data['names']
  mappings = data['mappings']
  tokens = decode_mappings(mappings, names)
  sizes = process_sizes(tokens, file_lines)

  # Print out one of the results.
  if options.all_tokens:
    print_tokens(tokens, file_lines, sizes)
  elif options.function_sizes:
    print_sizes(sizes)
  elif options.function_deps or options.class_deps:
    temp = process_deps(tokens, file_lines, options.class_deps)
    print_deps(temp, options.dot_format)
def main(args):
  """Parses the command line and prints the requested source-map report.

  Args:
    args: The command-line arguments, excluding the program name.

  Returns:
    0 on success; 1 when the source map cannot be found.
  """
  parser = argparse.ArgumentParser(
      description=__doc__,
      formatter_class=argparse.RawDescriptionHelpFormatter)

  parser.add_argument('-d', '--dot-format', action='store_true',
                      help='Prints in DOT format.')
  parser.add_argument('source_map', nargs='?',
                      default='shaka-player.compiled.map',
                      help='The source map or the name of the build to use.')

  print_types = parser.add_mutually_exclusive_group(required=True)
  print_types.add_argument('-c', '--class-deps', action='store_true',
                           help='Prints the class dependencies.')
  print_types.add_argument('-f', '--function-deps', action='store_true',
                           help='Prints the function dependencies.')
  print_types.add_argument(
      '-s', '--function-sizes', action='store_true',
      help='Prints the function sizes (in number of characters).')
  print_types.add_argument(
      '-t', '--all-tokens', action='store_true',
      help='Prints all tokens in the source map.')

  options = parser.parse_args(args)

  # Verify arguments are correct.
  if (options.dot_format and not options.function_deps
      and not options.class_deps):
    parser.error('--dot-format only valid with --function-deps or '
                 '--class-deps.')

  # Try to find the file.
  name = options.source_map
  if not os.path.isfile(name):
    # Get the source code base directory.
    base = shakaBuildHelpers.get_source_base()
    debug_map = 'shaka-player.' + name + '.debug.map'
    # Supports the following searches, tried in order:
    #  * File name given, map in dist/
    #  * Type given, map in working directory
    #  * Type given, map in dist/
    candidates = [
        os.path.join(base, 'dist', name),
        debug_map,
        os.path.join(base, 'dist', debug_map),
    ]
    for candidate in candidates:
      if os.path.isfile(candidate):
        name = candidate
        break
    else:
      logging.error('"%s" not found; build Shaka first.', name)
      return 1

  with open(name, 'r') as f:
    process(f.read(), options)
  return 0
if __name__ == '__main__':
  # Script entry point: delegate to the shared build-helper runner.
  shakaBuildHelpers.run_main(main)
| 29.750611 | 80 | 0.653723 |
795a00a96c5fbb1919f84823d53495156d05827f | 3,685 | py | Python | src/hub/dataload/sources/gnomad/gnomad_v3_parser.py | erikyao/myvariant.info | a4eaaca7ab6c069199f8942d5afae2dece908147 | [
"Apache-2.0"
] | 39 | 2017-07-01T22:34:39.000Z | 2022-03-15T22:25:59.000Z | src/hub/dataload/sources/gnomad/gnomad_v3_parser.py | erikyao/myvariant.info | a4eaaca7ab6c069199f8942d5afae2dece908147 | [
"Apache-2.0"
] | 105 | 2017-06-28T17:26:06.000Z | 2022-03-17T17:49:53.000Z | src/hub/dataload/sources/gnomad/gnomad_v3_parser.py | erikyao/myvariant.info | a4eaaca7ab6c069199f8942d5afae2dece908147 | [
"Apache-2.0"
] | 14 | 2017-06-12T18:29:36.000Z | 2021-03-18T15:51:27.000Z | import vcf
import math
from itertools import chain
from .gnomad_common_parser import PopulationName, PopulationFrequencyParser, ProfileParser, \
AbstractSiteQualityMetricsParser, GnomadVcfRecordParser
# Globals of population names
_FEMALE, _MALE = "XX", "XY"
_POPULATION_NAME_OBJ_LIST = [
PopulationName("afr", [_FEMALE, _MALE]),
PopulationName("ami", [_FEMALE, _MALE]),
PopulationName("amr", [_FEMALE, _MALE]),
PopulationName("asj", [_FEMALE, _MALE]),
PopulationName("eas", [_FEMALE, _MALE, "jpn", "kor", "oea"]),
PopulationName("fin", [_FEMALE, _MALE]),
PopulationName("mid", [_FEMALE, _MALE]),
PopulationName("nfe", [_FEMALE, _MALE, "bgr", "est", "nwe", "onf", "seu", "swe"]),
PopulationName("oth", [_FEMALE, _MALE]),
PopulationName("sas", [_FEMALE, _MALE])
]
_POPULATION_NAME_STR_LIST = list(chain.from_iterable(pop_name.to_list() for pop_name in _POPULATION_NAME_OBJ_LIST))
"""
Global PopulationFrequencyParser object.
Keys starts with the following prefixes are not parsed as population frequencies:
["AC_controls_and_biobanks", "AC_non_cancer", "AC_non_neuro", "AC_non_topmed", "AC_non_v2",
"AF_controls_and_biobanks", "AF_non_cancer", "AF_non_neuro", "AF_non_topmed", "AF_non_v2",
"nhomalt_controls_and_biobanks", "nhomalt_non_cancer", "nhomalt_non_neuro", "nhomalt_non_topmed", "nhomalt_non_v2",
"AN_controls_and_biobanks", "AN_non_cancer", "AN_non_neuro", "AN_non_topmed", "AN_non_v2"]
"""
population_frequency_parser = PopulationFrequencyParser.from_suffixes(suffixes=[_FEMALE, _MALE] + _POPULATION_NAME_STR_LIST)
class SiteQualityMetricsParser(AbstractSiteQualityMetricsParser):
    """Extracts gnomAD v3 site quality metrics from a VCF record's INFO dict."""

    @classmethod
    def parse(cls, info: dict) -> dict:
        """
        Read site quality metrics (as shown in the gnomAD browser) from the "INFO" field (a dict)
        of a gnomAD VCF record.

        Any metric may be absent from "INFO", so every lookup falls back to None.

        N.B. the browser's "SiteQuality" metric is not present in "INFO" (probably derived from
        other metrics), and neither are "AS_QUALapprox" / "AS_VarDP" (the "QUALapprox" / "VarDP"
        fields that do exist are assumed to be different metrics).
        """
        # "AS_VQSLOD" is special-cased: legacy behavior maps an INF value to None.
        vqslod = info.get("AS_VQSLOD")
        return {
            "inbreedingcoeff": info.get("InbreedingCoeff"),
            "as_fs": info.get("AS_FS"),
            "as_mq": {
                "as_mq": info.get("AS_MQ"),
                "as_mqranksum": info.get("AS_MQRankSum"),
            },
            "as_pab_max": info.get("AS_pab_max"),
            "as_qd": info.get("AS_QD"),
            "as_readposranksum": info.get("AS_ReadPosRankSum"),
            "as_sor": info.get("AS_SOR"),
            "as_vqslod": None if vqslod == math.inf else vqslod,
        }
def load_genome_data(input_file):
    """Yields one document per parsed variant from a compressed gnomAD genomes VCF.

    Args:
        input_file: path to the bgzip-compressed VCF file.
    """
    vcf_reader = vcf.Reader(filename=input_file, compressed=True)
    record_parser = GnomadVcfRecordParser(ProfileParser, SiteQualityMetricsParser, population_frequency_parser)
    for record in vcf_reader:
        # A single VCF record may yield several docs (e.g. multi-allelic sites);
        # each doc stores its data under the "gnomad_genome" key.
        for doc in record_parser.parse(record, doc_key="gnomad_genome"):
            yield doc
| 42.848837 | 124 | 0.681411 |
795a00ec1a79cfba253b4f816242eea5ccc578b8 | 807 | py | Python | test/test_instance_stop_action_response_data.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 2 | 2022-01-27T10:36:33.000Z | 2022-03-09T14:21:12.000Z | test/test_instance_stop_action_response_data.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | 7 | 2022-01-13T10:44:19.000Z | 2022-02-15T23:44:44.000Z | test/test_instance_stop_action_response_data.py | p-fruck/python-contabo | c3abd362a0b90783118f36bec0e557bdbe5a8f2c | [
"Apache-2.0"
] | null | null | null | """
Contabo API
The version of the OpenAPI document: 1.0.0
Contact: support@contabo.com
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import pfruck_contabo
from pfruck_contabo.model.instance_stop_action_response_data import InstanceStopActionResponseData
class TestInstanceStopActionResponseData(unittest.TestCase):
    """Unit test stubs for the InstanceStopActionResponseData model."""

    def setUp(self):
        """No fixtures are needed for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testInstanceStopActionResponseData(self):
        """Test InstanceStopActionResponseData"""
        # FIXME: construct object with mandatory attributes with example values
        # model = InstanceStopActionResponseData() # noqa: E501
if __name__ == '__main__':
    # Allows running this test module directly with the unittest runner.
    unittest.main()
| 22.416667 | 98 | 0.723668 |
795a036f801f18973a6d3f41a5a8b9dc0bbd9280 | 1,106 | py | Python | starlette_cbge/schema_backends/pydantic.py | gvbgduh/starlette-cbge | 4c18a1cf1cfa088d67a61b89e64217e2e4dac809 | [
"MIT"
] | 7 | 2019-09-01T21:37:23.000Z | 2020-05-12T19:36:04.000Z | starlette_cbge/schema_backends/pydantic.py | gvbgduh/starlette-cbge | 4c18a1cf1cfa088d67a61b89e64217e2e4dac809 | [
"MIT"
] | null | null | null | starlette_cbge/schema_backends/pydantic.py | gvbgduh/starlette-cbge | 4c18a1cf1cfa088d67a61b89e64217e2e4dac809 | [
"MIT"
] | null | null | null | """
Pydantic schema backend
"""
from typing import Dict, List, Any
try:
import pydantic
except ImportError:
pydantic = None # type: ignore
from starlette_cbge.interfaces import SchemaInterface, ListSchemaInterface
class PydanticSchema(SchemaInterface, pydantic.BaseModel):
    """Schema backend that validates a single object through a pydantic model."""

    @classmethod
    def perform_load(cls, data: Dict[str, Any]) -> Dict[str, Any]:
        """Validate incoming data via the model and return it as a plain dict."""
        instance = cls(**data)
        return instance.dict()

    @classmethod
    def perform_dump(cls, data: Any) -> Dict[str, Any]:
        """Serialize outgoing data via the model and return it as a plain dict."""
        instance = cls(**data)
        return instance.dict()

    @classmethod
    def openapi_schema(cls) -> Dict[str, Any]:
        """Expose pydantic's generated JSON schema for OpenAPI documents."""
        return cls.schema()
class PydanticListSchema(ListSchemaInterface, pydantic.BaseModel):
    """Schema backend that validates a list of objects through a pydantic model."""

    @classmethod
    def perform_load(cls, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Validate each item via the model and return plain dicts."""
        return [cls(**item).dict() for item in data]

    @classmethod
    def perform_dump(cls, data: List[Any]) -> List[Dict[str, Any]]:
        """Serialize each item via the model and return plain dicts."""
        return [cls(**item).dict() for item in data]

    @classmethod
    def openapi_schema(cls) -> Dict[str, Any]:
        """Expose pydantic's generated JSON schema for OpenAPI documents."""
        # TODO adjust for List
        return cls.schema()
795a03848dc9704f61b9d1475d4b2c42dea3dc63 | 5,949 | py | Python | tests/test_trainer.py | flavell-lab/pytorch-3dunet | 066717dcbf8d491ee3420a35a7db3384e3fc2137 | [
"MIT"
] | 1 | 2021-08-04T04:03:37.000Z | 2021-08-04T04:03:37.000Z | tests/test_trainer.py | LalithShiyam/pytorch-3dunet | f6b6c13cb0bb6194e95976b0245b76aaa9e9a496 | [
"MIT"
] | null | null | null | tests/test_trainer.py | LalithShiyam/pytorch-3dunet | f6b6c13cb0bb6194e95976b0245b76aaa9e9a496 | [
"MIT"
] | 1 | 2022-03-14T04:43:24.000Z | 2022-03-14T04:43:24.000Z | import os
from tempfile import NamedTemporaryFile
import h5py
import numpy as np
import torch
from pytorch3dunet.datasets.utils import get_train_loaders
from pytorch3dunet.train import _create_optimizer, _create_lr_scheduler
from pytorch3dunet.unet3d.losses import get_loss_criterion
from pytorch3dunet.unet3d.metrics import get_evaluation_metric
from pytorch3dunet.unet3d.model import get_model
from pytorch3dunet.unet3d.trainer import UNet3DTrainer
from pytorch3dunet.unet3d.utils import DefaultTensorboardFormatter
class TestUNet3DTrainer:
def test_ce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'CrossEntropyLoss', 'MeanIoU', 'UNet3D')
def test_wce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'WeightedCrossEntropyLoss', 'MeanIoU', 'UNet3D')
def test_bce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'BCEWithLogitsLoss', 'DiceCoefficient', 'UNet3D')
def test_dice_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'DiceLoss', 'MeanIoU', 'UNet3D')
def test_pce_loss(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'PixelWiseCrossEntropyLoss', 'MeanIoU', 'UNet3D',
weight_map=True)
def test_residual_unet(self, tmpdir, capsys, train_config):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config, 'CrossEntropyLoss', 'MeanIoU', 'ResidualUNet3D')
def test_2d_unet(self, tmpdir, capsys, train_config_2d):
with capsys.disabled():
assert_train_save_load(tmpdir, train_config_2d, 'CrossEntropyLoss', 'MeanIoU', 'UNet2D',
shape=(3, 1, 128, 128))
def assert_train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map=False, shape=(3, 64, 64, 64)):
    """Runs a train/save/load round-trip and checks the reloaded trainer keeps
    the configured epoch/iteration/logging settings."""
    cfg = train_config['trainer']
    expected = [
        ('num_iterations', cfg['iters']),
        ('max_num_epochs', cfg['epochs']),
        ('log_after_iters', cfg['log_after_iters']),
        ('validate_after_iters', cfg['validate_after_iters']),
        ('max_num_iterations', cfg['iters']),
    ]

    trainer = _train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map, shape)

    for attribute, value in expected:
        assert getattr(trainer, attribute) == value
def _train_save_load(tmpdir, train_config, loss, val_metric, model, weight_map, shape):
    """Builds a trainer from the config, fits it briefly, then reloads it.

    Args:
        tmpdir: directory for checkpoints and the random HDF5 datasets.
        train_config: base config dict; mutated in place with model/device/loss.
        loss: name of the loss criterion.
        val_metric: name of the evaluation metric.
        model: name of the model architecture.
        weight_map: whether to expose a per-pixel weight map to the loss.
        shape: shape of the random raw dataset.

    Returns:
        The trainer restored from 'last_checkpoint.pytorch'.
    """
    # These losses require the network to end with a sigmoid
    # (final_sigmoid=True below) rather than a softmax.
    binary_loss = loss in ['BCEWithLogitsLoss', 'DiceLoss', 'BCEDiceLoss', 'GeneralizedDiceLoss']

    device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')

    train_config['model']['name'] = model
    train_config.update({
        # get device to train on
        'device': device,
        'loss': {'name': loss, 'weight': np.random.rand(2).astype(np.float32), 'pos_weight': 3.},
        'eval_metric': {'name': val_metric}
    })
    train_config['model']['final_sigmoid'] = binary_loss

    if weight_map:
        train_config['loaders']['weight_internal_path'] = 'weight_map'

    loss_criterion = get_loss_criterion(train_config)
    eval_criterion = get_evaluation_metric(train_config)
    model = get_model(train_config)
    model = model.to(device)

    if loss in ['BCEWithLogitsLoss']:
        # BCE needs float targets instead of integer class labels.
        label_dtype = 'float32'
        train_config['loaders']['train']['transformer']['label'][0]['dtype'] = label_dtype
        train_config['loaders']['val']['transformer']['label'][0]['dtype'] = label_dtype

    # Random HDF5 fixtures for the train and validation loaders.
    train = _create_random_dataset(shape, binary_loss)
    val = _create_random_dataset(shape, binary_loss)
    train_config['loaders']['train']['file_paths'] = [train]
    train_config['loaders']['val']['file_paths'] = [val]

    loaders = get_train_loaders(train_config)

    optimizer = _create_optimizer(train_config, model)

    lr_scheduler = _create_lr_scheduler(train_config, optimizer)

    formatter = DefaultTensorboardFormatter()
    # NOTE(review): validate_after_iters is fed from 'log_after_iters' -- this
    # only matches the asserts upstream if the config keeps the two equal.
    trainer = UNet3DTrainer(model, optimizer, lr_scheduler,
                            loss_criterion, eval_criterion,
                            device, loaders, tmpdir,
                            max_num_epochs=train_config['trainer']['epochs'],
                            log_after_iters=train_config['trainer']['log_after_iters'],
                            validate_after_iters=train_config['trainer']['log_after_iters'],
                            max_num_iterations=train_config['trainer']['iters'],
                            tensorboard_formatter=formatter)
    trainer.fit()

    # test loading the trainer from the checkpoint
    trainer = UNet3DTrainer.from_checkpoint(os.path.join(tmpdir, 'last_checkpoint.pytorch'),
                                            model, optimizer, lr_scheduler,
                                            loss_criterion, eval_criterion,
                                            loaders, tensorboard_formatter=formatter)
    return trainer
def _create_random_dataset(shape, channel_per_class):
    """Writes a temporary HDF5 file with random 'raw'/'label'/'weight_map' datasets.

    Args:
        shape: shape of the 'raw' dataset (3D, or 4D with a leading channel dim).
        channel_per_class: when True, the label gets an extra leading dim of 2.

    Returns:
        The path of the created HDF5 file (caller is responsible for deletion).
    """
    tmp = NamedTemporaryFile(delete=False)
    # Close the OS handle right away: it was leaking, and on Windows h5py
    # cannot reopen a file that is still held open.  delete=False keeps the
    # path valid after closing.
    tmp.close()

    with h5py.File(tmp.name, 'w') as f:
        l_shape = w_shape = shape
        # make sure that label and weight tensors are 3D
        if len(shape) == 4:
            l_shape = shape[1:]
            w_shape = shape[1:]

        if channel_per_class:
            l_shape = (2,) + l_shape

        f.create_dataset('raw', data=np.random.rand(*shape))
        f.create_dataset('label', data=np.random.randint(0, 2, l_shape))
        f.create_dataset('weight_map', data=np.random.rand(*w_shape))

    return tmp.name
| 43.423358 | 115 | 0.676752 |
795a0431dbbb9c2fbf6e306562c76be4ae711a7a | 84 | py | Python | gluoncv/auto/estimators/faster_rcnn/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | gluoncv/auto/estimators/faster_rcnn/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | gluoncv/auto/estimators/faster_rcnn/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z | """R-CNN Estimator implementations"""
from .faster_rcnn import FasterRCNNEstimator
| 21 | 44 | 0.809524 |
795a057a07a36b0d623981f5810cf11f5de902d0 | 7,432 | py | Python | Plugins/UnrealEnginePython/Binaries/Win64/Lib/site-packages/tensorflow/contrib/keras/api/keras/layers/__init__.py | JustinACoder/H22-GR3-UnrealAI | 361eb9ef1147f8a2991e5f98c4118cd823184adf | [
"MIT"
] | 6 | 2022-02-04T18:12:24.000Z | 2022-03-21T23:57:12.000Z | Lib/site-packages/tensorflow/contrib/keras/api/keras/layers/__init__.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/tensorflow/contrib/keras/api/keras/layers/__init__.py | shfkdroal/Robot-Learning-in-Mixed-Adversarial-and-Collaborative-Settings | 1fa4cd6a566c8745f455fc3d2273208f21f88ced | [
"bzip2-1.0.6"
] | 1 | 2022-02-08T03:53:23.000Z | 2022-02-08T03:53:23.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Generic layers.
# pylint: disable=g-bad-import-order
from tensorflow.python.keras.engine.base_layer import InputSpec
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
# Advanced activations.
from tensorflow.python.keras.layers.advanced_activations import LeakyReLU
from tensorflow.python.keras.layers.advanced_activations import PReLU
from tensorflow.python.keras.layers.advanced_activations import ELU
from tensorflow.python.keras.layers.advanced_activations import ThresholdedReLU
# Convolution layers.
from tensorflow.python.keras.layers.convolutional import Conv1D
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.keras.layers.convolutional import Conv3D
from tensorflow.python.keras.layers.convolutional import Conv2DTranspose
from tensorflow.python.keras.layers.convolutional import Conv3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConv2D
# Convolution layer aliases.
from tensorflow.python.keras.layers.convolutional import Convolution1D
from tensorflow.python.keras.layers.convolutional import Convolution2D
from tensorflow.python.keras.layers.convolutional import Convolution3D
from tensorflow.python.keras.layers.convolutional import Convolution2DTranspose
from tensorflow.python.keras.layers.convolutional import Convolution3DTranspose
from tensorflow.python.keras.layers.convolutional import SeparableConvolution2D
# Image processing layers.
from tensorflow.python.keras.layers.convolutional import UpSampling1D
from tensorflow.python.keras.layers.convolutional import UpSampling2D
from tensorflow.python.keras.layers.convolutional import UpSampling3D
from tensorflow.python.keras.layers.convolutional import ZeroPadding1D
from tensorflow.python.keras.layers.convolutional import ZeroPadding2D
from tensorflow.python.keras.layers.convolutional import ZeroPadding3D
from tensorflow.python.keras.layers.convolutional import Cropping1D
from tensorflow.python.keras.layers.convolutional import Cropping2D
from tensorflow.python.keras.layers.convolutional import Cropping3D
# Convolutional-recurrent layers.
from tensorflow.python.keras.layers.convolutional_recurrent import ConvLSTM2D
# Core layers.
from tensorflow.python.keras.layers.core import Masking
from tensorflow.python.keras.layers.core import Dropout
from tensorflow.python.keras.layers.core import SpatialDropout1D
from tensorflow.python.keras.layers.core import SpatialDropout2D
from tensorflow.python.keras.layers.core import SpatialDropout3D
from tensorflow.python.keras.layers.core import Activation
from tensorflow.python.keras.layers.core import Reshape
from tensorflow.python.keras.layers.core import Permute
from tensorflow.python.keras.layers.core import Flatten
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.layers.core import Lambda
from tensorflow.python.keras.layers.core import Dense
from tensorflow.python.keras.layers.core import ActivityRegularization
# Embedding layers.
from tensorflow.python.keras.layers.embeddings import Embedding
# Locally-connected layers.
from tensorflow.python.keras.layers.local import LocallyConnected1D
from tensorflow.python.keras.layers.local import LocallyConnected2D
# Merge layers.
from tensorflow.python.keras.layers.merge import Add
from tensorflow.python.keras.layers.merge import Multiply
from tensorflow.python.keras.layers.merge import Average
from tensorflow.python.keras.layers.merge import Maximum
from tensorflow.python.keras.layers.merge import Concatenate
from tensorflow.python.keras.layers.merge import Dot
from tensorflow.python.keras.layers.merge import add
from tensorflow.python.keras.layers.merge import multiply
from tensorflow.python.keras.layers.merge import average
from tensorflow.python.keras.layers.merge import maximum
from tensorflow.python.keras.layers.merge import concatenate
from tensorflow.python.keras.layers.merge import dot
# Noise layers.
from tensorflow.python.keras.layers.noise import AlphaDropout
from tensorflow.python.keras.layers.noise import GaussianNoise
from tensorflow.python.keras.layers.noise import GaussianDropout
# Normalization layers.
from tensorflow.python.keras.layers.normalization import BatchNormalization
# Pooling layers.
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling1D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling2D
from tensorflow.python.keras.layers.pooling import GlobalAveragePooling3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPooling3D
# Pooling layer aliases.
from tensorflow.python.keras.layers.pooling import MaxPool1D
from tensorflow.python.keras.layers.pooling import MaxPool2D
from tensorflow.python.keras.layers.pooling import MaxPool3D
from tensorflow.python.keras.layers.pooling import AvgPool1D
from tensorflow.python.keras.layers.pooling import AvgPool2D
from tensorflow.python.keras.layers.pooling import AvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool1D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool2D
from tensorflow.python.keras.layers.pooling import GlobalAvgPool3D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool1D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool2D
from tensorflow.python.keras.layers.pooling import GlobalMaxPool3D
# Recurrent layers.
from tensorflow.python.keras.layers.recurrent import SimpleRNN
from tensorflow.python.keras.layers.recurrent import GRU
from tensorflow.python.keras.layers.recurrent import LSTM
# Wrapper functions
from tensorflow.python.keras.layers.wrappers import Wrapper
from tensorflow.python.keras.layers.wrappers import Bidirectional
from tensorflow.python.keras.layers.wrappers import TimeDistributed
del absolute_import
del division
del print_function
| 49.879195 | 81 | 0.832347 |
795a05d6b94684f6dd1538ee422b11c97eeb5880 | 1,218 | py | Python | tests/test_sort.py | michael-kuhlmann/lazy_dataset | 8faf09b7c8a9ff8ce1eaf14285853cafcf1e2cd1 | [
"MIT"
] | 14 | 2019-03-26T15:30:44.000Z | 2021-12-19T00:56:51.000Z | tests/test_sort.py | michael-kuhlmann/lazy_dataset | 8faf09b7c8a9ff8ce1eaf14285853cafcf1e2cd1 | [
"MIT"
] | 36 | 2019-03-04T15:40:09.000Z | 2022-01-17T16:57:42.000Z | tests/test_sort.py | michael-kuhlmann/lazy_dataset | 8faf09b7c8a9ff8ce1eaf14285853cafcf1e2cd1 | [
"MIT"
] | 7 | 2019-03-05T16:25:10.000Z | 2021-05-27T16:52:23.000Z | import pytest
import lazy_dataset
def get_examples():
    """Return example dicts keyed by id, each tagged with its own id.

    The insertion order (b, c, a, e, d) is deliberately unsorted so the
    sort tests have something to reorder.
    """
    values = {'b': 2, 'c': 3, 'a': 1, 'e': 5, 'd': 4}
    return {
        key: {'value': value, 'example_id': key}
        for key, value in values.items()
    }
def get_dataset():
    """Wrap the example dicts in a fresh lazy_dataset Dataset."""
    return lazy_dataset.new(get_examples())
def test_sort():
    """sort() orders by a key function, or by example id when none is given."""
    ds = get_dataset()
    # Baseline: examples come back in insertion order.
    assert [ex['example_id'] for ex in ds] == 'b c a e d'.split()

    # Sorting by the 'value' field yields alphabetical ids here by design.
    by_value = ds.sort(lambda example: example['value'])
    assert [ex['example_id'] for ex in by_value] == 'a b c d e'.split()

    # Without a key function, sort falls back to the example ids.
    by_id = get_dataset().sort()
    assert [ex['example_id'] for ex in by_id] == 'a b c d e'.split()
def test_sort_reverse():
    """sort(..., reverse=True) yields descending order by the key function."""
    ds = get_dataset()
    # Baseline: examples come back in insertion order.
    assert [ex['example_id'] for ex in ds] == 'b c a e d'.split()

    descending = ds.sort(lambda example: example['value'], reverse=True)
    assert [ex['example_id'] for ex in descending] == 'e d c b a'.split()
| 22.555556 | 49 | 0.584565 |
795a06602a90a8394c0012a6ae1b2e7a027c4d7e | 4,666 | py | Python | sample_onnx.py | Kazuhito00/NanoDet-Colaboratory-Training-Sample | 38e51c5d08697395e9778ea4bd0810000e1058d7 | [
"Apache-2.0"
] | 13 | 2021-09-20T09:21:01.000Z | 2021-11-20T07:40:04.000Z | sample_onnx.py | Kazuhito00/NanoDet-Colaboratory-Training-Sample | 38e51c5d08697395e9778ea4bd0810000e1058d7 | [
"Apache-2.0"
] | null | null | null | sample_onnx.py | Kazuhito00/NanoDet-Colaboratory-Training-Sample | 38e51c5d08697395e9778ea4bd0810000e1058d7 | [
"Apache-2.0"
] | 1 | 2021-11-05T15:36:37.000Z | 2021-11-05T15:36:37.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import time
import argparse
import cv2
from nanodet import NanoDetONNX
def get_args():
    """Parse the command-line options for the NanoDet ONNX demo.

    Returns:
        argparse.Namespace with capture, model and threshold settings.
    """
    ap = argparse.ArgumentParser()

    # Capture source: webcam index, or a movie / still image path.
    ap.add_argument("--device", type=int, default=0)
    ap.add_argument("--movie", type=str, default=None)
    ap.add_argument("--image", type=str, default=None)
    ap.add_argument("--width", help='cap width', type=int, default=960)
    ap.add_argument("--height", help='cap height', type=int, default=540)

    # Model file and its fixed input resolution.
    ap.add_argument(
        "--model",
        type=str,
        default='04.model/workspace/nanodet_m/nanodet.onnx',
    )
    ap.add_argument(
        '--input_shape',
        type=int,
        default=320,
        choices=[320, 416],
    )

    # Post-processing thresholds.
    ap.add_argument(
        '--score_th',
        type=float,
        default=0.5,
        help='Class confidence',
    )
    ap.add_argument(
        '--nms_th',
        type=float,
        default=0.6,
        help='NMS IoU threshold',
    )

    return ap.parse_args()
def main():
    """Entry point: run NanoDet inference on a still image or a video stream.

    With --image, runs a single inference and shows the annotated result;
    otherwise loops over camera/movie frames until EOF or ESC is pressed.
    """
    # Argument parsing ########################################################
    args = get_args()

    cap_device = args.device
    cap_width = args.width
    cap_height = args.height
    if args.movie is not None:
        # A movie path replaces the webcam index as the capture source.
        cap_device = args.movie
    image_path = args.image

    model_path = args.model
    input_shape = args.input_shape
    score_th = args.score_th
    nms_th = args.nms_th

    # Camera setup ############################################################
    # Only opened in streaming mode; still-image mode never touches the camera.
    if image_path is None:
        cap = cv2.VideoCapture(cap_device)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, cap_width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cap_height)

    # Model loading ###########################################################
    nanodet = NanoDetONNX(
        model_path=model_path,
        input_shape=input_shape,
        class_score_th=score_th,
        nms_th=nms_th,
    )

    if image_path is not None:
        image = cv2.imread(image_path)

        # Run inference #######################################################
        start_time = time.time()
        bboxes, scores, class_ids = nanodet.inference(image)
        elapsed_time = time.time() - start_time
        print('Elapsed time', elapsed_time)

        # Draw the result #####################################################
        image = draw_debug(image, elapsed_time, bboxes, scores, class_ids)

        cv2.imshow('NanoDet Demo', image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    else:
        while True:
            # Elapsed time includes capture + inference for this frame.
            start_time = time.time()

            # Capture a frame #################################################
            ret, frame = cap.read()
            if not ret:
                break
            debug_image = copy.deepcopy(frame)

            # Run inference ###################################################
            bboxes, scores, class_ids = nanodet.inference(frame)

            elapsed_time = time.time() - start_time

            # Debug drawing
            debug_image = draw_debug(
                debug_image,
                elapsed_time,
                bboxes,
                scores,
                class_ids,
            )

            # Key handling (ESC: quit) ########################################
            key = cv2.waitKey(1)
            if key == 27:  # ESC
                break

            # Show the frame ##################################################
            cv2.imshow('NanoDet Sample', debug_image)

        cap.release()
        cv2.destroyAllWindows()
def draw_debug(image, elapsed_time, bboxes, scores, class_ids):
    """Return a copy of *image* annotated with detections and timing.

    Args:
        image: BGR image to annotate (not modified in place).
        elapsed_time: inference time in seconds, shown in the corner.
        bboxes / scores / class_ids: parallel detection results.
    """
    canvas = copy.deepcopy(image)

    for bbox, score, class_id in zip(bboxes, scores, class_ids):
        x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]

        # Bounding box
        canvas = cv2.rectangle(
            canvas,
            (x1, y1),
            (x2, y2),
            (0, 255, 0),
            thickness=2,
        )

        # Class id and score label, just above the box.
        label = '%s:%s' % (str(class_id), '%.2f' % score)
        canvas = cv2.putText(
            canvas,
            label,
            (bbox[0], bbox[1] - 10),
            cv2.FONT_HERSHEY_SIMPLEX,
            0.7,
            (0, 255, 0),
            thickness=2,
        )

    # Inference time, rendered in milliseconds.
    caption = 'Elapsed time:' + '%.0f' % (elapsed_time * 1000)
    caption = caption + 'ms'
    canvas = cv2.putText(
        canvas,
        caption,
        (10, 30),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.7,
        (0, 255, 0),
        thickness=2,
    )

    return canvas
if __name__ == '__main__':
main()
| 26.213483 | 79 | 0.479854 |
795a068182eca67ab0b70738e73a34be45ec3eeb | 15,445 | py | Python | astropy/io/fits/tests/test_fitstime.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | null | null | null | astropy/io/fits/tests/test_fitstime.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | 1 | 2018-05-08T13:59:56.000Z | 2018-05-08T13:59:56.000Z | astropy/io/fits/tests/test_fitstime.py | jbkalmbach/astropy | 88ae8c615533efd1e60de4aded204943f66f881c | [
"BSD-3-Clause"
] | 1 | 2019-10-09T21:30:57.000Z | 2019-10-09T21:30:57.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import pytest
import numpy as np
from . import FitsTestCase
from ..fitstime import GLOBAL_TIME_INFO, time_to_fits, is_time_column_keyword
from ....coordinates import EarthLocation
from ....io import fits
from ....table import Table, QTable
from ....time import Time, TimeDelta
from ....time.core import BARYCENTRIC_SCALES
from ....time.formats import FITS_DEPRECATED_SCALES
from ....tests.helper import catch_warnings
class TestFitsTime(FitsTestCase):
    """Round-trip tests for FITS time columns and global time keywords."""

    def setup_class(self):
        # Two ISO-8601 timestamps; the first carries sub-second precision to
        # exercise full-precision serialization.
        self.time = np.array(['1999-01-01T00:00:00.123456789', '2010-01-01T00:00:00'])

    def test_is_time_column_keyword(self):
        """Classification of FITS keywords as per-column time keywords."""
        # Time column keyword without column number
        assert is_time_column_keyword('TRPOS') is False

        # Global time column keyword
        assert is_time_column_keyword('TIMESYS') is False

        # Valid time column keyword
        assert is_time_column_keyword('TRPOS12') is True

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_time_to_fits_loc(self, table_types):
        """
        Test all the unusual conditions for locations of ``Time``
        columns in a ``Table``.
        """
        t = table_types()
        t['a'] = Time(self.time, format='isot', scale='utc')
        t['b'] = Time(self.time, format='isot', scale='tt')

        # Check that vectorized location raises an exception
        t['a'].location = EarthLocation([1,2], [2,3], [3,4])

        with pytest.raises(ValueError) as err:
            table, hdr = time_to_fits(t)
        assert 'Vectorized Location of Time Column' in str(err.value)

        # Check that multiple Time columns with different locations raise an exception
        t['a'].location = EarthLocation(1, 2, 3)
        t['b'].location = EarthLocation(2, 3, 4)

        with pytest.raises(ValueError) as err:
            table, hdr = time_to_fits(t)
        assert 'Multiple Time Columns with different geocentric' in str(err.value)

        # Check that Time column with no location specified will assume global location
        t['b'].location = None

        with catch_warnings() as w:
            table, hdr = time_to_fits(t)
            assert len(w) == 1
            assert str(w[0].message).startswith('Time Column "b" has no specified '
                                                'location, but global Time Position '
                                                'is present')

        # Check that multiple Time columns with same location can be written
        t['b'].location = EarthLocation(1, 2, 3)

        with catch_warnings() as w:
            table, hdr = time_to_fits(t)
            assert len(w) == 0

        # Check compatibility of Time Scales and Reference Positions
        # (barycentric scales cannot carry a topocentric location).
        for scale in BARYCENTRIC_SCALES:
            t.replace_column('a', getattr(t['a'], scale))
            with catch_warnings() as w:
                table, hdr = time_to_fits(t)
                assert len(w) == 1
                assert str(w[0].message).startswith('Earth Location "TOPOCENTER" '
                                                    'for Time Column')

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_time_to_fits_header(self, table_types):
        """
        Test the header and metadata returned by ``time_to_fits``.
        """
        t = table_types()
        t['a'] = Time(self.time, format='isot', scale='utc',
                      location=EarthLocation(-2446354,
                                             4237210, 4077985, unit='m'))
        t['b'] = Time([1,2], format='cxcsec', scale='tt')

        # Expected observatory-position keywords derived from column 'a'.
        ideal_col_hdr = {'OBSGEO-X' : t['a'].location.x.value,
                         'OBSGEO-Y' : t['a'].location.y.value,
                         'OBSGEO-Z' : t['a'].location.z.value}

        table, hdr = time_to_fits(t)

        # Check the global time keywords in hdr
        for key, value in GLOBAL_TIME_INFO.items():
            assert hdr[key] == value[0]
            assert hdr.comments[key] == value[1]
            hdr.remove(key)

        for key, value in ideal_col_hdr.items():
            assert hdr[key] == value
            hdr.remove(key)

        # Check the column-specific time metadata
        coord_info = table.meta['__coordinate_columns__']
        for colname in coord_info:
            assert coord_info[colname]['coord_type'] == t[colname].scale.upper()
            assert coord_info[colname]['coord_unit'] == 'd'

        assert coord_info['a']['time_ref_pos'] == 'TOPOCENTER'

        # Everything expected has been removed above, so the header is empty.
        assert len(hdr) == 0

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_fits_to_time_meta(self, table_types):
        """
        Test that the relevant global time metadata is read into
        ``Table.meta`` as ``Time``.
        """
        t = table_types()
        t['a'] = Time(self.time, format='isot', scale='utc')
        t.meta['DATE'] = '1999-01-01T00:00:00'
        t.meta['MJD-OBS'] = 56670

        # Test for default write behaviour (full precision) and read it
        # back using native astropy objects; thus, ensure its round-trip
        t.write(self.temp('time.fits'), format='fits', overwrite=True)
        tm = table_types.read(self.temp('time.fits'), format='fits',
                              astropy_native=True)

        # Test DATE
        assert isinstance(tm.meta['DATE'], Time)
        assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'
        assert tm.meta['DATE'].format == 'fits'
        # Default time scale according to the FITS standard is UTC
        assert tm.meta['DATE'].scale == 'utc'

        # Test MJD-xxx
        assert isinstance(tm.meta['MJD-OBS'], Time)
        assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
        assert tm.meta['MJD-OBS'].format == 'mjd'
        assert tm.meta['MJD-OBS'].scale == 'utc'

        # Explicitly specified Time Scale ('ET' is a FITS-deprecated scale).
        t.meta['TIMESYS'] = 'ET'

        t.write(self.temp('time.fits'), format='fits', overwrite=True)
        tm = table_types.read(self.temp('time.fits'), format='fits',
                              astropy_native=True)

        # Test DATE
        assert isinstance(tm.meta['DATE'], Time)
        assert tm.meta['DATE'].value == t.meta['DATE'] + '(UTC)'
        assert tm.meta['DATE'].scale == 'utc'

        # Test MJD-xxx
        assert isinstance(tm.meta['MJD-OBS'], Time)
        assert tm.meta['MJD-OBS'].value == t.meta['MJD-OBS']
        assert tm.meta['MJD-OBS'].scale == FITS_DEPRECATED_SCALES[t.meta['TIMESYS']]

        # Test for conversion of time data to its value, as defined by its format
        t['a'].info.serialize_method['fits'] = 'formatted_value'
        t.write(self.temp('time.fits'), format='fits', overwrite=True)
        tm = table_types.read(self.temp('time.fits'), format='fits')

        # Test DATE
        assert not isinstance(tm.meta['DATE'], Time)
        assert tm.meta['DATE'] == t.meta['DATE']

        # Test MJD-xxx
        assert not isinstance(tm.meta['MJD-OBS'], Time)
        assert tm.meta['MJD-OBS'] == t.meta['MJD-OBS']

        assert (tm['a'] == t['a'].value).all()

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_time_loc_unit(self, table_types):
        """
        Test that ``location`` specified by using any valid unit
        (length/angle) in ``Time`` columns gets stored in FITS
        as ITRS Cartesian coordinates (X, Y, Z), each in m.
        Test that it round-trips through FITS.
        """
        t = table_types()
        t['a'] = Time(self.time, format='isot', scale='utc',
                      location=EarthLocation(1,2,3, unit='km'))

        table, hdr = time_to_fits(t)

        # Check the header
        # NOTE(review): the comparisons below are bare ``==`` expressions with
        # no ``assert`` — they can never fail.  Candidates for a follow-up fix.
        hdr['OBSGEO-X'] == t['a'].location.x.to_value(unit='m')
        hdr['OBSGEO-Y'] == t['a'].location.y.to_value(unit='m')
        hdr['OBSGEO-Z'] == t['a'].location.z.to_value(unit='m')

        t.write(self.temp('time.fits'), format='fits', overwrite=True)
        tm = table_types.read(self.temp('time.fits'), format='fits',
                              astropy_native=True)

        # Check the round-trip of location
        # NOTE(review): same issue — bare comparisons, no ``assert``.
        tm['a'].location == t['a'].location
        tm['a'].location.x.value == t['a'].location.x.to_value(unit='m')
        tm['a'].location.y.value == t['a'].location.y.to_value(unit='m')
        tm['a'].location.z.value == t['a'].location.z.to_value(unit='m')

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_io_time_read_fits(self, table_types):
        """
        Test that FITS table with time columns (standard compliant)
        can be read by io.fits as a table with Time columns.
        This tests the following:

        1. The special-case where a column has the name 'TIME' and a
           time unit
        2. Time from Epoch (Reference time) is appropriately converted.
        3. Coordinate columns (corresponding to coordinate keywords in the header)
           other than time, that is, spatial coordinates, are not mistaken
           to be time.
        """
        filename = self.data('chandra_time.fits')
        tm = table_types.read(filename, astropy_native=True)

        # Test case 1
        assert isinstance(tm['time'], Time)
        assert tm['time'].scale == 'tt'
        assert tm['time'].format == 'mjd'

        non_native = table_types.read(filename)

        # Test case 2
        # Reconstruct the absolute times by hand from the reference epoch.
        ref_time = Time(non_native.meta['MJDREF'], format='mjd',
                        scale=non_native.meta['TIMESYS'].lower())
        delta_time = TimeDelta(non_native['time'])
        assert (ref_time + delta_time == tm['time']).all()

        # Test case 3
        for colname in ['chipx', 'chipy', 'detx', 'dety', 'x', 'y']:
            assert not isinstance(tm[colname], Time)

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_io_time_read_fits_datetime(self, table_types):
        """
        Test that ISO-8601 Datetime String Columns are read correctly.
        """
        # Datetime column
        c = fits.Column(name='datetime', format='A29', coord_type='TCG',
                        time_ref_pos='GEOCENTER', array=self.time)

        # Explicitly create a FITS Binary Table
        bhdu = fits.BinTableHDU.from_columns([c])
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        tm = table_types.read(self.temp('time.fits'), astropy_native=True)

        assert isinstance(tm['datetime'], Time)
        assert tm['datetime'].scale == 'tcg'
        assert tm['datetime'].format == 'fits'
        assert (tm['datetime'] == self.time).all()

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_io_time_read_fits_location(self, table_types):
        """
        Test that geocentric/geodetic observatory position is read
        properly, as and when it is specified.
        """
        # Datetime column
        c = fits.Column(name='datetime', format='A29', coord_type='TT',
                        time_ref_pos='TOPOCENTER', array=self.time)

        # Observatory position in ITRS Cartesian coordinates (geocentric)
        cards = [('OBSGEO-X', -2446354), ('OBSGEO-Y', 4237210),
                 ('OBSGEO-Z', 4077985)]

        # Explicitly create a FITS Binary Table
        bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        tm = table_types.read(self.temp('time.fits'), astropy_native=True)

        assert isinstance(tm['datetime'], Time)
        assert tm['datetime'].location.x.value == -2446354
        assert tm['datetime'].location.y.value == 4237210
        assert tm['datetime'].location.z.value == 4077985

        # Observatory position in geodetic coordinates
        cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]

        # Explicitly create a FITS Binary Table
        bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        tm = table_types.read(self.temp('time.fits'), astropy_native=True)

        assert isinstance(tm['datetime'], Time)
        assert tm['datetime'].location.lon.value == 0
        assert tm['datetime'].location.lat.value == 0
        assert tm['datetime'].location.height.value == 0

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_io_time_read_fits_scale(self, table_types):
        """
        Test handling of 'GPS' and 'LOCAL' time scales which are
        recognized by the FITS standard but are not native to astropy.
        """
        # GPS scale column
        gps_time = np.array([630720013, 630720014])
        c = fits.Column(name='gps_time', format='D', unit='s', coord_type='GPS',
                        coord_unit='s', time_ref_pos='TOPOCENTER', array=gps_time)

        cards = [('OBSGEO-L', 0), ('OBSGEO-B', 0), ('OBSGEO-H', 0)]

        bhdu = fits.BinTableHDU.from_columns([c], header=fits.Header(cards))
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        # Reading a GPS column warns and maps it onto the TAI scale.
        with catch_warnings() as w:
            tm = table_types.read(self.temp('time.fits'), astropy_native=True)
            assert len(w) == 1
            assert 'FITS recognized time scale value "GPS"' in str(w[0].message)

        assert isinstance(tm['gps_time'], Time)
        assert tm['gps_time'].format == 'gps'
        assert tm['gps_time'].scale == 'tai'
        assert (tm['gps_time'].value == gps_time).all()

        # LOCAL scale column
        local_time = np.array([1, 2])
        c = fits.Column(name='local_time', format='D', unit='d',
                        coord_type='LOCAL', coord_unit='d',
                        time_ref_pos='RELOCATABLE', array=local_time)

        bhdu = fits.BinTableHDU.from_columns([c])
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        tm = table_types.read(self.temp('time.fits'), astropy_native=True)

        assert isinstance(tm['local_time'], Time)
        assert tm['local_time'].format == 'mjd'
        assert tm['local_time'].scale == 'local'
        assert (tm['local_time'].value == local_time).all()

    @pytest.mark.parametrize('table_types', (Table, QTable))
    def test_io_time_read_fits_location_warnings(self, table_types):
        """
        Test warnings for time column reference position.
        """
        # Time reference position "TOPOCENTER" without corresponding
        # observatory position.
        c = fits.Column(name='datetime', format='A29', coord_type='TT',
                        time_ref_pos='TOPOCENTER', array=self.time)

        bhdu = fits.BinTableHDU.from_columns([c])
        bhdu.writeto(self.temp('time.fits'), overwrite=True)

        with catch_warnings() as w:
            tm = table_types.read(self.temp('time.fits'), astropy_native=True)
            assert len(w) == 1
            assert ('observatory position is not properly specified' in
                    str(w[0].message))

        # Default value for time reference position is "TOPOCENTER"
        c = fits.Column(name='datetime', format='A29', coord_type='TT',
                        array=self.time)

        bhdu = fits.BinTableHDU.from_columns([c])
        bhdu.writeto(self.temp('time.fits'), overwrite=True)
        with catch_warnings() as w:
            tm = table_types.read(self.temp('time.fits'), astropy_native=True)
            assert len(w) == 1
            assert ('"TRPOSn" is not specified. The default value for '
                    'it is "TOPOCENTER"' in str(w[0].message))
| 40.751979 | 87 | 0.593784 |
795a0704fd3fa35114687135a9a67c2348b1d3c6 | 6,210 | py | Python | python/services/iam/alpha/workload_identity_pool.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/iam/alpha/workload_identity_pool.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | python/services/iam/alpha/workload_identity_pool.py | trodge/declarative-resource-client-library | 2cb7718a5074776b3113cc18a7483b54022238f3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from connector import channel
from google3.cloud.graphite.mmv2.services.google.iam import workload_identity_pool_pb2
from google3.cloud.graphite.mmv2.services.google.iam import (
workload_identity_pool_pb2_grpc,
)
from typing import List
class WorkloadIdentityPool(object):
    """DCL wrapper for an IAM (alpha) workload identity pool resource.

    Mirrors the fields of the ``IamAlphaWorkloadIdentityPool`` proto and
    talks to the DCL service over gRPC to apply, delete and list the
    resource.
    """

    def __init__(
        self,
        name: str = None,
        display_name: str = None,
        description: str = None,
        state: str = None,
        disabled: bool = None,
        project: str = None,
        location: str = None,
        service_account_file: str = "",
    ):
        channel.initialize()
        self.name = name
        self.display_name = display_name
        self.description = description
        # Bug fix: ``state`` was accepted but silently discarded, leaving the
        # attribute unset until the first apply() refreshed it from the
        # server.  Store it so the object is fully populated up front.
        # (State is an output-only field; apply() will overwrite it.)
        self.state = state
        self.disabled = disabled
        self.project = project
        self.location = location
        self.service_account_file = service_account_file

    def apply(self):
        """Apply the desired state to the service and refresh local fields."""
        stub = workload_identity_pool_pb2_grpc.IamAlphaWorkloadIdentityPoolServiceStub(
            channel.Channel()
        )
        request = workload_identity_pool_pb2.ApplyIamAlphaWorkloadIdentityPoolRequest()
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.disabled):
            request.resource.disabled = Primitive.to_proto(self.disabled)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        request.service_account_file = self.service_account_file

        response = stub.ApplyIamAlphaWorkloadIdentityPool(request)

        # Refresh every field (including output-only ``state``) from the
        # server's view of the resource.
        self.name = Primitive.from_proto(response.name)
        self.display_name = Primitive.from_proto(response.display_name)
        self.description = Primitive.from_proto(response.description)
        self.state = WorkloadIdentityPoolStateEnum.from_proto(response.state)
        self.disabled = Primitive.from_proto(response.disabled)
        self.project = Primitive.from_proto(response.project)
        self.location = Primitive.from_proto(response.location)

    def delete(self):
        """Delete this resource on the service."""
        stub = workload_identity_pool_pb2_grpc.IamAlphaWorkloadIdentityPoolServiceStub(
            channel.Channel()
        )
        request = workload_identity_pool_pb2.DeleteIamAlphaWorkloadIdentityPoolRequest()
        request.service_account_file = self.service_account_file
        if Primitive.to_proto(self.name):
            request.resource.name = Primitive.to_proto(self.name)

        if Primitive.to_proto(self.display_name):
            request.resource.display_name = Primitive.to_proto(self.display_name)

        if Primitive.to_proto(self.description):
            request.resource.description = Primitive.to_proto(self.description)

        if Primitive.to_proto(self.disabled):
            request.resource.disabled = Primitive.to_proto(self.disabled)

        if Primitive.to_proto(self.project):
            request.resource.project = Primitive.to_proto(self.project)

        if Primitive.to_proto(self.location):
            request.resource.location = Primitive.to_proto(self.location)

        response = stub.DeleteIamAlphaWorkloadIdentityPool(request)

    @classmethod
    def list(self, project, location, service_account_file=""):
        """List all workload identity pools under project/location."""
        stub = workload_identity_pool_pb2_grpc.IamAlphaWorkloadIdentityPoolServiceStub(
            channel.Channel()
        )
        request = workload_identity_pool_pb2.ListIamAlphaWorkloadIdentityPoolRequest()
        request.service_account_file = service_account_file
        request.Project = project

        request.Location = location

        return stub.ListIamAlphaWorkloadIdentityPool(request).items

    def to_proto(self):
        """Convert this object to its proto message (unset fields skipped)."""
        resource = workload_identity_pool_pb2.IamAlphaWorkloadIdentityPool()
        if Primitive.to_proto(self.name):
            resource.name = Primitive.to_proto(self.name)
        if Primitive.to_proto(self.display_name):
            resource.display_name = Primitive.to_proto(self.display_name)
        if Primitive.to_proto(self.description):
            resource.description = Primitive.to_proto(self.description)
        if Primitive.to_proto(self.disabled):
            resource.disabled = Primitive.to_proto(self.disabled)
        if Primitive.to_proto(self.project):
            resource.project = Primitive.to_proto(self.project)
        if Primitive.to_proto(self.location):
            resource.location = Primitive.to_proto(self.location)
        return resource
class WorkloadIdentityPoolStateEnum(object):
    """Converts pool state values between short names and proto enum numbers."""

    @classmethod
    def to_proto(self, resource):
        """Map a short state name to its proto enum number.

        Falsy values (None, "") pass through unchanged.
        """
        if not resource:
            return resource
        enum_type = workload_identity_pool_pb2.IamAlphaWorkloadIdentityPoolStateEnum
        return enum_type.Value("IamAlphaWorkloadIdentityPoolStateEnum%s" % resource)

    @classmethod
    def from_proto(self, resource):
        """Map a proto enum number back to its short state name.

        Falsy values (None, 0) pass through unchanged.
        """
        if not resource:
            return resource
        enum_type = workload_identity_pool_pb2.IamAlphaWorkloadIdentityPoolStateEnum
        full_name = enum_type.Name(resource)
        # Strip the generated prefix to recover the short name.
        return full_name[len("IamAlphaWorkloadIdentityPoolStateEnum") :]
class Primitive(object):
    """Converts primitive values between DCL objects and proto messages."""

    @classmethod
    def to_proto(self, s):
        # Protos cannot carry None; substitute the proto string default.
        return s if s else ""

    @classmethod
    def from_proto(self, s):
        # Primitives come off the wire ready to use.
        return s
| 38.09816 | 88 | 0.700644 |
795a070d9e5c9845522e0da06ca8e772b3f1d6b1 | 2,503 | py | Python | main.py | JeyDi/GameOfLife | 963d2d084e82321eb814e07d146af9af4e6106ff | [
"MIT"
] | null | null | null | main.py | JeyDi/GameOfLife | 963d2d084e82321eb814e07d146af9af4e6106ff | [
"MIT"
] | null | null | null | main.py | JeyDi/GameOfLife | 963d2d084e82321eb814e07d146af9af4e6106ff | [
"MIT"
] | null | null | null | """
Game of Life: Object plain python implementation
Author: Andrea Guzzo (JeyDi)
"""
import time
import typer
import logging
from config import configure_logging, ROWS, COLUMNS, MAX_PROB
from src.board import GameBoard
from src.rules import game_status
def main(
    user: int = typer.Option(1, min=0, max=1),
    rows: int = typer.Option(40, min=10),
    columns: int = typer.Option(40, min=10),
    max_prob: int = typer.Option(2, min=2, max=9),
    max_tick: int = typer.Option(60, min=-1),
    sleep: int = typer.Option(1, min=1),
    verbosity: str = typer.Option("info"),
):
    """Run the Game of Life simulation.

    Args:
        user: 1 to prompt interactively for every parameter, 0 to use
            the command-line options as given.
        rows, columns: board dimensions.
        max_prob: a cell spawns alive with probability 1 / (max_prob + 1).
        max_tick: number of iterations to simulate (-1 for endless).
        sleep: seconds to wait between ticks.
        verbosity: logging verbosity level.
    """
    # Configure the logging
    configure_logging(verbosity, "./logs")

    # Interactive mode: ask the user for every parameter.
    # Bug fix: ``user`` is an int option but was compared to the string "1",
    # so the prompt branch could never execute.
    if user == 1:
        # Ask for the user value input
        rows = int(typer.prompt("Insert the number of rows"))
        columns = int(typer.prompt("Insert the number of columns"))
        max_prob = int(
            typer.prompt(
                "Insert the probability of spawning a new living cell"
            )
        )
        max_tick = int(
            typer.prompt(
                "Insert the number of iterations you want to observe (-1 for endless)"
            )
        )
        launch = typer.confirm("Do you want to launch the simulation?")
        if not launch:
            message = typer.style("Ok! Bye...", fg=typer.colors.RED, bold=True)
            typer.echo(message)
            raise typer.Abort()
        message = typer.style("Launching...", fg=typer.colors.GREEN, bold=True)
        typer.echo(message)

    # Fall back to the configured defaults when prompts returned 0.
    if rows == 0:
        rows = ROWS
    if columns == 0:
        columns = COLUMNS
    if max_prob == 0:
        max_prob = MAX_PROB
    tick = 0

    logging.info("Launching the game")
    logging.debug(f"Rows: {rows}")
    logging.debug(f"Columns: {columns}")
    logging.debug(f"Max_prob: {max_prob}")
    logging.debug(f"Max Ticks: {max_tick}")
    logging.debug(f"Verbosity: {verbosity}")
    # Bug fix: this debug line was mislabeled "Max Ticks" while printing
    # the sleep interval.
    logging.debug(f"Sleep: {sleep}")
    logging.debug(f"Alive probability: {1/(max_prob+1)}")

    # create a board:
    game_board = GameBoard(rows, columns, max_prob)

    # run the first iteration of the board:
    game_board.print_board(tick)

    # Update the game status for every tick.
    # Bug fix: with max_tick == -1 ("endless") the original condition
    # ``tick <= max_tick`` was false immediately and nothing ran; treat
    # -1 as "loop forever", matching the documented behavior.
    while max_tick == -1 or tick <= max_tick:
        logging.debug(f"Tick: {tick}")
        game_status(game_board, tick)
        time.sleep(sleep)
        tick += 1
        game_board.print_board(tick)
if __name__ == "__main__":
typer.run(main)
| 28.443182 | 86 | 0.610467 |
795a071d6d4d757db0d24007cdfe6916ce62eb59 | 2,290 | py | Python | 2019/24_rockem_sockem_robots/my_solution.py | erik-kristofer-anderson/codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
] | null | null | null | 2019/24_rockem_sockem_robots/my_solution.py | erik-kristofer-anderson/codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
] | 1 | 2019-07-27T15:42:25.000Z | 2019-07-27T15:42:25.000Z | 2019/24_rockem_sockem_robots/my_solution.py | erik-kristofer-anderson/Codewars | fda780f40d1a2d8c5210cfd6ccf81148444bc9e8 | [
"MIT"
def fight(robot_1, robot_2, tactics):
    """Simulate a turn-based robot fight and report the outcome.

    The faster robot attacks first (speed ties go to ``robot_1``).  The
    robots then alternate attacks, consuming their ``tactics`` lists
    front-to-back; each tactic name is looked up in *tactics* for its
    damage.  A robot whose health drops to 0 or below loses immediately.
    When both robots run out of tactics, the higher remaining health wins.

    Bug fix: the original popped tactic names directly off the callers'
    lists, mutating the input dicts; this version works on copies.

    Args:
        robot_1, robot_2: dicts with "name", "health", "speed" and
            "tactics" (a list of tactic names).
        tactics: mapping of tactic name -> damage dealt.

    Returns:
        "<name> has won the fight." or "The fight was a draw.".
    """
    # Decide attack order; a speed tie favors robot_1 (as before).
    if robot_1["speed"] >= robot_2["speed"]:
        first, second = robot_1, robot_2
    else:
        first, second = robot_2, robot_1

    # Copy the move queues so the callers' lists are never consumed.
    first_moves = list(first["tactics"])
    second_moves = list(second["tactics"])
    first_health = first["health"]
    second_health = second["health"]

    while first_moves or second_moves:
        if first_moves:
            second_health -= tactics[first_moves.pop(0)]
            if second_health <= 0:
                return first["name"] + " has won the fight."
        if second_moves:
            first_health -= tactics[second_moves.pop(0)]
            if first_health <= 0:
                return second["name"] + " has won the fight."

    # Both robots are out of tactics: compare remaining health.
    if first_health > second_health:
        return first["name"] + " has won the fight."
    if second_health > first_health:
        return second["name"] + " has won the fight."
    return "The fight was a draw."
| 34.69697 | 105 | 0.524891 |
795a071f5cbb9b2dbf4bc78a8dac457b3360ccd2 | 534 | py | Python | packages/python/plotly/plotly/validators/scattergl/line/_dash.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scattergl/line/_dash.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | packages/python/plotly/plotly/validators/scattergl/line/_dash.py | mastermind88/plotly.py | efa70710df1af22958e1be080e105130042f1839 | [
"MIT"
] | null | null | null | import _plotly_utils.basevalidators
class DashValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``scattergl.line.dash`` property."""

    def __init__(self, plotly_name="dash", parent_name="scattergl.line", **kwargs):
        # Pull the overridable settings out of kwargs before delegating,
        # falling back to the schema defaults.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop(
            "values", ["dash", "dashdot", "dot", "longdash", "longdashdot", "solid"]
        )
        super(DashValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
| 35.6 | 88 | 0.61236 |
795a078d14b1208ce1970cbea0cf52eae570c0c3 | 20,672 | py | Python | sympy/combinatorics/partitions.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | 2 | 2019-02-05T19:20:24.000Z | 2019-04-23T13:24:38.000Z | sympy/combinatorics/partitions.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | null | null | null | sympy/combinatorics/partitions.py | ricopicone/sympy | de27c97214d540247a35c8215c7920e9a46b54ed | [
"BSD-3-Clause"
] | 1 | 2019-10-15T10:55:42.000Z | 2019-10-15T10:55:42.000Z | from __future__ import print_function, division
from sympy.core import Basic, Dict, sympify
from sympy.core.compatibility import as_int, default_sort_key, range
from sympy.core.sympify import _sympify
from sympy.functions.combinatorial.numbers import bell
from sympy.matrices import zeros
from sympy.sets.sets import FiniteSet, Union
from sympy.utilities.iterables import flatten, group
from collections import defaultdict
class Partition(FiniteSet):
    """
    This class represents an abstract partition.
    A partition is a set of disjoint sets whose union equals a given set.
    See Also
    ========
    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions
    """
    # Lazily computed caches for ``rank`` and ``partition``.
    _rank = None
    _partition = None
    def __new__(cls, *partition):
        """
        Generates a new partition object.
        This method also verifies if the arguments passed are
        valid and raises a ValueError if they are not.
        Examples
        ========
        Creating Partition from Python lists:
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3])
        >>> a
        {{3}, {1, 2}}
        >>> a.partition
        [[1, 2], [3]]
        >>> len(a)
        2
        >>> a.members
        (1, 2, 3)
        Creating Partition from Python sets:
        >>> Partition({1, 2, 3}, {4, 5})
        {{4, 5}, {1, 2, 3}}
        Creating Partition from SymPy finite sets:
        >>> from sympy.sets.sets import FiniteSet
        >>> a = FiniteSet(1, 2, 3)
        >>> b = FiniteSet(4, 5)
        >>> Partition(a, b)
        {{4, 5}, {1, 2, 3}}
        """
        args = []
        dups = False
        for arg in partition:
            if isinstance(arg, list):
                as_set = set(arg)
                if len(as_set) < len(arg):
                    dups = True
                    break  # error below
                arg = as_set
            args.append(_sympify(arg))
        if not all(isinstance(part, FiniteSet) for part in args):
            raise ValueError(
                "Each argument to Partition should be " \
                "a list, set, or a FiniteSet")
        # sort so we have a canonical reference for RGS
        U = Union(*args)
        # Duplicates across blocks also make the union smaller than the
        # sum of the block sizes, hence the second condition.
        if dups or len(U) < sum(len(arg) for arg in args):
            raise ValueError("Partition contained duplicate elements.")
        obj = FiniteSet.__new__(cls, *args)
        obj.members = tuple(U)  # all partitioned elements, canonically ordered
        obj.size = len(U)  # total number of partitioned elements
        return obj
    def sort_key(self, order=None):
        """Return a canonical key that can be used for sorting.
        Ordering is based on the size and sorted elements of the partition
        and ties are broken with the rank.
        Examples
        ========
        >>> from sympy.utilities.iterables import default_sort_key
        >>> from sympy.combinatorics.partitions import Partition
        >>> from sympy.abc import x
        >>> a = Partition([1, 2])
        >>> b = Partition([3, 4])
        >>> c = Partition([1, x])
        >>> d = Partition(list(range(4)))
        >>> l = [d, b, a + 1, a, c]
        >>> l.sort(key=default_sort_key); l
        [{{1, 2}}, {{1}, {2}}, {{1, x}}, {{3, 4}}, {{0, 1, 2, 3}}]
        """
        if order is None:
            members = self.members
        else:
            members = tuple(sorted(self.members,
                            key=lambda w: default_sort_key(w, order)))
        return tuple(map(default_sort_key, (self.size, members, self.rank)))
    @property
    def partition(self):
        """Return partition as a sorted list of lists.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> Partition([1], [2, 3]).partition
        [[1], [2, 3]]
        """
        if self._partition is None:
            self._partition = sorted([sorted(p, key=default_sort_key)
                                      for p in self.args])
        return self._partition
    def __add__(self, other):
        """
        Return a partition whose rank is ``other`` greater than current rank,
        (mod the maximum rank for the set).
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3])
        >>> a.rank
        1
        >>> (a + 1).rank
        2
        >>> (a + 100).rank
        1
        """
        other = as_int(other)
        offset = self.rank + other
        result = RGS_unrank((offset) %
                            RGS_enum(self.size),
                            self.size)
        return Partition.from_rgs(result, self.members)
    def __sub__(self, other):
        """
        Return a partition whose rank is ``other`` less than current rank,
        (mod the maximum rank for the set).
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3])
        >>> a.rank
        1
        >>> (a - 1).rank
        0
        >>> (a - 100).rank
        1
        """
        return self.__add__(-other)
    def __le__(self, other):
        """
        Checks if a partition is less than or equal to
        the other based on rank.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3, 4, 5])
        >>> b = Partition([1], [2, 3], [4], [5])
        >>> a.rank, b.rank
        (9, 34)
        >>> a <= a
        True
        >>> a <= b
        True
        """
        return self.sort_key() <= sympify(other).sort_key()
    def __lt__(self, other):
        """
        Checks if a partition is less than the other.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3, 4, 5])
        >>> b = Partition([1], [2, 3], [4], [5])
        >>> a.rank, b.rank
        (9, 34)
        >>> a < b
        True
        """
        return self.sort_key() < sympify(other).sort_key()
    @property
    def rank(self):
        """
        Gets the rank of a partition.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3], [4, 5])
        >>> a.rank
        13
        """
        if self._rank is not None:
            return self._rank
        self._rank = RGS_rank(self.RGS)
        return self._rank
    @property
    def RGS(self):
        """
        Returns the "restricted growth string" of the partition.
        The RGS is returned as a list of indices, L, where L[i] indicates
        the block in which element i appears. For example, in a partition
        of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is
        [1, 1, 0]: "a" is in block 1, "b" is in block 1 and "c" is in block 0.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> a = Partition([1, 2], [3], [4, 5])
        >>> a.members
        (1, 2, 3, 4, 5)
        >>> a.RGS
        (0, 0, 1, 2, 2)
        >>> a + 1
        {{3}, {4}, {5}, {1, 2}}
        >>> _.RGS
        (0, 0, 1, 2, 3)
        """
        # Map every element to the index of the (canonically sorted) block
        # that contains it, then read the map off in element order.
        rgs = {}
        partition = self.partition
        for i, part in enumerate(partition):
            for j in part:
                rgs[j] = i
        return tuple([rgs[i] for i in sorted(
            [i for p in partition for i in p], key=default_sort_key)])
    @classmethod
    def from_rgs(self, rgs, elements):
        """
        Creates a set partition from a restricted growth string.
        The indices given in rgs are assumed to be the index
        of the element as given in elements *as provided* (the
        elements are not sorted by this routine). Block numbering
        starts from 0. If any block was not referenced in ``rgs``
        an error will be raised.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import Partition
        >>> Partition.from_rgs([0, 1, 2, 0, 1], list('abcde'))
        {{c}, {a, d}, {b, e}}
        >>> Partition.from_rgs([0, 1, 2, 0, 1], list('cbead'))
        {{e}, {a, c}, {b, d}}
        >>> a = Partition([1, 4], [2], [3, 5])
        >>> Partition.from_rgs(a.RGS, a.members)
        {{2}, {1, 4}, {3, 5}}
        """
        # NOTE: this is a classmethod, so the first parameter (named
        # ``self`` here) actually receives the class.
        if len(rgs) != len(elements):
            raise ValueError('mismatch in rgs and element lengths')
        max_elem = max(rgs) + 1
        partition = [[] for i in range(max_elem)]
        j = 0
        for i in rgs:
            partition[i].append(elements[j])
            j += 1
        if not all(p for p in partition):
            raise ValueError('some blocks of the partition were empty.')
        return Partition(*partition)
class IntegerPartition(Basic):
    """
    This class represents an integer partition.
    In number theory and combinatorics, a partition of a positive integer,
    ``n``, also called an integer partition, is a way of writing ``n`` as a
    list of positive integers that sum to n. Two partitions that differ only
    in the order of summands are considered to be the same partition; if order
    matters then the partitions are referred to as compositions. For example,
    4 has five partitions: [4], [3, 1], [2, 2], [2, 1, 1], and [1, 1, 1, 1];
    the compositions [1, 2, 1] and [1, 1, 2] are the same as partition
    [2, 1, 1].
    See Also
    ========
    sympy.utilities.iterables.partitions,
    sympy.utilities.iterables.multiset_partitions
    Reference: https://en.wikipedia.org/wiki/Partition_%28number_theory%29
    """
    # Lazily computed caches used by ``as_dict`` (see that method).
    _dict = None
    _keys = None
    def __new__(cls, partition, integer=None):
        """
        Generates a new IntegerPartition object from a list or dictionary.
        The partition can be given as a list of positive integers or a
        dictionary of (integer, multiplicity) items. If the partition is
        preceded by an integer an error will be raised if the partition
        does not sum to that given integer.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([5, 4, 3, 1, 1])
        >>> a
        IntegerPartition(14, (5, 4, 3, 1, 1))
        >>> print(a)
        [5, 4, 3, 1, 1]
        >>> IntegerPartition({1:3, 2:1})
        IntegerPartition(5, (2, 1, 1, 1))
        If the value that the partition should sum to is given first, a check
        will be made and an error will be raised if there is a discrepancy:
        >>> IntegerPartition(10, [5, 4, 3, 1])
        Traceback (most recent call last):
        ...
        ValueError: Partition did not add to 10
        """
        # Support both call orders: IntegerPartition(partition) and
        # IntegerPartition(integer, partition).
        if integer is not None:
            integer, partition = partition, integer
        if isinstance(partition, (dict, Dict)):
            _ = []
            for k, v in sorted(list(partition.items()), reverse=True):
                if not v:
                    continue
                k, v = as_int(k), as_int(v)
                _.extend([k]*v)
            partition = tuple(_)
        else:
            partition = tuple(sorted(map(as_int, partition), reverse=True))
        sum_ok = False
        if integer is None:
            integer = sum(partition)
            sum_ok = True
        else:
            integer = as_int(integer)
        if not sum_ok and sum(partition) != integer:
            raise ValueError("Partition did not add to %s" % integer)
        if any(i < 1 for i in partition):
            raise ValueError("The summands must all be positive.")
        obj = Basic.__new__(cls, integer, partition)
        obj.partition = list(partition)
        obj.integer = integer
        return obj
    def prev_lex(self):
        """Return the previous partition of the integer, n, in lexical order,
        wrapping around to [1, ..., 1] if the partition is [n].
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([4])
        >>> print(p.prev_lex())
        [3, 1]
        >>> p.partition > p.prev_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        keys = self._keys
        if keys == [1]:
            # [1, 1, ..., 1] wraps around to [n].
            return IntegerPartition({self.integer: 1})
        if keys[-1] != 1:
            d[keys[-1]] -= 1
            if keys[-1] == 2:
                d[1] = 2
            else:
                d[keys[-1] - 1] = d[1] = 1
        else:
            d[keys[-2]] -= 1
            left = d[1] + keys[-2]
            new = keys[-2]
            d[1] = 0
            while left:
                new -= 1
                if left - new >= 0:
                    d[new] += left//new
                    left -= d[new]*new
        return IntegerPartition(self.integer, d)
    def next_lex(self):
        """Return the next partition of the integer, n, in lexical order,
        wrapping around to [n] if the partition is [1, ..., 1].
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> p = IntegerPartition([3, 1])
        >>> print(p.next_lex())
        [4]
        >>> p.partition < p.next_lex().partition
        True
        """
        d = defaultdict(int)
        d.update(self.as_dict())
        key = self._keys
        a = key[-1]  # smallest summand
        if a == self.integer:
            # [n] wraps around to [1, 1, ..., 1].
            d.clear()
            d[1] = self.integer
        elif a == 1:
            if d[a] > 1:
                d[a + 1] += 1
                d[a] -= 2
            else:
                b = key[-2]
                d[b + 1] += 1
                d[1] = (d[b] - 1)*b
                d[b] = 0
        else:
            if d[a] > 1:
                if len(key) == 1:
                    d.clear()
                    d[a + 1] = 1
                    d[1] = self.integer - a - 1
                else:
                    a1 = a + 1
                    d[a1] += 1
                    d[1] = d[a]*a - a1
                    d[a] = 0
            else:
                b = key[-2]
                b1 = b + 1
                d[b1] += 1
                need = d[b]*b + d[a]*a - b1
                d[a] = d[b] = 0
                d[1] = need
        return IntegerPartition(self.integer, d)
    def as_dict(self):
        """Return the partition as a dictionary whose keys are the
        partition integers and the values are the multiplicity of that
        integer.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> IntegerPartition([1]*3 + [2] + [3]*4).as_dict()
        {1: 3, 2: 1, 3: 4}
        """
        if self._dict is None:
            groups = group(self.partition, multiple=False)
            self._keys = [g[0] for g in groups]
            self._dict = dict(groups)
        return self._dict
    @property
    def conjugate(self):
        """
        Computes the conjugate partition of itself.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([6, 3, 3, 2, 1])
        >>> a.conjugate
        [5, 4, 3, 1, 1, 1]
        """
        # Transpose the Ferrers diagram: b[k-1] counts the summands >= k.
        j = 1
        temp_arr = list(self.partition) + [0]
        k = temp_arr[0]
        b = [0]*k
        while k > 0:
            while k > temp_arr[j]:
                b[k - 1] = j
                k -= 1
            j += 1
        return b
    def __lt__(self, other):
        """Return True if self is less than other when the partition
        is listed from smallest to biggest.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([3, 1])
        >>> a < a
        False
        >>> b = a.next_lex()
        >>> a < b
        True
        >>> a == b
        False
        """
        return list(reversed(self.partition)) < list(reversed(other.partition))
    def __le__(self, other):
        """Return True if self is less than or equal to other when the
        partition is listed from smallest to biggest.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> a = IntegerPartition([4])
        >>> a <= a
        True
        """
        return list(reversed(self.partition)) <= list(reversed(other.partition))
    def as_ferrers(self, char='#'):
        """
        Prints the ferrer diagram of a partition.
        Examples
        ========
        >>> from sympy.combinatorics.partitions import IntegerPartition
        >>> print(IntegerPartition([1, 1, 5]).as_ferrers())
        #####
        #
        #
        """
        return "\n".join([char*i for i in self.partition])
    def __str__(self):
        return str(list(self.partition))
def random_integer_partition(n, seed=None):
    """
    Generates a random integer partition summing to ``n`` as a list
    of reverse-sorted integers.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import random_integer_partition
    For the following, a seed is given so a known value can be shown; in
    practice, the seed would not be given.
    >>> random_integer_partition(100, seed=[1, 1, 12, 1, 2, 1, 85, 1])
    [85, 12, 2, 1]
    >>> random_integer_partition(10, seed=[1, 2, 3, 1, 5, 1])
    [5, 3, 1, 1]
    >>> random_integer_partition(1)
    [1]
    """
    from sympy.utilities.randtest import _randint
    n = as_int(n)
    if n < 1:
        raise ValueError('n must be a positive integer')
    randint = _randint(seed)
    # Repeatedly draw a summand and a multiplicity until n is used up.
    pairs = []
    remaining = n
    while remaining > 0:
        summand = randint(1, remaining)
        count = randint(1, remaining//summand)
        pairs.append((summand, count))
        remaining -= summand*count
    pairs.sort(reverse=True)
    # Expand (summand, count) pairs into the flat, reverse-sorted result.
    result = []
    for summand, count in pairs:
        result.extend([summand]*count)
    return result
def RGS_generalized(m):
    """
    Computes the m + 1 generalized unrestricted growth strings
    and returns them as rows in matrix.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_generalized
    >>> RGS_generalized(6)
    Matrix([
    [  1,   1,   1,  1,  1, 1, 1],
    [  1,   2,   3,  4,  5, 6, 0],
    [  2,   5,  10, 17, 26, 0, 0],
    [  5,  15,  37, 77,  0, 0, 0],
    [ 15,  52, 151,  0,  0, 0, 0],
    [ 52, 203,   0,  0,  0, 0, 0],
    [203,   0,   0,  0,  0, 0, 0]])
    """
    d = zeros(m + 1)
    # First row is all ones.
    for col in range(0, m + 1):
        d[0, col] = 1
    # Each further entry is built from the two entries above it; cells
    # past the anti-diagonal stay zero.
    for row in range(1, m + 1):
        for col in range(m):
            if col <= m - row:
                d[row, col] = col * d[row - 1, col] + d[row - 1, col + 1]
            else:
                d[row, col] = 0
    return d
def RGS_enum(m):
    """
    RGS_enum computes the total number of restricted growth strings
    possible for a superset of size m.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_enum
    >>> from sympy.combinatorics.partitions import Partition
    >>> RGS_enum(4)
    15
    >>> RGS_enum(5)
    52
    >>> RGS_enum(6)
    203
    We can check that the enumeration is correct by actually generating
    the partitions. Here, the 15 partitions of 4 items are generated:
    >>> a = Partition(list(range(4)))
    >>> s = set()
    >>> for i in range(20):
    ...     s.add(a)
    ...     a += 1
    ...
    >>> assert len(s) == 15
    """
    # The count of restricted growth strings of length m is the m-th
    # Bell number; handle the degenerate sizes first.
    if m < 1:
        return 0
    if m == 1:
        return 1
    return bell(m)
def RGS_unrank(rank, m):
    """
    Gives the unranked restricted growth string for a given
    superset size.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_unrank
    >>> RGS_unrank(14, 4)
    [0, 1, 2, 3]
    >>> RGS_unrank(0, 4)
    [0, 0, 0, 0]
    """
    if m < 1:
        raise ValueError("The superset size must be >= 1")
    if rank < 0 or RGS_enum(m) <= rank:
        raise ValueError("Invalid arguments")
    # L is built 1-based (L[0] is unused); entries are shifted down by one
    # on return.  j tracks the current number of distinct blocks.
    L = [1] * (m + 1)
    j = 1
    D = RGS_generalized(m)
    for i in range(2, m + 1):
        # v = number of strings completing the current prefix per block choice.
        v = D[m - i, j]
        cr = j*v
        if cr <= rank:
            # rank falls past all reuses of existing blocks: open a new block.
            L[i] = j + 1
            rank -= cr
            j += 1
        else:
            # rank selects one of the existing j blocks.
            L[i] = int(rank / v + 1)
            rank %= v
    return [x - 1 for x in L[1:]]
def RGS_rank(rgs):
    """
    Computes the rank of a restricted growth string.
    Examples
    ========
    >>> from sympy.combinatorics.partitions import RGS_rank, RGS_unrank
    >>> RGS_rank([0, 1, 2, 1, 3])
    42
    >>> RGS_rank(RGS_unrank(4, 7))
    4
    """
    size = len(rgs)
    D = RGS_generalized(size)
    rank = 0
    # Accumulate, for each position, how many strings precede this one
    # given the prefix seen so far.
    for i in range(1, size):
        suffix_len = size - (i + 1)
        prefix_max = max(rgs[:i])
        rank += D[suffix_len, prefix_max + 1] * rgs[i]
    return rank
| 28.317808 | 80 | 0.50803 |
795a0791f07218a1f2a8f9f4e81e8b3eef66c6a0 | 7,771 | py | Python | Resnet_models/res50.py | a5372935/Oct_resnet18 | 9e835634151398bb6704c251807d28b21fde5b86 | [
"MIT"
] | 3 | 2019-06-17T17:58:48.000Z | 2020-12-15T06:58:54.000Z | Resnet_models/res50.py | a5372935/Oct_resnet18 | 9e835634151398bb6704c251807d28b21fde5b86 | [
"MIT"
] | null | null | null | Resnet_models/res50.py | a5372935/Oct_resnet18 | 9e835634151398bb6704c251807d28b21fde5b86 | [
"MIT"
] | 2 | 2020-12-16T08:57:36.000Z | 2020-12-26T06:48:58.000Z | import numpy as np
import warnings
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import add, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
import tensorflow.keras.backend as K
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.engine.topology import get_source_inputs
def identity_block(input_tensor, kernel_size, filters, stage, block):
    """Build a ResNet identity block: three convs with an un-projected
    shortcut, so input and output must have matching shapes.

    ``filters`` gives the channel counts of the three convolutions;
    ``stage`` and ``block`` only determine the layer names.
    """
    n1, n2, n3 = filters
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Main path: 1x1 reduce -> kxk conv -> 1x1 expand.
    x = Conv2D(n1, (1, 1), name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = Conv2D(n2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = Conv2D(n3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # Shortcut add, then the final activation.
    x = Activation('relu')(add([x, input_tensor]))
    return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
    """Build a ResNet projection block: three convs plus a strided 1x1
    projection on the shortcut, so the block may change spatial size
    and channel count.
    """
    n1, n2, n3 = filters
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Main path: strided 1x1 reduce -> kxk conv -> 1x1 expand.
    x = Conv2D(n1, (1, 1), strides=strides,
               name=conv_name_base + '2a')(input_tensor)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)
    x = Conv2D(n2, kernel_size, padding='same',
               name=conv_name_base + '2b')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)
    x = Conv2D(n3, (1, 1), name=conv_name_base + '2c')(x)
    x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
    # Projection shortcut matches the main path's output shape.
    shortcut = Conv2D(n3, (1, 1), strides=strides,
                      name=conv_name_base + '1')(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
    x = Activation('relu')(add([x, shortcut]))
    return x
def ResNet50(include_top = False,
             weights=None,
             input_tensor=None, input_shape=None,
             pooling=None,
             classes=1000):
    """Build a (slightly modified) ResNet-50 classification model.

    Differences from the stock keras-applications ResNet50: the initial
    ZeroPadding2D((3, 3)) and the 3x3/2 max-pool after conv1 are omitted,
    and the ``include_top=False`` path with ``pooling='avg'`` appends a
    softmax Dense head after global average pooling.

    Args:
        include_top: include the fully-connected 'fc1000' head.
        weights: accepted for API compatibility but currently IGNORED —
            pretrained-weight loading is not implemented in this variant.
        input_tensor: optional Keras tensor to use as the model input.
        input_shape: optional shape tuple (validated/defaulted below).
        pooling: 'avg' or 'max' pooling mode when ``include_top`` is False.
        classes: number of output classes.

    Returns:
        A ``keras.Model`` named 'resnet50'.
    """
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=32,
                                      data_format=K.image_data_format(),
                                      require_flatten = include_top)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # BatchNorm normalizes over the channel axis.
    bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
    # Stem. NOTE: the stock ResNet50 applies ZeroPadding2D((3, 3)) before
    # conv1 and MaxPooling2D((3, 3), strides=(2, 2)) after it; both are
    # intentionally omitted in this variant.
    x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(img_input)
    x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    # Stage 2: 3 blocks (no spatial downsampling: strides=(1, 1)).
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
    # Stage 3: 4 blocks.
    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
    # Stage 4: 6 blocks.
    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # Stage 5: 3 blocks.
    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
    x = AveragePooling2D(name='avg_pool')(x)
    # Classification head.
    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='fc1000')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
            x = Dense(classes, activation='softmax', name='resnet50')(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
    # If input_tensor is given, make sure the model tracks any layers
    # that precede it.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = Model(inputs, x, name='resnet50')
    return model
795a08ae443d0d4645330d2b9f093b9460256edc | 326 | py | Python | 4-1/WIndow Programing/20180427/Code10-20.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-1/WIndow Programing/20180427/Code10-20.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | 4-1/WIndow Programing/20180427/Code10-20.py | define16/Class | 8b0771a348b2bcb19ba338ebff94326828a293ea | [
"Apache-2.0"
] | null | null | null | from tkinter import *
from tkinter.filedialog import *
## Function-definition section (none needed in this demo) ##
# Demo: open a file-selection dialog and show the chosen path in a label.
window = Tk()
window.geometry("400x100")
# Label that will display the selected file name
# (the Korean text reads "selected file name").
label1 = Label(window, text = "선택된 파일 이름 ")
label1.pack()
# Modal open-file dialog; filters are "GIF files" / "all files" in Korean.
filename = askopenfilename(parent = window, filetypes = (("GIF 파일", "*.gif"), ("모든 파일", "*.*")))
# Replace the label text with whatever path was picked (empty on cancel).
label1.configure(text=str(filename))
window.mainloop()
| 20.375 | 96 | 0.665644 |
795a08c25acbe9cba9a4efa0da6185bbac064358 | 3,670 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_rule_request.py | NQLoong/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_rule_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/delete_rule_request.py | mawenbo-huawei/huaweicloud-sdk-python-v3 | 677944a0b722147c6e105c53df9110724d64152a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class DeleteRuleRequest:
    """Request model for the IoTDA "delete rule" API.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    sensitive_list = []
    openapi_types = {
        'instance_id': 'str',
        'rule_id': 'str'
    }
    attribute_map = {
        'instance_id': 'Instance-Id',
        'rule_id': 'rule_id'
    }
    def __init__(self, instance_id=None, rule_id=None):
        """DeleteRuleRequest - a model defined in huaweicloud sdk"""
        self._instance_id = None
        self._rule_id = None
        self.discriminator = None
        if instance_id is not None:
            self.instance_id = instance_id
        self.rule_id = rule_id
    @property
    def instance_id(self):
        """Gets the instance_id of this DeleteRuleRequest.

        **Parameter description**: Instance ID. Unique identifier of each
        instance under physical multi-tenancy. Ordinary Huawei Cloud tenants
        do not need to carry this parameter; it is only required when
        accessing the API from the management plane in a physical
        multi-tenant scenario.

        :return: The instance_id of this DeleteRuleRequest.
        :rtype: str
        """
        return self._instance_id
    @instance_id.setter
    def instance_id(self, instance_id):
        """Sets the instance_id of this DeleteRuleRequest.

        **Parameter description**: Instance ID. Unique identifier of each
        instance under physical multi-tenancy. Ordinary Huawei Cloud tenants
        do not need to carry this parameter; it is only required when
        accessing the API from the management plane in a physical
        multi-tenant scenario.

        :param instance_id: The instance_id of this DeleteRuleRequest.
        :type: str
        """
        self._instance_id = instance_id
    @property
    def rule_id(self):
        """Gets the rule_id of this DeleteRuleRequest.

        **Parameter description**: Rule ID, which uniquely identifies a rule;
        assigned by the IoT platform when the rule is created.
        **Value range**: at most 32 characters; letters and digits only.

        :return: The rule_id of this DeleteRuleRequest.
        :rtype: str
        """
        return self._rule_id
    @rule_id.setter
    def rule_id(self, rule_id):
        """Sets the rule_id of this DeleteRuleRequest.

        **Parameter description**: Rule ID, which uniquely identifies a rule;
        assigned by the IoT platform when the rule is created.
        **Value range**: at most 32 characters; letters and digits only.

        :param rule_id: The rule_id of this DeleteRuleRequest.
        :type: str
        """
        self._rule_id = rule_id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask attributes listed as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeleteRuleRequest):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.788321 | 80 | 0.564033 |
795a08fd3142bd46b6e574daf65dd9e9c2b5e9c3 | 6,504 | py | Python | lib/model/feature_extractors/resnet_feature_extractor_duo.py | jennyabr/faster-rcnn.pytorch | 90639b26e9bcdbca0b3da140448913b5c41182e0 | [
"MIT"
] | null | null | null | lib/model/feature_extractors/resnet_feature_extractor_duo.py | jennyabr/faster-rcnn.pytorch | 90639b26e9bcdbca0b3da140448913b5c41182e0 | [
"MIT"
] | null | null | null | lib/model/feature_extractors/resnet_feature_extractor_duo.py | jennyabr/faster-rcnn.pytorch | 90639b26e9bcdbca0b3da140448913b5c41182e0 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
import torch.nn as nn
from torchvision.models import resnet101
from torchvision.models.resnet import resnet50, resnet152
from model.feature_extractors.feature_extractor_duo import FeatureExtractorDuo
from model.utils.net_utils import global_average_pooling
class ResNetFeatureExtractorDuo(FeatureExtractorDuo):
    """Pair of Faster R-CNN feature extractors backed by a torchvision ResNet:
    one sub-net feeding the RPN (conv1..layer3) and one feeding the
    Fast R-CNN head (layer4 + global average pooling)."""
    def __init__(self, net_variant='101', frozen_blocks=0):
        super(ResNetFeatureExtractorDuo, self).__init__(net_variant, frozen_blocks)
        def resnet_variant_builder(variant):
            # Map a variant string ('50'/'101'/'152') to a fresh torchvision model.
            if str(variant) == '50':
                return resnet50()
            elif str(variant) == '101':
                return resnet101()
            elif str(variant) == '152':
                return resnet152()
            else:
                raise ValueError('The variant Resnet{} is currently not supported'.format(variant))
        resnet = resnet_variant_builder(net_variant)
        # Both sub-extractors share the layers of the same ResNet instance.
        self._rpn_feature_extractor = self._RPNFeatureExtractor(resnet, frozen_blocks)
        self._fast_rcnn_feature_extractor = self._FastRCNNFeatureExtractor(resnet)
    @property
    def rpn_feature_extractor(self):
        return self._rpn_feature_extractor
    @property
    def fast_rcnn_feature_extractor(self):
        return self._fast_rcnn_feature_extractor
    class _RPNFeatureExtractor(FeatureExtractorDuo._FeatureExtractor):
        """conv1..layer3 of the ResNet; produces the feature map for the RPN."""
        def __init__(self, resnet, frozen_blocks):
            super(ResNetFeatureExtractorDuo._RPNFeatureExtractor, self).__init__()
            # Names of the wrapped layers in pretrained state-dict notation;
            # order must match the nn.Sequential below.
            self._ordered_layer_names = ["conv1.", "bn1.", "relu.", "maxpool.", "layer1.", "layer2.", "layer3."]
            # TODO JA: the model should not be able to change independently
            # TODO JA: of the list of _ordered_layer_names can change
            self._model = nn.Sequential(resnet.conv1,
                                        resnet.bn1,
                                        resnet.relu,
                                        resnet.maxpool,
                                        resnet.layer1,
                                        resnet.layer2,
                                        resnet.layer3)
            if not (0 <= frozen_blocks < 4):
                raise ValueError('Illegal number of blocks to freeze')
            self._frozen_blocks = frozen_blocks
            ResNetFeatureExtractorDuo._freeze_layers(self._model, self._frozen_blocks)
            self._model.apply(self._freeze_batch_norm_layers)
            # Channel count of the last bottleneck's conv3 output.
            self._output_num_channels = self.get_output_num_channels(self._model[-1][-1].conv3)
        @property
        def output_num_channels(self):
            return self._output_num_channels
        @property
        def layer_mapping_to_pretrained(self):
            # {index in self._model -> layer-name prefix in a pretrained state dict}
            mapping_dict = {layer_ind: layer_name for layer_ind, layer_name in enumerate(self._ordered_layer_names)}
            return mapping_dict
        def forward(self, input):
            return self._model(input)
        def train(self, mode=True):
            # Re-apply freezing after every train/eval switch, since
            # nn.Module.train() resets the mode of all children.
            super(ResNetFeatureExtractorDuo._RPNFeatureExtractor, self).train(mode)
            ResNetFeatureExtractorDuo._freeze_layers(self._model, self._frozen_blocks)
            self._model.apply(self._freeze_batch_norm_layers)
    class _FastRCNNFeatureExtractor(FeatureExtractorDuo._FeatureExtractor):
        """layer4 of the ResNet followed by global average pooling; feeds the
        Fast R-CNN classification/regression head."""
        def __init__(self, resnet):
            super(ResNetFeatureExtractorDuo._FastRCNNFeatureExtractor, self).__init__()
            # Single wrapped layer, in pretrained state-dict notation.
            self._mapping_dict = {0: "layer4."}
            # TODO JA: the model should not be able to change independently
            # TODO JA: of the list in the mapping_dict can change
            self._model = nn.Sequential(resnet.layer4)
            self._model.apply(self._freeze_batch_norm_layers)
            self._output_num_channels = self.get_output_num_channels(self._model[-1][-1].conv3)
        def forward(self, input):
            return global_average_pooling(self._model(input))
        @property
        def output_num_channels(self):
            return self._output_num_channels
        @property
        def layer_mapping_to_pretrained(self):
            return self._mapping_dict
        def train(self, mode=True):
            super(ResNetFeatureExtractorDuo._FastRCNNFeatureExtractor, self).train(mode)
            self._model.apply(self._freeze_batch_norm_layers)
    @classmethod
    def _freeze_layers(cls, model, upto_block_num):
        """Freeze (eval + requires_grad=False) the first ``upto_block_num``
        blocks of ``model``; a pooling layer or an nn.Sequential counts as
        one block."""
        curr_block_num = 0
        if upto_block_num > 0:
            for module in model.children():
                module_name = module.__class__.__name__.lower()
                is_block = isinstance(module, nn.Sequential)
                if module_name.find('pool') != -1 or is_block:
                    curr_block_num += 1
                    if curr_block_num > upto_block_num:
                        break
                    else:
                        module.eval()
                        if is_block:
                            for submodule in module.modules():
                                for p in submodule.parameters():
                                    p.requires_grad = False
                        else:
                            for p in module.parameters():
                                p.requires_grad = False
    def convert_pretrained_state_dict(self, pretrained_resnet_state_dict):
        """Split a pretrained ResNet state dict into the two sub-extractors'
        namespaces and return (sub-net, state-dict) pairs."""
        rpn_state_dict = {}
        fast_rcnn_state_dict = {}
        def startswith_one_of(key, mapping_dict):
            # Return (new index, matched prefix) if ``key`` starts with one
            # of the mapped layer prefixes, else (None, None).
            for new_key, item in mapping_dict.items():
                if key.startswith(item):
                    return new_key, item
            return None, None
        for orig_key, v in pretrained_resnet_state_dict.items():
            # layer4 weights go to the Fast R-CNN sub-net...
            replacing_key, item = startswith_one_of(orig_key,
                                                    self.fast_rcnn_feature_extractor.layer_mapping_to_pretrained)
            if replacing_key is not None:
                fast_rcnn_state_dict[orig_key.replace(item, '_model.{}.'.format(str(replacing_key)))] = v
            else:
                # ...everything conv1..layer3 goes to the RPN sub-net.
                replacing_key, item = startswith_one_of(
                    orig_key, self.rpn_feature_extractor.layer_mapping_to_pretrained)
                if replacing_key is not None:
                    rpn_state_dict[orig_key.replace(item, '_model.{}.'.format(str(replacing_key)))] = v
        fe_subnets = [self.rpn_feature_extractor, self.fast_rcnn_feature_extractor]
        fe_state_dicts = [rpn_state_dict, fast_rcnn_state_dict]
        return zip(fe_subnets, fe_state_dicts)
| 43.945946 | 116 | 0.619311 |
795a09130cc2807bd1c505859b8fdc9838652d11 | 2,030 | py | Python | tools/obfuscator.py | anima-libera/ports | 782ea3291eb3f66a043a3bc8e3cf2d35ccd62815 | [
"Unlicense"
] | 4 | 2020-05-14T22:27:08.000Z | 2021-07-23T18:56:04.000Z | tools/obfuscator.py | anima-libera/ports | 782ea3291eb3f66a043a3bc8e3cf2d35ccd62815 | [
"Unlicense"
] | null | null | null | tools/obfuscator.py | anima-libera/ports | 782ea3291eb3f66a043a3bc8e3cf2d35ccd62815 | [
"Unlicense"
] | 1 | 2021-05-14T09:33:07.000Z | 2021-05-14T09:33:07.000Z |
""" Tiny source-code obfuscator: renames name tokens and strips whitespace.
Please note that this is clearly unoptimized because who cares.
"""
import re
from random import choice
# Names the obfuscator must never rename -- presumably reserved words of
# the target language; confirm against the language spec.
SPECIAL_NAMES = ["o", "o0", "o1", "of", "os", "sa", "ia", "ir"]
# Alphabet for generated names: lowercase ASCII letters plus digits.
NAME_CHARS = "".join([chr(n) for n in range(ord("a"), ord("z")+1)]+[str(i) for i in range(10)])
# Module-level cache mapping original names to their replacements.
generated_names = {}
def _generate_name(n):
	"""Build one random candidate name of length *n* from NAME_CHARS.

	Helper for generate_name(); carries no uniqueness guarantee itself.
	"""
	return "".join(choice(NAME_CHARS) for _ in range(n))
def generate_name(base):
	"""Return the obfuscated replacement for *base*.

	Special names pass through untouched; previously seen names reuse
	their cached replacement; otherwise a fresh random name is drawn,
	growing in length until it collides with neither the cache values
	nor the special names.
	"""
	if base in SPECIAL_NAMES:
		return base
	cached = generated_names.get(base)
	if cached is not None:
		return cached
	length = 1
	candidate = _generate_name(length)
	while candidate in generated_names.values() or candidate in SPECIAL_NAMES:
		length += 1
		candidate = _generate_name(length)
	generated_names[base] = candidate
	return candidate
def _isnamechar(c):
""" TODO """
return ord("a") <= ord(c) <= ord("z") or ord("0") <= ord(c) <= ord("9")
def obfuscate(src, c80max = False, iterations = 1):
	"""Obfuscate *src*: rename every [a-z0-9]+ token and strip whitespace.

	c80max: if True, re-wrap the result so lines stay under ~80 columns,
	never splitting inside a name.
	iterations: number of random attempts; the shortest output wins.
	"""
	if not src:
		return ""
	def replace_name(match):
		# Swap each lowercase-alphanumeric token for its generated name.
		return generate_name(match.group(0))
	parts = (re.sub(r"([a-z0-9]+)", replace_name, src)).split()
	patrsj = []
	for i in range(len(parts)-1):
		patrsj.append(parts[i])
		if _isnamechar(parts[i][0]) and _isnamechar(parts[i+1][0]):
			# Separator so adjacent names do not merge when joined.
			patrsj.append(".")
	patrsj.append(parts[-1])
	# NOTE(review): patrsj (with the "." separators) is built but never
	# used -- the join below reads `parts`, making the loop above dead
	# code. `"".join(patrsj)` may have been intended; confirm before fixing.
	osrc = "".join(parts)
	if c80max:
		p = []
		l = 0
		i = 78
		while i < len(osrc)-1:
			# Back the split point up until it no longer falls inside a name.
			while _isnamechar(osrc[i]) and _isnamechar(osrc[i+1]):
				i -= 1
			p.append(osrc[l:i+1])
			l = i+1
			i += 79
		p.append(osrc[l:i+1])
		osrc = "\n".join(p)
	if iterations > 1: # o god this is shit
		# Recurse for the remaining attempts; keep the shorter result.
		other = obfuscate(src, c80max = c80max, iterations = iterations-1)
		if len(other) < len(osrc):
			osrc = other
	return osrc
if __name__ == "__main__":
	# CLI flags: -8 wraps output at ~80 columns, -o retries 200 times and
	# keeps the shortest result, -p reads the source from stdin instead
	# of taking it as the first positional argument.
	from sys import argv, stdin
	c80max = ("-8" in argv)
	opt = ("-o" in argv) # dont do that
	if "-p" in argv:
		print(obfuscate(stdin.read(), c80max = c80max, iterations = 200 if opt else 1))
	else:
		print(obfuscate(argv[1], c80max = c80max, iterations = 200 if opt else 1))
| 26.710526 | 95 | 0.639409 |
795a093d72b9183329c74f285a3cd5db8e7d475a | 2,563 | py | Python | pydemoji/emoji.py | DJStompZone/DiscordEmoji-Wrapper | e3c017c7e7ff888202a068f3e8804d594ecf6e39 | [
"MIT"
] | null | null | null | pydemoji/emoji.py | DJStompZone/DiscordEmoji-Wrapper | e3c017c7e7ff888202a068f3e8804d594ecf6e39 | [
"MIT"
] | null | null | null | pydemoji/emoji.py | DJStompZone/DiscordEmoji-Wrapper | e3c017c7e7ff888202a068f3e8804d594ecf6e39 | [
"MIT"
] | null | null | null | from .errors import InvalidQuery, InvalidOption
import aiohttp
async def fetch(by: str, query, endpoint=None, case_sensitive=None):
    """
    Fetch an emoji from the discordemoji.com API.

    Parameters
    ----------
    by : str, field to match against: one of 'title', 'id', 'slug'
    query : query that you want to search for
    endpoint : optional, name of a single field to return; if left None,
        the full JSON object of the matched emoji is returned
    case_sensitive : bool, optional; if left None (or falsy), matching
        is NOT case sensitive

    Returns
    -------
    string
        data given from the JSON response (if endpoint)
    JSON
        JSON response, not messed with (if endpoint is None)

    Raises
    ------
    InvalidQuery
        occurs when the query is not listed on the site or the query is improperly formatted
    InvalidOption
        occurs when the option/endpoint is invalid
    """
    options = ['title', 'id', 'slug']
    by = by.lower()
    if by not in options:
        available = ", ".join(options)
        raise InvalidOption(f'You have provided an invalid option. Options: {available}')
    async with aiohttp.ClientSession() as session:
        async with session.post("https://discordemoji.com/api/") as response:
            emojis = await response.json()
    # Scan the whole list and keep the *last* match (original behaviour).
    latest_emoji = None
    for emoji in emojis:
        if case_sensitive:
            if emoji[by] == query:
                latest_emoji = emoji
        else:
            # BUGFIX: the two branches were previously swapped, so the
            # default path compared exactly while case_sensitive=True
            # lowercased -- the inverse of the documented contract.
            # str() guards against non-string fields such as numeric ids.
            if str(emoji[by]).lower() == str(query).lower():
                latest_emoji = emoji
    if latest_emoji is None:
        raise InvalidQuery("The query you have given is not listed on the site.")
    if not endpoint:
        return latest_emoji
    endpoints = ['title', 'id', 'slug', 'image', 'category', 'license', 'source', 'faves', 'submitted_by']
    # Accept a few friendly aliases for endpoint names.
    endpoint = endpoint.lower().replace('author', 'submitted_by').replace('favourite', 'faves').replace('favorite', 'faves')
    if endpoint not in endpoints:
        available = ", ".join(endpoints)
        raise InvalidOption(f"You have provided an invalid endpoint. Available Endpoints: {available}")
    # (The old unreachable `endpoint == ''` branch was removed: an empty
    # endpoint is already handled by the `not endpoint` early return.)
    return latest_emoji[endpoint]
795a0a2236e63fa8b6ff21374c687334f97927c4 | 34,468 | py | Python | mesonbuild/linkers.py | mine260309/meson | e19a49b8957cd06d6f121812b7b00ef60a57fc7c | [
"Apache-2.0"
] | null | null | null | mesonbuild/linkers.py | mine260309/meson | e19a49b8957cd06d6f121812b7b00ef60a57fc7c | [
"Apache-2.0"
] | null | null | null | mesonbuild/linkers.py | mine260309/meson | e19a49b8957cd06d6f121812b7b00ef60a57fc7c | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import typing
from . import mesonlib
if typing.TYPE_CHECKING:
from .coredata import OptionDictType
from .environment import Environment
class StaticLinker:
    """Base class describing the command-line dialect of a static
    archiver. The defaults model a tool needing no extra arguments;
    concrete subclasses override the relevant query methods.
    """
    def __init__(self, exelist: typing.List[str]):
        self.exelist = exelist
    def can_linker_accept_rsp(self) -> bool:
        """
        Determines whether the linker can accept arguments using the @rsp syntax.
        """
        return mesonlib.is_windows()
    def get_base_link_args(self, options: 'OptionDictType') -> typing.List[str]:
        """Like compilers.get_base_link_args, but for the static linker."""
        return []
    def get_exelist(self) -> typing.List[str]:
        return self.exelist.copy()
    def get_std_link_args(self) -> typing.List[str]:
        return []
    def get_buildtype_linker_args(self, buildtype: str) -> typing.List[str]:
        return []
    def get_output_args(self, target: str) -> typing.List[str]:
        return[]
    def get_coverage_link_args(self) -> typing.List[str]:
        return []
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        # Static archives carry no rpath information.
        return []
    def thread_link_flags(self, env: 'Environment') -> typing.List[str]:
        return []
    def openmp_flags(self) -> typing.List[str]:
        return []
    def get_option_link_args(self, options: 'OptionDictType') -> typing.List[str]:
        return []
    @classmethod
    def unix_args_to_native(cls, args: typing.List[str]) -> typing.List[str]:
        # Identity by default; MSVC-style tools override this translation.
        return args
    def get_link_debugfile_args(self, targetfile: str) -> typing.List[str]:
        # Static libraries do not have PDB files
        return []
    def get_always_args(self) -> typing.List[str]:
        return []
    def get_linker_always_args(self) -> typing.List[str]:
        return []
class VisualStudioLikeLinker:

    """Shared behaviour for Microsoft link.exe-style tools.

    Holds the target machine name and the arguments that are always
    passed, and translates Unix-style flags into the native dialect.
    """

    always_args = ['/NOLOGO']

    def __init__(self, machine: str):
        self.machine = machine

    def get_always_args(self) -> typing.List[str]:
        return self.always_args.copy()

    def get_linker_always_args(self) -> typing.List[str]:
        return self.always_args.copy()

    def get_output_args(self, target: str) -> typing.List[str]:
        # /MACHINE is only emitted when a machine name is known.
        machine_args = ['/MACHINE:' + self.machine] if self.machine else []
        return machine_args + ['/OUT:' + target]

    @classmethod
    def unix_args_to_native(cls, args: typing.List[str]) -> typing.List[str]:
        # Imported lazily to avoid a circular import at module load time.
        from .compilers import VisualStudioCCompiler
        return VisualStudioCCompiler.unix_args_to_native(args)
class VisualStudioLinker(VisualStudioLikeLinker, StaticLinker):
    """Microsoft's lib static linker."""
    def __init__(self, exelist: typing.List[str], machine: str):
        # The two bases have different signatures, so each __init__ is
        # called explicitly instead of relying on super() chaining.
        StaticLinker.__init__(self, exelist)
        VisualStudioLikeLinker.__init__(self, machine)
class IntelVisualStudioLinker(VisualStudioLikeLinker, StaticLinker):
    """Intel's xilib static linker."""
    def __init__(self, exelist: typing.List[str], machine: str):
        # Same explicit dual-base initialization as VisualStudioLinker.
        StaticLinker.__init__(self, exelist)
        VisualStudioLikeLinker.__init__(self, machine)
class ArLinker(StaticLinker):
    """Unix 'ar' archiver; probes -h output for deterministic-mode support."""
    def __init__(self, exelist: typing.List[str]):
        super().__init__(exelist)
        self.id = 'ar'
        # Probe the tool's help text once at construction time.
        pc, stdo = mesonlib.Popen_safe(self.exelist + ['-h'])[0:2]
        # Enable deterministic builds if they are available.
        if '[D]' in stdo:
            self.std_args = ['csrD']
        else:
            self.std_args = ['csr']
    def get_std_link_args(self) -> typing.List[str]:
        return self.std_args
    def get_output_args(self, target: str) -> typing.List[str]:
        # ar takes the archive name as a plain positional argument.
        return [target]
class ArmarLinker(ArLinker):
    """ARM's armar static archiver."""
    def __init__(self, exelist: typing.List[str]):
        # Deliberately bypass ArLinker.__init__: armar is not probed with
        # '-h' for deterministic mode; std_args is fixed instead.
        StaticLinker.__init__(self, exelist)
        self.id = 'armar'
        self.std_args = ['-csr']
    def can_linker_accept_rsp(self) -> bool:
        # armar cannot accept arguments using the @rsp syntax
        return False
class DLinker(StaticLinker):
    """Static linker driven through a D compiler (-lib mode)."""
    def __init__(self, exelist: typing.List[str], arch: str):
        super().__init__(exelist)
        # The compiler binary name doubles as the linker id (e.g. 'dmd').
        self.id = exelist[0]
        self.arch = arch
    def get_std_link_args(self) -> typing.List[str]:
        return ['-lib']
    def get_output_args(self, target: str) -> typing.List[str]:
        return ['-of=' + target]
    def get_linker_always_args(self) -> typing.List[str]:
        # On Windows the bitness must be selected explicitly; dmd uses
        # -m32mscoff for 32-bit COFF output.
        if mesonlib.is_windows():
            if self.arch == 'x86_64':
                return ['-m64']
            elif self.arch == 'x86_mscoff' and self.id == 'dmd':
                return ['-m32mscoff']
            return ['-m32']
        return []
class CcrxLinker(StaticLinker):
    """Renesas CC-RX 'rlink' used as a static librarian."""
    def __init__(self, exelist: typing.List[str]):
        super().__init__(exelist)
        self.id = 'rlink'
    def can_linker_accept_rsp(self) -> bool:
        return False
    def get_output_args(self, target: str) -> typing.List[str]:
        return ['-output=%s' % target]
    def get_linker_always_args(self) -> typing.List[str]:
        return ['-nologo', '-form=library']
def prepare_rpaths(raw_rpaths: str, build_dir: str, from_dir: str) -> typing.List[str]:
    """Convert raw rpath entries into their canonical internal form.

    Entries pointing into the build tree are rewritten as relative paths
    (absolute ones would vary in length with the build directory,
    breaking reproducible builds), then ordered so build-tree rpaths
    take precedence over system-wide ones.
    """
    evaluated = [evaluate_rpath(p, build_dir, from_dir) for p in raw_rpaths]
    return order_rpaths(evaluated)
def order_rpaths(rpath_list: typing.List[str]) -> typing.List[str]:
    """Stably partition *rpath_list*: relative paths first, absolute last.

    Rpaths pointing inside our build dir (the relative ones) must win
    over locations such as /lib/x86_64-linux-gnu, so built binaries
    prefer our freshly built libraries to installed copies. The original
    relative order within each group is preserved.
    """
    relative = [p for p in rpath_list if not os.path.isabs(p)]
    absolute = [p for p in rpath_list if os.path.isabs(p)]
    return relative + absolute
def evaluate_rpath(p: str, build_dir: str, from_dir: str) -> str:
    """Rewrite a single rpath entry *p* into its internal form.

    An entry equal to *from_dir* collapses to '' (os.path.relpath would
    error out on it); absolute entries are kept verbatim since they may
    legitimately point outside the build dir; everything else becomes a
    path relative to *from_dir* within the build tree.
    """
    if p == from_dir:
        return ''
    if os.path.isabs(p):
        return p
    return os.path.relpath(os.path.join(build_dir, p), os.path.join(build_dir, from_dir))
class DynamicLinker(metaclass=abc.ABCMeta):
    """Base class for dynamic linkers.

    Subclasses describe one tool's command-line dialect; most defaults
    here mean "no extra argument needed" and methods that raise mark
    features the tool does not support.
    """
    _BUILDTYPE_ARGS = {
        'plain': [],
        'debug': [],
        'debugoptimized': [],
        'release': [],
        'minsize': [],
        'custom': [],
    }  # type: typing.Dict[str, typing.List[str]]
    def _apply_prefix(self, arg: str) -> typing.List[str]:
        # Wrap a raw linker flag with the driver pass-through prefix
        # (e.g. '-Wl,' when the compiler invokes the linker).
        if isinstance(self.prefix_arg, str):
            return [self.prefix_arg + arg]
        return self.prefix_arg + [arg]
    def __init__(self, exelist: typing.List[str], for_machine: mesonlib.MachineChoice,
                 id_: str, prefix_arg: str, *, version: str = 'unknown version'):
        self.exelist = exelist
        self.for_machine = for_machine
        self.version = version
        self.id = id_
        self.prefix_arg = prefix_arg
    def __repr__(self) -> str:
        return '<{}: v{} `{}`>'.format(type(self).__name__, self.version, ' '.join(self.exelist))
    def get_id(self) -> str:
        return self.id
    def get_version_string(self) -> str:
        return '({} {})'.format(self.id, self.version)
    def get_exelist(self) -> typing.List[str]:
        return self.exelist.copy()
    def get_accepts_rsp(self) -> bool:
        # TODO: is it really a matter of is_windows or is it for_windows?
        return mesonlib.is_windows()
    def get_always_args(self) -> typing.List[str]:
        return []
    def get_lib_prefix(self) -> str:
        return ''
    # XXX: is use_ldflags a compiler or a linker attribute?
    def get_args_from_envvars(self) -> typing.List[str]:
        # Extra link args come from the LDFLAGS environment variable.
        flags = os.environ.get('LDFLAGS')
        if not flags:
            return []
        return mesonlib.split_args(flags)
    def get_option_args(self, options: 'OptionDictType') -> typing.List[str]:
        return []
    def has_multi_arguments(self, args: typing.List[str], env: 'Environment') -> typing.Tuple[bool, bool]:
        m = 'Language {} does not support has_multi_link_arguments.'
        raise mesonlib.EnvironmentException(m.format(self.id))
    def get_debugfile_args(self, targetfile: str) -> typing.List[str]:
        """Some compilers (MSVC) write debug into a separate file.
        This method takes the target object path and returns a list of
        commands to append to the linker invocation to control where that
        file is written.
        """
        return []
    def get_std_shared_lib_args(self) -> typing.List[str]:
        return []
    def get_std_shared_module_args(self, options: 'OptionDictType') -> typing.List[str]:
        # By default a loadable module links like a shared library.
        return self.get_std_shared_lib_args()
    def get_pie_args(self) -> typing.List[str]:
        # TODO: this really needs to take a boolean and return the args to
        # disable pie, otherwise it only acts to enable pie if pie *isn't* the
        # default.
        m = 'Linker {} does not support position-independent executable'
        raise mesonlib.EnvironmentException(m.format(self.id))
    def get_lto_args(self) -> typing.List[str]:
        return []
    def sanitizer_args(self, value: str) -> typing.List[str]:
        return []
    def get_buildtype_args(self, buildtype: str) -> typing.List[str]:
        # We can override these in children by just overriding the
        # _BUILDTYPE_ARGS value.
        return self._BUILDTYPE_ARGS[buildtype]
    def get_asneeded_args(self) -> typing.List[str]:
        return []
    def get_link_whole_for(self, args: typing.List[str]) -> typing.List[str]:
        raise mesonlib.EnvironmentException(
            'Linker {} does not support link_whole'.format(self.id))
    def get_allow_undefined_args(self) -> typing.List[str]:
        raise mesonlib.EnvironmentException(
            'Linker {} does not support allow undefined'.format(self.id))
    def invoked_by_compiler(self) -> bool:
        """True if meson uses the compiler to invoke the linker."""
        return True
    @abc.abstractmethod
    def get_output_args(self, outname: str) -> typing.List[str]:
        pass
    def get_coverage_args(self) -> typing.List[str]:
        m = "Linker {} doesn't implement coverage data generation.".format(self.id)
        raise mesonlib.EnvironmentException(m)
    @abc.abstractmethod
    def get_search_args(self, dirname: str) -> typing.List[str]:
        pass
    def export_dynamic_args(self, env: 'Environment') -> typing.List[str]:
        return []
    def import_library_args(self, implibname: str) -> typing.List[str]:
        """The name of the outputted import library.
        This implementation is used only on Windows by compilers that use GNU ld
        """
        return []
    def thread_flags(self, env: 'Environment') -> typing.List[str]:
        return []
    def no_undefined_args(self) -> typing.List[str]:
        """Arguments to error if there are any undefined symbols at link time.
        This is the inverse of get_allow_undefined_args().
        TODO: A future cleanup might merge this and
        get_allow_undefined_args() into a single method taking a
        boolean
        """
        return []
    def fatal_warnings(self) -> typing.List[str]:
        """Arguments to make all warnings errors."""
        return []
    def bitcode_args(self) -> typing.List[str]:
        raise mesonlib.MesonException('This linker does not support bitcode bundles')
    def get_debug_crt_args(self) -> typing.List[str]:
        return []
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        return []
class PosixDynamicLinkerMixin:

    """Mixin providing the handful of flags shared by POSIX-ish linkers.

    Several linkers meson supports are POSIX-like without being
    GNU-like, so this small common subset lives in its own mixin.
    """

    def get_search_args(self, dirname: str) -> typing.List[str]:
        return ['-L', dirname]

    def get_output_args(self, outname: str) -> typing.List[str]:
        return ['-o', outname]

    def get_std_shared_lib_args(self) -> typing.List[str]:
        return ['-shared']
class GnuLikeDynamicLinkerMixin:

    """Mixin class for dynamic linkers that provides gnu-like interface.
    This acts as a base for the GNU linkers (bfd and gold), the Intel Xild
    (which comes with ICC), LLVM's lld, and other linkers like GNU-ld.
    """
    _BUILDTYPE_ARGS = {
        'plain': [],
        'debug': [],
        'debugoptimized': [],
        'release': ['-O1'],
        'minsize': [],
        'custom': [],
    }  # type: typing.Dict[str, typing.List[str]]
    def get_buildtype_args(self, buildtype: str) -> typing.List[str]:
        # We can override these in children by just overriding the
        # _BUILDTYPE_ARGS value.
        return mesonlib.listify([self._apply_prefix(a) for a in self._BUILDTYPE_ARGS[buildtype]])
    def get_pie_args(self) -> typing.List[str]:
        return ['-pie']
    def get_asneeded_args(self) -> typing.List[str]:
        return self._apply_prefix('--as-needed')
    def get_link_whole_for(self, args: typing.List[str]) -> typing.List[str]:
        # Bracket the archives so all their members are pulled in.
        if not args:
            return args
        return self._apply_prefix('--whole-archive') + args + self._apply_prefix('--no-whole-archive')
    def get_allow_undefined_args(self) -> typing.List[str]:
        return self._apply_prefix('--allow-shlib-undefined')
    def get_lto_args(self) -> typing.List[str]:
        return ['-flto']
    def sanitizer_args(self, value: str) -> typing.List[str]:
        if value == 'none':
            return []
        return ['-fsanitize=' + value]
    def invoked_by_compiler(self) -> bool:
        """True if meson uses the compiler to invoke the linker."""
        return True
    def get_coverage_args(self) -> typing.List[str]:
        return ['--coverage']
    def export_dynamic_args(self, env: 'Environment') -> typing.List[str]:
        # PE/COFF targets use a different spelling than ELF ones.
        m = env.machines[self.for_machine]
        if m.is_windows() or m.is_cygwin():
            return self._apply_prefix('--export-all-symbols')
        return self._apply_prefix('-export-dynamic')
    def import_library_args(self, implibname: str) -> typing.List[str]:
        return self._apply_prefix('--out-implib=' + implibname)
    def thread_flags(self, env: 'Environment') -> typing.List[str]:
        if env.machines[self.for_machine].is_haiku():
            return []
        return ['-pthread']
    def no_undefined_args(self) -> typing.List[str]:
        return self._apply_prefix('--no-undefined')
    def fatal_warnings(self) -> typing.List[str]:
        return self._apply_prefix('--fatal-warnings')
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        m = env.machines[self.for_machine]
        if m.is_windows() or m.is_cygwin():
            # For PE/COFF the soname argument has no effect
            return []
        sostr = '' if soversion is None else '.' + soversion
        return self._apply_prefix('-soname,{}{}.{}{}'.format(prefix, shlib_name, suffix, sostr))
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        m = env.machines[self.for_machine]
        if m.is_windows() or m.is_cygwin():
            return []
        if not rpath_paths and not install_rpath and not build_rpath:
            return []
        args = []
        origin_placeholder = '$ORIGIN'
        processed_rpaths = prepare_rpaths(rpath_paths, build_dir, from_dir)
        # Need to deduplicate rpaths, as macOS's install_name_tool
        # is *very* allergic to duplicate -delete_rpath arguments
        # when calling depfixer on installation.
        all_paths = mesonlib.OrderedSet([os.path.join(origin_placeholder, p) for p in processed_rpaths])
        # Build_rpath is used as-is (it is usually absolute).
        if build_rpath != '':
            all_paths.add(build_rpath)
        # TODO: should this actually be "for (dragonfly|open)bsd"?
        if mesonlib.is_dragonflybsd() or mesonlib.is_openbsd():
            # This argument instructs the compiler to record the value of
            # ORIGIN in the .dynamic section of the elf. On Linux this is done
            # by default, but is not on dragonfly/openbsd for some reason. Without this
            # $ORIGIN in the runtime path will be undefined and any binaries
            # linked against local libraries will fail to resolve them.
            args.extend(self._apply_prefix('-z,origin'))
        # In order to avoid relinking for RPATH removal, the binary needs to contain just
        # enough space in the ELF header to hold the final installation RPATH.
        paths = ':'.join(all_paths)
        if len(paths) < len(install_rpath):
            # Pad with 'X' so the header slot is large enough for the
            # install-time rpath to be written in place later.
            padding = 'X' * (len(install_rpath) - len(paths))
            if not paths:
                paths = padding
            else:
                paths = paths + ':' + padding
        args.extend(self._apply_prefix('-rpath,' + paths))
        # TODO: should this actually be "for solaris/sunos"?
        if mesonlib.is_sunos():
            return args
        # Rpaths to use while linking must be absolute. These are not
        # written to the binary. Needed only with GNU ld:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=16936
        # Not needed on Windows or other platforms that don't use RPATH
        # https://github.com/mesonbuild/meson/issues/1897
        #
        # In addition, this linker option tends to be quite long and some
        # compilers have trouble dealing with it. That's why we will include
        # one option per folder, like this:
        #
        #   -Wl,-rpath-link,/path/to/folder1 -Wl,-rpath,/path/to/folder2 ...
        #
        # ...instead of just one single looooong option, like this:
        #
        #   -Wl,-rpath-link,/path/to/folder1:/path/to/folder2:...
        for p in rpath_paths:
            args.extend(self._apply_prefix('-rpath-link,' + os.path.join(build_dir, p)))
        return args
class AppleDynamicLinker(PosixDynamicLinkerMixin, DynamicLinker):

    """Apple's ld implementation."""

    def get_asneeded_args(self) -> typing.List[str]:
        return self._apply_prefix('-dead_strip_dylibs')
    def get_allow_undefined_args(self) -> typing.List[str]:
        return self._apply_prefix('-undefined,dynamic_lookup')
    def get_std_shared_module_args(self, options: 'OptionDictType') -> typing.List[str]:
        return ['-bundle'] + self._apply_prefix('-undefined,dynamic_lookup')
    def get_link_whole_for(self, args: typing.List[str]) -> typing.List[str]:
        # Apple ld takes -force_load per archive rather than bracketing
        # flags like GNU ld's --whole-archive.
        result = []  # type: typing.List[str]
        for a in args:
            result.extend(self._apply_prefix('-force_load'))
            result.append(a)
        return result
    def no_undefined_args(self) -> typing.List[str]:
        return self._apply_prefix('-undefined,error')
    def get_always_args(self) -> typing.List[str]:
        return self._apply_prefix('-headerpad_max_install_names')
    def bitcode_args(self) -> typing.List[str]:
        return self._apply_prefix('-bitcode_bundle')
    def fatal_warnings(self) -> typing.List[str]:
        return self._apply_prefix('-fatal_warnings')
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        # Shared modules are loaded by path, not install name.
        if is_shared_module:
            return []
        install_name = ['@rpath/', prefix, shlib_name]
        if soversion is not None:
            install_name.append('.' + soversion)
        install_name.append('.dylib')
        args = ['-install_name', ''.join(install_name)]
        if darwin_versions:
            args.extend(['-compatibility_version', darwin_versions[0],
                         '-current_version', darwin_versions[1]])
        return args
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        if not rpath_paths and not install_rpath and not build_rpath:
            return []
        # Ensure that there is enough space for install_name_tool in-place
        # editing of large RPATHs
        args = self._apply_prefix('-headerpad_max_install_names')
        # @loader_path is the equivalent of $ORIGIN on macOS
        # https://stackoverflow.com/q/26280738
        origin_placeholder = '@loader_path'
        processed_rpaths = prepare_rpaths(rpath_paths, build_dir, from_dir)
        all_paths = mesonlib.OrderedSet([os.path.join(origin_placeholder, p) for p in processed_rpaths])
        if build_rpath != '':
            all_paths.add(build_rpath)
        for rp in all_paths:
            args.extend(self._apply_prefix('-rpath,' + rp))
        return args
class GnuDynamicLinker(GnuLikeDynamicLinkerMixin, PosixDynamicLinkerMixin, DynamicLinker):

    """Representation of GNU ld.bfd and ld.gold."""

    # All behaviour comes from the mixins; nothing tool-specific needed.
    pass
class LLVMDynamicLinker(GnuLikeDynamicLinkerMixin, PosixDynamicLinkerMixin, DynamicLinker):

    """Representation of LLVM's lld (not lld-link) linker.

    This is only the posix-like linker.
    """

    # Behaves like GNU ld for every option meson cares about.
    pass
class XildLinuxDynamicLinker(GnuLikeDynamicLinkerMixin, PosixDynamicLinkerMixin, DynamicLinker):

    """Representation of Intel's Xild linker.

    This is only the linux-like linker which dispatches to Gnu ld.
    """

    pass
class XildAppleDynamicLinker(AppleDynamicLinker):

    """Representation of Intel's Xild linker.

    This is the apple linker, which dispatches to Apple's ld.
    """

    pass
class CcrxDynamicLinker(DynamicLinker):

    """Linker for Renesas CC-RX compiler."""

    def __init__(self, for_machine: mesonlib.MachineChoice,
                 *, version: str = 'unknown version'):
        super().__init__(['rlink.exe'], for_machine, 'rlink', '',
                         version=version)
    def get_accepts_rsp(self) -> bool:
        return False
    def get_lib_prefix(self) -> str:
        return '-lib='
    def get_std_shared_lib_args(self) -> typing.List[str]:
        return []
    def get_output_args(self, outputname: str) -> typing.List[str]:
        return ['-output=%s' % outputname]
    def get_search_args(self, dirname: str) -> 'typing.NoReturn':
        raise EnvironmentError('rlink.exe does not have a search dir argument')
    def get_allow_undefined_args(self) -> typing.List[str]:
        return []
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        return []
class ArmDynamicLinker(PosixDynamicLinkerMixin, DynamicLinker):

    """Linker for the ARM compiler."""

    def __init__(self, for_machine: mesonlib.MachineChoice,
                 *, version: str = 'unknown version'):
        super().__init__(['armlink'], for_machine, 'armlink', '',
                         version=version)
    def get_accepts_rsp(self) -> bool:
        return False
    def get_std_shared_lib_args(self) -> 'typing.NoReturn':
        raise mesonlib.MesonException('The Arm Linkers do not support shared libraries')
    def get_allow_undefined_args(self) -> typing.List[str]:
        return []
class ArmClangDynamicLinker(ArmDynamicLinker):

    """Linker used with ARM's clang fork.

    The interface is similar enough to the old ARM ld that it inherits and
    extends a few things as needed.
    """

    def export_dynamic_args(self, env: 'Environment') -> typing.List[str]:
        return ['--export_dynamic']
    def import_library_args(self, implibname: str) -> typing.List[str]:
        return ['--symdefs=' + implibname]
class PGIDynamicLinker(PosixDynamicLinkerMixin, DynamicLinker):

    """PGI linker."""

    def get_allow_undefined_args(self) -> typing.List[str]:
        return []
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        return []
    def get_std_shared_lib_args(self) -> typing.List[str]:
        # PGI -shared is Linux only.
        if mesonlib.is_windows():
            return ['-Bdynamic', '-Mmakedll']
        elif mesonlib.is_linux():
            return ['-shared']
        return []
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        # PGI takes -R<dir> directly; Windows has no rpath concept.
        if not env.machines[self.for_machine].is_windows():
            return ['-R' + os.path.join(build_dir, p) for p in rpath_paths]
        return []
class PGIStaticLinker(StaticLinker):
    """ar-style static linker used with the PGI toolchain."""
    def __init__(self, exelist: typing.List[str]):
        super().__init__(exelist)
        self.id = 'ar'
        self.std_args = ['-r']
    def get_std_link_args(self) -> typing.List[str]:
        return self.std_args
    def get_output_args(self, target: str) -> typing.List[str]:
        return [target]
class VisualStudioLikeLinkerMixin:

    """Mixin for dynamic linkers speaking the link.exe dialect
    (MSVC link, lld-link, xilink, optlink)."""

    _BUILDTYPE_ARGS = {
        'plain': [],
        'debug': [],
        'debugoptimized': [],
        # The otherwise implicit REF and ICF linker optimisations are disabled by
        # /DEBUG. REF implies ICF.
        'release': ['/OPT:REF'],
        'minsize': ['/INCREMENTAL:NO', '/OPT:REF'],
        'custom': [],
    }  # type: typing.Dict[str, typing.List[str]]
    def __init__(self, *args, direct: bool = True, machine: str = 'x86', **kwargs):
        super().__init__(*args, **kwargs)
        # direct: whether meson invokes this linker itself rather than
        # going through the compiler driver.
        self.direct = direct
        self.machine = machine
    def invoked_by_compiler(self) -> bool:
        return self.direct
    def get_debug_crt_args(self) -> typing.List[str]:
        """Arguments needed to select a debug crt for the linker.
        Sometimes we need to manually select the CRT (C runtime) to use with
        MSVC. One example is when trying to link with static libraries since
        MSVC won't auto-select a CRT for us in that case and will error out
        asking us to select one.
        """
        return self._apply_prefix('/MDd')
    def get_output_args(self, outputname: str) -> typing.List[str]:
        return self._apply_prefix('/MACHINE:' + self.machine) + self._apply_prefix('/OUT:' + outputname)
    def get_always_args(self) -> typing.List[str]:
        return self._apply_prefix('/nologo')
    def get_search_args(self, dirname: str) -> typing.List[str]:
        return self._apply_prefix('/LIBPATH:' + dirname)
    def get_std_shared_lib_args(self) -> typing.List[str]:
        return self._apply_prefix('/DLL')
    def get_debugfile_args(self, targetfile: str) -> typing.List[str]:
        # Derive the .pdb name from the target by swapping the extension.
        pdbarr = targetfile.split('.')[:-1]
        pdbarr += ['pdb']
        return self._apply_prefix('/DEBUG') + self._apply_prefix('/PDB:' + '.'.join(pdbarr))
    def get_link_whole_for(self, args: typing.List[str]) -> typing.List[str]:
        # Only since VS2015
        args = mesonlib.listify(args)
        l = [] # type: typing.List[str]
        for a in args:
            l.extend(self._apply_prefix('/WHOLEARCHIVE:' + a))
        return l
    def get_allow_undefined_args(self) -> typing.List[str]:
        # link.exe
        return self._apply_prefix('/FORCE:UNRESOLVED')
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        # PE/COFF has no soname concept.
        return []
class MSVCDynamicLinker(VisualStudioLikeLinkerMixin, DynamicLinker):

    """Microsoft's Link.exe."""

    def __init__(self, for_machine: mesonlib.MachineChoice, *,
                 exelist: typing.Optional[typing.List[str]] = None,
                 prefix: typing.Union[str, typing.List[str]] = '',
                 machine: str = 'x86', version: str = 'unknown version'):
        super().__init__(exelist or ['link.exe'], for_machine, 'link',
                         prefix, machine=machine, version=version)
class ClangClDynamicLinker(VisualStudioLikeLinkerMixin, DynamicLinker):

    """Clang's lld-link.exe."""

    def __init__(self, for_machine: mesonlib.MachineChoice, *,
                 exelist: typing.Optional[typing.List[str]] = None,
                 prefix: typing.Union[str, typing.List[str]] = '',
                 version: str = 'unknown version'):
        super().__init__(exelist or ['lld-link.exe'], for_machine,
                         'lld-link', prefix, version=version)
class XilinkDynamicLinker(VisualStudioLikeLinkerMixin, DynamicLinker):

    """Intel's Xilink.exe."""

    def __init__(self, for_machine: mesonlib.MachineChoice,
                 *, version: str = 'unknown version'):
        super().__init__(['xilink.exe'], for_machine, 'xilink', '', version=version)
class SolarisDynamicLinker(PosixDynamicLinkerMixin, DynamicLinker):

    """Sys-V derived linker used on Solaris and OpenSolaris."""

    def get_link_whole_for(self, args: typing.List[str]) -> typing.List[str]:
        if not args:
            return args
        return self._apply_prefix('--whole-archive') + args + self._apply_prefix('--no-whole-archive')
    def no_undefined_args(self) -> typing.List[str]:
        return ['-z', 'defs']
    def get_allow_undefined_args(self) -> typing.List[str]:
        return ['-z', 'nodefs']
    def fatal_warnings(self) -> typing.List[str]:
        return ['-z', 'fatal-warnings']
    def build_rpath_args(self, env: 'Environment', build_dir: str, from_dir: str,
                         rpath_paths: str, build_rpath: str,
                         install_rpath: str) -> typing.List[str]:
        if not rpath_paths and not install_rpath and not build_rpath:
            return []
        processed_rpaths = prepare_rpaths(rpath_paths, build_dir, from_dir)
        # Deduplicate while anchoring build-tree rpaths on $ORIGIN.
        all_paths = mesonlib.OrderedSet([os.path.join('$ORIGIN', p) for p in processed_rpaths])
        if build_rpath != '':
            all_paths.add(build_rpath)
        # In order to avoid relinking for RPATH removal, the binary needs to contain just
        # enough space in the ELF header to hold the final installation RPATH.
        paths = ':'.join(all_paths)
        if len(paths) < len(install_rpath):
            padding = 'X' * (len(install_rpath) - len(paths))
            if not paths:
                paths = padding
            else:
                paths = paths + ':' + padding
        return self._apply_prefix('-rpath,{}'.format(paths))
    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        sostr = '' if soversion is None else '.' + soversion
        return self._apply_prefix('-soname,{}{}.{}{}'.format(prefix, shlib_name, suffix, sostr))
class OptlinkDynamicLinker(VisualStudioLikeLinkerMixin, DynamicLinker):
    """Digital Mars dynamic linker for windows."""
    def __init__(self, for_machine: mesonlib.MachineChoice,
                 *, version: str = 'unknown version'):
        # Use optlink instead of link so we don't interfere with other
        # link.exe implementations.
        super().__init__(['optlink.exe'], for_machine, 'optlink', prefix_arg='', version=version)
    def get_allow_undefined_args(self) -> typing.List[str]:
        # No extra flags are needed (or emitted) to tolerate undefined symbols.
        return []
class CudaLinker(DynamicLinker):
    """Cuda linker (nvlink)"""

    @staticmethod
    def parse_version():
        """Probe ``nvlink --version`` and return the version string.

        Returns ``'unknown version'`` when the tool cannot be launched.
        """
        try:
            _, out, _ = mesonlib.Popen_safe(['nvlink', '--version'])
        except OSError:
            return 'unknown version'
        # The banner ends with a line such as
        #   Cuda compilation tools, release 10.0, V10.0.166
        # so everything after the final 'V' is the most precise version
        # string available.
        return out.strip().split('V')[-1]

    def get_output_args(self, outname: str) -> typing.List[str]:
        """Arguments naming the linked output file."""
        return ['-o', outname]

    def get_search_args(self, dirname: str) -> typing.List[str]:
        """Arguments adding a library search directory."""
        return ['-L', dirname]

    def fatal_warnings(self) -> typing.List[str]:
        """Arguments promoting linker warnings to errors."""
        return ['--warning-as-error']

    def get_allow_undefined_args(self) -> typing.List[str]:
        """No arguments are needed to permit undefined symbols."""
        return []

    def get_soname_args(self, env: 'Environment', prefix: str, shlib_name: str,
                        suffix: str, soversion: str, darwin_versions: typing.Tuple[str, str],
                        is_shared_module: bool) -> typing.List[str]:
        """No soname arguments are produced for nvlink."""
        return []
| 36.016719 | 106 | 0.63279 |
795a0aa4772cf37c8a90b3b18c38faf49e9dd316 | 10,879 | py | Python | src/modlunky2/ui/widgets.py | mriswithe/modlunky2 | 20de5c7d226df7134cf87a9b9351fc9c28a89d6a | [
"Apache-2.0"
] | null | null | null | src/modlunky2/ui/widgets.py | mriswithe/modlunky2 | 20de5c7d226df7134cf87a9b9351fc9c28a89d6a | [
"Apache-2.0"
] | null | null | null | src/modlunky2/ui/widgets.py | mriswithe/modlunky2 | 20de5c7d226df7134cf87a9b9351fc9c28a89d6a | [
"Apache-2.0"
] | null | null | null | import logging
import os
import queue
import tkinter as tk
from tkinter import ttk
from tkinter.scrolledtext import ScrolledText
logger = logging.getLogger("modlunky2")
class QueueHandler(logging.Handler):
    """Logging handler that forwards each record onto a queue.

    The GUI drains the queue from the Tk event loop, so records produced
    on any thread can be rendered safely.
    """
    def __init__(self, log_queue):
        super().__init__()
        # Queue shared with the consumer (see ConsoleWindow.poll_log_queue).
        self.log_queue = log_queue

    def emit(self, record):
        """Enqueue *record*; never let an error escape a logging call."""
        try:
            self.log_queue.put(record)
        except Exception:
            # logging.Handler convention: report via handleError instead of
            # propagating into the code that merely logged a message.
            self.handleError(record)
# Adapted from https://beenje.github.io/blog/posts/logging-to-a-tkinter-scrolledtext-widget/
class ConsoleWindow(tk.Frame):
    """Read-only scrolled text pane showing the module logger's output.

    A QueueHandler is attached to ``logger``; emitted records are queued
    and drained onto the widget every 100ms from the Tk event loop.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Create a ScrolledText widget; kept disabled so the user can't type.
        self.scrolled_text = ScrolledText(self, height=7, state="disabled")
        self.scrolled_text.pack(expand=True, fill="both")
        self.scrolled_text.configure(font="TkFixedFont")
        # One colour tag per level name; display() inserts with the record's
        # levelname so each line is coloured by severity.
        self.scrolled_text.tag_config("INFO", foreground="green")
        self.scrolled_text.tag_config("DEBUG", foreground="gray")
        self.scrolled_text.tag_config("WARNING", foreground="orange")
        self.scrolled_text.tag_config("ERROR", foreground="red")
        self.scrolled_text.tag_config("CRITICAL", foreground="red", underline=1)
        # Create a logging handler using a queue
        self.log_queue = queue.Queue()
        self.queue_handler = QueueHandler(self.log_queue)
        formatter = logging.Formatter("%(asctime)s: %(message)s")
        self.queue_handler.setFormatter(formatter)
        logger.addHandler(self.queue_handler)
        # Start polling messages from the queue
        self.after(100, self.poll_log_queue)
    def display(self, record):
        """Format *record* and append it to the text widget."""
        msg = self.queue_handler.format(record)
        # Tk refuses inserts while the widget is disabled, so enable it just
        # long enough to append the line.
        self.scrolled_text.configure(state="normal")
        self.scrolled_text.insert(tk.END, msg + "\n", record.levelname)
        self.scrolled_text.configure(state="disabled")
        # Keep the newest line visible.
        self.scrolled_text.yview(tk.END)
    def poll_log_queue(self):
        """Drain all queued records, then reschedule itself."""
        # Check every 100ms if there is a new message in the queue to display
        while True:
            try:
                record = self.log_queue.get(block=False)
            except queue.Empty:
                break
            else:
                self.display(record)
        self.after(100, self.poll_log_queue)
    def close(self):
        # No-op close hook; nothing to tear down currently.
        pass
class Tab(ttk.Frame):
    """Base class for notebook tabs; subclasses override :meth:`on_load`."""

    def on_load(self):
        """Hook invoked each time the tab is loaded; default does nothing."""
        pass
class ScrollableFrame(ttk.LabelFrame):
    """LabelFrame hosting a vertically scrollable interior.

    Children should be added to ``scrollable_frame``; the canvas scrolls
    them, and the mouse wheel is captured only while the pointer is over
    the canvas (bound on <Enter>, released on <Leave>).
    """
    def __init__(self, container, *args, **kwargs):
        super().__init__(container, *args, **kwargs)
        self.canvas = tk.Canvas(self)
        self.scrollbar = ttk.Scrollbar(
            self, orient="vertical", command=self.canvas.yview
        )
        self.scrollable_frame = tk.Frame(self.canvas)
        self.canvas.pack(side="left", fill="both", expand=True)
        # Whenever the inner frame is resized, extend the scrollable region
        # to cover all of its content.
        self.scrollable_frame.bind(
            "<Configure>",
            lambda e: self.canvas.configure(scrollregion=self.canvas.bbox("all")),
        )
        self.canvas.create_window((0, 0), window=self.scrollable_frame, anchor="nw")
        self.canvas.configure(yscrollcommand=self.scrollbar.set)
        self.canvas.bind("<Enter>", self._bind_to_mousewheel)
        self.canvas.bind("<Leave>", self._unbind_from_mousewheel)
        # NOTE(review): the canvas was already packed above with identical
        # options; this second pack call looks redundant -- confirm before
        # removing.
        self.canvas.pack(side="left", fill="both", expand=True)
        self.scrollbar.pack(side="right", fill="y")
    def _on_mousewheel(self, event):
        # Normalize wheel events: X11 reports Button-4/5, Windows reports
        # a +/-120 delta on <MouseWheel>.
        scroll_dir = None
        if event.num == 5 or event.delta == -120:
            scroll_dir = 1
        elif event.num == 4 or event.delta == 120:
            scroll_dir = -1
        if scroll_dir is None:
            return
        # If the scrollbar is max size don't bother scrolling
        if self.scrollbar.get() == (0.0, 1.0):
            return
        self.canvas.yview_scroll(scroll_dir, "units")
    def _bind_to_mousewheel(self, _event):
        # Windows ("nt") uses <MouseWheel>; other platforms use Button-4/5.
        if "nt" in os.name:
            self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)
        else:
            self.canvas.bind_all("<Button-4>", self._on_mousewheel)
            self.canvas.bind_all("<Button-5>", self._on_mousewheel)
    def _unbind_from_mousewheel(self, _event):
        # Release the global wheel bindings acquired in _bind_to_mousewheel.
        if "nt" in os.name:
            self.canvas.unbind_all("<MouseWheel>")
        else:
            self.canvas.unbind_all("<Button-4>")
            self.canvas.unbind_all("<Button-5>")
class ToggledFrame(tk.Frame):
    """Collapsible frame: a title row whose +/- button shows/hides ``sub_frame``."""
    def __init__(self, parent, text, *args, **options):
        tk.Frame.__init__(self, parent, *args, **options)
        # 1 = expanded, 0 = collapsed; starts collapsed.
        self.show = tk.IntVar()
        self.show.set(0)
        self.title_frame = ttk.Frame(self)
        self.title_frame.pack(fill="x", expand=1)
        ttk.Label(self.title_frame, text=text).pack(side="left", fill="x", expand=1)
        self.toggle_button = ttk.Checkbutton(
            self.title_frame,
            width=2,
            text="+",
            command=self.toggle,
            variable=self.show,
            style="Toolbutton",
        )
        self.toggle_button.pack(side="left")
        # Content area; packed/forgotten by toggle().  Callers add their
        # widgets into sub_frame.
        self.sub_frame = tk.Frame(self, relief="sunken", borderwidth=1)
    def toggle(self):
        """Show or hide the content frame to match the checkbutton state."""
        if bool(self.show.get()):
            self.sub_frame.pack(fill="x", expand=1)
            self.toggle_button.configure(text="-")
        else:
            self.sub_frame.forget()
            self.toggle_button.configure(text="+")
class LevelsTree(ttk.Treeview):
    """Treeview of level rooms with a right-click context menu and an
    in-place rename dialog.

    The add/delete-room menu entries from the original design are not
    wired up yet, so the popup menu is currently empty.
    """

    def __init__(self, parent, *args, **kwargs):
        ttk.Treeview.__init__(self, parent, *args, **kwargs)
        self.popup_menu = tk.Menu(self, tearoff=0)
        # Menu commands (rename / add room / delete room) are intentionally
        # not registered yet; see class docstring.
        self.bind("<Button-3>", self.popup)  # Button-2 on Aqua

    def popup(self, event):
        """Show the context menu at the pointer, always releasing the grab."""
        try:
            self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
        finally:
            self.popup_menu.grab_release()

    def rename(self):
        # rename_dialog() always edits the *focused* entry, so this opens
        # one dialog per selected item (reverse selection order, as before).
        for _ in self.selection()[::-1]:
            self.rename_dialog()

    def rename_dialog(self):
        """Open a small toolwindow to edit the focused entry's name and note."""
        # First check if a blank space was selected
        entry_index = self.focus()
        if entry_index == "":
            return

        # Set up window
        win = tk.Toplevel()
        win.title("Edit Entry")
        win.attributes("-toolwindow", True)  # Windows-specific window style
        self.center(win)

        # Grab the entry's values.  Guard against the focused iid not being
        # a direct child (the original code hit a NameError in that case).
        values = None
        for child in self.get_children():
            if child == entry_index:
                values = self.item(child)["values"]
                break
        if values is None:
            win.destroy()
            return

        # Stored format is "<name> //<note>".
        entry_name = str(values[0])
        entry_note = ""
        parts = str(values[0]).split("//", 2)
        if len(parts) > 1:
            entry_name = parts[0]
            entry_note = parts[1]

        col1_lbl = tk.Label(win, text="Entry: ")
        col1_ent = tk.Entry(win)
        col1_ent.insert(0, entry_name)  # Default is the current name
        col1_lbl.grid(row=0, column=0)
        col1_ent.grid(row=0, column=1)

        col2_lbl = tk.Label(win, text="Note Name: ")
        col2_ent = tk.Entry(win)
        col2_ent.insert(0, entry_note)  # Default is the current note
        col2_lbl.grid(row=0, column=2)
        col2_ent.grid(row=0, column=3)

        def update_then_destroy():
            # BUGFIX: the original passed `self` as the first value argument,
            # so the Treeview widget's repr was written into the saved entry.
            if self.confirm_entry(col1_ent.get(), col2_ent.get()):
                win.destroy()

        ok_button = tk.Button(win, text="Ok")
        ok_button.bind("<Button-1>", lambda e: update_then_destroy())
        ok_button.grid(row=1, column=1)

        cancel_button = tk.Button(win, text="Cancel")
        cancel_button.bind("<Button-1>", lambda c: win.destroy())
        cancel_button.grid(row=1, column=3)

    def confirm_entry(self, entry1, entry2, entry3=None):
        """Replace the focused row with the value "\\<entry1> //<entry2>".

        ``entry3`` is accepted (and ignored) for backward compatibility
        with the previous three-argument call shape.  Returns True so the
        caller can close the edit dialog.
        """
        # Grab the current index in the tree, remove the row, then put it
        # back at the same position with the updated value.
        current_index = self.index(self.focus())
        self.delete(self.focus())
        self.insert(
            "", current_index, values=(str("\\" + str(entry1) + " //" + str(entry2)))
        )
        self.save_needed = True
        return True

    def center(self, toplevel):
        """Center *toplevel* on the screen (resolution assumed 1280x720)."""
        toplevel.update_idletasks()
        # Hard-coded resolution; toplevel.winfo_screenwidth()/-height()
        # would track the real display instead.
        screen_width = 1280
        screen_height = 720
        size = tuple(int(_) for _ in toplevel.geometry().split("+")[0].split("x"))
        x = screen_width / 2 - size[0] / 2
        y = screen_height / 2 - size[1] / 2
        toplevel.geometry("+%d+%d" % (x, y))
class RulesTree(ttk.Treeview):
    """Treeview of rules with Add/Delete actions on a right-click menu."""

    def __init__(self, parent, *args, **kwargs):
        ttk.Treeview.__init__(self, parent, *args, **kwargs)
        self.popup_menu = tk.Menu(self, tearoff=0)
        self.popup_menu.add_command(label="Add", command=self.add)
        self.popup_menu.add_command(label="Delete", command=self.delete_selected)
        self.bind("<Button-3>", self.popup)  # Button-2 on Aqua

    def popup(self, event):
        """Show the context menu at the pointer, always releasing the grab."""
        try:
            self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
        finally:
            self.popup_menu.grab_release()

    def delete_selected(self):
        """Delete the first selected rule after asking for confirmation."""
        # BUGFIX: tkinter does not auto-import its submodules, so the
        # original `tk.messagebox.askquestion(...)` raised AttributeError
        # unless some other module had already imported tkinter.messagebox.
        from tkinter import messagebox
        msg_box = messagebox.askquestion(
            "Delete?",
            "Delete this rule?",
            icon="warning",
        )
        if msg_box == "yes":
            item_iid = self.selection()[0]
            self.delete(item_iid)

    def add(self):
        """Append a new placeholder rule row."""
        self.insert(
            "",
            "end",
            values=["COMMENT", "VAL", "// COMMENT"],
        )
        # self.selection_set(0, 'end')
| 32.966667 | 92 | 0.594448 |
795a0c13ac99a253fae579a867a400298ab95559 | 3,731 | py | Python | ooobuild/lo/uno/uik.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/uno/uik.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/uno/uik.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Struct Class
# this is a auto generated file generated by Cheetah
# Namespace: com.sun.star.uno
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
class Uik(object):
    """
    Struct Class

    Specifies a universal interface key (globally unique).

    This struct is deprecated. Uiks are not used anymore.

    .. deprecated::

        Class is deprecated.

    See Also:
        `API Uik <https://api.libreoffice.org/docs/idl/ref/structcom_1_1sun_1_1star_1_1uno_1_1Uik.html>`_
    """
    __ooo_ns__: str = 'com.sun.star.uno'
    __ooo_full_ns__: str = 'com.sun.star.uno.Uik'
    __ooo_type_name__: str = 'struct'
    typeName: str = 'com.sun.star.uno.Uik'
    """Literal Constant ``com.sun.star.uno.Uik``"""

    def __init__(self, Data1: typing.Optional[int] = 0, Data2: typing.Optional[int] = 0, Data3: typing.Optional[int] = 0, Data4: typing.Optional[int] = 0, Data5: typing.Optional[int] = 0) -> None:
        """
        Constructor

        Passing an existing ``Uik`` as ``Data1`` copies all five data
        blocks from it (copy-constructor behavior); otherwise the five
        integer blocks are stored as given.

        Arguments:
            Data1 (int, optional): Data1 value.
            Data2 (int, optional): Data2 value.
            Data3 (int, optional): Data3 value.
            Data4 (int, optional): Data4 value.
            Data5 (int, optional): Data5 value.
        """
        super().__init__()
        if isinstance(Data1, Uik):
            source = Data1
            for attr in ('Data1', 'Data2', 'Data3', 'Data4', 'Data5'):
                setattr(self, attr, getattr(source, attr))
            return
        self._init(Data1=Data1, Data2=Data2, Data3=Data3, Data4=Data4, Data5=Data5)

    def _init(self, **kwargs) -> None:
        # Store each block on its private slot: Data1 -> _data1, etc.
        self._data1 = kwargs["Data1"]
        self._data2 = kwargs["Data2"]
        self._data3 = kwargs["Data3"]
        self._data4 = kwargs["Data4"]
        self._data5 = kwargs["Data5"]

    @property
    def Data1(self) -> int:
        """specifies a 4 byte data block."""
        return self._data1

    @Data1.setter
    def Data1(self, value: int) -> None:
        self._data1 = value

    @property
    def Data2(self) -> int:
        """specifies a 2 byte data block."""
        return self._data2

    @Data2.setter
    def Data2(self, value: int) -> None:
        self._data2 = value

    @property
    def Data3(self) -> int:
        """specifies a 2 byte data block."""
        return self._data3

    @Data3.setter
    def Data3(self, value: int) -> None:
        self._data3 = value

    @property
    def Data4(self) -> int:
        """specifies a 4 byte data block."""
        return self._data4

    @Data4.setter
    def Data4(self, value: int) -> None:
        self._data4 = value

    @property
    def Data5(self) -> int:
        """specifies a 4 byte data block."""
        return self._data5

    @Data5.setter
    def Data5(self, value: int) -> None:
        self._data5 = value
__all__ = ['Uik']
| 26.274648 | 196 | 0.579201 |
795a0d7430e0837732b2c119c916065add941106 | 857 | py | Python | django_easy_scraper/switch.py | dearopen/django-easy-scraper | eeddf2c6da220ac7c19ed0ea0b5173000c9e4227 | [
"BSD-3-Clause"
] | 2 | 2020-11-17T08:48:51.000Z | 2022-01-28T11:53:24.000Z | django_easy_scraper/switch.py | dearopen/django-easy-scraper | eeddf2c6da220ac7c19ed0ea0b5173000c9e4227 | [
"BSD-3-Clause"
] | 4 | 2020-11-22T10:17:43.000Z | 2020-11-26T09:59:30.000Z | django_easy_scraper/switch.py | dearopen/django-easy-scraper | eeddf2c6da220ac7c19ed0ea0b5173000c9e4227 | [
"BSD-3-Clause"
] | 1 | 2020-11-17T08:48:54.000Z | 2020-11-17T08:48:54.000Z | import re
def get_domain_name(url):
    """Extract the bare host/domain from *url*.

    A leading ``www.`` component is stripped.  Raises ``ValueError`` when
    the url contains no ``scheme://host`` part.
    """
    # BUGFIX: the '.' in the optional www component must be escaped; the
    # original pattern '(?:www.)?' matched ANY character after 'www', so a
    # host like 'wwwxexample.com' was truncated to 'example.com'.
    domain_name_pat = re.compile(r'.*\://(?:www\.)?([^\/]+)')
    extract = domain_name_pat.findall(url)
    if len(extract) == 0:
        raise ValueError("Invalid domain name !! We couldn't parse domain name from your given url")
    else:
        return extract[0]
class BaseSwitch(object):
    """Dispatch scraping callables by domain name.

    Subclasses populate ``switcher`` with entries like::

        'example.com': MyScraperClass.regex_url_scrape

    i.e. the key is the domain being scraped and the value is the callable
    (here the class method ``regex_url_scrape``) invoked for that domain.
    """
    switcher = {}

    @classmethod
    def get_data(cls, url, raise_exception=False):
        """Run the scraper registered for *url*'s domain.

        Raises:
            NotImplementedError: when no scraper is registered for the
                domain.  ``raise_exception`` is forwarded to the scraper.
        """
        domain = get_domain_name(url)
        if domain not in cls.switcher:
            raise NotImplementedError(url)
        return cls.switcher.get(domain)(url, raise_exception)
795a0d8d3dd7b5490374e7bc40cf3632df6f5f1f | 2,168 | py | Python | mnist/restore.py | tensorflow123/python_tutorial | 0226237010127594b7e8a8b8fc3ed9fbd1c7bab2 | [
"Apache-2.0"
] | 3 | 2019-02-25T04:28:04.000Z | 2019-11-23T07:12:28.000Z | mnist/restore.py | tensorflow123/python_tutorial | 0226237010127594b7e8a8b8fc3ed9fbd1c7bab2 | [
"Apache-2.0"
] | null | null | null | mnist/restore.py | tensorflow123/python_tutorial | 0226237010127594b7e8a8b8fc3ed9fbd1c7bab2 | [
"Apache-2.0"
] | 1 | 2020-04-09T02:35:03.000Z | 2020-04-09T02:35:03.000Z | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST data set (downloaded into ./MNIST_data on first use);
# labels come back one-hot encoded.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
# Directory containing the SavedModel exported by the training script.
saved_model_dir='./model'
# Index of the single test image used for the spot-check below.
index=1
# Keys under which the exported SignatureDef stores its tensors; these
# must match the names used when the model was saved.
signature_key = 'test_signature'
input_key_x = 'input_x'
input_key_y = 'input_y'
input_key_keep_prob = 'keep_prob'
output_key_prediction = 'prediction'
def compute_accuracy(sess, prediction, input_x, keep_prob, v_xs, v_ys):
    """Return classification accuracy of `prediction` over (v_xs, v_ys).

    Runs the prediction tensor on v_xs, compares argmax classes against
    the one-hot labels v_ys, and evaluates the mean of the matches.
    """
    y_pre = sess.run(prediction, feed_dict={input_x: v_xs, keep_prob: 1})
    correct_prediction = tf.equal(tf.argmax(y_pre,1), tf.argmax(v_ys,1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # NOTE(review): `input_y` is not a parameter of this function -- it
    # relies on the module-level `input_y` tensor bound later inside the
    # SavedModel-loading block, so this only works after that block runs.
    result = sess.run(accuracy, feed_dict={input_x: v_xs, input_y: v_ys, keep_prob: 1})
    return result
# Restore the SavedModel tagged 'model_final' and run inference with it.
with tf.Session() as sess:
    meta_graph_def = tf.saved_model.loader.load(sess, ['model_final'], saved_model_dir)
    # Extract the SignatureDef map from meta_graph_def
    signature = meta_graph_def.signature_def
    # Resolve the concrete input/output tensor names from the signature
    x_tensor_name = signature[signature_key].inputs[input_key_x].name
    y_tensor_name = signature[signature_key].inputs[input_key_y].name
    keep_prob_tensor_name = signature[signature_key].inputs[input_key_keep_prob].name
    prediction_tensor_name = signature[signature_key].outputs[output_key_prediction].name
    # Fetch the tensors and run inference
    input_x = sess.graph.get_tensor_by_name(x_tensor_name)
    input_y = sess.graph.get_tensor_by_name(y_tensor_name)
    keep_prob = sess.graph.get_tensor_by_name(keep_prob_tensor_name)
    prediction = sess.graph.get_tensor_by_name(prediction_tensor_name)
    # Spot-check a single test sample
    x = mnist.test.images[index].reshape(1, 784)
    y = mnist.test.labels[index].reshape(1, 10)  # reshape the one-hot label row to (1, 10)
    print (y)
    pred_y = sess.run(prediction, feed_dict={input_x: x, keep_prob : 1 })
    print (pred_y)
    print("Actual class: ", str(sess.run(tf.argmax(y, 1))), \
        ", predict class ",str(sess.run(tf.argmax(pred_y, 1))), \
        ", predict ", str(sess.run(tf.equal(tf.argmax(y, 1), tf.argmax(pred_y, 1))))
    )
    # Evaluate accuracy on the first 1000 test samples
    print(compute_accuracy(sess, prediction, input_x, keep_prob,
                           mnist.test.images[:1000], mnist.test.labels[:1000]))
795a0dfea26ea15aaec8a3f832c20383413aa497 | 20,178 | py | Python | src/genie/libs/parser/asa/show_interface.py | kacann/genieparser | 76e19003199c393c59a33546726de3ff5486da80 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/asa/show_interface.py | kacann/genieparser | 76e19003199c393c59a33546726de3ff5486da80 | [
"Apache-2.0"
] | null | null | null | src/genie/libs/parser/asa/show_interface.py | kacann/genieparser | 76e19003199c393c59a33546726de3ff5486da80 | [
"Apache-2.0"
] | 1 | 2021-07-07T18:07:56.000Z | 2021-07-07T18:07:56.000Z | ''' show_interface.py
Parser for the following show commands:
* show interface summary
* show interface ip brief
* show interface details
'''
# Python
import re
# Metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
# =============================================
# Schema for 'show interface summary'
# =============================================
class ShowInterfaceSummarySchema(MetaParser):
    """Schema for
        * show interface summary
    """
    # Keyed by interface name (e.g. 'Vlan100'); the boolean fields are
    # derived from the textual up/down states by the parser below.
    schema = {
        'interfaces': {
            Any(): {
                'link_status': bool,
                'line_protocol': bool,
                Optional('name'): str,
                Optional('mac_address'): str,
                Optional('mtu'): int,
                Optional('ipv4'): {
                    # Keyed by the interface's IPv4 address.
                    Any(): {
                        Optional('ip'): str,
                        Optional('prefix_length'): str
                    }
                },
                Optional('subnet'): str,
                Optional('interface_state'): bool,
                Optional('config_status'): bool,
                Optional('config_issue'): str
            },
        }
    }
# =============================================
# Parser for 'show interface summary'
# =============================================
class ShowInterfaceSummary(ShowInterfaceSummarySchema):
    """Parser for
        * show interface summary
    """
    cli_command = 'show interface summary'
    def cli(self, output=None):
        """Parse `show interface summary` output into the schema dict.

        `output` may be pre-collected text; when None the command is
        executed on the connected device.
        """
        if output is None:
            # execute command to get output
            out = self.device.execute(self.cli_command)
        else:
            out = output
        ret_dict = {}
        # Interface Vlan100 "pod10", is up, line protocol is up
        p1 = re.compile(r'^Interface +(?P<interface>\S+) +"(?P<name>\S*)", +is +'
            '(?P<link_status>\w+), +line +protocol +is +(?P<line_protocol>\w+)$')
        # MAC address 11aa.22bb.33cc, MTU 1500
        p2 = re.compile(r'^MAC address +(?P<mac_address>[\w\.]+), +MTU +(?P<mtu>\d+)$')
        # IP address 10.10.1.1, subnet mask 255.255.255.0
        p3 = re.compile(r'^IP +address +(?P<ip>[a-z0-9\.]+)'
            '(\/(?P<prefix_length>[0-9]+))?, +subnet +mask '
            '+(?P<subnet>[\w\.]+)$')
        # Available but not configured via nameif
        p4 = re.compile(r'^(?P<interface_state>Available) +but +'
            '(?P<config_status>not +configured) +via +(?P<config_issue>\S*)$')
        for line in out.splitlines():
            line = line.strip()
            # Interface Vlan100 "pod10", is up, line protocol is up
            m = p1.match(line)
            if m:
                groups = m.groupdict()
                interface = groups['interface']
                instance_dict = ret_dict.setdefault('interfaces', {}). \
                    setdefault(interface, {})
                instance_dict.update({'name': groups['name']})
                # Map the textual up/down states onto booleans.
                link_status = groups['link_status'].lower()
                if 'up' in link_status:
                    instance_dict.update({'link_status': True})
                if 'down' in link_status:
                    instance_dict.update({'link_status': False})
                line_protocol = groups['line_protocol'].lower()
                if 'up' in line_protocol:
                    instance_dict.update({'line_protocol': True})
                if 'down' in line_protocol:
                    instance_dict.update({'line_protocol': False})
                # interface_state/config_status are inferred from the
                # combination of nameif presence and up/down states.
                if groups['name'] and link_status == 'up' \
                    and line_protocol == 'up':
                    instance_dict.update({'interface_state': True, \
                        'config_status': True})
                if groups['name'] and link_status == 'down' \
                    and line_protocol == 'down':
                    instance_dict.update({'interface_state': False, \
                        'config_status': True})
                if groups['name'] == '' and link_status == 'down' \
                    and line_protocol == 'down':
                    instance_dict.update({'interface_state': False, \
                        'config_status': False})
                continue
            # MAC address 11aa.22bb.33cc, MTU 1500
            # NOTE(review): the handlers below assume p1 matched earlier so
            # instance_dict is bound; malformed output would raise NameError.
            m = p2.match(line)
            if m:
                groups = m.groupdict()
                instance_dict.update({'mac_address': groups['mac_address']})
                instance_dict.update({'mtu': int(groups['mtu'])})
                continue
            # IP address 10.10.1.1, subnet mask 255.255.255.0
            m = p3.match(line)
            if m:
                groups = m.groupdict()
                ipv4 = groups['ip']
                if groups['prefix_length']:
                    # NOTE(review): 'address' is computed but never used.
                    address = groups['ip'] + '/' + groups['prefix_length']
                dict_ipv4 = instance_dict.setdefault('ipv4', {}).setdefault(ipv4, {})
                dict_ipv4.update({'ip': groups['ip']})
                if groups['prefix_length']:
                    dict_ipv4.update({'prefix_length': groups['prefix_length']})
                instance_dict.update({'subnet': groups['subnet']})
                continue
            # Available but not configured via nameif
            m = p4.match(line)
            if m:
                groups = m.groupdict()
                if groups['interface_state'] == 'Available' \
                    and groups['config_status'] == 'not configured':
                    instance_dict.update({'interface_state': True})
                    instance_dict.update({'config_status': False})
                    instance_dict.update({'config_issue': groups['config_issue']})
                continue
        return ret_dict
# =============================================
# Schema for 'show interface ip brief'
# =============================================
class ShowInterfaceIpBriefSchema(MetaParser):
    """Schema for
        * show interface ip brief
    """
    # Keyed by interface name.  When the device reports the address as
    # 'unassigned', an 'unnumbered' entry is produced instead of an
    # address-keyed entry.
    schema = {
        'interfaces': {
            Any(): {
                Optional('ipv4'): {
                    Any(): {
                        Optional('ip'): str,
                        Optional('prefix_length'): str
                    },
                    Optional('unnumbered'): {
                        Optional('unnumbered_intf_ref'): str
                    }
                },
                'check': str,
                'method': str,
                'link_status': str,
                Optional('line_protocol'): str
            },
        }
    }
# =============================================
# Parser for 'show interface ip brief'
# =============================================
class ShowInterfaceIpBrief(ShowInterfaceIpBriefSchema):
    """Parser for
        * show interface ip brief
    """
    cli_command = 'show interface ip brief'
    def cli(self, output=None):
        """Parse `show interface ip brief` output into the schema dict.

        `output` may be pre-collected text; when None the command is
        executed on the connected device.
        """
        if output is None:
            # execute command to get output
            out = self.device.execute(self.cli_command)
        else:
            out = output
        ret_dict = {}
        # One pattern covers all table rows, e.g.:
        # Control0/0          10.10.1.1   YES CONFIG up          up
        # GigabitEthernet0/1  unassigned  YES unset  admin down  down
        # GigabitEthernet0/3  10.10.1.1   YES DHCP   admin down  down
        # Management0/0       10.10.1.1   YES CONFIG up
        p1 = re.compile(r'^(?P<interface>\S+) *(?P<ip>unassigned|\d+.\d+.\d+.\d+)?'
            '(\/(?P<prefix_length>[0-9]+))? *(?P<check>\w+) *(?P<method>\S* ?\S*?) *'
            '(?P<link_status>\w+) *(?P<line_protocol>\w+)?$')
        for line in out.splitlines():
            line = line.strip()
            m = p1.match(line)
            if m:
                groups = m.groupdict()
                interface = groups['interface']
                instance_dict = ret_dict.setdefault('interfaces', {}). \
                    setdefault(interface, {})
                if groups['ip'] == 'unassigned':
                    # No address: record an 'unnumbered' reference instead.
                    dict_unnumbered = instance_dict.setdefault('ipv4', {}). \
                        setdefault('unnumbered', {})
                    dict_unnumbered.update({'unnumbered_intf_ref': groups['ip']})
                else:
                    ipv4 = groups['ip']
                    dict_ipv4 = instance_dict.setdefault('ipv4', {}). \
                        setdefault(ipv4, {})
                    dict_ipv4.update({'ip': groups['ip']})
                    if groups['prefix_length']:
                        dict_ipv4.update({'prefix_length': groups['prefix_length']})
                instance_dict.update({'check': groups['check']})
                # The 'method' group can capture trailing padding (e.g. the
                # two-word 'admin down' column); normalize the whitespace.
                instance_dict.update({'method': groups['method'].strip()})
                instance_dict.update({'link_status': groups['link_status']})
                if groups['line_protocol']:
                    instance_dict.update({'line_protocol': groups['line_protocol']})
                continue
        return ret_dict
# =============================================
# Schema for 'show interface detail'
# =============================================
class ShowInterfaceDetailSchema(MetaParser):
    """Schema for
        * show interface detail
    """
    # Extends the summary schema shape with per-interface traffic counters
    # and control-point state.  Outer Any() is the interface name; the
    # inner Any() under control_point_states is also keyed by interface.
    schema = {
        'interfaces': {
            Any(): {
                'link_status': bool,
                'line_protocol': bool,
                Optional('name'): str,
                Optional('mac_address'): str,
                Optional('mtu'): int,
                Optional('ipv4'): {
                    Any(): {
                        Optional('ip'): str,
                        Optional('prefix_length'): str
                    },
                },
                Optional('subnet'): str,
                Optional('interface_state'): bool,
                Optional('config_status'): bool,
                Optional('config_issue'): str,
                Optional('traffic_statistics'): {
                    'packets_input': int,
                    'bytes_input': int,
                    'packets_output': int,
                    'bytes_output': int,
                    'packets_dropped': int
                },
                Optional('control_point_states'): {
                    'interface': {
                        'interface_number': int,
                        'interface_config_status': str,
                        'interface_state': str
                    },
                    Any():{
                        'interface_vlan_config_status': str,
                        'interface_vlan_state': str
                    },
                }
            },
        }
    }
# =============================================
# Parser for 'show interface detail'
# =============================================
class ShowInterfaceDetail(ShowInterfaceDetailSchema):
    """Parser for
        * show interface detail
    """
    cli_command = 'show interface detail'
    def cli(self, output=None):
        """Parse `show interface detail` output into the schema dict.

        `output` may be pre-collected text; when None the command is
        executed on the connected device.

        NOTE(review): handlers for p2..p12 assume that earlier patterns
        already matched (binding instance_dict / dict_traffic /
        dict_interface / dict_vlan); output arriving out of the expected
        order would raise NameError.
        """
        if output is None:
            # execute command to get output
            out = self.device.execute(self.cli_command)
        else:
            out = output
        ret_dict = {}
        # Interface Vlan300 "admin-out", is up, line protocol is up
        p1 = re.compile(r'^Interface +(?P<interface>\S+) +"(?P<name>\S*)", +is +'
            '(?P<link_status>\w+), +line +protocol +is +(?P<line_protocol>\w+)$')
        # MAC address aa11.bb22.cc33, MTU 1500
        p2 = re.compile(r'^MAC address +(?P<mac_address>[\w\.]+), +MTU +(?P<mtu>\d+)$')
        # IP address 10.10.10.1, subnet mask 255.255.255.0
        p3 = re.compile(r'^IP +address +(?P<ip>[a-z0-9\.]+)'
            '(\/(?P<prefix_length>[0-9]+))?, +subnet +mask '
            '+(?P<subnet>[\w\.]+)$')
        # Available but not configured via nameif
        p4 = re.compile(r'^(?P<interface_state>Available) +but +'
            '(?P<config_status>not +configured) +via +(?P<config_issue>\S*)$')
        # 889007666 packets input, 785740327549 bytes
        p5 = re.compile(r'^(?P<packets_input>\d+) +packets +input, '
            '+(?P<bytes_input>[\d]+) +bytes$')
        # 621453837 packets output, 428046938178 bytes
        p6 = re.compile(r'^(?P<packets_output>\d+) +packets +output, '
            '+(?P<bytes_output>[\d]+) +bytes$')
        # 2988535 packets dropped
        p7 = re.compile(r'^(?P<packets_dropped>\d+) +packets +dropped$')
        # Interface number is 5
        p8 = re.compile(r'^Interface +number +is +(?P<interface_number>\d+)$')
        # Interface config status is active
        # Interface config status is not active
        p9 = re.compile(r'^Interface +config +status +is '
            '+(?P<interface_config_status>[\S\s]+)$')
        # Interface state is active
        # Interface state is not active
        p10 = re.compile(r'^Interface +state +is +(?P<interface_state>[\w\ ]+)$')
        # Interface vlan config status is active
        # Interface vlan config status is not active
        p11 = re.compile(r'^Interface +vlan +config +status +is '
            '+(?P<interface_vlan_config_status>[\S\s]+)$')
        # Interface vlan state is UP
        # Interface vlan state is DOWN (down in system space)
        p12 = re.compile(r'^Interface +vlan +state +is +'
            '(?P<interface_vlan_state>\w+)+([\S\s]+)?$')
        for line in out.splitlines():
            line = line.strip()
            # Interface Vlan300 "admin-out", is up, line protocol is up
            m = p1.match(line)
            if m:
                groups = m.groupdict()
                interface = groups['interface']
                instance_dict = ret_dict.setdefault('interfaces', {}). \
                    setdefault(interface, {})
                instance_dict.update({'name': groups['name']})
                # Map the textual up/down states onto booleans.
                link_status = groups['link_status'].lower()
                if 'up' in link_status:
                    instance_dict.update({'link_status': True})
                if 'down' in link_status:
                    instance_dict.update({'link_status': False})
                line_protocol = groups['line_protocol'].lower()
                if 'up' in line_protocol:
                    instance_dict.update({'line_protocol': True})
                if 'down' in line_protocol:
                    instance_dict.update({'line_protocol': False})
                # interface_state/config_status are inferred from the
                # combination of nameif presence and up/down states.
                if groups['name'] \
                    and link_status == 'up' \
                    and line_protocol == 'up':
                    instance_dict.update({'interface_state': True, \
                        'config_status': True})
                if groups['name'] and link_status == 'down' \
                    and line_protocol == 'down':
                    instance_dict.update({'interface_state': False, \
                        'config_status': True})
                if groups['name'] == '' \
                    and link_status == 'down' \
                    and line_protocol == 'down':
                    instance_dict.update({'interface_state': False, \
                        'config_status': False})
                continue
            # MAC address aa11.bb22.cc33, MTU 1500
            m = p2.match(line)
            if m:
                groups = m.groupdict()
                instance_dict.update({'mac_address': groups['mac_address']})
                instance_dict.update({'mtu': int(groups['mtu'])})
                continue
            # IP address 10.10.10.1, subnet mask 255.255.255.0
            m = p3.match(line)
            if m:
                groups = m.groupdict()
                ipv4 = groups['ip']
                if groups['prefix_length']:
                    # NOTE(review): 'address' is computed but never used.
                    address = groups['ip'] + '/' + groups['prefix_length']
                dict_ipv4 = instance_dict.setdefault('ipv4', {}).setdefault(ipv4, {})
                dict_ipv4.update({'ip': groups['ip']})
                if groups['prefix_length']:
                    dict_ipv4.update({'prefix_length': groups['prefix_length']})
                instance_dict.update({'subnet': groups['subnet']})
                continue
            # Available but not configured via nameif
            m = p4.match(line)
            if m:
                groups = m.groupdict()
                if groups['interface_state'] == 'Available' \
                    and groups['config_status'] == 'not configured':
                    instance_dict.update({'interface_state': True})
                    instance_dict.update({'config_status': False})
                    instance_dict.update({'config_issue': groups['config_issue']})
                continue
            # 889007666 packets input, 785740327549 bytes
            m = p5.match(line)
            if m:
                groups = m.groupdict()
                dict_traffic = instance_dict.setdefault('traffic_statistics', {})
                dict_traffic.update({'packets_input': \
                    int(groups['packets_input'])})
                dict_traffic.update({'bytes_input': \
                    int(groups['bytes_input'])})
                continue
            # 621453837 packets output, 428046938178 bytes
            m = p6.match(line)
            if m:
                groups = m.groupdict()
                dict_traffic.update({'packets_output': \
                    int(groups['packets_output'])})
                dict_traffic.update({'bytes_output': \
                    int(groups['bytes_output'])})
                continue
            # 2988535 packets dropped
            m = p7.match(line)
            if m:
                groups = m.groupdict()
                dict_traffic.update({'packets_dropped': \
                    int(groups['packets_dropped'])})
                continue
            # Interface number is 5
            m = p8.match(line)
            if m:
                groups = m.groupdict()
                dict_control = instance_dict.setdefault('control_point_states', {})
                dict_interface = dict_control.setdefault('interface', {})
                dict_interface.update({'interface_number': \
                    int(groups['interface_number'])})
                continue
            # Interface config status is active
            # Interface config status is not active
            m = p9.match(line)
            if m:
                groups = m.groupdict()
                dict_interface. \
                    update({'interface_config_status': groups['interface_config_status']})
                continue
            # Interface state is active
            # Interface state is not active
            m = p10.match(line)
            if m:
                groups = m.groupdict()
                dict_interface.update({'interface_state': groups['interface_state']})
                continue
            # Interface vlan config status is active
            # Interface vlan config status is not active
            m = p11.match(line)
            if m:
                groups = m.groupdict()
                # Vlan state is stored under the current interface's name.
                dict_vlan = dict_control.setdefault(interface, {})
                dict_vlan.update({'interface_vlan_config_status': \
                    groups['interface_vlan_config_status']})
                continue
            # Interface vlan state is UP
            # Interface vlan state is DOWN (down in system space)
            m = p12.match(line)
            if m:
                groups = m.groupdict()
                dict_vlan.update({'interface_vlan_state': \
                    groups['interface_vlan_state']})
                continue
        return ret_dict
795a0e120da150f4ca49bb637967a8deaa1819e0 | 106 | py | Python | Taekwon/Python/implementation/knight.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 2 | 2021-09-27T19:10:36.000Z | 2021-11-09T05:40:39.000Z | Taekwon/Python/implementation/knight.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | 1 | 2021-11-15T14:56:54.000Z | 2021-11-15T14:56:54.000Z | Taekwon/Python/implementation/knight.py | sonnysorry/codingtest | 478e0168e3209eb97b6b16910027bf12ccc3ccd0 | [
"MIT"
] | null | null | null | a = input()
dx = [-1, -2, -2, -1, 1, 2, 2, 1]
dy = [2, 1, -1, -2, -2, -1, 1, 2]
for i in range(8):
| 13.25 | 33 | 0.358491 |
795a0e8d2ab744563d04372b83cc232012451cc3 | 15,578 | py | Python | sdk/python/pulumi_azure_native/network/v20160901/get_application_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20160901/get_application_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20160901/get_application_gateway.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetApplicationGatewayResult',
'AwaitableGetApplicationGatewayResult',
'get_application_gateway',
]
@pulumi.output_type
class GetApplicationGatewayResult:
"""
Application gateway resource
"""
def __init__(__self__, authentication_certificates=None, backend_address_pools=None, backend_http_settings_collection=None, etag=None, frontend_ip_configurations=None, frontend_ports=None, gateway_ip_configurations=None, http_listeners=None, id=None, location=None, name=None, operational_state=None, probes=None, provisioning_state=None, request_routing_rules=None, resource_guid=None, sku=None, ssl_certificates=None, ssl_policy=None, tags=None, type=None, url_path_maps=None, web_application_firewall_configuration=None):
if authentication_certificates and not isinstance(authentication_certificates, list):
raise TypeError("Expected argument 'authentication_certificates' to be a list")
pulumi.set(__self__, "authentication_certificates", authentication_certificates)
if backend_address_pools and not isinstance(backend_address_pools, list):
raise TypeError("Expected argument 'backend_address_pools' to be a list")
pulumi.set(__self__, "backend_address_pools", backend_address_pools)
if backend_http_settings_collection and not isinstance(backend_http_settings_collection, list):
raise TypeError("Expected argument 'backend_http_settings_collection' to be a list")
pulumi.set(__self__, "backend_http_settings_collection", backend_http_settings_collection)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if frontend_ip_configurations and not isinstance(frontend_ip_configurations, list):
raise TypeError("Expected argument 'frontend_ip_configurations' to be a list")
pulumi.set(__self__, "frontend_ip_configurations", frontend_ip_configurations)
if frontend_ports and not isinstance(frontend_ports, list):
raise TypeError("Expected argument 'frontend_ports' to be a list")
pulumi.set(__self__, "frontend_ports", frontend_ports)
if gateway_ip_configurations and not isinstance(gateway_ip_configurations, list):
raise TypeError("Expected argument 'gateway_ip_configurations' to be a list")
pulumi.set(__self__, "gateway_ip_configurations", gateway_ip_configurations)
if http_listeners and not isinstance(http_listeners, list):
raise TypeError("Expected argument 'http_listeners' to be a list")
pulumi.set(__self__, "http_listeners", http_listeners)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if operational_state and not isinstance(operational_state, str):
raise TypeError("Expected argument 'operational_state' to be a str")
pulumi.set(__self__, "operational_state", operational_state)
if probes and not isinstance(probes, list):
raise TypeError("Expected argument 'probes' to be a list")
pulumi.set(__self__, "probes", probes)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if request_routing_rules and not isinstance(request_routing_rules, list):
raise TypeError("Expected argument 'request_routing_rules' to be a list")
pulumi.set(__self__, "request_routing_rules", request_routing_rules)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if ssl_certificates and not isinstance(ssl_certificates, list):
raise TypeError("Expected argument 'ssl_certificates' to be a list")
pulumi.set(__self__, "ssl_certificates", ssl_certificates)
if ssl_policy and not isinstance(ssl_policy, dict):
raise TypeError("Expected argument 'ssl_policy' to be a dict")
pulumi.set(__self__, "ssl_policy", ssl_policy)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if url_path_maps and not isinstance(url_path_maps, list):
raise TypeError("Expected argument 'url_path_maps' to be a list")
pulumi.set(__self__, "url_path_maps", url_path_maps)
if web_application_firewall_configuration and not isinstance(web_application_firewall_configuration, dict):
raise TypeError("Expected argument 'web_application_firewall_configuration' to be a dict")
pulumi.set(__self__, "web_application_firewall_configuration", web_application_firewall_configuration)
@property
@pulumi.getter(name="authenticationCertificates")
def authentication_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewayAuthenticationCertificateResponse']]:
"""
Authentication certificates of the application gateway resource.
"""
return pulumi.get(self, "authentication_certificates")
@property
@pulumi.getter(name="backendAddressPools")
def backend_address_pools(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendAddressPoolResponse']]:
"""
Backend address pool of the application gateway resource.
"""
return pulumi.get(self, "backend_address_pools")
@property
@pulumi.getter(name="backendHttpSettingsCollection")
def backend_http_settings_collection(self) -> Optional[Sequence['outputs.ApplicationGatewayBackendHttpSettingsResponse']]:
"""
Backend http settings of the application gateway resource.
"""
return pulumi.get(self, "backend_http_settings_collection")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="frontendIPConfigurations")
def frontend_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendIPConfigurationResponse']]:
"""
Frontend IP addresses of the application gateway resource.
"""
return pulumi.get(self, "frontend_ip_configurations")
@property
@pulumi.getter(name="frontendPorts")
def frontend_ports(self) -> Optional[Sequence['outputs.ApplicationGatewayFrontendPortResponse']]:
"""
Frontend ports of the application gateway resource.
"""
return pulumi.get(self, "frontend_ports")
@property
@pulumi.getter(name="gatewayIPConfigurations")
def gateway_ip_configurations(self) -> Optional[Sequence['outputs.ApplicationGatewayIPConfigurationResponse']]:
"""
Subnets of application the gateway resource.
"""
return pulumi.get(self, "gateway_ip_configurations")
@property
@pulumi.getter(name="httpListeners")
def http_listeners(self) -> Optional[Sequence['outputs.ApplicationGatewayHttpListenerResponse']]:
"""
Http listeners of the application gateway resource.
"""
return pulumi.get(self, "http_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="operationalState")
def operational_state(self) -> str:
"""
Operational state of the application gateway resource. Possible values are: 'Stopped', 'Started', 'Running', and 'Stopping'.
"""
return pulumi.get(self, "operational_state")
@property
@pulumi.getter
def probes(self) -> Optional[Sequence['outputs.ApplicationGatewayProbeResponse']]:
"""
Probes of the application gateway resource.
"""
return pulumi.get(self, "probes")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
Provisioning state of the application gateway resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="requestRoutingRules")
def request_routing_rules(self) -> Optional[Sequence['outputs.ApplicationGatewayRequestRoutingRuleResponse']]:
"""
Request routing rules of the application gateway resource.
"""
return pulumi.get(self, "request_routing_rules")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
Resource GUID property of the application gateway resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.ApplicationGatewaySkuResponse']:
"""
SKU of the application gateway resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="sslCertificates")
def ssl_certificates(self) -> Optional[Sequence['outputs.ApplicationGatewaySslCertificateResponse']]:
"""
SSL certificates of the application gateway resource.
"""
return pulumi.get(self, "ssl_certificates")
@property
@pulumi.getter(name="sslPolicy")
def ssl_policy(self) -> Optional['outputs.ApplicationGatewaySslPolicyResponse']:
"""
SSL policy of the application gateway resource.
"""
return pulumi.get(self, "ssl_policy")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="urlPathMaps")
def url_path_maps(self) -> Optional[Sequence['outputs.ApplicationGatewayUrlPathMapResponse']]:
"""
URL path map of the application gateway resource.
"""
return pulumi.get(self, "url_path_maps")
@property
@pulumi.getter(name="webApplicationFirewallConfiguration")
def web_application_firewall_configuration(self) -> Optional['outputs.ApplicationGatewayWebApplicationFirewallConfigurationResponse']:
"""
Web application firewall configuration.
"""
return pulumi.get(self, "web_application_firewall_configuration")
class AwaitableGetApplicationGatewayResult(GetApplicationGatewayResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` makes this method a generator, which is what makes
        # the object awaitable; it never actually suspends.
        if False:
            yield self
        # Re-pack every result field into a plain (non-awaitable) result.
        field_names = (
            'authentication_certificates', 'backend_address_pools',
            'backend_http_settings_collection', 'etag',
            'frontend_ip_configurations', 'frontend_ports',
            'gateway_ip_configurations', 'http_listeners', 'id', 'location',
            'name', 'operational_state', 'probes', 'provisioning_state',
            'request_routing_rules', 'resource_guid', 'sku',
            'ssl_certificates', 'ssl_policy', 'tags', 'type', 'url_path_maps',
            'web_application_firewall_configuration')
        return GetApplicationGatewayResult(
            **{field: getattr(self, field) for field in field_names})
def get_application_gateway(application_gateway_name: Optional[str] = None,
                            resource_group_name: Optional[str] = None,
                            opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationGatewayResult:
    """
    Application gateway resource


    :param str application_gateway_name: The name of the application gateway.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = {
        'applicationGatewayName': application_gateway_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    raw_result = pulumi.runtime.invoke('azure-native:network/v20160901:getApplicationGateway', __args__, opts=opts, typ=GetApplicationGatewayResult).value
    # Copy every field of the raw invoke result into the awaitable wrapper.
    field_names = (
        'authentication_certificates', 'backend_address_pools',
        'backend_http_settings_collection', 'etag',
        'frontend_ip_configurations', 'frontend_ports',
        'gateway_ip_configurations', 'http_listeners', 'id', 'location',
        'name', 'operational_state', 'probes', 'provisioning_state',
        'request_routing_rules', 'resource_guid', 'sku', 'ssl_certificates',
        'ssl_policy', 'tags', 'type', 'url_path_maps',
        'web_application_firewall_configuration')
    return AwaitableGetApplicationGatewayResult(
        **{field: getattr(raw_result, field) for field in field_names})
| 44.130312 | 528 | 0.694248 |
795a0f396bd73004af5bcb891e25299664a1c46f | 44,490 | py | Python | scripts/vk_validation_stats.py | shannon-lunarg/Vulkan-ValidationLayers | ff80a937c8a505abbdddb95d8ffaa446820c8391 | [
"Apache-2.0"
] | 1 | 2019-09-22T22:45:07.000Z | 2019-09-22T22:45:07.000Z | scripts/vk_validation_stats.py | shannon-lunarg/Vulkan-ValidationLayers | ff80a937c8a505abbdddb95d8ffaa446820c8391 | [
"Apache-2.0"
] | null | null | null | scripts/vk_validation_stats.py | shannon-lunarg/Vulkan-ValidationLayers | ff80a937c8a505abbdddb95d8ffaa446820c8391 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2019 The Khronos Group Inc.
# Copyright (c) 2015-2019 Valve Corporation
# Copyright (c) 2015-2019 LunarG, Inc.
# Copyright (c) 2015-2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>
# Author: Dave Houlton <daveh@lunarg.com>
# Author: Shannon McPherson <shannon@lunarg.com>
import argparse
import os
import sys
import operator
import platform
import json
import re
import csv
import html
import time
from collections import defaultdict
# ---------------------------------------------------------------------------
# Script configuration.  These defaults are overridden by command-line args
# parsed in main(); the file/path constants are relative to the scripts/ dir.
# ---------------------------------------------------------------------------
verbose_mode = False
txt_db = False
csv_db = False
html_db = False
txt_filename = "validation_error_database.txt"
csv_filename = "validation_error_database.csv"
html_filename = "validation_error_database.html"
header_filename = "../layers/vk_validation_error_messages.h"
test_file = '../tests/layer_validation_tests.cpp'
# Every VUID token in layer/test source starts with one of these prefixes.
vuid_prefixes = ['VUID-', 'UNASSIGNED-']
# Hard-coded flags that could be command line args, if we decide that's useful
# replace KHR vuids with non-KHR during consistency checking
dealias_khr = True
ignore_unassigned = True # These are not found in layer code unless they appear explicitly (most don't), so produce false positives
# Candidate build output directories searched for the generated layer sources
# listed in generated_layer_source_files below.
generated_layer_source_directories = [
'build',
'dbuild',
'release',
'../build/Vulkan-ValidationLayers/'
]
generated_layer_source_files = [
'parameter_validation.cpp',
'object_tracker.cpp',
]
# Hand-written layer sources that are always scanned for VUID usage.
layer_source_files = [
'../layers/buffer_validation.cpp',
'../layers/core_validation.cpp',
'../layers/descriptor_sets.cpp',
'../layers/parameter_validation_utils.cpp',
'../layers/object_tracker_utils.cpp',
'../layers/shader_validation.cpp',
'../layers/stateless_validation.h'
]
# This needs to be updated as new extensions roll in
# Maps a promoted KHR-suffixed implicit VUID to its core (non-KHR) alias, so
# that consistency checking treats the two spellings as the same check.
khr_aliases = {
'VUID-vkBindBufferMemory2KHR-device-parameter' : 'VUID-vkBindBufferMemory2-device-parameter',
'VUID-vkBindBufferMemory2KHR-pBindInfos-parameter' : 'VUID-vkBindBufferMemory2-pBindInfos-parameter',
'VUID-vkBindImageMemory2KHR-device-parameter' : 'VUID-vkBindImageMemory2-device-parameter',
'VUID-vkBindImageMemory2KHR-pBindInfos-parameter' : 'VUID-vkBindImageMemory2-pBindInfos-parameter',
'VUID-vkCmdDispatchBaseKHR-commandBuffer-parameter' : 'VUID-vkCmdDispatchBase-commandBuffer-parameter',
'VUID-vkCmdSetDeviceMaskKHR-commandBuffer-parameter' : 'VUID-vkCmdSetDeviceMask-commandBuffer-parameter',
'VUID-vkCreateDescriptorUpdateTemplateKHR-device-parameter' : 'VUID-vkCreateDescriptorUpdateTemplate-device-parameter',
'VUID-vkCreateDescriptorUpdateTemplateKHR-pDescriptorUpdateTemplate-parameter' : 'VUID-vkCreateDescriptorUpdateTemplate-pDescriptorUpdateTemplate-parameter',
'VUID-vkCreateSamplerYcbcrConversionKHR-device-parameter' : 'VUID-vkCreateSamplerYcbcrConversion-device-parameter',
'VUID-vkCreateSamplerYcbcrConversionKHR-pYcbcrConversion-parameter' : 'VUID-vkCreateSamplerYcbcrConversion-pYcbcrConversion-parameter',
'VUID-vkDestroyDescriptorUpdateTemplateKHR-descriptorUpdateTemplate-parameter' : 'VUID-vkDestroyDescriptorUpdateTemplate-descriptorUpdateTemplate-parameter',
'VUID-vkDestroyDescriptorUpdateTemplateKHR-descriptorUpdateTemplate-parent' : 'VUID-vkDestroyDescriptorUpdateTemplate-descriptorUpdateTemplate-parent',
'VUID-vkDestroyDescriptorUpdateTemplateKHR-device-parameter' : 'VUID-vkDestroyDescriptorUpdateTemplate-device-parameter',
'VUID-vkDestroySamplerYcbcrConversionKHR-device-parameter' : 'VUID-vkDestroySamplerYcbcrConversion-device-parameter',
'VUID-vkDestroySamplerYcbcrConversionKHR-ycbcrConversion-parameter' : 'VUID-vkDestroySamplerYcbcrConversion-ycbcrConversion-parameter',
'VUID-vkDestroySamplerYcbcrConversionKHR-ycbcrConversion-parent' : 'VUID-vkDestroySamplerYcbcrConversion-ycbcrConversion-parent',
'VUID-vkEnumeratePhysicalDeviceGroupsKHR-instance-parameter' : 'VUID-vkEnumeratePhysicalDeviceGroups-instance-parameter',
'VUID-vkEnumeratePhysicalDeviceGroupsKHR-pPhysicalDeviceGroupProperties-parameter' : 'VUID-vkEnumeratePhysicalDeviceGroups-pPhysicalDeviceGroupProperties-parameter',
'VUID-vkGetBufferMemoryRequirements2KHR-device-parameter' : 'VUID-vkGetBufferMemoryRequirements2-device-parameter',
'VUID-vkGetDescriptorSetLayoutSupportKHR-device-parameter' : 'VUID-vkGetDescriptorSetLayoutSupport-device-parameter',
'VUID-vkGetDeviceGroupPeerMemoryFeaturesKHR-device-parameter' : 'VUID-vkGetDeviceGroupPeerMemoryFeatures-device-parameter',
'VUID-vkGetDeviceGroupPeerMemoryFeaturesKHR-pPeerMemoryFeatures-parameter' : 'VUID-vkGetDeviceGroupPeerMemoryFeatures-pPeerMemoryFeatures-parameter',
'VUID-vkGetImageMemoryRequirements2KHR-device-parameter' : 'VUID-vkGetImageMemoryRequirements2-device-parameter',
'VUID-vkGetImageSparseMemoryRequirements2KHR-device-parameter' : 'VUID-vkGetImageSparseMemoryRequirements2-device-parameter',
'VUID-vkGetImageSparseMemoryRequirements2KHR-pSparseMemoryRequirements-parameter' : 'VUID-vkGetImageSparseMemoryRequirements2-pSparseMemoryRequirements-parameter',
'VUID-vkGetPhysicalDeviceExternalBufferPropertiesKHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceExternalBufferProperties-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceExternalFencePropertiesKHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceExternalFenceProperties-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceExternalSemaphorePropertiesKHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceExternalSemaphoreProperties-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceFeatures2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceFeatures2-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceFormatProperties2KHR-format-parameter' : 'VUID-vkGetPhysicalDeviceFormatProperties2-format-parameter',
'VUID-vkGetPhysicalDeviceFormatProperties2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceFormatProperties2-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceImageFormatProperties2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceImageFormatProperties2-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceMemoryProperties2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceMemoryProperties2-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceProperties2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceProperties2-physicalDevice-parameter',
'VUID-vkGetPhysicalDeviceQueueFamilyProperties2KHR-pQueueFamilyProperties-parameter' : 'VUID-vkGetPhysicalDeviceQueueFamilyProperties2-pQueueFamilyProperties-parameter',
'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2KHR-pProperties-parameter' : 'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2-pProperties-parameter',
'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2KHR-physicalDevice-parameter' : 'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2-physicalDevice-parameter',
'VUID-vkTrimCommandPoolKHR-commandPool-parameter' : 'VUID-vkTrimCommandPool-commandPool-parameter',
'VUID-vkTrimCommandPoolKHR-commandPool-parent' : 'VUID-vkTrimCommandPool-commandPool-parent',
'VUID-vkTrimCommandPoolKHR-device-parameter' : 'VUID-vkTrimCommandPool-device-parameter',
'VUID-vkTrimCommandPoolKHR-flags-zerobitmask' : 'VUID-vkTrimCommandPool-flags-zerobitmask',
'VUID-vkUpdateDescriptorSetWithTemplateKHR-descriptorSet-parameter' : 'VUID-vkUpdateDescriptorSetWithTemplate-descriptorSet-parameter',
'VUID-vkUpdateDescriptorSetWithTemplateKHR-descriptorUpdateTemplate-parameter' : 'VUID-vkUpdateDescriptorSetWithTemplate-descriptorUpdateTemplate-parameter',
'VUID-vkUpdateDescriptorSetWithTemplateKHR-descriptorUpdateTemplate-parent' : 'VUID-vkUpdateDescriptorSetWithTemplate-descriptorUpdateTemplate-parent',
'VUID-vkUpdateDescriptorSetWithTemplateKHR-device-parameter' : 'VUID-vkUpdateDescriptorSetWithTemplate-device-parameter',
'VUID-vkCreateDescriptorUpdateTemplateKHR-pCreateInfo-parameter' : 'VUID-vkCreateDescriptorUpdateTemplate-pCreateInfo-parameter',
'VUID-vkCreateSamplerYcbcrConversionKHR-pCreateInfo-parameter' : 'VUID-vkCreateSamplerYcbcrConversion-pCreateInfo-parameter',
'VUID-vkGetBufferMemoryRequirements2KHR-pInfo-parameter' : 'VUID-vkGetBufferMemoryRequirements2-pInfo-parameter',
'VUID-vkGetBufferMemoryRequirements2KHR-pMemoryRequirements-parameter' : 'VUID-vkGetBufferMemoryRequirements2-pMemoryRequirements-parameter',
'VUID-vkGetDescriptorSetLayoutSupportKHR-pCreateInfo-parameter' : 'VUID-vkGetDescriptorSetLayoutSupport-pCreateInfo-parameter',
'VUID-vkGetDescriptorSetLayoutSupportKHR-pSupport-parameter' : 'VUID-vkGetDescriptorSetLayoutSupport-pSupport-parameter',
'VUID-vkGetImageMemoryRequirements2KHR-pInfo-parameter' : 'VUID-vkGetImageMemoryRequirements2-pInfo-parameter',
'VUID-vkGetImageMemoryRequirements2KHR-pMemoryRequirements-parameter' : 'VUID-vkGetImageMemoryRequirements2-pMemoryRequirements-parameter',
'VUID-vkGetImageSparseMemoryRequirements2KHR-pInfo-parameter' : 'VUID-vkGetImageSparseMemoryRequirements2-pInfo-parameter',
'VUID-vkGetPhysicalDeviceExternalBufferPropertiesKHR-pExternalBufferInfo-parameter' : 'VUID-vkGetPhysicalDeviceExternalBufferProperties-pExternalBufferInfo-parameter',
'VUID-vkGetPhysicalDeviceExternalBufferPropertiesKHR-pExternalBufferProperties-parameter' : 'VUID-vkGetPhysicalDeviceExternalBufferProperties-pExternalBufferProperties-parameter',
'VUID-vkGetPhysicalDeviceExternalFencePropertiesKHR-pExternalFenceInfo-parameter' : 'VUID-vkGetPhysicalDeviceExternalFenceProperties-pExternalFenceInfo-parameter',
'VUID-vkGetPhysicalDeviceExternalFencePropertiesKHR-pExternalFenceProperties-parameter' : 'VUID-vkGetPhysicalDeviceExternalFenceProperties-pExternalFenceProperties-parameter',
'VUID-vkGetPhysicalDeviceExternalSemaphorePropertiesKHR-pExternalSemaphoreInfo-parameter' : 'VUID-vkGetPhysicalDeviceExternalSemaphoreProperties-pExternalSemaphoreInfo-parameter',
'VUID-vkGetPhysicalDeviceExternalSemaphorePropertiesKHR-pExternalSemaphoreProperties-parameter' : 'VUID-vkGetPhysicalDeviceExternalSemaphoreProperties-pExternalSemaphoreProperties-parameter',
'VUID-vkGetPhysicalDeviceFeatures2KHR-pFeatures-parameter' : 'VUID-vkGetPhysicalDeviceFeatures2-pFeatures-parameter',
'VUID-vkGetPhysicalDeviceFormatProperties2KHR-pFormatProperties-parameter' : 'VUID-vkGetPhysicalDeviceFormatProperties2-pFormatProperties-parameter',
'VUID-vkGetPhysicalDeviceImageFormatProperties2KHR-pImageFormatInfo-parameter' : 'VUID-vkGetPhysicalDeviceImageFormatProperties2-pImageFormatInfo-parameter',
'VUID-vkGetPhysicalDeviceImageFormatProperties2KHR-pImageFormatProperties-parameter' : 'VUID-vkGetPhysicalDeviceImageFormatProperties2-pImageFormatProperties-parameter',
'VUID-vkGetPhysicalDeviceMemoryProperties2KHR-pMemoryProperties-parameter' : 'VUID-vkGetPhysicalDeviceMemoryProperties2-pMemoryProperties-parameter',
'VUID-vkGetPhysicalDeviceProperties2KHR-pProperties-parameter' : 'VUID-vkGetPhysicalDeviceProperties2-pProperties-parameter',
'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2KHR-pFormatInfo-parameter' : 'VUID-vkGetPhysicalDeviceSparseImageFormatProperties2-pFormatInfo-parameter' }
def printHelp():
    """Print command-line usage and argument help for this script to stdout."""
    print ("Usage:")
    print (" python vk_validation_stats.py <json_file>")
    print (" [ -c ]")
    print (" [ -todo ]")
    print (" [ -vuid <vuid_name> ]")
    print (" [ -text [ <text_out_filename>] ]")
    print (" [ -csv [ <csv_out_filename>] ]")
    print (" [ -html [ <html_out_filename>] ]")
    print (" [ -export_header ]")
    print (" [ -verbose ]")
    print (" [ -help ]")
    print ("\n The vk_validation_stats script parses validation layer source files to")
    print (" determine the set of valid usage checks and tests currently implemented,")
    print (" and generates coverage values by comparing against the full set of valid")
    print (" usage identifiers in the Vulkan-Headers registry file 'validusage.json'")
    print ("\nArguments: ")
    print (" <json-file> (required) registry file 'validusage.json'")
    print (" -c report consistency warnings")
    print (" -todo report unimplemented VUIDs")
    print (" -vuid <vuid_name> report status of individual VUID <vuid_name>")
    print (" -text [filename] output the error database text to <text_database_filename>,")
    print (" defaults to 'validation_error_database.txt'")
    print (" -csv [filename] output the error database in csv to <csv_database_filename>,")
    print (" defaults to 'validation_error_database.csv'")
    print (" -html [filename] output the error database in html to <html_database_filename>,")
    print (" defaults to 'validation_error_database.html'")
    print (" -export_header export a new VUID error text header file to <%s>" % header_filename)
    print (" -verbose show your work (to stdout)")
class ValidationJSON:
    """Parses the Vulkan registry's validusage.json into VUID databases.

    After read(), exposes:
      explicit_vuids  -- VUIDs whose names end in a numeric id
      implicit_vuids  -- all other (implicit) VUIDs
      all_vuids       -- union of the two sets above
      vuid_db         -- VUID string -> list of {'api','ext','type','text'} dicts
      duplicate_vuids -- VUIDs that appear more than once in the registry
    """
    def __init__(self, filename):
        # Path to the validusage.json file to be parsed by read().
        self.filename = filename
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.all_vuids = set()
        self.vuid_db = defaultdict(list) # Maps VUID string to list of json-data dicts
        self.apiversion = ""
        self.duplicate_vuids = set()
        # A set of specific regular expression substitutions needed to clean up VUID text
        # (strip markup/entities and rewrite LaTeX-style math as plain ASCII).
        self.regex_dict = {}
        self.regex_dict[re.compile('<.*?>|&(amp;)+lt;|&(amp;)+gt;')] = ""
        self.regex_dict[re.compile(r'\\\(codeSize \\over 4\\\)')] = "(codeSize/4)"
        self.regex_dict[re.compile(r'\\\(\\lceil{\\frac{height}{maxFragmentDensityTexelSize_{height}}}\\rceil\\\)')] = "the ceiling of height/maxFragmentDensityTexelSize.height"
        self.regex_dict[re.compile(r'\\\(\\lceil{\\frac{width}{maxFragmentDensityTexelSize_{width}}}\\rceil\\\)')] = "the ceiling of width/maxFragmentDensityTexelSize.width"
        self.regex_dict[re.compile(r'\\\(\\lceil{\\frac{maxFramebufferHeight}{minFragmentDensityTexelSize_{height}}}\\rceil\\\)')] = "the ceiling of maxFramebufferHeight/minFragmentDensityTexelSize.height"
        self.regex_dict[re.compile(r'\\\(\\lceil{\\frac{maxFramebufferWidth}{minFragmentDensityTexelSize_{width}}}\\rceil\\\)')] = "the ceiling of maxFramebufferWidth/minFragmentDensityTexelSize.width"
        self.regex_dict[re.compile(r'\\\(\\lceil\{\\mathit\{rasterizationSamples} \\over 32}\\rceil\\\)')] = "(rasterizationSamples/32)"
        self.regex_dict[re.compile(r'\\\(\\textrm\{codeSize} \\over 4\\\)')] = "(codeSize/4)"
        # Some fancy punctuation chars that break the Android build...
        self.regex_dict[re.compile('→')] = "->" # Arrow char
        self.regex_dict[re.compile('’')] = "'" # Left-slanting apostrophe to apostrophe
        self.regex_dict[re.compile('̶(0|1);')] = "'" # L/R-slanting quotes to apostrophe
    def read(self):
        """Load and parse the json file, populating the VUID sets and vuid_db.

        Exits the process (sys.exit) if the file is missing, empty, or does
        not have the expected 'version info'/'validation' top-level structure.
        """
        self.json_dict = {}
        if os.path.isfile(self.filename):
            json_file = open(self.filename, 'r', encoding='utf-8')
            self.json_dict = json.load(json_file)
            json_file.close()
        if len(self.json_dict) == 0:
            print("Error: Error loading validusage.json file <%s>" % self.filename)
            sys.exit(-1)
        try:
            version = self.json_dict['version info']
            validation = self.json_dict['validation']
            self.apiversion = version['api version']
        except:
            print("Error: Failure parsing validusage.json object")
            sys.exit(-1)
        # Parse vuid from json into local databases
        for apiname in validation.keys():
            # print("entrypoint:%s"%apiname)
            apidict = validation[apiname]
            for ext in apidict.keys():
                vlist = apidict[ext]
                for ventry in vlist:
                    vuid_string = ventry['vuid']
                    # NOTE(review): [-5:-1] tests only the 4 digits before the
                    # last character; this relies on implicit VUID names never
                    # ending in a digit, which holds for current registry data.
                    if (vuid_string[-5:-1].isdecimal()):
                        self.explicit_vuids.add(vuid_string) # explicit end in 5 numeric chars
                        vtype = 'explicit'
                    else:
                        self.implicit_vuids.add(vuid_string) # otherwise, implicit
                        vtype = 'implicit'
                    vuid_text = ventry['text']
                    for regex, replacement in self.regex_dict.items():
                        vuid_text = re.sub(regex, replacement, vuid_text) # do regex substitution
                    vuid_text = html.unescape(vuid_text) # anything missed by the regex
                    self.vuid_db[vuid_string].append({'api':apiname, 'ext':ext, 'type':vtype, 'text':vuid_text})
        self.all_vuids = self.explicit_vuids | self.implicit_vuids
        self.duplicate_vuids = set({v for v in self.vuid_db if len(self.vuid_db[v]) > 1})
        if len(self.duplicate_vuids) > 0:
            print("Warning: duplicate VUIDs found in validusage.json")
class ValidationSource:
    """Scans validation-layer source files for VUID references.

    After parse(), exposes explicit_vuids / implicit_vuids / unassigned_vuids
    / all_vuids sets and vuid_count_dict mapping each VUID to its usage count
    and the 'file,line' locations where it appears.
    """
    def __init__(self, source_file_list, generated_source_file_list, generated_source_directories):
        self.source_files = source_file_list
        self.generated_source_files = generated_source_file_list
        self.generated_source_dirs = generated_source_directories
        self.vuid_count_dict = {} # dict of vuid values to the count of how much they're used, and location of where they're used
        self.duplicated_checks = 0
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.unassigned_vuids = set()
        self.all_vuids = set()
        # Locate each generated source file in one of the candidate build
        # directories; abort if any cannot be found, since coverage numbers
        # would otherwise be silently wrong.
        if len(self.generated_source_files) > 0:
            qualified_paths = []
            for source in self.generated_source_files:
                for build_dir in self.generated_source_dirs:
                    filepath = '../%s/layers/%s' % (build_dir, source)
                    if os.path.isfile(filepath):
                        qualified_paths.append(filepath)
                        break
            if len(self.generated_source_files) != len(qualified_paths):
                print("Error: Unable to locate one or more of the following source files in the %s directories" % (", ".join(generated_source_directories)))
                print(self.generated_source_files)
                print("Failed to locate one or more codegen files in layer source code - cannot proceed.")
                exit(1)
            else:
                self.source_files.extend(qualified_paths)
    def parse(self):
        """Scan every source file and populate the VUID usage databases.

        Lines whose stripped form starts with a comment marker are skipped.
        'prepend' carries a line ending in a clang-wrapped (broken) VUID
        string literal so it can be re-joined with the following line before
        the VUID tokens are extracted.
        """
        prepend = None
        for sf in self.source_files:
            line_num = 0
            with open(sf) as f:
                for line in f:
                    line_num = line_num + 1
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue
                    # Find vuid strings
                    if prepend is not None:
                        line = prepend[:-2] + line.lstrip().lstrip('"') # join lines skipping CR, whitespace and trailing/leading quote char
                        prepend = None
                    if any(prefix in line for prefix in vuid_prefixes):
                        line_list = line.split()
                        # A VUID string that has been broken by clang will start with a vuid prefix and end with -, and will be last in the list
                        broken_vuid = line_list[-1].strip('"')
                        if any(broken_vuid.startswith(prefix) for prefix in vuid_prefixes) and broken_vuid.endswith('-'):
                            prepend = line
                            continue
                        vuid_list = []
                        for str in line_list:
                            if any(prefix in str for prefix in vuid_prefixes):
                                vuid_list.append(str.strip(',);{}"'))
                        for vuid in vuid_list:
                            if vuid not in self.vuid_count_dict:
                                self.vuid_count_dict[vuid] = {}
                                self.vuid_count_dict[vuid]['count'] = 1
                                self.vuid_count_dict[vuid]['file_line'] = []
                            else:
                                if self.vuid_count_dict[vuid]['count'] == 1: # only count first time duplicated
                                    self.duplicated_checks = self.duplicated_checks + 1
                                self.vuid_count_dict[vuid]['count'] = self.vuid_count_dict[vuid]['count'] + 1
                            self.vuid_count_dict[vuid]['file_line'].append('%s,%d' % (sf, line_num))
        # Sort vuids by type
        for vuid in self.vuid_count_dict.keys():
            if (vuid.startswith('VUID-')):
                # NOTE(review): [-5:-1] checks the 4 digits before the last
                # char; relies on implicit VUIDs never ending in a digit.
                if (vuid[-5:-1].isdecimal()):
                    self.explicit_vuids.add(vuid) # explicit end in 5 numeric chars
                else:
                    self.implicit_vuids.add(vuid)
            elif (vuid.startswith('UNASSIGNED-')):
                self.unassigned_vuids.add(vuid)
            else:
                print("Unable to categorize VUID: %s" % vuid)
                print("Confused while parsing VUIDs in layer source code - cannot proceed. (FIXME)")
                exit(-1)
        self.all_vuids = self.explicit_vuids | self.implicit_vuids | self.unassigned_vuids
# Class to parse the validation layer test source and store testnames
# Class to parse the validation layer test source and store testnames
class ValidationTests:
    """Parses layer test source files and maps VUIDs to the tests that use them."""
    def __init__(self, test_file_list, test_group_name=['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest']):
        self.test_files = test_file_list
        # Text fragments that mark the start of a test definition, e.g. 'TEST_F(VkLayerTest'
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)
        self.explicit_vuids = set()
        self.implicit_vuids = set()
        self.unassigned_vuids = set()
        self.all_vuids = set()
        #self.test_to_vuids = {} # Map test name to VUIDs tested
        self.vuid_to_tests = defaultdict(set) # Map VUIDs to set of test names where implemented

    # Parse test files into internal data struct
    def parse(self):
        """Scan test files, pairing each VUID string with the enclosing test name."""
        # For each test file, parse test names into set
        grab_next_line = False # handle testname on separate line than wildcard
        testname = ''
        prepend = None  # first half of a VUID string split across source lines
        for test_file in self.test_files:
            with open(test_file) as tf:
                for line in tf:
                    if any(line.strip().startswith(comment) for comment in ['//', '/*']):
                        continue
                    # if line ends in a broken VUID string, fix that before proceeding
                    if prepend is not None:
                        line = prepend[:-2] + line.lstrip().lstrip('"') # join lines skipping CR, whitespace and trailing/leading quote char
                        prepend = None
                    if any(prefix in line for prefix in vuid_prefixes):
                        line_list = line.split()
                        # A VUID string that has been broken by clang will start with a vuid prefix and end with -, and will be last in the list
                        broken_vuid = line_list[-1].strip('"')
                        if any(broken_vuid.startswith(prefix) for prefix in vuid_prefixes) and broken_vuid.endswith('-'):
                            prepend = line
                            continue
                    if any(ttt in line for ttt in self.test_trigger_txt_list):
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        if ('' == testname):
                            grab_next_line = True
                            continue
                        #self.test_to_vuids[testname] = []
                    if grab_next_line: # test name on its own line
                        grab_next_line = False
                        # BUGFIX: read the name from the current line; previously the
                        # (empty) 'testname' was re-stripped, losing the test name.
                        testname = line.strip().strip(' {)')
                        #self.test_to_vuids[testname] = []
                    if any(prefix in line for prefix in vuid_prefixes):
                        # Raw string avoids the deprecated '\s' escape in a normal literal.
                        line_list = re.split(r'[\s{}[\]()"]+', line)
                        for sub_str in line_list:
                            if any(prefix in sub_str for prefix in vuid_prefixes):
                                vuid_str = sub_str.strip(',);:"')
                                self.vuid_to_tests[vuid_str].add(testname)
                                #self.test_to_vuids[testname].append(vuid_str)
                                if (vuid_str.startswith('VUID-')):
                                    if (vuid_str[-5:-1].isdecimal()):
                                        self.explicit_vuids.add(vuid_str)    # explicit end in 5 numeric chars
                                    else:
                                        self.implicit_vuids.add(vuid_str)
                                elif (vuid_str.startswith('UNASSIGNED-')):
                                    self.unassigned_vuids.add(vuid_str)
                                else:
                                    print("Unable to categorize VUID: %s" % vuid_str)
                                    print("Confused while parsing VUIDs in test code - cannot proceed. (FIXME)")
                                    exit(-1)
        self.all_vuids = self.explicit_vuids | self.implicit_vuids | self.unassigned_vuids
# Class to do consistency checking
#
class Consistency:
    """Cross-checks the VUID sets from the spec JSON, layer source, and tests."""
    def __init__(self, all_json, all_checks, all_tests):
        self.valid = all_json
        self.checks = all_checks
        self.tests = all_tests
        if (dealias_khr):
            # Replace KHR-alias VUIDs with their canonical names before comparing
            self.checks = self._dealias(self.checks)
            self.tests = self._dealias(self.tests)

    @staticmethod
    def _dealias(vuids):
        """Map each VUID through khr_aliases; non-aliased VUIDs pass through unchanged."""
        return {khr_aliases.get(vuid, vuid) for vuid in vuids}

    # Report undefined VUIDs in source code
    def undef_vuids_in_layer_code(self):
        """Return True if every VUID checked in layer code is defined in validusage.json."""
        undef_set = self.checks - self.valid
        undef_set.discard('VUID-Undefined') # don't report Undefined
        if ignore_unassigned:
            undef_set = {uv for uv in undef_set if not uv.startswith('UNASSIGNED-')}
        if (len(undef_set) > 0):
            print("\nFollowing VUIDs found in layer code are not defined in validusage.json (%d):" % len(undef_set))
            for vuid in sorted(undef_set):
                print("    %s" % vuid)
            return False
        return True

    # Report undefined VUIDs in tests
    def undef_vuids_in_tests(self):
        """Return True if every VUID referenced by tests is defined in validusage.json."""
        undef_set = self.tests - self.valid
        undef_set.discard('VUID-Undefined') # don't report Undefined
        if ignore_unassigned:
            undef_set = {uv for uv in undef_set if not uv.startswith('UNASSIGNED-')}
        if (len(undef_set) > 0):
            print("\nFollowing VUIDs found in layer tests are not defined in validusage.json (%d):" % len(undef_set))
            for vuid in sorted(undef_set):
                print("    %s" % vuid)
            return False
        return True

    # Report vuids in tests that are not in source
    def vuids_tested_not_checked(self):
        """Return True if every VUID referenced by tests is also checked in layer code."""
        undef_set = self.tests - self.checks
        undef_set.discard('VUID-Undefined') # don't report Undefined
        if ignore_unassigned:
            undef_set = {uv for uv in undef_set if not uv.startswith('UNASSIGNED-')}
        if (len(undef_set) > 0):
            print("\nFollowing VUIDs found in tests but are not checked in layer code (%d):" % len(undef_set))
            for vuid in sorted(undef_set):
                print("    %s" % vuid)
            return False
        return True

    # TODO: Explicit checked VUIDs which have no test
    # def explicit_vuids_checked_not_tested(self):
# Class to output database in various flavors
#
class OutputDatabase:
    """Emits the combined VUID database as text, csv, html, or a C header file.

    Output filenames (txt_filename, csv_filename, html_filename,
    header_filename) are module-level globals set elsewhere in this script.
    """
    def __init__(self, val_json, val_source, val_tests):
        # val_json: ValidationJSON (spec data); val_source: ValidationSource
        # (layer-code VUID usage); val_tests: ValidationTests (test coverage).
        self.vj = val_json
        self.vs = val_source
        self.vt = val_tests
        self.header_version = "/* THIS FILE IS GENERATED - DO NOT EDIT (scripts/vk_validation_stats.py) */"
        self.header_version += "\n/* Vulkan specification version: %s */" % val_json.apiversion
        self.header_version += "\n/* Header generated: %s */\n" % time.strftime('%Y-%m-%d %H:%M:%S')
        self.header_preamble = """
/*
 * Vulkan
 *
 * Copyright (c) 2016-2019 Google Inc.
 * Copyright (c) 2016-2019 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 */
#pragma once
// Disable auto-formatting for generated file
// clang-format off
// Mapping from VUID string to the corresponding spec text
typedef struct _vuid_spec_text_pair {
    const char * vuid;
    const char * spec_text;
} vuid_spec_text_pair;
static const vuid_spec_text_pair vuid_spec_text[] = {
"""
        self.header_postamble = """};
"""
        self.spec_url = "https://www.khronos.org/registry/vulkan/specs/1.1-extensions/html/vkspec.html"
    def dump_txt(self):
        """Write the database to txt_filename as pipe-delimited rows."""
        print("\n Dumping database to text file: %s" % txt_filename)
        with open (txt_filename, 'w') as txt:
            txt.write("## VUID Database\n")
            txt.write("## Format: VUID_NAME | CHECKED | TEST | TYPE | API/STRUCT | EXTENSION | VUID_TEXT\n##\n")
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                db_list = self.vj.vuid_db[vuid]
                db_list.sort(key=operator.itemgetter('ext')) # sort list to ease diffs of output file
                for db_entry in db_list:
                    checked = 'N'
                    if vuid in self.vs.all_vuids:
                        checked = 'Y'
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        test_list = list(self.vt.vuid_to_tests[vuid])
                        test_list.sort()   # sort tests, for diff-ability
                        sep = ', '
                        test = sep.join(test_list)
                    txt.write("%s | %s | %s | %s | %s | %s | %s\n" % (vuid, checked, test, db_entry['type'], db_entry['api'], db_entry['ext'], db_entry['text']))
    def dump_csv(self):
        """Write the database to csv_filename with one row per (vuid, db entry)."""
        print("\n Dumping database to csv file: %s" % csv_filename)
        with open (csv_filename, 'w', newline='') as csvfile:
            cw = csv.writer(csvfile)
            cw.writerow(['VUID_NAME','CHECKED','TEST','TYPE','API/STRUCT','EXTENSION','VUID_TEXT'])
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                for db_entry in self.vj.vuid_db[vuid]:
                    row = [vuid]
                    if vuid in self.vs.all_vuids:
                        row.append('Y')
                    else:
                        row.append('N')
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        sep = ', '
                        test = sep.join(self.vt.vuid_to_tests[vuid])
                    row.append(test)
                    row.append(db_entry['type'])
                    row.append(db_entry['api'])
                    row.append(db_entry['ext'])
                    row.append(db_entry['text'])
                    cw.writerow(row)
    def dump_html(self):
        """Write the database to html_filename as a single HTML table."""
        print("\n Dumping database to html file: %s" % html_filename)
        preamble = '<!DOCTYPE html>\n<html>\n<head>\n<style>\ntable, th, td {\n border: 1px solid black;\n border-collapse: collapse; \n}\n</style>\n<body>\n<h2>Valid Usage Database</h2>\n<font size="2" face="Arial">\n<table style="width:100%">\n'
        headers = '<tr><th>VUID NAME</th><th>CHECKED</th><th>TEST</th><th>TYPE</th><th>API/STRUCT</th><th>EXTENSION</th><th>VUID TEXT</th></tr>\n'
        with open (html_filename, 'w') as hfile:
            hfile.write(preamble)
            hfile.write(headers)
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                for db_entry in self.vj.vuid_db[vuid]:
                    hfile.write('<tr><th>%s</th>' % vuid)
                    # Green Y when the VUID has a check in layer code, red N otherwise
                    checked = '<span style="color:red;">N</span>'
                    if vuid in self.vs.all_vuids:
                        checked = '<span style="color:limegreen;">Y</span>'
                    hfile.write('<th>%s</th>' % checked)
                    test = 'None'
                    if vuid in self.vt.vuid_to_tests:
                        sep = ', '
                        test = sep.join(self.vt.vuid_to_tests[vuid])
                    hfile.write('<th>%s</th>' % test)
                    hfile.write('<th>%s</th>' % db_entry['type'])
                    hfile.write('<th>%s</th>' % db_entry['api'])
                    hfile.write('<th>%s</th>' % db_entry['ext'])
                    hfile.write('<th>%s</th></tr>\n' % db_entry['text'])
            hfile.write('</table>\n</body>\n</html>\n')
    def export_header(self):
        """Write a generated C header mapping each VUID to its spec text and URL."""
        print("\n Exporting header file to: %s" % header_filename)
        with open (header_filename, 'w') as hfile:
            hfile.write(self.header_version)
            hfile.write(self.header_preamble)
            vuid_list = list(self.vj.all_vuids)
            vuid_list.sort()
            for vuid in vuid_list:
                db_entry = self.vj.vuid_db[vuid][0]
                hfile.write('    {"%s", "%s (%s#%s)"},\n' % (vuid, db_entry['text'].strip(' '), self.spec_url, vuid))
                # For multiply-defined VUIDs, include versions with extension appended
                if len(self.vj.vuid_db[vuid]) > 1:
                    for db_entry in self.vj.vuid_db[vuid]:
                        hfile.write('    {"%s[%s]", "%s (%s#%s)"},\n' % (vuid, db_entry['ext'].strip(' '), db_entry['text'].strip(' '), self.spec_url, vuid))
            hfile.write(self.header_postamble)
def main(argv):
    """Parse validusage.json, layer source, and tests; print coverage stats.

    argv: script arguments without the program name; argv[0] must be the
    path to validusage.json, followed by optional flags (see printHelp).
    NOTE(review): 'result' is initialized to 0 and never reassigned in this
    function, so the declared non-zero-on-error contract is not yet wired up.
    """
    global verbose_mode
    global txt_filename
    global csv_filename
    global html_filename

    run_consistency = False
    report_unimplemented = False
    get_vuid_status = ''
    txt_out = False
    csv_out = False
    html_out = False
    header_out = False

    if (1 > len(argv)):
        printHelp()
        sys.exit()

    # Parse script args
    json_filename = argv[0]
    i = 1
    while (i < len(argv)):
        arg = argv[i]
        i = i + 1
        if (arg == '-c'):
            run_consistency = True
        elif (arg == '-vuid'):
            get_vuid_status = argv[i]
            i = i + 1
        elif (arg == '-todo'):
            report_unimplemented = True
        elif (arg == '-text'):
            txt_out = True
            # Set filename if supplied, else use default
            if i < len(argv) and not argv[i].startswith('-'):
                txt_filename = argv[i]
                i = i + 1
        elif (arg == '-csv'):
            csv_out = True
            # Set filename if supplied, else use default
            if i < len(argv) and not argv[i].startswith('-'):
                csv_filename = argv[i]
                i = i + 1
        elif (arg == '-html'):
            html_out = True
            # Set filename if supplied, else use default
            if i < len(argv) and not argv[i].startswith('-'):
                html_filename = argv[i]
                i = i + 1
        elif (arg == '-export_header'):
            header_out = True
        elif (arg in ['-verbose']):
            verbose_mode = True
        elif (arg in ['-help', '-h']):
            printHelp()
            sys.exit()
        else:
            print("Unrecognized argument: %s\n" % arg)
            printHelp()
            sys.exit()

    result = 0 # Non-zero result indicates an error case

    # Parse validusage json
    val_json = ValidationJSON(json_filename)
    val_json.read()
    exp_json = len(val_json.explicit_vuids)
    imp_json = len(val_json.implicit_vuids)
    all_json = len(val_json.all_vuids)
    if verbose_mode:
        print("Found %d unique error vuids in validusage.json file." % all_json)
        print("  %d explicit" % exp_json)
        print("  %d implicit" % imp_json)
        if len(val_json.duplicate_vuids) > 0:
            print("%d VUIDs appear in validusage.json more than once." % len(val_json.duplicate_vuids))
            for vuid in val_json.duplicate_vuids:
                print("  %s" % vuid)
                for ext in val_json.vuid_db[vuid]:
                    print("    with extension: %s" % ext['ext'])

    # Parse layer source files
    val_source = ValidationSource(layer_source_files, generated_layer_source_files, generated_layer_source_directories)
    val_source.parse()
    exp_checks = len(val_source.explicit_vuids)
    imp_checks = len(val_source.implicit_vuids)
    all_checks = len(val_source.vuid_count_dict.keys())
    if verbose_mode:
        print("Found %d unique vuid checks in layer source code." % all_checks)
        print("  %d explicit" % exp_checks)
        print("  %d implicit" % imp_checks)
        print("  %d unassigned" % len(val_source.unassigned_vuids))
        print("  %d checks are implemented more that once" % val_source.duplicated_checks)

    # Parse test files
    val_tests = ValidationTests([test_file, ])
    val_tests.parse()
    exp_tests = len(val_tests.explicit_vuids)
    imp_tests = len(val_tests.implicit_vuids)
    all_tests = len(val_tests.all_vuids)
    if verbose_mode:
        print("Found %d unique error vuids in test file %s." % (all_tests, test_file))
        print("  %d explicit" % exp_tests)
        print("  %d implicit" % imp_tests)
        print("  %d unassigned" % len(val_tests.unassigned_vuids))

    # Process stats
    # NOTE(review): the percentage lines below divide by the counts computed
    # above; any zero denominator would raise ZeroDivisionError — confirm
    # that inputs are always non-empty.
    print("\nValidation Statistics (using validusage.json version %s)" % val_json.apiversion)
    print("  VUIDs defined in JSON file:  %04d explicit, %04d implicit, %04d total." % (exp_json, imp_json, all_json))
    print("  VUIDs checked in layer code: %04d explicit, %04d implicit, %04d total." % (exp_checks, imp_checks, all_checks))
    print("  VUIDs tested in layer tests: %04d explicit, %04d implicit, %04d total." % (exp_tests, imp_tests, all_tests))

    print("\nVUID check coverage")
    print("  Explicit VUIDs checked: %.1f%% (%d checked vs %d defined)" % ((100.0 * exp_checks / exp_json), exp_checks, exp_json))
    print("  Implicit VUIDs checked: %.1f%% (%d checked vs %d defined)" % ((100.0 * imp_checks / imp_json), imp_checks, imp_json))
    print("  Overall VUIDs checked:  %.1f%% (%d checked vs %d defined)" % ((100.0 * all_checks / all_json), all_checks, all_json))

    print("\nVUID test coverage")
    print("  Explicit VUIDs tested: %.1f%% (%d tested vs %d checks)" % ((100.0 * exp_tests / exp_checks), exp_tests, exp_checks))
    print("  Implicit VUIDs tested: %.1f%% (%d tested vs %d checks)" % ((100.0 * imp_tests / imp_checks), imp_tests, imp_checks))
    print("  Overall VUIDs tested:  %.1f%% (%d tested vs %d checks)" % ((100.0 * all_tests / all_checks), all_tests, all_checks))

    # Report status of a single VUID
    if len(get_vuid_status) > 1:
        print("\n\nChecking status of <%s>" % get_vuid_status);
        if get_vuid_status not in val_json.all_vuids:
            print('  Not a valid VUID string.')
        else:
            if get_vuid_status in val_source.explicit_vuids:
                print('  Implemented!')
                line_list = val_source.vuid_count_dict[get_vuid_status]['file_line']
                for line in line_list:
                    print('    => %s' % line)
            elif get_vuid_status in val_source.implicit_vuids:
                print('  Implemented! (Implicit)')
                line_list = val_source.vuid_count_dict[get_vuid_status]['file_line']
                for line in line_list:
                    print('    => %s' % line)
            else:
                print('  Not implemented.')
            if get_vuid_status in val_tests.all_vuids:
                print('  Has a test!')
                test_list = val_tests.vuid_to_tests[get_vuid_status]
                for test in test_list:
                    print('    => %s' % test)
            else:
                print('  Not tested.')

    # Report unimplemented explicit VUIDs
    if report_unimplemented:
        unim_explicit = val_json.explicit_vuids - val_source.explicit_vuids
        print("\n\n%d explicit VUID checks remain unimplemented:" % len(unim_explicit))
        ulist = list(unim_explicit)
        ulist.sort()
        for vuid in ulist:
            print("  => %s" % vuid)

    # Consistency tests
    if run_consistency:
        print("\n\nRunning consistency tests...")
        con = Consistency(val_json.all_vuids, val_source.all_vuids, val_tests.all_vuids)
        ok = con.undef_vuids_in_layer_code()
        ok &= con.undef_vuids_in_tests()
        ok &= con.vuids_tested_not_checked()
        if ok:
            print("  OK! No inconsistencies found.")

    # Output database in requested format(s)
    db_out = OutputDatabase(val_json, val_source, val_tests)
    if txt_out:
        db_out.dump_txt()
    if csv_out:
        db_out.dump_csv()
    if html_out:
        db_out.dump_html()
    if header_out:
        db_out.export_header()

    return result
# Script entry point: argv[0] is validusage.json, remaining args are options.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
| 56.174242 | 247 | 0.614767 |
795a0f604f8c99e979c022f3d2f6ff099d5dbaf7 | 2,583 | py | Python | tests/ut/python/parallel/test_linear.py | ZephyrChenzf/mindspore | 8f191847cf71e12715ced96bc3575914f980127a | [
"Apache-2.0"
] | 7 | 2020-05-24T03:19:26.000Z | 2020-05-24T03:20:00.000Z | tests/ut/python/parallel/test_linear.py | dilingsong/mindspore | 4276050f2494cfbf8682560a1647576f859991e8 | [
"Apache-2.0"
] | null | null | null | tests/ut/python/parallel/test_linear.py | dilingsong/mindspore | 4276050f2494cfbf8682560a1647576f859991e8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
class NetWithLoss(nn.Cell):
    """Wraps a network with a sharded softmax cross-entropy loss head."""

    def __init__(self, network, strategy3):
        super(NetWithLoss, self).__init__()
        self.loss = P.SoftmaxCrossEntropyWithLogits().set_strategy(strategy3)
        self.network = network

    def construct(self, x, y, bias, label):
        # Forward through the wrapped network, then return only the loss value.
        logits = self.network(x, y, bias)
        loss_outputs = self.loss(logits, label)
        return loss_outputs[0]
class GradWrap(nn.Cell):
    """Computes gradients of the wrapped network w.r.t. all of its inputs."""

    def __init__(self, network):
        super(GradWrap, self).__init__()
        self.network = network

    def construct(self, x, y, bias, label):
        grad_fn = C.grad_all(self.network)
        return grad_fn(x, y, bias, label)
def test_linear():
    """Compiles a matmul+add+gelu network under semi-auto parallel sharding."""

    class Net(nn.Cell):
        def __init__(self, matmul_strategy, add_strategy, gelu_strategy):
            super().__init__()
            self.fc_nobias = P.MatMul(transpose_b=True).set_strategy(matmul_strategy)
            self.add = P.TensorAdd().set_strategy(add_strategy)
            self.gelu = P.Gelu().set_strategy(gelu_strategy)

        def construct(self, x, y, bias):
            hidden = self.fc_nobias(x, y)
            hidden = self.add(hidden, bias)
            return self.gelu(hidden)

    context.set_auto_parallel_context(device_num=16, global_rank=0)

    # Sharding strategies for each operator in the graph.
    matmul_strategy = ((2, 4), (2, 4))
    add_strategy = ((2, 4), (4,))
    gelu_strategy = ((2, 8),)
    loss_strategy = ((16, 1), (16, 1))

    net = GradWrap(NetWithLoss(Net(matmul_strategy, add_strategy, gelu_strategy), loss_strategy))
    context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
    net.set_auto_parallel()

    x = Tensor(np.ones([64, 32]), dtype=ms.float32)
    y = Tensor(np.ones([64, 32]), dtype=ms.float32)
    bias = Tensor(np.ones([64]), dtype=ms.float32)
    label = Tensor(np.ones([64, 64]), dtype=ms.float32)
    _executor.compile(net, x, y, bias, label)
| 34.905405 | 80 | 0.67712 |
795a0feb9b33f5a507d835c5e65d281436a11c0f | 5,669 | py | Python | onepiece/site/c18comic.py | fenglui/TencentComicBook | 5ea70a0d1fcb6cd29492ba7b1148f7b55ecc97c9 | [
"MIT"
] | null | null | null | onepiece/site/c18comic.py | fenglui/TencentComicBook | 5ea70a0d1fcb6cd29492ba7b1148f7b55ecc97c9 | [
"MIT"
] | null | null | null | onepiece/site/c18comic.py | fenglui/TencentComicBook | 5ea70a0d1fcb6cd29492ba7b1148f7b55ecc97c9 | [
"MIT"
] | null | null | null | import re
import logging
from urllib.parse import urljoin
from ..crawlerbase import CrawlerBase
logger = logging.getLogger(__name__)
class C18comicCrawler(CrawlerBase):
SITE = "18comic"
SITE_INDEX = 'https://18comic.org/'
SOURCE_NAME = "禁漫天堂"
LOGIN_URL = SITE_INDEX
R18 = True
DEFAULT_COMICID = 201118
DEFAULT_SEARCH_NAME = '騎馬的女孩好想被她騎'
DEFAULT_TAG = 'CG集'
def __init__(self, comicid=None):
self.comicid = comicid
super().__init__()
@property
def source_url(self):
return self.get_source_url(self.comicid)
def get_source_url(self, comicid):
return urljoin(self.SITE_INDEX, "/album/{}/".format(comicid))
def get_comicbook_item(self):
soup = self.get_soup(self.source_url)
name = soup.find('div', {'itemprop': 'name'}).text.strip()
author = ''
desc = ''
for i in soup.find_all('div', {'class': 'p-t-5 p-b-5'}):
if '敘述:' in i.text:
desc = i.text.strip().replace('\n', '').replace('敘述:', '', 1)
for i in soup.find_all('div', {'class': 'tag-block'}):
if '作者:' in i.text:
author = i.text.strip().replace('\n', '').replace('作者:', '', 1)
cover_image_url = soup.find('img', {'itemprop': 'image'}).get('src')
res = soup.find('div', {'class': 'episode'})
book = self.new_comicbook_item(name=name,
desc=desc,
cover_image_url=cover_image_url,
author=author,
source_url=self.source_url)
for a in soup.find('span', {'itemprop': 'genre'}).find_all('a'):
tag_name = a.text.strip()
book.add_tag(name=tag_name, tag=tag_name)
if not res:
chapter_number = 1
url = urljoin(self.SITE_INDEX, '/photo/{}/'.format(self.comicid))
book.add_chapter(chapter_number=chapter_number, source_url=url, title=str(chapter_number))
else:
a_list = res.find_all('a')
for idx, a_soup in enumerate(a_list, start=1):
chapter_number = idx
for i in a_soup.find_all('span'):
i.decompose()
title = a_soup.text.strip().replace('\n', ' ')
url = a_soup.get('href')
full_url = urljoin(self.SITE_INDEX, url)
book.add_chapter(chapter_number=chapter_number, source_url=full_url, title=title)
return book
def get_chapter_item(self, citem):
soup = self.get_soup(citem.source_url)
img_list = soup.find('div', 'row thumb-overlay-albums')\
.find_all('img', {'id': re.compile(r'album_photo_\d+')})
image_urls = []
for img_soup in img_list:
url = img_soup.get('data-original')
if not url:
url = img_soup.get('src')
image_urls.append(url)
return self.new_chapter_item(chapter_number=citem.chapter_number,
title=citem.title,
image_urls=image_urls,
source_url=citem.source_url)
def search(self, name, page=1, size=None):
url = urljoin(
self.SITE_INDEX,
'/search/photos?search_query={}&page={}'.format(name, page)
)
soup = self.get_soup(url)
result = self.new_search_result_item()
for div in soup.find_all('div', {'class': 'thumb-overlay'}):
comicid = div.a.get('id').split('_')[-1]
name = div.img.get('alt')
cover_image_url = div.img.get('data-original')
source_url = self.get_source_url(comicid)
result.add_result(comicid=comicid,
name=name,
cover_image_url=cover_image_url,
source_url=source_url)
return result
def latest(self, page=1):
url = 'https://18comic.org/albums?o=mr&page=%s' % page
soup = self.get_soup(url)
result = self.new_search_result_item()
for div in soup.find_all('div', {'class': 'thumb-overlay-albums'}):
comicid = div.a.get('id').split('_')[-1]
name = div.img.get('alt')
cover_image_url = div.img.get('data-original')
source_url = self.get_source_url(comicid)
result.add_result(comicid=comicid,
name=name,
cover_image_url=cover_image_url,
source_url=source_url)
return result
def get_tags(self):
url = "https://18comic.org/theme/"
soup = self.get_soup(url)
div_list = soup.find('div', {'id': 'wrapper'}).find('div', {'class': 'container'})\
.find_all('div', {'class': 'row'})
tags = self.new_tags_item()
for div in div_list:
h4 = div.h4
if not h4:
continue
category = h4.text
for li in div.find_all('li'):
name = li.a.text
tags.add_tag(category=category, name=name, tag=name)
return tags
def get_tag_result(self, tag, page=1):
return self.search(name=tag, page=page)
def login(self):
self.selenium_login(login_url=self.LOGIN_URL,
check_login_status_func=self.check_login_status)
def check_login_status(self):
session = self.get_session()
if session.cookies.get("remember", domain=".18comic.org"):
return True
| 38.828767 | 102 | 0.540131 |
795a129d9b75b89eee617ee93d830fd83b867eaf | 317 | py | Python | Data Scientist Career Path/10. Natural Language Processing/5. Word Embeding/3. what.py | myarist/Codecademy | 2ba0f104bc67ab6ef0f8fb869aa12aa02f5f1efb | [
"MIT"
] | 23 | 2021-06-06T15:35:55.000Z | 2022-03-21T06:53:42.000Z | Data Scientist Career Path/10. Natural Language Processing/5. Word Embeding/3. what.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Data Scientist Career Path/10. Natural Language Processing/5. Word Embeding/3. what.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 9 | 2021-06-08T01:32:04.000Z | 2022-03-18T15:38:09.000Z | import spacy
# load word embedding model
nlp = spacy.load('en')
# define word embedding vectors
happy_vec = nlp('happy').vector
print(happy_vec)
sad_vec = nlp('sad').vector
print(sad_vec)
angry_vec = nlp('angry').vector
print(angry_vec)
# find vector length here
vector_length = len(happy_vec)
print(vector_length) | 19.8125 | 31 | 0.760252 |
795a12c607c6315af44fde7ad7a75a7b5336a496 | 15,034 | bzl | Python | source/extensions/extensions_build_config.bzl | ryanmcnamara/envoy | 29b30911dbfb3f9760efeb28238ceac36e1a1a23 | [
"Apache-2.0"
] | null | null | null | source/extensions/extensions_build_config.bzl | ryanmcnamara/envoy | 29b30911dbfb3f9760efeb28238ceac36e1a1a23 | [
"Apache-2.0"
] | null | null | null | source/extensions/extensions_build_config.bzl | ryanmcnamara/envoy | 29b30911dbfb3f9760efeb28238ceac36e1a1a23 | [
"Apache-2.0"
] | null | null | null | # See bazel/README.md for details on how this system works.
# Map of extension name -> Bazel target label for the default Envoy build.
# See bazel/README.md for how this list is consumed.
EXTENSIONS = {
    #
    # Access loggers
    #

    "envoy.access_loggers.file":                        "//source/extensions/access_loggers/file:config",
    "envoy.access_loggers.http_grpc":                   "//source/extensions/access_loggers/grpc:http_config",
    "envoy.access_loggers.tcp_grpc":                    "//source/extensions/access_loggers/grpc:tcp_config",

    #
    # Clusters
    #

    "envoy.clusters.aggregate":                         "//source/extensions/clusters/aggregate:cluster",
    "envoy.clusters.dynamic_forward_proxy":             "//source/extensions/clusters/dynamic_forward_proxy:cluster",
    "envoy.clusters.redis":                             "//source/extensions/clusters/redis:redis_cluster",

    #
    # gRPC Credentials Plugins
    #

    "envoy.grpc_credentials.file_based_metadata":       "//source/extensions/grpc_credentials/file_based_metadata:config",
    "envoy.grpc_credentials.aws_iam":                   "//source/extensions/grpc_credentials/aws_iam:config",

    #
    # Health checkers
    #

    "envoy.health_checkers.redis":                      "//source/extensions/health_checkers/redis:config",

    #
    # HTTP filters
    #

    "envoy.filters.http.adaptive_concurrency":          "//source/extensions/filters/http/adaptive_concurrency:config",
    "envoy.filters.http.buffer":                        "//source/extensions/filters/http/buffer:config",
    "envoy.filters.http.cors":                          "//source/extensions/filters/http/cors:config",
    "envoy.filters.http.csrf":                          "//source/extensions/filters/http/csrf:config",
    "envoy.filters.http.dynamic_forward_proxy":         "//source/extensions/filters/http/dynamic_forward_proxy:config",
    "envoy.filters.http.dynamo":                        "//source/extensions/filters/http/dynamo:config",
    "envoy.filters.http.ext_authz":                     "//source/extensions/filters/http/ext_authz:config",
    "envoy.filters.http.fault":                         "//source/extensions/filters/http/fault:config",
    "envoy.filters.http.grpc_http1_bridge":             "//source/extensions/filters/http/grpc_http1_bridge:config",
    "envoy.filters.http.grpc_http1_reverse_bridge":     "//source/extensions/filters/http/grpc_http1_reverse_bridge:config",
    "envoy.filters.http.grpc_json_transcoder":          "//source/extensions/filters/http/grpc_json_transcoder:config",
    "envoy.filters.http.grpc_stats":                    "//source/extensions/filters/http/grpc_stats:config",
    "envoy.filters.http.grpc_web":                      "//source/extensions/filters/http/grpc_web:config",
    "envoy.filters.http.gzip":                          "//source/extensions/filters/http/gzip:config",
    "envoy.filters.http.header_to_metadata":            "//source/extensions/filters/http/header_to_metadata:config",
    "envoy.filters.http.health_check":                  "//source/extensions/filters/http/health_check:config",
    "envoy.filters.http.ip_tagging":                    "//source/extensions/filters/http/ip_tagging:config",
    "envoy.filters.http.jwt_authn":                     "//source/extensions/filters/http/jwt_authn:config",
    "envoy.filters.http.lua":                           "//source/extensions/filters/http/lua:config",
    "envoy.filters.http.original_src":                  "//source/extensions/filters/http/original_src:config",
    "envoy.filters.http.ratelimit":                     "//source/extensions/filters/http/ratelimit:config",
    "envoy.filters.http.rbac":                          "//source/extensions/filters/http/rbac:config",
    "envoy.filters.http.router":                        "//source/extensions/filters/http/router:config",
    "envoy.filters.http.squash":                        "//source/extensions/filters/http/squash:config",
    "envoy.filters.http.tap":                           "//source/extensions/filters/http/tap:config",

    #
    # Listener filters
    #

    "envoy.filters.listener.http_inspector":            "//source/extensions/filters/listener/http_inspector:config",
    # NOTE: The original_dst filter is implicitly loaded if original_dst functionality is
    #       configured on the listener. Do not remove it in that case or configs will fail to load.
    "envoy.filters.listener.original_dst":              "//source/extensions/filters/listener/original_dst:config",
    "envoy.filters.listener.original_src":              "//source/extensions/filters/listener/original_src:config",
    # NOTE: The proxy_protocol filter is implicitly loaded if proxy_protocol functionality is
    #       configured on the listener. Do not remove it in that case or configs will fail to load.
    "envoy.filters.listener.proxy_protocol":            "//source/extensions/filters/listener/proxy_protocol:config",
    "envoy.filters.listener.tls_inspector":             "//source/extensions/filters/listener/tls_inspector:config",

    #
    # Network filters
    #

    "envoy.filters.network.client_ssl_auth":            "//source/extensions/filters/network/client_ssl_auth:config",
    "envoy.filters.network.dubbo_proxy":                "//source/extensions/filters/network/dubbo_proxy:config",
    "envoy.filters.network.echo":                       "//source/extensions/filters/network/echo:config",
    "envoy.filters.network.ext_authz":                  "//source/extensions/filters/network/ext_authz:config",
    "envoy.filters.network.http_connection_manager":    "//source/extensions/filters/network/http_connection_manager:config",
    # WiP
    "envoy.filters.network.kafka_broker":               "//source/extensions/filters/network/kafka:kafka_broker_config_lib",
    "envoy.filters.network.local_ratelimit":            "//source/extensions/filters/network/local_ratelimit:config",
    "envoy.filters.network.mongo_proxy":                "//source/extensions/filters/network/mongo_proxy:config",
    "envoy.filters.network.mysql_proxy":                "//source/extensions/filters/network/mysql_proxy:config",
    "envoy.filters.network.ratelimit":                  "//source/extensions/filters/network/ratelimit:config",
    "envoy.filters.network.rbac":                       "//source/extensions/filters/network/rbac:config",
    "envoy.filters.network.redis_proxy":                "//source/extensions/filters/network/redis_proxy:config",
    "envoy.filters.network.tcp_proxy":                  "//source/extensions/filters/network/tcp_proxy:config",
    "envoy.filters.network.thrift_proxy":               "//source/extensions/filters/network/thrift_proxy:config",
    "envoy.filters.network.sni_cluster":                "//source/extensions/filters/network/sni_cluster:config",
    "envoy.filters.network.zookeeper_proxy":            "//source/extensions/filters/network/zookeeper_proxy:config",

    #
    # UDP filters
    #

    "envoy.filters.udp_listener.udp_proxy":             "//source/extensions/filters/udp/udp_proxy:config",

    #
    # Resource monitors
    #

    "envoy.resource_monitors.fixed_heap":               "//source/extensions/resource_monitors/fixed_heap:config",
    "envoy.resource_monitors.injected_resource":        "//source/extensions/resource_monitors/injected_resource:config",

    #
    # Stat sinks
    #

    "envoy.stat_sinks.dog_statsd":                      "//source/extensions/stat_sinks/dog_statsd:config",
    "envoy.stat_sinks.hystrix":                         "//source/extensions/stat_sinks/hystrix:config",
    "envoy.stat_sinks.metrics_service":                 "//source/extensions/stat_sinks/metrics_service:config",
    "envoy.stat_sinks.statsd":                          "//source/extensions/stat_sinks/statsd:config",

    #
    # Thrift filters
    #

    "envoy.filters.thrift.router":                      "//source/extensions/filters/network/thrift_proxy/router:config",
    "envoy.filters.thrift.ratelimit":                   "//source/extensions/filters/network/thrift_proxy/filters/ratelimit:config",

    #
    # Tracers
    #

    "envoy.tracers.dynamic_ot":                         "//source/extensions/tracers/dynamic_ot:config",
    "envoy.tracers.lightstep":                          "//source/extensions/tracers/lightstep:config",
    "envoy.tracers.datadog":                            "//source/extensions/tracers/datadog:config",
    "envoy.tracers.zipkin":                             "//source/extensions/tracers/zipkin:config",
    "envoy.tracers.opencensus":                         "//source/extensions/tracers/opencensus:config",
    # WiP
    "envoy.tracers.xray":                               "//source/extensions/tracers/xray:config",

    #
    # Transport sockets
    #

    "envoy.transport_sockets.alts":                     "//source/extensions/transport_sockets/alts:config",
    "envoy.transport_sockets.raw_buffer":               "//source/extensions/transport_sockets/raw_buffer:config",
    "envoy.transport_sockets.tap":                      "//source/extensions/transport_sockets/tap:config",

    #
    # Retry host predicates
    #

    "envoy.retry_host_predicates.previous_hosts":       "//source/extensions/retry/host/previous_hosts:config",
    "envoy.retry_host_predicates.omit_canary_hosts":    "//source/extensions/retry/host/omit_canary_hosts:config",

    #
    # Retry priorities
    #

    "envoy.retry_priorities.previous_priorities":       "//source/extensions/retry/priority/previous_priorities:config",
}
# Extension name -> Bazel build target for the Windows build.
# NOTE(review): judging by the name and by the large number of commented-out
# entries, this is the subset of extensions that currently compiles on
# Windows — confirm against the build docs before enabling more entries.
WINDOWS_EXTENSIONS = {
    #
    # Access loggers
    #
    "envoy.access_loggers.file":                        "//source/extensions/access_loggers/file:config",
    #"envoy.access_loggers.http_grpc":                   "//source/extensions/access_loggers/grpc:http_config",

    #
    # gRPC Credentials Plugins
    #
    #"envoy.grpc_credentials.file_based_metadata":       "//source/extensions/grpc_credentials/file_based_metadata:config",

    #
    # Health checkers
    #
    #"envoy.health_checkers.redis":                      "//source/extensions/health_checkers/redis:config",

    #
    # HTTP filters
    #
    #"envoy.filters.http.buffer":                        "//source/extensions/filters/http/buffer:config",
    #"envoy.filters.http.cors":                          "//source/extensions/filters/http/cors:config",
    #"envoy.filters.http.csrf":                          "//source/extensions/filters/http/csrf:config",
    #"envoy.filters.http.dynamo":                        "//source/extensions/filters/http/dynamo:config",
    #"envoy.filters.http.ext_authz":                     "//source/extensions/filters/http/ext_authz:config",
    #"envoy.filters.http.fault":                         "//source/extensions/filters/http/fault:config",
    #"envoy.filters.http.grpc_http1_bridge":             "//source/extensions/filters/http/grpc_http1_bridge:config",
    #"envoy.filters.http.grpc_json_transcoder":          "//source/extensions/filters/http/grpc_json_transcoder:config",
    #"envoy.filters.http.grpc_web":                      "//source/extensions/filters/http/grpc_web:config",
    #"envoy.filters.http.gzip":                          "//source/extensions/filters/http/gzip:config",
    #"envoy.filters.http.health_check":                  "//source/extensions/filters/http/health_check:config",
    #"envoy.filters.http.ip_tagging":                    "//source/extensions/filters/http/ip_tagging:config",
    #"envoy.filters.http.lua":                           "//source/extensions/filters/http/lua:config",
    #"envoy.filters.http.ratelimit":                     "//source/extensions/filters/http/ratelimit:config",
    #"envoy.filters.http.rbac":                          "//source/extensions/filters/http/rbac:config",
    #"envoy.filters.http.router":                        "//source/extensions/filters/http/router:config",
    #"envoy.filters.http.squash":                        "//source/extensions/filters/http/squash:config",

    #
    # Listener filters
    #
    # NOTE: The proxy_protocol filter is implicitly loaded if proxy_protocol functionality is
    #       configured on the listener. Do not remove it in that case or configs will fail to load.
    "envoy.filters.listener.proxy_protocol":            "//source/extensions/filters/listener/proxy_protocol:config",
    # NOTE: The original_dst filter is implicitly loaded if original_dst functionality is
    #       configured on the listener. Do not remove it in that case or configs will fail to load.
    #"envoy.filters.listener.original_dst":              "//source/extensions/filters/listener/original_dst:config",
    "envoy.filters.listener.tls_inspector":             "//source/extensions/filters/listener/tls_inspector:config",

    #
    # Network filters
    #
    "envoy.filters.network.client_ssl_auth":            "//source/extensions/filters/network/client_ssl_auth:config",
    #"envoy.filters.network.echo":                       "//source/extensions/filters/network/echo:config",
    #"envoy.filters.network.ext_authz":                  "//source/extensions/filters/network/ext_authz:config",
    #"envoy.filters.network.http_connection_manager":    "//source/extensions/filters/network/http_connection_manager:config",
    #"envoy.filters.network.mongo_proxy":                "//source/extensions/filters/network/mongo_proxy:config",
    #"envoy.filters.network.mysql_proxy":                "//source/extensions/filters/network/mysql_proxy:config",
    #"envoy.filters.network.redis_proxy":                "//source/extensions/filters/network/redis_proxy:config",
    #"envoy.filters.network.ratelimit":                  "//source/extensions/filters/network/ratelimit:config",
    "envoy.filters.network.tcp_proxy":                  "//source/extensions/filters/network/tcp_proxy:config",
    #"envoy.filters.network.thrift_proxy":               "//source/extensions/filters/network/thrift_proxy:config",
    #"envoy.filters.network.sni_cluster":                "//source/extensions/filters/network/sni_cluster:config",
    #"envoy.filters.network.zookeeper_proxy":            "//source/extensions/filters/network/zookeeper_proxy:config",

    #
    # Stat sinks
    #
    #"envoy.stat_sinks.dog_statsd":                      "//source/extensions/stat_sinks/dog_statsd:config",
    #"envoy.stat_sinks.metrics_service":                 "//source/extensions/stat_sinks/metrics_service:config",
    #"envoy.stat_sinks.statsd":                          "//source/extensions/stat_sinks/statsd:config",

    #
    # Tracers
    #
    #"envoy.tracers.dynamic_ot":                         "//source/extensions/tracers/dynamic_ot:config",
    #"envoy.tracers.lightstep":                          "//source/extensions/tracers/lightstep:config",
    #"envoy.tracers.zipkin":                             "//source/extensions/tracers/zipkin:config",

    #
    # Transport sockets
    #
    #"envoy.transport_sockets.tap":                      "//source/extensions/transport_sockets/tap:config",
}
| 58.726563 | 132 | 0.636025 |
795a13bbca4157f8898bd1e47ccf63d6fe8e9727 | 772 | py | Python | Phase-2/Json/Day-38.py | emetowinner/python-challenges | 520da69da0f2632deb1e81136d2b62d40555a4aa | [
"MIT"
] | 3 | 2020-05-21T20:19:40.000Z | 2022-02-27T08:20:10.000Z | Phase-2/Json/tasks.py | emetowinner/python-challenges | 520da69da0f2632deb1e81136d2b62d40555a4aa | [
"MIT"
] | null | null | null | Phase-2/Json/tasks.py | emetowinner/python-challenges | 520da69da0f2632deb1e81136d2b62d40555a4aa | [
"MIT"
] | 4 | 2020-05-12T16:41:52.000Z | 2020-05-21T20:17:22.000Z | 1. Write a Python program to convert JSON data to Python object.
2. Write a Python program to convert Python object to JSON data.
3. Write a Python program to convert Python objects into JSON strings. Print all the values.
4. Write a Python program to convert Python dictionary object (sort by key) to JSON data. Print the object members with indent level 4.
5. Write a Python program to convert JSON encoded data into Python objects.
6. Write a Python program to create a new JSON file from an existing JSON file.
7. Write a Python program to check whether an instance is complex or not.
8. Write a Python program to check whether a JSON string contains complex object or not.
9. Write a Python program to access only unique key value of a Python object.
| 29.692308 | 135 | 0.772021 |
795a13fda03ad4d84d2b7ec872218f7207ec5de5 | 478 | py | Python | egs/zeroth/s5/data/local/lm/buildLM/_scripts_/checkPronun.py | SYHPARK/kaldi | 71af1b0d3c3885936cb4b2a92048a9fa2371bd32 | [
"Apache-2.0"
] | 330 | 2018-02-03T02:12:05.000Z | 2022-03-31T14:18:18.000Z | egs/zeroth/s5/data/local/lm/buildLM/_scripts_/checkPronun.py | SYHPARK/kaldi | 71af1b0d3c3885936cb4b2a92048a9fa2371bd32 | [
"Apache-2.0"
] | 17 | 2018-02-12T06:28:30.000Z | 2022-03-16T08:36:54.000Z | egs/zeroth/s5/data/local/lm/buildLM/_scripts_/checkPronun.py | SYHPARK/kaldi | 71af1b0d3c3885936cb4b2a92048a9fa2371bd32 | [
"Apache-2.0"
] | 125 | 2018-02-03T00:51:25.000Z | 2022-03-19T05:06:04.000Z | #!/usr/bin/env python3
import fileinput
import sys
import re
def main():
nLine=0
for line in fileinput.input():
if nLine%1000 == 0:
print(' %d line processed' %(nLine), end='\r', file=sys.stderr)
sys.stderr.flush()
nLine+=1
tstr = line.strip()
if len(tstr.split()) == 1:
print(tstr)
print(' %d line processed' %(nLine), file=sys.stderr)
if __name__ == '__main__':
main()
| 20.782609 | 76 | 0.529289 |
795a14749f265aa825635d79de27cc0396ab1b5e | 10,380 | py | Python | sdk/yapily/models/pre_authorisation_request.py | yapily/yapily-sdk-python | c09930c44e8795e270e2846a2c0fb783200df76a | [
"MIT"
] | 11 | 2018-05-18T14:38:49.000Z | 2021-09-08T13:24:37.000Z | sdk/yapily/models/pre_authorisation_request.py | yapily/yapily-sdk-python | c09930c44e8795e270e2846a2c0fb783200df76a | [
"MIT"
] | 5 | 2019-10-23T15:06:33.000Z | 2021-08-03T21:18:50.000Z | sdk/yapily/models/pre_authorisation_request.py | yapily/yapily-sdk-python | c09930c44e8795e270e2846a2c0fb783200df76a | [
"MIT"
] | 8 | 2019-04-27T00:02:18.000Z | 2021-11-21T02:54:12.000Z | # coding: utf-8
"""
Yapily API
To access endpoints that require authentication, use your application key and secret created in the Dashboard (https://dashboard.yapily.com) # noqa: E501
The version of the OpenAPI document: 1.154.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from yapily.configuration import Configuration
class PreAuthorisationRequest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Declared OpenAPI type of every attribute; the generic (de)serialization
    # machinery (e.g. to_dict below) iterates this table.
    openapi_types = {
        'scope': 'str',
        'user_uuid': 'str',
        'application_user_id': 'str',
        'forward_parameters': 'list[str]',
        'institution_id': 'str',
        'callback': 'str',
        'redirect': 'RedirectRequest',
        'one_time_token': 'bool'
    }

    # Python attribute name -> JSON field name used on the wire.
    attribute_map = {
        'scope': 'scope',
        'user_uuid': 'userUuid',
        'application_user_id': 'applicationUserId',
        'forward_parameters': 'forwardParameters',
        'institution_id': 'institutionId',
        'callback': 'callback',
        'redirect': 'redirect',
        'one_time_token': 'oneTimeToken'
    }

    def __init__(self, scope=None, user_uuid=None, application_user_id=None, forward_parameters=None, institution_id=None, callback=None, redirect=None, one_time_token=None, local_vars_configuration=None):  # noqa: E501
        """PreAuthorisationRequest - a model defined in OpenAPI"""  # noqa: E501
        # local_vars_configuration.client_side_validation controls whether the
        # property setters below enforce their "must not be None" checks.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._scope = None
        self._user_uuid = None
        self._application_user_id = None
        self._forward_parameters = None
        self._institution_id = None
        self._callback = None
        self._redirect = None
        self._one_time_token = None
        self.discriminator = None

        # scope, institution_id, callback and one_time_token are always routed
        # through their validating setters (required fields); the remaining
        # fields are only assigned when explicitly supplied.
        self.scope = scope
        if user_uuid is not None:
            self.user_uuid = user_uuid
        if application_user_id is not None:
            self.application_user_id = application_user_id
        if forward_parameters is not None:
            self.forward_parameters = forward_parameters
        self.institution_id = institution_id
        self.callback = callback
        if redirect is not None:
            self.redirect = redirect
        self.one_time_token = one_time_token

    @property
    def scope(self):
        """Gets the scope of this PreAuthorisationRequest.  # noqa: E501

        Defines the scope of the pre-authorisation request.  # noqa: E501

        :return: The scope of this PreAuthorisationRequest.  # noqa: E501
        :rtype: str
        """
        return self._scope

    @scope.setter
    def scope(self, scope):
        """Sets the scope of this PreAuthorisationRequest.

        Defines the scope of the pre-authorisation request.  # noqa: E501

        :param scope: The scope of this PreAuthorisationRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and scope is None:  # noqa: E501
            raise ValueError("Invalid value for `scope`, must not be `None`")  # noqa: E501

        self._scope = scope

    @property
    def user_uuid(self):
        """Gets the user_uuid of this PreAuthorisationRequest.  # noqa: E501

        Uuid of the application user who will authorise access to their data. Either the userUuid or applicationUserId must be provided.  # noqa: E501

        :return: The user_uuid of this PreAuthorisationRequest.  # noqa: E501
        :rtype: str
        """
        return self._user_uuid

    @user_uuid.setter
    def user_uuid(self, user_uuid):
        """Sets the user_uuid of this PreAuthorisationRequest.

        Uuid of the application user who will authorise access to their data. Either the userUuid or applicationUserId must be provided.  # noqa: E501

        :param user_uuid: The user_uuid of this PreAuthorisationRequest.  # noqa: E501
        :type: str
        """
        self._user_uuid = user_uuid

    @property
    def application_user_id(self):
        """Gets the application_user_id of this PreAuthorisationRequest.  # noqa: E501

        Descriptive identifier for the application user.Either the userUuid or applicationUserId must be provided.  # noqa: E501

        :return: The application_user_id of this PreAuthorisationRequest.  # noqa: E501
        :rtype: str
        """
        return self._application_user_id

    @application_user_id.setter
    def application_user_id(self, application_user_id):
        """Sets the application_user_id of this PreAuthorisationRequest.

        Descriptive identifier for the application user.Either the userUuid or applicationUserId must be provided.  # noqa: E501

        :param application_user_id: The application_user_id of this PreAuthorisationRequest.  # noqa: E501
        :type: str
        """
        self._application_user_id = application_user_id

    @property
    def forward_parameters(self):
        """Gets the forward_parameters of this PreAuthorisationRequest.  # noqa: E501

        :return: The forward_parameters of this PreAuthorisationRequest.  # noqa: E501
        :rtype: list[str]
        """
        return self._forward_parameters

    @forward_parameters.setter
    def forward_parameters(self, forward_parameters):
        """Sets the forward_parameters of this PreAuthorisationRequest.

        :param forward_parameters: The forward_parameters of this PreAuthorisationRequest.  # noqa: E501
        :type: list[str]
        """
        self._forward_parameters = forward_parameters

    @property
    def institution_id(self):
        """Gets the institution_id of this PreAuthorisationRequest.  # noqa: E501

        :return: The institution_id of this PreAuthorisationRequest.  # noqa: E501
        :rtype: str
        """
        return self._institution_id

    @institution_id.setter
    def institution_id(self, institution_id):
        """Sets the institution_id of this PreAuthorisationRequest.

        :param institution_id: The institution_id of this PreAuthorisationRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and institution_id is None:  # noqa: E501
            raise ValueError("Invalid value for `institution_id`, must not be `None`")  # noqa: E501

        self._institution_id = institution_id

    @property
    def callback(self):
        """Gets the callback of this PreAuthorisationRequest.  # noqa: E501

        :return: The callback of this PreAuthorisationRequest.  # noqa: E501
        :rtype: str
        """
        return self._callback

    @callback.setter
    def callback(self, callback):
        """Sets the callback of this PreAuthorisationRequest.

        :param callback: The callback of this PreAuthorisationRequest.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and callback is None:  # noqa: E501
            raise ValueError("Invalid value for `callback`, must not be `None`")  # noqa: E501

        self._callback = callback

    @property
    def redirect(self):
        """Gets the redirect of this PreAuthorisationRequest.  # noqa: E501

        :return: The redirect of this PreAuthorisationRequest.  # noqa: E501
        :rtype: RedirectRequest
        """
        return self._redirect

    @redirect.setter
    def redirect(self, redirect):
        """Sets the redirect of this PreAuthorisationRequest.

        :param redirect: The redirect of this PreAuthorisationRequest.  # noqa: E501
        :type: RedirectRequest
        """
        self._redirect = redirect

    @property
    def one_time_token(self):
        """Gets the one_time_token of this PreAuthorisationRequest.  # noqa: E501

        :return: The one_time_token of this PreAuthorisationRequest.  # noqa: E501
        :rtype: bool
        """
        return self._one_time_token

    @one_time_token.setter
    def one_time_token(self, one_time_token):
        """Sets the one_time_token of this PreAuthorisationRequest.

        :param one_time_token: The one_time_token of this PreAuthorisationRequest.  # noqa: E501
        :type: bool
        """
        if self.local_vars_configuration.client_side_validation and one_time_token is None:  # noqa: E501
            raise ValueError("Invalid value for `one_time_token`, must not be `None`")  # noqa: E501

        self._one_time_token = one_time_token

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialise: nested models expose to_dict(); lists and
        # dicts of models are converted element by element.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is value equality over the serialised representation.
        if not isinstance(other, PreAuthorisationRequest):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PreAuthorisationRequest):
            return True

        return self.to_dict() != other.to_dict()
| 33.162939 | 219 | 0.641137 |
795a158f721c5e990d32fad5dc6cdb09cc729de2 | 18,658 | py | Python | casaconfig/jplephem_request.py | casangi/casaconfig | 1f7fa1f4b149eb5e6b5ac3206a27b9a99307eed2 | [
"Apache-2.0"
] | null | null | null | casaconfig/jplephem_request.py | casangi/casaconfig | 1f7fa1f4b149eb5e6b5ac3206a27b9a99307eed2 | [
"Apache-2.0"
] | null | null | null | casaconfig/jplephem_request.py | casangi/casaconfig | 1f7fa1f4b149eb5e6b5ac3206a27b9a99307eed2 | [
"Apache-2.0"
] | null | null | null | #
# Utilities for having JPL-Horizons ephemerides mailed to you.
# See JPL_ephem_reader.py for doing something with them.
#
# Examples:
#
# import recipes.ephemerides.request as jplreq
#
# # I recommend you not ask for more than ~18 months of anything with
# # date_incr ~ 1h, because the result would be split into multiple
# # emails which you would have to stitch together.
#
# for thing in jplreq.asteroids.keys() + jplreq.planets_and_moons.keys():
# jplreq.request_from_JPL(thing, '2012-12-31')
#
## A trick to avoid fast moving objects:
#for thing in jplreq.asteroids.keys() + jplreq.planets_and_moons.keys():
# if thing not in jplreq.default_date_incrs:
# jplreq.request_from_JPL(thing, '2012-12-31')
from __future__ import absolute_import
from __future__ import print_function
import os
import re
import smtplib
import socket
import time
from email.mime.text import MIMEText
# Maps from object names to numbers that JPL-Horizons will recognize without
# fussing around with choosing between barycenters, substrings, etc..
# Use lower case keys.
# Do not use keys for asteroids that are also in planets_and_moons. (The IAU
# might enforce this anyway.)
#
# They are numbered by order of discovery, which is not quite the same as size.
# Lower-case object name -> JPL-Horizons small-body record number.
# (request_from_JPL appends ';' to these numbers to select small-body records.)
asteroids = {'ceres': 1,
             'pallas': 2,
             'juno': 3,          # Large crater and temperature changes.
             'vesta': 4,
             'astraea': 5,
             'hygiea': 10,       # Careful with the spelling.  It used to be
                                 # Hygeia, and it is named after the Greek
                                 # goddess Hygieia (or Hygeia).  Also, it is
                                 # fairly oblate and eccentric.
             'parthenope': 11,
             'victoria': 12,
             'eunomia': 15,
             'euphrosyne': 31,
             '52 europa': 52,
             'cybele': 65,
             'sylvia': 87,       # Has two moons.
             'davida': 511,
             'interamnia': 704}

# Lower-case object name -> JPL-Horizons major-body ID.
# IDs ending in 99 are planet centers; the others are natural satellites.
planets_and_moons = {'mercury': 199,
                     'venus': 299,
                     'moon': 301,
                     'mars': 499,
                     'phobos': 401,
                     'deimos': 402,
                     'jupiter': 599,
                     'io': 501,
                     'europa': 502,
                     'ganymede': 503,
                     'callisto': 504,
                     'saturn': 699,
                     'mimas': 601,
                     'enceladus': 602,
                     'tethys': 603,
                     'dione': 604,
                     'rhea': 605,
                     'titan': 606,
                     'hyperion': 607,
                     'iapetus': 608,
                     'phoebe': 609,
                     'janus': 610,
                     'epimetheus': 611,
                     'helene': 612,
                     'telesto': 613,
                     'calypso': 614,
                     'atlas': 615,
                     'prometheus': 616,
                     'pandora': 617,
                     'pan': 618,
                     # I've been to Ymir, so I have a soft spot for it, but it
                     # has an unknown radius (2010).
                     #'ymir': 619,
                     'uranus': 799,
                     'ariel': 701,
                     'umbriel': 702,
                     'titania': 703,
                     'oberon': 704,
                     'miranda': 705,
                     'cordelia': 706,
                     'ophelia': 707,
                     'bianca': 708,
                     'cressida': 709,
                     'desdemona': 710,
                     'juliet': 711,
                     'portia': 712,
                     'rosalind': 713,
                     'belinda': 714,
                     'puck': 715,
                     # 'caliban': 716, Uncertain radius, 2010.
                     # 'sycorax': 717, Uncertain radius, 2010.
                     # 'prospero': 718, Unknown radius, 2010
                     # 'setebos': 719, Unknown radius, 2010
                     # 'stephano': 720, Unknown radius, 2010
                     # 'trinculo': 721, Unknown radius, 2010
                     # 'francisco': 722, "
                     # 'margaret': 723, Unknown radius, 2010
                     # 'ferdinand': 724, Unknown radius, 2010
                     # 'perdita': 725, Unknown radius, 2010
                     # 'mab': 726, Unknown radius, 2010
                     # 'cupid': 727, "
                     'neptune': 899,
                     'triton': 801,
                     'nereid': 802,
                     'naiad': 803,
                     'thalassa': 804,
                     'despina': 805,
                     'galatea': 806,
                     'larissa': 807,
                     'proteus': 808,
                     'pluto': 999,  # It's still a planet in this sense.
                     'charon': 901
                     # 'nix': 902 Unknown radius, 2010
                     # 'hydra': 903 Unknown radius, 2010
                     }

# defaults
# Objects for which request_from_JPL asks for polar-axis orientation (code 32)
# and sub-observer/sub-Solar longitudes (codes 14, 15) by default.
should_have_orientation = ['mars', 'deimos', 'phobos', 'vesta', 'jupiter', 'io',
                           'janus', 'enceladus', 'mimas', 'iapetus',
                           'phoebe', 'tethys', 'uranus', 'ariel', 'miranda',
                           'neptune']
should_have_sublong = ['mars', 'deimos', 'phobos', 'jupiter', 'io',
                       'janus', 'enceladus', 'phoebe', 'mimas', 'tethys',
                       'neptune']

# Getting positions once a day is not enough for many moons, if the position of
# the moon relative to its primary will be needed.  Note that a maximum
# suitable increment is imposed by Earth's motion.
default_date_incrs = {
    'default': "1 d",  # The default default.
    'ariel': '0.5d',
    'cordelia': '0.05d',
    'deimos': '0.25d',
    'dione': '0.5d',
    'enceladus': '0.25d',
    'io': '0.25d',
    'janus': '0.1d',
    'mimas': '0.2d',
    'miranda': '0.25 d',
    'phobos': '0.05d',
    'tethys': '0.4d',
    'moon': '1 h'
    }
def request_from_JPL(objnam, enddate,
                     startdate=None,
                     date_incr=None,
                     get_axis_orientation=None,
                     get_axis_ang_orientation=None,
                     get_sub_long=None,
                     obsloc="",
                     return_address=None,
                     mailserver=None,
                     use_apparent=True,
                     get_sep=None):
    """
    Request an ASCII ephemeris table from JPL-Horizons for a Solar System
    object.  If all goes well it should arrive by email in a few minutes to
    an hour.  (The return value from this function is whether or not it sent
    the request.)

    All but the first two parameters have hopefully sensible defaults:

    objnam:
        The name of the object (case-insensitive).  It will be used to refer to
        specifically its center, as opposed to other possible locations in the
        vicinity of the object.  For example, if objnam ="Mars", it will choose
        Mars, not the Mars barycenter or the Mars Reconnaissance Orbiter.
    enddate:
        The date that the ephemeris should end on.
        It can be an epoch measure or string (yyyy-mm-dd, assumes UT).
    startdate:
        Defaults to today, but it can be specified like enddate.
    date_incr:
        The increment between dates in the ephemeris.  casapy's setjy
        task and me tool automatically interpolate.  It can be a (time) quantity
        or a string (which will be interpreted as if it were a quantity).
        Unlike the JPL email interface, this does not need it to be an integer
        number of time units.  request_from_JPL() will do its best to convert
        it to fit JPL's required format.
        Default: 1 Earth day.
    get_axis_orientation:
        Request the orientation of the object's polar axis relative to the line
        of sight.  This is needed (along with the flattening) if treating the
        disk as an ellipse, but it is often unavailable.
        True or False
        Defaults to whether or not objnam is in should_have_orientation.
    get_axis_ang_orientation:
        Request the angular orientation (position angle and angular distance from
        sub-observer point) of the object's polar axis relative to the line
        of sight.
        True or False (by default it is included)
    get_sub_long:
        Request the planetographic (geodetic) longitudes and latitudes of the
        subobserver and sub-Solar points.  Only needed if the object has
        significant known surface features.
        True or False
        Defaults to whether or not objnam is in should_have_sublong.
    obsloc:
        Observatory name, used to get topocentric coordinates.
        Obviously not all observatories are recognized.
        Default: "" (geocentric)
    return_address:
        The email address that the ephemeris will be sent to.
        Default: <username>@<domainname>.
    mailserver:
        The computer at _your_ end to send the mail from.
        Default: a semi-intelligent guess.
    use_apparent:
        Get the apparent instead of J2000 RA and Dec.  No refraction by Earth's
        atmosphere will be applied; MeasComet assumes apparent directions and
        JPL_ephem_reader would be confused if both apparent and J2000
        directions were present.
        Default: True
    get_sep:
        Get the angular separation from the primary, and whether it is
        transiting, in eclipse, etc..  This only makes sense for moons and does
        not guarantee that nothing else (like Earth, Luna, a bright extrasolar
        object) is in the line of sight!
        Default: True if it is in the moons list, False otherwise.
    """
    lobjnam = objnam.lower()

    # Handle defaults
    if get_axis_orientation == None:  # remember False is valid.
        if lobjnam in should_have_orientation:
            get_axis_orientation = True
        else:
            get_axis_orientation = False

    if get_sub_long == None:  # remember False is valid.
        if lobjnam in should_have_sublong:
            get_sub_long = True
        else:
            get_sub_long = False

    if not return_address:
        fqdn = socket.getfqdn()
        # Only use the top two levels, i.e. eso.org and nrao.edu, not
        # (faraday.)cv.nrao.edu.
        domain = '.'.join(fqdn.split('.')[-2:])
        # NOTE(review): os.getlogin() can raise OSError when there is no
        # controlling terminal (e.g. cron jobs); confirm whether
        # getpass.getuser() would be the safer choice here.
        return_address = os.getlogin() + '@' + domain

    if not mailserver:
        try:
            #mailserver = socket.getfqdn(socket.gethostbyname('mail'))
            mailserver = socket.getfqdn(socket.gethostbyname('smtp'))
        except socket.gaierror:
            print("Could not find a mailserver.")
            return False

    if not startdate:
        # gmtime() unpacks into 9 fields; only year/month/day are used.
        syr, smon, s_d, s_h, smin, s_s, swday, syday, sisdst = time.gmtime()
        startdate = "%d-%02d-%02d" % (syr, smon, s_d)

    if not date_incr:
        date_incr = default_date_incrs.get(lobjnam,
                                           default_date_incrs['default'])

    if get_sep == None:
        # Moons have major-body IDs not ending in 99.
        get_sep = (planets_and_moons.get(lobjnam, 99) % 100) < 99

    # Get to work.
    if lobjnam in asteroids:
        # The trailing ';' selects a small-body record in Horizons.
        objnum = str(asteroids[lobjnam]) + ';'
    elif lobjnam in planets_and_moons:
        objnum = str(planets_and_moons[lobjnam])
    else:
        #print "The JPL object number for", objnam, "is not known.  Try looking it up at"
        #print 'http://ssd.jpl.nasa.gov/horizons.cgi?s_body=1#top and adding it.'
        print(" request_from_JPL() does not recognize the input object name, %s." % objnam)
        print(" The request will be sent as is without a check if it complies with the JPL-Horizons convention.")
        print(" Please refer http://ssd.jpl.nasa.gov/horizons.cgi?s_body=1#top when you get \"no matches found\"")
        print(" mail from the JPL-Horizons Systrem.")
        objnum = objnam
        # If the name is not purely numeric, prefix it with DES= so Horizons
        # treats it as a designation.  ("Systrem" typo above is in the emitted
        # message; left as-is since this block must not change runtime text.)
        try:
            isnum = int(objnum)
        except:
            objnum = "DES=" + objnam
        #return False

    if obsloc and obsloc.lower() != 'geocentric':
        print("Topocentric coordinates are not yet supported by this script.")
        print("Defaulting to geocentric.")

    # to set site coordinates,
    # CENTER=coord@500
    # COORD_TYPE='GEODETIC'
    # SITE_COORD='E.lon,lat,height' (in deg and km)
    # e.g for ALMA
    # SITE_COORD='-67.7549290,-23.022886,5.05680'

    # 500@399: 399=earth 500=body center (==g@399==geo)
    center = '500@399'

    # quantities = [2, 10, 12, 19, 20, 24]
    # Original default set of quantities: apparent RA/DEC, Illum. frac, ang. separation between
    # non-lunar target and the center of primary body, helio range and range rate, observer
    # range and range rate, S-T-O
    # Bryan's request
    # [1,14,15,17,19,20,24]
    # extra: 10
    # need : 17
    # new default quantities
    quantities = [2, 12, 17, 19, 20, 24]
    if not use_apparent:
        quantities[0] = 1
    if get_axis_orientation:
        quantities.append(32)
    if get_sub_long:
        quantities.append(14)
        quantities.append(15)
    if not get_sep:
        quantities.remove(12)
    if not get_axis_ang_orientation:
        quantities.remove(17)
    print("Retrieved quantity code list=", quantities)

    # It seems that STEP_SIZE must be an integer, but the unit can be changed
    # to hours or minutes.
    # NOTE(review): re.match() returns None when date_incr carries no d/h/m
    # unit; the .group() calls below would then raise AttributeError — confirm
    # all callers pass a unit-suffixed increment.
    match = re.match(r'([0-9.]+)\s*([dhm])', date_incr)
    n_time_units = float(match.group(1))
    time_unit = match.group(2)
    if n_time_units < 1.0:
        if time_unit == 'd':
            n_time_units *= 24.0
            time_unit = 'h'
        if time_unit == 'h' and n_time_units < 1.0:  # Note fallthrough.
            n_time_units *= 60.0
            time_unit = 'm'
        if n_time_units < 1.0:  # Uh oh.
            print(date_incr, "is an odd request for a date increment.")
            print("Please change it or make your request manually.")
            return False
        print("Translating date_incr from", date_incr, end=' ')
        date_incr = "%.0f %s" % (n_time_units, time_unit)
        print("to", date_incr)

    # Batch-interface job card; see the Horizons email/batch documentation for
    # the meaning of each keyword.
    instructions = "\n".join(["!$$SOF",
                              "COMMAND= '%s'" % objnum,
                              'CENTER= ' + center,
                              "MAKE_EPHEM= 'YES'",
                              "TABLE_TYPE= 'OBSERVER'",
                              "START_TIME= '%s'" % startdate,
                              "STOP_TIME= '%s'" % enddate,
                              "STEP_SIZE= '%s'" % date_incr,
                              "CAL_FORMAT= 'CAL'",  #date format(CAL,JD,or BOTH)
                              "TIME_DIGITS= 'MINUTES'",
                              "ANG_FORMAT= 'DEG'",
                              "OUT_UNITS= 'KM-S'",
                              "RANGE_UNITS= 'AU'",
                              "APPARENT= 'AIRLESS'",  # for apparent position
                              "SOLAR_ELONG= '0,180'",
                              "SUPPRESS_RANGE_RATE= 'NO'",
                              "SKIP_DAYLT= 'NO'",
                              "EXTRA_PREC= 'NO'",
                              "R_T_S_ONLY= 'NO'",
                              "REF_SYSTEM= 'J2000'",
                              "CSV_FORMAT= 'NO'",
                              "OBJ_DATA= 'YES'",
                              "QUANTITIES= '%s'" % ','.join([str(q) for q in quantities]),
                              '!$$EOF'])

    # Set up a MIMEText object (it's a dictionary)
    msg = MIMEText(instructions)

    msg['To'] = "horizons@ssd.jpl.nasa.gov"
    msg['Subject'] = 'JOB'
    msg['From'] = return_address
    msg['Reply-to'] = return_address

    # Establish an SMTP object and connect to the mail server
    s = smtplib.SMTP()
    s.connect(mailserver)

    # Send the email - real from, real to, extra headers and content ...
    s.sendmail(return_address, msg['To'], msg.as_string())
    s.close()

    return True
def list_moons():
    """
    List planets_and_moons in a more organized way: one column per planet,
    with that planet's moons (ordered by satellite number) underneath.

    Fixes for Python 3 compatibility (the file targets both 2 and 3 via
    __future__ imports): ``num / 100`` became true division on Python 3,
    producing float keys that broke the planet/moon grouping; dict views
    have no ``.sort()``; and ``xrange`` does not exist.  Also, the first
    table column is now detected positionally instead of assuming that
    planet number 1 (Mercury) is present.
    """
    # Gather the moons by planet number.  IDs ending in 99 are planet
    # centers; for a moon, num % 100 is its satellite number.
    planets = {}
    moons = {}
    for lcname, num in planets_and_moons.items():
        planet, body = divmod(num, 100)   # integer division on py2 and py3
        if body == 99:
            planets[planet] = lcname.title()
        else:
            moons.setdefault(planet, {})[body] = lcname.title()

    # For formatting the output table, find the column widths,
    # and maximum number of moons per planet.
    maxmoons = max(len(moons.get(p, ())) for p in planets)
    colwidths = {}
    for p in planets:
        width = len(planets[p])
        if p in moons:
            width = max(width, max(len(m) for m in moons[p].values()))
        colwidths[p] = width

    # Set up the table columns, ordered by planet number.
    plannums = sorted(planets)
    formstr = ''
    hrule = ''
    sortedmoons = {}
    for i, p in enumerate(plannums):
        formstr += '| %-' + str(colwidths[p]) + 's '
        # '|' opens the rule; '+' joins interior column boundaries.
        hrule += ('|' if i == 0 else '+') + '-' * (colwidths[p] + 2)
        # Row index -> moon name, rows ordered by satellite number.
        sortedmoons[p] = {row: moons[p][k]
                          for row, k in enumerate(sorted(moons.get(p, {})))}
    formstr += '|'
    hrule += '|'

    print(formstr % tuple(planets[p] for p in plannums))
    print(hrule)
    for row in range(maxmoons):
        print(formstr % tuple(sortedmoons[p].get(row, '') for p in plannums))
def list_asteroids():
    """
    Like list_moons, but list the asteroids by their numbers
    (= order of discovery, ~ albedo * size)

    Fix: on Python 3, ``asteroids.values()`` returns a dict view, which has
    no ``.sort()`` method; use ``sorted()`` (which also works on Python 2).
    """
    # Invert name -> number into number -> Title-cased name.
    invast = {num: name.title() for name, num in asteroids.items()}
    for n in sorted(invast):
        print("%3d %s" % (n, invast[n]))
| 39.28 | 114 | 0.5231 |
795a165a5b8005cc04d67fcaecc27e13d510a1c7 | 2,387 | py | Python | context/opencensus-context/tests/test_runtime_context.py | jtbeach/opencensus-python | 2e396b063a238b3e823b6efc136b9a0405dd5565 | [
"Apache-2.0"
] | 1 | 2019-09-21T13:52:19.000Z | 2019-09-21T13:52:19.000Z | context/opencensus-context/tests/test_runtime_context.py | dineshkrishnareddy/opencensus-python | e5e752ceab3371ec4b78cec23a717168e2ed9372 | [
"Apache-2.0"
] | null | null | null | context/opencensus-context/tests/test_runtime_context.py | dineshkrishnareddy/opencensus-python | e5e752ceab3371ec4b78cec23a717168e2ed9372 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opencensus.common.runtime_context import RuntimeContext
class RuntimeContextTest(unittest.TestCase):
    """Tests for RuntimeContext slot registration, access, and propagation.

    Note: RuntimeContext is process-global state shared across the test
    methods, so each test registers its own distinct slot name.
    """

    def test_register(self):
        """A newly registered slot defaults to None and accepts assignment."""
        RuntimeContext.register_slot('foo')
        self.assertIsNone(RuntimeContext.foo)
        RuntimeContext.foo = 123
        self.assertEqual(RuntimeContext.foo, 123)

    def test_register_with_default(self):
        RuntimeContext.register_slot('bar', 123)
        self.assertEqual(RuntimeContext.bar, 123)

    def test_register_duplicate(self):
        """Registering the same slot name twice raises ValueError."""
        RuntimeContext.register_slot('dup')
        with self.assertRaises(ValueError):
            RuntimeContext.register_slot('dup')

    def test_get_non_existing(self):
        with self.assertRaises(AttributeError):
            RuntimeContext.non_existing

    def test_set_non_existing(self):
        with self.assertRaises(AttributeError):
            RuntimeContext.non_existing = 1

    def test_clear(self):
        RuntimeContext.register_slot('baz')
        RuntimeContext.baz = 123
        self.assertEqual(RuntimeContext.baz, 123)
        RuntimeContext.clear()
        self.assertEqual(RuntimeContext.baz, None)

    def test_with_current_context(self):
        from threading import Thread

        RuntimeContext.register_slot('operation_id')

        def work(name):
            # Runs in the child thread: the parent's context must have been
            # propagated by with_current_context().
            self.assertEqual(RuntimeContext.operation_id, 'foo')
            RuntimeContext.operation_id = name
            self.assertEqual(RuntimeContext.operation_id, name)

        RuntimeContext.operation_id = 'foo'
        thread = Thread(
            target=RuntimeContext.with_current_context(work),
            # BUG FIX: args must be a tuple.  ('bar') is just the string
            # 'bar', which Thread splatted into three one-character
            # arguments; work() then raised TypeError inside the thread and
            # its assertions never actually executed.
            args=('bar',),
        )
        thread.start()
        thread.join()
        # The child thread's mutation must not leak back to this thread.
        self.assertEqual(RuntimeContext.operation_id, 'foo')
795a17808709cacf8cf15d6849b43ce3533a95fa | 3,026 | py | Python | Src/StdLib/Lib/site-packages/pythonwin/pywin/Demos/ocx/ocxserialtest.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,078 | 2016-07-19T02:48:30.000Z | 2022-03-30T21:22:34.000Z | Src/StdLib/Lib/site-packages/pythonwin/pywin/Demos/ocx/ocxserialtest.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 576 | 2017-05-21T12:36:48.000Z | 2022-03-30T13:47:03.000Z | Src/StdLib/Lib/site-packages/pythonwin/pywin/Demos/ocx/ocxserialtest.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 269 | 2017-05-21T04:44:47.000Z | 2022-03-31T16:18:13.000Z | # ocxserialtest.py
#
# Sample that uses the mscomm OCX to talk to a serial
# device.
# Very simple - queries a modem for ATI responses
import win32ui, win32uiole
import win32con
from pywin.mfc import dialog, activex
from win32com.client import gencache
import pythoncom
SERIAL_SETTINGS = '19200,n,8,1'
SERIAL_PORT = 2
win32ui.DoWaitCursor(1)
serialModule = gencache.EnsureModule("{648A5603-2C6E-101B-82B6-000000000014}", 0, 1, 1)
win32ui.DoWaitCursor(0)
if serialModule is None:
raise ImportError("MS COMM Control does not appear to be installed on the PC")
def MakeDlgTemplate():
    """Build the dialog template: a captioned frame holding one RICHEDIT (id 132)."""
    frame_style = (win32con.DS_MODALFRAME | win32con.WS_POPUP
                   | win32con.WS_VISIBLE | win32con.WS_CAPTION
                   | win32con.WS_SYSMENU | win32con.DS_SETFONT)
    edit_style = (win32con.WS_TABSTOP | win32con.WS_CHILD | win32con.WS_VISIBLE
                  | win32con.ES_WANTRETURN | win32con.ES_MULTILINE
                  | win32con.ES_AUTOVSCROLL | win32con.WS_VSCROLL)
    return [
        ["Very Basic Terminal", (0, 0, 350, 180), frame_style, None,
         (8, "MS Sans Serif")],
        ["RICHEDIT", None, 132, (5, 5, 340, 170), edit_style],
    ]
####################################
#
# Serial Control
#
class MySerialControl(activex.Control, serialModule.MSComm):
    # ActiveX wrapper around the MSComm control that forwards OnComm
    # events to the owning dialog.
    def __init__(self, parent):
        activex.Control.__init__(self)
        serialModule.MSComm.__init__(self)
        # The dialog that owns this control; receives event callbacks.
        self.parent = parent
    def OnComm(self):
        # Fired by the OCX on any comm event; delegate to the dialog.
        self.parent.OnComm()
class TestSerDialog(dialog.Dialog):
    # Dialog hosting the MSComm OCX: echoes received serial data into a
    # rich-edit control and transmits typed keystrokes out the port.
    def __init__(self, *args):
        dialog.Dialog.__init__(*(self,)+args)
        self.olectl = None  # the serial OCX; created in OnInitDialog
    def OnComm(self):
        # Serial event callback: append any received bytes to the edit box.
        event = self.olectl.CommEvent
        if event == serialModule.OnCommConstants.comEvReceive:
            self.editwindow.ReplaceSel(self.olectl.Input)
    def OnKey(self, key):
        # Keystroke hook: transmit the typed character out the serial port.
        if self.olectl:
            self.olectl.Output = chr(key)
    def OnInitDialog(self):
        rc = dialog.Dialog.OnInitDialog(self)
        self.editwindow = self.GetDlgItem(132)
        self.editwindow.HookAllKeyStrokes(self.OnKey)
        # Create the serial OCX as a child control of this dialog.
        self.olectl = MySerialControl(self)
        try:
            self.olectl.CreateControl("OCX",
                                    win32con.WS_TABSTOP | win32con.WS_VISIBLE,
                                    (7,43,500,300), self._obj_, 131)
        except win32ui.error:
            self.MessageBox("The Serial Control could not be created")
            self.olectl = None
            self.EndDialog(win32con.IDCANCEL)
        if self.olectl:
            self.olectl.Settings = SERIAL_SETTINGS
            self.olectl.CommPort = SERIAL_PORT
            # Raise OnComm after every single received character.
            self.olectl.RThreshold = 1
            try:
                self.olectl.PortOpen = 1
            except pythoncom.com_error, details:
                print "Could not open the specified serial port - %s" % (details.excepinfo[2])
                self.EndDialog(win32con.IDCANCEL)
        return rc
    def OnDestroy(self, msg):
        # Best-effort close of the port before the window goes away.
        if self.olectl:
            try:
                self.olectl.PortOpen = 0
            except pythoncom.com_error, details:
                print "Error closing port - %s" % (details.excepinfo[2])
        return dialog.Dialog.OnDestroy(self, msg)
def test():
    """Build the dialog template and run the terminal dialog modally."""
    dlg = TestSerDialog(MakeDlgTemplate())
    dlg.DoModal()
if __name__ == "__main__":
    import demoutils
    # Only launch the demo when a usable Pythonwin GUI is present.
    if demoutils.NeedGoodGUI():
        test()
| 29.666667 | 156 | 0.692003 |
795a18cd9d9916678d5bffaaf081f614f2a7f26e | 1,990 | py | Python | sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/media/azure-mgmt-media/azure/mgmt/media/operations/__init__.py | JianpingChen/azure-sdk-for-python | 3072fc8c0366287fbaea1b02493a50259c3248a2 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._account_filters_operations import AccountFiltersOperations
from ._operations import Operations
from ._mediaservices_operations import MediaservicesOperations
from ._private_link_resources_operations import PrivateLinkResourcesOperations
from ._private_endpoint_connections_operations import PrivateEndpointConnectionsOperations
from ._locations_operations import LocationsOperations
from ._assets_operations import AssetsOperations
from ._asset_filters_operations import AssetFiltersOperations
from ._content_key_policies_operations import ContentKeyPoliciesOperations
from ._transforms_operations import TransformsOperations
from ._jobs_operations import JobsOperations
from ._streaming_policies_operations import StreamingPoliciesOperations
from ._streaming_locators_operations import StreamingLocatorsOperations
from ._live_events_operations import LiveEventsOperations
from ._live_outputs_operations import LiveOutputsOperations
from ._streaming_endpoints_operations import StreamingEndpointsOperations
# Public operation classes re-exported by this package.
__all__ = [
    'AccountFiltersOperations',
    'Operations',
    'MediaservicesOperations',
    'PrivateLinkResourcesOperations',
    'PrivateEndpointConnectionsOperations',
    'LocationsOperations',
    'AssetsOperations',
    'AssetFiltersOperations',
    'ContentKeyPoliciesOperations',
    'TransformsOperations',
    'JobsOperations',
    'StreamingPoliciesOperations',
    'StreamingLocatorsOperations',
    'LiveEventsOperations',
    'LiveOutputsOperations',
    'StreamingEndpointsOperations',
]
| 45.227273 | 94 | 0.779899 |
795a1a9a0fe929f68be1a5b83bbb027909722c55 | 3,017 | py | Python | fixture/contact.py | kate-moroz/python_work | 9d52c7d3fdfe6c1a4956a66ed74cb47420437276 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | kate-moroz/python_work | 9d52c7d3fdfe6c1a4956a66ed74cb47420437276 | [
"Apache-2.0"
] | null | null | null | fixture/contact.py | kate-moroz/python_work | 9d52c7d3fdfe6c1a4956a66ed74cb47420437276 | [
"Apache-2.0"
] | null | null | null | class ContactHelper:
def __init__(self, app):
self.app = app
def create(self, contact):
wd = self.app.wd
# init contact creation
if not wd.current_url.endswith("/edit.php"):
wd.find_element_by_link_text("add new").click()
# fill contact form
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.fname)
wd.find_element_by_name("middlename").click()
wd.find_element_by_name("middlename").clear()
wd.find_element_by_name("middlename").send_keys(contact.mname)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.lname)
# submit contact creation
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.return_to_homepage(wd)
def return_to_homepage(self, wd):
wd = self.app.wd
if not(wd.current_url.endswith("/index.php") and len(wd.find_elements_by_name("searchstring")) > 0):
wd.find_element_by_link_text("home").click()
def delete_first_contact(self):
wd = self.app.wd
self.return_to_homepage(wd)
# select first contact
wd.find_element_by_name("selected[]").click()
# submit deletion
wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
# confirm deletion
wd.switch_to_alert().accept()
self.return_to_homepage(wd)
def edit_first_contact(self):
wd = self.app.wd
self.enter_editing_mode()
# deleting some information
wd.find_element_by_name("address").click()
wd.find_element_by_name("address").clear()
# confirm update
wd.find_element_by_name("update").click()
def modify_first_contact(self, new_contact_data):
wd = self.app.wd
self.return_to_homepage(wd)
self.enter_editing_mode()
self.fill_contact_form(new_contact_data)
# submit modification
wd.find_element_by_name("update").click()
self.return_to_homepage(wd)
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.fname)
self.change_field_value("middlename", contact.mname)
self.change_field_value("lastname", contact.lname)
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def enter_editing_mode(self):
wd = self.app.wd
wd.find_element_by_xpath("//*[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
def count(self):
wd = self.app.wd
return len(wd.find_elements_by_name("selected[]"))
| 37.7125 | 108 | 0.649652 |
795a1ab64cb59c04ac1064a932fc7e303ccac1c4 | 271 | py | Python | pyAitu/models/update/form_submitted.py | waihislam/pyAitu | 6e7c3bfa7454d6ae91ba998017cf772bb6a25770 | [
"Apache-2.0"
] | 11 | 2019-09-26T05:09:20.000Z | 2021-05-20T10:45:28.000Z | pyAitu/models/update/form_submitted.py | waihislam/pyAitu | 6e7c3bfa7454d6ae91ba998017cf772bb6a25770 | [
"Apache-2.0"
] | 8 | 2019-10-08T13:41:09.000Z | 2020-06-25T11:35:56.000Z | pyAitu/models/update/form_submitted.py | waihislam/pyAitu | 6e7c3bfa7454d6ae91ba998017cf772bb6a25770 | [
"Apache-2.0"
] | 4 | 2020-02-18T09:02:37.000Z | 2020-11-25T06:11:05.000Z | from .form_update import FormUpdate
class FormSubmitted(FormUpdate):
    """Form-submission update carrying optional metadata payloads."""

    def __init__(self, json_object):
        super().__init__(json_object)
        # Both keys are optional in the payload; .get yields None if absent.
        payload = json_object
        self.metadata = payload.get("metadata")
        self.additional_metadata = payload.get("additionalMetadata")
| 30.111111 | 72 | 0.730627 |
795a1ab72288c1f4d34500768098a1bd58fb6522 | 41,089 | py | Python | src/sage/functions/hypergeometric.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/functions/hypergeometric.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | src/sage/functions/hypergeometric.py | LaisRast/sage | 5fb2a6ea44400e469caee82748cf863ca0c5f724 | [
"BSL-1.0"
] | null | null | null | r"""
Hypergeometric Functions
This module implements manipulation of infinite hypergeometric series
represented in standard parametric form (as `\,_pF_q` functions).
AUTHORS:
- Fredrik Johansson (2010): initial version
- Eviatar Bach (2013): major changes
EXAMPLES:
Examples from :trac:`9908`::
sage: maxima('integrate(bessel_j(2, x), x)').sage()
1/24*x^3*hypergeometric((3/2,), (5/2, 3), -1/4*x^2)
sage: sum(((2*I)^x/(x^3 + 1)*(1/4)^x), x, 0, oo)
hypergeometric((1, 1, -1/2*I*sqrt(3) - 1/2, 1/2*I*sqrt(3) - 1/2),...
(2, -1/2*I*sqrt(3) + 1/2, 1/2*I*sqrt(3) + 1/2), 1/2*I)
sage: sum((-1)^x/((2*x + 1)*factorial(2*x + 1)), x, 0, oo)
hypergeometric((1/2,), (3/2, 3/2), -1/4)
Simplification (note that ``simplify_full`` does not yet call
``simplify_hypergeometric``)::
sage: hypergeometric([-2], [], x).simplify_hypergeometric()
x^2 - 2*x + 1
sage: hypergeometric([], [], x).simplify_hypergeometric()
e^x
sage: a = hypergeometric((hypergeometric((), (), x),), (),
....: hypergeometric((), (), x))
sage: a.simplify_hypergeometric()
1/((-e^x + 1)^e^x)
sage: a.simplify_hypergeometric(algorithm='sage')
1/((-e^x + 1)^e^x)
Equality testing::
sage: bool(hypergeometric([], [], x).derivative(x) ==
....: hypergeometric([], [], x)) # diff(e^x, x) == e^x
True
sage: bool(hypergeometric([], [], x) == hypergeometric([], [1], x))
False
Computing terms and series::
sage: var('z')
z
sage: hypergeometric([], [], z).series(z, 0)
Order(1)
sage: hypergeometric([], [], z).series(z, 1)
1 + Order(z)
sage: hypergeometric([], [], z).series(z, 2)
1 + 1*z + Order(z^2)
sage: hypergeometric([], [], z).series(z, 3)
1 + 1*z + 1/2*z^2 + Order(z^3)
sage: hypergeometric([-2], [], z).series(z, 3)
1 + (-2)*z + 1*z^2
sage: hypergeometric([-2], [], z).series(z, 6)
1 + (-2)*z + 1*z^2
sage: hypergeometric([-2], [], z).series(z, 6).is_terminating_series()
True
sage: hypergeometric([-2], [], z).series(z, 2)
1 + (-2)*z + Order(z^2)
sage: hypergeometric([-2], [], z).series(z, 2).is_terminating_series()
False
sage: hypergeometric([1], [], z).series(z, 6)
1 + 1*z + 1*z^2 + 1*z^3 + 1*z^4 + 1*z^5 + Order(z^6)
sage: hypergeometric([], [1/2], -z^2/4).series(z, 11)
1 + (-1/2)*z^2 + 1/24*z^4 + (-1/720)*z^6 + 1/40320*z^8 +...
(-1/3628800)*z^10 + Order(z^11)
sage: hypergeometric([1], [5], x).series(x, 5)
1 + 1/5*x + 1/30*x^2 + 1/210*x^3 + 1/1680*x^4 + Order(x^5)
sage: sum(hypergeometric([1, 2], [3], 1/3).terms(6)).n()
1.29788359788360
sage: hypergeometric([1, 2], [3], 1/3).n()
1.29837194594696
sage: hypergeometric([], [], x).series(x, 20)(x=1).n() == e.n()
True
Plotting::
sage: f(x) = hypergeometric([1, 1], [3, 3, 3], x)
sage: plot(f, x, -30, 30)
Graphics object consisting of 1 graphics primitive
sage: g(x) = hypergeometric([x], [], 2)
sage: complex_plot(g, (-1, 1), (-1, 1))
Graphics object consisting of 1 graphics primitive
Numeric evaluation::
sage: hypergeometric([1], [], 1/10).n() # geometric series
1.11111111111111
sage: hypergeometric([], [], 1).n() # e
2.71828182845905
sage: hypergeometric([], [], 3., hold=True)
hypergeometric((), (), 3.00000000000000)
sage: hypergeometric([1, 2, 3], [4, 5, 6], 1/2).n()
1.02573619590134
sage: hypergeometric([1, 2, 3], [4, 5, 6], 1/2).n(digits=30)
1.02573619590133865036584139535
sage: hypergeometric([5 - 3*I], [3/2, 2 + I, sqrt(2)], 4 + I).n()
5.52605111678803 - 7.86331357527540*I
sage: hypergeometric((10, 10), (50,), 2.)
-1705.75733163554 - 356.749986056024*I
Conversions::
sage: maxima(hypergeometric([1, 1, 1], [3, 3, 3], x))
hypergeometric([1,1,1],[3,3,3],_SAGE_VAR_x)
sage: hypergeometric((5, 4), (4, 4), 3)._sympy_()
hyper((5, 4), (4, 4), 3)
sage: hypergeometric((5, 4), (4, 4), 3)._mathematica_init_()
'HypergeometricPFQ[{5,4},{4,4},3]'
Arbitrary level of nesting for conversions::
sage: maxima(nest(lambda y: hypergeometric([y], [], x), 3, 1))
1/(1-_SAGE_VAR_x)^(1/(1-_SAGE_VAR_x)^(1/(1-_SAGE_VAR_x)))
sage: maxima(nest(lambda y: hypergeometric([y], [3], x), 3, 1))._sage_()
hypergeometric((hypergeometric((hypergeometric((1,), (3,), x),), (3,),...
x),), (3,), x)
sage: nest(lambda y: hypergeometric([y], [], x), 3, 1)._mathematica_init_()
'HypergeometricPFQ[{HypergeometricPFQ[{HypergeometricPFQ[{1},{},x]},...
The confluent hypergeometric functions can arise as solutions to second-order
differential equations (example from `here <http://ask.sagemath.org/question/
1168/how-can-one-use-maxima-kummer-confluent-functions>`_)::
sage: var('m')
m
sage: y = function('y')(x)
sage: desolve(diff(y, x, 2) + 2*x*diff(y, x) - 4*m*y, y,
....: contrib_ode=true, ivar=x)
[y(x) == _K1*hypergeometric_M(-m, 1/2, -x^2) +...
_K2*hypergeometric_U(-m, 1/2, -x^2)]
Series expansions of confluent hypergeometric functions::
sage: hypergeometric_M(2, 2, x).series(x, 3)
1 + 1*x + 1/2*x^2 + Order(x^3)
sage: hypergeometric_U(2, 2, x).series(x == 3, 100).subs(x=1).n()
0.403652637676806
sage: hypergeometric_U(2, 2, 1).n()
0.403652637676806
"""
# ****************************************************************************
# Copyright (C) 2010 Fredrik Johansson <fredrik.johansson@gmail.com>
# Copyright (C) 2013 Eviatar Bach <eviatarbach@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
from sage.rings.rational_field import QQ
from sage.rings.infinity import Infinity
from sage.arith.all import binomial, rising_factorial, factorial
from sage.symbolic.constants import pi
from sage.symbolic.function import BuiltinFunction
from sage.symbolic.ring import SR
from sage.structure.element import get_coercion_model
from sage.misc.latex import latex
from sage.misc.misc_c import prod
from sage.libs.mpmath import utils as mpmath_utils
from sage.symbolic.expression import Expression
from sage.calculus.functional import derivative
from functools import reduce
from .gamma import gamma
from .other import sqrt, real_part
from .log import exp, log
from .hyperbolic import cosh, sinh
from .error import erf
def rational_param_as_tuple(x):
    r"""
    Utility function for converting rational `\,_pF_q` parameters to
    tuples (which mpmath handles more efficiently).

    A rational number is returned as an integer ``(numerator,
    denominator)`` pair; any other input is passed through unchanged.

    EXAMPLES::

        sage: from sage.functions.hypergeometric import rational_param_as_tuple
        sage: rational_param_as_tuple(1/2)
        (1, 2)
        sage: rational_param_as_tuple(3)
        3
        sage: rational_param_as_tuple(pi)
        pi
    """
    # Unwrap a symbolic expression to its underlying Python object, if any.
    try:
        x = x.pyobject()
    except AttributeError:
        pass
    # Elements of QQ expose parent()/numer()/denom(); anything that lacks
    # them simply falls through and is returned as-is.
    try:
        if x.parent() is QQ:
            return int(x.numer()), int(x.denom())
    except AttributeError:
        pass
    return x
class Hypergeometric(BuiltinFunction):
    r"""
    Represent a (formal) generalized infinite hypergeometric series.

    It is defined as

    .. MATH::

        \,_pF_q(a_1, \ldots, a_p; b_1, \ldots, b_q; z)
        = \sum_{n=0}^{\infty} \frac{(a_1)_n \cdots (a_p)_n}{(b_1)_n
        \cdots(b_q)_n} \, \frac{z^n}{n!},

    where `(x)_n` is the rising factorial.
    """
    def __init__(self):
        """
        Initialize class.

        EXAMPLES::

            sage: maxima(hypergeometric)
            hypergeometric

        TESTS::

            sage: F = hypergeometric([-4,2],[1],1)  # optional - maple
            sage: G = maple(F); G                   # optional - maple
            hypergeom([-4, 2],[1],1)
            sage: G.simplify()                      # optional - maple
            0
        """
        BuiltinFunction.__init__(self, 'hypergeometric', nargs=3,
                                 conversions={'mathematica':
                                              'HypergeometricPFQ',
                                              'maxima': 'hypergeometric',
                                              'maple': 'hypergeom',
                                              'sympy': 'hyper',
                                              'fricas': 'hypergeometricF'})

    def __call__(self, a, b, z, **kwargs):
        """
        Return symbolic hypergeometric function expression.

        INPUT:

        - ``a`` -- a list or tuple of parameters
        - ``b`` -- a list or tuple of parameters
        - ``z`` -- a number or symbolic expression

        EXAMPLES::

            sage: hypergeometric([], [], 1)
            hypergeometric((), (), 1)
            sage: hypergeometric([], [1], 1)
            hypergeometric((), (1,), 1)
            sage: hypergeometric([2, 3], [1], 1)
            hypergeometric((2, 3), (1,), 1)
            sage: hypergeometric([], [], x)
            hypergeometric((), (), x)
            sage: hypergeometric([x], [], x^2)
            hypergeometric((x,), (), x^2)

        The only simplification that is done automatically is returning 1
        if ``z`` is 0. For other simplifications use the
        ``simplify_hypergeometric`` method.
        """
        # The parameter lists are frozen into symbolic tuples so the
        # resulting expression is hashable and comparable.
        return BuiltinFunction.__call__(self,
                                        SR._force_pyobject(a),
                                        SR._force_pyobject(b),
                                        z, **kwargs)

    def _print_latex_(self, a, b, z):
        r"""
        TESTS::

            sage: latex(hypergeometric([1, 1], [2], -1))
            \,_2F_1\left(\begin{matrix} 1,1 \\ 2 \end{matrix} ; -1 \right)
        """
        aa = ",".join(latex(c) for c in a)
        bb = ",".join(latex(c) for c in b)
        z = latex(z)
        return (r"\,_{}F_{}\left(\begin{{matrix}} {} \\ {} \end{{matrix}} ; "
                r"{} \right)").format(len(a), len(b), aa, bb, z)

    def _eval_(self, a, b, z, **kwargs):
        """
        EXAMPLES::

            sage: hypergeometric([], [], 0)
            1
        """
        if not isinstance(a, tuple) or not isinstance(b, tuple):
            raise TypeError("The first two parameters must be of type list")
        if not isinstance(z, Expression) and z == 0:  # Expression is excluded
            return Integer(1)                         # to avoid call to Maxima

    def _evalf_try_(self, a, b, z):
        """
        Call :meth:`_evalf_` if one of the arguments is numerical and none
        of the arguments are symbolic.

        OUTPUT:

        - ``None`` if we didn't succeed to call :meth:`_evalf_` or if
          the input wasn't suitable for it.

        - otherwise, a numerical value for the function.

        EXAMPLES::

            sage: hypergeometric._evalf_try_((1.0,), (2.0,), 3.0)
            6.36184564106256
            sage: hypergeometric._evalf_try_((1.0, 1), (), 3.0)
            -0.0377593153441588 + 0.750349833788561*I
            sage: hypergeometric._evalf_try_((1, 1), (), 3)   # exact input
            sage: hypergeometric._evalf_try_((x,), (), 1.0)   # symbolic
            sage: hypergeometric._evalf_try_(1.0, 2.0, 3.0)   # not tuples
        """
        # We need to override this for hypergeometric functions since
        # the first 2 arguments are tuples and the generic _evalf_try_
        # cannot handle that.
        if not isinstance(a, tuple) or not isinstance(b, tuple):
            return None
        args = list(a) + list(b) + [z]
        if any(self._is_numerical(x) for x in args):
            if not any(isinstance(x, Expression) for x in args):
                p = get_coercion_model().common_parent(*args)
                return self._evalf_(a, b, z, parent=p)

    def _evalf_(self, a, b, z, parent, algorithm=None):
        """
        TESTS::

            sage: hypergeometric([1, 1], [2], -1).n()
            0.693147180559945
            sage: hypergeometric([], [], RealField(100)(1))
            2.7182818284590452353602874714
        """
        if not isinstance(a, tuple) or not isinstance(b, tuple):
            raise TypeError("The first two parameters must be of type list")
        from mpmath import hyper
        # Rational parameters go to mpmath as (numerator, denominator)
        # tuples, which it handles more efficiently.
        aa = [rational_param_as_tuple(c) for c in a]
        bb = [rational_param_as_tuple(c) for c in b]
        return mpmath_utils.call(hyper, aa, bb, z, parent=parent)

    def _tderivative_(self, a, b, z, *args, **kwargs):
        """
        EXAMPLES::

            sage: hypergeometric([1/3, 2/3], [5], x^2).diff(x)
            4/45*x*hypergeometric((4/3, 5/3), (6,), x^2)
            sage: hypergeometric([1, 2], [x], 2).diff(x)
            Traceback (most recent call last):
            ...
            NotImplementedError: derivative of hypergeometric function with...
            respect to parameters. Try calling .simplify_hypergeometric()...
            first.
            sage: hypergeometric([1/3, 2/3], [5], 2).diff(x)
            0
        """
        diff_param = kwargs['diff_param']
        if diff_param in hypergeometric(a, b, 1).variables():  # ignore z
            raise NotImplementedError("derivative of hypergeometric function "
                                      "with respect to parameters. Try calling"
                                      " .simplify_hypergeometric() first.")
        # d/dx pFq(a; b; z) = (prod(a)/prod(b)) * z' * pFq(a+1; b+1; z)
        t = (reduce(lambda x, y: x * y, a, 1) *
             reduce(lambda x, y: x / y, b, Integer(1)))
        return (t * derivative(z, diff_param) *
                hypergeometric([c + 1 for c in a], [c + 1 for c in b], z))

    class EvaluationMethods:
        def _fast_callable_(self, a, b, z, etb):
            """
            Override the ``fast_callable`` method.

            OUTPUT:

            A :class:`~sage.ext.fast_callable.ExpressionCall` representing the
            hypergeometric function in the expression tree.

            EXAMPLES::

                sage: h = hypergeometric([], [], x)
                sage: from sage.ext.fast_callable import ExpressionTreeBuilder
                sage: etb = ExpressionTreeBuilder(vars=['x'])
                sage: h._fast_callable_(etb)
                {hypergeometric((), (), x)}(v_0)

                sage: var('x, y')
                (x, y)
                sage: f = fast_callable(hypergeometric([y], [], x), vars=[x, y])
                sage: f(3, 4)
                hypergeometric((4,), (), 3)
            """
            return etb.call(self, *map(etb.var, etb._vars))

        def sorted_parameters(self, a, b, z):
            """
            Return with parameters sorted in a canonical order.

            EXAMPLES::

                sage: hypergeometric([2, 1, 3], [5, 4],
                ....:                1/2).sorted_parameters()
                hypergeometric((1, 2, 3), (4, 5), 1/2)
            """
            return hypergeometric(sorted(a), sorted(b), z)

        def eliminate_parameters(self, a, b, z):
            """
            Eliminate repeated parameters by pairwise cancellation of identical
            terms in ``a`` and ``b``.

            EXAMPLES::

                sage: hypergeometric([1, 1, 2, 5], [5, 1, 4],
                ....:                1/2).eliminate_parameters()
                hypergeometric((1, 2), (4,), 1/2)
                sage: hypergeometric([x], [x], x).eliminate_parameters()
                hypergeometric((), (), x)
                sage: hypergeometric((5, 4), (4, 4), 3).eliminate_parameters()
                hypergeometric((5,), (4,), 3)
            """
            aa = list(a)  # tuples are immutable
            bb = list(b)
            p = pp = len(aa)
            q = qq = len(bb)
            i = 0
            while i < qq and aa:
                bbb = bb[i]
                if bbb in aa:
                    aa.remove(bbb)
                    bb.remove(bbb)
                    pp -= 1
                    qq -= 1
                else:
                    i += 1
            if (pp, qq) != (p, q):
                return hypergeometric(aa, bb, z)
            return self

        def is_termwise_finite(self, a, b, z):
            """
            Determine whether all terms of ``self`` are finite.

            Any infinite terms or ambiguous terms beyond the first
            zero, if one exists, are ignored.

            Ambiguous cases (where a term is the product of both zero
            and an infinity) are not considered finite.

            EXAMPLES::

                sage: hypergeometric([2], [3, 4], 5).is_termwise_finite()
                True
                sage: hypergeometric([2], [-3, 4], 5).is_termwise_finite()
                False
                sage: hypergeometric([-2], [-3, 4], 5).is_termwise_finite()
                True
                sage: hypergeometric([-3], [-3, 4],
                ....:                5).is_termwise_finite()  # ambiguous
                False

                sage: hypergeometric([0], [-1], 5).is_termwise_finite()
                True
                sage: hypergeometric([0], [0],
                ....:                5).is_termwise_finite()  # ambiguous
                False

                sage: hypergeometric([1], [2], Infinity).is_termwise_finite()
                False
                sage: (hypergeometric([0], [0], Infinity)
                ....:  .is_termwise_finite())  # ambiguous
                False
                sage: (hypergeometric([0], [], Infinity)
                ....:  .is_termwise_finite())  # ambiguous
                False
            """
            if z == 0:
                return 0 not in b
            # FIX: this check was accidentally duplicated; one copy suffices.
            if abs(z) == Infinity:
                return False
            for bb in b:
                if bb in ZZ and bb <= 0:
                    # A nonpositive-integer denominator parameter is only
                    # harmless if the series terminates first (some aa hits
                    # zero strictly between bb and 0).
                    if any((aa in ZZ) and (bb < aa <= 0) for aa in a):
                        continue
                    return False
            return True

        def is_terminating(self, a, b, z):
            r"""
            Determine whether the series represented by ``self`` terminates
            after a finite number of terms.

            This happens if any of the
            numerator parameters are nonnegative integers (with no
            preceding nonnegative denominator parameters), or `z = 0`.

            If terminating, the series represents a polynomial of `z`.

            EXAMPLES::

                sage: hypergeometric([1, 2], [3, 4], x).is_terminating()
                False
                sage: hypergeometric([1, -2], [3, 4], x).is_terminating()
                True
                sage: hypergeometric([1, -2], [], x).is_terminating()
                True
            """
            if z == 0:
                return True
            for aa in a:
                if (aa in ZZ) and (aa <= 0):
                    return self.is_termwise_finite()
            return False

        def is_absolutely_convergent(self, a, b, z):
            r"""
            Determine whether ``self`` converges absolutely as an infinite
            series. ``False`` is returned if not all terms are finite.

            EXAMPLES:

            Degree giving infinite radius of convergence::

                sage: hypergeometric([2, 3], [4, 5],
                ....:                6).is_absolutely_convergent()
                True
                sage: hypergeometric([2, 3], [-4, 5],
                ....:                6).is_absolutely_convergent()  # undefined
                False
                sage: (hypergeometric([2, 3], [-4, 5], Infinity)
                ....:  .is_absolutely_convergent())  # undefined
                False

            Ordinary geometric series (unit radius of convergence)::

                sage: hypergeometric([1], [], 1/2).is_absolutely_convergent()
                True
                sage: hypergeometric([1], [], 2).is_absolutely_convergent()
                False
                sage: hypergeometric([1], [], 1).is_absolutely_convergent()
                False
                sage: hypergeometric([1], [], -1).is_absolutely_convergent()
                False
                sage: hypergeometric([1], [], -1).n()  # Sum still exists
                0.500000000000000

            Degree `p = q+1` (unit radius of convergence)::

                sage: hypergeometric([2, 3], [4], 6).is_absolutely_convergent()
                False
                sage: hypergeometric([2, 3], [4], 1).is_absolutely_convergent()
                False
                sage: hypergeometric([2, 3], [5], 1).is_absolutely_convergent()
                False
                sage: hypergeometric([2, 3], [6], 1).is_absolutely_convergent()
                True
                sage: hypergeometric([-2, 3], [4],
                ....:                5).is_absolutely_convergent()
                True
                sage: hypergeometric([2, -3], [4],
                ....:                5).is_absolutely_convergent()
                True
                sage: hypergeometric([2, -3], [-4],
                ....:                5).is_absolutely_convergent()
                True
                sage: hypergeometric([2, -3], [-1],
                ....:                5).is_absolutely_convergent()
                False

            Degree giving zero radius of convergence::

                sage: hypergeometric([1, 2, 3], [4],
                ....:                2).is_absolutely_convergent()
                False
                sage: hypergeometric([1, 2, 3], [4],
                ....:                1/2).is_absolutely_convergent()
                False
                sage: (hypergeometric([1, 2, 3], [4], 1/2)
                ....:  .is_absolutely_convergent())  # polynomial
                True
            """
            p, q = len(a), len(b)
            if not self.is_termwise_finite():
                return False
            # p <= q: entire function, converges everywhere.
            if p <= q:
                return True
            # Terminating series are polynomials, hence always convergent.
            if self.is_terminating():
                return True
            if p == q + 1:
                # Unit radius of convergence; on the boundary the series
                # converges absolutely iff Re(sum(b) - sum(a)) > 0.
                if abs(z) < 1:
                    return True
                if abs(z) == 1:
                    if real_part(sum(b) - sum(a)) > 0:
                        return True
            return False

        def terms(self, a, b, z, n=None):
            """
            Generate the terms of ``self`` (optionally only ``n`` terms).

            EXAMPLES::

                sage: list(hypergeometric([-2, 1], [3, 4], x).terms())
                [1, -1/6*x, 1/120*x^2]
                sage: list(hypergeometric([-2, 1], [3, 4], x).terms(2))
                [1, -1/6*x]
                sage: list(hypergeometric([-2, 1], [3, 4], x).terms(0))
                []
            """
            if n is None:
                n = Infinity
            t = Integer(1)
            k = 1
            while k <= n:
                yield t
                # Build the next term from the current one via the ratio
                # (a_1+k-1)...(a_p+k-1) / ((b_1+k-1)...(b_q+k-1)) * z / k.
                for aa in a:
                    t *= (aa + k - 1)
                for bb in b:
                    t /= (bb + k - 1)
                t *= z
                if t == 0:
                    break
                t /= k
                k += 1

        def deflated(self, a, b, z):
            r"""
            Rewrite as a linear combination of functions of strictly lower
            degree by eliminating all parameters ``a[i]`` and ``b[j]`` such
            that ``a[i]`` = ``b[i]`` + ``m`` for nonnegative integer ``m``.

            EXAMPLES::

                sage: x = hypergeometric([6, 1], [3, 4, 5], 10)
                sage: y = x.deflated()
                sage: y
                1/252*hypergeometric((4,), (7, 8), 10)
                 + 1/12*hypergeometric((3,), (6, 7), 10)
                 + 1/2*hypergeometric((2,), (5, 6), 10)
                 + hypergeometric((1,), (4, 5), 10)
                sage: x.n(); y.n()
                2.87893612686782
                2.87893612686782

                sage: x = hypergeometric([6, 7], [3, 4, 5], 10)
                sage: y = x.deflated()
                sage: y
                25/27216*hypergeometric((), (11,), 10)
                 + 25/648*hypergeometric((), (10,), 10)
                 + 265/504*hypergeometric((), (9,), 10)
                 + 181/63*hypergeometric((), (8,), 10)
                 + 19/3*hypergeometric((), (7,), 10)
                 + 5*hypergeometric((), (6,), 10)
                 + hypergeometric((), (5,), 10)
                sage: x.n(); y.n()
                63.0734110716969
                63.0734110716969
            """
            return sum(map(prod, self._deflated()))

        def _deflated(self, a, b, z):
            """
            Private helper to return list of deflated terms.

            EXAMPLES::

                sage: x = hypergeometric([5], [4], 3)
                sage: y = x.deflated()
                sage: y
                7/4*hypergeometric((), (), 3)
                sage: x.n(); y.n()
                35.1496896155784
                35.1496896155784
            """
            new = self.eliminate_parameters()
            aa = new.operands()[0].operands()
            bb = new.operands()[1].operands()
            for i, aaa in enumerate(aa):
                for j, bbb in enumerate(bb):
                    m = aaa - bbb
                    if m in ZZ and m > 0:
                        aaaa = aa[:i] + aa[i + 1:]
                        bbbb = bb[:j] + bb[j + 1:]
                        terms = []
                        for k in range(m + 1):
                            # TODO: could rewrite prefactors as recurrence
                            term = binomial(m, k)
                            for c in aaaa:
                                term *= rising_factorial(c, k)
                            for c in bbbb:
                                term /= rising_factorial(c, k)
                            term *= z ** k
                            term /= rising_factorial(aaa - m, k)
                            F = hypergeometric([c + k for c in aaaa],
                                               [c + k for c in bbbb], z)
                            unique = []
                            counts = []
                            # Merge coefficients of identical lower-degree
                            # functions arising from the recursion.
                            for c, f in F._deflated():
                                if f in unique:
                                    counts[unique.index(f)] += c
                                else:
                                    unique.append(f)
                                    counts.append(c)
                            Fterms = zip(counts, unique)
                            terms += [(term * termG, G) for (termG, G) in
                                      Fterms]
                        return terms
            return ((1, new),)
hypergeometric = Hypergeometric()
def closed_form(hyp):
    """
    Try to evaluate ``hyp`` in closed form using elementary
    (and other simple) functions.

    It may be necessary to call :meth:`Hypergeometric.deflated` first to
    find some closed forms.

    EXAMPLES::

        sage: from sage.functions.hypergeometric import closed_form
        sage: var('a b c z')
        (a, b, c, z)
        sage: closed_form(hypergeometric([1], [], 1 + z))
        -1/z
        sage: closed_form(hypergeometric([], [], 1 + z))
        e^(z + 1)
        sage: closed_form(hypergeometric([], [1/2], 4))
        cosh(4)
        sage: closed_form(hypergeometric([], [3/2], 4))
        1/4*sinh(4)
        sage: closed_form(hypergeometric([], [5/2], 4))
        3/16*cosh(4) - 3/64*sinh(4)
        sage: closed_form(hypergeometric([], [-3/2], 4))
        19/3*cosh(4) - 4*sinh(4)
        sage: closed_form(hypergeometric([-3, 1], [var('a')], z))
        -3*z/a + 6*z^2/((a + 1)*a) - 6*z^3/((a + 2)*(a + 1)*a) + 1
        sage: closed_form(hypergeometric([-3, 1/3], [-4], z))
        7/162*z^3 + 1/9*z^2 + 1/4*z + 1
        sage: closed_form(hypergeometric([], [], z))
        e^z
        sage: closed_form(hypergeometric([a], [], z))
        1/((-z + 1)^a)
        sage: closed_form(hypergeometric([1, 1, 2], [1, 1], z))
        (z - 1)^(-2)
        sage: closed_form(hypergeometric([2, 3], [1], x))
        -1/(x - 1)^3 + 3*x/(x - 1)^4
        sage: closed_form(hypergeometric([1/2], [3/2], -5))
        1/10*sqrt(5)*sqrt(pi)*erf(sqrt(5))
        sage: closed_form(hypergeometric([2], [5], 3))
        4
        sage: closed_form(hypergeometric([2], [5], 5))
        48/625*e^5 + 612/625
        sage: closed_form(hypergeometric([1/2, 7/2], [3/2], z))
        1/5*z^2/(-z + 1)^(5/2) + 2/3*z/(-z + 1)^(3/2) + 1/sqrt(-z + 1)
        sage: closed_form(hypergeometric([1/2, 1], [2], z))
        -2*(sqrt(-z + 1) - 1)/z
        sage: closed_form(hypergeometric([1, 1], [2], z))
        -log(-z + 1)/z
        sage: closed_form(hypergeometric([1, 1], [3], z))
        -2*((z - 1)*log(-z + 1)/z - 1)/z
        sage: closed_form(hypergeometric([1, 1, 1], [2, 2], x))
        hypergeometric((1, 1, 1), (2, 2), x)
    """
    # A terminating series is a polynomial in z: just sum the terms.
    if hyp.is_terminating():
        return sum(hyp.terms())
    new = hyp.eliminate_parameters()

    def _closed_form(hyp):
        # Evaluate a single (already deflated) pFq term; return the
        # term unchanged when no closed form is known.
        a, b, z = hyp.operands()
        a, b = a.operands(), b.operands()
        p, q = len(a), len(b)
        # pFq(...; ...; 0) = 1 for all parameters.
        if z == 0:
            return Integer(1)
        if p == q == 0:
            return exp(z)
        if p == 1 and q == 0:
            return (1 - z) ** (-a[0])
        if p == 0 and q == 1:
            # TODO: make this require only linear time
            def _0f1(b, z):
                F12 = cosh(2 * sqrt(z))
                F32 = sinh(2 * sqrt(z)) / (2 * sqrt(z))
                if 2 * b == 1:
                    return F12
                if 2 * b == 3:
                    return F32
                # Contiguous-parameter recurrences shift b toward the
                # base cases b = 1/2 and b = 3/2.
                if 2 * b > 3:
                    return ((b - 2) * (b - 1) / z * (_0f1(b - 2, z) -
                            _0f1(b - 1, z)))
                if 2 * b < 1:
                    return (_0f1(b + 1, z) + z / (b * (b + 1)) *
                            _0f1(b + 2, z))
                raise ValueError
            # Can evaluate 0F1 in terms of elementary functions when
            # the parameter is a half-integer
            if 2 * b[0] in ZZ and b[0] not in ZZ:
                return _0f1(b[0], z)
        # Confluent hypergeometric function
        if p == 1 and q == 1:
            aa, bb = a[0], b[0]
            if aa * 2 == 1 and bb * 2 == 3:
                t = sqrt(-z)
                return sqrt(pi) / 2 * erf(t) / t
            # Kummer's 1F1(1; 2; z) = (e^z - 1)/z.
            # FIX: this shortcut previously compared the operand *lists*
            # ``a``/``b`` (not the scalars) against integers, which is
            # always False, leaving the branch unreachable.
            if aa == 1 and bb == 2:
                return (exp(z) - 1) / z
            n, m = aa, bb
            if n in ZZ and m in ZZ and m > 0 and n > 0:
                rf = rising_factorial
                if m <= n:
                    return (exp(z) * sum(rf(m - n, k) * (-z) ** k /
                            factorial(k) / rf(m, k) for k in
                            range(n - m + 1)))
                else:
                    T = sum(rf(n - m + 1, k) * z ** k /
                            (factorial(k) * rf(2 - m, k)) for k in
                            range(m - n))
                    U = sum(rf(1 - n, k) * (-z) ** k /
                            (factorial(k) * rf(2 - m, k)) for k in
                            range(n))
                    return (factorial(m - 2) * rf(1 - m, n) *
                            z ** (1 - m) / factorial(n - 1) *
                            (T - exp(z) * U))
        if p == 2 and q == 1:
            R12 = QQ((1, 2))
            R32 = QQ((3, 2))

            def _2f1(a, b, c, z):
                """
                Evaluation of 2F1(a, b; c; z), assuming a, b, c positive
                integers or half-integers
                """
                if b == c:
                    return (1 - z) ** (-a)
                if a == c:
                    return (1 - z) ** (-b)
                if a == 0 or b == 0:
                    return Integer(1)
                if a > b:
                    a, b = b, a
                # Reduce b, then c, via contiguous-parameter recurrences
                # until one of the tabulated base cases below applies.
                if b >= 2:
                    F1 = _2f1(a, b - 1, c, z)
                    F2 = _2f1(a, b - 2, c, z)
                    q = (b - 1) * (z - 1)
                    return (((c - 2 * b + 2 + (b - a - 1) * z) * F1 +
                            (b - c - 1) * F2) / q)
                if c > 2:
                    # how to handle this case?
                    if a - c + 1 == 0 or b - c + 1 == 0:
                        raise NotImplementedError
                    F1 = _2f1(a, b, c - 1, z)
                    F2 = _2f1(a, b, c - 2, z)
                    r1 = (c - 1) * (2 - c - (a + b - 2 * c + 3) * z)
                    r2 = (c - 1) * (c - 2) * (1 - z)
                    q = (a - c + 1) * (b - c + 1) * z
                    return (r1 * F1 + r2 * F2) / q
                if (a, b, c) == (R12, 1, 2):
                    return (2 - 2 * sqrt(1 - z)) / z
                if (a, b, c) == (1, 1, 2):
                    return -log(1 - z) / z
                if (a, b, c) == (1, R32, R12):
                    return (1 + z) / (1 - z) ** 2
                if (a, b, c) == (1, R32, 2):
                    return 2 * (1 / sqrt(1 - z) - 1) / z
                if (a, b, c) == (R32, 2, R12):
                    return (1 + 3 * z) / (1 - z) ** 3
                if (a, b, c) == (R32, 2, 1):
                    return (2 + z) / (2 * (sqrt(1 - z) * (1 - z) ** 2))
                if (a, b, c) == (2, 2, 1):
                    return (1 + z) / (1 - z) ** 3
                raise NotImplementedError
            aa, bb = a
            cc, = b
            if z == 1:
                # Gauss's summation theorem for 2F1 at z = 1.
                return (gamma(cc) * gamma(cc - aa - bb) / gamma(cc - aa) /
                        gamma(cc - bb))
            if all((cf * 2) in ZZ and cf > 0 for cf in (aa, bb, cc)):
                try:
                    return _2f1(aa, bb, cc, z)
                except NotImplementedError:
                    pass
        return hyp
    # Deflate first, then attempt a closed form for each deflated term.
    return sum([coeff * _closed_form(pfq) for coeff, pfq in new._deflated()])
class Hypergeometric_M(BuiltinFunction):
    r"""
    The confluent hypergeometric function of the first kind,
    `y = M(a,b,z)`, is defined to be the solution to Kummer's differential
    equation

    .. MATH::

        zy'' + (b-z)y' - ay = 0.

    This is not the same as Kummer's `U`-hypergeometric function, though it
    satisfies the same DE that `M` does.

    .. warning::

        In the literature, both are called "Kummer confluent
        hypergeometric" functions.

    EXAMPLES::

        sage: hypergeometric_M(1, 1, 1)
        hypergeometric_M(1, 1, 1)
        sage: hypergeometric_M(1, 1, 1.)
        2.71828182845905
        sage: hypergeometric_M(1, 1, 1).n(70)
        2.7182818284590452354
        sage: hypergeometric_M(1, 1, 1).simplify_hypergeometric()
        e
        sage: hypergeometric_M(1, 1/2, x).simplify_hypergeometric()
        (-I*sqrt(pi)*x*erf(I*sqrt(-x))*e^x + sqrt(-x))/sqrt(-x)
        sage: hypergeometric_M(1, 3/2, 1).simplify_hypergeometric()
        1/2*sqrt(pi)*erf(1)*e
    """
    def __init__(self):
        r"""
        TESTS::

            sage: maxima(hypergeometric_M(1,1,x))
            kummer_m(1,1,_SAGE_VAR_x)
            sage: latex(hypergeometric_M(1,1,x))
            M\left(1, 1, x\right)
        """
        # Register interface conversions (Mathematica, Maple, Maxima,
        # FriCAS) and the LaTeX name so expressions round-trip cleanly.
        BuiltinFunction.__init__(self, 'hypergeometric_M', nargs=3,
                                 conversions={'mathematica':
                                              'Hypergeometric1F1',
                                              'maple': 'KummerM',
                                              'maxima': 'kummer_m',
                                              'fricas': 'kummerM'},
                                 latex_name='M')

    def _eval_(self, a, b, z, **kwargs):
        """
        TESTS::

            sage: (a,b)=var('a,b')
            sage: hypergeometric_M(a,b,0)
            1
        """
        # M(a, b, 0) = 1; only trigger on a concrete (non-symbolic) zero
        # so symbolic arguments stay unevaluated.
        if not isinstance(z, Expression) and z == 0:
            return Integer(1)
        # Returning None keeps the expression symbolic.
        return

    def _evalf_(self, a, b, z, parent, algorithm=None):
        """
        TESTS::

            sage: hypergeometric_M(1,1,1).n()
            2.71828182845905
        """
        # Numerical evaluation is delegated to mpmath's 1F1.
        from mpmath import hyp1f1
        return mpmath_utils.call(hyp1f1, a, b, z, parent=parent)

    def _derivative_(self, a, b, z, diff_param):
        """
        TESTS::

            sage: diff(hypergeometric_M(1,1,x),x,3)
            hypergeometric_M(4, 4, x)
            sage: diff(hypergeometric_M(x,1,1),x,3)
            Traceback (most recent call last):
            ...
            NotImplementedError: derivative of hypergeometric function with respect to parameters
        """
        # d/dz M(a, b, z) = (a/b) * M(a+1, b+1, z); derivatives with
        # respect to the parameters a, b are not implemented.
        if diff_param == 2:
            return (a / b) * hypergeometric_M(a + 1, b + 1, z)
        raise NotImplementedError('derivative of hypergeometric function '
                                  'with respect to parameters')

    class EvaluationMethods():
        def generalized(self, a, b, z):
            """
            Return as a generalized hypergeometric function.

            EXAMPLES::

                sage: var('a b z')
                (a, b, z)
                sage: hypergeometric_M(a, b, z).generalized()
                hypergeometric((a,), (b,), z)
            """
            # M(a, b, z) = 1F1(a; b; z).
            return hypergeometric([a], [b], z)
# Single shared instance of Kummer's M function.
hypergeometric_M = Hypergeometric_M()
class Hypergeometric_U(BuiltinFunction):
    r"""
    The confluent hypergeometric function of the second kind,
    `y = U(a,b,z)`, is defined to be the solution to Kummer's differential
    equation

    .. MATH::

        zy'' + (b-z)y' - ay = 0.

    This satisfies `U(a,b,z) \sim z^{-a}`, as
    `z\rightarrow \infty`, and is sometimes denoted
    `z^{-a}{}_2F_0(a,1+a-b;;-1/z)`. This is not the same as Kummer's
    `M`-hypergeometric function, denoted sometimes as
    `_1F_1(\alpha,\beta,z)`, though it satisfies the same DE that
    `U` does.

    .. warning::

        In the literature, both are called "Kummer confluent
        hypergeometric" functions.

    EXAMPLES::

        sage: hypergeometric_U(1, 1, 1)
        hypergeometric_U(1, 1, 1)
        sage: hypergeometric_U(1, 1, 1.)
        0.596347362323194
        sage: hypergeometric_U(1, 1, 1).n(70)
        0.59634736232319407434
        sage: hypergeometric_U(10^4, 1/3, 1).n()
        6.60377008885811e-35745
        sage: hypergeometric_U(2 + I, 2, 1).n()
        0.183481989942099 - 0.458685959185190*I
        sage: hypergeometric_U(1, 3, x).simplify_hypergeometric()
        (x + 1)/x^2
        sage: hypergeometric_U(1, 2, 2).simplify_hypergeometric()
        1/2
    """
    def __init__(self):
        r"""
        TESTS::

            sage: maxima(hypergeometric_U(1,1,x))
            kummer_u(1,1,_SAGE_VAR_x)
            sage: latex(hypergeometric_U(1,1,x))
            U\left(1, 1, x\right)
        """
        # Register interface conversions (Mathematica, Maple, Maxima,
        # FriCAS) and the LaTeX name so expressions round-trip cleanly.
        BuiltinFunction.__init__(self, 'hypergeometric_U', nargs=3,
                                 conversions={'mathematica':
                                              'HypergeometricU',
                                              'maple': 'KummerU',
                                              'maxima': 'kummer_u',
                                              'fricas': 'kummerU'},
                                 latex_name='U')

    def _eval_(self, a, b, z, **kwargs):
        # No automatic symbolic simplification; always stay unevaluated.
        return

    def _evalf_(self, a, b, z, parent, algorithm=None):
        """
        TESTS::

            sage: hypergeometric_U(1,1,1).n()
            0.596347362323194
        """
        # Numerical evaluation is delegated to mpmath's hyperu.
        from mpmath import hyperu
        return mpmath_utils.call(hyperu, a, b, z, parent=parent)

    def _derivative_(self, a, b, z, diff_param):
        """
        TESTS::

            sage: diff(hypergeometric_U(1,1,x),x,3)
            -6*hypergeometric_U(4, 4, x)
            sage: diff(hypergeometric_U(x,1,1),x,3)
            Traceback (most recent call last):
            ...
            NotImplementedError: derivative of hypergeometric function with respect to parameters
        """
        # d/dz U(a, b, z) = -a * U(a+1, b+1, z); derivatives with
        # respect to the parameters a, b are not implemented.
        if diff_param == 2:
            return -a * hypergeometric_U(a + 1, b + 1, z)
        raise NotImplementedError('derivative of hypergeometric function '
                                  'with respect to parameters')

    class EvaluationMethods():
        def generalized(self, a, b, z):
            """
            Return in terms of the generalized hypergeometric function.

            EXAMPLES::

                sage: var('a b z')
                (a, b, z)
                sage: hypergeometric_U(a, b, z).generalized()
                hypergeometric((a, a - b + 1), (), -1/z)/z^a
                sage: hypergeometric_U(1, 3, 1/2).generalized()
                2*hypergeometric((1, -1), (), -2)
                sage: hypergeometric_U(3, I, 2).generalized()
                1/8*hypergeometric((3, -I + 4), (), -1/2)
            """
            # U(a, b, z) = z^(-a) * 2F0(a, a-b+1; ; -1/z).
            return z ** (-a) * hypergeometric([a, a - b + 1], [], -z ** (-1))
# Single shared instance of Kummer's U function.
hypergeometric_U = Hypergeometric_U()
| 36.265666 | 97 | 0.467935 |
795a1aed2de71edd17ccb1cec491f3a1c9f49428 | 8,555 | py | Python | models/res_lstm_l.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 66 | 2018-07-06T07:07:56.000Z | 2021-07-30T07:59:54.000Z | models/res_lstm_l.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 7 | 2018-09-01T03:03:14.000Z | 2019-11-04T10:51:04.000Z | models/res_lstm_l.py | wangkenpu/rsrgan | 0efafbdb4008becd3a81650ca0237c660e976d4a | [
"MIT"
] | 15 | 2018-07-03T13:47:26.000Z | 2021-10-17T04:26:13.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 Ke Wang
"""Build the LSTM neural networks.
This module provides an example of definiting compute graph with tensorflow.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import tensorflow as tf
from tensorflow.contrib.layers import batch_norm, fully_connected
from tensorflow.contrib.layers import xavier_initializer, l2_regularizer
sys.path.append(os.path.dirname(sys.path[0]))
from utils.ops import leakyrelu
class RES_LSTM_L(object):
def __init__(self, lstm):
self.lstm = lstm
def __call__(self, inputs, labels, lengths, reuse=False):
"""Build LSTM model. On first pass will make vars."""
self.inputs = inputs
self.labels = labels
self.lengths = lengths
outputs = self.infer(reuse)
return outputs
def infer(self, reuse):
lstm = self.lstm
lstm_cell_size = 760
num_projection = 257
lstm_num_layer = 3
in_dims = self.inputs.get_shape().as_list()
assert len(in_dims) == 3
if lstm.cross_validation:
is_training = False
else:
is_training = True
with tf.variable_scope("g_model") as scope:
if reuse:
scope.reuse_variables()
if lstm.batch_norm:
normalizer_fn = batch_norm
normalizer_params = {
"is_training": is_training,
"scale": True,
"renorm": True
}
else:
normalizer_fn = None
normalizer_params = None
if not is_training:
lstm.keep_prob = 1.0
if not reuse:
print("****************************************")
print("*** Generator summary ***")
print("G inputs shape: {}".format(self.inputs.get_shape()))
sys.stdout.flush()
inputs = self.inputs
# h = fully_connected(inputs, num_projection,
# activation_fn=leakyrelu,
# normalizer_fn=normalizer_fn,
# normalizer_params=normalizer_params,
# weights_initializer=xavier_initializer(),
# biases_initializer=tf.zeros_initializer())
def lstm_cell():
return tf.contrib.rnn.LSTMCell(
lstm_cell_size, use_peepholes=True,
initializer=xavier_initializer(),
num_proj=num_projection,
forget_bias=1.0, state_is_tuple=True,
activation=tf.tanh,
reuse=reuse)
attn_cell = lstm_cell
if is_training and lstm.keep_prob < 1.0:
def attn_cell():
return tf.contrib.rnn.DropoutWrapper(
lstm_cell(), output_keep_prob=lstm.keep_prob)
with tf.variable_scope("lstm_cell_1"):
cell1 = attn_cell()
initial_states = cell1.zero_state(lstm.batch_size, tf.float32)
outputs1, states1 = tf.nn.dynamic_rnn(cell1, self.inputs,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=tf.float32,
time_major=False)
with tf.variable_scope("lstm_cell_2"):
inputs2 = outputs1 + self.inputs
cell2 = attn_cell()
initial_states = cell2.zero_state(lstm.batch_size, tf.float32)
outputs2, states2 = tf.nn.dynamic_rnn(cell2, inputs2,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=tf.float32,
time_major=False)
with tf.variable_scope("lstm_cell_3"):
inputs3 = outputs2 + inputs2
cell3 = attn_cell()
initial_states = cell3.zero_state(lstm.batch_size, tf.float32)
outputs3, states3 = tf.nn.dynamic_rnn(cell3, inputs3,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=tf.float32,
time_major=False)
with tf.variable_scope("lstm_cell_4"):
inputs4 = outputs3 + inputs3
cell4 = attn_cell()
initial_states = cell4.zero_state(lstm.batch_size, tf.float32)
outputs4, states4 = tf.nn.dynamic_rnn(cell4, inputs4,
sequence_length=self.lengths,
initial_state=initial_states,
dtype=tf.float32,
time_major=False)
# with tf.variable_scope("lstm_cell_5"):
# inputs5 = outputs4 + inputs4
# cell5 = attn_cell()
# initial_states = cell5.zero_state(lstm.batch_size, tf.float32)
# outputs5, states5 = tf.nn.dynamic_rnn(cell5, inputs5,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=tf.float32,
# time_major=False)
# with tf.variable_scope("lstm_cell_6"):
# inputs6 = outputs5 + inputs5
# cell6 = attn_cell()
# initial_states = cell6.zero_state(lstm.batch_size, tf.float32)
# outputs6, states6 = tf.nn.dynamic_rnn(cell6, inputs6,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=tf.float32,
# time_major=False)
# with tf.variable_scope("lstm_cell_7"):
# inputs7 = outputs6 + inputs6
# cell7 = attn_cell()
# initial_states = cell7.zero_state(lstm.batch_size, tf.float32)
# outputs7, states7 = tf.nn.dynamic_rnn(cell7, inputs7,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=tf.float32,
# time_major=False)
# with tf.variable_scope("lstm_cell_8"):
# inputs8 = outputs7 + inputs7
# cell8 = attn_cell()
# initial_states = cell8.zero_state(lstm.batch_size, tf.float32)
# outputs8, states8 = tf.nn.dynamic_rnn(cell8, inputs8,
# sequence_length=self.lengths,
# initial_state=initial_states,
# dtype=tf.float32,
# time_major=False)
if not reuse:
print("G hidden layer number is {}".format(lstm_num_layer))
print("G cell size is {}".format(lstm_cell_size))
print("G projection num is {}".format(num_projection))
sys.stdout.flush()
# Linear output
with tf.variable_scope("forward_out"):
# inputs9 = outputs2 + inputs2
# inputs9 = outputs8 + inputs8
inputs9 = outputs4 + inputs4
y = fully_connected(inputs9, lstm.output_dim,
activation_fn=None,
weights_initializer=xavier_initializer(),
biases_initializer=tf.zeros_initializer())
if not reuse:
print("G output shape: {}".format(y.get_shape()))
sys.stdout.flush()
return y
| 42.775 | 80 | 0.477382 |
795a1b5bf0319d3307bed2fda7f44e3ac7eef547 | 563 | py | Python | webapp/home/migrations/0013_alter_notice_notice_class.py | usegalaxy-au/galaxy-media-site | 3ec13e0f42591d2543768f252be037784933e271 | [
"MIT"
] | null | null | null | webapp/home/migrations/0013_alter_notice_notice_class.py | usegalaxy-au/galaxy-media-site | 3ec13e0f42591d2543768f252be037784933e271 | [
"MIT"
] | 36 | 2021-11-14T21:34:22.000Z | 2022-03-24T22:46:42.000Z | webapp/home/migrations/0013_alter_notice_notice_class.py | neoformit/galaxy-content-site | a6eeaf1893c12dd4d7d714fb823f43509a0a3893 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2022-02-25 01:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: updates the ``choices`` list and
    # help text of Notice.notice_class.  The choices (info/warning/
    # danger/none) presumably map to alert color classes in the
    # front-end -- confirm against the templates.  Do not edit the
    # generated operations by hand.

    dependencies = [
        ('home', '0012_alter_notice_notice_class'),
    ]

    operations = [
        migrations.AlterField(
            model_name='notice',
            name='notice_class',
            field=models.CharField(choices=[('info', 'info'), ('warning', 'warning'), ('danger', 'danger'), ('none', 'none')], default='', help_text='A style class to set a color schema for the notice.', max_length=16),
        ),
    ]
| 29.631579 | 219 | 0.612789 |
795a1dbe0431c9dc89274fa4cbb529410c97c566 | 432 | py | Python | feed/china_market.py | datar/finer | 204a806dd8c81759dedddc141eaea0a59bfe573f | [
"Apache-2.0"
] | null | null | null | feed/china_market.py | datar/finer | 204a806dd8c81759dedddc141eaea0a59bfe573f | [
"Apache-2.0"
] | null | null | null | feed/china_market.py | datar/finer | 204a806dd8c81759dedddc141eaea0a59bfe573f | [
"Apache-2.0"
] | null | null | null | company_people_page_url_pattern = 'http://www.cfachina.org/cfainfo/organbaseinfoOneServlet?organid=+%s+¤tPage=1&pageSize=2000&selectType=personinfo&all=undefined'
ALL_PEOPLE_PAGE_URL = 'http://www.cfachina.org/cfainfo/personinfoServlet?organid=¤tPage=1&pageSize=100000&selectType=check&cardid='
trunk_of_all_employee = 'http://www.cfachina.org/orc-report/api/practitioners/baseInfo?pageNo=%d&pageSize=%d&flag=&keyword='
| 108 | 168 | 0.826389 |
795a1de0d19a1f57d9957fb928e914d363ef48c2 | 39,461 | py | Python | pybea/client.py | areed1192/python-bureau-economic-analysis-api-client | 04c5572b924428822e9d4d8e1b2b7d1f7785ec11 | [
"MIT"
] | 10 | 2020-07-12T15:46:33.000Z | 2022-03-31T18:27:38.000Z | pybea/client.py | areed1192/python-bureau-economic-analysis-api-client | 04c5572b924428822e9d4d8e1b2b7d1f7785ec11 | [
"MIT"
] | 1 | 2020-12-23T09:46:58.000Z | 2021-03-13T00:18:33.000Z | pybea/client.py | areed1192/python-bureau-economic-analysis-api-client | 04c5572b924428822e9d4d8e1b2b7d1f7785ec11 | [
"MIT"
] | 5 | 2020-08-13T23:22:06.000Z | 2022-01-16T14:59:29.000Z | from typing import List
from typing import Union
import requests
class BureauEconomicAnalysisClient:
"""
### Overview
----
Represents the main BEA Client that
is used to access the different services.
"""
def __init__(self, api_key: str) -> None:
"""Initalize the Bureau of Economic Analysis Client.
### Arguments:
----
api_key (str):
Your Bureau of Economic Analysis API
Key.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
"""
# base URL for the SEC EDGAR browser
self.bea_url = "https://apps.bea.gov/api/data/"
self.api_key = api_key
self._format = "JSON"
if self.api_key:
self.authstate = True
else:
self.authstate = False
@property
def format(self) -> str:
"""Used to return the Content format currently set for request.
### Returns:
----
str:
If `JSON`, then data from API request will be sent back
as JSON data. If `XML` then the data will be returned back
as XML data.
"""
return self._format
@format.setter
def format(self, value) -> None:
"""Used to return the Content format currently set for request.
### Arguments:
----
value (str):
If `JSON`, then data from API request will be sent back
as JSON data. If `XML` then the data will be returned back
as XML data.
### Raises:
----
`ValueError`:
If the format is incorrect will raise a ValueError.
"""
if value.upper() not in ["JSON", "XML"]:
raise ValueError("Incorrect format, please set to either `XML` or `JSON`.")
self._format = value.upper()
def __repr__(self) -> str:
"""String representation of our BEA Class instance."""
# define the string representation
str_representation = (
f"<BureauEconomicAnalysis Client (authorized={self.authstate})>"
)
return str_representation
def _make_request(self, method: str, params: dict) -> Union[dict, str]:
    """Makes all the requests for the BEA Client.

    ### Arguments:
    ----
    method (str):
        The HTTP verb for the request, e.g. 'get' or 'post'.

    params (dict):
        Query parameters to send along with the request.

    ### Raises:
    ----
    `requests.ConnectionError`:
        Raised when the API responds with a non-OK status code.

    ### Returns:
    ----
    Union[dict, str]:
        Parsed JSON (dict) when the client format is `JSON`,
        raw text when it is `XML`.
    """
    # Use the session as a context manager so it is always closed,
    # even if preparing/sending the request raises.
    with requests.Session() as request_session:
        request_session.verify = True

        # Build and prepare the request against the BEA base URL.
        request_request = requests.Request(
            method=method.upper(), url=self.bea_url, params=params
        ).prepare()

        response: requests.Response = request_session.send(
            request=request_request
        )

    # FIX: removed a leftover debug ``print(response.url)`` -- it wrote
    # every request URL, including the caller's API key, to stdout.
    if response.ok and self._format == "JSON":
        return response.json()
    if response.ok and self._format == "XML":
        return response.text
    raise requests.ConnectionError()
def get_dataset_list(self) -> dict:
    """Returns a list of all the datasets available from the API.

    ### Returns:
    ----
    dict:
        A dictionary with a collection of datasets, their
        corresponding names, and their descriptions.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Grab the Dataset List.
        >>> dataset_list = bea_client.get_dataset_list()
    """
    # Build the query and delegate to the shared request helper.
    query = {
        "UserID": self.api_key,
        "method": "GETDATASETLIST",
        "ResultFormat": self._format,
    }
    return self._make_request(method="get", params=query)
def get_parameters_list(self, dataset_name: str) -> dict:
    """Retrieves a list of the parameters (required and optional) for
    a particular dataset.

    ### Arguments:
    ----
    dataset_name (str):
        The name of the dataset to describe.

    ### Returns:
    ----
    dict:
        A dictionary with a collection of dataset parameters, their
        corresponding names, and their descriptions.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Grab the Paramters List.
        >>> parameters_set_list = bea_client.get_parameters_list()
    """
    # Build the query and delegate to the shared request helper.
    query = {
        "userid": self.api_key,
        "method": "GETPARAMETERLIST",
        "datasetname": dataset_name,
        "resultformat": self._format,
    }
    return self._make_request(method="get", params=query)
def gdp_by_industry(
    self,
    year: List[str] = "ALL",
    industry: List[str] = "ALL",
    frequency: List[str] = "A,Q,M",
    table_id: List[str] = "ALL",
) -> dict:
    """Grabs the estimates of value added, gross output,
    intermediate inputs, KLEMS, and employment statistics by industry.

    ### Arguments:
    ----
    year (List[str], optional, Default='ALL'):
        List of year(s) of data to retrieve (ALL for All).

    industry (List[str], optional, Default='ALL'):
        List of industries to retrieve (ALL for All).

    frequency (str, optional, Default="A,Q,M"):
        `Q` for Quarterly data or `A` for Annual, `A,Q` for both.

    table_id (List[str], optional, Default='ALL'):
        The unique GDP by Industry table identifier (ALL for All).

    ### Returns:
    ----
    dict:
        A list of GDP figures for the industry specified.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Grab GDP Data by Industry.
        >>> national_income = bea_client.gdp_by_industry(
            industry='ALL',
            frequency=['A', 'Q'],
            year=['2011', '2012'],
            table_id=['1']
        )
    """
    # The API expects multiple values as one comma-delimited string;
    # plain strings pass through untouched.
    year = ",".join(year) if isinstance(year, list) else year
    frequency = ",".join(frequency) if isinstance(frequency, list) else frequency
    table_id = ",".join(table_id) if isinstance(table_id, list) else table_id
    industry = ",".join(industry) if isinstance(industry, list) else industry

    query = {
        "userid": self.api_key,
        "method": "GetData",
        "datasetname": "GDPbyIndustry",
        "year": year,
        "resultformat": self._format,
        "industry": industry,
        "frequency": frequency,
        "tableid": table_id,
    }
    return self._make_request(method="get", params=query)
def underlying_gdp_by_industry(
    self,
    year: List[str] = "ALL",
    industry: List[str] = "ALL",
    frequency: List[str] = "A,Q,M",
    table_id: List[str] = "ALL",
) -> dict:
    """The underlying gross domestic product by industry data are
    contained within a dataset called UnderlyingGDPbyIndustry.

    ### Overview:
    ----
    BEA's industry accounts are used extensively by policymakers and
    businesses to understand industry interactions, productivity trends,
    and the changing structure of the U.S. economy. The underlying
    GDP-by-industry dataset includes data in both current and chained (real)
    dollars. The dataset contains estimates for value added, gross output,
    and intermediate input statistics. This dataset is structurally similar
    to the GDPbyIndustry dataset, but contains additional industry detail.

    ### Arguments:
    ----
    year (List[str], optional, Default='ALL'):
        List of year(s) of data to retrieve (ALL for All).

    industry (List[str], optional, Default='ALL'):
        List of industries to retrieve (ALL for All).

    frequency (str, optional, Default="A,Q,M"):
        `Q` for Quarterly data or `A` for Annual, `A,Q` for both.

    table_id (List[str], optional, Default='ALL'):
        The unique GDP by Industry table identifier (ALL for All).

    ### Returns:
    ----
    dict:
        A list of GDP figures for the industry specified.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Quarterly Value Added by Industry data for all industries
        >>> # for years 2012 and 2013.
        >>> underlying_gdp_by_industry = bea_client.underlying_gdp_by_industry(
            industry='ALL',
            frequency=['Q'],
            year=['2012', '2013'],
            table_id='ALL'
        )
    """
    # FIX: normalize list arguments the same way the sibling
    # ``gdp_by_industry`` does.  The old code ran ",".join() over the
    # *default string* frequency ("A,Q,M" became "A,,,,Q,,,,M"), joined
    # any non-"ALL" year value even when it was a plain string, and
    # never joined list-valued ``industry``/``table_id``.
    if isinstance(year, list):
        year = ",".join(year)
    if isinstance(frequency, list):
        frequency = ",".join(frequency)
    if isinstance(industry, list):
        industry = ",".join(industry)
    if isinstance(table_id, list):
        table_id = ",".join(table_id)

    params = {
        "userid": self.api_key,
        "method": "GetData",
        "datasetname": "underlyingGDPbyIndustry",
        "year": year,
        "resultformat": self._format,
        "industry": industry,
        "frequency": frequency,
        "tableid": table_id,
    }
    return self._make_request(method="get", params=params)
def international_trade_services(
    self,
    type_of_service: str = "ALL",
    trade_direction: List[str] = "ALL",
    affiliation: List[str] = "ALL",
    year: List[str] = "ALL",
    area_or_country: List[str] = "AllCountries",
) -> dict:
    """This dataset contains annual data on U.S. international trade
    in services.

    ### Overview:
    ----
    These data are updated each October to reflect the International
    Transactions Accounts annual update released in June. BEA's statistics
    on services supplied through affiliates by multinational enterprises
    are not included in this dataset.

    ### Arguments:
    ----
    type_of_service (List[str], optional, Default='ALL'):
        The type of service being traded (e.g. travel, transport, or
        insurance services). Exactly one value other than "All" must be
        provided unless exactly one AreaOrCountry value other than "All"
        is requested.

    trade_direction (List[str], optional, Default='ALL'):
        One of: Exports, Imports, Balance (exports less imports), or
        SupplementalIns (supplemental detail on insurance transactions).

    affiliation (str, optional, Default='ALL'):
        One of: AllAffiliations, Unaffiliated, Affiliated, UsParents
        (U.S. parents' trade with their foreign affiliates), or
        UsAffiliates (U.S. affiliates' trade with their foreign parent
        groups).

    year (List[str], optional, Default='ALL'):
        List of year(s) of data to retrieve (ALL for All).

    area_or_country (List[str], optional, Default='AllCountries'):
        The counterparty area or country. "AllCountries" returns the
        total for all countries; "All" returns all data available by
        area and country. A list of countries can only be specified if
        a single TypeOfService is specified.

    ### Returns:
    ----
    dict:
        A list of international trade services.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Imports of services from Germany for 2014 and 2015.
        >>> international_trade_services = bea_client.international_trade_services(
            type_of_service='AllServiceTypes',
            trade_direction=['Imports'],
            year=['2014', '2015'],
            affiliation=['AllAffiliations'],
            area_or_country=['Germany']
        )
    """
    # FIX: normalize every potentially list-valued argument to the
    # comma-delimited form the API expects, matching the other dataset
    # methods.  The old code joined ``year`` whenever it was not "ALL"
    # (breaking plain strings such as "2014"), and never joined
    # list-valued type_of_service/trade_direction/affiliation.
    if isinstance(year, list):
        year = ",".join(year)
    if isinstance(type_of_service, list):
        type_of_service = ",".join(type_of_service)
    if isinstance(trade_direction, list):
        trade_direction = ",".join(trade_direction)
    if isinstance(affiliation, list):
        affiliation = ",".join(affiliation)
    if isinstance(area_or_country, list):
        area_or_country = ",".join(area_or_country)

    params = {
        "userid": self.api_key,
        "method": "GetData",
        "datasetname": "IntlServTrade",
        "year": year,
        "resultformat": self._format,
        "typeofservice": type_of_service,
        "tradedirection": trade_direction,
        "affiliation": affiliation,
        "areaorcountry": area_or_country,
    }
    return self._make_request(method="get", params=params)
def national_income_and_product_accounts(
    self,
    table_name: str,
    year: List[str] = "ALL",
    frequency: List[str] = "A,Q,M",
) -> dict:
    """Grabs the data from the National Income and Product Accounts.

    ### Overview:
    ----
    The NIPA dataset contains measures of the value and composition of
    U.S. production and the incomes generated in producing it. NIPA
    data is provided on a table basis; individual tables contain
    between fewer than 10 to more than 200 distinct data series.

    ### Arguments:
    ----
    table_name (str):
        The NIPA table identifier, e.g. 'T10101'.

    year (List[str], optional, Default='ALL'):
        List of year(s) of data to retrieve (ALL for All).

    frequency (str, optional, Default="A,Q,M"):
        `A` for Annual, `Q` for Quarterly, `M` for Monthly.

    ### Returns:
    ----
    dict:
        The requested NIPA series.

    ### Usage:
    ----
        >>> # Initalize the new Client.
        >>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)

        >>> # Grab National Income & Product Data.
        >>> national_income = bea_client.national_income_and_product_accounts(
            table_name='T10101',
            frequency=['A', 'Q'],
            year=['2011', '2012']
        )
    """
    # Serialize any list-valued arguments to the comma-delimited
    # strings the API expects.
    year = ",".join(year) if isinstance(year, list) else year
    table_name = ",".join(table_name) if isinstance(table_name, list) else table_name
    frequency = ",".join(frequency) if isinstance(frequency, list) else frequency

    query = {
        "userid": self.api_key,
        "method": "GetData",
        "datasetname": "NIPA",
        "year": year,
        "resultformat": self._format,
        "frequency": frequency,
        "tablename": table_name,
    }
    return self._make_request(method="get", params=query)
def national_income_and_product_accounts_detail(
self,
table_name: List[str] = "ALL",
year: List[str] = "ALL",
frequency: List[str] = "A,Q,M",
) -> dict:
"""This dataset contains underlying detail data from the
National Income and Product Accounts.
### Overview:
----
This dataset contains data from the National Income and Product Accounts
which include measures of the value and composition of U.S.production and
the incomes generated in producing it. NIPA data is provided on a table basis;
individual tables contain between fewer than 10 to more than 200 distinct data series.
### Arguments:
----
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
industry (List[str], optional, Default='ALL'):
List of industries to retrieve (ALL for All).
frequency (str, optional, Default="A,Q,M"):
`Q` for Quarterly data or `A` for Annual, "M" for monthly.
table_id (List[str], optional, Default='ALL'):
The unique GDP by Industry table identifier (ALL for All).
### Returns:
----
dict:
A list of GDP figures for the industry specified.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Grab Personal Conumption Expenditures, Current Dollars,
>>> # Annually, Quarterly and Monthly for all years.
>>> national_income = bea_client.national_income_and_product_accounts_detail(
table_name='U20305',
frequency=['A', 'Q'],
year=['2011', '2012']
)
>>> national_income
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(table_name, list):
table_name = ",".join(table_name)
if isinstance(frequency, list):
frequency = ",".join(frequency)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "NIUnderlyingDetail",
"year": year,
"resultformat": self._format,
"frequency": frequency,
"tablename": table_name,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def fixed_assets(
self, table_name: List[str] = "ALL", year: List[str] = "ALL"
) -> dict:
"""This dataset contains data from the standard set of Fixed Assets
tables as published online.
### Arguments:
----
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
table_name (List[str], optional, Default='ALL'):
The standard NIPA table identifier.
### Returns:
----
dict:
A list of GDP figures for the industry specified.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Grab Current-Cost Net Stock of Private Fixed Assets, Equipment, Structures,
>>> # and Intellectual Property Products by Type, for all years.
>>> fixed_assets = bea_client.fixed_assets(
table_name='FAAt201',
year=['2011', '2012']
)
>>> fixed_assets
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(table_name, list):
table_name = ",".join(table_name)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "FixedAssets",
"year": year,
"resultformat": self._format,
"tablename": table_name,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def direct_investments_and_multinational_enterprises(
self,
direction_of_investment: str,
classification: str,
series_id: int = "ALL",
year: List[str] = "ALL",
country: List[str] = "ALL",
industry: List[str] = "ALL",
footnotes: bool = True,
) -> dict:
"""Grabs one of two datasets from the Direct Investment
and Multinational Enterprises dataset.
### Overview:
----
This dataset contains the following statistics:
Direct Investment (DI)—income and financial transactions in direct
investment that underlie the U.S. balance of payments statistics,
and direct investment positions that underlie the U. S. international
investment positions
### Arguments:
----
direction_of_investment (str):
`outward` for US direct investment abroad, `inward` for
foreign investment in the US.
classification (str):
Results by `country` or `industry`.
series_id (int, optional, Default='ALL'):
Data Series Identifier (ALL for All).
year (List[str], optiona, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
country (List[str], optional, Default='ALL'):
List of country(s) of data to retrieve (ALL for All).
industry (List[str], optional, Default='ALL'):
List of industries to retrieve (ALL for All).
footnotes (bool, optional, Default=True):
`True` to include footnotes, `False` to not include.
### Returns:
----
dict:
A list of investment data.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # U. S. direct investment position in China and Asia for 2011 and 2012
>>> investments = bea_client.direct_investments_and_multinational_enterprises(
direction_of_investment='outward',
classification='country',
series_id=['30'],
year=['2011', '2012'],
country=['650', '699']
)
>>> investments
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(series_id, list):
series_id = ",".join(series_id)
if isinstance(country, list):
country = ",".join(country)
if isinstance(industry, list):
industry = ",".join(industry)
if footnotes:
footnotes = "Yes"
else:
footnotes = "No"
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "MNE",
"year": year,
"country": country,
"industry": industry,
"seriesid": series_id,
"classification": classification,
"directionofinvestment": direction_of_investment,
"resultformat": self._format,
"getfootnotes": footnotes,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def activities_investments_and_multinational_enterprises(
self,
direction_of_investment: str,
classification: str,
ownership_level: bool,
non_bank_affilates_only: bool,
series_id: int = "ALL",
states: List[str] = "ALL",
year: List[str] = "ALL",
country: List[str] = "ALL",
industry: List[str] = "ALL",
footnotes: bool = True,
) -> dict:
"""Grabs one of two datasets from the Direct Investment and
Multinational Enterprises dataset.
### Overview:
----
This dataset contains the following statistics:
Activities of Multinational Enterprises (AMNE)—operations and finances
of U.S. parent enterprises and their foreign affiliates and U.S.
affiliates of foreign MNEs.
### Arguments:
----
direction_of_investment (str):
`outward` for US direct investment abroad, `inward` for foreign investment
in the US, `state` provides data on U. S. affiliates of foreign multinational
enterprises at the state level and `parent` provides data on U.S. parent
enterprises.
classification (str]):
Results by `country` or `industry`.
ownership_level (bool):
`True` for majority-owned affiliates, `False` for all affiliates.
non_bank_affilates_only (bool):
`True` Both Bank and NonBank Affiliates, `False` for all Nonbank
Affiliates.
series_id (int, optional, Default='ALL'):
Data Series Identifier (ALL for All).
states (List[str], optional, Default='ALL'):
List of state(s) of data to retrieve (ALL for All).
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
country (List[str], optional, Default='ALL'):
List of country(s) of data to retrieve (ALL for All)
industry (List[str], optional, Default='ALL'):
List of industries to retrieve (ALL for All).
footnotes (bool, optional, Default=True):
`True` to include footnotes, `False` to not include.
### Returns:
----
dict:
A list of investment data.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Net income and sales for Brazilian affiliates of U.S.
>>> # parent enterprises, all industries, 2011 and 2012.
>>> investments = bea_client.direct_investments_and_multinational_enterprises(
direction_of_investment='outward',
classification='CountryByIndustry',
series_id=['4','5'],
year=['2011', '2012'],
country=['202'],
ownership_level=False,
industry='ALL',
non_bank_affilates_only=False,
)
>>> investments
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(series_id, list):
series_id = ",".join(series_id)
if isinstance(states, list):
states = ",".join(states)
if isinstance(country, list):
country = ",".join(country)
if isinstance(industry, list):
industry = ",".join(industry)
if footnotes:
footnotes = "Yes"
else:
footnotes = "No"
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "MNE",
"year": year,
"nonbankaffiliatesonly": int(non_bank_affilates_only),
"ownershiplevel": int(ownership_level),
"state": states,
"country": country,
"industry": industry,
"seriesid": series_id,
"classification": classification,
"directionofinvestment": direction_of_investment,
"resultformat": self._format,
"getfootnotes": footnotes,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def international_transactions(
self,
indicator: str = "ALL",
area_or_country: str = "AllCountries",
year: List[str] = "ALL",
frequency: str = "ALL",
) -> dict:
"""This dataset contains data on U. S. international transactions.
### Overview:
----
The DataSetName is ITA. This dataset contains data on U. S. international transactions.
BEA's international transactions (balance of payments) accounts include all transactions
between U. S. and foreign residents.
### Arguments:
----
indicator (str, optional, Default='ALL'):
The indicator code for the type of transaction requested.
area_or_country (str, optional, Default='AllCountries'):
The area or country requested.
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
frequency (List[str], optional, Default='ALL'):
A - Annual, QSA - Quarterly seasonally adjusted,
QNSA -Quarterly not seasonally adjusted.
### Returns:
----
dict:
A list of transaction data.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Balance on goods with China for 2011 and 2012.
>>> balance_on_goods = bea_client.international_transactions(
indicator=['BalGds'],
area_or_country=['China'],
year=['2011', '2012'],
frequency=['A']
)
>>> balance_on_goods
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(area_or_country, list):
area_or_country = ",".join(area_or_country)
if isinstance(frequency, list):
frequency = ",".join(frequency)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "ITA",
"indicator": indicator,
"year": year,
"frequency": frequency,
"areaorcountry": area_or_country,
"resultformat": self._format,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def international_investments_positions(
self,
type_of_investment: str = "ALL",
component: str = "ALL",
year: List[str] = "ALL",
frequency: str = "ALL",
) -> dict:
"""This dataset contains data on the U. S. international investment position.
### Overview:
----
The DataSetName is IIP. This dataset contains data on the U.S. international investment
position. BEA's international investment position accounts include the end of period
value of accumulated stocks of U.S. financial assets and liabilities.
### Arguments:
----
type_of_investment (str, optional, Default='ALL'):
The type of investment.
component (str, optional, Default='ALL'):
Component of changes in position.
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
frequency (List[str], optional, Default='ALL'):
A - Annual, QNSA -Quarterly not seasonally adjusted.
### Returns:
----
dict:
A list of transaction data.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # U. S. assets excluding financial derivatives; change in
>>> # position attributable to price changes for all available
>>> # years.
>>> us_assets = bea_client.international_investments_positions(
type_of_investment=['FinAssetsExclFinDeriv'],
component=['ChgPosPrice'],
year='ALL',
frequency=['A']
)
>>> us_assets
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(component, list):
component = ",".join(component)
if isinstance(frequency, list):
frequency = ",".join(frequency)
if isinstance(type_of_investment, list):
type_of_investment = ",".join(type_of_investment)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "IIP",
"year": year,
"frequency": frequency,
"component": component,
"typeofinvestment": type_of_investment,
"resultformat": self._format,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def input_output_statstics(
self, table_id: List[str], year: List[str] = "ALL"
) -> dict:
"""The Input‐Output Statistics are contained within a dataset
called InputOutput.
### Overview:
----
The Input‐Output Statistics are contained within a dataset called
InputOutput. BEA's industry accounts are used extensively by policymakers
and businesses to understand industry interactions, productivity trends,
and the changing structure of the U.S. economy. The input-output accounts
provide a detailed view of the interrelationships between U.S. producers and
users. The Input‐Output dataset contains Make Tables, Use Tables, and Direct
and Total Requirements tables.
### Arguments:
----
table_id (List[str], optional, Default='ALL'):
The unique GDP by Industry table identifier (ALL for All).
year (List[str], optional, Default='ALL'):
List of year(s) of data to retrieve (ALL for All).
### Returns:
----
dict:
A list of input and output statistics.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Data from Industry‐by‐Commodity Total Requirements,
>>> # After Redefinitions (Sector Level) table
>>> # for years 2010, 2011, and 2012.
>>> input_output_data = bea_client.input_output_statstics(
table_id=['56'],
year=['2010', '2011', '2012', '2013']
)
>>> input_output_data
"""
if isinstance(year, list):
year = ",".join(year)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "InputOutput",
"year": year,
"tableid": ",".join(table_id),
"resultformat": self._format,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
def regional(
self,
table_name: str,
line_code: Union[int, str],
geo_fips: List[str] = "ALL",
year: List[str] = "ALL",
) -> dict:
"""The Input‐Output Statistics are contained within a dataset
called InputOutput.
### Overview:
----
The Regional dataset contains income and employment estimates from the Regional
Economic Accounts by state, county, and metropolitan area. All data accessible
through the Regional InteractiveTables on bea.gov are available through this
dataset. The Regional dataset replaces the RegionalIncome and RegionalProduct
datasets. Additional information may be found at:
http://apps.bea.gov/regional/pdf/RegionalApi.pd
### Arguments:
----
table_name (str):
TableName specifies a published table fromthe regional accounts.
Exactly one TableName must be provided.
line_code (Union[int, str]):
LineCode corresponds to the statistic in a table. It can either be
one value(ie.1,10,11), or 'ALL' to retrieve all the statistics for
one GeoFips.
geo_fips (List[str], optional, Default='ALL')
GeoFips specifies geography. It can be all states (STATE), all counties
(COUNTY), all Metropolitan Statistical Areas (MSA), all Micropolitan
Statistical Areas (MIC), all Metropolitan Divisions (DIV), all Combined
Statistical Areas (CSA), all metropolitan/nonmetropolitan portions
(PORT), or state post office abbreviation for all counties in one
state (e.g. NY).
year (List[str], optional, Default='ALL'):
Year is either a list of comma delimited years, LAST5, LAST10, or
ALL. Year will default to LAST5 years if the parameter is not
specified.
### Returns:
----
dict:
A list of input and output statistics.
### Usage:
----
>>> # Initalize the new Client.
>>> bea_client = BureauEconomicAnalysisClient(api_key=API_KEY)
>>> # Personal income for 2012 and 2013 for all counties.
>>> regional_data = bea_client.regional(
table_name=['CAINC1'],
line_code=1,
geo_fips=['COUNTY'],
year=['2012', '2013']
)
>>> regional_data
"""
if isinstance(year, list):
year = ",".join(year)
if isinstance(geo_fips, list):
geo_fips = ",".join(geo_fips)
# Define the parameters.
params = {
"userid": self.api_key,
"method": "GetData",
"datasetname": "Regional",
"year": year,
"TableName": ",".join(table_name),
"GeoFips": geo_fips,
"LineCode": line_code,
"resultformat": self._format,
}
# Make the request.
response = self._make_request(method="get", params=params)
return response
| 33.188394 | 97 | 0.559743 |
795a1e01d5803c9202620dd51f48f074c82a7daf | 1,746 | py | Python | tests/sentry/api/endpoints/team_projects.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/endpoints/team_projects.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/api/endpoints/team_projects.py | pierredup/sentry | 0145e4b3bc0e775bf3482fe65f5e1a689d0dbb80 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
import six
from django.core.urlresolvers import reverse
from sentry.models import Project
from sentry.testutils import APITestCase
from sentry.utils.compat import map
class TeamProjectIndexTest(APITestCase):
    def test_simple(self):
        """Listing a team's projects returns every project attached to that team."""
        self.login_as(user=self.user)

        team = self.create_team(members=[self.user])
        fiz = self.create_project(teams=[team], slug="fiz")
        buzz = self.create_project(teams=[team], slug="buzz")

        endpoint = reverse(
            "sentry-api-0-team-project-index",
            kwargs={"organization_slug": team.organization.slug, "team_slug": team.slug},
        )
        response = self.client.get(endpoint)

        assert response.status_code == 200
        assert len(response.data) == 2
        returned_ids = sorted(map(lambda x: x["id"], response.data))
        expected_ids = sorted([six.text_type(fiz.id), six.text_type(buzz.id)])
        assert returned_ids == expected_ids
class TeamProjectCreateTest(APITestCase):
    def test_simple(self):
        """Creating a project succeeds once, then conflicts on a duplicate slug."""
        self.login_as(user=self.user)

        team = self.create_team(members=[self.user])
        endpoint = reverse(
            "sentry-api-0-team-project-index",
            kwargs={"organization_slug": team.organization.slug, "team_slug": team.slug},
        )

        payload = {"name": "hello world", "slug": "foobar"}
        resp = self.client.post(endpoint, data=payload)
        assert resp.status_code == 201, resp.content

        project = Project.objects.get(id=resp.data["id"])
        assert project.name == "hello world"
        assert project.slug == "foobar"
        assert project.teams.first() == team

        # Re-posting the same slug must be rejected with a conflict.
        resp = self.client.post(endpoint, data=payload)
        assert resp.status_code == 409, resp.content
795a1e317671d4f15c79d312aca70476e184af55 | 6,521 | py | Python | imbalanced_ensemble/datasets/_imbalance.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 87 | 2021-05-19T08:29:26.000Z | 2022-03-30T23:59:05.000Z | imbalanced_ensemble/datasets/_imbalance.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 8 | 2021-05-28T10:27:28.000Z | 2022-01-11T11:21:03.000Z | imbalanced_ensemble/datasets/_imbalance.py | ZhiningLiu1998/imbalanced-ensemble | 26670c8a6b7bab26ae1e18cba3174a9d9038a680 | [
"MIT"
] | 18 | 2021-05-19T08:30:29.000Z | 2022-03-28T08:30:10.000Z | """Transform a dataset into an imbalanced dataset."""
# Adapted from imbalanced-learn
# Authors: Dayvid Oliveira
# Guillaume Lemaitre <g.lemaitre58@gmail.com>
# Christos Aridas
# Zhining Liu <zhining.liu@outlook.com>
# License: MIT
from collections import Counter
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from ..sampler.under_sampling import RandomUnderSampler
from ..utils import check_sampling_strategy
from ..utils._validation import _deprecate_positional_args
@_deprecate_positional_args
def make_imbalance(
    X, y, *, sampling_strategy=None, random_state=None, verbose=False, **kwargs
):
    """Turns a dataset into an imbalanced dataset with a specific sampling
    strategy.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the `User Guide <https://imbalanced-learn.org/stable/datasets/index.html#make-imbalanced>`_.

    Parameters
    ----------
    X : {array-like, dataframe} of shape (n_samples, n_features)
        Matrix containing the data to be imbalanced.

    y : ndarray of shape (n_samples,)
        Corresponding label for each sample in X.

    sampling_strategy : dict or callable,
        Ratio to use for resampling the data set.

        - When ``dict``, the keys correspond to the targeted classes. The
          values correspond to the desired number of samples for each targeted
          class.

        - When callable, function taking ``y`` and returns a ``dict``. The keys
          correspond to the targeted classes. The values correspond to the
          desired number of samples for each class.

    random_state : int, RandomState instance or None, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    verbose : bool, default=False
        Show information regarding the sampling.

    kwargs : dict
        Dictionary of additional keyword arguments to pass to
        ``sampling_strategy``.

    Returns
    -------
    X_resampled : {ndarray, dataframe} of shape (n_samples_new, n_features)
        The array containing the imbalanced data.

    y_resampled : ndarray of shape (n_samples_new)
        The corresponding label of `X_resampled`.

    Notes
    -----
    See :ref:`sphx_glr_auto_examples_datasets_plot_make_imbalance.py` for an example.

    Examples
    --------
    >>> from collections import Counter
    >>> from sklearn.datasets import load_iris
    >>> from imbalanced_ensemble.datasets import make_imbalance
    >>> data = load_iris()
    >>> X, y = data.data, data.target
    >>> print(f'Distribution before imbalancing: {Counter(y)}')
    Distribution before imbalancing: Counter({0: 50, 1: 50, 2: 50})
    >>> X_res, y_res = make_imbalance(X, y,
    ...                               sampling_strategy={0: 10, 1: 20, 2: 30},
    ...                               random_state=42)
    >>> print(f'Distribution after imbalancing: {Counter(y_res)}')
    Distribution after imbalancing: Counter({2: 30, 1: 20, 0: 10})
    """
    target_stats = Counter(y)

    # Guard clause: only dict or callable strategies are supported here.
    if not (isinstance(sampling_strategy, dict) or callable(sampling_strategy)):
        raise ValueError(
            f"'sampling_strategy' has to be a dictionary or a "
            f"function returning a dictionary. Got {type(sampling_strategy)} "
            f"instead."
        )

    sampling_strategy_ = check_sampling_strategy(
        sampling_strategy, y, "under-sampling", **kwargs
    )

    if verbose:
        print(f"The original target distribution in the dataset is: {target_stats}")

    # Random under-sampling without replacement realises the requested ratio.
    under_sampler = RandomUnderSampler(
        sampling_strategy=sampling_strategy_,
        replacement=False,
        random_state=random_state,
    )
    X_resampled, y_resampled = under_sampler.fit_resample(X, y)

    if verbose:
        print(f"Make the dataset imbalanced: {Counter(y_resampled)}")

    return X_resampled, y_resampled
def generate_imbalance_data(n_samples=200, weights=None,
                            test_size=.5, random_state=None, kwargs=None):
    """Generate a random n-classes imbalanced classification problem.

    Returns the training and test data and labels.

    Parameters
    ----------
    n_samples : int, default=200
        The number of samples.

    weights : array-like of shape (n_classes,), default=None
        The proportions of samples assigned to each class, i.e.,
        it determines the imbalance ratio between classes.
        If None, defaults to ``[0.9, 0.1]``.
        Note that the number of class will be automatically set
        to the length of weights.

    test_size : float or int, default=.5
        If float, should be between 0.0 and 1.0 and represent the
        proportion of the dataset to include in the test split.
        If int, represents the absolute number of test samples.

    random_state : int, RandomState instance or None, default=None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random.

    kwargs : dict, default=None
        Dictionary of additional keyword arguments to pass to
        ``sklearn.datasets.make_classification``.
        Please see details `here <https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_classification.html#sklearn.datasets.make_classification>`_.

    Returns
    -------
    X_train : {ndarray, dataframe} of shape (n_samples*(1-test_size), n_features)
        The array containing the imbalanced training data.

    X_test : {ndarray, dataframe} of shape (n_samples*test_size, n_features)
        The array containing the imbalanced test data.

    y_train : ndarray of shape (n_samples*(1-test_size))
        The corresponding label of `X_train`.

    y_test : ndarray of shape (n_samples*test_size)
        The corresponding label of `X_test`.
    """
    # Fix: avoid mutable default arguments (shared across calls); resolve the
    # documented defaults per call instead. Behaviour is unchanged for all
    # existing callers.
    if weights is None:
        weights = [0.9, 0.1]
    if kwargs is None:
        kwargs = {}

    X, y = make_classification(
        n_classes=len(weights),
        n_samples=n_samples,
        weights=weights,
        random_state=random_state,
        **kwargs
    )
    # Stratify on y so train and test keep the same class imbalance.
    return train_test_split(
        X, y, test_size=test_size, stratify=y,
        random_state=random_state)
795a1e5e2730d29f9742cfabda267184835b8664 | 1,952 | py | Python | static/extension/dynamic_scope/dynamic_permission.py | EOEPCA/um-login-persistence | 290636438390e4ab6f6b6dbfe9c6b3e1e9135110 | [
"Apache-2.0"
] | null | null | null | static/extension/dynamic_scope/dynamic_permission.py | EOEPCA/um-login-persistence | 290636438390e4ab6f6b6dbfe9c6b3e1e9135110 | [
"Apache-2.0"
] | 1 | 2020-11-05T08:03:26.000Z | 2020-11-05T08:03:26.000Z | static/extension/dynamic_scope/dynamic_permission.py | EOEPCA/um-login-persistence | 290636438390e4ab6f6b6dbfe9c6b3e1e9135110 | [
"Apache-2.0"
] | null | null | null | # oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text.
# Copyright (c) 2016, Gluu
#
# Author: Yuriy Movchan
#
from org.gluu.model.custom.script.type.scope import DynamicScopeType
from org.gluu.service.cdi.util import CdiUtil
from org.gluu.oxauth.service import UserService
from org.gluu.util import StringHelper, ArrayHelper
from java.util import Arrays, ArrayList
import java
class DynamicScope(DynamicScopeType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def init(self, configurationAttributes):
print "Permission dynamic scope. Initialization"
print "Permission dynamic scope. Initialized successfully"
return True
def destroy(self, configurationAttributes):
print "Permission dynamic scope. Destroy"
print "Permission dynamic scope. Destroyed successfully"
return True
# Update Json Web token before signing/encrypring it
# dynamicScopeContext is org.gluu.oxauth.service.external.context.DynamicScopeExternalContext
# configurationAttributes is java.util.Map<String, SimpleCustomProperty>
def update(self, dynamicScopeContext, configurationAttributes):
print "Permission dynamic scope scope. Update method"
authorizationGrant = dynamicScopeContext.getAuthorizationGrant()
user = dynamicScopeContext.getUser()
jsonWebResponse = dynamicScopeContext.getJsonWebResponse()
claims = jsonWebResponse.getClaims()
userService = CdiUtil.bean(UserService)
roles = userService.getCustomAttribute(user, "role")
if roles != None:
claims.setClaim("role", roles.getValues())
return True
def getSupportedClaims(self, configurationAttributes):
return Arrays.asList("role")
def getApiVersion(self):
return 2
| 36.148148 | 106 | 0.712602 |
795a1e87056d27d1831e8829525b3ee13f41c897 | 6,846 | py | Python | compass/core/_scrapers/hierarchy.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/hierarchy.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | compass/core/_scrapers/hierarchy.py | MrNoScript/compass-interface-core | 8c945ef36f7bee396bd5a744404eaa88d280a845 | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
import typing
from typing import Literal, TYPE_CHECKING, Union
from lxml import html
import pydantic
from compass.core.interface_base import InterfaceBase
from compass.core.schemas import hierarchy as schema
from compass.core.settings import Settings
from compass.core.utility import compass_restify
if TYPE_CHECKING:
import requests
# TYPES_ENDPOINT_LEVELS values are meaningful values as they become the API endpoint paths
TYPES_ENDPOINT_LEVELS = Literal[
    "countries",
    "hq_sections",
    "regions",
    "country_sections",
    "counties",
    "region_sections",
    "districts",
    "county_sections",
    "groups",
    "district_sections",
    "group_sections",
]
# Map each level name to its REST path, e.g. "hq_sections" -> "/hq/sections".
endpoints = {level: "/" + level.replace("_", "/") for level in typing.get_args(TYPES_ENDPOINT_LEVELS)}
class HierarchyScraper(InterfaceBase):
def __init__(self, session: requests.Session, validate: bool = False):
"""Constructor for HierarchyScraper.
takes an initialised Session object from Logon
"""
super().__init__(session)
self.validate = validate
gufh_native = list[dict[str, Union[int, str, None]]]
gufh_pydantic = Union[schema.HierarchySection, schema.HierarchyUnit]
    # see CompassClient::retrieveLevel or retrieveSections in PGS\Needle php
    def get_units_from_hierarchy(self, parent_unit: int, level: TYPES_ENDPOINT_LEVELS) -> Union[gufh_native, gufh_pydantic, None]:
        """Get all children of a given unit.

        Posts to the hierarchy endpoint selected by ``level`` with
        ``LiveData=Y``, so the resulting JSON additionally contains:

        - (duplicated) parent id
        - the unit address
        - number of members
        - SectionType1 and SectionTypeDesc1 keys, if requesting sections data

        Args:
            parent_unit: The unit ID to get descendants from
            level: string org type, used for selecting the API endpoint

        Returns:
            A list of mappings of unit properties to data, e.g.::

                {'id': ...,
                 'name': '...',
                 'parent_id': ...,
                 'status': '...',
                 'address': '...',
                 'member_count': ...}

            When ``self.validate`` is set, the same records are returned as
            pydantic models instead of plain dicts.

        Todo:
            can we do this without needing to provide the level string?
            raises? (from requests etc)
        """
        # Get the API endpoint path (e.g. "/county/sections") for the level.
        level_endpoint = endpoints[level]
        # Section endpoints carry extra section-type fields and are validated
        # against a different pydantic schema below.
        is_sections = "/sections" in level_endpoint
        result = self._post(f"{Settings.base_url}/hierarchy{level_endpoint}", json={"LiveData": "Y", "ParentID": f"{parent_unit}"})
        result_json = result.json()
        # Unauthorised access currently yields a null record rather than an
        # exception -- NOTE(review): TODO raise??? (original author's question)
        if result_json == {"Message": "Authorization has been denied for this request."}:
            return [{"id": None, "name": None}]
        result_units = []
        for unit_dict in result_json:
            # Core fields present on every unit record.
            parsed = {
                "id": int(unit_dict["Value"]),
                "name": unit_dict["Description"],
                "parent_id": unit_dict["Parent"],
            }
            if unit_dict["Tag"]:  # TODO possible error states - what can we expect here as an invariant?
                # Tag is a JSON-encoded list; presumably only the first entry
                # is meaningful -- TODO confirm this invariant upstream.
                tag = json.loads(unit_dict["Tag"])[0]
                parsed["status"] = tag["org_status"]
                parsed["address"] = tag["address"]
                parsed["member_count"] = tag["Members"]
                # Only include section_type if there is section type data
                if "SectionTypeDesc" in tag:
                    parsed["section_type"] = tag["SectionTypeDesc"]
            result_units.append(parsed)
        if self.validate:
            # Sections and non-section units validate against different models.
            return pydantic.parse_obj_as(list[schema.HierarchySection if is_sections else schema.HierarchyUnit], result_units)
        else:
            return result_units
# Per-item return types for get_members_with_roles_in_unit.
gmwriu_native = dict[str, Union[int, str]]
gmwriu_pydantic = schema.HierarchyMember

def get_members_with_roles_in_unit(
    self, unit_number: int, include_name: bool = False, include_primary_role: bool = False
) -> list[Union[gmwriu_native, gmwriu_pydantic]]:
    """Get details of members with roles in a given unit.

    Keys within the member_data JSON are (as at 13/01/2020):
        - contact_number (membership number)
        - name (member's name)
        - visibility_status (this is meaningless as we can only see Y people)
        - address (this doesn't reliably give us postcode and is a lot of data)
        - role (This is Primary role and so only sometimes useful)

    Args:
        unit_number: Compass unit number
        include_name: include member name in returned data
        include_primary_role: include primary role in returned data

    Returns:
        A list of member records. Keys are included through args
        E.g.:
            [
                {"contact_number": ..., ...},
                ...
            ]

    Raises:
        Exception: if Compass reports an invalid search
            (form action redirects to the SearchError page).

    """
    # Select which keys survive into the returned records.
    keys_to_keep = ("contact_number",)
    if include_name:
        keys_to_keep = (*keys_to_keep, "name")
    if include_primary_role:
        keys_to_keep = (*keys_to_keep, "role")
    # Construct request data
    # It seems like the time UID value can be constant -- keeping old code in case something breaks
    # dt = datetime.datetime.now()
    # time_uid = f"{dt.hour}{dt.minute}{dt.microsecond // 1000}"
    time_uid = str(12_34_567)
    data = {"SearchType": "HIERARCHY", "OrganisationNumber": unit_number, "UI": time_uid}
    # Execute search
    # JSON data MUST be in the rather odd format of {"Key": key, "Value": value} for each (key, value) pair
    self._post(f"{Settings.base_url}/Search/Members", json=compass_restify(data))
    # Fetch results from Compass
    search_results = self._get(f"{Settings.base_url}/SearchResults.aspx")
    # Gets the compass form from the returned document
    form = html.fromstring(search_results.content).forms[0]
    del search_results
    # If the search hasn't worked the form returns an InvalidSearchError - check for this and raise an error if needed
    if form.action == "./ScoutsPortal.aspx?Invalid=SearchError":
        raise Exception("Invalid Search")
    # Get the encoded JSON data from the HTML; fall back to an empty list.
    member_data_string = form.fields["ctl00$plInnerPanel_head$txt_h_Data"] or "[]"
    del form
    # parse the data and return it as a usable Python object (list)
    member_data = json.loads(member_data_string)
    if self.validate:
        return [schema.HierarchyMember(**{key: member[key] for key in keys_to_keep}) for member in member_data]
    else:
        return [{key: member[key] for key in keys_to_keep} for member in member_data]
| 37.005405 | 131 | 0.624598 |
795a1eec5a7a00d957fb52c4ecc7f11a24b65963 | 104,142 | py | Python | aim/tests/unit/agent/test_agent.py | gaurav-dalvi/aci-integration-module | 5d31f65f4fca3e0322d6003e7736ca14aa7ec72d | [
"Apache-2.0"
] | null | null | null | aim/tests/unit/agent/test_agent.py | gaurav-dalvi/aci-integration-module | 5d31f65f4fca3e0322d6003e7736ca14aa7ec72d | [
"Apache-2.0"
] | null | null | null | aim/tests/unit/agent/test_agent.py | gaurav-dalvi/aci-integration-module | 5d31f65f4fca3e0322d6003e7736ca14aa7ec72d | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2016 Cisco Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
from apicapi import apic_client
from apicapi import exceptions as aexc
import mock
from aim.agent.aid import service
from aim.agent.aid.universes.aci import aci_universe
from aim import aim_manager
from aim.api import resource
from aim.api import service_graph
from aim.api import status as aim_status
from aim.api import tree as aim_tree
from aim.common.hashtree import structured_tree as tree
from aim.common import utils
from aim import config
from aim import context
from aim.db import hashtree_db_listener
from aim.tests import base
from aim.tests.unit.agent.aid_universes import test_aci_tenant
from aim import tree_manager
def run_once_loop(agent):
    """Build a one-shot replacement for AID's daemon loop.

    The returned callable records its arguments on ``agent._run_arguments``
    and clears ``agent.run_daemon_loop`` so the daemon stops after a single
    iteration.
    """
    def _single_pass(ctx, serve=True):
        # Stop the daemon after this pass.
        agent.run_daemon_loop = False
        # Append this call's arguments, creating the list on first use.
        agent._run_arguments = getattr(agent, '_run_arguments', []) + [ctx, serve]
    return _single_pass
class TestAgent(base.TestAimDBBase, test_aci_tenant.TestAciClientMixin):
def setUp(self):
    """Prepare an isolated AID test environment.

    Neutralizes polling timers, builds AIM/tree managers, saves the real
    ApicSession.post_body_dict for later restoration, and patches out every
    background thread (tenant managers, event handler, k8s watchers,
    heartbeat loop) so tests can drive the reconciliation loop
    synchronously.
    """
    super(TestAgent, self).setUp(mock_store=False)
    # Effectively disable agent timeouts and loop sleeps.
    self.set_override('agent_down_time', 3600, 'aim')
    self.set_override('agent_polling_interval', 0, 'aim')
    self.set_override('aci_tenant_polling_yield', 0, 'aim')
    self.aim_manager = aim_manager.AimManager()
    self.tree_manager = tree_manager.TreeManager(tree.StructuredHashTree)
    # Save the real POST method so _reset_apic_client can restore it
    # (tests monkey-patch it with _mock_current_manager_post).
    self.old_post = apic_client.ApicSession.post_body_dict
    self.addCleanup(self._reset_apic_client)
    self._do_aci_mocks()
    # Tenant manager threads never actually run; tests drive their
    # _event_loop manually, and the managers report alive and warm.
    self.tenant_thread = mock.patch(
        'aim.agent.aid.universes.aci.tenant.AciTenantManager.run')
    self.tenant_thread.start()
    self.thread_dead = mock.patch(
        'aim.agent.aid.universes.aci.tenant.AciTenantManager.is_dead',
        return_value=False)
    self.thread_dead.start()
    self.thread_warm = mock.patch(
        'aim.agent.aid.universes.aci.tenant.AciTenantManager.is_warm',
        return_value=True)
    self.thread_warm.start()
    # No real event listener, k8s watcher or heartbeat loop either.
    self.events_thread = mock.patch(
        'aim.agent.aid.event_handler.EventHandler._spawn_listener')
    self.events_thread.start()
    self.watcher_threads = mock.patch(
        'aim.agent.aid.universes.k8s.k8s_watcher.K8sWatcher.run')
    self.watcher_threads.start()
    self.stop_watcher_threads = mock.patch(
        'aim.agent.aid.universes.k8s.k8s_watcher.K8sWatcher.stop_threads')
    self.stop_watcher_threads.start()
    self.hb_loop = mock.patch(
        'aim.agent.aid.service.AID._spawn_heartbeat_loop')
    self.hb_loop.start()
    # Unwind every patch when the test finishes.
    self.addCleanup(self.tenant_thread.stop)
    self.addCleanup(self.thread_dead.stop)
    self.addCleanup(self.thread_warm.stop)
    self.addCleanup(self.events_thread.stop)
    self.addCleanup(self.watcher_threads.stop)
    self.addCleanup(self.stop_watcher_threads.stop)
    self.addCleanup(self.hb_loop.stop)
def _first_serve(self, agent):
    """Run two reconciliation cycles back to back: the first one
    initializes the served tenants, the second actually serves them."""
    for _ in range(2):
        agent._reconciliation_cycle()
def _reset_apic_client(self):
    """Cleanup hook: restore the ApicSession POST method that tests
    replace with _mock_current_manager_post."""
    setattr(apic_client.ApicSession, 'post_body_dict', self.old_post)
def _mock_current_manager_post(self, mo, data, *params):
    """Replacement for ApicSession.post_body_dict.

    Instead of reaching a real APIC, convert the posted object tree into
    the equivalent set of websocket events and feed them to the tenant
    manager currently under test (self._current_manager).
    """
    # Each post, generates the same set of events for the WS interface
    events = []
    base = 'uni'
    container = mo.container
    if container:
        # Resolve the parent DN from the container class and the RN params.
        base = apic_client.ManagedObjectClass(container).dn(*params[:-1])
    self._tree_to_event(data, events, base, self._current_manager)
    # pre-create Kubernetes VMM domain so that implicitly
    # created Kubernetes objects can be handled
    k8s_ctrlr = {'vmmInjectedCont':
                 {'attributes':
                  {'dn': ('comp/prov-Kubernetes/'
                          'ctrlr-[kubernetes]-kube-cluster/injcont')}}}
    self._set_events([k8s_ctrlr], manager=self._current_manager,
                     tag=False, create_parents=True)
    # Tagging is done by the tenant manager
    self._set_events(events, manager=self._current_manager, tag=False)
def _mock_current_manager_delete(self, dn, **kwargs):
    """Simulate an APIC DELETE by emitting a 'deleted' event for the MO.

    ``dn`` arrives as an API path ('mo/<dn>.json'); strip the 'mo/' prefix
    and the '.json' suffix to recover the bare distinguished name.
    """
    bare_dn = dn[4:-5]
    mo_type = test_aci_tenant.decompose_aci_dn(bare_dn)[-1][0]
    event = {mo_type: {'attributes': {'dn': bare_dn,
                                      'status': 'deleted'}}}
    self._set_events([event], manager=self._current_manager, tag=False)
def _tree_to_event(self, root, result, dn, manager):
    """Flatten an ACI object tree into a list of per-object events.

    Children are detached from each node and visited recursively; every
    node gets an absolute 'dn' and a 'status' ('created' when unset).
    Deleting an object that the mocked APIC doesn't know raises a 404,
    mirroring real APIC behavior.
    """
    if not root:
        return
    # Single-key dict: grab its only value (py2 .values() is a list).
    body = root.values()[0]
    children, body['children'] = body['children'], []
    attrs = body['attributes']
    dn += '/' + attrs['rn']
    attrs['dn'] = dn
    status = attrs.get('status')
    if status is None:
        attrs['status'] = 'created'
    elif status == 'deleted':
        # API call fails in case the item doesn't exist
        if not test_aci_tenant.mock_get_data(manager.aci_session,
                                             'mo/' + dn):
            raise apic_client.cexc.ApicResponseNotOk(
                request='delete', status='404',
                reason='not found', err_text='not', err_code='404')
    result.append(root)
    for child in children:
        self._tree_to_event(child, result, dn, manager)
def _create_agent(self, host='h1'):
    """Instantiate an AID service registered under ``host``.

    Every ACI universe in the new agent's multiverse is pointed at one
    shared mocked ACI session with an empty data stash, so tests can seed
    and inspect fake APIC state in a single place.
    """
    self.set_override('aim_service_identifier', host, 'aim')
    aid = service.AID(config.CONF)
    session = aci_universe.AciUniverse.establish_aci_session(
        self.cfg_manager)
    for pair in aid.multiverse:
        for universe in pair.values():
            # Only ACI-backed universes carry an aci_session attribute.
            if getattr(universe, 'aci_session', None):
                universe.aci_session = session
    session._data_stash = {}
    return aid
def test_init(self):
    """Starting AID registers exactly one Agent row for this host."""
    aid = self._create_agent()
    self.assertEqual('h1', aid.host)
    # Construction must also have registered the agent in the DB.
    registered = self.aim_manager.find(self.ctx, resource.Agent)
    self.assertEqual(1, len(registered))
    self.assertEqual('aid-h1', registered[0].id)
@base.requires(['timestamp'])
def test_send_heartbeat(self):
    """Sending a heartbeat advances the agent's heartbeat timestamp."""
    aid = self._create_agent()
    before = aid.agent.heartbeat_timestamp
    # Ensure measurable time passes between the two timestamps.
    time.sleep(1)
    aid._send_heartbeat(self.ctx)
    self.assertLess(before, aid.agent.heartbeat_timestamp)
def test_calculate_tenants(self):
    """Tenant scheduling: a lone agent takes all tenant trees, and with
    multiple agents every tenant is still served by someone."""
    # One agent, zero tenants
    agent = self._create_agent()
    result = agent._calculate_tenants(self.ctx)
    self.assertEqual([], result)
    self.assertEqual([], agent.agent.hash_trees)
    # Same agent, one tenant
    data = tree.StructuredHashTree().include(
        [{'key': ('keyA', 'keyB')}, {'key': ('keyA', 'keyC')},
         {'key': ('keyA', 'keyC', 'keyD')}])
    self.tree_manager.update_bulk(self.ctx, [data])
    result = agent._calculate_tenants(self.ctx)
    self.assertEqual(['keyA'], result)
    self.assertEqual(['keyA'], agent.agent.hash_trees)
    # Same agent, N Tenants
    data2 = tree.StructuredHashTree().include(
        [{'key': ('keyA1', 'keyB')}, {'key': ('keyA1', 'keyC')},
         {'key': ('keyA1', 'keyC', 'keyD')}])
    data3 = tree.StructuredHashTree().include(
        [{'key': ('keyA2', 'keyB')}, {'key': ('keyA2', 'keyC')},
         {'key': ('keyA2', 'keyC', 'keyD')}])
    self.tree_manager.update_bulk(self.ctx, [data2, data3])
    result = agent._calculate_tenants(self.ctx)
    # All tenants are served by this agent since he's the only one
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']), set(result))
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(agent.agent.hash_trees))
    # Multiple Agents
    agent2 = self._create_agent(host='h2')
    agent3 = self._create_agent(host='h3')
    # Recalculate
    result = agent._calculate_tenants(self.ctx)
    result2 = agent2._calculate_tenants(self.ctx)
    result3 = agent3._calculate_tenants(self.ctx)
    # All the tenants must be served
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(result + result2 + result3))
@base.requires(['timestamp'])
def test_down_time_suicide(self):
    """An agent that exceeds its maximum down time commits harakiri."""
    with mock.patch.object(service.utils, 'perform_harakiri') as suicide:
        aid = self._create_agent()
        # First pass: agent is healthy, no suicide expected.
        aid._calculate_tenants(self.ctx)
        # Force any down time to exceed the (now negative) maximum.
        aid.max_down_time = -1
        aid._calculate_tenants(self.ctx)
        suicide.assert_called_once_with(service.LOG, mock.ANY)
@base.requires(['timestamp'])
def test_tenant_association_fail(self):
    """Tenants are rebalanced away from admin-down or lower-versioned
    agents, and come back once the agent is healthy/upgraded."""
    data = tree.StructuredHashTree().include(
        [{'key': ('keyA', 'keyB')}, {'key': ('keyA', 'keyC')},
         {'key': ('keyA', 'keyC', 'keyD')}])
    data2 = tree.StructuredHashTree().include(
        [{'key': ('keyA1', 'keyB')}, {'key': ('keyA1', 'keyC')},
         {'key': ('keyA1', 'keyC', 'keyD')}])
    data3 = tree.StructuredHashTree().include(
        [{'key': ('keyA2', 'keyB')}, {'key': ('keyA2', 'keyC')},
         {'key': ('keyA2', 'keyC', 'keyD')}])
    self.tree_manager.update_bulk(self.ctx, [data, data2, data3])
    agent = self._create_agent()
    agent2 = self._create_agent(host='h2')
    # Bring agent administratively down
    agent.agent.admin_state_up = False
    self.aim_manager.create(self.ctx, agent.agent, overwrite=True)
    result = agent._calculate_tenants(self.ctx)
    result2 = agent2._calculate_tenants(self.ctx)
    # Agent2 picks up everything.
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(result2))
    # Agent one has no tenant assigned
    self.assertEqual([], result)
    # Fix agent1
    agent.agent.admin_state_up = True
    self.aim_manager.create(self.ctx, agent.agent, overwrite=True)
    result = agent._calculate_tenants(self.ctx)
    result2 = agent2._calculate_tenants(self.ctx)
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(result + result2))
    # neither agent has empty configuration
    self.assertNotEqual([], result)
    self.assertNotEqual([], result2)
    # Upgrade agent2 version: older-versioned agents are excluded.
    agent2.agent.version = "2.0.0"
    self.aim_manager.create(self.ctx, agent2.agent, overwrite=True)
    result = agent._calculate_tenants(self.ctx)
    result2 = agent2._calculate_tenants(self.ctx)
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(result2))
    # Agent one has no tenant assigned
    self.assertEqual([], result)
    # Upgrade agent1 version: both serve again.
    agent.agent.version = "2.0.0"
    self.aim_manager.create(self.ctx, agent.agent, overwrite=True)
    result = agent._calculate_tenants(self.ctx)
    result2 = agent2._calculate_tenants(self.ctx)
    self.assertEqual(set(['keyA', 'keyA1', 'keyA2']),
                     set(result + result2))
    # neither agent has empty configuration
    self.assertNotEqual([], result)
    self.assertNotEqual([], result2)
def test_main_loop(self):
    """End-to-end reconciliation: AIM config is pushed to the mocked ACI,
    fed back as websocket events, and the universes converge; object and
    tenant deletions propagate until the tenant is no longer served."""
    agent = self._create_agent()
    # Keep test compatibility with monitored universe introduction
    agent.current_universe = agent.multiverse[0]['current']
    agent.desired_universe = agent.multiverse[0]['desired']
    tenant_name1 = 'test_main_loop1'
    tenant_name2 = 'test_main_loop2'
    # Create 2 tenants by initiating their objects
    tn1 = resource.Tenant(name=tenant_name1)
    tn2 = resource.Tenant(name=tenant_name2)
    tn = self.aim_manager.create(self.ctx, tn1)
    self.aim_manager.create(self.ctx, tn2)
    bd1_tn1 = resource.BridgeDomain(tenant_name=tenant_name1, name='bd1',
                                    vrf_name='vrf1')
    bd1_tn2 = resource.BridgeDomain(tenant_name=tenant_name2, name='bd1',
                                    vrf_name='vrf2', display_name='nice')
    bd = self.aim_manager.create(self.ctx, bd1_tn2)
    self.aim_manager.create(self.ctx, bd1_tn1)
    self.aim_manager.get_status(self.ctx, bd1_tn1)
    self.aim_manager.get_status(self.ctx, bd1_tn2)
    self.aim_manager.set_fault(
        self.ctx, bd1_tn1, aim_status.AciFault(
            fault_code='516',
            external_identifier='uni/tn-%s/BD-bd1/'
                                'fault-516' % tenant_name1))
    # Fault has been registered in the DB
    status = self.aim_manager.get_status(self.ctx, bd1_tn1)
    self.assertEqual(1, len(status.faults))
    # ACI universe is empty right now, one cycle of the main loop will
    # reconcile the state
    self._first_serve(agent)
    # The ACI universe will not push the configuration unless explicitly
    # called
    self.assertFalse(
        agent.current_universe.serving_tenants[tn1.rn].
        object_backlog.empty())
    self.assertFalse(
        agent.current_universe.serving_tenants[tn2.rn].
        object_backlog.empty())
    # Meanwhile, Operational state has been cleaned from AIM
    status = self.aim_manager.get_status(self.ctx, bd1_tn1)
    self.assertEqual(0, len(status.faults))
    # Events around the BD creation are now sent to the ACI universe, add
    # them to the observed tree
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    apic_client.ApicSession.DELETE = self._mock_current_manager_delete
    for tenant in agent.current_universe.serving_tenants.values():
        self._current_manager = tenant
        tenant._event_loop()
    # Now, the two trees are in sync
    agent._reconciliation_cycle()
    # Pushing config did not bump the AIM objects' epochs.
    tn_after = self.aim_manager.get(self.ctx, tn)
    bd_after = self.aim_manager.get(self.ctx, bd)
    self.assertEqual(tn_after.epoch, tn.epoch)
    self.assertEqual(bd_after.epoch, bd.epoch)
    self._assert_universe_sync(agent.desired_universe,
                               agent.current_universe,
                               tenants=[tn1.root, tn2.root])
    self._assert_reset_consistency()
    # Status for the two BDs is now synced
    bd1_tn1_status = self.aim_manager.get_status(self.ctx, bd1_tn1)
    bd1_tn2_status = self.aim_manager.get_status(self.ctx, bd1_tn2)
    self.assertEqual(aim_status.AciStatus.SYNCED,
                     bd1_tn1_status.sync_status)
    self.assertEqual(aim_status.AciStatus.SYNCED,
                     bd1_tn2_status.sync_status)
    self.assertTrue(
        agent.current_universe.serving_tenants[tn1.rn].
        object_backlog.empty())
    self.assertTrue(
        agent.current_universe.serving_tenants[tn2.rn].
        object_backlog.empty())
    # Delete object and create a new one on tn1
    self.aim_manager.delete(self.ctx, bd1_tn1)
    bd2_tn1 = resource.BridgeDomain(tenant_name=tenant_name1, name='bd2',
                                    vrf_name='vrf3')
    self.aim_manager.create(self.ctx, bd2_tn1)
    # Push state
    currentserving_tenants = {
        k: v for k, v in
        agent.current_universe.serving_tenants.iteritems()}
    agent._reconciliation_cycle()
    # Serving managers are reused, not rebuilt, across cycles.
    self.assertIs(agent.current_universe.serving_tenants[tn1.rn],
                  currentserving_tenants[tn1.rn])
    self.assertIs(agent.current_universe.serving_tenants[tn2.rn],
                  currentserving_tenants[tn2.rn])
    # There are changes on tn1 only
    self.assertFalse(
        agent.current_universe.serving_tenants[tn1.rn].
        object_backlog.empty())
    self.assertTrue(
        agent.current_universe.serving_tenants[tn2.rn].
        object_backlog.empty())
    # Get events
    for tenant in agent.current_universe.serving_tenants.values():
        self._current_manager = tenant
        tenant._event_loop()
    agent._reconciliation_cycle()
    # Everything is in sync again
    self._assert_universe_sync(agent.desired_universe,
                               agent.current_universe,
                               tenants=[tn1.root, tn2.root])
    self._assert_reset_consistency(tn1.rn)
    self._assert_reset_consistency(tn2.rn)
    # Delete a tenant
    self.aim_manager.delete(self.ctx, bd2_tn1)
    self.aim_manager.delete(self.ctx, tn1)
    agent._reconciliation_cycle()
    # There are changes on tn1 only
    self.assertFalse(
        agent.current_universe.serving_tenants[tn1.rn].
        object_backlog.empty())
    self.assertTrue(
        agent.current_universe.serving_tenants[tn2.rn].
        object_backlog.empty())
    self.assertIs(agent.current_universe.serving_tenants[tn1.rn],
                  currentserving_tenants[tn1.rn])
    self.assertIs(agent.current_universe.serving_tenants[tn2.rn],
                  currentserving_tenants[tn2.rn])
    # Get events
    for tenant in agent.current_universe.serving_tenants.values():
        self._current_manager = tenant
        tenant._event_loop()
    # Depending on the order of operation, we might need another
    # iteration to cleanup the tree completely
    if agent.current_universe.serving_tenants[tn1.rn]._state.root:
        agent._reconciliation_cycle()
        for tenant in agent.current_universe.serving_tenants.values():
            self._current_manager = tenant
            tenant._event_loop()
    # Tenant still exist on AIM because observe didn't run yet
    self.assertIsNone(
        agent.current_universe.serving_tenants[tn1.rn]._state.root)
    tree1 = agent.tree_manager.find(self.ctx, root_rn=[tn1.rn])
    self.assertEqual(1, len(tree1))
    # Now tenant will be deleted (still served)
    agent._reconciliation_cycle()
    self.assertIsNone(agent.current_universe.state[tn1.rn].root)
    tree1 = agent.tree_manager.find(self.ctx, root_rn=[tn1.rn])
    self.assertEqual(0, len(tree1))
    # Agent not served anymore
    agent._reconciliation_cycle()
    self.assertFalse(tenant_name1 in agent.current_universe.state)
def test_handle_sigterm(self):
    """SIGTERM clears run_daemon_loop so the main loop can exit."""
    aid = self._create_agent()
    # Starts enabled...
    self.assertTrue(aid.run_daemon_loop)
    # ...and the signal handler turns it off.
    aid._handle_sigterm(mock.Mock(), mock.Mock())
    self.assertFalse(aid.run_daemon_loop)
def test_change_polling_interval(self):
    """agent_polling_interval config changes are applied at runtime."""
    aid = self._create_agent()
    self.set_override('agent_polling_interval', 130, 'aim')
    # Not visible until the subscription manager polls for changes.
    self.assertNotEqual(130, aid.polling_interval)
    aid.conf_manager.subs_mgr._poll_and_execute()
    self.assertEqual(130, aid.polling_interval)
def test_change_report_interval(self):
    """agent_report_interval config changes are applied at runtime."""
    aid = self._create_agent()
    self.set_override('agent_report_interval', 130, 'aim')
    # Not visible until the subscription manager polls for changes.
    self.assertNotEqual(130, aid.report_interval)
    aid.conf_manager.subs_mgr._poll_and_execute()
    self.assertEqual(130, aid.report_interval)
def test_monitored_tree_lifecycle(self):
    """Life cycle of an externally-created (monitored) BD: it surfaces in
    AIM, reaches SYNCED state, is re-created if deleted from AIM, and is
    removed from AIM once deleted on ACI.

    Fixes: the ApicSession POST/DELETE mocks were installed twice (the
    second, identical assignment was redundant), and the comment wrongly
    described the tenant as non-monitored.
    """
    agent = self._create_agent()
    current_config = agent.multiverse[0]['current']
    tenant_name = 'test_monitored_tree_lifecycle'
    current_monitor = agent.multiverse[2]['current']
    desired_monitor = agent.multiverse[2]['desired']
    # Route APIC POST/DELETE calls back into the mocked event pipeline.
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    apic_client.ApicSession.DELETE = self._mock_current_manager_delete
    # start by serving a single monitored tenant
    tn1 = resource.Tenant(name=tenant_name, monitored=True)
    aci_tn = self._get_example_aci_tenant(
        name=tenant_name, dn='uni/tn-%s' % tenant_name, nameAlias='nice')
    self.aim_manager.create(self.ctx, tn1)
    # Run loop for serving tenant
    self._first_serve(agent)
    self._set_events(
        [aci_tn], manager=desired_monitor.serving_tenants[tn1.rn],
        tag=False)
    self._observe_aci_events(current_config)
    # Simulate an external actor creating a BD
    aci_bd = self._get_example_aci_bd(
        tenant_name=tenant_name, name='default',
        dn='uni/tn-%s/BD-default' % tenant_name)
    aci_rsctx = self._get_example_aci_rs_ctx(
        dn='uni/tn-%s/BD-default/rsctx' % tenant_name)
    self._set_events(
        [aci_bd, aci_rsctx],
        manager=desired_monitor.serving_tenants[tn1.rn],
        tag=False)
    # Observe ACI events
    self._observe_aci_events(current_config)
    # Run the loop for reconciliation
    agent._reconciliation_cycle()
    # Run loop again to set SYNCED state
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    # A monitored BD should now exist in AIM
    aim_bd = self.aim_manager.get(self.ctx, resource.BridgeDomain(
        tenant_name=tenant_name, name='default'))
    self.assertTrue(aim_bd.monitored)
    self.assertEqual('default', aim_bd.vrf_name)
    # This BD's sync state should be OK
    aim_bd_status = self.aim_manager.get_status(self.ctx, aim_bd)
    self.assertEqual(aim_status.AciStatus.SYNCED,
                     aim_bd_status.sync_status)
    # Trees are in sync
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
    # Delete the monitored BD, will be re-created
    self.aim_manager.delete(self.ctx, aim_bd)
    agent._reconciliation_cycle()
    # It's reconciled
    aim_bd = self.aim_manager.get(self.ctx, resource.BridgeDomain(
        tenant_name=tenant_name, name='default'))
    self.assertTrue(aim_bd.monitored)
    # Send delete event
    aci_bd['fvBD']['attributes']['status'] = 'deleted'
    aci_rsctx['fvRsCtx']['attributes']['status'] = 'deleted'
    ac_bd_tag = {'tagInst': {'attributes': {
        'dn': aci_bd['fvBD']['attributes']['dn'] + '/tag-' + self.sys_id,
        'status': 'deleted'}}}
    self._set_events(
        [ac_bd_tag, aci_rsctx, aci_bd],
        manager=desired_monitor.serving_tenants[tn1.rn], tag=False)
    # Observe ACI events
    self._observe_aci_events(current_config)
    # Run the loop for reconciliation
    agent._reconciliation_cycle()
    # BD is deleted
    aim_bd = self.aim_manager.get(self.ctx, resource.BridgeDomain(
        tenant_name=tenant_name, name='default'))
    self.assertIsNone(aim_bd)
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_reset_consistency(tn1.rn)
def test_monitored_tree_serve_semantics(self):
    """A monitored tenant is served while its AIM Tenant exists and is
    dropped from the serving set shortly after the Tenant is deleted."""
    agent = self._create_agent()
    current_config = agent.multiverse[0]['current']
    desired_config = agent.multiverse[0]['desired']
    desired_monitor = agent.multiverse[2]['desired']
    # Route APIC POST/DELETE calls back into the mocked event pipeline.
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    apic_client.ApicSession.DELETE = self._mock_current_manager_delete
    tenant_name = 'test_monitored_tree_serve_semantics'
    self.assertEqual({}, desired_monitor.aci_session._data_stash)
    # start by managing a single monitored tenant
    tn1 = resource.Tenant(name=tenant_name, monitored=True)
    aci_tn = self._get_example_aci_tenant(
        name=tenant_name, dn='uni/tn-%s' % tenant_name)
    aci_bd = self._get_example_aci_bd(
        tenant_name=tenant_name, name='mybd',
        dn='uni/tn-%s/BD-mybd' % tenant_name)
    # Create tenant in AIM to start serving it
    self.aim_manager.create(self.ctx, tn1)
    # Run loop for serving tenant
    self._first_serve(agent)
    # we need this tenant to exist in ACI
    self._set_events(
        [aci_tn, aci_bd],
        manager=desired_monitor.serving_tenants[tn1.rn], tag=False)
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    bd1 = resource.BridgeDomain(name='bd1', tenant_name=tenant_name)
    self.aim_manager.create(self.ctx, bd1)
    # Push BD in ACI
    agent._reconciliation_cycle()
    # Feedback loop
    self._observe_aci_events(current_config)
    # Observe
    agent._reconciliation_cycle()
    # Config universes in sync
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
    # Delete the only managed item
    self.aim_manager.delete(self.ctx, bd1)
    # Delete on ACI
    agent._reconciliation_cycle()
    # Feedback loop
    self._observe_aci_events(current_config)
    # Observe
    agent._reconciliation_cycle()
    # Delete the tenant on AIM, agents should stop watching it
    self.aim_manager.delete(self.ctx, tn1)
    agent._reconciliation_cycle()
    # Agent will delete remaining objects
    agent._reconciliation_cycle()
    # Still serving for one more cycle...
    self.assertTrue(tn1.rn in desired_monitor.serving_tenants)
    # Now deletion happens
    agent._reconciliation_cycle()
    self.assertTrue(tn1.rn not in desired_monitor.serving_tenants)
def test_monitored_tree_relationship(self):
    """A managed Subnet living under a monitored BD: it syncs normally,
    and goes to SYNC_FAILED once its monitored parent disappears from
    ACI (creation surrenders after a single retry)."""
    # Set retry to 1 to cause immediate creation surrender
    self.set_override('max_operation_retry', 1, 'aim')
    agent = self._create_agent()
    current_config = agent.multiverse[0]['current']
    desired_config = agent.multiverse[0]['desired']
    desired_monitor = agent.multiverse[2]['desired']
    current_monitor = agent.multiverse[2]['current']
    tenant_name = 'test_monitored_tree_relationship'
    self.assertEqual({}, desired_monitor.aci_session._data_stash)
    # Route APIC POST/DELETE calls back into the mocked event pipeline.
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    apic_client.ApicSession.DELETE = self._mock_current_manager_delete
    tn1 = resource.Tenant(name=tenant_name)
    # Create tenant in AIM to start serving it
    self.aim_manager.create(self.ctx, tn1)
    # Run loop for serving tenant
    self._first_serve(agent)
    self._observe_aci_events(current_config)
    # Create a BD manually on this tenant
    aci_bd = self._get_example_aci_bd(
        tenant_name=tenant_name, name='mybd',
        dn='uni/tn-%s/BD-mybd' % tenant_name,
        limitIpLearnToSubnets='yes')
    self._set_events(
        [aci_bd], manager=desired_monitor.serving_tenants[tn1.rn],
        tag=False)
    self._observe_aci_events(current_config)
    # Reconcile
    agent._reconciliation_cycle()
    # Create a managed subnet in the BD
    sub = resource.Subnet(tenant_name=tenant_name, bd_name='mybd',
                          gw_ip_mask='10.10.10.1/28')
    self.aim_manager.create(self.ctx, sub)
    bd = resource.BridgeDomain(name='mybd', tenant_name=tenant_name)
    bd = self.aim_manager.get(self.ctx, bd)
    # Monitored BD reflects the ACI attribute.
    self.assertTrue(bd.limit_ip_learn_to_subnets)
    self.assertTrue(bd.monitored)
    # Observe
    self._observe_aci_events(current_config)
    # Reconcile
    agent._reconciliation_cycle()
    # Observe
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    # Verify all trees converged
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn1.root])
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
    # Delete the ACI BD manually
    aci_bd['fvBD']['attributes']['status'] = 'deleted'
    self._set_events(
        [aci_bd], manager=desired_monitor.serving_tenants[tn1.rn],
        tag=False)
    # Observe
    self._observe_aci_events(current_config)
    # Reconcile
    agent._reconciliation_cycle()
    # Observe
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    # BD is gone from AIM; the orphaned subnet fails to sync.
    self.assertIsNone(self.aim_manager.get(self.ctx, bd))
    sub = self.aim_manager.get(self.ctx, sub)
    sub_status = self.aim_manager.get_status(self.ctx, sub)
    self.assertEqual(aim_status.AciStatus.SYNC_FAILED,
                     sub_status.sync_status)
    self.assertNotEqual('', sub_status.sync_message)
    # Verify all tree converged
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn1.root])
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
def test_monitored_tree_rs_objects(self):
    """Verify that RS (relation source) objects can be synced for
    monitored objects: AIM-driven contracts are pushed and tagged on ACI
    without disturbing the pre-existing manual RS object."""
    agent = self._create_agent()
    current_config = agent.multiverse[0]['current']
    desired_config = agent.multiverse[0]['desired']
    desired_monitor = agent.multiverse[2]['desired']
    current_monitor = agent.multiverse[2]['current']
    tenant_name = 'test_monitored_tree_rs_objects'
    # Route APIC POSTs back into the mocked event pipeline.
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    tn1 = resource.Tenant(name=tenant_name)
    # Create tenant in AIM to start serving it
    self.aim_manager.create(self.ctx, tn1)
    # Run loop for serving tenant
    self._first_serve(agent)
    self._observe_aci_events(current_config)
    # Manually create an L3Out/ExternalNetwork with a provided contract.
    aci_l3o = self._get_example_aci_l3_out(
        dn='uni/tn-%s/out-default' % tenant_name, name='default')
    aci_ext_net = self._get_example_aci_ext_net(
        dn='uni/tn-%s/out-default/instP-extnet' % tenant_name)
    aci_ext_net_rs_prov = self._get_example_aci_ext_net_rs_prov(
        dn='uni/tn-%s/out-default/instP-extnet/'
           'rsprov-default' % tenant_name)
    self._set_events(
        [aci_l3o, aci_ext_net, aci_ext_net_rs_prov],
        manager=desired_monitor.serving_tenants[tn1.rn], tag=False)
    self._observe_aci_events(current_config)
    # Reconcile
    agent._reconciliation_cycle()
    # Verify AIM ext net doesn't have contracts set
    # (the manual RS object is not reflected as an owned relation).
    ext_net = resource.ExternalNetwork(
        tenant_name=tenant_name, name='extnet', l3out_name='default')
    ext_net = self.aim_manager.get(self.ctx, ext_net)
    self.assertEqual([], ext_net.provided_contract_names)
    self.assertEqual([], ext_net.consumed_contract_names)
    self._observe_aci_events(current_config)
    # Observe
    agent._reconciliation_cycle()
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
    # Update ext_net to provide some contract
    ext_net = self.aim_manager.update(self.ctx, ext_net,
                                      provided_contract_names=['c1'])
    # Reconcile
    agent._reconciliation_cycle()
    self._observe_aci_events(current_config)
    # Observe
    agent._reconciliation_cycle()
    ext_net = self.aim_manager.get(self.ctx, ext_net)
    self.assertEqual(['c1'], ext_net.provided_contract_names)
    # Verify contract is provided in ACI
    prov = test_aci_tenant.mock_get_data(
        desired_monitor.serving_tenants[tn1.rn].aci_session,
        'mo/uni/tn-%s/out-default/instP-extnet/rsprov-c1' % tenant_name)
    self.assertNotEqual([], prov[0])
    # Also its tag exists
    prov_tag = test_aci_tenant.mock_get_data(
        desired_monitor.serving_tenants[tn1.rn].aci_session,
        'mo/uni/tn-%s/out-default/instP-extnet/rsprov-c1/'
        'tag-openstack_aid' % tenant_name)
    self.assertNotEqual([], prov_tag[0])
    # Old contract still exists
    prov_def = test_aci_tenant.mock_get_data(
        desired_monitor.serving_tenants[tn1.rn].aci_session,
        'mo/uni/tn-%s/out-default/instP-extnet/rsprov-default' %
        tenant_name)
    self.assertNotEqual([], prov_def[0])
    # Verify all tree converged
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn1.root])
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn1.root])
    self._assert_reset_consistency()
def test_manual_rs(self):
    """A contract relation added manually on ACI for a managed EPG is
    reverted by AID (the EPG's AIM definition is authoritative), while
    relations added through AIM are kept."""
    agent = self._create_agent()
    tenant_name = 'test_manual_rs'
    current_config = agent.multiverse[0]['current']
    desired_config = agent.multiverse[0]['desired']
    current_monitor = agent.multiverse[2]['current']
    desired_monitor = agent.multiverse[2]['desired']
    # Route APIC POST/DELETE calls back into the mocked event pipeline.
    apic_client.ApicSession.post_body_dict = (
        self._mock_current_manager_post)
    apic_client.ApicSession.DELETE = self._mock_current_manager_delete
    # start by managing a single tenant (non-monitored)
    tn = resource.Tenant(name=tenant_name)
    self.aim_manager.create(self.ctx, tn)
    # Create a APP profile in such tenant
    self.aim_manager.create(
        self.ctx, resource.ApplicationProfile(
            tenant_name=tenant_name, name='app'))
    # Create an EPG
    epg = resource.EndpointGroup(
        tenant_name=tenant_name, app_profile_name='app', name='epg')
    self.aim_manager.create(self.ctx, epg)
    # Add 2 contracts
    self.aim_manager.create(
        self.ctx, resource.Contract(
            tenant_name=tenant_name, name='c1'))
    self.aim_manager.create(
        self.ctx, resource.Contract(
            tenant_name=tenant_name, name='c2'))
    # Serve
    self._first_serve(agent)
    self._observe_aci_events(current_config)
    # Reconcile
    agent._reconciliation_cycle()
    # Verify everything is fine
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn.root])
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn.root])
    self._assert_reset_consistency()
    # Now add a contract to the EPG through AIM
    self.aim_manager.update(self.ctx, epg, provided_contract_names=['c1'])
    # Observe, Reconcile, Verify
    agent._reconciliation_cycle()
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn.root])
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn.root])
    self._assert_reset_consistency()
    # Add the contract manually (should be removed)
    aci_contract_rs = {
        "fvRsProv": {
            "attributes": {
                "dn": "uni/tn-%s/ap-%s/epg-%s/rsprov-%s" % (
                    tenant_name, 'app', 'epg', 'c2'),
                "status": "created",
                "tnVzBrCPName": "c2"
            }
        }
    }
    self._set_events(
        [aci_contract_rs],
        manager=desired_monitor.serving_tenants[tn.rn], tag=False)
    self._observe_aci_events(current_config)
    # Observe, Reconcile, Verify
    agent._reconciliation_cycle()
    self._observe_aci_events(current_config)
    agent._reconciliation_cycle()
    self._assert_universe_sync(desired_monitor, current_monitor,
                               tenants=[tn.root])
    self._assert_universe_sync(desired_config, current_config,
                               tenants=[tn.root])
    self._assert_reset_consistency()
    # C2 RS is not to be found
    # NOTE(review): the mocked GET still returns the object body (without
    # 'status': created); the assertion pins that exact remaining state.
    self.assertEqual(
        [{'fvRsProv': {
            'attributes': {
                'dn': 'uni/tn-test_manual_rs/ap-app/epg-epg/rsprov-c2',
                'tnVzBrCPName': 'c2'}}}],
        test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + aci_contract_rs['fvRsProv']['attributes']['dn']))
    def test_monitored_state_change(self):
        """Move an EPG between monitored and managed state and back.

        Pre-existing (monitored) ACI objects are imported into AIM; the
        test then takes ownership of the EPG (monitored=False), verifies
        ownership tags appear in ACI, and finally reverts it to monitored,
        checking that the tags disappear while the contracts are kept.
        """
        agent = self._create_agent()
        tenant_name = 'test_monitored_state_change'
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        current_monitor = agent.multiverse[2]['current']
        desired_monitor = agent.multiverse[2]['desired']
        # Route APIC writes and deletes through the mocked backend.
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn = resource.Tenant(name=tenant_name, monitored=True)
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn)
        # Run loop for serving tenant
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        # Create some manual stuff
        aci_tn = self._get_example_aci_tenant(
            name=tenant_name, dn='uni/tn-%s' % tenant_name)
        aci_ap = self._get_example_aci_app_profile(
            dn='uni/tn-%s/ap-ap1' % tenant_name, name='ap1')
        aci_epg = self._get_example_aci_epg(
            dn='uni/tn-%s/ap-ap1/epg-epg1' % tenant_name)
        aci_contract = self._get_example_aci_contract(
            dn='uni/tn-%s/brc-c' % tenant_name)
        aci_prov_contract = self._get_example_provided_contract(
            dn='uni/tn-%s/ap-ap1/epg-epg1/rsprov-c' % tenant_name)
        self._set_events(
            [aci_tn, aci_ap, aci_epg, aci_contract, aci_prov_contract],
            manager=desired_monitor.serving_tenants[tn.rn], tag=False)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        # retrieve the corresponding AIM objects
        ap = self.aim_manager.get(self.ctx, resource.ApplicationProfile(
            tenant_name=tenant_name, name='ap1'))
        epg = self.aim_manager.get(self.ctx, resource.EndpointGroup(
            tenant_name=tenant_name, app_profile_name='ap1', name='epg1'))
        contract = self.aim_manager.get(self.ctx, resource.Contract(
            tenant_name=tenant_name, name='c'))
        self.assertTrue(bool(ap.monitored and epg.monitored and
                             contract.monitored))
        self.assertEqual(['c'], epg.provided_contract_names)
        # Now take control of the EPG
        self.aim_manager.update(self.ctx, epg, monitored=False)
        epg = self.aim_manager.get(self.ctx, resource.EndpointGroup(
            tenant_name=tenant_name, app_profile_name='ap1', name='epg1'))
        self.assertFalse(epg.monitored)
        # We keep and own the contracts
        self.assertEqual(['c'], epg.provided_contract_names)
        self._sync_and_verify(agent, current_config,
                              [(desired_config, current_config),
                               (desired_monitor, current_monitor)],
                              tenants=[tn.root])
        # Tag exists in ACI: ownership is now advertised on the EPG and
        # on its provided-contract relation.
        tag = test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + epg.dn + '/tag-openstack_aid')
        self.assertNotEqual([], tag)
        tag = test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + epg.dn + '/rsprov-c/tag-openstack_aid')
        self.assertNotEqual([], tag)
        # Run an empty change on the EPG, bringing it to sync pending
        self.aim_manager.update(self.ctx, epg)
        self._sync_and_verify(agent, current_config,
                              [(desired_config, current_config),
                               (desired_monitor, current_monitor)],
                              tenants=[tn.root])
        # Put back EPG into monitored state
        epg = self.aim_manager.update(self.ctx, epg, monitored=True)
        self.assertTrue(epg.monitored)
        self._sync_and_verify(agent, current_config,
                              [(desired_config, current_config),
                               (desired_monitor, current_monitor)],
                              tenants=[tn.root])
        # Tag doesn't exist anymore
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk, test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + epg.dn + '/rsprov-c/tag-openstack_aid')
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk, test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + epg.dn + '/tag-openstack_aid')
        # Object is in monitored universe and in good shape
        epg = self.aim_manager.get(self.ctx, epg)
        self.assertTrue(epg.monitored)
        # Still keeping whatever contract we had, but monitored this time
        self.assertEqual(['c'], epg.provided_contract_names)
        self._sync_and_verify(agent, current_config,
                              [(desired_config, current_config),
                               (desired_monitor, current_monitor)],
                              tenants=[tn.root])
        status = self.aim_manager.get_status(self.ctx, epg)
        self.assertEqual(status.SYNCED, status.sync_status)
def test_monitored_l3out_vrf_rs(self):
agent = self._create_agent()
tenant_name = 'test_monitored_l3out_vrf_rs'
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
current_monitor = agent.multiverse[2]['current']
desired_monitor = agent.multiverse[2]['desired']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn = resource.Tenant(name=tenant_name, monitored=True)
# Create tenant in AIM to start serving it
self.aim_manager.create(self.ctx, tn)
# Run loop for serving tenant
self._first_serve(agent)
self._observe_aci_events(current_config)
# Create some manual stuff
aci_tn = self._get_example_aci_tenant(
name=tenant_name, dn='uni/tn-%s' % tenant_name)
aci_l3o = self._get_example_aci_l3_out(
dn='uni/tn-%s/out-out' % tenant_name, name='out')
aci_l3o_vrf_rs = self._get_example_aci_l3_out_vrf_rs(
dn='uni/tn-%s/out-out/rsectx' % tenant_name, tnFvCtxName='foo')
self._set_events(
[aci_tn, aci_l3o, aci_l3o_vrf_rs],
manager=desired_monitor.serving_tenants[tn.rn], tag=False)
self._sync_and_verify(agent, current_config,
[(current_config, desired_config),
(current_monitor, desired_monitor)],
tenants=[tn.root])
l3o = self.aim_manager.get(self.ctx, resource.L3Outside(
tenant_name=tenant_name, name='out'))
self.assertIsNotNone(l3o)
self.assertTrue(l3o.monitored)
self.assertEqual('foo', l3o.vrf_name)
    def test_monitored_ext_net_contract_rs(self):
        """Manage contract RSs on a monitored ExternalNetwork.

        A monitored ExternalNetwork starts with no provided contracts;
        adding a provided contract through AIM must create (and tag) the
        fvRsProv in ACI, and removing it must delete both the RS and its
        owner tag.
        """
        agent = self._create_agent()
        tenant_name = 'test_monitored_ext_net_contract_rs'
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        current_monitor = agent.multiverse[2]['current']
        desired_monitor = agent.multiverse[2]['desired']
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn = resource.Tenant(name=tenant_name, monitored=True)
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn)
        # Run loop for serving tenant
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        # Create some manual stuff
        aci_tn = self._get_example_aci_tenant(
            name=tenant_name, dn='uni/tn-%s' % tenant_name)
        aci_l3o = self._get_example_aci_l3_out(
            dn='uni/tn-%s/out-out' % tenant_name, name='out')
        aci_ext_net = {'l3extInstP':
                       {'attributes':
                        {'dn': 'uni/tn-%s/out-out/instP-inet' % tenant_name,
                         'name': 'inet'}}}
        self._set_events(
            [aci_tn, aci_l3o, aci_ext_net],
            manager=desired_monitor.serving_tenants[tn.rn], tag=False)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        ext_net = self.aim_manager.get(self.ctx, resource.ExternalNetwork(
            tenant_name=tenant_name, l3out_name='out', name='inet'))
        self.assertIsNotNone(ext_net)
        self.assertTrue(ext_net.monitored)
        self.assertEqual([], ext_net.provided_contract_names)
        # Provide a contract through AIM: the RS must be pushed and tagged.
        self.aim_manager.update(self.ctx, ext_net,
                                provided_contract_names=['p1'])
        ext_net = self.aim_manager.get(self.ctx, ext_net)
        self.assertEqual(['p1'], ext_net.provided_contract_names)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        tag = test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + ext_net.dn + '/rsprov-p1/tag-openstack_aid')
        self.assertNotEqual([], tag)
        # Removing the contract must delete both the RS and its tag in ACI.
        self.aim_manager.update(self.ctx, ext_net,
                                provided_contract_names=[])
        ext_net = self.aim_manager.get(self.ctx, ext_net)
        self.assertEqual([], ext_net.provided_contract_names)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + ext_net.dn + '/rsprov-p1')
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn.rn].aci_session,
            'mo/' + ext_net.dn + '/rsprov-p1/tag-openstack_aid')
def test_aci_errors(self):
self.set_override('max_operation_retry', 2, 'aim')
self.set_override('retry_cooldown', -1, 'aim')
agent = self._create_agent()
tenant_name = 'test_manual_rs'
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
tn = resource.Tenant(name=tenant_name)
tn = self.aim_manager.create(self.ctx, tn)
# Serve tenant
self._first_serve(agent)
# Try to create the tenant in multiple iterations and test different
# errors
with mock.patch.object(utils, 'perform_harakiri') as harakiri:
# OPERATION_CRITICAL (fail object immediately)
apic_client.ApicSession.post_body_dict = mock.Mock(
side_effect=aexc.ApicResponseNotOk(
request='', status='400', reason='',
err_text='', err_code='122'))
# Observe and Reconcile
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
# Tenant object should be in sync_error state
self.assertEqual(
aim_status.AciStatus.SYNC_FAILED,
self.aim_manager.get_status(self.ctx, tn).sync_status)
# Put tenant back in pending state
desired_config._scheduled_recovery = 0
agent._reconciliation_cycle()
self.assertEqual(
aim_status.AciStatus.SYNC_PENDING,
self.aim_manager.get_status(self.ctx, tn).sync_status)
# SYSTEM_CRITICAL perform harakiri
apic_client.ApicSession.post_body_dict = mock.Mock(
side_effect=aexc.ApicResponseNoCookie(request=''))
self.assertEqual(0, harakiri.call_count)
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self.assertEqual(1, harakiri.call_count)
    def test_aim_recovery(self):
        """Scheduled recovery resurrects failed objects and purges leaks.

        An object stuck in SYNC_FAILED is only reset to SYNC_PENDING by
        the desired-universe scheduled recovery, which also cleans up
        orphaned AciStatus rows that no longer map to a real resource.
        """
        agent = self._create_agent()
        tenant_name = 'test_aim_recovery'
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        tn = resource.Tenant(name=tenant_name)
        tn = self.aim_manager.create(self.ctx, tn)
        # Serve tenant
        self._first_serve(agent)
        # Observe and Reconcile
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Put object in error state
        self.aim_manager.set_resource_sync_error(self.ctx, tn)
        # A cycle won't recover it
        agent._reconciliation_cycle()
        self.assertEqual(
            aim_status.AciStatus.SYNC_FAILED,
            self.aim_manager.get_status(self.ctx, tn).sync_status)
        # Unless there's a scheduled recovery (forced by zeroing the timer)
        desired_config._scheduled_recovery = 0
        agent._reconciliation_cycle()
        self.assertEqual(
            aim_status.AciStatus.SYNC_PENDING,
            self.aim_manager.get_status(self.ctx, tn).sync_status)
        # A new update has been scheduled
        self.assertNotEqual(0, desired_config._scheduled_recovery)
        # Simulate leaking status: an AciStatus row for a non-existent VRF
        self.aim_manager.create(self.ctx, aim_status.AciStatus(
            resource_type='VRF', resource_id='abcd',
            resource_root=tn.root, resource_dn='uni/some/dn'))
        # Prevent normal operation to cleanup the status
        self.aim_manager.delete_all(self.ctx, aim_tree.ActionLog)
        # Normal cycle will not fix it
        agent._reconciliation_cycle()
        leaking_st = self.aim_manager.get(self.ctx, aim_status.AciStatus(
            resource_type='VRF', resource_id='abcd',
            resource_root=tn.root, resource_dn='uni/some/dn'))
        self.assertIsNotNone(leaking_st)
        # Recovery will
        desired_config._scheduled_recovery = 0
        agent._reconciliation_cycle()
        leaking_st = self.aim_manager.get(self.ctx, aim_status.AciStatus(
            resource_type='VRF', resource_id='abcd',
            resource_root=tn.root, resource_dn='uni/some/dn'))
        self.assertIsNone(leaking_st)
@base.requires(['hooks'])
def test_multi_context_session(self):
tenant_name = 'test_transaction'
tenant_name2 = 'test_transaction2'
self.aim_manager.create(self.ctx, resource.Tenant(name=tenant_name))
ctx1 = context.AimContext(self.ctx.db_session)
self.aim_manager.create(ctx1, resource.Tenant(name=tenant_name2))
    def test_non_tenant_roots(self):
        """Non-tenant root objects (VMM, phys domain, topology) sync too.

        Verifies that roots living outside uni/tn-* (VMM policy/domain,
        physical domain, fabric topology and pod) are pushed to ACI and
        that tree resets stay consistent for each of those roots.
        """
        agent = self._create_agent()
        vmm = resource.VMMDomain(type='OpenStack', name='ostack')
        vmmp = resource.VMMPolicy(type='OpenStack')
        phys = resource.PhysicalDomain(name='physdomain')
        topology = resource.Topology()
        pod = resource.Pod(name='1')
        self.aim_manager.create(self.ctx, vmmp)
        self.aim_manager.create(self.ctx, vmm)
        self.aim_manager.create(self.ctx, phys)
        self.aim_manager.create(self.ctx, topology)
        self.aim_manager.create(self.ctx, pod)
        current_config = agent.multiverse[0]['current']
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        # Run loop for serving tenant
        self._first_serve(agent)
        # The pod's parent MO must exist before the pod can be observed.
        pod_parent = {
            'fabricTopology': {'attributes': {'dn': 'topology'}}}
        self._set_events(
            [pod_parent], manager=current_config.serving_tenants[topology.rn],
            tag=False)
        self._observe_aci_events(current_config)
        # NOTE: from here on, `vmm` and `pod` are rebound to the mocked
        # ACI query results for presence assertions.
        vmm = test_aci_tenant.mock_get_data(
            current_config.serving_tenants['vmmp-OpenStack'].aci_session,
            'mo/' + vmm.dn)
        self.assertNotEqual([], vmm)
        physl = test_aci_tenant.mock_get_data(
            current_config.serving_tenants[phys.rn].aci_session,
            'mo/' + phys.dn)
        self.assertNotEqual([], physl)
        self.assertEqual('topology/pod-1', pod.dn)
        pod = test_aci_tenant.mock_get_data(
            current_config.serving_tenants[topology.rn].aci_session,
            'mo/' + pod.dn)
        self.assertNotEqual([], pod)
        self._assert_reset_consistency()
        self._assert_reset_consistency(vmmp.rn)
        self._assert_reset_consistency(phys.rn)
        self._assert_reset_consistency(topology.rn)
    def test_non_rs_nested_objects(self):
        """Nested non-RS children (vnsRedirectDest) follow AIM ownership.

        Destinations declared on an AIM ServiceRedirectPolicy are pushed
        to ACI, while a destination added manually in ACI (untagged) is
        removed on reconciliation because the policy is AIM-owned.
        """
        agent = self._create_agent()
        tenant_name = 'test_non_rs_nested_objects'
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        current_monitor = agent.multiverse[2]['current']
        desired_monitor = agent.multiverse[2]['desired']
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn = resource.Tenant(name=tenant_name)
        tn = self.aim_manager.create(self.ctx, tn)
        # Serve tenant
        self._first_serve(agent)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        # Create SRP parent (the vnsSvcCont container must pre-exist)
        srp_parent = {
            'vnsSvcCont': {
                'attributes': {'dn': 'uni/tn-%s/svcCont' % tenant_name}}}
        self._set_events(
            [srp_parent], manager=current_config.serving_tenants[tn.rn],
            tag=False)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        srp = service_graph.ServiceRedirectPolicy(
            tenant_name=tenant_name, name='name')
        self.aim_manager.create(self.ctx, srp)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        # Create dest policy
        self.aim_manager.update(self.ctx, srp,
                                destinations=[{'ip': '1.1.1.1',
                                               'mac': 'aa:aa:aa:aa:aa'}])
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        dest = test_aci_tenant.mock_get_data(
            current_config.serving_tenants[tn.rn].aci_session,
            'mo/' + srp.dn + '/RedirectDest_ip-[1.1.1.1]')
        self.assertNotEqual([], dest)
        # Create one manually (untagged, so not owned by AIM)
        aci_dst = {
            'vnsRedirectDest': {
                'attributes': {'dn': srp.dn + '/RedirectDest_ip-[1.1.1.2]',
                               'ip': '1.1.1.2', 'mac': 'aa:aa:aa:aa:ab'}}}
        self._set_events(
            [aci_dst], manager=current_config.serving_tenants[tn.rn],
            tag=False)
        self._observe_aci_events(current_config)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        # Dest deleted
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            current_config.serving_tenants[tn.rn].aci_session,
            'mo/' + srp.dn + '/RedirectDest_ip-[1.1.1.2]')
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
def test_monitored_objects_sync_state(self):
agent = self._create_agent()
tenant_name = 'test_monitored_objects_sync_state'
current_config = agent.multiverse[0]['current']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
tn = resource.Tenant(name=tenant_name, monitored=True)
tn = self.aim_manager.create(self.ctx, tn)
self._first_serve(agent)
tenant = {
'fvTenant': {
'attributes': {'dn': 'uni/tn-%s' % tenant_name,
'nameAlias': 'test'}}}
self._set_events(
[tenant], manager=current_config.serving_tenants[tn.rn],
tag=False)
self.aim_manager.create(self.ctx, resource.ApplicationProfile(
tenant_name=tenant_name, name='ap-name'))
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self.assertEqual(aim_status.AciStatus.SYNC_PENDING,
self.aim_manager.get_status(self.ctx, tn).sync_status)
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self.assertEqual(aim_status.AciStatus.SYNCED,
self.aim_manager.get_status(self.ctx, tn).sync_status)
@base.requires(['k8s'])
def test_k8s_node_faults(self):
agent = self._create_agent()
desired_oper = agent.multiverse[1]['desired']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
vmm = resource.VMMDomain(type='Kubernetes', name='kubernetes',
monitored=True)
self.aim_manager.create(self.ctx, vmm)
agent._reconciliation_cycle()
f1 = aim_status.AciFault(
fault_code='F609007',
external_identifier='comp/prov-Kubernetes/'
'ctrlr-[kubernetes]-kubernetes/'
'injcont/ns-[default]/'
'svc-[frontend]/p-http-prot-tcp-t-80/'
'fault-F609007')
self.assertIsNotNone(self.aim_manager.create(self.ctx, f1))
# see if it gets deleted
self._observe_aci_events(desired_oper)
agent._reconciliation_cycle()
self.assertIsNone(self.aim_manager.get(self.ctx, f1))
def test_create_delete(self):
agent = self._create_agent()
tenant_name = 'test_non_rs_nested_objects'
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
current_monitor = agent.multiverse[2]['current']
desired_monitor = agent.multiverse[2]['desired']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn = resource.Tenant(name=tenant_name, monitored=True)
self.aim_manager.create(self.ctx, tn)
aci_tn = self._get_example_aci_tenant(
name=tenant_name, dn='uni/tn-%s' % tenant_name)
self._first_serve(agent)
self._set_events(
[aci_tn], manager=desired_monitor.serving_tenants[tn.rn],
tag=False)
self._observe_aci_events(current_config)
ctr = resource.Contract(
tenant_name=tenant_name,
name='rtr_fb8f33cf-fe9c-48a9-a7b2-aa35ac63f189')
sub = resource.ContractSubject(
tenant_name=tenant_name, contract_name=ctr.name,
name='route', bi_filters=['noirolab_AnyFilter'])
with self.ctx.store.begin(subtransactions=True):
self.aim_manager.create(self.ctx, ctr)
self.aim_manager.create(self.ctx, sub)
with self.ctx.store.begin(subtransactions=True):
self.aim_manager.delete(self.ctx, sub)
self.aim_manager.delete(self.ctx, ctr)
desired_config.observe(self.ctx)
self._sync_and_verify(agent, current_config,
[(current_config, desired_config),
(current_monitor, desired_monitor)],
tenants=[tn.root])
def test_max_action_logs(self):
agent = self._create_agent()
tenant_name = 'test_non_rs_nested_objects'
tenant_name2 = tenant_name + '2'
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
current_monitor = agent.multiverse[2]['current']
desired_monitor = agent.multiverse[2]['desired']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn = resource.Tenant(name=tenant_name)
tn2 = resource.Tenant(name=tenant_name2)
self.aim_manager.create(self.ctx, tn)
self.aim_manager.create(self.ctx, tn2)
self._first_serve(agent)
original_max_value = hashtree_db_listener.MAX_EVENTS_PER_ROOT
try:
hashtree_db_listener.MAX_EVENTS_PER_ROOT = 0
for tenant in [tenant_name, tenant_name2]:
bd = resource.BridgeDomain(tenant_name=tenant, name='bd',
vrf_name='vrf')
vrf = resource.VRF(tenant_name=tenant, name='vrf')
self.aim_manager.create(self.ctx, bd)
self.aim_manager.create(self.ctx, vrf)
# Two reset logs exist
logs = self.aim_manager.find(self.ctx, aim_tree.ActionLog,
action='reset')
self.assertEqual(2, len(logs))
for log in logs:
self.assertEqual(log.action, aim_tree.ActionLog.RESET)
# Even so, syncing operations work properly through full reset
self._sync_and_verify(agent, current_config,
[(current_config, desired_config),
(current_monitor, desired_monitor)],
tenants=[tn.root])
dest = test_aci_tenant.mock_get_data(
current_config.serving_tenants[tn.rn].aci_session,
'mo/' + bd.dn)
self.assertNotEqual([], dest)
# The tree needs_reset attribute should be set to False
for tenant in [tenant_name, tenant_name2]:
base_tree = self.tt_mgr.get_base_tree(self.ctx, 'tn-' + tenant)
self.assertFalse(base_tree.needs_reset)
finally:
hashtree_db_listener.MAX_EVENTS_PER_ROOT = original_max_value
    def test_divergence_reset(self):
        """Persistent push failures escalate to reset, then purge.

        When pushing a BD always fails (POST replaced by a no-op Mock),
        the current universe retries up to reset_retry_limit, then resets
        both universes; continued failure up to purge_retry_limit puts
        the node in error state, after which the universes converge again.
        """
        agent = self._create_agent()
        tenant_name = 'test_divergence_reset'
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        current_monitor = agent.multiverse[2]['current']
        desired_monitor = agent.multiverse[2]['desired']
        # Disable backoff so every cycle actually retries.
        current_config.max_backoff_time = -1
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn = resource.Tenant(name=tenant_name)
        self.aim_manager.create(self.ctx, tn)
        self._first_serve(agent)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
        # From here on POSTs are swallowed by a plain Mock: the BD can
        # never actually reach ACI.
        apic_client.ApicSession.post_body_dict = mock.Mock()
        bd = resource.BridgeDomain(tenant_name=tenant_name, name='bd1')
        self.aim_manager.create(self.ctx, bd)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Universe not in sync
        self.assertRaises(Exception,
                          self._assert_universe_sync, desired_config,
                          current_config)
        self.assertEqual(1, len(current_config._sync_log))
        current_config.reset = mock.Mock()
        desired_config.reset = mock.Mock()
        current_config.push_resources = mock.Mock()
        # Until the retry limit, each cycle re-pushes the BD, no reset.
        for x in range(current_config.reset_retry_limit):
            current_config.push_resources.reset_mock()
            agent._reconciliation_cycle()
            self.assertEqual(0, current_config.reset.call_count)
            self.assertEqual(0, desired_config.reset.call_count)
            current_config.push_resources.assert_called_once_with(
                mock.ANY, {'create': [bd], 'delete': []})
        # The next cycle resets both universes instead of pushing.
        current_config.push_resources.reset_mock()
        agent._reconciliation_cycle()
        current_config.reset.assert_called_once_with(mock.ANY, [tn.rn])
        desired_config.reset.assert_called_once_with(mock.ANY, [tn.rn])
        self.assertEqual(0, current_config.push_resources.call_count)
        # Still not in sync
        self.assertRaises(Exception,
                          self._assert_universe_sync, desired_config,
                          current_config)
        current_config.reset.reset_mock()
        desired_config.reset.reset_mock()
        # go for the purge
        for x in range(current_config.reset_retry_limit,
                       current_config.purge_retry_limit - 1):
            current_config.push_resources.reset_mock()
            agent._reconciliation_cycle()
            self.assertEqual(0, current_config.reset.call_count)
            self.assertEqual(0, desired_config.reset.call_count)
            current_config.push_resources.assert_called_once_with(
                mock.ANY, {'create': [bd], 'delete': []})
        # Past the purge limit, the BD is dropped from the push entirely.
        current_config.push_resources.reset_mock()
        agent._reconciliation_cycle()
        self.assertEqual(0, current_config.reset.call_count)
        self.assertEqual(0, desired_config.reset.call_count)
        current_config.push_resources.assert_called_once_with(
            mock.ANY, {'create': [], 'delete': []})
        # Now node should be in error state, thus the universes are in sync
        self._assert_universe_sync(desired_config, current_config)
        self._sync_and_verify(agent, current_config,
                              [(current_config, desired_config),
                               (current_monitor, desired_monitor)],
                              tenants=[tn.root])
    def test_skip_for_managed(self):
        """Out-of-band contract RSs on a managed EPG are not imported.

        A monitored EPG absorbs manually-created contract relations into
        AIM; on an AIM-owned EPG the same untagged relations remain in
        ACI but must not show up in the AIM EndpointGroup.
        """
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        tenant_name = 'test_skip_for_managed'
        current_monitor = agent.multiverse[2]['current']
        desired_monitor = agent.multiverse[2]['desired']
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        # start by managing a single tenant (non-monitored)
        tn1 = resource.Tenant(name=tenant_name, monitored=True)
        aci_tn = self._get_example_aci_tenant(
            name=tenant_name, dn='uni/tn-%s' % tenant_name, nameAlias='nice')
        self.aim_manager.create(self.ctx, tn1)
        # Run loop for serving tenant
        self._first_serve(agent)
        self._set_events(
            [aci_tn], manager=desired_monitor.serving_tenants[tn1.rn],
            tag=False)
        self._observe_aci_events(current_config)
        # Simulate pre existing EPG with provided/consumed contracts
        aci_ap = self._get_example_aci_app_profile(
            name='ap', dn='uni/tn-%s/ap-ap' % tenant_name)
        aci_epg = self._get_example_aci_epg(
            name='default', dn='uni/tn-%s/ap-ap/epg-default' % tenant_name)
        aci_rsprov = self._get_example_provided_contract(
            dn='uni/tn-%s/ap-ap/epg-default/rsprov-c' % tenant_name)
        aci_rscons = self._get_example_consumed_contract(
            dn='uni/tn-%s/ap-ap/epg-default/rscons-c' % tenant_name)
        self._set_events(
            [aci_ap, aci_epg, aci_rsprov, aci_rscons],
            manager=desired_monitor.serving_tenants[tn1.rn],
            tag=False)
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        # Observe ACI events
        self._observe_aci_events(current_config)
        # Run the loop for reconciliation
        agent._reconciliation_cycle()
        # Run loop again to set SYNCED state
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # A monitored EPG should now exist in AIM with its contracts
        aim_epg = self.aim_manager.get(self.ctx, resource.EndpointGroup(
            tenant_name=tenant_name, app_profile_name='ap', name='default'))
        self.assertTrue(aim_epg.monitored)
        self.assertEqual(['c'], aim_epg.provided_contract_names)
        self.assertEqual(['c'], aim_epg.consumed_contract_names)
        # Create managed EPG
        self.aim_manager.create(
            self.ctx, resource.EndpointGroup(tenant_name=tenant_name,
                                             app_profile_name='ap',
                                             name='default2'))
        self._observe_aci_events(current_config)
        # Run the loop for reconciliation
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Add contracts manually (untagged) to the managed EPG
        aci_rsprov = self._get_example_provided_contract(
            dn='uni/tn-%s/ap-ap/epg-default2/rsprov-c' % tenant_name)
        aci_rscons = self._get_example_consumed_contract(
            dn='uni/tn-%s/ap-ap/epg-default2/rscons-c' % tenant_name)
        self._set_events([aci_rsprov, aci_rscons],
                         manager=desired_monitor.serving_tenants[tn1.rn],
                         tag=False)
        self._observe_aci_events(current_config)
        # Run the loop for reconciliation
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Contracts not in AIM, but still in ACI
        aim_epg = self.aim_manager.get(self.ctx, resource.EndpointGroup(
            tenant_name=tenant_name, app_profile_name='ap', name='default2'))
        self.assertFalse(aim_epg.monitored)
        self.assertEqual([], aim_epg.provided_contract_names)
        self.assertEqual([], aim_epg.consumed_contract_names)
        dest = test_aci_tenant.mock_get_data(
            current_config.serving_tenants[tn1.rn].aci_session,
            'mo/' + aci_rsprov['fvRsProv']['attributes']['dn'])
        self.assertNotEqual([], dest)
        dest = test_aci_tenant.mock_get_data(
            current_config.serving_tenants[tn1.rn].aci_session,
            'mo/' + aci_rscons['fvRsCons']['attributes']['dn'])
        self.assertNotEqual([], dest)
        self._assert_universe_sync(desired_monitor, current_monitor,
                                   tenants=[tn1.root])
        self._assert_reset_consistency(tn1.rn)
def test_skip_monitored_root(self):
agent = self._create_agent()
current_config = agent.multiverse[0]['current']
desired_monitor = agent.multiverse[2]['desired']
current_monitor = agent.multiverse[2]['current']
tenant_name = 'test_skip_monitored_root'
self.assertEqual({}, desired_monitor.aci_session._data_stash)
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn1 = resource.Tenant(name=tenant_name, monitored=True)
# Create tenant in AIM to start observing it
self.aim_manager.create(self.ctx, tn1)
self._first_serve(agent)
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
# Tenant still exists
self.assertIsNotNone(self.aim_manager.get(self.ctx, tn1))
# Action cache is empty
self.assertEqual({}, desired_monitor._sync_log)
self.assertEqual({'create': {}, 'delete': {}},
current_monitor._sync_log.values()[0])
    def test_tenant_delete_behavior(self):
        """Deleting a Tenant that still has children fails its VRF child.

        After removing the AIM Tenant while a VRF still exists under it,
        the universes still converge, the orphan VRF ends up SYNC_FAILED
        (retries exhausted) and the Tenant itself is gone from AIM.
        """
        tenant_name = 'test_tenant_delete_behavior'
        self.set_override('max_operation_retry', 2, 'aim')
        self.set_override('retry_cooldown', -1, 'aim')
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        # start by managing a single tenant (non-monitored)
        tn1 = resource.Tenant(name=tenant_name)
        vrf = resource.VRF(tenant_name=tenant_name, name='vrf1')
        self.aim_manager.create(self.ctx, tn1)
        self.aim_manager.create(self.ctx, vrf)
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Delete the tenant while the VRF is still present under it.
        self.aim_manager.delete(self.ctx, tn1)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # push config
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn1.root])
        self.assertEqual(
            aim_status.AciStatus.SYNC_FAILED,
            self.aim_manager.get_status(self.ctx, vrf).sync_status)
        self.assertIsNone(self.aim_manager.get(self.ctx, tn1))
    def test_sync_flag(self):
        """The per-object sync flag gates pushing to ACI.

        Setting sync=False on an EPG removes it from ACI and keeps it out
        even when its status or faults are touched; sync=True restores it
        and all trees converge again.
        """
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        desired_monitor = agent.multiverse[2]['desired']
        current_monitor = agent.multiverse[2]['current']
        tenant_name = 'test_sync_flag'
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn1 = resource.Tenant(name=tenant_name)
        ap = resource.ApplicationProfile(tenant_name=tenant_name, name='test')
        epg = resource.EndpointGroup(tenant_name=tenant_name,
                                     app_profile_name='test', name='test')
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn1)
        self.aim_manager.create(self.ctx, ap)
        self.aim_manager.create(self.ctx, epg)
        # Run loop for serving tenant
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Verify epg in APIC
        prov = test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn1.rn].aci_session,
            'mo/uni/tn-%s/ap-test/epg-test' % tenant_name)
        self.assertNotEqual([], prov)
        # Flip sync flag
        self.aim_manager.update(self.ctx, epg, sync=False)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Verify epg not in APIC
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn1.rn].aci_session,
            'mo/uni/tn-%s/ap-test/epg-test' % tenant_name)
        # Status doesn't re-create the object
        self.aim_manager.get_status(self.ctx, epg)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Verify epg not in APIC
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn1.rn].aci_session,
            'mo/uni/tn-%s/ap-test/epg-test' % tenant_name)
        # Or even faults
        self.aim_manager.set_fault(
            self.ctx, epg, aim_status.AciFault(
                fault_code='516',
                external_identifier='uni/tn-%s/ap-test/epg-test/'
                                    'fault-516' % tenant_name))
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Verify epg not in APIC
        self.assertRaises(
            apic_client.cexc.ApicResponseNotOk,
            test_aci_tenant.mock_get_data,
            desired_monitor.serving_tenants[tn1.rn].aci_session,
            'mo/uni/tn-%s/ap-test/epg-test' % tenant_name)
        # Resetting sync flag will bring it back
        self.aim_manager.update(self.ctx, epg, sync=True)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Verify epg in APIC
        prov = test_aci_tenant.mock_get_data(
            desired_monitor.serving_tenants[tn1.rn].aci_session,
            'mo/uni/tn-%s/ap-test/epg-test' % tenant_name)
        self.assertNotEqual([], prov)
        # Verify all tree converged
        self._assert_universe_sync(desired_monitor, current_monitor,
                                   tenants=[tn1.root])
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn1.root])
        self._assert_reset_consistency()
    def test_sync_flag_monitored(self):
        # Sync-flag handling for a *monitored* (ACI-owned) EPG: setting
        # sync=False on a monitored object must not delete it from APIC, and
        # the agent is expected to flip the flag back to True on its own.
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        desired_monitor = agent.multiverse[2]['desired']
        current_monitor = agent.multiverse[2]['current']
        tenant_name = 'test_sync_flag'
        # Route APIC writes/deletes through the in-memory mock backend.
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn1 = resource.Tenant(name=tenant_name)
        ap = resource.ApplicationProfile(tenant_name=tenant_name, name='test')
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn1)
        self.aim_manager.create(self.ctx, ap)
        # Run loop for serving tenant
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # Manual EPG: injected as an ACI event without an AIM ownership tag,
        # so it surfaces in AIM as a monitored object.
        aci_epg = self._get_example_aci_epg(
            tenant_name=tenant_name, name='test',
            dn='uni/tn-%s/ap-test/epg-test' % tenant_name)
        self._set_events(
            [aci_epg],
            manager=desired_monitor.serving_tenants[tn1.rn],
            tag=False)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        epg = resource.EndpointGroup(tenant_name=tenant_name,
                                     app_profile_name='test', name='test')
        epg = self.aim_manager.get(self.ctx, epg)
        self.assertTrue(epg.monitored)
        # Remove from sync log
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self.aim_manager.update(self.ctx, epg, sync=False)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Set back to normal: the agent re-enables sync on monitored objects.
        epg = self.aim_manager.get(self.ctx, epg)
        self.assertTrue(epg.sync)
        # Verify all tree converged
        self._assert_universe_sync(desired_monitor, current_monitor,
                                   tenants=[tn1.root])
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn1.root])
        self._assert_reset_consistency()
    def test_bgp(self):
        # Build a BGP-enabled L3Out hierarchy down to an interface-level BGP
        # peer profile and verify the peer object reaches SYNCED status.
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        desired_monitor = agent.multiverse[2]['desired']
        current_monitor = agent.multiverse[2]['current']
        # NOTE(review): 'test_gbp' looks like a typo for 'test_bgp'; harmless,
        # it is only used as a tenant name.
        tenant_name = 'test_gbp'
        # Route APIC writes/deletes through the in-memory mock backend.
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn1 = resource.Tenant(name=tenant_name)
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn1)
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        # L3Out -> node profile -> interface profile -> interface chain.
        self.aim_manager.create(
            self.ctx, resource.L3Outside(tenant_name=tenant_name,
                                         name='testOut1',
                                         vrf_name='ctx1',
                                         l3_domain_dn='uni/foo',
                                         bgp_enable=True))
        self.aim_manager.create(
            self.ctx,
            resource.L3OutNodeProfile(tenant_name=tenant_name,
                                      l3out_name='testOut1',
                                      name='testNP1'))
        self.aim_manager.create(
            self.ctx,
            resource.L3OutInterfaceProfile(tenant_name=tenant_name,
                                           l3out_name='testOut1',
                                           node_profile_name='testNP1',
                                           name='testLifP1'))
        self.aim_manager.create(
            self.ctx,
            resource.L3OutInterface(tenant_name=tenant_name,
                                    l3out_name='testOut1',
                                    node_profile_name='testNP1',
                                    interface_profile_name='testLifP1',
                                    interface_path='topology/pod-1/paths-101/'
                                                   'pathep-[eth1/1]'))
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # BGP peer prefix on the interface created above.
        bgp = self.aim_manager.create(self.ctx,
                                      resource.L3OutInterfaceBgpPeerP(
                                          tenant_name=tenant_name,
                                          l3out_name='testOut1',
                                          node_profile_name='testNP1',
                                          interface_profile_name='testLifP1',
                                          interface_path='topology/pod-1/'
                                                         'paths-101/pathep-[eth1/1]',
                                          addr='1.1.1.0/24',
                                          asn="65000"))
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Both monitored and config universes must have converged.
        self._assert_universe_sync(desired_monitor, current_monitor,
                                   tenants=[tn1.root])
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn1.root])
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # The BGP peer object itself must be fully synced.
        status = self.aim_manager.get_status(self.ctx, bgp)
        self.assertEqual(aim_status.AciStatus.SYNCED, status.sync_status)
def test_monitored_sg_same_seq(self):
template = {"vnsAbsGraph": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3"}}}
term_prov = {"vnsAbsTermNodeProv": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeProv-T2"}}}
outterm_prov = {"vnsOutTerm__Prov": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeProv-T2/outtmnl"}}}
interm_prov = {"vnsInTerm__Prov": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeProv-T2/intmnl"}}}
conn1 = {"vnsAbsTermConn__Prov": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeProv-T2/AbsTConn"}}}
termprov = {"vnsAbsTermNodeCon": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeCon-T1"}}}
outterm_cons = {"vnsOutTerm__Con": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeCon-T1/outtmnl"}}}
interm_cons = {"vnsInTerm__Con": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeCon-T1/intmnl"}}}
conn2 = {"vnsAbsTermConn__Con": {
"attributes": {"dn": "uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeCon-T1/AbsTConn"}}}
node1 = {"vnsAbsNode": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW4",
"routingMode": "Redirect", "sequenceNumber": "0"}}}
node1_ldev = {"vnsRsNodeToLDev": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW4/"
"rsNodeToLDev"}}}
node1_conn1 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW4/"
"AbsFConn-provider"}}}
node1_conn2 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW4/"
"AbsFConn-consumer"}}}
node2 = {"vnsAbsNode": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW6",
"routingMode": "Redirect", "sequenceNumber": "0"}}}
node2_ldev = {"vnsRsNodeToLDev": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW6/"
"rsNodeToLDev"}}}
node2_conn1 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW6/"
"AbsFConn-provider"}}}
node2_conn2 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW6/"
"AbsFConn-consumer"}}}
node3 = {"vnsAbsNode": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW5",
"routingMode": "Redirect", "sequenceNumber": "0"}}}
node3_ldev = {"vnsRsNodeToLDev": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW5/"
"rsNodeToLDev"}}}
node3_conn1 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW5/"
"AbsFConn-provider"}}}
node3_conn2 = {"vnsAbsFuncConn": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsNode-FW5/"
"AbsFConn-consumer"}}}
abs_conn1 = {"vnsAbsConnection": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C4"}}}
abs_conn1_connector1 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C4/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeProv-T2/AbsTConn]"}}}
abs_conn1_connector2 = {"vnsRsAbsConnectionConns": {
"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C4/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsNode-FW6/AbsFConn-provider]"}}}
abs_conn2 = {"vnsAbsConnection": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C2"}}}
abs_conn2_connector1 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C2/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3"
"/AbsNode-FW5/AbsFConn-consumer]"}}}
abs_conn2_connector2 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C2/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsNode-FW4/AbsFConn-provider]"}}}
abs_conn3 = {"vnsAbsConnection": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C1"}}}
abs_conn3_connector1 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C1/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsTermNodeCon-T1/AbsTConn]"}}}
abs_conn3_connector2 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C1/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsNode-FW4/AbsFConn-consumer]"}}}
abs_conn4 = {"vnsAbsConnection": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C3"}}}
abs_conn4_connector1 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C3/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsNode-FW5/AbsFConn-provider]"}}}
abs_conn4_connector2 = {"vnsRsAbsConnectionConns": {"attributes": {
"dn": "uni/tn-common/AbsGraph-PBRGraph3/AbsConnection-C3/"
"rsabsConnectionConns-[uni/tn-common/AbsGraph-PBRGraph3/"
"AbsNode-FW6/AbsFConn-consumer]"}}}
agent = self._create_agent()
current_config = agent.multiverse[0]['current']
desired_monitor = agent.multiverse[2]['desired']
tenant_name = 'common'
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn1 = resource.Tenant(name=tenant_name, monitored=True)
tenant = {
'fvTenant': {'attributes': {'dn': 'uni/tn-%s' % tenant_name}}}
# Create tenant in AIM to start serving it
self.aim_manager.create(self.ctx, tn1)
# Run loop for serving tenant
self._first_serve(agent)
self._set_events(
[tenant, template, term_prov, outterm_prov, interm_prov, conn1,
termprov, outterm_cons, interm_cons, conn2, node1, node1_ldev,
node1_conn1, node1_conn2, node2, node2_ldev, node2_conn1,
node2_conn2, node3, node3_ldev, node3_conn1, node3_conn2,
abs_conn1, abs_conn1_connector1, abs_conn1_connector2, abs_conn2,
abs_conn2_connector1, abs_conn2_connector2, abs_conn3,
abs_conn3_connector1, abs_conn3_connector2, abs_conn4,
abs_conn4_connector1, abs_conn4_connector2],
manager=desired_monitor.serving_tenants[tn1.rn],
tag=False)
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self.assertEqual(
1,
len(self.aim_manager.find(self.ctx, service_graph.ServiceGraph)))
self.assertEqual(
3,
len(self.aim_manager.find(self.ctx,
service_graph.ServiceGraphNode)))
self.assertEqual(
4,
len(self.aim_manager.find(self.ctx,
service_graph.ServiceGraphConnection)))
def test_redirect_policy(self):
agent = self._create_agent()
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
tenant_name = 'test_redirect_policy'
desired_monitor = agent.multiverse[2]['desired']
apic_client.ApicSession.post_body_dict = (
self._mock_current_manager_post)
apic_client.ApicSession.DELETE = self._mock_current_manager_delete
tn = resource.Tenant(name=tenant_name)
self.aim_manager.create(self.ctx, tn)
# Run loop for serving tenant
self._first_serve(agent)
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
srp_parent = {
'vnsSvcCont': {
'attributes': {'dn': 'uni/tn-%s/svcCont' % tenant_name}}}
self._set_events(
[srp_parent], manager=current_config.serving_tenants[tn.rn],
tag=False)
agent._reconciliation_cycle()
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
red_pol = service_graph.ServiceRedirectPolicy(tenant_name=tenant_name,
name='r1')
red_pol = self.aim_manager.create(self.ctx, red_pol)
agent._reconciliation_cycle()
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self._assert_universe_sync(desired_config, current_config,
tenants=[tn.root])
# Ip SLA RS doesn't exist
self.assertRaises(
apic_client.cexc.ApicResponseNotOk, test_aci_tenant.mock_get_data,
desired_monitor.serving_tenants[tn.rn].aci_session,
'mo/' + red_pol.dn + '/rsIPSLAMonitoringPol')
# Add only tenant name
red_pol = self.aim_manager.update(
self.ctx, red_pol, monitoring_policy_tenant_name='common')
agent._reconciliation_cycle()
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self._assert_universe_sync(desired_config, current_config,
tenants=[tn.root])
self.assertRaises(
apic_client.cexc.ApicResponseNotOk, test_aci_tenant.mock_get_data,
desired_monitor.serving_tenants[tn.rn].aci_session,
'mo/' + red_pol.dn + '/rsIPSLAMonitoringPol')
# Add also policy name
red_pol = self.aim_manager.update(
self.ctx, red_pol, monitoring_policy_name='test')
agent._reconciliation_cycle()
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self._assert_universe_sync(desired_config, current_config,
tenants=[tn.root])
self.assertIsNotNone(test_aci_tenant.mock_get_data(
desired_monitor.serving_tenants[tn.rn].aci_session,
'mo/' + red_pol.dn + '/rsIPSLAMonitoringPol'))
# Reset tenant name
red_pol = self.aim_manager.update(
self.ctx, red_pol, monitoring_policy_tenant_name='')
agent._reconciliation_cycle()
self._observe_aci_events(current_config)
agent._reconciliation_cycle()
self._assert_universe_sync(desired_config, current_config,
tenants=[tn.root])
self.assertRaises(
apic_client.cexc.ApicResponseNotOk, test_aci_tenant.mock_get_data,
desired_monitor.serving_tenants[tn.rn].aci_session,
'mo/' + red_pol.dn + '/rsIPSLAMonitoringPol')
    def test_isolated_tenants(self):
        # A sync failure confined to one tenant must not prevent other
        # tenants from converging (tenant isolation).
        agent = self._create_agent()
        current_config = agent.multiverse[0]['current']
        desired_config = agent.multiverse[0]['desired']
        tenant_name1 = 'test_isolated_tenants1'
        tenant_name2 = 'test_isolated_tenants2'
        apic_client.ApicSession.post_body_dict = (
            self._mock_current_manager_post)
        apic_client.ApicSession.DELETE = self._mock_current_manager_delete
        tn1 = resource.Tenant(name=tenant_name1)
        tn2 = resource.Tenant(name=tenant_name2)
        # Create tenant in AIM to start serving it
        self.aim_manager.create(self.ctx, tn1)
        self.aim_manager.create(self.ctx, tn2)
        self._first_serve(agent)
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        ap1 = resource.ApplicationProfile(tenant_name=tenant_name1,
                                          name='test')
        ap2 = resource.ApplicationProfile(tenant_name=tenant_name2,
                                          name='test')
        self.aim_manager.create(self.ctx, ap1)
        self.aim_manager.create(self.ctx, ap2)
        # Disturb tenant2 sync: monkey-patch get_resources so any diff whose
        # root belongs to tenant2 raises before it can be pushed.
        old_get_resources = desired_config.get_resources

        def get_resources(diffs):
            if diffs:
                root = tree_manager.AimHashTreeMaker._extract_root_rn(diffs[0])
                if tenant_name2 in root:
                    raise Exception("disturbed!")
            return old_get_resources(diffs)
        desired_config.get_resources = get_resources
        self._observe_aci_events(current_config)
        agent._reconciliation_cycle()
        self._observe_aci_events(current_config)
        # Verify tenant 1 synced
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn1.root])
        # Tenant 2 is not synced
        self._assert_universe_sync(desired_config, current_config,
                                   tenants=[tn2.root], negative=True)
def _verify_get_relevant_state(self, agent):
current_config = agent.multiverse[0]['current']
desired_config = agent.multiverse[0]['desired']
current_oper = agent.multiverse[1]['current']
desired_oper = agent.multiverse[1]['desired']
current_monitor = agent.multiverse[2]['current']
desired_monitor = agent.multiverse[2]['desired']
self.assertEqual([current_config.state, desired_monitor.state,
desired_oper.state],
current_config.get_relevant_state_for_read())
self.assertEqual([desired_config.state, current_monitor.state],
desired_config.get_relevant_state_for_read())
self.assertEqual([current_oper.state],
current_oper.get_relevant_state_for_read())
self.assertEqual([current_config.state, desired_monitor.state,
desired_oper.state],
desired_oper.get_relevant_state_for_read())
self.assertEqual([current_monitor.state, desired_config.state],
current_monitor.get_relevant_state_for_read())
self.assertEqual([current_config.state, desired_monitor.state,
desired_oper.state],
desired_monitor.get_relevant_state_for_read())
def _observe_aci_events(self, aci_universe):
for tenant in aci_universe.serving_tenants.values():
self._current_manager = tenant
tenant._event_loop()
def _assert_universe_sync(self, desired, current, tenants=None,
negative=False):
def printable_state(universe):
return json.dumps({x: y.root.to_dict() if y.root else {}
for x, y in universe.state.iteritems()},
indent=2)
desired.observe(self.ctx)
current.observe(self.ctx)
# Because of the possible error nodes, we need to verify that the
# diff is empty
self.assertEqual(current.state.keys(), desired.state.keys(),
'Not in sync:\n current(%s)\n: %s \n\n '
'desired(%s)\n: %s' %
(current.name, printable_state(current), desired.name,
printable_state(desired)))
for tenant in (tenants or current.state):
if negative:
self.assertNotEqual(
{"add": [], "remove": []},
desired.state[tenant].diff(current.state[tenant]))
else:
self.assertEqual(
{"add": [], "remove": []},
desired.state[tenant].diff(current.state[tenant]),
'Not in sync:\n current(%s)\n: %s \n\n '
'desired(%s)\n: %s' %
(current.name, printable_state(current), desired.name,
printable_state(desired)))
def _assert_reset_consistency(self, tenant=None):
ctx = mock.Mock()
ctx.obj = {'manager': self.aim_manager, 'aim_ctx': self.ctx}
# get current tree(s)
filters = {}
if tenant:
filters = {'root_rn': [tenant]}
# for each tenant, save their trees
old = self._get_aim_trees_by_tenant(filters)
self.assertNotEqual({}, old)
# Now reset trees
listener = hashtree_db_listener.HashTreeDbListener(self.aim_manager)
listener._delete_trees(self.ctx, root=tenant)
current = self._get_aim_trees_by_tenant(filters)
for trees in current.values():
for t in trees.values():
self.assertEqual('{}', str(t))
listener._recreate_trees(self.ctx, root=tenant)
# Check if they are still the same
new = self._get_aim_trees_by_tenant(filters)
new.pop('comp', None)
self.assertEqual(old, new)
def _get_aim_trees_by_tenant(self, filters):
result = {}
for type in tree_manager.SUPPORTED_TREES:
for t in self.tt_mgr.find(self.ctx, tree=type, **filters):
rn = tree_manager.AimHashTreeMaker._extract_root_rn(t.root_key)
result.setdefault(rn, {})[type] = t
return result
def _sync_and_verify(self, agent, to_observe, couples, tenants=None):
agent._reconciliation_cycle()
self._observe_aci_events(to_observe)
agent._reconciliation_cycle()
# Verify everything is fine
for couple in couples:
self._assert_universe_sync(couple[0], couple[1], tenants=tenants)
self._assert_reset_consistency()
self._verify_get_relevant_state(agent)
| 46.491964 | 79 | 0.618012 |
795a1f8cfd1f470f4f707aeaa37dadf130bfa930 | 1,195 | py | Python | game/save.py | martendo7/sunny-day | 9d61294dc1e475140eb44f99ff06636d2e4e9868 | [
"MIT"
] | null | null | null | game/save.py | martendo7/sunny-day | 9d61294dc1e475140eb44f99ff06636d2e4e9868 | [
"MIT"
] | null | null | null | game/save.py | martendo7/sunny-day | 9d61294dc1e475140eb44f99ff06636d2e4e9868 | [
"MIT"
] | null | null | null | import json
class SaveDataError(Exception):
    """Raised when a save file cannot be parsed or lacks required fields."""
    pass
class SaveReader:
    """Loads and stores game progress (level, lives, coins) as JSON.

    Keeps a reference to the game object and reads/writes the path stored
    in ``game.savefile``.

    Fix: the bare ``except:`` around ``json.load`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ``ValueError`` — both
    ``json.JSONDecodeError`` and ``UnicodeDecodeError`` are subclasses —
    and the original errors are chained onto the raised SaveDataError.
    """

    def __init__(self, game):
        self.game = game

    def load(self, filename):
        """Read *filename* into the game state.

        Raises:
            SaveDataError: if the file is not valid JSON or a required
                field is missing.
        """
        self.game.savefile = filename
        with open(self.game.savefile, "r", encoding="utf-8") as file:
            try:
                data = json.load(file)
            except ValueError as err:
                raise SaveDataError("Unable to read save file") from err
        try:
            self.game.last_completed_level = data["last_completed_level"]
            self.game.player.lives = data["lives"]
            if self.game.player.lives < 1:
                # Guard against corrupt/zero lives: restart with defaults.
                self.game.player.lives = self.game.player.START_LIVES
            self.game.player.coins = data["coins"]
        except KeyError as err:
            raise SaveDataError(
                f"Save file is missing save data: \"{err.args[0]}\"") from err

    def save(self, filename=None):
        """Write the current game state; *filename*, when given, becomes
        the new default save path.
        """
        if filename is not None:
            self.game.savefile = filename
        data = {
            "last_completed_level": self.game.last_completed_level,
            "lives": self.game.player.lives,
            "coins": self.game.player.coins,
        }
        with open(self.game.savefile, "w", encoding="utf-8") as file:
            json.dump(data, file)
| 33.194444 | 83 | 0.569874 |
795a1ffaa7a8432ec2a59c33d2a0a9067904e130 | 1,073 | py | Python | net_models/models/services/cisco_ios/IosLineModels.py | ashkalikava/net_models | 1a99f6dc743665b47c4c4731bbca2a52176ded3f | [
"MIT"
] | null | null | null | net_models/models/services/cisco_ios/IosLineModels.py | ashkalikava/net_models | 1a99f6dc743665b47c4c4731bbca2a52176ded3f | [
"MIT"
] | null | null | null | net_models/models/services/cisco_ios/IosLineModels.py | ashkalikava/net_models | 1a99f6dc743665b47c4c4731bbca2a52176ded3f | [
"MIT"
] | null | null | null | from net_models.models import BaseNetModel
from net_models.fields import GENERIC_OBJECT_NAME, VRF_NAME
from net_models.models.services.cisco_ios.AaaMethods import IosLineAaaConfig
from pydantic.types import conlist, conint
from pydantic.typing import Optional, Literal, List
class IosLineTransport(BaseNetModel):
    """Allowed transport protocols on a line (IOS 'transport ...' config)."""

    # Protocols accepted for incoming connections.
    input: Optional[Literal['all', 'none', 'ssh', 'telnet']]
    # Protocols allowed for outgoing connections from the line.
    output: Optional[Literal['all', 'none', 'ssh', 'telnet']]
    # Preferred protocol when none is specified explicitly.
    preferred: Optional[Literal['none', 'ssh', 'telnet']]
class IosLineAccessClass(BaseNetModel):
    """An 'access-class' entry restricting connections on a line."""

    # Name of the ACL applied to the line.
    name: GENERIC_OBJECT_NAME
    # Mirrors the IOS 'vrf-also' keyword — presumably also permits
    # VRF-sourced connections; confirm against the rendering template.
    vrf_also: Optional[bool]
    # Optional VRF the access class applies to.
    vrf: Optional[VRF_NAME]
    # Whether the ACL applies to inbound or outbound connections.
    direction: Literal['in', 'out']
class IosLineConfig(BaseNetModel):
    """Configuration of a console/aux/vty line or range of lines."""

    # Which kind of terminal line this configures.
    line_type: Literal['aux', 'console', 'vty']
    # One or two non-negative numbers: a single line or an inclusive range.
    line_range: conlist(item_type=conint(ge=0), min_items=1, max_items=2)
    aaa_config: Optional[IosLineAaaConfig]
    """AAA Configuration Object"""
    exec_timeout: Optional[conint(ge=0)]
    """EXEC Timeout in seconds"""
    # Transport protocol restrictions for this line.
    transport: Optional[IosLineTransport]
    # Zero or more access-class restrictions.
    access_classes: Optional[List[IosLineAccessClass]]
| 32.515152 | 76 | 0.741845 |
795a2218adf4ebe15852e2fe65d9e3084e87acf0 | 386 | py | Python | doc/build/html/add_font.py | yhilpisch/rpi | 3379078699554c25806b7b4ed716b45e16c42f16 | [
"BSD-2-Clause"
] | 3 | 2015-12-19T17:28:01.000Z | 2018-05-27T01:40:42.000Z | doc/build/html/add_font.py | yhilpisch/rpi | 3379078699554c25806b7b4ed716b45e16c42f16 | [
"BSD-2-Clause"
] | null | null | null | doc/build/html/add_font.py | yhilpisch/rpi | 3379078699554c25806b7b4ed716b45e16c42f16 | [
"BSD-2-Clause"
] | 8 | 2015-05-05T04:51:25.000Z | 2022-03-14T04:50:18.000Z | import os
# Inject a Google Fonts stylesheet <link> into generated Sphinx HTML pages
# (those whose file names start with '0' or 'i'), right after the first 12
# header lines of each page.
#
# Fix: file handles are managed with `with` blocks — the original never
# closed the read handle and closed the write handle only on success.
to_add = "<link href='http://fonts.googleapis.com/css?family=PT+Sans' "
to_add += "rel='stylesheet' type='text/css'>\n"
for fname in os.listdir('.'):
    if fname.endswith('.html') and fname[0] in ['0', 'i']:
        # Read everything first: the write below truncates the same file.
        with open(fname, 'r') as src:
            lines = src.readlines()
        with open(fname, 'w') as dst:
            # Line 12 is assumed to end the fixed header of these pages.
            dst.writelines(lines[:12])
            dst.write(to_add)
            dst.writelines(lines[12:])
795a23b7dbdd931cc8227eb9a11e1c9c849cf4cc | 11,218 | py | Python | Configs/Non_lab.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | 1 | 2022-01-17T13:13:02.000Z | 2022-01-17T13:13:02.000Z | Configs/Non_lab.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | null | null | null | Configs/Non_lab.py | yochaiedlitz/T2DM_UKB_predictions | 1e6b22e3d51d515eb065d7d5f46408f86f33d0b8 | [
"MIT"
] | null | null | null | import collections # Used for ordered dictionary
from PRS import PRS_sumstats as PRS_sumstats
from UKBB_Functions import PROBA_FOLDER
import sys
# Genetic summary-statistics lookup table, plus the two LightGBM
# hyper-parameter search spaces: "_A" for all participants and "_R" for
# returning participants. OrderedDicts keep the insertion order of the
# parameter grid.
Top_Gen_Dict = PRS_sumstats.Get_Top_Gen_Dict()
Hyp_Param_Dict_A = collections.OrderedDict()
Hyp_Param_Dict_R = collections.OrderedDict()
# Alternative (commented-out) dataset splits kept for reference:
# TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_train.csv'
# TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_test.csv'
# TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train.csv'
# TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_test.csv'
# Active split: combined train+test file for fitting, held-out validation
# file for final evaluation.
TRAIN_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_train_test.csv'
TEST_PATH = '/net/mraid08/export/jafar/UKBioBank/Data/ukb29741_Diabetes_returned_extended_Imputed_val.csv'
# Available feature-group job names:
# ['Diabetes_all','Age_and_Sex','Anthropometry','Blood_Tests','BP_and_HR',
# 'Diet','Early_Life_Factors','Family_and_Ethnicity','Lifestyle_and_physical_activity','Medication',
# 'Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
ALL_TEST_AS_VAL = False
BASIC_JOB_NAME = ['Non_Lab_no_diet']#['Mental_health','Non_Diabetes_Diagnosis','Physical_health','Socio_demographics','HbA1c']
BASIC_PROB_BASED_JOB_NAME = ["imp_" + x for x in BASIC_JOB_NAME]
Sub_Class_array = ["All"]  # "All",, "All"
Job_ID = ["2443-0.0"]
RET_FEAT_file_names = BASIC_JOB_NAME
feat_list_folder="Diabetes_Features_lists/For_article/" #Folder where the features lists located
FEAT_file_names = [
    "Diabetes_Features_0705"]  # Diabetes_Features.csv,Diabetes_Features_No_Baseline.csv,Baseline_Features.csv,Diabetes_Features_Lifestyle.csv,Diabetes_Features_No_Baseline.csv, Full_Diabetes_Features # "Diabetes_Features.csv","Diabetes_Features.csv","Diabetes_Features.csv",BMI_Features_Lifestyle.csv
# Features File name without ending
# Features File name without ending
FEAT_PATH = [feat_list_folder + x + ".csv" for x in FEAT_file_names]
RET_FEAT_PATH = [feat_list_folder + x + ".csv" for x in RET_FEAT_file_names]
#
# Data_Job_Names = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer", "4041-0.0": "Gestational diabetes","21001-0.0":'BMI'}
# Participant characteristics used for cohort selection/description.
CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
                   "Type of special diet followed": "All"}
# Per-disease probability files produced by earlier prediction runs.
DISEASE_PROBA_DICT = {"Diabetes Probabilities": PROBA_FOLDER + "Diabetes_OnlyPROB.csv",
                      "CVD Probabilities": PROBA_FOLDER + "Vascular_OnlyPROB.csv",
                      "Cancer Probabilities": PROBA_FOLDER + "Cancer_OnlyPROB.csv"}
# PRS_COLS -Adding PRS -Only final score for each phenotype for each user
PRS_COLS = ['PRS_MAGIC_HbA1C', 'PRS_cigs_per_day', 'PRS_MAGIC_Scott_FG', 'PRS_ln_HOMA-IR', 'PRS_MAGIC_Scott_FI',
            'PRS_height', 'PRS_Manning_FI', 'PRS_Leptin_BMI', 'PRS_cardio', 'PRS_triglycerides',
            'PRS_Manning_FG', 'PRS_anorexia', 'PRS_Magic_2hrGlucose', 'PRS_Non_Diabetic_glucose2', 'PRS_ever_smoked',
            'PRS_age_smoke', 'PRS_MAGIC_fastingProinsulin', 'PRS_Leptin_Unadjusted_BMI',
            'PRS_MAGIC_Scott_FI_adjBMI', 'PRS_MAGIC_Scott_2hGlu', 'PRS_glucose_iris', 'PRS_ln_FastingInsulin',
            'PRS_bmi', 'PRS_overweight', 'PRS_hba1c', 'PRS_alzheimer', 'PRS_whr', 'PRS_ln_HOMA-B',
            'PRS_ldl', 'PRS_obesity_class2', 'PRS_obesity_class1', 'PRS_diabetes_BMI_Unadjusted',
            'PRS_Manning_BMI_ADJ_FG', 'PRS_waist', 'PRS_ashtma', 'PRS_HBA1C_ISI', 'PRS_HbA1c_MANTRA',
            'PRS_diabetes_BMI_Adjusted', 'PRS_Heart_Rate', 'PRS_Manning_BMI_ADJ_FI', 'PRS_cholesterol', 'PRS_hdl',
            'PRS_FastingGlucose', 'PRS_hips']
# Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA','t2d_mega_meta',"MAGIC_Scott_FG","triglycerides",'Magic_2hrGlucose','Manning_Fasting_Insulin'] #Keep empty if None
Select_Top_Traits_Gen_arr_names = ['HbA1c_MANTRA', 't2d_mega_meta', "MAGIC_Scott_FG", 'Magic_2hrGlucose',
                                   'bmi', 'anorexia', 'cardio', 'hips', 'waist', "overweight", 'obesity_class1',
                                   'obesity_class2',
                                   "ever_smoked", "hdl", "ldl", 'triglycerides', 'cholesterol',
                                   'diabetes_BMI_Unadjusted',
                                   'diabetes_BMI_Adjusted', 'FastingGlucose', 'ln_HOMA-B', 'ln_HOMA-IR',
                                   'ln_FastingInsulin',
                                   'Leptin_BMI', 'Leptin_Unadjusted_BMI', 'Heart_Rate', 'MAGIC_fastingProinsulin',
                                   'MAGIC_Scott_FI_adjBMI', 'MAGIC_Scott_FI', 'MAGIC_HbA1C', 'Manning_FG',
                                   'Manning_BMI_ADJ_FG',
                                   'Manning_Fasting_Insulin', 'Manning_BMI_ADJ_FI', 'HBA1C_ISI'] #
# ---- Run-control flags and resource settings ----
USE_FAKE_QUE = False
NROWS = None  # 1-500000 or None
NROWS_RETURN = None  # How many returning participants to load
Split = True  # Whether or not to split data to train and test; should be False only for final testing
Logistic_regression=True  # True -> logistic regression, False -> LightGBM trees
Use_imp_flag=True
DEBUG = False
USE_PROBA = True  # Whether or not to either calculate probability if working on all participants or to use probabilities
# calculated if working with returning participants
USE_PRS = False  # whether to use PRS results
Use_SNPs = False
NFOLD = 5
Choose_N_Fold = 3  # How many CV to make for the initial Cross validation when choosing the hyperparameters
Basic_HYP_PAR_ITER = 20
Prob_HYP_PAR_ITER = 200
MEM = '30G'
N_THREADS = 10
P_THREADS = 1
Calc_Base_Prob = False
CALC_SHAP = True  # Whether or not to calculate the SHAP values for the basic probabilities
SORT = True  # Used mostly for debugging to activate the SORT_AUC_APS function
# Refit_model - path to model to be refitted in the first visit
Refit_Model = None  # '/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/Diabetes_shap_model.txt'#None##Name of the model to be refitted or None
# /net/mraid08/export/jafar/Yochai/UKBB_Runs/AF_To_refit2_Diabetes/Diabetes_Results
Finalize_Only = False
Calc_Prob_Based_Prob = True
RE_USE_PROBA = False
Calc_Transfer_Learning = False  # Used when we would like to refit several base models and not a specific model
REFIT_SERIAL_MODELS = False  # Whether to refit a model folder just made in a previous step, or use a predefined folder
# Refit_Return_Model_Path - path to model to be refitted in the first visit
Refit_Return_Model_Path = None  # '/net/mraid08/export/jafar/Yochai/UKBB_Runs/mock_refit/Diabetes_Results/'#'/net/mraid08/export/jafar/UKBioBank/Yochai/UKBB_Runs/Refit/Refit_BL2AF_Diabetes/Diabetes_Results/'#None#
HowHow = "left"  # "inner" - take only participants who has probabilities for other disease as well, "left" - take all
CALC_P_SHAP = True  # Whether or not to calculate the SHAP values for the prob-based predictions
SORT_Prob = True
Finalize_Prob_Based_Only = False
# Returning-participant models are considered "refitted" if either refit
# mechanism is enabled.
if REFIT_SERIAL_MODELS or Refit_Return_Model_Path:
    Refit_Returned = True
else:
    Refit_Returned = False
VISITS = [0, 1, 2]  # [0,1,2]
NUM_OF_DEP_PLOT = 10
Lite = False  # Used for debug
Thresh_in_Column = 0.7
Thresh_in_Row = 0.7
# CHARAC_SELECTED = {"Age at last visit": "All", "Sex": "All", "Ethnic background": "All",
#                    "Type of special diet followed": "All"}
# ---- UK Biobank field-ID and answer-code lookup tables ----
CHARAC_ID = {"Age at last visit": "21022-0.0", "Sex": "31-0.0", "Ethnic background": "21000-0.0",
             "Type of special diet followed": "20086-0.0"}
ETHNIC_CODE = {-3: "Prefer not to answer", -1: "Do not know", 1: "White", 2: "Mixed", 3: "Asian",
               4: "Black or Black British", 5: "Chinese", 6: "Other ethnic group", 1001: "British", 1002: "Irish",
               1003: "Any other white background", 2001: "White and Black Caribbean",
               2002: "White and Black African", 2003: "White and Asian", 2004: "Any other mixed background",
               3001: "Indian", 3002: "Pakistani", 3003: "Bangladeshi", 3004: "Any other Asian background",
               4001: "Caribbean", 4002: "African", 4003: "Any other Black background"}
SEX_CODE = {"Female": 0, "Male": 1}
DIET_CODE = {"Gluten-free": 8, "Lactose-free": 9, "Low calorie": 10, "Vegetarian": 11, "Vegan": 12, "Other": 13}
Job_name_dict = {"6150-0.0": "Vascular", "2443-0.0": "Diabetes", "2453-0.0": "Cancer",
                 "4041-0.0": "Gestational diabetes",
                 "21001-0.0": 'BMI'}  # ,"Diabetes", "Cancer", "Gestational diabetes","Vascular"
# Value that encodes "no symptoms" for each target field.
No_symp_dict = {"6150-0.0": -7, "2443-0.0": 0, '2453-0.0': 0, '21001-0.0': "nan"}
# ---- LightGBM search grid for the all-participants ("A") models ----
# Hyp_Param_Dict_A['max_depth']=[2,4,8,16]
Hyp_Param_Dict_A['num_leaves'] = [4, 8, 16, 32, 64, 128, 256]
Hyp_Param_Dict_A['is_unbalance'] = [True]
Hyp_Param_Dict_A['objective'] = ['binary']
Hyp_Param_Dict_A['boosting_type'] = ['gbdt']  # ,'rf','dart','goss'
Hyp_Param_Dict_A['metric'] = ["auc"]  # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_A['num_boost_round'] = [10, 50, 100, 250, 500, 1000, 2000, 4000, 8000]  # ,1000, 2000, 4000, 8000
Hyp_Param_Dict_A['learning_rate'] = [0.005, 0.01, 0.05, 0.1]
Hyp_Param_Dict_A["min_child_samples"] = [10, 25, 50, 250, 500]
Hyp_Param_Dict_A["subsample"] = [0.1, 0.25, 0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_A["colsample_bytree"] = [0.03, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_A["boost_from_average"] = [True]
Hyp_Param_Dict_A['num_threads'] = [N_THREADS]
Hyp_Param_Dict_A['lambda_l1'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['lambda_l2'] = [0, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_A['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_A['bagging_fraction'] = [0.25, 0.5, 0.75, 1]
# ---- LightGBM search grid for the returning-participants ("R") models ----
# Hyp_Param_Dict_R['max_depth']=[2,4,8,16]
# Fixed copy/paste bug: 'num_leaves', 'bagging_freq' and 'bagging_fraction'
# in this section were written into Hyp_Param_Dict_A (silently overwriting
# the "A" grid above and leaving the "R" grid without those keys); they now
# target Hyp_Param_Dict_R as the surrounding assignments clearly intend.
Hyp_Param_Dict_R['num_leaves'] = [2, 4, 8, 16, 32, 64, 128]
Hyp_Param_Dict_R['is_unbalance'] = [True]
Hyp_Param_Dict_R['objective'] = ['binary']
Hyp_Param_Dict_R['boosting_type'] = ['gbdt']
Hyp_Param_Dict_R['metric'] = [
    "auc"]  # MAP, aliases: mean_average_precision,kldiv, Kullback-Leibler divergence, aliases: kullback_leibler
Hyp_Param_Dict_R['num_boost_round'] = [50, 100, 250, 500, 1000, 2000, 4000]  # ,,1000, 2000, 4000, 8000
Hyp_Param_Dict_R['verbose'] = [-1]
Hyp_Param_Dict_R['learning_rate'] = [0.005, 0.01, 0.05]
Hyp_Param_Dict_R["min_child_samples"] = [5, 10, 25, 50]
Hyp_Param_Dict_R["subsample"] = [0.5, 0.7, 0.9, 1]
Hyp_Param_Dict_R["colsample_bytree"] = [0.01, 0.05, 0.1, 0.25, 0.5, 0.7, 1]
Hyp_Param_Dict_R["boost_from_average"] = [True]
Hyp_Param_Dict_R['num_threads'] = [P_THREADS]
Hyp_Param_Dict_R['lambda_l1'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['lambda_l2'] = [0, 0.25, 0.5, 0.9, 0.99, 0.999]
Hyp_Param_Dict_R['bagging_freq'] = [0, 1, 5]
Hyp_Param_Dict_R['bagging_fraction'] = [0.5, 0.75, 1]
# Subset of Top_Gen_Dict restricted to the traits selected above.
Select_Traits_Gen = {name: Top_Gen_Dict[name]
                     for name in Select_Top_Traits_Gen_arr_names}
# Fail fast on inconsistent job configuration. (The original condition
# repeated the Sub_Class_array comparison twice; the duplicate term was
# redundant and has been removed — the truth value is unchanged.)
if (len(BASIC_JOB_NAME) != len(Sub_Class_array) or
        len(BASIC_JOB_NAME) != len(Job_ID)):
    sys.exit("BASIC_JOB_NAME,Sub_Class_array and Job_ID should be same size")
795a242ae30acda268a0bd877c743610408f8752 | 468 | py | Python | modeling/ibn.py | sunggukcha/deeplabs | 739be4accdc27fbb912c131aaafc9fcf10e04929 | [
"MIT"
] | 34 | 2019-08-07T20:44:15.000Z | 2021-12-05T10:23:47.000Z | modeling/ibn.py | toluwajosh/deeplabs | 59e292e6777d1e53ed7716b7afd3c3489d57f61a | [
"MIT"
] | 9 | 2020-05-08T07:55:12.000Z | 2020-12-23T08:36:08.000Z | modeling/ibn.py | toluwajosh/deeplabs | 59e292e6777d1e53ed7716b7afd3c3489d57f61a | [
"MIT"
] | 6 | 2020-03-09T15:42:51.000Z | 2021-09-12T19:33:08.000Z | import torch
import torch.nn as nn
import math
class IBN(nn.Module):
    """Instance-Batch Normalization layer.

    Applies InstanceNorm2d to the first half of the input channels and the
    supplied batch-norm class to the remaining channels, then concatenates
    the two results back along the channel dimension.

    :param planes: total number of input channels.
    :param bn: batch-norm class (e.g. ``nn.BatchNorm2d``) applied to the
        second half of the channels.
    """

    def __init__(self, planes, bn):
        super(IBN, self).__init__()
        half1 = int(planes / 2)
        self.half = half1
        half2 = planes - half1
        # Kept so forward() can split by explicit sizes (supports odd `planes`).
        self.half2 = half2
        self.IN = nn.InstanceNorm2d(half1, affine=True)
        self.BN = bn(half2)

    def forward(self, x):
        # Fix: splitting by a chunk *size* (torch.split(x, self.half, 1))
        # produces three chunks for odd channel counts, which broke the BN
        # branch. Splitting by explicit section sizes handles both cases.
        split = torch.split(x, [self.half, self.half2], 1)
        out1 = self.IN(split[0].contiguous())
        out2 = self.BN(split[1].contiguous())
        out = torch.cat((out1, out2), 1)
        return out
| 24.631579 | 50 | 0.666667 |
795a24629fe51b30acec07a3e96efc920f1bc25e | 3,500 | py | Python | test_pyramids/test_code_quality.py | leomauro/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 9 | 2015-09-04T22:33:40.000Z | 2019-04-11T14:05:11.000Z | test_pyramids/test_code_quality.py | leomauro/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 2 | 2015-09-04T22:31:44.000Z | 2017-07-29T04:11:53.000Z | test_pyramids/test_code_quality.py | hosford42/pyramids | 4f7a8e97e13a5ee0b037dc528e5ba72f31ac36e5 | [
"MIT"
] | 3 | 2015-10-14T12:41:26.000Z | 2022-01-08T19:43:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests to ensure code quality standards are met."""
import os
from pylama.main import check_path, parse_options
# Map pylama's single-letter error-type codes to human-readable labels.
ERROR_TYPE_MAP = {
    'W': 'Warning',
    'D': 'Documentation',
    'E': 'Code Checker Error',
    'C': 'Coding Style',
    'R': 'Code Complexity',
}

# Keyword overrides passed to pylama's parse_options(): which linters to run,
# which rule codes to suppress, and concurrency settings.
PYLAMA_OPTION_OVERRIDES = {
    'linters': ['pep257', 'pydocstyle', 'pycodestyle', 'pyflakes', 'mccabe',
                'pylint', 'radon', 'eradicate', 'mypy'],
    'ignore': [
        'W0212',  # pylint can't tell between same- and different-type protected member
                  # access based on inferred types.
        'D102',  # pylint, pydocstyle, and pep257 are redundant on this one. Pylint has its own
                 # code, while the other two share this code.
        'D103',  # Likewise for this one.
        'D105',  # I disagree with the standard. Magic methods don't need docstrings. They are
                 # universal and can easily be googled.
        'D107',  # Same with __init__. The class's docstring is sufficient.
        'D212',  # This is optional and directly conflicts with an alternative one.
        'D203',  # Likewise.
        'W0611',  # PyFlakes gets confused by MyPy type annotations, which require imports to be
                  # present.
    ],
    'async': True,
    'concurrent': True,
}
def test_code_quality():
    """Run the configured pylama linters over every project source file."""
    previous_dir = os.getcwd()
    try:
        # Lint from the project root so pylama's relative paths resolve.
        project_root = os.path.dirname(os.path.dirname(__file__))
        os.chdir(project_root)
        sources = get_python_source_paths(project_root)
        lint_options = parse_options(sources, **PYLAMA_OPTION_OVERRIDES)
        found = check_path(lint_options, rootdir='.')
        if found:
            separator = '-' * 80
            print(separator)
            for issue in found:
                print_pylama_error(issue, project_root)
            print(separator)
        assert not found, "%s code quality errors detected." % len(found)
    finally:
        # Always restore the caller's working directory.
        os.chdir(previous_dir)
def print_pylama_error(error, root_path):
    """Print a pylama error in a readable format.

    :param error: pylama error dict with keys 'col', 'lnum', 'type', 'text'
        and 'filename' (path relative to the current working directory).
    :param root_path: project root, used only to print an absolute path.
    """
    column = error.get('col')
    line_no = error.get('lnum')
    error_type = error.get('type')
    text = error.get('text')
    relative_file_path = error.get('filename')
    error_type = ERROR_TYPE_MAP.get(error_type, error_type)
    if relative_file_path:
        print()
        print('File "%s", line %s, col %s:' %
              (os.path.join(root_path, relative_file_path), line_no, column))
        # Fix: `line` stays None unless the referenced line is actually found.
        # The original could leave `line` unbound (empty file / missing lnum)
        # or holding the file's *last* line when the file was shorter than
        # lnum, printing a misleading snippet.
        line = None
        if line_no is not None:
            with open(relative_file_path, encoding='utf-8') as file:
                for index, candidate in enumerate(file):
                    if index + 1 == line_no:
                        line = candidate
                        break
        if line:
            print('  ' + line.rstrip('\n'))
            # Only draw the caret when the prefix contains visible code.
            if column is not None and not line[:column].isspace():
                print('  ' + ' ' * column + '^')
        print('%s: %s' % (error_type, text))
    else:
        print('%s: %s' % (error_type, text))
def get_python_source_paths(root_path):
    """Recursively collect the paths of all ``*.py`` files under *root_path*."""
    return [
        os.path.join(folder, file_name)
        for folder, _, file_names in os.walk(root_path)
        for file_name in file_names
        if file_name.endswith('.py')
    ]
# Allow running this test module directly, without a test runner.
if __name__ == '__main__':
    test_code_quality()
| 32.71028 | 96 | 0.584 |
795a24d820f2eec96f7ea8cfd388c5e93eede514 | 6,437 | py | Python | motif/experiments.py | NThande/matched-motif | 3d5338a5db8c8ae69d42c3141d81a8812cd82bd7 | [
"MIT"
] | 1 | 2019-11-06T00:53:58.000Z | 2019-11-06T00:53:58.000Z | motif/experiments.py | NThande/matched-motif | 3d5338a5db8c8ae69d42c3141d81a8812cd82bd7 | [
"MIT"
] | null | null | null | motif/experiments.py | NThande/matched-motif | 3d5338a5db8c8ae69d42c3141d81a8812cd82bd7 | [
"MIT"
] | null | null | null | import config as cfg
import explots
import exputils
import fileutils
import metrics
import motifutils as motif
import visutils as vis
import visualizations
# Runs the segmentation experiment on an audio file with the given name in bin/test.
def segmentation_experiment(name, in_dir, out_dir, write_motifs=False, show_plot=()):
    """Compare regular-interval, onset, and beat segmentation on one track."""
    methods = ('Regular', 'Onset', 'Beat')
    _experiment('Segmentation', name, in_dir, out_dir, methods, write_motifs, show_plot=show_plot)
# Runs the similarity measure experiment on an audio file with the given name in bin/test.
def similarity_experiment(name, in_dir, out_dir, write_motifs=False, show_plot=()):
    """Compare the Match and Shazam similarity measures on one track."""
    methods = ('Match', 'Shazam')
    _experiment('Similarity', name, in_dir, out_dir, methods, write_motifs, show_plot=show_plot)
# Runs the k-means experiment on an audio file with the given name in bin/test.
def k_means_experiment(name, in_dir, out_dir, write_motifs=False, show_plot=()):
    """Run k-means clustering with k in {3, 5, 10} on one track."""
    methods = (3, 5, 10)
    _experiment('K-Means', name, in_dir, out_dir, methods, write_motifs, show_plot=show_plot)
# Runs the clustering experiment on an audio file with the given name in bin/test.
def clustering_experiment(name, in_dir, out_dir, write_motifs=False, show_plot=()):
    """Compare k-means, spectral, and agglomerative clustering on one track."""
    methods = ('K-Means', 'Spectral', 'Agglom')
    _experiment('Clustering', name, in_dir, out_dir, methods, write_motifs, show_plot=show_plot)
# Generic method for running an experiment. Runs analysis uses an experiment-specific configuration.
def _experiment(exp_name, audio_name, in_dir, out_dir,
                methods, write_motifs=False, show_plot=()):
    """Load a track, run the analysis selected by *exp_name*, score and plot it.

    :param exp_name: one of 'Segmentation', 'Similarity', 'K-Means', 'Clustering'.
    :param audio_name: base name of the audio and label files in *in_dir*.
    :param methods: per-experiment method identifiers (cluster counts for K-Means).
    :param show_plot: collection of plot kinds to render ('bar' and/or 'group').
    :returns: dict mapping each method to its metric list, or None for an
        unrecognized *exp_name*.
    """
    audio, fs = fileutils.load_audio(audio_name, audio_dir=in_dir)
    audio_labels = fileutils.load_labels(audio_name, label_dir=in_dir)
    # Ground-truth motifs from the hand-made label file.
    ref_starts, ref_ends, ref_labels = motif.df_to_motif(audio_labels)
    ref_motifs = motif.pack_motif(ref_starts, ref_ends, ref_labels)
    length = cfg.SEGMENT_LENGTH
    # Dispatch to the experiment-specific analysis routine.
    if exp_name == 'Segmentation':
        results, _ = exputils.segmentation_analysis(audio, fs, length, audio_name,
                                                    methods=methods, k=cfg.N_ClUSTERS,
                                                    show_plot=show_plot)
    elif exp_name == 'Similarity':
        results, _ = exputils.similarity_analysis(audio, fs, length, audio_name,
                                                  methods=methods, k=cfg.N_ClUSTERS,
                                                  show_plot=show_plot)
    elif exp_name == 'K-Means':
        results, _ = exputils.k_means_analysis(audio, fs, length, audio_name,
                                               k_clusters=methods,
                                               show_plot=show_plot)
    elif exp_name == 'Clustering':
        results, _ = exputils.clustering_analysis(audio, fs, length, audio_name,
                                                  methods=methods, k=cfg.N_ClUSTERS,
                                                  show_plot=show_plot)
    else:
        print("Unrecognized experiment name: {exp_name}".format(exp_name=exp_name))
        return
    metric_dict = results_to_metrics(results, methods, ref_motifs)
    # Output Plots
    # K-Means methods are integers, so prefix labels with 'k='.
    if exp_name == 'K-Means':
        lp = 'k='
    else:
        lp = ''
    # Plot the recall, precision, f-measure, boundary measure, and edit distance as bar plots.
    if 'bar' in show_plot:
        fig = vis.get_fig()
        ax = fig.add_subplot(1, 1, 1)
        ax = explots.draw_results_rpf(methods, metric_dict, label_prefix=lp, ax=ax)
        fig.suptitle('{exp_name} Performance for {audio_name}'.format(exp_name=exp_name,
                                                                      audio_name=audio_name))
        vis.save_fig(fig, './bin/graphs/', 'RPF_{}_{}'.format(audio_name, exp_name))
        fig = vis.get_fig()
        explots.draw_results_bed(methods, metric_dict, audio_name, exp_name, fig=fig)
        fig.suptitle("{exp_name} Accuracy on {audio_name}".format(exp_name=exp_name, audio_name=audio_name),
                     fontsize=24)
        if exp_name == 'K-Means':
            ax = fig.get_axes()[0]
            ax.set_xlabel('Number of clusters')
            ax = fig.get_axes()[1]
            ax.set_xlabel('Number of clusters')
        vis.save_fig(fig, './bin/graphs/', 'BED_{}_{}'.format(audio_name, exp_name))
    # Plot the motif segmentations as subplots in a larger figure
    if 'group' in show_plot:
        label_key = 'Ideal'
        methods_grp = (label_key,) + methods
        # NOTE(review): this mutates `results` in place by adding the reference
        # motifs under 'Ideal' before write_results below — confirm intended.
        results[label_key] = ref_motifs
        fig = visualizations.draw_motif_group(audio, fs, results, methods_grp, title='', subplots=(2, 2),
                                              label_prefix=lp)
        fig.suptitle('{exp_name} Motifs on {audio_name}'.format(exp_name=exp_name, audio_name=audio_name))
        vis.save_fig(fig, './bin/graphs/', 'GRP_{}_{}'.format(audio_name, exp_name))
        if exp_name == 'K-Means':
            ax = fig.get_axes()[1]
            ax.set_title(label_key, fontsize=18)
    if write_motifs:
        exputils.write_results(audio, fs, audio_name, out_dir, methods, results)
    return metric_dict
# Convert results dict to metrics dict
def results_to_metrics(results, methods, ref_motifs):
    """Score each method's motifs against the reference annotation.

    :returns: dict mapping each method to
        [edit distance, recall, precision, f-measure, boundary distance].
    """
    _, _, ref_labels = motif.unpack_motif(ref_motifs)
    scores = {}
    for method in methods:
        observed = results[method]
        _, _, observed_labels = motif.unpack_motif(observed)
        scores[method] = [
            metrics.edit_distance(observed_labels, ref_labels),
            metrics.recall(observed, ref_motifs),
            metrics.precision(observed, ref_motifs),
            metrics.f_measure(observed, ref_motifs),
            metrics.boundary_distance(observed, ref_motifs),
        ]
    return scores
# The experiments run to generate our output data
def main():
    """Run every experiment on the demo track and display the figures."""
    name = 'Avril'
    in_dir = "./bin/test"
    out_dir = "./bin/results"
    segmentation_experiment(name, in_dir, out_dir, show_plot=('group',), write_motifs=False)
    k_means_experiment(name, in_dir, out_dir, show_plot=('group',), write_motifs=False)
    similarity_experiment(name, in_dir, out_dir, show_plot=('group',), write_motifs=False)
    clustering_experiment(name, in_dir, out_dir, show_plot=('group',), write_motifs=False)
    vis.show()
    return


if __name__ == '__main__':
    main()
795a252846308c81e66ff72aaec807afdccfea55 | 1,363 | py | Python | pipeline/transform/cta_train_stops.py | scarletstudio/transithealth | 408e6c1a063e46edb95040c26db93c2ff93c6d33 | [
"MIT"
] | 2 | 2021-05-18T15:34:19.000Z | 2021-10-07T01:29:31.000Z | pipeline/transform/cta_train_stops.py | scarletstudio/transithealth | 408e6c1a063e46edb95040c26db93c2ff93c6d33 | [
"MIT"
] | 89 | 2021-05-21T18:31:04.000Z | 2021-08-16T01:13:02.000Z | pipeline/transform/cta_train_stops.py | scarletstudio/transithealth | 408e6c1a063e46edb95040c26db93c2ff93c6d33 | [
"MIT"
] | 1 | 2021-06-15T10:28:26.000Z | 2021-06-15T10:28:26.000Z | import argparse
import json
import pandas as pd
from shapely.geometry import shape, Polygon, MultiPolygon, Point
from timeit import default_timer as timer
# Command-line interface: input stops, community-area geometries, output path.
cli = argparse.ArgumentParser(description="Transform CTA L Train stop data.")
cli.add_argument("--input_file", help="File path to read CTA L Train stop data from.")
cli.add_argument("--area_file", help="File path to read area data from.")
cli.add_argument("--output_file", help="File path to write results to.")
args = cli.parse_args()

start = timer()

# Read file
df = pd.read_csv(args.input_file)
df_areas = pd.read_csv(args.area_file)
records = df.to_dict(orient = 'records')
area_records = df_areas.to_dict(orient = 'records')

#Get multipolygon for each community area
# Each entry pairs an area number with its MultiPolygon, parsed from the
# GeoJSON 'geometry' column.
area_multipolygons = [
    (
        area['area_number'],
        MultiPolygon(shape(json.loads(area['geometry'])))
    )
    for area in area_records
]
def get_community_area(long, lat):
    """Return the number of the community area containing the point, or None."""
    candidate = Point([long, lat])
    return next(
        (number for number, polygon in area_multipolygons if polygon.contains(candidate)),
        None,
    )
# Tag each stop with the community area that contains it (None if outside all).
df['area_number'] = [ get_community_area(x['longitude'], x['latitude']) for x in records]

# Write output
df.to_csv(args.output_file, index=False)

# Show summary
end = timer()
secs = end - start
print(f"Transformed and wrote {len(df)} records in {secs:.1f} secs.")
| 26.72549 | 89 | 0.707263 |
795a254a05cce8c9ee1afc46cf7137d206793840 | 594 | py | Python | setup.py | antoinedelia/bullet | 8950d0459169a544e89821d17dbacde44e5380ae | [
"MIT"
] | null | null | null | setup.py | antoinedelia/bullet | 8950d0459169a544e89821d17dbacde44e5380ae | [
"MIT"
] | null | null | null | setup.py | antoinedelia/bullet | 8950d0459169a544e89821d17dbacde44e5380ae | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Package metadata for the `bullet` distribution.
setup(
    name='bullet',
    version='2.0.0',
    description='Beautiful Python prompts made simple.',
    long_description="Extensive support for Python list prompts \
    formatting and colors",
    url='https://github.com/Mckinsey666/bullets',
    keywords = "cli list prompt customize colors",
    author='Mckinsey666',
    license='MIT',
    packages=find_packages(),
    # colorama is only needed for ANSI color support on Windows.
    extras_require={
        ":sys_platform=='win32'": [
            'colorama>=0.4.0, <=0.4.1'
        ]
    }
)
795a2608aad868816a898556a84e0bfa9ba060cc | 17,939 | py | Python | pandaserver/dataservice/EventPicker.py | eschanet/QMonit | 83f3323fa465b3ae41f9a49f28332bdb5e748685 | [
"MIT"
] | 1 | 2020-12-05T16:59:13.000Z | 2020-12-05T16:59:13.000Z | pandaserver/dataservice/EventPicker.py | eschanet/QMonit | 83f3323fa465b3ae41f9a49f28332bdb5e748685 | [
"MIT"
] | null | null | null | pandaserver/dataservice/EventPicker.py | eschanet/QMonit | 83f3323fa465b3ae41f9a49f28332bdb5e748685 | [
"MIT"
] | null | null | null | '''
add data to dataset
'''
import os
import re
import sys
import fcntl
import datetime
import traceback
import pandaserver.brokerage.broker
from pandaserver.dataservice import DynDataDistributer
from pandaserver.dataservice.MailUtils import MailUtils
from pandaserver.dataservice.Notifier import Notifier
from pandaserver.taskbuffer.JobSpec import JobSpec
from pandaserver.userinterface import Client
from pandaserver.dataservice.DDM import rucioAPI
from pandaserver.dataservice.DataServiceUtils import select_scope
from pandacommon.pandalogger.PandaLogger import PandaLogger
from pandacommon.pandalogger.LogWrapper import LogWrapper
# logger
_logger = PandaLogger().getLogger('EventPicker')
class EventPicker:
    """Process one event-picking request file: resolve run/event pairs to
    datasets, build a dataset container, and request its transfer via Rucio."""

    # constructor
    def __init__(self,taskBuffer,siteMapper,evpFileName,ignoreError):
        """Bind the picker to one event-picking request file.

        :param taskBuffer: PanDA task buffer used for DB lookups and task updates.
        :param siteMapper: maps computing-site names to site specifications.
        :param evpFileName: path of the event-picking request file to process.
        :param ignoreError: when True, errors leave the request file in place
            (so it can be retried) instead of removing it and notifying the user.
        """
        self.taskBuffer = taskBuffer
        self.siteMapper = siteMapper
        self.ignoreError = ignoreError
        self.evpFileName = evpFileName
        # Token identifying this run in log messages.
        self.token = datetime.datetime.utcnow().isoformat(' ')
        # logger
        self.logger = LogWrapper(_logger,self.token)
        self.pd2p = DynDataDistributer.DynDataDistributer([],self.taskBuffer,self.siteMapper,
                                                          token=' ',logger=self.logger)
        # Fields filled in while parsing the request file in run().
        self.userDatasetName = ''
        self.creationTime = ''
        self.params = ''
        self.lockedBy = ''
        self.evpFile = None
        self.userTaskName = ''
        # message buffer
        self.msgBuffer = []
        self.lineLimit = 100
        # JEDI
        self.jediTaskID = None
        self.prodSourceLabel = None
def run(self):
try:
self.putLog('start %s' % self.evpFileName)
# lock evp file
self.evpFile = open(self.evpFileName)
try:
fcntl.flock(self.evpFile.fileno(),fcntl.LOCK_EX|fcntl.LOCK_NB)
except Exception:
# relase
self.putLog("cannot lock %s" % self.evpFileName)
self.evpFile.close()
return True
# options
runEvtList = []
eventPickDataType = ''
eventPickStreamName = ''
eventPickDS = []
eventPickAmiTag = ''
eventPickNumSites = 1
inputFileList = []
tagDsList = []
tagQuery = ''
tagStreamRef = ''
skipDaTRI = False
runEvtGuidMap = {}
ei_api = ''
# read evp file
for tmpLine in self.evpFile:
tmpMatch = re.search('^([^=]+)=(.+)$',tmpLine)
# check format
if tmpMatch is None:
continue
tmpItems = tmpMatch.groups()
if tmpItems[0] == 'runEvent':
# get run and event number
tmpRunEvt = tmpItems[1].split(',')
if len(tmpRunEvt) == 2:
runEvtList.append(tmpRunEvt)
elif tmpItems[0] == 'eventPickDataType':
# data type
eventPickDataType = tmpItems[1]
elif tmpItems[0] == 'eventPickStreamName':
# stream name
eventPickStreamName = tmpItems[1]
elif tmpItems[0] == 'eventPickDS':
# dataset pattern
eventPickDS = tmpItems[1].split(',')
elif tmpItems[0] == 'eventPickAmiTag':
# AMI tag
eventPickAmiTag = tmpItems[1]
elif tmpItems[0] == 'eventPickNumSites':
# the number of sites where datasets are distributed
try:
eventPickNumSites = int(tmpItems[1])
except Exception:
pass
elif tmpItems[0] == 'userName':
# user name
self.userDN = tmpItems[1]
self.putLog("user=%s" % self.userDN)
elif tmpItems[0] == 'userTaskName':
# user task name
self.userTaskName = tmpItems[1]
elif tmpItems[0] == 'userDatasetName':
# user dataset name
self.userDatasetName = tmpItems[1]
elif tmpItems[0] == 'lockedBy':
# client name
self.lockedBy = tmpItems[1]
elif tmpItems[0] == 'creationTime':
# creation time
self.creationTime = tmpItems[1]
elif tmpItems[0] == 'params':
# parameters
self.params = tmpItems[1]
elif tmpItems[0] == 'ei_api':
# ei api parameter for MC
ei_api = tmpItems[1]
elif tmpItems[0] == 'inputFileList':
# input file list
inputFileList = tmpItems[1].split(',')
try:
inputFileList.remove('')
except Exception:
pass
elif tmpItems[0] == 'tagDS':
# TAG dataset
tagDsList = tmpItems[1].split(',')
elif tmpItems[0] == 'tagQuery':
# query for TAG
tagQuery = tmpItems[1]
elif tmpItems[0] == 'tagStreamRef':
# StreamRef for TAG
tagStreamRef = tmpItems[1]
if not tagStreamRef.endswith('_ref'):
tagStreamRef += '_ref'
elif tmpItems[0] == 'runEvtGuidMap':
# GUIDs
try:
runEvtGuidMap = eval(tmpItems[1])
except Exception:
pass
# extract task name
if self.userTaskName == '' and self.params != '':
try:
tmpMatch = re.search('--outDS(=| ) *([^ ]+)',self.params)
if tmpMatch is not None:
self.userTaskName = tmpMatch.group(2)
if not self.userTaskName.endswith('/'):
self.userTaskName += '/'
except Exception:
pass
# suppress DaTRI
if self.params != '':
if '--eventPickSkipDaTRI' in self.params:
skipDaTRI = True
# get compact user name
compactDN = self.taskBuffer.cleanUserID(self.userDN)
# get jediTaskID
self.jediTaskID = self.taskBuffer.getTaskIDwithTaskNameJEDI(compactDN,self.userTaskName)
# get prodSourceLabel
self.prodSourceLabel = self.taskBuffer.getProdSourceLabelwithTaskID(self.jediTaskID)
# convert run/event list to dataset/file list
tmpRet,locationMap,allFiles = self.pd2p.convertEvtRunToDatasets(runEvtList,
eventPickDataType,
eventPickStreamName,
eventPickDS,
eventPickAmiTag,
self.userDN,
runEvtGuidMap,
ei_api
)
if not tmpRet:
if 'isFatal' in locationMap and locationMap['isFatal'] == True:
self.ignoreError = False
self.endWithError('Failed to convert the run/event list to a dataset/file list')
return False
# use only files in the list
if inputFileList != []:
tmpAllFiles = []
for tmpFile in allFiles:
if tmpFile['lfn'] in inputFileList:
tmpAllFiles.append(tmpFile)
allFiles = tmpAllFiles
# remove redundant CN from DN
tmpDN = self.userDN
tmpDN = re.sub('/CN=limited proxy','',tmpDN)
tmpDN = re.sub('(/CN=proxy)+$','',tmpDN)
# make dataset container
tmpRet = self.pd2p.registerDatasetContainerWithDatasets(self.userDatasetName,allFiles,
locationMap,
nSites=eventPickNumSites,
owner=tmpDN)
if not tmpRet:
self.endWithError('Failed to make a dataset container %s' % self.userDatasetName)
return False
# skip DaTRI
if skipDaTRI:
# successfully terminated
self.putLog("skip DaTRI")
# update task
self.taskBuffer.updateTaskModTimeJEDI(self.jediTaskID)
else:
# get candidates
tmpRet,candidateMaps = self.pd2p.getCandidates(self.userDatasetName, self.prodSourceLabel,
checkUsedFile=False, useHidden=True)
if not tmpRet:
self.endWithError('Failed to find candidate for destination')
return False
# collect all candidates
allCandidates = []
for tmpDS in candidateMaps:
tmpDsVal = candidateMaps[tmpDS]
for tmpCloud in tmpDsVal:
tmpCloudVal = tmpDsVal[tmpCloud]
for tmpSiteName in tmpCloudVal[0]:
if not tmpSiteName in allCandidates:
allCandidates.append(tmpSiteName)
if allCandidates == []:
self.endWithError('No candidate for destination')
return False
# get list of dataset (container) names
if eventPickNumSites > 1:
# decompose container to transfer datasets separately
tmpRet,tmpOut = self.pd2p.getListDatasetReplicasInContainer(self.userDatasetName)
if not tmpRet:
self.endWithError('Failed to get replicas in %s' % self.userDatasetName)
return False
userDatasetNameList = list(tmpOut)
else:
# transfer container at once
userDatasetNameList = [self.userDatasetName]
# loop over all datasets
sitesUsed = []
for tmpUserDatasetName in userDatasetNameList:
# get size of dataset container
tmpRet,totalInputSize = rucioAPI.getDatasetSize(tmpUserDatasetName)
if not tmpRet:
self.endWithError('Failed to get the size of {0} with {1}'.format(tmpUserDatasetName, totalInputSize))
return False
# run brokerage
tmpJob = JobSpec()
tmpJob.AtlasRelease = ''
self.putLog("run brokerage for %s" % tmpDS)
pandaserver.brokerage.broker.schedule([tmpJob],self.taskBuffer,self.siteMapper,True,allCandidates,
True,datasetSize=totalInputSize)
if tmpJob.computingSite.startswith('ERROR'):
self.endWithError('brokerage failed with %s' % tmpJob.computingSite)
return False
self.putLog("site -> %s" % tmpJob.computingSite)
# send transfer request
try:
tmpDN = rucioAPI.parse_dn(tmpDN)
tmpStatus,userInfo = rucioAPI.finger(tmpDN)
if not tmpStatus:
raise RuntimeError('user info not found for {0} with {1}'.format(tmpDN,userInfo))
tmpDN = userInfo['nickname']
tmpSiteSpec = self.siteMapper.getSite(tmpJob.computingSite)
scope_input, scope_output = select_scope(tmpSiteSpec, 'user')
tmpDQ2ID = tmpSiteSpec.ddm_input[scope_input]
tmpMsg = "%s ds=%s site=%s id=%s" % ('registerDatasetLocation for DaTRI ',
tmpUserDatasetName,
tmpDQ2ID,
tmpDN)
self.putLog(tmpMsg)
rucioAPI.registerDatasetLocation(tmpDS,[tmpDQ2ID],lifetime=14,owner=tmpDN,
activity="User Subscriptions")
self.putLog('OK')
except Exception:
errType,errValue = sys.exc_info()[:2]
tmpStr = 'Failed to send transfer request : %s %s' % (errType,errValue)
tmpStr.strip()
tmpStr += traceback.format_exc()
self.endWithError(tmpStr)
return False
# list of sites already used
sitesUsed.append(tmpJob.computingSite)
self.putLog("used %s sites" % len(sitesUsed))
# set candidates
if len(sitesUsed) >= eventPickNumSites:
# reset candidates to limit the number of sites
allCandidates = sitesUsed
sitesUsed = []
else:
# remove site
allCandidates.remove(tmpJob.computingSite)
# send email notification for success
tmpMsg = 'A transfer request was successfully sent to Rucio.\n'
tmpMsg += 'Your task will get started once transfer is completed.'
self.sendEmail(True,tmpMsg)
try:
# unlock and delete evp file
fcntl.flock(self.evpFile.fileno(),fcntl.LOCK_UN)
self.evpFile.close()
os.remove(self.evpFileName)
except Exception:
pass
# successfully terminated
self.putLog("end %s" % self.evpFileName)
return True
except Exception:
errType,errValue = sys.exc_info()[:2]
self.endWithError('Got exception %s:%s %s' % (errType,errValue,traceback.format_exc()))
return False
    # end with error
    def endWithError(self,message):
        """Log *message*, clean up the request file, notify the user, and
        record the failure on the JEDI task (when one is known)."""
        self.putLog(message,'error')
        # unlock evp file
        try:
            fcntl.flock(self.evpFile.fileno(),fcntl.LOCK_UN)
            self.evpFile.close()
            if not self.ignoreError:
                # remove evp file
                os.remove(self.evpFileName)
                # send email notification
                self.sendEmail(False,message)
        except Exception:
            pass
        # upload log
        if self.jediTaskID is not None:
            outLog = self.uploadLog()
            self.taskBuffer.updateTaskErrorDialogJEDI(self.jediTaskID,'event picking failed. '+outLog)
            # update task
            if not self.ignoreError:
                self.taskBuffer.updateTaskModTimeJEDI(self.jediTaskID,'tobroken')
            self.putLog(outLog)
        self.putLog('end %s' % self.evpFileName)
# put log
def putLog(self,msg,type='debug'):
tmpMsg = msg
if type == 'error':
self.logger.error(tmpMsg)
else:
self.logger.debug(tmpMsg)
    # send email notification
    def sendEmail(self,isSucceeded,message):
        """Email the requesting user a success/failure summary of the request.

        Silently returns when no email address can be resolved for the user DN.
        """
        # mail address
        toAdder = Notifier(self.taskBuffer,None,[]).getEmail(self.userDN)
        if toAdder == '':
            self.putLog('cannot find email address for %s' % self.userDN,'error')
            return
        # subject
        mailSubject = "PANDA notification for Event-Picking Request"
        # message
        mailBody = "Hello,\n\nHere is your request status for event picking\n\n"
        if isSucceeded:
            mailBody += "Status : Passed to Rucio\n"
        else:
            mailBody += "Status : Failed\n"
        mailBody += "Created : %s\n" % self.creationTime
        mailBody += "Ended : %s\n" % datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
        mailBody += "Dataset : %s\n" % self.userDatasetName
        mailBody += "\n"
        mailBody += "Parameters : %s %s\n" % (self.lockedBy,self.params)
        mailBody += "\n"
        mailBody += "%s\n" % message
        # send
        retVal = MailUtils().send(toAdder,mailSubject,mailBody)
        # return
        return
# upload log
def uploadLog(self):
if self.jediTaskID is None:
return 'cannot find jediTaskID'
strMsg = self.logger.dumpToString()
s,o = Client.uploadLog(strMsg,self.jediTaskID)
if s != 0:
return "failed to upload log with {0}.".format(s)
if o.startswith('http'):
return '<a href="{0}">log</a>'.format(o)
return o
| 45.530457 | 126 | 0.480685 |
795a2645e89df84281d6a81a1b4f1287d2923ad6 | 5,373 | py | Python | test/unicode/unicode1.py | kylebarron/MagicPython | da6fa0793e2c85d3bf7709ff1d4f65ccf468db11 | [
"MIT"
] | 1,482 | 2015-10-16T21:59:32.000Z | 2022-03-30T11:44:40.000Z | test/unicode/unicode1.py | kylebarron/MagicPython | da6fa0793e2c85d3bf7709ff1d4f65ccf468db11 | [
"MIT"
] | 226 | 2015-10-15T15:53:44.000Z | 2022-03-25T03:08:27.000Z | test/unicode/unicode1.py | kylebarron/MagicPython | da6fa0793e2c85d3bf7709ff1d4f65ccf468db11 | [
"MIT"
] | 129 | 2015-10-20T02:41:49.000Z | 2022-03-22T01:44:36.000Z | class Üa(Êa):
'Œ\nŃ'
@æaœ
def ŌÏŒĘ(self, Ú=1):
print('превед 你好')
return Ù
ÜBER = 1
你好 = lambda: 你好
def 你好(): pass
class : meta.class.python, source.python, storage.type.class.python
: meta.class.python, source.python
Üa : entity.name.type.class.python, meta.class.python, source.python
( : meta.class.inheritance.python, meta.class.python, punctuation.definition.inheritance.begin.python, source.python
Êa : entity.other.inherited-class.python, meta.class.inheritance.python, meta.class.python, source.python
) : meta.class.inheritance.python, meta.class.python, punctuation.definition.inheritance.end.python, source.python
: : meta.class.python, punctuation.section.class.begin.python, source.python
: source.python
' : punctuation.definition.string.begin.python, source.python, string.quoted.docstring.single.python
Œ : source.python, string.quoted.docstring.single.python
\n : constant.character.escape.python, source.python, string.quoted.docstring.single.python
Ń : source.python, string.quoted.docstring.single.python
' : punctuation.definition.string.end.python, source.python, string.quoted.docstring.single.python
: meta.function.decorator.python, source.python
@ : entity.name.function.decorator.python, meta.function.decorator.python, punctuation.definition.decorator.python, source.python
æaœ : entity.name.function.decorator.python, meta.function.decorator.python, source.python
: meta.function.python, source.python
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
ŌÏŒĘ : entity.name.function.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
self : meta.function.parameters.python, meta.function.python, source.python, variable.parameter.function.language.python, variable.parameter.function.language.special.self.python
, : meta.function.parameters.python, meta.function.python, punctuation.separator.parameters.python, source.python
: meta.function.parameters.python, meta.function.python, source.python
Ú : meta.function.parameters.python, meta.function.python, source.python, variable.parameter.function.language.python
= : keyword.operator.python, meta.function.parameters.python, meta.function.python, source.python
1 : constant.numeric.dec.python, meta.function.parameters.python, meta.function.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
print : meta.function-call.python, source.python, support.function.builtin.python
( : meta.function-call.python, punctuation.definition.arguments.begin.python, source.python
' : meta.function-call.arguments.python, meta.function-call.python, punctuation.definition.string.begin.python, source.python, string.quoted.single.python
превед 你好 : meta.function-call.arguments.python, meta.function-call.python, source.python, string.quoted.single.python
' : meta.function-call.arguments.python, meta.function-call.python, punctuation.definition.string.end.python, source.python, string.quoted.single.python
) : meta.function-call.python, punctuation.definition.arguments.end.python, source.python
: source.python
return : keyword.control.flow.python, source.python
: source.python
Ù : source.python
: source.python
ÜBER : constant.other.caps.python, source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
1 : constant.numeric.dec.python, source.python
: source.python
你好 : source.python
: source.python
= : keyword.operator.assignment.python, source.python
: source.python
lambda : meta.lambda-function.python, source.python, storage.type.function.lambda.python
: : meta.lambda-function.python, punctuation.section.function.lambda.begin.python, source.python
: source.python
你好 : source.python
: meta.function.python, source.python
def : meta.function.python, source.python, storage.type.function.python
: meta.function.python, source.python
你好 : entity.name.function.python, meta.function.python, source.python
( : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.begin.python, source.python
) : meta.function.parameters.python, meta.function.python, punctuation.definition.parameters.end.python, source.python
: : meta.function.python, punctuation.section.function.begin.python, source.python
: source.python
pass : keyword.control.flow.python, source.python
| 69.779221 | 187 | 0.685837 |
795a26bea992464ba89e24952a70f9220a74c645 | 956 | py | Python | tests/vasp/test_potential.py | pmrv/pyiron | 3631053231761510a2fd4b1416a93adee44fd6f7 | [
"BSD-3-Clause"
] | null | null | null | tests/vasp/test_potential.py | pmrv/pyiron | 3631053231761510a2fd4b1416a93adee44fd6f7 | [
"BSD-3-Clause"
] | null | null | null | tests/vasp/test_potential.py | pmrv/pyiron | 3631053231761510a2fd4b1416a93adee44fd6f7 | [
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
import unittest
import os
from pyiron.vasp.potential import get_enmax_among_species
class TestPotential(unittest.TestCase):
    """Tests for pyiron.vasp.potential.get_enmax_among_species."""

    @classmethod
    def setUpClass(cls):
        # Directory of this test file, available to tests that need fixtures.
        cls.file_location = os.path.dirname(os.path.abspath(__file__))

    def test_get_enmax_among_species(self):
        # return_list toggles between a bare float and a tuple result.
        float_out = get_enmax_among_species(['Fe'], return_list=False)
        self.assertTrue(isinstance(float_out, float))
        tuple_out = get_enmax_among_species(['Fe'], return_list=True)
        self.assertTrue(isinstance(tuple_out, tuple))
        # Unknown species and unknown XC functional must raise.
        self.assertRaises(KeyError, get_enmax_among_species, symbol_lst=['X'])
        self.assertRaises(ValueError, get_enmax_among_species, symbol_lst=['Fe'], xc='FOO')
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| 32.965517 | 108 | 0.735356 |
795a2722be2891e65032c147b8778d9f9e44f7ef | 16,402 | py | Python | cit-api/pipeline/views/proximity.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 10 | 2020-11-12T15:13:40.000Z | 2022-03-05T22:33:08.000Z | cit-api/pipeline/views/proximity.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 28 | 2020-07-17T16:33:55.000Z | 2022-03-21T16:24:25.000Z | cit-api/pipeline/views/proximity.py | bcgov/CIT | b9db4f169b52e9a6293b3ee1e61935888074215a | [
"Apache-2.0"
] | 5 | 2020-11-02T23:39:53.000Z | 2022-03-01T19:09:45.000Z | from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from django.core import serializers
from django.db.models import Sum, F
from django.contrib.gis.geos import Point
from django.contrib.gis.db.models.functions import Distance
from django.contrib.gis.measure import D
import json
from pipeline.models.general import RegionalDistrict, Municipality, Road
from pipeline.models.location_assets import Airport, Location, CustomsPortOfEntry, FirstResponder, Hospital
from pipeline.models.railway import Railway
from pipeline.models.river import River
from pipeline.models.lake import Lake
from pipeline.models.community import Community
from pipeline.models.indian_reserve_band_name import IndianReserveBandName
from pipeline.models.roads_and_highways import RoadsAndHighways
# TODO: Reduce properties per feature across all sets in GET
NETWORK_CODES = ['5/1', '10/2', '25/5', '50/10']
class ProximityView(APIView):
    """API view returning points of interest within 100 km of a lat/lng point.

    GET parameters:
        lat, lng: coordinates (decimal-degree strings) of the opportunity.

    The response bundles the nearest airport, port, customs port, railway,
    highway, lake, river, community, first-responder stations, municipalities
    and First Nations communities, mostly as serialized GeoJSON.
    """

    def validate_request(self, lat, lng):
        """Validate lat/lng query params; return an error Response or None.

        Bug fix: the previous check ``lat.isdigit() is not False`` was
        inverted and wrong for coordinates -- it flagged plain integer
        strings ("49") as invalid while letting non-numeric values through,
        which then crashed ``float(lat)`` in ``get``. A value is valid iff
        it parses as a float ("49.25", "-123.1", ...).
        """
        errors = []
        if lat is None:
            errors.append('Must supply a parameter "lat" with a valid latitude')
        if lng is None:
            errors.append('Must supply a parameter "lng" with a valid longitude')
        if lat is not None and not self._is_float(lat):
            errors.append('Parameter "lat" must be a valid latitude')
        if lng is not None and not self._is_float(lng):
            errors.append('Parameter "lng" must be a valid longitude')
        if len(errors) > 0:
            # NOTE(review): the HTTP status code ends up inside the body, not
            # on the Response itself, so clients still receive a 200 -- kept
            # as-is for backwards compatibility with existing consumers.
            return Response(dict(errors=errors, status=status.HTTP_400_BAD_REQUEST))
        return None

    @staticmethod
    def _is_float(value):
        """Return True when *value* (a string) parses as a float."""
        try:
            float(value)
        except (TypeError, ValueError):
            return False
        return True

    def get(self, request, format=None):
        """
        Points of interests in proximity to opportunity.
        """
        lat = request.query_params.get('lat', None)
        lng = request.query_params.get('lng', None)
        validation_error = self.validate_request(lat, lng)
        if validation_error:
            return validation_error
        point = Point(float(lng), float(lat))
        # Containing regional district (NOTE(review): .get raises DoesNotExist
        # when the point falls outside every district -- confirm upstream
        # guarantees coverage).
        regional_district = None
        regional_check = RegionalDistrict.objects.get(geom__contains=point)
        if regional_check:
            regional_district = dict(id=regional_check.id, name=regional_check.name)
        # Nearest airport within 100 km.
        airport = None
        airport_check = Location.objects.annotate(distance=Distance("point", point)).filter(
            point__distance_lte=(point, D(km=100)),
            location_type="airports").order_by('distance')[:1]
        if airport_check:
            airport = json.loads(
                serializers.serialize('geojson', airport_check, geometry_field=point))
            location_id = int(airport['features'][0]['properties']['pk'])
            airport['airport_distance'] = airport_check.first().distance.km
            airport['features'][0]['properties']['name'] = Location.objects.get(id=location_id).name
        # Nearest port or terminal within 100 km.
        deep_port = None
        deep_port_check = Location.objects.annotate(distance=Distance("point", point)).filter(
            point__distance_lte=(point, D(km=100)),
            location_type="port_and_terminal").order_by('distance')[:1]
        if deep_port_check:
            deep_port = json.loads(
                serializers.serialize('geojson', deep_port_check, geometry_field=point))
            location_id = int(deep_port['features'][0]['properties']['pk'])
            deep_port['port_distance'] = deep_port_check.first().distance.km
            deep_port['features'][0]['properties']['name'] = Location.objects.get(
                id=location_id).name
        # TODO: join Location to get name field
        # Nearest customs port of entry within 100 km.
        customs_port = None
        customs_port_check = CustomsPortOfEntry.objects.annotate(
            distance=Distance("point", point)).filter(point__distance_lte=(point, D(
                km=100))).order_by('distance')[:1]
        if customs_port_check:
            customs_port = json.loads(
                serializers.serialize('geojson', customs_port_check, geometry_field=point))
            customs_port['customs_port_distance'] = customs_port_check.first().distance.km
            location_id = int(customs_port['features'][0]['properties']['pk'])
            customs_port['features'][0]['properties']['name'] = Location.objects.get(
                id=location_id).name
        # Nearest post-secondary institution within 100 km.
        post_secondary = None
        post_secondary_check = Location.objects.annotate(distance=Distance("point", point)).filter(
            point__distance_lte=(point, D(km=100)),
            location_type="post_secondary_institutions").order_by('distance')[:1]
        if post_secondary_check:
            post_secondary = json.loads(
                serializers.serialize('geojson', post_secondary_check, geometry_field=point))
            post_secondary['post_secondary_distance'] = post_secondary_check.first().distance.km
        # Nearest railway segment within 100 km.
        railway = None
        railway_check = Railway.objects.annotate(distance=Distance("geom", point)).filter(
            geom__distance_lte=(point, D(km=100))).order_by('distance')[:1]
        if railway_check:
            railway = json.loads(
                serializers.serialize('geojson', railway_check, geometry_field=point))
            railway['railway_distance'] = railway_check.first().distance.km
        # Nearest highway (road features only) within 100 km.
        highway = None
        highway_check = RoadsAndHighways.objects.annotate(distance=Distance("geom", point)).filter(
            geom__distance_lte=(point, D(km=100)), feature_type="Road").order_by('distance')[:1]
        if highway_check:
            highway = json.loads(
                serializers.serialize('geojson', highway_check, geometry_field=point))
            highway['highway_distance'] = highway_check.first().distance.km
        # Nearest lake within 100 km.
        lake = None
        lake_check = Lake.objects.annotate(distance=Distance("geom", point)).filter(
            geom__distance_lte=(point, D(km=100))).order_by('distance')[:1]
        if lake_check:
            lake = json.loads(serializers.serialize('geojson', lake_check, geometry_field=point))
            lake['lake_distance'] = lake_check.first().distance.km
        # Nearest river within 100 km.
        river = None
        river_check = River.objects.annotate(distance=Distance("geom", point)).filter(
            geom__distance_lte=(point, D(km=100))).order_by('distance')[:1]
        if river_check:
            river = json.loads(serializers.serialize('geojson', river_check, geometry_field=point))
            river['river_distance'] = river_check.first().distance.km
        # Nearest research centre within 100 km.
        research_centre = None
        research_centre_check = Location.objects.annotate(distance=Distance("point", point)).filter(
            point__distance_lte=(point, D(km=100)),
            location_type="research_centres").order_by('distance')[:1]
        if research_centre_check:
            research_centre = json.loads(
                serializers.serialize('geojson', research_centre_check, geometry_field=point))
            research_centre['research_centre_distance'] = research_centre_check.first().distance.km
        # Nearest community, plus its transmission distance and an averaged
        # broadband network speed category derived from the percent_* fields.
        community = None
        network_avg = dict()
        transmission = None
        community_check = Community.objects.annotate(distance=Distance("point", point)).filter(
            point__distance_lte=(point, D(km=100))).order_by('distance')[:1]
        if community_check:
            community = json.loads(
                serializers.serialize('geojson', community_check, geometry_field=point))
            community['community_distance'] = community_check.first().distance.km
            transmission = dict()
            transmission['distance'] = community_check.first(
            ).nearest_transmission_distance + community['community_distance']
            properties = community['features'][0]['properties']
            network_percents = [
                properties['percent_5_1'], properties['percent_10_2'], properties['percent_25_5'],
                properties['percent_50_10']
            ]
            # scan for network_avg speed category
            last_network = properties['percent_5_1']
            network_avg = 'Unknown network'
            index = 0
            for network in network_percents:
                if last_network is not None and network is not None and abs(last_network -
                                                                            network) < 0.1:
                    network_avg = NETWORK_CODES[index]
                last_network = network
                index += 1
        # Nearest first-responder stations within 100 km, by keyword.
        nearest_fire_station = None
        nearest_fire_station_check = Location.objects.annotate(
            distance=Distance("point", point)).filter(
                point__distance_lte=(point, D(km=100)),
                firstresponder__keywords__contains="fire").order_by('distance')[:1]
        if nearest_fire_station_check:
            nearest_fire_station = json.loads(
                serializers.serialize('geojson', nearest_fire_station_check, geometry_field=point))
            nearest_fire_station['distance'] = nearest_fire_station_check.first().distance.km
        nearest_police_station = None
        nearest_police_station_check = Location.objects.annotate(
            distance=Distance("point", point)).filter(
                point__distance_lte=(point, D(km=100)),
                firstresponder__keywords__contains="police").order_by('distance')[:1]
        if nearest_police_station_check:
            nearest_police_station = json.loads(
                serializers.serialize('geojson', nearest_police_station_check,
                                      geometry_field=point))
            nearest_police_station['distance'] = nearest_police_station_check.first().distance.km
        nearest_ambulance_station = None
        nearest_ambulance_station_check = Location.objects.annotate(
            distance=Distance("point", point)).filter(
                point__distance_lte=(point, D(km=100)),
                firstresponder__keywords__contains="ambulance").order_by('distance')[:1]
        if nearest_ambulance_station_check:
            nearest_ambulance_station = json.loads(
                serializers.serialize('geojson',
                                      nearest_ambulance_station_check,
                                      geometry_field=point))
            nearest_ambulance_station['distance'] = nearest_ambulance_station_check.first(
            ).distance.km
        nearest_coast_guard_station = None
        nearest_coast_guard_station_check = Location.objects.annotate(
            distance=Distance("point", point)).filter(
                point__distance_lte=(point, D(km=100)),
                firstresponder__keywords__contains="coastguard").order_by('distance')[:1]
        if nearest_coast_guard_station_check:
            nearest_coast_guard_station = json.loads(
                serializers.serialize('geojson',
                                      nearest_coast_guard_station_check,
                                      geometry_field=point))
            nearest_coast_guard_station['distance'] = nearest_coast_guard_station_check.first(
            ).distance.km
        # Nearest hospital within 100 km.
        nearest_health_center = None
        nearest_health_center_check = Location.objects.annotate(
            distance=Distance("point", point)).filter(
                point__distance_lte=(point, D(km=100)),
                location_type='hospitals').order_by('distance')[:1]
        if nearest_health_center_check:
            nearest_health_center = json.loads(
                serializers.serialize('geojson', nearest_health_center_check, geometry_field=point))
            nearest_health_center['distance'] = nearest_health_center_check.first().distance.km
        # Broadband speed recorded on the nearest road segment, if any.
        network_at_road = 'Unknown network'
        network_at_road_check = Road.objects.annotate(distance=Distance("geom", point)).filter(
            geom__distance_lte=(point, D(km=100))).order_by('distance')[:1]
        if network_at_road_check and network_at_road_check.first().best_broadband:
            network_at_road = network_at_road_check.first().best_broadband
        # Five nearest municipalities, each annotated with distance and a
        # best-effort population figure.
        municipality = None
        municipalities = None
        municipalities_check = Municipality.objects.annotate(
            distance=Distance("geom", point)).filter(geom__distance_lte=(point, D(
                km=100))).order_by('distance')[:5]
        if municipalities_check:
            municipalities = json.loads(
                serializers.serialize('geojson', municipalities_check, geometry_field=point))
            # Add the missing annotated distance value to each geojson feature
            index = 0
            while index < len(municipalities_check):
                municipality_id = int(municipalities['features'][index]['properties']['pk'])
                if index == 0:
                    municipality = {
                        'id': municipalities['features'][index]['properties']['pk'],
                        'name': municipalities['features'][index]['properties']['name']
                    }
                municipalities['features'][index]['properties']['distance'] = municipalities_check[
                    index].distance.km
                municipalities['features'][index]['properties']['population'] = 0
                community_queryset = Community.objects.filter(
                    municipality_id=municipality_id).annotate(
                        count=Sum(F('census_subdivision__pop_total_2016')))
                # Valid population found, else not valid
                #
                # Tech Loan: The area overlap query below is to patch up misssing data in the Communities V6 spread sheet
                if len(community_queryset):
                    municipalities['features'][index]['properties'][
                        'population'] = community_queryset.values()[0]['count']
                else:
                    # NOTE: municipality_id is cast to int above, so the
                    # f-string interpolation here cannot inject SQL.
                    total_pop = Community.objects.raw(
                        f'select pm.id, pcpdcas.* from pipeline_municipality pm, pipeline_cen_prof_detailed_csd_attrs_sp pcpdcas where pm.id = {municipality_id} and st_intersects(pm.geom, pcpdcas.geom) and (st_area(st_intersection(pm.geom, pcpdcas.geom))/st_area(pm.geom)) > .55;')[0].pop_total_2016
                    municipalities['features'][index]['properties']['population'] = total_pop
                index += 1
        # TODO: Join on community, join on census for population
        # Might be missing info
        first_nation_communities = None
        first_nation_community_check = Community.objects.annotate(
            distance=Distance("point", point)).filter(point__distance_lte=(point, D(
                km=100)), community_type__in=["Rural First Nations Reserve", "Urban First Nations Reserve"]).order_by('distance')[:3]
        if first_nation_community_check:
            first_nation_communities = json.loads(
                serializers.serialize('geojson', first_nation_community_check,
                                      geometry_field=point))
            # Add the missing annotated distance value to each geojson feature
            index = 0
            while index < len(first_nation_community_check):
                first_nation_communities['features'][index]['properties'][
                    'distance'] = first_nation_community_check[index].distance.km
                index += 1
        return Response(
            dict(municipality=municipality,
                 regionalDistrict=regional_district,
                 nearestAirport=airport,
                 nearestPort=deep_port,
                 nearestCustomsPortOfEntry=customs_port,
                 nearestPostSecondary=post_secondary,
                 nearestHighway=highway,
                 nearestRailway=railway,
                 nearestResearchCentre=research_centre,
                 community=community,
                 nearestTransmission=transmission,
                 nearestFirstNations=first_nation_communities,
                 nearestMunicipalities=municipalities,
                 nearestLake=lake,
                 nearestRiver=river,
                 networkAtRoad=network_at_road,
                 networkAvg=network_avg,
                 nearestFireStation=nearest_fire_station,
                 nearestPoliceStation=nearest_police_station,
                 nearestAmbulanceStation=nearest_ambulance_station,
                 nearestCoastGuardStation=nearest_coast_guard_station,
                 nearestHealthCenter=nearest_health_center))
| 52.909677 | 299 | 0.637178 |
795a27aa20a16731329d0958742511da3dc6e246 | 8,003 | py | Python | yolo/test_yolo.py | simenvg/cloud_detection_framework | 97db2eb2cf3da91aa64ae90bf404adbd4744578b | [
"MIT"
] | 4 | 2020-06-26T06:47:26.000Z | 2022-01-10T07:46:54.000Z | yolo/test_yolo.py | simenvg/cloud-detection-framework | 97db2eb2cf3da91aa64ae90bf404adbd4744578b | [
"MIT"
] | null | null | null | yolo/test_yolo.py | simenvg/cloud-detection-framework | 97db2eb2cf3da91aa64ae90bf404adbd4744578b | [
"MIT"
] | 4 | 2020-05-13T08:23:48.000Z | 2021-04-19T07:58:47.000Z | # Instead just add darknet.py to somewhere in your python path
# OK actually that might not be a great idea, idk, work in progress
# Use at your own risk. or don't, i don't care
import xml.etree.ElementTree as ET
import os
import cv2
import sqlite3 as db
import argparse
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
print('no display found. Using non-interactive Agg backend')
mpl.use('Agg')
import matplotlib.pyplot as plt
import pickle
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
class Box(object):
    """Axis-aligned bounding box for a detection or ground-truth object.

    Attributes:
        cls: class label of the object (e.g. 'boat').
        x_min, x_max, y_min, y_max: pixel extents of the box.
        confidence: detector confidence score, or None for ground-truth boxes.
    """

    def __init__(self, cls, x_min, x_max, y_min, y_max, confidence=None):
        self.cls = cls
        self.x_min = x_min
        self.x_max = x_max
        self.y_min = y_min
        self.y_max = y_max
        self.confidence = confidence
def get_GT_boxes(label_filepath):
    """Parse a Pascal-VOC XML annotation file and return its 'boat' boxes.

    Parameters:
        label_filepath: path to the .xml annotation file.

    Returns:
        list of Box objects (confidence None) for every object named 'boat'.
    """
    # Context manager guarantees the handle is closed even if parsing fails
    # (the original opened the file and never closed it).
    with open(os.path.join(label_filepath), 'r') as in_file:
        tree = ET.parse(in_file)
    root = tree.getroot()
    boxes = []
    for obj in root.iter('object'):
        xmlbox = obj.find('bndbox')
        if obj.find('name').text == 'boat':
            boxes.append(Box(obj.find('name').text, float(xmlbox.find('xmin').text), float(
                xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)))
    return boxes
def convert_yolo_format(x_center, y_center, width, height):
    """Convert a YOLO-style (center, size) box to [x_min, x_max, y_min, y_max]."""
    half_w = float(width) / 2
    half_h = float(height) / 2
    x_mid = float(x_center)
    y_mid = float(y_center)
    return [x_mid - half_w, x_mid + half_w, y_mid - half_h, y_mid + half_h]
def get_intersected_area(box1, box2):
    """Return the overlap area of two boxes, or -1 when they do not intersect."""
    overlap_w = min(box1.x_max, box2.x_max) - max(box1.x_min, box2.x_min)
    overlap_h = min(box1.y_max, box2.y_max) - max(box1.y_min, box2.y_min)
    # Boxes that merely touch (zero-width/height overlap) count as disjoint.
    if overlap_w > 0 and overlap_h > 0:
        return overlap_w * overlap_h
    return -1
def get_iou(box1, box2):
    """Return intersection-over-union of two boxes, or -1 when disjoint."""
    intersection = get_intersected_area(box1, box2)
    # print(intersection)
    if intersection == -1:
        return -1
    area1 = (box1.x_max - box1.x_min) * (box1.y_max - box1.y_min)
    area2 = (box2.x_max - box2.x_min) * (box2.y_max - box2.y_min)
    union = area1 + area2 - intersection
    return intersection / union
def valid_detection(detected_box, gt_box, iou_thresh=0.5):
    """Return True when the detection overlaps ground truth by at least iou_thresh."""
    iou = get_iou(detected_box, gt_box)
    return iou >= iou_thresh
def get_detections_stats(conn, data_path, iou_thresh, confidence_thresh=0.25):
    """Count true positives, detections and ground-truth boxes over the test set.

    Reads the image list from <data_path>/model/test.txt, the ground truth from
    each image's sibling .xml file, and the detections from the 'detections'
    table of the sqlite connection `conn`.

    Returns:
        [true_positives, num_detections, num_gt_boxes]
    """
    true_positives = 0
    num_detections = 0
    num_gt_boxes = 0
    c = conn.cursor()
    test_file = open(os.path.join(data_path, 'model', 'test.txt'), 'r')
    image_filepaths = test_file.readlines()
    test_file.close()
    for img in image_filepaths:
        # Ground-truth annotation lives next to the image, same stem, .xml.
        gt_boxes = get_GT_boxes(os.path.join((img.strip()[:-4] + '.xml')))
        c.execute('SELECT * FROM detections WHERE image_name=? and confidence>=? and class_name=?',
                  (img.strip(), confidence_thresh, 'boat'))
        detections = c.fetchall()
        num_detections += len(detections)
        for gt_box in gt_boxes:
            # Iterate backwards so matched detections can be removed in place;
            # each detection may match at most one ground-truth box.
            for i in range(len(detections) - 1, -1, -1):
                detected_box = Box(detections[i][5], detections[i][1], detections[i]
                                   [2], detections[i][3], detections[i][4], detections[i][6])
                if detected_box.confidence >= confidence_thresh:
                    if valid_detection(detected_box, gt_box, iou_thresh=iou_thresh):
                        true_positives += 1
                        detections.remove(detections[i])
                        break
        num_gt_boxes += len(gt_boxes)
    print('TP: ', true_positives, ' num_detections: ',
          num_detections, ' num_gt: ', num_gt_boxes)
    return [true_positives, num_detections, num_gt_boxes]
def get_precision_recall(conn, data_path, iou_thresh, confidence_thresh=0.25):
    """Compute (precision, recall) over the test set at the given thresholds."""
    true_positives, num_detections, num_gt_boxes = get_detections_stats(
        conn, data_path, iou_thresh, confidence_thresh)
    # Guard against division by zero when nothing was detected / annotated.
    precision = float(true_positives) / float(num_detections) if num_detections > 0 else 0
    recall = float(true_positives) / float(num_gt_boxes) if num_gt_boxes > 0 else 0
    return (precision, recall)
def get_confusion_matrix(conn, data_path, iou_thresh, confidence_thresh=0.25):
    """Write TP/FP/FN counts to results/confusion_matrix.txt and return them.

    Returns:
        [true_positives, false_positives, false_negatives]
    """
    [true_positives, num_detections, num_gt_boxes] = get_detections_stats(
        conn, data_path, iou_thresh, confidence_thresh)
    false_positives = num_detections - true_positives
    false_negatives = num_gt_boxes - true_positives
    # Context manager guarantees the file is closed even if a write fails.
    with open(os.path.join(data_path, 'results', 'confusion_matrix.txt'), 'w') as file:
        file.write('true_positives = ' + str(true_positives) + '\n')
        file.write('false_positives = ' + str(false_positives) + '\n')
        file.write('false_negatives = ' + str(false_negatives) + '\n')
        file.write('num_detections = ' + str(num_detections) + '\n')
        file.write('num_gt_boxes = ' + str(num_gt_boxes) + '\n')
    return [true_positives, false_positives, false_negatives]
def save_images_with_boxes(conn, data_path, conf_thresh=0.25):
    """Draw ground-truth (green) and detected (red/blue) boxes on every test
    image and save the annotated copies under <data_path>/results/.

    Detections come from the sqlite 'detections' table; ground truth from the
    per-image .xml annotation files. Terminates the process if an image file
    cannot be read.
    """
    c = conn.cursor()
    test_file = open(os.path.join(data_path, 'model', 'test.txt'), 'r')
    image_filepaths = test_file.readlines()
    test_file.close()
    for img in image_filepaths:
        img_name = img.strip().split('/')[-1]
        gt_boxes = get_GT_boxes(os.path.join(
            '', (img.strip()[:-4] + '.xml')))
        c.execute('SELECT * FROM detections WHERE image_name=? AND confidence>=?',
                  (img.strip(), conf_thresh))
        detections = c.fetchall()
        image = cv2.imread(img.strip())
        print(img.strip())
        if image is None:
            # Hard-exit on a missing/corrupt image so the failure is obvious.
            print('No image')
            exit()
        # Ground truth in green.
        for box in gt_boxes:
            cv2.rectangle(image, (int(box.x_min), int(box.y_max)),
                          (int(box.x_max), int(box.y_min)), GREEN, 6)
        # Detections: blue for 'building', red for everything else.
        for box in detections:
            if (box[5] == 'building'):
                color = BLUE
            else:
                color = RED
            cv2.rectangle(image, (int(box[1]), int(box[3])),
                          (int(box[2]), int(box[4])), color, 6)
        cv2.imwrite(os.path.join(data_path, 'results',
                                 img_name), image)
def write_prec_recall_to_file(data_path, precisions, recalls, name='Yolo'):
    """Write a named precision/recall curve to results/prec_recalls.txt.

    File format: the curve name on the first line, then one space-separated
    line of precision values and one of recall values.

    Bug fix: the original guard ``if i != len(precisions)`` was always true
    (indices stop at len - 1), so every values line ended in a stray trailing
    space; ``' '.join`` produces the intended separator-only layout.
    """
    with open(os.path.join(data_path, 'results', 'prec_recalls.txt'), 'w') as file:
        file.write(name + '\n')
        file.write(' '.join(str(p) for p in precisions) + '\n')
        file.write(' '.join(str(r) for r in recalls) + '\n')
def calculate_prec_recall(data_path, conn):
    """Sweep confidence thresholds 0.00..0.99, record precision/recall at each,
    dump the raw values to results/prec_recalls.txt and save a PR curve plot
    to results/prec_recall.png. IoU threshold is fixed at 0.5.
    """
    conf_threshs = [x * 0.01 for x in range(0, 100)]
    precisions = []
    recalls = []
    for conf_thresh in conf_threshs:
        (precision, recall) = get_precision_recall(
            conn, data_path, 0.5, conf_thresh)
        precisions.append(precision)
        recalls.append(recall)
    print(precisions)
    print(recalls)
    write_prec_recall_to_file(data_path, precisions, recalls)
    # print(get_precision_recall(conn, data_path, 0.5))
    plt.plot(recalls, precisions)
    plt.grid(True)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.savefig(os.path.join(data_path, 'results', 'prec_recall.png'))
def main(data_path):
    # Full evaluation pipeline: draw detections on the test images, plot the
    # precision/recall curve and dump the confusion matrix, all driven by the
    # sqlite detections database under <data_path>/results/.
    conn = db.connect(os.path.join(data_path, 'results', 'detections.db'))
    save_images_with_boxes(conn, data_path)
    calculate_prec_recall(data_path, conn)
    get_confusion_matrix(conn, data_path, 0.5)
    conn.close()
if __name__ == '__main__':
    # Command-line entry point: expects the path to the data folder holding
    # the model/ and results/ sub-directories.
    parser = argparse.ArgumentParser(description='Input path to darknet')
    parser.add_argument('DATA_PATH', type=str, nargs=1,
                        help='Set path to data folder, containg datasets')
    args = parser.parse_args()
    DATA_PATH = args.DATA_PATH[0]
    main(DATA_PATH)
| 37.050926 | 121 | 0.628389 |
795a289541a01141e09be2ecd60e65071bb19c65 | 54,334 | py | Python | numpy/linalg/tests/test_linalg.py | gogulnie/numpy | 485b099cd4b82d65dc38cb2b28c7119f003c76c4 | [
"BSD-3-Clause"
] | null | null | null | numpy/linalg/tests/test_linalg.py | gogulnie/numpy | 485b099cd4b82d65dc38cb2b28c7119f003c76c4 | [
"BSD-3-Clause"
] | null | null | null | numpy/linalg/tests/test_linalg.py | gogulnie/numpy | 485b099cd4b82d65dc38cb2b28c7119f003c76c4 | [
"BSD-3-Clause"
] | null | null | null | """ Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import itertools
import traceback
import warnings
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity
from numpy import multiply, atleast_2d, inf, asarray, matrix
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, run_module_suite,
dec, SkipTest, suppress_warnings
)
def ifthen(a, b):
    # Material implication: "a implies b" -- only a truthy `a` forwards `b`.
    return b if a else True
def imply(a, b):
    # Logical implication a -> b, equivalent to (not a) or b.
    if a:
        return b
    return True
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
    # Wrapper over numpy.testing.assert_almost_equal that loosens the decimal
    # tolerance automatically for single-precision inputs.
    is_single = asarray(a).dtype.type in (single, csingle)
    decimal = single_decimal if is_single else double_decimal
    old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
    # Map each supported dtype to its real-valued counterpart; unknown dtypes
    # raise KeyError, matching the original lookup table.
    real_map = {single: single, csingle: single,
                double: double, cdouble: double}
    return real_map[dtype]
def get_complex_dtype(dtype):
    # Map each supported dtype to its complex counterpart; unknown dtypes
    # raise KeyError, matching the original lookup table.
    complex_map = {single: csingle, csingle: csingle,
                   double: cdouble, cdouble: cdouble}
    return complex_map[dtype]
def get_rtol(dtype):
    """Return a safe relative tolerance for comparisons in this dtype."""
    # Single precision needs a much looser tolerance than double.
    return 1e-5 if dtype in (single, csingle) else 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase(object):
    """A named (a, b) operand pair plus tags used to filter linalg tests."""

    def __init__(self, name, a, b, tags=frozenset()):
        """
        A bundle of arguments to be passed to a test case, with an identifying
        name, the operands a and b, and a set of tags to filter the tests.

        The default is an immutable frozenset rather than the mutable
        ``set()`` the original used, avoiding the shared-mutable-default
        pitfall (harmless here since tags are only read, but still an
        anti-pattern).
        """
        assert_(isinstance(name, str))
        self.name = name
        self.a = a
        self.b = b
        self.tags = frozenset(tags)  # prevent shared tags

    def check(self, do):
        """
        Run the function `do` on this test case, expanding arguments
        """
        do(self.a, self.b, tags=self.tags)

    def __repr__(self):
        return "<LinalgCase: %s>" % (self.name,)
def apply_tag(tag, cases):
    """
    Attach the given tag (a string) to every case in `cases` (a list of
    LinalgCase objects) and return the same list for chaining.
    """
    assert tag in all_tags, "Invalid tag"
    for case in cases:
        case.tags = case.tags.union({tag})
    return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("0x0_matrix",
np.empty((0, 0), dtype=double).view(np.matrix),
np.empty((0, 1), dtype=double).view(np.matrix),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
LinalgCase("matrix_b_only",
array([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
LinalgCase("matrix_a_and_b",
matrix([[1., 2.], [3., 4.]]),
matrix([2., 1.]).T),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_a_and_b",
matrix([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
    """Derive stacked (gufunc-style) variants of every ndarray-based case."""
    generalized = []
    for case in CASES:
        if not isinstance(case.a, np.ndarray):
            continue
        tags = case.tags | {'generalized'}

        # Variant 1: stack three scaled copies along a new leading axis.
        a3 = np.array([case.a, 2 * case.a, 3 * case.a])
        b3 = None if case.b is None else np.array([case.b, 7 * case.b, 6 * case.b])
        generalized.append(LinalgCase(case.name + "_tile3", a3, b3, tags=tags))

        # Variant 2: tile six identical copies into a (3, 2, ...) stack.
        a213 = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
        b213 = (None if case.b is None
                else np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape))
        generalized.append(LinalgCase(case.name + "_tile213", a213, b213, tags=tags))
    return generalized
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
    """
    Generate cartesian product of strides for all axes

    Yields (array, label) pairs where each array holds the same values as `x`
    but with a different memory layout (positive, widened or negative steps on
    the last up-to-three axes, plus zero-stride views where the shape allows).
    Non-ndarray inputs are passed through unchanged with label "nop".
    """
    if not isinstance(x, np.ndarray):
        yield x, "nop"
        return
    # Candidate step sizes per axis; only the last three axes get variations.
    stride_set = [(1,)] * x.ndim
    stride_set[-1] = (1, 3, -4)
    if x.ndim > 1:
        stride_set[-2] = (1, 3, -4)
    if x.ndim > 2:
        stride_set[-3] = (1, -4)
    for repeats in itertools.product(*tuple(stride_set)):
        new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
        slices = tuple([slice(None, None, repeat) for repeat in repeats])
        # new array with different strides, but same data
        xi = np.empty(new_shape, dtype=x.dtype)
        xi.view(np.uint32).fill(0xdeadbeef)  # poison the padding bytes
        xi = xi[slices]
        xi[...] = x
        xi = xi.view(x.__class__)
        assert_(np.all(xi == x))
        yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
    # generate also zero strides if possible
    if x.ndim >= 1 and x.shape[-1] == 1:
        s = list(x.strides)
        s[-1] = 0
        xi = np.lib.stride_tricks.as_strided(x, strides=s)
        yield xi, "stride_xxx_0"
    if x.ndim >= 2 and x.shape[-2] == 1:
        s = list(x.strides)
        s[-2] = 0
        xi = np.lib.stride_tricks.as_strided(x, strides=s)
        yield xi, "stride_xxx_0_x"
    if x.ndim >= 2 and x.shape[:-2] == (1, 1):
        s = list(x.strides)
        s[-1] = 0
        s[-2] = 0
        xi = np.lib.stride_tricks.as_strided(x, strides=s)
        yield xi, "stride_xxx_0_0"
def _make_strided_cases():
    """Expand every case into all combinations of non-contiguous strides."""
    strided = []
    for case in CASES:
        for a, a_label in _stride_comb_iter(case.a):
            for b, b_label in _stride_comb_iter(case.b):
                name = "_".join([case.name, a_label, b_label])
                strided.append(LinalgCase(name, a, b,
                                          tags=case.tags | {'strided'}))
    return strided
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
def _check_cases(func, require=frozenset(), exclude=frozenset()):
    """
    Run func on each of the cases with all of the tags in require, and none
    of the tags in exclude.

    Immutable frozenset defaults replace the original mutable ``set()``
    defaults (shared-mutable-default anti-pattern; harmless here since the
    sets are only read, but worth fixing).
    """
    for case in CASES:
        # filter by require and exclude
        if case.tags & require != require:
            continue
        if case.tags & exclude:
            continue
        try:
            case.check(func)
        except Exception:
            # Re-raise with the failing case's identity attached.
            msg = "In test case: %r\n\n" % case
            msg += traceback.format_exc()
            raise AssertionError(msg)
class LinalgSquareTestCase(object):
    """Mixin: run `self.do` over square cases, plain and size-0."""
    def test_sq_cases(self):
        _check_cases(self.do,
                     require={'square'},
                     exclude={'generalized', 'size-0'})
    def test_empty_sq_cases(self):
        _check_cases(self.do,
                     require={'size-0', 'square'},
                     exclude={'generalized'})
class LinalgNonsquareTestCase(object):
    """Mixin: run `self.do` over nonsquare cases, plain and size-0."""
    def test_nonsq_cases(self):
        _check_cases(self.do,
                     require={'nonsquare'},
                     exclude={'generalized', 'size-0'})
    def test_empty_nonsq_cases(self):
        _check_cases(self.do,
                     require={'size-0', 'nonsquare'},
                     exclude={'generalized'})
class HermitianTestCase(object):
    """Mixin: run `self.do` over hermitian cases, plain and size-0."""
    def test_herm_cases(self):
        _check_cases(self.do,
                     require={'hermitian'},
                     exclude={'generalized', 'size-0'})
    def test_empty_herm_cases(self):
        _check_cases(self.do,
                     require={'size-0', 'hermitian'},
                     exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(object):
    """Mixin: run `self.do` over stacked (generalized) square cases."""
    @dec.slow
    def test_generalized_sq_cases(self):
        _check_cases(self.do,
                     require={'square', 'generalized'},
                     exclude={'size-0'})
    @dec.slow
    def test_generalized_empty_sq_cases(self):
        _check_cases(self.do,
                     require={'size-0', 'square', 'generalized'})
class LinalgGeneralizedNonsquareTestCase(object):
    """Mixin: run `self.do` over stacked (generalized) nonsquare cases."""
    @dec.slow
    def test_generalized_nonsq_cases(self):
        _check_cases(self.do,
                     require={'nonsquare', 'generalized'},
                     exclude={'size-0'})
    @dec.slow
    def test_generalized_empty_nonsq_cases(self):
        _check_cases(self.do,
                     require={'size-0', 'nonsquare', 'generalized'})
class HermitianGeneralizedTestCase(object):
    """Mixin: run `self.do` over stacked (generalized) hermitian cases."""
    @dec.slow
    def test_generalized_herm_cases(self):
        _check_cases(self.do,
                     require={'hermitian', 'generalized'},
                     exclude={'size-0'})
    @dec.slow
    def test_generalized_empty_herm_cases(self):
        # NOTE(review): excluding the literal tag 'none' looks odd -- no case
        # carries that tag, so the exclusion is a no-op; kept for fidelity.
        _check_cases(self.do,
                     require={'size-0', 'hermitian', 'generalized'},
                     exclude={'none'})
def dot_generalized(a, b):
    """Reference dot over stacks of matrices (plain dot below 3 dimensions)."""
    a = asarray(a)
    if a.ndim < 3:
        return dot(a, b)
    if a.ndim == b.ndim:
        # matrix x matrix
        new_shape = a.shape[:-1] + b.shape[-1:]
    elif a.ndim == b.ndim + 1:
        # matrix x vector
        new_shape = a.shape[:-1]
    else:
        raise ValueError("Not implemented...")
    r = np.empty(new_shape, dtype=np.common_type(a, b))
    # Apply dot slice-by-slice over every leading index combination.
    for c in itertools.product(*map(range, a.shape[:-2])):
        r[c] = dot(a[c], b[c])
    return r
def identity_like_generalized(a):
    """Return identity matrices matching the (possibly stacked) shape of `a`."""
    a = asarray(a)
    if a.ndim < 3:
        return identity(a.shape[0])
    # One identity matrix per leading-index combination.
    r = np.empty(a.shape, dtype=a.dtype)
    for c in itertools.product(*map(range, a.shape[:-2])):
        r[c] = identity(a.shape[-2])
    return r
class TestSolve(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """Tests for linalg.solve over square and generalized (stacked) cases."""
    def do(self, a, b, tags):
        # solve must satisfy a @ x == b and preserve matrix-ness of b.
        x = linalg.solve(a, b)
        assert_almost_equal(b, dot_generalized(a, x))
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
    def test_types(self):
        # Result dtype must match the input dtype for all four float types.
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            assert_equal(linalg.solve(x, x).dtype, dtype)
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype
    def test_0_size(self):
        class ArraySubclass(np.ndarray):
            pass
        # Test system of 0x0 matrices
        a = np.arange(8).reshape(2, 2, 2)
        b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
        expected = linalg.solve(a, b)[:, 0:0, :]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))
        # Test errors for non-square and only b's dimension being 0
        assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
        assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
        # Test broadcasting error
        b = np.arange(6).reshape(1, 3, 2) # broadcasting error
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
        # Test zero "single equations" with 0x0 matrices.
        b = np.arange(2).reshape(1, 2).view(ArraySubclass)
        expected = linalg.solve(a, b)[:, 0:0]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))
        b = np.arange(3).reshape(1, 3)
        assert_raises(ValueError, linalg.solve, a, b)
        assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
        assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
    def test_0_size_k(self):
        # test zero multiple equation (K=0) case.
        class ArraySubclass(np.ndarray):
            pass
        a = np.arange(4).reshape(1, 2, 2)
        b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
        expected = linalg.solve(a, b)[:, :, 0:0]
        result = linalg.solve(a, b[:, :, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))
        # test both zero.
        expected = linalg.solve(a, b)[:, 0:0, 0:0]
        result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, 0:0])
        assert_array_equal(result, expected)
        assert_(isinstance(result, ArraySubclass))
class TestInv(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """linalg.inv: a @ inv(a) must be the identity for every test case."""

    def do(self, a, b, tags):
        a_inv = linalg.inv(a)
        # Stack-aware check that the product is the identity.
        assert_almost_equal(dot_generalized(a, a_inv),
                            identity_like_generalized(a))
        # np.matrix input should give np.matrix output.
        assert_(imply(isinstance(a, matrix), isinstance(a_inv, matrix)))

    def test_types(self):
        # inv must preserve the input dtype for all four LAPACK types.
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            assert_equal(linalg.inv(x).dtype, dtype)
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test

    def test_0_size(self):
        # Check that all kinds of 0-sized arrays work
        class ArraySubclass(np.ndarray):
            pass
        a = np.zeros((0, 1, 1), dtype=np.int_).view(ArraySubclass)
        res = linalg.inv(a)
        # Integer input is promoted to float64 even on the empty path.
        assert_(res.dtype.type is np.float64)
        assert_equal(a.shape, res.shape)
        # Fixed: previously this asserted isinstance(a, ...), which is
        # trivially true; the point is that the *result* keeps the subclass.
        assert_(isinstance(res, ArraySubclass))

        a = np.zeros((0, 0), dtype=np.complex64).view(ArraySubclass)
        res = linalg.inv(a)
        # Complex single precision is preserved, not promoted.
        assert_(res.dtype.type is np.complex64)
        assert_equal(a.shape, res.shape)
class TestEigvals(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """linalg.eigvals must agree with the eigenvalues returned by linalg.eig."""

    def do(self, a, b, tags):
        ev = linalg.eigvals(a)
        evalues, evectors = linalg.eig(a)
        assert_almost_equal(ev, evalues)

    def test_types(self):
        def check(dtype):
            # Symmetric real input: real eigenvalues, input dtype preserved.
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            assert_equal(linalg.eigvals(x).dtype, dtype)
            # This matrix has a negative discriminant, so its eigenvalues
            # are complex: result is promoted to the matching complex dtype.
            x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
            assert_equal(linalg.eigvals(x).dtype, get_complex_dtype(dtype))
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test
class TestEig(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """linalg.eig: a @ v must equal v * w for the returned pairs (w, v)."""

    def do(self, a, b, tags):
        evalues, evectors = linalg.eig(a)
        # Eigenvector equation, checked column-wise across any stack axes.
        assert_allclose(dot_generalized(a, evectors),
                        np.asarray(evectors) * np.asarray(evalues)[..., None, :],
                        rtol=get_rtol(evalues.dtype))
        # np.matrix input should give np.matrix eigenvectors.
        assert_(imply(isinstance(a, matrix), isinstance(evectors, matrix)))

    def test_types(self):
        def check(dtype):
            # Symmetric input: real eigenpairs, input dtype preserved.
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            w, v = np.linalg.eig(x)
            assert_equal(w.dtype, dtype)
            assert_equal(v.dtype, dtype)
            # Input with complex eigenvalues: both outputs go complex.
            x = np.array([[1, 0.5], [-1, 1]], dtype=dtype)
            w, v = np.linalg.eig(x)
            assert_equal(w.dtype, get_complex_dtype(dtype))
            assert_equal(v.dtype, get_complex_dtype(dtype))
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test
class TestSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """linalg.svd: (u * s) @ vt must reconstruct the input."""

    def do(self, a, b, tags):
        if 'size-0' in tags:
            # Empty matrices are rejected by SVD in this numpy version.
            assert_raises(LinAlgError, linalg.svd, a, 0)
            return

        u, s, vt = linalg.svd(a, 0)  # second arg 0 -> full_matrices=False
        assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
                                           np.asarray(vt)),
                        rtol=get_rtol(u.dtype))
        # np.matrix input should give np.matrix factors.
        assert_(imply(isinstance(a, matrix), isinstance(u, matrix)))
        assert_(imply(isinstance(a, matrix), isinstance(vt, matrix)))

    def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            u, s, vh = linalg.svd(x)
            assert_equal(u.dtype, dtype)
            # Singular values are always real, even for complex input.
            assert_equal(s.dtype, get_real_dtype(dtype))
            assert_equal(vh.dtype, dtype)
            s = linalg.svd(x, compute_uv=False)
            assert_equal(s.dtype, get_real_dtype(dtype))
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test
class TestCondSVD(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """Default linalg.cond must equal the ratio of extreme singular values."""

    def do(self, a, b, tags):
        c = asarray(a)  # a might be a matrix
        if 'size-0' in tags:
            # Empty input: the underlying SVD raises.
            assert_raises(LinAlgError, linalg.svd, c, compute_uv=False)
            return
        s = linalg.svd(c, compute_uv=False)
        # Default cond (2-norm) == largest / smallest singular value.
        assert_almost_equal(
            s[..., 0] / s[..., -1], linalg.cond(a),
            single_decimal=5, double_decimal=11)

    def test_stacked_arrays_explicitly(self):
        # A (1, n, n) stacked input must match the plain 2-d answer.
        A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
        assert_equal(linalg.cond(A), linalg.cond(A[None, ...])[0])
class TestCond2(LinalgSquareTestCase):
    """linalg.cond with explicit ord=2 must match the singular-value ratio."""

    def do(self, a, b, tags):
        c = asarray(a)  # a might be a matrix
        if 'size-0' in tags:
            # Empty input: the underlying SVD raises.
            assert_raises(LinAlgError, linalg.svd, c, compute_uv=False)
            return
        s = linalg.svd(c, compute_uv=False)
        # cond(a, 2) == largest / smallest singular value.
        assert_almost_equal(
            s[..., 0] / s[..., -1], linalg.cond(a, 2),
            single_decimal=5, double_decimal=11)

    def test_stacked_arrays_explicitly(self):
        # A (1, n, n) stacked input must match the plain 2-d answer.
        A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
        assert_equal(linalg.cond(A, 2), linalg.cond(A[None, ...], 2)[0])
class TestCondInf(object):
    """cond() with the infinity norm on a fixed diagonal matrix."""

    def test(self):
        # For diag(1, -2, 3): the inf-norm of the matrix is 3 and that of
        # its inverse diag(1, -1/2, 1/3) is 1, so cond is exactly 3.
        diag_mat = array([[1., 0, 0], [0, -2., 0], [0, 0, 3.]])
        assert_almost_equal(linalg.cond(diag_mat, inf), 3.)
class TestPinv(LinalgSquareTestCase, LinalgNonsquareTestCase):
    """linalg.pinv must satisfy the Moore-Penrose identity a @ pinv(a) @ a == a."""

    def do(self, a, b, tags):
        a_ginv = linalg.pinv(a)
        # `a @ a_ginv == I` does not hold if a is singular
        assert_almost_equal(dot(a, a_ginv).dot(a), a, single_decimal=5, double_decimal=11)
        # np.matrix input should give np.matrix output.
        assert_(imply(isinstance(a, matrix), isinstance(a_ginv, matrix)))
class TestDet(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    """det/slogdet must agree with the product of the eigenvalues."""

    def do(self, a, b, tags):
        d = linalg.det(a)
        (s, ld) = linalg.slogdet(a)
        # Compute eigenvalues in double (or complex double) precision for a
        # reliable reference value.
        if asarray(a).dtype.type in (single, double):
            ad = asarray(a).astype(double)
        else:
            ad = asarray(a).astype(cdouble)
        ev = linalg.eigvals(ad)
        # det == prod(eigenvalues); slogdet is its (sign, log|det|) form.
        assert_almost_equal(d, multiply.reduce(ev, axis=-1))
        assert_almost_equal(s * np.exp(ld), multiply.reduce(ev, axis=-1))

        s = np.atleast_1d(s)
        ld = np.atleast_1d(ld)
        m = (s != 0)
        # Non-zero signs have unit magnitude; a zero sign pairs with
        # log|det| == -inf (singular matrix).
        assert_almost_equal(np.abs(s[m]), 1)
        assert_equal(ld[~m], -inf)

    def test_zero(self):
        # Singular matrices: det is an exact 0 of the appropriate scalar type.
        assert_equal(linalg.det([[0.0]]), 0.0)
        assert_equal(type(linalg.det([[0.0]])), double)
        assert_equal(linalg.det([[0.0j]]), 0.0)
        assert_equal(type(linalg.det([[0.0j]])), cdouble)

        assert_equal(linalg.slogdet([[0.0]]), (0.0, -inf))
        assert_equal(type(linalg.slogdet([[0.0]])[0]), double)
        assert_equal(type(linalg.slogdet([[0.0]])[1]), double)
        assert_equal(linalg.slogdet([[0.0j]]), (0.0j, -inf))
        assert_equal(type(linalg.slogdet([[0.0j]])[0]), cdouble)
        # The log-determinant part is always real, even for complex input.
        assert_equal(type(linalg.slogdet([[0.0j]])[1]), double)

    def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            assert_equal(np.linalg.det(x).dtype, dtype)
            # slogdet returns (sign, logabsdet): the sign keeps the input
            # dtype, the log part is real.
            ph, s = np.linalg.slogdet(x)
            assert_equal(s.dtype, get_real_dtype(dtype))
            assert_equal(ph.dtype, dtype)
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test
class TestLstsq(LinalgSquareTestCase, LinalgNonsquareTestCase):
    """linalg.lstsq: solution, rank, residuals and singular values."""

    def do(self, a, b, tags):
        if 'size-0' in tags:
            # Empty problems are rejected in this numpy version.
            assert_raises(LinAlgError, linalg.lstsq, a, b)
            return

        arr = np.asarray(a)
        m, n = arr.shape
        u, s, vt = linalg.svd(a, 0)
        x, residuals, rank, sv = linalg.lstsq(a, b)
        if m <= n:
            # Square/underdetermined full-rank system: exact solution.
            assert_almost_equal(b, dot(a, x))
            assert_equal(rank, m)
        else:
            assert_equal(rank, n)
        # Returned singular values must match an independent SVD.
        assert_almost_equal(sv, sv.__array_wrap__(s))
        if rank == n and m > n:
            # Overdetermined full-rank case: residuals are the column-wise
            # summed squared errors of the fit.
            expect_resids = (
                np.asarray(abs(np.dot(a, x) - b)) ** 2).sum(axis=0)
            expect_resids = np.asarray(expect_resids)
            if np.asarray(b).ndim == 1:
                expect_resids.shape = (1,)
                assert_equal(residuals.shape, expect_resids.shape)
        else:
            # Otherwise lstsq returns an empty residuals array.
            expect_resids = np.array([]).view(type(x))
        assert_almost_equal(residuals, expect_resids)
        assert_(np.issubdtype(residuals.dtype, np.floating))
        # np.matrix rhs should give np.matrix outputs.
        assert_(imply(isinstance(b, matrix), isinstance(x, matrix)))
        assert_(imply(isinstance(b, matrix), isinstance(residuals, matrix)))
class TestMatrixPower(object):
    """Tests for linalg.matrix_power on a few fixed matrices."""

    # 90-degree rotation: R90**2 == -I, so R90**4 == I. Convenient for
    # checking huge exponents, which only matter mod 4.
    R90 = array([[0, 1], [-1, 0]])
    # Arbitrary invertible integer matrix.
    Arb22 = array([[4, -7], [-2, 10]])
    # Singular matrix, used for the negative-power error path.
    noninv = array([[1, 0], [0, 0]])
    arbfloat = array([[0.1, 3.2], [1.2, 0.7]])

    # 10x10 permutation matrix: identity with rows 0 and 1 swapped.
    large = identity(10)
    t = large[1, :].copy()
    large[1, :] = large[0,:]
    large[0, :] = t

    def test_large_power(self):
        # All the powers of two are multiples of 4; the +1 leaves R90**1.
        assert_equal(
            matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5 + 1), self.R90)

    def test_large_power_trailing_zero(self):
        # Same exponent without the +1 is a multiple of 4 -> identity.
        assert_equal(
            matrix_power(self.R90, 2 ** 100 + 2 ** 10 + 2 ** 5), identity(2))

    def testip_zero(self):
        # M**0 must be the identity, preserving dtype.
        def tz(M):
            mz = matrix_power(M, 0)
            assert_equal(mz, identity(M.shape[0]))
            assert_equal(mz.dtype, M.dtype)
        for M in [self.Arb22, self.arbfloat, self.large]:
            yield tz, M  # nose-style generator test

    def testip_one(self):
        # M**1 must be M itself.
        def tz(M):
            mz = matrix_power(M, 1)
            assert_equal(mz, M)
            assert_equal(mz.dtype, M.dtype)
        for M in [self.Arb22, self.arbfloat, self.large]:
            yield tz, M

    def testip_two(self):
        # M**2 must equal M @ M.
        def tz(M):
            mz = matrix_power(M, 2)
            assert_equal(mz, dot(M, M))
            assert_equal(mz.dtype, M.dtype)
        for M in [self.Arb22, self.arbfloat, self.large]:
            yield tz, M

    def testip_invert(self):
        # M**-1 must be the inverse: (M**-1) @ M == I.
        def tz(M):
            mz = matrix_power(M, -1)
            assert_almost_equal(identity(M.shape[0]), dot(mz, M))
        for M in [self.R90, self.Arb22, self.arbfloat, self.large]:
            yield tz, M

    def test_invert_noninvertible(self):
        # Negative powers of a singular matrix must raise LinAlgError.
        import numpy.linalg
        assert_raises(numpy.linalg.linalg.LinAlgError,
                      lambda: matrix_power(self.noninv, -1))
class TestBoolPower(object):
    """matrix_power must also work on boolean matrices."""

    def test_square(self):
        # Squaring this particular matrix reproduces it, so the test checks
        # both that bool input is accepted and that the values are right.
        mat = array([[True, False], [True, True]])
        assert_equal(matrix_power(mat, 2), mat)
class TestEigvalsh(HermitianTestCase, HermitianGeneralizedTestCase):
    """eigvalsh must match eig's (sorted) eigenvalues for both UPLO halves."""

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev = linalg.eigvalsh(a, 'L')
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_allclose(ev, evalues, rtol=get_rtol(ev.dtype))

        # Reading the upper triangle must give the same spectrum.
        ev2 = linalg.eigvalsh(a, 'U')
        assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))

    def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            # Hermitian eigenvalues are always real.
            w = np.linalg.eigvalsh(x)
            assert_equal(w.dtype, get_real_dtype(dtype))
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test

    def test_invalid(self):
        # Only 'L'/'l'/'U'/'u' are valid UPLO values.
        x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
        assert_raises(ValueError, np.linalg.eigvalsh, x, UPLO="lrong")
        assert_raises(ValueError, np.linalg.eigvalsh, x, "lower")
        assert_raises(ValueError, np.linalg.eigvalsh, x, "upper")

    def test_UPLO(self):
        # Klo/Kup hold the off-diagonal data in the lower/upper triangle
        # only; eigvalsh must read just the requested half.
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w = np.linalg.eigvalsh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w = np.linalg.eigvalsh(Klo, UPLO='L')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w = np.linalg.eigvalsh(Klo, UPLO='l')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w = np.linalg.eigvalsh(Kup, UPLO='U')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w = np.linalg.eigvalsh(Kup, UPLO='u')
        assert_allclose(w, tgt, rtol=rtol)
class TestEigh(HermitianTestCase, HermitianGeneralizedTestCase):
    """eigh: real sorted eigenvalues plus eigenvectors, for both UPLO halves."""

    def do(self, a, b, tags):
        # note that eigenvalue arrays returned by eig must be sorted since
        # their order isn't guaranteed.
        ev, evc = linalg.eigh(a)
        evalues, evectors = linalg.eig(a)
        evalues.sort(axis=-1)
        assert_almost_equal(ev, evalues)

        # Eigenvector equation a @ v == v * w, checked across stack axes.
        assert_allclose(dot_generalized(a, evc),
                        np.asarray(ev)[..., None, :] * np.asarray(evc),
                        rtol=get_rtol(ev.dtype))

        # Reading the upper triangle must give the same decomposition.
        ev2, evc2 = linalg.eigh(a, 'U')
        assert_almost_equal(ev2, evalues)

        assert_allclose(dot_generalized(a, evc2),
                        np.asarray(ev2)[..., None, :] * np.asarray(evc2),
                        rtol=get_rtol(ev.dtype), err_msg=repr(a))

    def test_types(self):
        def check(dtype):
            x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
            w, v = np.linalg.eigh(x)
            # Eigenvalues are real; eigenvectors keep the input dtype.
            assert_equal(w.dtype, get_real_dtype(dtype))
            assert_equal(v.dtype, dtype)
        for dtype in [single, double, csingle, cdouble]:
            yield check, dtype  # nose-style generator test

    def test_invalid(self):
        # Only 'L'/'l'/'U'/'u' are valid UPLO values.
        x = np.array([[1, 0.5], [0.5, 1]], dtype=np.float32)
        assert_raises(ValueError, np.linalg.eigh, x, UPLO="lrong")
        assert_raises(ValueError, np.linalg.eigh, x, "lower")
        assert_raises(ValueError, np.linalg.eigh, x, "upper")

    def test_UPLO(self):
        # Klo/Kup hold the off-diagonal data in the lower/upper triangle
        # only; eigh must read just the requested half.
        Klo = np.array([[0, 0], [1, 0]], dtype=np.double)
        Kup = np.array([[0, 1], [0, 0]], dtype=np.double)
        tgt = np.array([-1, 1], dtype=np.double)
        rtol = get_rtol(np.double)

        # Check default is 'L'
        w, v = np.linalg.eigh(Klo)
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'L'
        w, v = np.linalg.eigh(Klo, UPLO='L')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'l'
        w, v = np.linalg.eigh(Klo, UPLO='l')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'U'
        w, v = np.linalg.eigh(Kup, UPLO='U')
        assert_allclose(w, tgt, rtol=rtol)
        # Check 'u'
        w, v = np.linalg.eigh(Kup, UPLO='u')
        assert_allclose(w, tgt, rtol=rtol)
class _TestNorm(object):
    """Shared tests for norm().

    Concrete subclasses (TestNormDouble etc.) set the element dtype `dt`
    and the comparison precision `dec` (decimal places).
    """

    dt = None
    dec = None

    def test_empty(self):
        # Empty input has norm 0 regardless of dtype or dimensionality.
        assert_equal(norm([]), 0.0)
        assert_equal(norm(array([], dtype=self.dt)), 0.0)
        assert_equal(norm(atleast_2d(array([], dtype=self.dt))), 0.0)

    def test_vector_return_type(self):
        a = np.array([1, 0, 1])

        exact_types = np.typecodes['AllInteger']
        inexact_types = np.typecodes['AllFloat']

        all_types = exact_types + inexact_types

        for each_inexact_types in all_types:
            at = a.astype(each_inexact_types)

            # -inf norm is the smallest absolute entry (0 here).
            an = norm(at, -np.inf)
            # The result must always be floating point, even for int input.
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 0.0)

            with suppress_warnings() as sup:
                # ord=-1 involves a reciprocal of the zero element.
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                assert_(issubclass(an.dtype.type, np.floating))
                assert_almost_equal(an, 0.0)

            # ord=0 counts the nonzero entries.
            an = norm(at, 0)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2)

            an = norm(at, 1)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            an = norm(at, 2)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/2.0))

            an = norm(at, 4)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, an.dtype.type(2.0)**an.dtype.type(1.0/4.0))

            an = norm(at, np.inf)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 1.0)

    def test_matrix_return_type(self):
        a = np.array([[1, 0, 1], [0, 1, 1]])

        exact_types = np.typecodes['AllInteger']

        # float32, complex64, float64, complex128 types are the only types
        # allowed by `linalg`, which performs the matrix operations used
        # within `norm`.
        inexact_types = 'fdFD'

        all_types = exact_types + inexact_types

        for each_inexact_types in all_types:
            at = a.astype(each_inexact_types)

            an = norm(at, -np.inf)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            with suppress_warnings() as sup:
                sup.filter(RuntimeWarning, "divide by zero encountered")
                an = norm(at, -1)
                assert_(issubclass(an.dtype.type, np.floating))
                assert_almost_equal(an, 1.0)

            an = norm(at, 1)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            an = norm(at, 2)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 3.0**(1.0/2.0))

            an = norm(at, -2)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 1.0)

            an = norm(at, np.inf)
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            an = norm(at, 'fro')
            assert_(issubclass(an.dtype.type, np.floating))
            assert_almost_equal(an, 2.0)

            an = norm(at, 'nuc')
            assert_(issubclass(an.dtype.type, np.floating))
            # Lower bar needed to support low precision floats.
            # They end up being off by 1 in the 7th place.
            old_assert_almost_equal(an, 2.7320508075688772, decimal=6)

    def test_vector(self):
        a = [1, 2, 3, 4]
        b = [-1, -2, -3, -4]
        c = [-1, 2, -3, 4]

        def _test(v):
            # Expected values computed by hand for |v| = (1, 2, 3, 4).
            np.testing.assert_almost_equal(norm(v), 30 ** 0.5,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, inf), 4.0,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, -inf), 1.0,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 1), 10.0,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, -1), 12.0 / 25,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 2), 30 ** 0.5,
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, -2), ((205. / 144) ** -0.5),
                                           decimal=self.dec)
            np.testing.assert_almost_equal(norm(v, 0), 4,
                                           decimal=self.dec)

        # Signs must not affect any of these norms; check plain lists and
        # arrays of the subclass dtype.
        for v in (a, b, c,):
            _test(v)

        for v in (array(a, dtype=self.dt), array(b, dtype=self.dt),
                  array(c, dtype=self.dt)):
            _test(v)

    def test_matrix_2x2(self):
        A = matrix([[1, 3], [5, 7]], dtype=self.dt)
        assert_almost_equal(norm(A), 84 ** 0.5)
        assert_almost_equal(norm(A, 'fro'), 84 ** 0.5)
        assert_almost_equal(norm(A, 'nuc'), 10.0)
        assert_almost_equal(norm(A, inf), 12.0)
        assert_almost_equal(norm(A, -inf), 4.0)
        assert_almost_equal(norm(A, 1), 10.0)
        assert_almost_equal(norm(A, -1), 6.0)
        assert_almost_equal(norm(A, 2), 9.1231056256176615)
        assert_almost_equal(norm(A, -2), 0.87689437438234041)
        # Invalid orders for a matrix must raise.
        assert_raises(ValueError, norm, A, 'nofro')
        assert_raises(ValueError, norm, A, -3)
        assert_raises(ValueError, norm, A, 0)

    def test_matrix_3x3(self):
        # This test has been added because the 2x2 example
        # happened to have equal nuclear norm and induced 1-norm.
        # The 1/10 scaling factor accommodates the absolute tolerance
        # used in assert_almost_equal.
        A = (1 / 10) * \
            np.array([[1, 2, 3], [6, 0, 5], [3, 2, 1]], dtype=self.dt)
        assert_almost_equal(norm(A), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'fro'), (1 / 10) * 89 ** 0.5)
        assert_almost_equal(norm(A, 'nuc'), 1.3366836911774836)
        assert_almost_equal(norm(A, inf), 1.1)
        assert_almost_equal(norm(A, -inf), 0.6)
        assert_almost_equal(norm(A, 1), 1.0)
        assert_almost_equal(norm(A, -1), 0.4)
        assert_almost_equal(norm(A, 2), 0.88722940323461277)
        assert_almost_equal(norm(A, -2), 0.19456584790481812)

    def test_axis(self):
        # Vector norms.
        # Compare the use of `axis` with computing the norm of each row
        # or column separately.
        A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
            expected0 = [norm(A[:, k], ord=order) for k in range(A.shape[1])]
            assert_almost_equal(norm(A, ord=order, axis=0), expected0)

            expected1 = [norm(A[k, :], ord=order) for k in range(A.shape[0])]
            assert_almost_equal(norm(A, ord=order, axis=1), expected1)

        # Matrix norms.
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)
        nd = B.ndim
        for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro']:
            for axis in itertools.combinations(range(-nd, nd), 2):
                row_axis, col_axis = axis
                # Normalize to non-negative axis indices.
                if row_axis < 0:
                    row_axis += nd
                if col_axis < 0:
                    col_axis += nd
                if row_axis == col_axis:
                    # The same axis twice is invalid for a matrix norm.
                    assert_raises(ValueError, norm, B, ord=order, axis=axis)
                else:
                    n = norm(B, ord=order, axis=axis)

                    # The logic using k_index only works for nd = 3.
                    # This has to be changed if nd is increased.
                    k_index = nd - (row_axis + col_axis)
                    if row_axis < col_axis:
                        expected = [norm(B[:].take(k, axis=k_index), ord=order)
                                    for k in range(B.shape[k_index])]
                    else:
                        # Swapped axes correspond to transposed slices.
                        expected = [norm(B[:].take(k, axis=k_index).T, ord=order)
                                    for k in range(B.shape[k_index])]
                    assert_almost_equal(n, expected)

    def test_keepdims(self):
        A = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        allclose_err = 'order {0}, axis = {1}'
        shape_err = 'Shape mismatch found {0}, expected {1}, order={2}, axis={3}'

        # check the order=None, axis=None case
        expected = norm(A, ord=None, axis=None)
        found = norm(A, ord=None, axis=None, keepdims=True)
        assert_allclose(np.squeeze(found), expected,
                        err_msg=allclose_err.format(None, None))
        expected_shape = (1, 1, 1)
        assert_(found.shape == expected_shape,
                shape_err.format(found.shape, expected_shape, None, None))

        # Vector norms.
        for order in [None, -1, 0, 1, 2, 3, np.Inf, -np.Inf]:
            for k in range(A.ndim):
                expected = norm(A, ord=order, axis=k)
                found = norm(A, ord=order, axis=k, keepdims=True)
                assert_allclose(np.squeeze(found), expected,
                                err_msg=allclose_err.format(order, k))
                # The reduced axis must be kept with length 1.
                expected_shape = list(A.shape)
                expected_shape[k] = 1
                expected_shape = tuple(expected_shape)
                assert_(found.shape == expected_shape,
                        shape_err.format(found.shape, expected_shape, order, k))

        # Matrix norms.
        for order in [None, -2, 2, -1, 1, np.Inf, -np.Inf, 'fro', 'nuc']:
            for k in itertools.permutations(range(A.ndim), 2):
                expected = norm(A, ord=order, axis=k)
                found = norm(A, ord=order, axis=k, keepdims=True)
                assert_allclose(np.squeeze(found), expected,
                                err_msg=allclose_err.format(order, k))
                # Both matrix axes collapse to length 1.
                expected_shape = list(A.shape)
                expected_shape[k[0]] = 1
                expected_shape[k[1]] = 1
                expected_shape = tuple(expected_shape)
                assert_(found.shape == expected_shape,
                        shape_err.format(found.shape, expected_shape, order, k))

    def test_bad_args(self):
        # Check that bad arguments raise the appropriate exceptions.

        A = array([[1, 2, 3], [4, 5, 6]], dtype=self.dt)
        B = np.arange(1, 25, dtype=self.dt).reshape(2, 3, 4)

        # Using `axis=<integer>` or passing in a 1-D array implies vector
        # norms are being computed, so also using `ord='fro'`
        # or `ord='nuc'` raises a ValueError.
        assert_raises(ValueError, norm, A, 'fro', 0)
        assert_raises(ValueError, norm, A, 'nuc', 0)
        assert_raises(ValueError, norm, [3, 4], 'fro', None)
        assert_raises(ValueError, norm, [3, 4], 'nuc', None)

        # Similarly, norm should raise an exception when ord is any finite
        # number other than 1, 2, -1 or -2 when computing matrix norms.
        for order in [0, 3]:
            assert_raises(ValueError, norm, A, order, None)
            assert_raises(ValueError, norm, A, order, (0, 1))
            assert_raises(ValueError, norm, B, order, (1, 2))

        # Invalid axis
        assert_raises(np.AxisError, norm, B, None, 3)
        assert_raises(np.AxisError, norm, B, None, (2, 3))
        assert_raises(ValueError, norm, B, None, (0, 1, 2))
class TestNorm_NonSystematic(object):
    """Regression tests for norm() bugs not covered by the systematic suite."""

    def test_longdouble_norm(self):
        # Non-regression test: p-norm of longdouble would previously raise
        # UnboundLocalError.
        x = np.arange(10, dtype=np.longdouble)
        old_assert_almost_equal(norm(x, ord=3), 12.65, decimal=2)

    def test_intmin(self):
        # Non-regression test: p-norm of signed integer would previously do
        # float cast and abs in the wrong order.
        x = np.array([-2 ** 31], dtype=np.int32)
        old_assert_almost_equal(norm(x, ord=3), 2 ** 31, decimal=5)

    def test_complex_high_ord(self):
        # gh-4156
        d = np.empty((2,), dtype=np.clongdouble)
        d[0] = 6 + 7j
        d[1] = -6 + 7j
        res = 11.615898132184
        old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=10)
        # The comparison precision degrades with the element precision.
        d = d.astype(np.complex128)
        old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=9)
        d = d.astype(np.complex64)
        old_assert_almost_equal(np.linalg.norm(d, ord=3), res, decimal=5)
class TestNormDouble(_TestNorm):
    # Run the shared norm suite on float64 at 12-decimal precision.
    dt = np.double
    dec = 12
class TestNormSingle(_TestNorm):
    # Run the shared norm suite on float32 at 6-decimal precision.
    dt = np.float32
    dec = 6
class TestNormInt64(_TestNorm):
    # Run the shared norm suite on int64 input; results are still floats.
    dt = np.int64
    dec = 12
class TestMatrixRank(object):
    """Basic matrix_rank cases (nose-style generator test)."""

    def test_matrix_rank(self):
        # Full rank matrix
        yield assert_equal, 4, matrix_rank(np.eye(4))
        # rank deficient matrix
        I = np.eye(4)
        I[-1, -1] = 0.
        yield assert_equal, matrix_rank(I), 3
        # All zeros - zero rank
        yield assert_equal, matrix_rank(np.zeros((4, 4))), 0
        # 1 dimension - rank 1 unless all 0
        yield assert_equal, matrix_rank([1, 0, 0, 0]), 1
        yield assert_equal, matrix_rank(np.zeros((4,))), 0
        # accepts array-like
        yield assert_equal, matrix_rank([1]), 1
        # greater than 2 dimensions raises error
        yield assert_raises, TypeError, matrix_rank, np.zeros((2, 2, 2))
        # works on scalar
        yield assert_equal, matrix_rank(1), 1
def test_reduced_rank():
    """matrix_rank must detect linearly dependent columns in random data."""
    # Fixed seed keeps the test deterministic across runs.
    rng = np.random.RandomState(20120714)
    for _ in range(100):
        # Random 40x10 matrix with column 0 forced to be the sum of two
        # other columns: rank drops from 10 to 9.
        mat = rng.normal(size=(40, 10))
        mat[:, 0] = mat[:, 1] + mat[:, 2]
        assert_equal(matrix_rank(mat), 9)
        # A second dependent column drops the rank to 8.
        mat[:, 3] = mat[:, 4] + mat[:, 5]
        assert_equal(matrix_rank(mat), 8)
class TestQR(object):
    """Tests for linalg.qr in its various modes."""

    def check_qr(self, a):
        # This test expects the argument `a` to be an ndarray or
        # a subclass of an ndarray of inexact type.
        a_type = type(a)
        a_dtype = a.dtype
        m, n = a.shape
        k = min(m, n)

        # mode == 'complete': q is (m, m), r is (m, n).
        q, r = linalg.qr(a, mode='complete')
        assert_(q.dtype == a_dtype)
        assert_(r.dtype == a_dtype)
        assert_(isinstance(q, a_type))
        assert_(isinstance(r, a_type))
        assert_(q.shape == (m, m))
        assert_(r.shape == (m, n))
        assert_almost_equal(dot(q, r), a)
        # q must have orthonormal columns and r must be upper triangular.
        assert_almost_equal(dot(q.T.conj(), q), np.eye(m))
        assert_almost_equal(np.triu(r), r)

        # mode == 'reduced': economy shapes (m, k) and (k, n).
        q1, r1 = linalg.qr(a, mode='reduced')
        assert_(q1.dtype == a_dtype)
        assert_(r1.dtype == a_dtype)
        assert_(isinstance(q1, a_type))
        assert_(isinstance(r1, a_type))
        assert_(q1.shape == (m, k))
        assert_(r1.shape == (k, n))
        assert_almost_equal(dot(q1, r1), a)
        assert_almost_equal(dot(q1.T.conj(), q1), np.eye(k))
        assert_almost_equal(np.triu(r1), r1)

        # mode == 'r': only the triangular factor, matching 'reduced'.
        r2 = linalg.qr(a, mode='r')
        assert_(r2.dtype == a_dtype)
        assert_(isinstance(r2, a_type))
        assert_almost_equal(r2, r1)

    def test_qr_empty(self):
        # Zero-sized input is rejected in this numpy version.
        a = np.zeros((0, 2))
        assert_raises(linalg.LinAlgError, linalg.qr, a)

    def test_mode_raw(self):
        # The factorization is not unique and varies between libraries,
        # so it is not possible to check against known values. Functional
        # testing is a possibility, but awaits the exposure of more
        # of the functions in lapack_lite. Consequently, this test is
        # very limited in scope. Note that the results are in FORTRAN
        # order, hence the h arrays are transposed.
        a = array([[1, 2], [3, 4], [5, 6]], dtype=np.double)

        # Test double
        h, tau = linalg.qr(a, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        assert_(h.shape == (2, 3))
        assert_(tau.shape == (2,))

        h, tau = linalg.qr(a.T, mode='raw')
        assert_(h.dtype == np.double)
        assert_(tau.dtype == np.double)
        assert_(h.shape == (3, 2))
        assert_(tau.shape == (2,))

    def test_mode_all_but_economic(self):
        # Exercise check_qr over square/tall/wide matrices (and their
        # np.matrix views), real then complex, single and double precision.
        a = array([[1, 2], [3, 4]])
        b = array([[1, 2], [3, 4], [5, 6]])
        for dt in "fd":
            m1 = a.astype(dt)
            m2 = b.astype(dt)
            self.check_qr(m1)
            self.check_qr(m2)
            self.check_qr(m2.T)
            self.check_qr(matrix(m1))
        for dt in "fd":
            m1 = 1 + 1j * a.astype(dt)
            m2 = 1 + 1j * b.astype(dt)
            self.check_qr(m1)
            self.check_qr(m2)
            self.check_qr(m2.T)
            self.check_qr(matrix(m1))
def test_byteorder_check():
    # Byte order check should pass for native order
    if sys.byteorder == 'little':
        native = '<'
    else:
        native = '>'

    for dtt in (np.float32, np.float64):
        arr = np.eye(4, dtype=dtt)
        # Same data, but with the byte order spelled explicitly ('<' or '>')
        # rather than '='.
        n_arr = arr.newbyteorder(native)
        # Swapped byte-order flag with byteswapped data: identical logical
        # values, non-native layout.
        sw_arr = arr.newbyteorder('S').byteswap()
        assert_equal(arr.dtype.byteorder, '=')
        for routine in (linalg.inv, linalg.det, linalg.pinv):
            # Normal call
            res = routine(arr)
            # Native but not '='
            assert_array_equal(res, routine(n_arr))
            # Swapped
            assert_array_equal(res, routine(sw_arr))
def test_generalized_raise_multiloop():
    """A singular matrix anywhere in a stack must raise LinAlgError.

    The error must surface even when the failing matrix is not the last
    one processed by the gufunc inner loop.
    """
    good = np.array([[1, 2], [3, 4]])
    singular = np.array([[1, 1], [1, 1]])

    # Non-contiguous stack of 2x2 matrices, filled with invertible ones...
    stack = np.zeros([4, 4, 2, 2])[1::2]
    stack[...] = good
    # ...then poison the very first entry.
    stack[0, 0] = singular

    assert_raises(np.linalg.LinAlgError, np.linalg.inv, stack)
def test_xerbla_override():
    # Check that our xerbla has been successfully linked in. If it is not,
    # the default xerbla routine is called, which prints a message to stdout
    # and may, or may not, abort the process depending on the LAPACK package.

    # Arbitrary exit status the child uses to signal "numpy's xerbla ran".
    XERBLA_OK = 255

    try:
        pid = os.fork()
    except (OSError, AttributeError):
        # fork failed, or not running on POSIX
        raise SkipTest("Not POSIX or fork failed.")

    if pid == 0:
        # child; close i/o file handles
        os.close(1)
        os.close(0)
        # Avoid producing core files.
        import resource
        resource.setrlimit(resource.RLIMIT_CORE, (0, 0))
        # These calls may abort.
        try:
            np.linalg.lapack_lite.xerbla()
        except ValueError:
            pass
        except:
            # Deliberately broad: the child must never propagate an
            # exception, only report through its exit status.
            os._exit(os.EX_CONFIG)

        try:
            a = np.array([[1.]])
            np.linalg.lapack_lite.dorgqr(
                1, 1, 1, a,
                0, # <- invalid value
                a, a, 0, 0)
        except ValueError as e:
            if "DORGQR parameter number 5" in str(e):
                # success, reuse error code to mark success as
                # FORTRAN STOP returns as success.
                os._exit(XERBLA_OK)

        # Did not abort, but our xerbla was not linked in.
        os._exit(os.EX_CONFIG)
    else:
        # parent
        pid, status = os.wait()
        if os.WEXITSTATUS(status) != XERBLA_OK:
            raise SkipTest('Numpy xerbla not linked in.')
class TestMultiDot(object):
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
# determine the optimal order. Therefore test it separately.
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
def test_basic_function_with_dynamic_programing_optimization(self):
# multi_dot with four or more arguments uses the dynamic programing
# optimization and therefore deserve a separate
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 1))
assert_almost_equal(multi_dot([A, B, C, D]), A.dot(B).dot(C).dot(D))
def test_vector_as_first_argument(self):
# The first argument can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D = np.random.random((2, 2))
# the result should be 1-D
assert_equal(multi_dot([A1d, B, C, D]).shape, (2,))
def test_vector_as_last_argument(self):
# The last argument can be 1-D
A = np.random.random((6, 2))
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be 1-D
assert_equal(multi_dot([A, B, C, D1d]).shape, (6,))
def test_vector_as_first_and_last_argument(self):
# The first and last arguments can be 1-D
A1d = np.random.random(2) # 1-D
B = np.random.random((2, 6))
C = np.random.random((6, 2))
D1d = np.random.random(2) # 1-D
# the result should be a scalar
assert_equal(multi_dot([A1d, B, C, D1d]).shape, ())
def test_dynamic_programming_logic(self):
# Test for the dynamic programming part
# This test is directly taken from Cormen page 376.
arrays = [np.random.random((30, 35)),
np.random.random((35, 15)),
np.random.random((15, 5)),
np.random.random((5, 10)),
np.random.random((10, 20)),
np.random.random((20, 25))]
m_expected = np.array([[0., 15750., 7875., 9375., 11875., 15125.],
[0., 0., 2625., 4375., 7125., 10500.],
[0., 0., 0., 750., 2500., 5375.],
[0., 0., 0., 0., 1000., 3500.],
[0., 0., 0., 0., 0., 5000.],
[0., 0., 0., 0., 0., 0.]])
s_expected = np.array([[0, 1, 1, 3, 3, 3],
[0, 0, 2, 3, 3, 3],
[0, 0, 0, 3, 3, 3],
[0, 0, 0, 0, 4, 5],
[0, 0, 0, 0, 0, 5],
[0, 0, 0, 0, 0, 0]], dtype=np.int)
s_expected -= 1 # Cormen uses 1-based index, python does not.
s, m = _multi_dot_matrix_chain_order(arrays, return_costs=True)
# Only the upper triangular part (without the diagonal) is interesting.
assert_almost_equal(np.triu(s[:-1, 1:]),
np.triu(s_expected[:-1, 1:]))
assert_almost_equal(np.triu(m), np.triu(m_expected))
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
if __name__ == "__main__":
    # numpy.testing (nose-era) entry point: run all tests in this module.
    run_module_suite()
| 35.628852 | 96 | 0.548717 |
795a294ee03f4d64f43df5c35cc5d04b965cc964 | 941 | py | Python | launch/ros2_player_bridge.launch.py | betab0t/ros2_player_bridge | 99da0581565006e6abf295ba003ba6c02d46d88b | [
"Apache-2.0"
] | null | null | null | launch/ros2_player_bridge.launch.py | betab0t/ros2_player_bridge | 99da0581565006e6abf295ba003ba6c02d46d88b | [
"Apache-2.0"
] | null | null | null | launch/ros2_player_bridge.launch.py | betab0t/ros2_player_bridge | 99da0581565006e6abf295ba003ba6c02d46d88b | [
"Apache-2.0"
] | null | null | null | import launch_ros.actions
import launch.actions
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
def generate_launch_description():
return LaunchDescription([
launch.actions.DeclareLaunchArgument('player_ip',
default_value='127.0.0.1',
description="Player server IP address"),
launch_ros.actions.Node(package='ros2_player_bridge',
node_executable='mobile_base_node',
output='screen',
arguments=[LaunchConfiguration('player_ip')]),
launch_ros.actions.Node(package='ros2_player_bridge',
node_executable='sensors_node',
output='screen',
arguments=[LaunchConfiguration('player_ip')]),
]) | 44.809524 | 85 | 0.550478 |
795a2951d27cfbf4908b5ee3d20ce443fde2175e | 2,695 | py | Python | jdcloud_sdk/services/vm/apis/ImageTasksRequest.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/vm/apis/ImageTasksRequest.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/vm/apis/ImageTasksRequest.py | jdcloud-apigateway/jdcloud-sdk-python | 0886769bcf1fb92128a065ff0f4695be099571cc | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ImageTasksRequest(JDCloudRequest):
    """
    Query the details of image import/export tasks.
    Exports a JD Cloud private image to an environment outside JD Cloud.
    For detailed instructions, please refer to the help documentation:
    [Import a private image](https://docs.jdcloud.com/cn/virtual-machines/import-private-image)
    [Export a private image](https://docs.jdcloud.com/cn/virtual-machines/export-private-image)
    ## API description
    - Call this API to query the details of image import or export tasks.
    """
    def __init__(self, parameters, header=None, version="v1"):
        # GET /regions/{regionId}/imageTasks; the region placeholder is
        # filled in from ``parameters.regionId`` by the base request class.
        super(ImageTasksRequest, self).__init__(
            '/regions/{regionId}/imageTasks', 'GET', header, version)
        self.parameters = parameters
class ImageTasksParameters(object):
    """Parameter holder for :class:`ImageTasksRequest`. Optional filters are
    left as ``None`` until the corresponding setter is called."""
    def __init__(self, regionId, ):
        """
        :param regionId: Region ID.
        """
        self.regionId = regionId
        self.taskAction = None
        self.taskIds = None
        self.taskStatus = None
        self.startTime = None
        self.endTime = None
        self.pageNumber = None
        self.pageSize = None
    def setTaskAction(self, taskAction):
        """
        :param taskAction: (Optional) Task operation type. Supported values: `ImportImage, ExportImage`.
        """
        self.taskAction = taskAction
    def setTaskIds(self, taskIds):
        """
        :param taskIds: (Optional) List of task IDs.
        """
        self.taskIds = taskIds
    def setTaskStatus(self, taskStatus):
        """
        :param taskStatus: (Optional) Task status. Supported values: `pending, running, failed, finished`.
        """
        self.taskStatus = taskStatus
    def setStartTime(self, startTime):
        """
        :param startTime: (Optional) Task start time.
        """
        self.startTime = startTime
    def setEndTime(self, endTime):
        """
        :param endTime: (Optional) Task end time.
        """
        self.endTime = endTime
    def setPageNumber(self, pageNumber):
        """
        :param pageNumber: (Optional) Page number; defaults to 1.
        """
        self.pageNumber = pageNumber
    def setPageSize(self, pageSize):
        """
        :param pageSize: (Optional) Page size; valid range [10, 100].
        """
        self.pageSize = pageSize
| 26.421569 | 82 | 0.642301 |
795a2967892cc90ba2124981fef6e9ca094ab3e1 | 13,965 | py | Python | ENV/lib/python3.6/site-packages/webob/multidict.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | 2 | 2018-10-02T15:47:45.000Z | 2018-10-02T15:50:55.000Z | lib/webob/multidict.py | uetopia/metagame | 4f48418e5a8e691698ff347370ef1447021b9a4f | [
"Apache-2.0"
] | 1 | 2018-08-30T21:35:33.000Z | 2018-08-30T21:35:33.000Z | lib/webob/multidict.py | uetopia/metagame | 4f48418e5a8e691698ff347370ef1447021b9a4f | [
"Apache-2.0"
] | 1 | 2018-08-20T16:55:05.000Z | 2018-08-20T16:55:05.000Z | # (c) 2005 Ian Bicking and contributors; written for Paste
# (http://pythonpaste.org) Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
Gives a multi-value dictionary object (MultiDict) plus several wrappers
"""
import binascii
import warnings
# ``MutableMapping`` moved to ``collections.abc`` in Python 3.3 and the old
# ``collections`` alias was removed in Python 3.10; fall back for Python 2.
try:
    from collections.abc import MutableMapping
except ImportError:  # pragma: no cover - Python 2
    from collections import MutableMapping
from webob.compat import (
    PY2,
    iteritems_,
    itervalues_,
    url_encode,
    )
__all__ = ['MultiDict', 'NestedMultiDict', 'NoVars', 'GetDict']
class MultiDict(MutableMapping):
    """
    An ordered dictionary that can have multiple values for each key.
    Adds the methods getall, getone, mixed and extend and add to the normal
    dictionary interface.
    """
    # Internal representation: ``self._items`` is a list of ``(key, value)``
    # tuples in insertion order; duplicate keys may appear multiple times.
    def __init__(self, *args, **kw):
        # Mirror dict(): at most one positional mapping/iterable of pairs.
        if len(args) > 1:
            raise TypeError("MultiDict can only be called with one positional "
                            "argument")
        if args:
            if hasattr(args[0], 'iteritems'):
                items = list(args[0].iteritems())
            elif hasattr(args[0], 'items'):
                items = list(args[0].items())
            else:
                items = list(args[0])
            self._items = items
        else:
            self._items = []
        if kw:
            self._items.extend(kw.items())
    @classmethod
    def view_list(cls, lst):
        """
        Create a dict that is a view on the given list
        """
        # The list is adopted directly (not copied): mutations of the
        # MultiDict mutate ``lst`` and vice versa.
        if not isinstance(lst, list):
            raise TypeError(
                "%s.view_list(obj) takes only actual list objects, not %r"
                % (cls.__name__, lst))
        obj = cls()
        obj._items = lst
        return obj
    @classmethod
    def from_fieldstorage(cls, fs):
        """
        Create a dict from a cgi.FieldStorage instance
        """
        obj = cls()
        # fs.list can be None when there's nothing to parse
        for field in fs.list or ():
            charset = field.type_options.get('charset', 'utf8')
            transfer_encoding = field.headers.get('Content-Transfer-Encoding', None)
            # Only these two transfer encodings are decoded; anything else
            # is passed through untouched.
            supported_transfer_encoding = {
                'base64' : binascii.a2b_base64,
                'quoted-printable' : binascii.a2b_qp
                }
            if not PY2:
                if charset == 'utf8':
                    decode = lambda b: b
                else:
                    # Round-trip through the declared charset on Python 3.
                    decode = lambda b: b.encode('utf8').decode(charset)
            else:
                decode = lambda b: b.decode(charset)
            if field.filename:
                # File upload: store the FieldStorage object itself.
                field.filename = decode(field.filename)
                obj.add(field.name, field)
            else:
                value = field.value
                if transfer_encoding in supported_transfer_encoding:
                    if not PY2:
                        # binascii accepts bytes
                        value = value.encode('utf8')
                    value = supported_transfer_encoding[transfer_encoding](value)
                    if not PY2:
                        # binascii returns bytes
                        value = value.decode('utf8')
                obj.add(field.name, decode(value))
        return obj
    def __getitem__(self, key):
        # Return the most recently added value for ``key`` (reverse scan).
        for k, v in reversed(self._items):
            if k == key:
                return v
        raise KeyError(key)
    def __setitem__(self, key, value):
        # Replace *all* existing values for ``key`` with a single value.
        try:
            del self[key]
        except KeyError:
            pass
        self._items.append((key, value))
    def add(self, key, value):
        """
        Add the key and value, not overwriting any previous value.
        """
        self._items.append((key, value))
    def getall(self, key):
        """
        Return a list of all values matching the key (may be an empty list)
        """
        return [v for k, v in self._items if k == key]
    def getone(self, key):
        """
        Get one value matching the key, raising a KeyError if multiple
        values were found.
        """
        v = self.getall(key)
        if not v:
            raise KeyError('Key not found: %r' % key)
        if len(v) > 1:
            raise KeyError('Multiple values match %r: %r' % (key, v))
        return v[0]
    def mixed(self):
        """
        Returns a dictionary where the values are either single
        values, or a list of values when a key/value appears more than
        once in this dictionary.  This is similar to the kind of
        dictionary often used to represent the variables in a web
        request.
        """
        result = {}
        multi = {}
        for key, value in self.items():
            if key in result:
                # We do this to not clobber any lists that are
                # *actual* values in this dictionary:
                if key in multi:
                    result[key].append(value)
                else:
                    result[key] = [result[key], value]
                    multi[key] = None
            else:
                result[key] = value
        return result
    def dict_of_lists(self):
        """
        Returns a dictionary where each key is associated with a list of values.
        """
        r = {}
        for key, val in self.items():
            r.setdefault(key, []).append(val)
        return r
    def __delitem__(self, key):
        # Delete *every* entry for ``key``; iterate backwards so index
        # deletion does not shift the items still to be visited.
        items = self._items
        found = False
        for i in range(len(items)-1, -1, -1):
            if items[i][0] == key:
                del items[i]
                found = True
        if not found:
            raise KeyError(key)
    def __contains__(self, key):
        for k, v in self._items:
            if k == key:
                return True
        return False
    has_key = __contains__
    def clear(self):
        del self._items[:]
    def copy(self):
        # Shallow copy: items are re-listed, values are shared.
        return self.__class__(self)
    def setdefault(self, key, default=None):
        # Unlike __getitem__, this returns the *first* match (forward scan).
        for k, v in self._items:
            if key == k:
                return v
        self._items.append((key, default))
        return default
    def pop(self, key, *args):
        # Removes only the first matching entry, not all of them.
        if len(args) > 1:
            raise TypeError("pop expected at most 2 arguments, got %s"
                            % repr(1 + len(args)))
        for i in range(len(self._items)):
            if self._items[i][0] == key:
                v = self._items[i][1]
                del self._items[i]
                return v
        if args:
            return args[0]
        else:
            raise KeyError(key)
    def popitem(self):
        return self._items.pop()
    def update(self, *args, **kw):
        if args:
            lst = args[0]
            if len(lst) != len(dict(lst)):
                # this does not catch the cases where we overwrite existing
                # keys, but those would produce too many warning
                msg = ("Behavior of MultiDict.update() has changed "
                       "and overwrites duplicate keys. Consider using .extend()"
                       )
                warnings.warn(msg, UserWarning, stacklevel=2)
        MutableMapping.update(self, *args, **kw)
    def extend(self, other=None, **kwargs):
        # Unlike update(), extend() keeps existing entries and appends.
        if other is None:
            pass
        elif hasattr(other, 'items'):
            self._items.extend(other.items())
        elif hasattr(other, 'keys'):
            for k in other.keys():
                self._items.append((k, other[k]))
        else:
            for k, v in other:
                self._items.append((k, v))
        if kwargs:
            self.update(kwargs)
    def __repr__(self):
        # Password-ish values are masked via _hide_passwd before display.
        items = map('(%r, %r)'.__mod__, _hide_passwd(self.items()))
        return '%s([%s])' % (self.__class__.__name__, ', '.join(items))
    def __len__(self):
        return len(self._items)
    ##
    ## All the iteration:
    ##
    # On Python 2 the iter*/list pairs follow the classic dict protocol;
    # on Python 3 keys/items/values are the generator variants directly.
    def iterkeys(self):
        for k, v in self._items:
            yield k
    if PY2:
        def keys(self):
            return [k for k, v in self._items]
    else:
        keys = iterkeys
    __iter__ = iterkeys
    def iteritems(self):
        return iter(self._items)
    if PY2:
        def items(self):
            return self._items[:]
    else:
        items = iteritems
    def itervalues(self):
        for k, v in self._items:
            yield v
    if PY2:
        def values(self):
            return [v for k, v in self._items]
    else:
        values = itervalues
_dummy = object()  # unique sentinel: distinguishes "key absent" from a stored None
class GetDict(MultiDict):
    """MultiDict view of a WSGI request's query string.

    Every mutating operation re-serialises the current items back into
    ``env['QUERY_STRING']`` via :meth:`on_change`, keeping the environ and
    the dict in sync.
    """
    def __init__(self, data, env):
        self.env = env
        MultiDict.__init__(self, data)
    def on_change(self):
        # Re-encode all items as UTF-8, rebuild the query string, and store
        # it in the environ together with webob's parse cache entry.
        e = lambda t: t.encode('utf8')
        data = [(e(k), e(v)) for k,v in self.items()]
        qs = url_encode(data)
        self.env['QUERY_STRING'] = qs
        self.env['webob._parsed_query_vars'] = (self, qs)
    # Each mutator below delegates to MultiDict and then syncs the environ.
    def __setitem__(self, key, value):
        MultiDict.__setitem__(self, key, value)
        self.on_change()
    def add(self, key, value):
        MultiDict.add(self, key, value)
        self.on_change()
    def __delitem__(self, key):
        MultiDict.__delitem__(self, key)
        self.on_change()
    def clear(self):
        MultiDict.clear(self)
        self.on_change()
    def setdefault(self, key, default=None):
        result = MultiDict.setdefault(self, key, default)
        self.on_change()
        return result
    def pop(self, key, *args):
        result = MultiDict.pop(self, key, *args)
        self.on_change()
        return result
    def popitem(self):
        result = MultiDict.popitem(self)
        self.on_change()
        return result
    def update(self, *args, **kwargs):
        MultiDict.update(self, *args, **kwargs)
        self.on_change()
    def extend(self, *args, **kwargs):
        MultiDict.extend(self, *args, **kwargs)
        self.on_change()
    def __repr__(self):
        items = map('(%r, %r)'.__mod__, _hide_passwd(self.items()))
        # TODO: GET -> GetDict
        return 'GET([%s])' % (', '.join(items))
    def copy(self):
        # Copies shouldn't be tracked
        return MultiDict(self)
class NestedMultiDict(MultiDict):
    """
    Wraps several MultiDict objects, treating it as one large MultiDict
    """
    # Read-only composite: lookups consult the wrapped dicts in order.
    def __init__(self, *dicts):
        self.dicts = dicts
    def __getitem__(self, key):
        # First wrapped dict that holds the key wins.
        for d in self.dicts:
            value = d.get(key, _dummy)
            if value is not _dummy:
                return value
        raise KeyError(key)
    def _readonly(self, *args, **kw):
        raise KeyError("NestedMultiDict objects are read-only")
    # All mutating operations are rejected.
    __setitem__ = _readonly
    add = _readonly
    __delitem__ = _readonly
    clear = _readonly
    setdefault = _readonly
    pop = _readonly
    popitem = _readonly
    update = _readonly
    def getall(self, key):
        # Concatenate matches from every wrapped dict, in wrapping order.
        result = []
        for d in self.dicts:
            result.extend(d.getall(key))
        return result
    # Inherited:
    # getone
    # mixed
    # dict_of_lists
    def copy(self):
        # Copies flatten into a plain (mutable) MultiDict.
        return MultiDict(self)
    def __contains__(self, key):
        for d in self.dicts:
            if key in d:
                return True
        return False
    has_key = __contains__
    def __len__(self):
        v = 0
        for d in self.dicts:
            v += len(d)
        return v
    def __nonzero__(self):
        # Python 2 truthiness hook: truthy if any wrapped dict is non-empty.
        for d in self.dicts:
            if d:
                return True
        return False
    def iteritems(self):
        for d in self.dicts:
            for item in iteritems_(d):
                yield item
    if PY2:
        def items(self):
            return list(self.iteritems())
    else:
        items = iteritems
    def itervalues(self):
        for d in self.dicts:
            for value in itervalues_(d):
                yield value
    if PY2:
        def values(self):
            return list(self.itervalues())
    else:
        values = itervalues
    def __iter__(self):
        for d in self.dicts:
            for key in d:
                yield key
    iterkeys = __iter__
    if PY2:
        def keys(self):
            return list(self.iterkeys())
    else:
        keys = iterkeys
class NoVars(object):
    """
    Represents no variables; used when no variables
    are applicable.
    This is read-only
    """
    # Behaves like a permanently-empty, read-only mapping; ``reason``
    # explains in error messages why no variables are available.
    def __init__(self, reason=None):
        self.reason = reason or 'N/A'
    def __getitem__(self, key):
        raise KeyError("No key %r: %s" % (key, self.reason))
    def __setitem__(self, *args, **kw):
        raise KeyError("Cannot add variables: %s" % self.reason)
    add = __setitem__
    setdefault = __setitem__
    update = __setitem__
    def __delitem__(self, *args, **kw):
        raise KeyError("No keys to delete: %s" % self.reason)
    clear = __delitem__
    pop = __delitem__
    popitem = __delitem__
    def get(self, key, default=None):
        return default
    def getall(self, key):
        return []
    def getone(self, key):
        # Always raises KeyError (via __getitem__) since nothing is stored.
        return self[key]
    def mixed(self):
        return {}
    dict_of_lists = mixed
    def __contains__(self, key):
        return False
    has_key = __contains__
    def copy(self):
        return self
    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__,
                             self.reason)
    def __len__(self):
        return 0
    def iterkeys(self):
        return iter([])
    if PY2:
        def __cmp__(self, other):
            # Compare as an empty dict on Python 2.
            return cmp({}, other)
        def keys(self):
            return []
        items = keys
        values = keys
        itervalues = iterkeys
        iteritems = iterkeys
    else:
        keys = iterkeys
        items = iterkeys
        values = iterkeys
    __iter__ = iterkeys
def _hide_passwd(items):
for k, v in items:
if ('password' in k
or 'passwd' in k
or 'pwd' in k
):
yield k, '******'
else:
yield k, v
| 27.490157 | 84 | 0.529252 |
795a2a5f079bb372769da74a40fa122aa25f7ffd | 289 | py | Python | src/5/5054.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 11 | 2020-09-20T15:17:11.000Z | 2022-03-17T12:43:33.000Z | src/5/5054.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 3 | 2021-10-30T07:51:36.000Z | 2022-03-09T05:19:23.000Z | src/5/5054.py | youngdaLee/Baekjoon | 7d858d557dbbde6603fe4e8af2891c2b0e1940c0 | [
"MIT"
] | 13 | 2021-01-21T03:19:08.000Z | 2022-03-28T10:44:58.000Z | """
5054. 주차의 신
작성자: xCrypt0r
언어: Python 3
사용 메모리: 29,380 KB
소요 시간: 76 ms
해결 날짜: 2020년 9월 16일
"""
def main():
    """Baekjoon 5054: for each test case print the round-trip distance
    between the leftmost and rightmost parking spot."""
    case_count = int(input())
    for _ in range(case_count):
        input()  # spot count line -- unused, the whole positions line is read
        positions = [int(token) for token in input().split()]
        print(2 * (max(positions) - min(positions)))
if __name__ == '__main__':
main()
| 13.761905 | 47 | 0.532872 |
795a2aee5bbb8ddc20920d889d1442a10aa91852 | 5,522 | py | Python | examples/reID/caffe/modelscripts/generate_vgg16.py | yuqj1990/deepano_train | c7247801ccea4c3a5c0be9c9091fc91876dbf279 | [
"Unlicense"
] | 21 | 2019-11-28T06:11:17.000Z | 2020-06-15T00:45:46.000Z | examples/reID/caffe/modelscripts/generate_vgg16.py | yuqj1990/deepano_train | c7247801ccea4c3a5c0be9c9091fc91876dbf279 | [
"Unlicense"
] | 12 | 2019-11-20T01:38:28.000Z | 2020-06-30T06:29:40.000Z | examples/reID/caffe/modelscripts/generate_vgg16.py | yuqj1990/deepano_face_train | c7247801ccea4c3a5c0be9c9091fc91876dbf279 | [
"Unlicense"
] | 10 | 2019-11-26T00:50:06.000Z | 2020-06-20T05:43:45.000Z | import _init_paths
import os
import os.path as osp
import caffe
from caffe import layers as L, params as P
from caffe import tools
from caffe.model_libs import *
def vgg16_body(net, data, post, is_train):
    """Append the VGG-16 trunk (5 conv stages + fc6/fc7 with dropout) to
    ``net``, reading from the blob named ``data``.

    ``post`` is a suffix appended to every layer name (e.g. for building
    parallel towers).  Returns ``(net, final_blob_name)``.
    """
    # the net itself
    #param = [dict(lr_mult=0.5,decay_mult=1), dict(lr_mult=1,decay_mult=0)]
    param = None  # use the conv_relu/fc_relu default lr/decay multipliers
    nunits_list = [2, 2, 3, 3, 3]     # conv layers per stage (VGG-16 layout)
    nouts = [64, 128, 256, 512, 512]  # output channels per stage
    main_name = data
    for idx, (nout, nunits) in enumerate(zip(nouts, nunits_list)): # for each depth and nunits
        for unit in range(1, nunits + 1): # for each unit. Enumerate from 1.
            convn = 'conv' + str(idx+1) + '_' + str(unit) + post
            relun = 'relu' + str(idx+1) + '_' + str(unit) + post
            #param = 'conv' + str(idx+1) + '_' + str(unit) + 'w'
            net[convn], net[relun] = conv_relu(net[main_name], 3, nout, pad = 1, is_train=is_train, param=param)
            main_name = relun
        # 2x2/2 max pooling closes each stage.
        pooln = 'pool' + str(idx+1) + post
        net[pooln] = L.Pooling(net[main_name], stride = 2, kernel_size = 2, pool = P.Pooling.MAX, ntop=1)
        main_name = pooln
    net['fc6'+post], net['relu6'+post] = fc_relu(net[main_name], 4096, is_train=is_train, param = param)
    net['drop6'+post] = L.Dropout(net['relu6'+post], in_place=True, dropout_ratio=0.5)
    net['fc7'+post], net['relu7'+post] = fc_relu(net['drop6'+post], 4096, is_train=is_train, param = param)
    net['drop7'+post] = L.Dropout(net['relu7'+post], in_place=True, dropout_ratio=0.5)
    final_name = 'drop7'+post
    return net, final_name
# main netspec wrapper
def vgg16_train(mean_value, list_file, is_train=True):
    """Build the training prototxt: ReID data layer + VGG-16 trunk with an
    identity-classification head (751 classes) and a pair-similarity head.

    Returns the network definition as a prototxt string.
    """
    # setup the python data layer
    net = caffe.NetSpec()
    net.data, net.label \
        = L.ReidData(transform_param=dict(mirror=True,crop_size=224,mean_value=mean_value),
                     reid_data_param=dict(source=list_file,batch_size=32,new_height=256,new_width=256,
                                          pos_fraction=1,neg_fraction=1,pos_limit=1,neg_limit=4,pos_factor=1,neg_factor=1.01),
                     ntop = 2)
    net, final = vgg16_body(net, 'data', '', is_train)
    #param = param = [dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)]
    param = None
    # Identity classification: 751 classes (the script below trains on the
    # market1501 list file -- presumably the Market-1501 identity count).
    net['score'] = fc_relu(net[final], nout=751, is_train=is_train, has_relu=False, param = param)
    # Pairwise branch: same/different-identity classification on feature pairs.
    net['euclidean'], net['label_dif'] = L.PairEuclidean(net[final], net['label'], ntop=2)
    net['score_dif'] = fc_relu(net['euclidean'], nout=2, is_train=is_train, has_relu=False, param = param)
    net['loss'] = L.SoftmaxWithLoss(net['score'], net['label'] , propagate_down=[1,0], loss_weight= 1)
    net['loss_dif'] = L.SoftmaxWithLoss(net['score_dif'], net['label_dif'], propagate_down=[1,0], loss_weight=0.5)
    return str(net.to_proto())
def vgg16_dev(data_param = dict(shape=dict(dim=[2, 3, 224, 224])), label_param = dict(shape=dict(dim=[2]))):
    """Build an inference prototxt with explicit Input layers for data and
    labels, exposing both the identity scores and the pair-difference scores.

    NOTE(review): the dict defaults are mutable but never mutated here, so
    this is safe; they exist only to define the Input layer shapes.
    """
    # setup the python data layer
    net = caffe.NetSpec()
    net['data'] = L.Input(input_param = data_param)
    net['label'] = L.Input(input_param = label_param)
    net, final = vgg16_body(net, 'data', '', False)
    net['score'] = fc_relu(net[final], nout=751, is_train=False, has_relu=False)
    net['euclidean'], net['label_dif'] = L.PairEuclidean(net[final], net['label'], ntop = 2)
    net['score_dif'] = fc_relu(net['euclidean'], nout=2, is_train=False, has_relu=False)
    return str(net.to_proto())
def vgg16_score(input_param = dict(shape=dict(dim=[1, 3, 224, 224]))):
    """Build a single-image scoring prototxt: VGG-16 trunk, identity scores
    and a softmax ``prediction`` output."""
    # setup the python data layer
    net = caffe.NetSpec()
    net['data'] = L.Input(input_param = input_param)
    net, final = vgg16_body(net, 'data', '', is_train=False)
    net['score'] = fc_relu(net[final], nout=751, is_train=False, has_relu=False)
    net['prediction'] = L.Softmax(net['score'])
    return str(net.to_proto())
# --- Script body: prepare the working directories -------------------------
workdir = osp.join(osp.dirname(__file__), 'vgg16')
if not os.path.isdir(workdir):
    os.makedirs(workdir)
logdir = osp.join(workdir, 'log')
if not os.path.isdir(logdir):
    os.makedirs(logdir)
snapshotdir = osp.join(workdir, 'snapshot')
if not os.path.isdir(snapshotdir):
    os.makedirs(snapshotdir)
print('Work Dir : {}'.format(workdir))
# --- Write the solver configuration ---------------------------------------
train_proto = osp.join(workdir, "train.proto")
solverproto = tools.CaffeSolver(trainnet_prototxt_path = train_proto, testnet_prototxt_path = None)
solverproto.sp['display'] = "50"
solverproto.sp['base_lr'] = "0.001"
solverproto.sp['stepsize'] = "16000"
solverproto.sp['max_iter'] = "18000"
solverproto.sp['snapshot'] = "2000"
solverproto.sp['iter_size'] = "1"
solverproto.sp['type'] = "\"Nesterov\""
solverproto.sp['snapshot_prefix'] = "\"{}/snapshot/vgg16.full\"".format(workdir)
solverproto.write(osp.join(workdir, 'solver.proto'))
# Training data list and per-channel image mean.
list_file = 'examples/market1501/lists/train.lst'
mean_value = [97.8286, 99.0468, 105.606]
# write train net.
with open(train_proto, 'w') as f:
    f.write(vgg16_train(mean_value, list_file, True))
# NOTE(review): dev.proto is written from vgg16_score() while deploy.proto is
# written from vgg16_dev() -- the names look swapped; confirm intent.
dev_proto = osp.join(workdir, "dev.proto")
with open(dev_proto, 'w') as f:
    f.write(vgg16_score())
dep_proto = osp.join(workdir, "deploy.proto")
with open(dep_proto, 'w') as f:
    f.write(vgg16_dev())
# --- Emit a helper shell script that launches caffe training --------------
train_shell = osp.join(workdir, "train.sh")
with open(train_shell, 'w') as f:
    f.write('#!/usr/bin/env sh\n')
    f.write('model_dir=models/market1501/vgg16\n')
    f.write('pre_train_dir=${HOME}/datasets/model_pretrained/vgg\n\n')
    f.write('GLOG_log_dir=${model_dir}/log ./build/tools/caffe train ')
    f.write('--solver ${model_dir}/solver.proto --weights ${pre_train_dir}/VGG_ILSVRC_16_layers.caffemodel $@')
795a2c0c8e419372650770606437e1a445aa6041 | 21 | py | Python | chemisova-helloworld.py | masha719/lab6 | 4928d682ee10184c163730341a8f168714f8d6a5 | [
"MIT"
] | null | null | null | chemisova-helloworld.py | masha719/lab6 | 4928d682ee10184c163730341a8f168714f8d6a5 | [
"MIT"
] | null | null | null | chemisova-helloworld.py | masha719/lab6 | 4928d682ee10184c163730341a8f168714f8d6a5 | [
"MIT"
] | null | null | null | print('Helllo world') | 21 | 21 | 0.761905 |
795a2cea537a2cb1d91c2a5dfad32ef4687b818b | 9,895 | py | Python | open_spiel/python/algorithms/psro_variations/nash_solver/general_nash_solver.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | [
"Apache-2.0"
] | null | null | null | open_spiel/python/algorithms/psro_variations/nash_solver/general_nash_solver.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | [
"Apache-2.0"
] | null | null | null | open_spiel/python/algorithms/psro_variations/nash_solver/general_nash_solver.py | wyz2368/open_spiel_egta | 6bcb3d4d863e7d89283029dd860412c3ef1731dd | [
"Apache-2.0"
] | 1 | 2020-12-25T03:02:31.000Z | 2020-12-25T03:02:31.000Z | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script provides a general Nash equilibrium solver that can solve general-sum many-player games.
"""
from __future__ import absolute_import
from __future__ import division
# from __future__ import logging.info_function
import fractions
import itertools
import os
import subprocess
import tempfile
import warnings
import nashpy
import numpy as np
import logging
from open_spiel.python.algorithms import lp_solver
import pyspiel
from open_spiel.python.algorithms.psro_variations.nash_solver.gambit_tools import do_gambit_analysis
from open_spiel.python.algorithms.psro_variations.nash_solver.replicator_dynamics_solver import replicator_dynamics
def renormalize(probabilities):
  """Replaces all negative entries with zeroes and normalizes the result.

  Note: the clamping mutates ``probabilities`` in place; the returned
  vector is a new array rescaled to sum to one.

  Args:
    probabilities: probability vector to renormalize. Has to be one-dimensional.

  Returns:
    Renormalized probabilities.
  """
  # (Docstring fix: the previous text said "non-zero entries", but the code
  # clamps *negative* entries, as needed for slightly-out-of-range NE output.)
  probabilities[probabilities < 0] = 0
  probabilities = probabilities / np.sum(probabilities)
  return probabilities
@np.vectorize
def _to_fraction_str(x, lrsnash_max_denom=1000):
return str(fractions.Fraction(x).limit_denominator(lrsnash_max_denom))
def lrs_solve(row_payoffs, col_payoffs, lrsnash_path):
  """Find all Nash equilibria using the lrsnash solver.

  `lrsnash` uses reverse search vertex enumeration on rational polytopes.
  For more info, see: http://cgm.cs.mcgill.ca/~avis/C/lrslib/USERGUIDE.html#nash

  Args:
    row_payoffs: payoffs for row player
    col_payoffs: payoffs for column player
    lrsnash_path: path to the lrsnash executable (falls back to "lrsnash"
      on the PATH when None/empty)

  Returns:
    A list of [row_mixture, col_mixture] pairs, numpy vectors of float64s.
    (Docstring fix: this variant collects results into a list rather than
    yielding them.)
  """
  num_rows, num_cols = row_payoffs.shape
  game_file, game_file_path = tempfile.mkstemp()
  try:
    game_file = os.fdopen(game_file, "w")
    # write dimensions
    game_file.write("%d %d\n\n" % (num_rows, num_cols))
    # write row-player payoff matrix as fractions
    for row in range(num_rows):
      game_file.write(" ".join(_to_fraction_str(row_payoffs[row])) + "\n")
    game_file.write("\n")
    # write col-player payoff matrix as fractions
    for row in range(num_rows):
      game_file.write(" ".join(_to_fraction_str(col_payoffs[row])) + "\n")
    game_file.write("\n")
    game_file.close()
    lrs = subprocess.Popen(
        [lrsnash_path or "lrsnash", "-s", game_file_path],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    lrs_result = lrs.communicate()[0]
    equilibria = []
    col_mixtures = []
    for line in lrs_result.split(b'\n'):
      if len(line) <= 1 or line[:1] == b"*":
        continue
      # np.asfarray was deprecated in NumPy 1.25 and removed in NumPy 2.0;
      # np.asarray(..., dtype=np.float64) is the documented replacement.
      line = np.asarray(
          [fractions.Fraction(x) for x in line.decode().split()],
          dtype=np.float64)
      if line[0] == 2:  # col-player
        col_mixtures.append(line[1:-1])
      else:  # row-player
        row_mixture = line[1:-1]
        # row-mixture forms a Nash with every col-mixture listed directly above
        for col_mixture in col_mixtures:
          equilibria.append([row_mixture, col_mixture])
        col_mixtures = []
    return equilibria
  finally:
    os.remove(game_file_path)
def lemke_howson_solve(row_payoffs, col_payoffs):
  """Find Nash equilibria using the Lemke-Howson algorithm.

  The algorithm is not guaranteed to find all equilibria. Also it can yield
  wrong answers if the game is degenerate (but raises warnings in that case).

  Args:
    row_payoffs: payoffs for row player
    col_payoffs: payoffs for column player

  Yields:
    (row_mixture, col_mixture), numpy vectors of float64s.
  """
  # Temporarily hook warnings.showwarning so we can detect nashpy's
  # "degenerate game" warning and sanity-check subsequent solutions.
  showwarning = warnings.showwarning
  warned_degenerate = [False]
  def showwarning_check_degenerate(message, *args, **kwargs):
    if "Your game could be degenerate." in str(message):
      warned_degenerate[0] = True
    showwarning(message, *args, **kwargs)
  try:
    warnings.showwarning = showwarning_check_degenerate
    for row_mixture, col_mixture in nashpy.Game(
        row_payoffs, col_payoffs).lemke_howson_enumeration():
      if warned_degenerate[0]:
        # NOTE(review): leftover debug output on stdout -- consider
        # replacing with a warning/log call.
        print('wrong results')
        # attempt to discard obviously-wrong results
        if (row_mixture.shape != row_payoffs.shape[:1] or
            col_mixture.shape != row_payoffs.shape[1:]):
          warnings.warn("Discarding ill-shaped solution.")
          continue
        if (not np.isfinite(row_mixture).all() or
            not np.isfinite(col_mixture).all()):
          warnings.warn("Discarding non-finite solution.")
          continue
      yield row_mixture, col_mixture
  finally:
    # Always restore the original warning hook.
    warnings.showwarning = showwarning
def gambit_solve(meta_games, mode, gambit_path=None):
  """Compute Nash equilibria of ``meta_games`` via the gambit backend.

  Args:
    meta_games: meta-games in PSRO.
    mode: one of "all", "one" or "pure".
    gambit_path: optional path to the gambit installation.

  Returns:
    A list of NE.
  """
  return do_gambit_analysis(meta_games, mode, gambit_path=gambit_path)
def pure_ne_solve(meta_games, tol=1e-7):
  """Enumerate pure-strategy NE of a two-player game.

  Only works for two-player games; for more players the nash_solver falls
  back to gambit.

  Args:
    meta_games: meta-games in PSRO (pair of payoff matrices).
    tol: numerical tolerance when comparing payoffs to the best response.

  Returns:
    A list of [p1_strategy, p2_strategy] pure equilibria as one-hot vectors.
  """
  row_payoffs, col_payoffs = meta_games[0], meta_games[1]
  # A cell is a pure NE when it is (within tol) a best response for both
  # the row player (column-wise max) and the column player (row-wise max).
  row_is_best = row_payoffs >= row_payoffs.max(0, keepdims=True) - tol
  col_is_best = col_payoffs >= col_payoffs.max(1, keepdims=True) - tol
  num_rows, num_cols = np.shape(meta_games[0])
  pure_ne = []
  for row, col in zip(*np.nonzero(row_is_best & col_is_best)):
    p1_strategy = np.zeros(num_rows)
    p2_strategy = np.zeros(num_cols)
    p1_strategy[row] = 1
    p2_strategy[col] = 1
    pure_ne.append([p1_strategy, p2_strategy])
  return pure_ne
def nash_solver(meta_games,
                solver="gambit",
                mode="one",
                gambit_path=None,
                lrsnash_path=None):
  """
  Solver for NE.
  :param meta_games: meta-games in PSRO.
  :param solver: options "gambit", "nashpy", "linear", "lrsnash", "replicator".
  :param mode: options "all", "one", "pure"
  :param lrsnash_path: path to lrsnash solver.
  :return: a list of NE.
  WARNING:
  opening up a subprocess in every iteration eventually
  leads the os to block the subprocess. Not usable.
  """
  num_players = len(meta_games)
  # gambit and replicator dynamics handle any number of players; every
  # other backend below is two-player only.
  if solver == "gambit":
    return gambit_solve(meta_games, mode, gambit_path=gambit_path)
  elif solver == "replicator":
    return [replicator_dynamics(meta_games)]
  else:
    assert num_players == 2
    num_rows, num_cols = np.shape(meta_games[0])
    row_payoffs, col_payoffs = meta_games[0], meta_games[1]
    # Degenerate 1xN / Nx1 games: every pure profile pairing is returned.
    if num_rows == 1 or num_cols == 1:
      equilibria = itertools.product(np.eye(num_rows), np.eye(num_cols))
    elif mode == 'pure':
      return pure_ne_solve(meta_games)
    elif solver == "linear":
      # Zero-sum LP formulation via open_spiel's lp_solver.
      meta_games = [x.tolist() for x in meta_games]
      nash_prob_1, nash_prob_2, _, _ = (
          lp_solver.solve_zero_sum_matrix_game(
              pyspiel.create_matrix_game(*meta_games)))
      return [
          renormalize(np.array(nash_prob_1).reshape(-1)),
          renormalize(np.array(nash_prob_2).reshape(-1))
      ]
    elif solver == "lrsnash":
      logging.info("Using lrsnash solver.")
      equilibria = lrs_solve(row_payoffs, col_payoffs, lrsnash_path)
    elif solver == "nashpy":
      if mode == "all":
        logging.info("Using nashpy vertex enumeration.")
        equilibria = nashpy.Game(row_payoffs, col_payoffs).vertex_enumeration()
      else:
        logging.info("Using nashpy Lemke-Howson solver.")
        equilibria = lemke_howson_solve(row_payoffs, col_payoffs)
    else:
      raise ValueError("Please choose a valid NE solver.")
    equilibria = iter(equilibria)
    # check that there's at least one equilibrium
    try:
      equilibria = itertools.chain([next(equilibria)], equilibria)
    except StopIteration:
      logging.warning("degenerate game!")
      # pklfile = open('/home/qmaai/degenerate_game.pkl','wb')
      # pickle.dump([row_payoffs,col_payoffs],pklfile)
      # pklfile.close()
      # degenerate game apply support enumeration
      equilibria = nashpy.Game(row_payoffs, col_payoffs).support_enumeration()
      try:
        equilibria = itertools.chain([next(equilibria)], equilibria)
      except StopIteration:
        logging.warning("no equilibrium!")
    equilibria = list(equilibria)
    if mode == 'all':
      return equilibria
    elif mode == 'one':
      return equilibria[0]
    else:
      raise ValueError("Please choose a valid mode.")
def normalize_ne(eq):
  """Clamp negative probabilities to zero and renormalize each mixture.

  Mutates ``eq`` (a list of per-player strategy lists) in place, replacing
  each entry with a plain Python list that sums to one, and returns it.
  """
  for player, mixture in enumerate(eq):
    probs = np.array(mixture)
    probs[probs < 0] = 0
    probs /= probs.sum()
    eq[player] = probs.tolist()
  return eq
| 37.059925 | 115 | 0.637393 |
795a2cee0ed333971b4806b53c079eb59e4f77e0 | 689 | py | Python | dql_course/exercises/random_policy.py | ecly/ai_experiments | 94b5b063345761cd1668132610ddc59749b84a47 | [
"MIT"
] | null | null | null | dql_course/exercises/random_policy.py | ecly/ai_experiments | 94b5b063345761cd1668132610ddc59749b84a47 | [
"MIT"
] | null | null | null | dql_course/exercises/random_policy.py | ecly/ai_experiments | 94b5b063345761cd1668132610ddc59749b84a47 | [
"MIT"
] | null | null | null | import gym
import numpy as np
import matplotlib.pyplot as plt
def main():
    """Run 1000 FrozenLake episodes with a uniformly random policy and plot
    the running win percentage (mean reward over the last 10 episodes,
    sampled every 10th episode)."""
    env = gym.make("FrozenLake-v1")
    env.reset()
    running_win_perc = []
    scores = []
    for episode in range(1000):
        score = 0
        while True:
            action = env.action_space.sample()
            _observation, reward, done, _info = env.step(action)
            score += reward
            if done:
                break
        # Start the next episode; the returned observation is not needed
        # because actions are sampled independently of state.
        env.reset(return_info=True)
        scores.append(score)
        if episode % 10 == 0:
            running_win_perc.append(np.mean(scores[-10:]))
    plt.plot(running_win_perc)
    plt.show()
    env.close()
if __name__ == "__main__":
main()
| 22.225806 | 64 | 0.561684 |
795a2dee9e98c86c435ec0756a648e904229572f | 1,409 | py | Python | blog/middleware.py | sunguotao/DjangoMysqlBlog | 136a21007475a302a62341ac6b9ab3fbb1bfafb9 | [
"MIT"
] | null | null | null | blog/middleware.py | sunguotao/DjangoMysqlBlog | 136a21007475a302a62341ac6b9ab3fbb1bfafb9 | [
"MIT"
] | 11 | 2020-02-11T23:51:47.000Z | 2022-03-11T23:17:21.000Z | blog/middleware.py | sunguotao/DjangoMysqlBlog | 136a21007475a302a62341ac6b9ab3fbb1bfafb9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@version: ??
@author: systemsgt.cn
@license: MIT Licence
@contact: 18653076096@163.com
@site: http://www.systemsgt.cn
@software: PyCharm
@file: middleware.py
@time: 2017/1/19 上午12:36
"""
import time
from ipware.ip import get_real_ip
from DjangoBlog.utils import cache
class OnlineMiddleware(object):
    """Track currently-online visitor IPs in the cache and stamp the page
    render time into the response body.

    NOTE(review): this uses the pre-Django-1.10 middleware protocol (no
    ``get_response`` constructor) — presumably registered under
    ``MIDDLEWARE_CLASSES``; confirm against the project settings.
    """

    def process_request(self, request):
        # Record when this request started so process_response can report
        # the elapsed render time.
        # NOTE(review): middleware instances are shared across requests, so
        # concurrent requests may overwrite each other's start_time.
        self.start_time = time.time()

    def process_view(self, request, view_func, view_args, view_kwargs):
        """Maintain the set of currently-online visitor IPs."""
        # Skip crawlers so bots do not inflate the online count.
        http_user_agent = request.META.get('HTTP_USER_AGENT', [])
        if 'Spider' in http_user_agent or 'spider' in http_user_agent:
            return
        online_ips = cache.get("online_ips", [])
        if online_ips:
            # Keep only IPs whose per-IP cache entry has not expired yet.
            online_ips = cache.get_many(online_ips).keys()
            online_ips = list(online_ips)
        ip = get_real_ip(request)
        # Mark this IP as online for the next 5 minutes.
        cache.set(ip, 0, 5 * 60)
        if ip not in online_ips:
            online_ips.append(ip)
        # (Removed a dead assignment `s = type(online_ips)` that served no
        # purpose.)
        cache.set("online_ips", online_ips)

    def process_response(self, request, response):
        # Fallback elapsed time used when process_request never ran (e.g.
        # an earlier middleware short-circuited the request).
        cast_time = 0.921
        if self.__dict__ and 'start_time' in self.__dict__:
            cast_time = time.time() - self.start_time
        # Replace the template placeholder with the elapsed seconds,
        # truncated to 5 characters.
        response.content = response.content.replace(b'<!!LOAD_TIMES!!>', str.encode(str(cast_time)[:5]))
        return response
795a2e632f18ca66cac6fd171d99e3881f86321f | 10,453 | py | Python | samples_to_sprite.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | null | null | null | samples_to_sprite.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | null | null | null | samples_to_sprite.py | skratchdot/media-tools | bca0c683fb637aeefda1c49454a118f809047d97 | [
"MIT"
] | 1 | 2021-05-25T08:11:10.000Z | 2021-05-25T08:11:10.000Z | # -*- coding: utf-8 -*-
import argparse
import math
import numpy as np
import os
from PIL import Image
from pprint import pprint
import sys
from lib.audio_mixer import *
from lib.audio_utils import *
from lib.clip import *
from lib.collection_utils import *
from lib.color_utils import *
from lib.composition_utils import *
from lib.io_utils import *
from lib.math_utils import *
from lib.video_utils import *
# input
# Command-line interface: input sample CSV and media location, sprite-sheet
# geometry, layout type (grid vs. cloud), and output file patterns.
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="tmp/samples_tsne.csv", help="Input file")
# Fixed copy-pasted help text: this is the media directory, not the input file.
parser.add_argument('-dir', dest="MEDIA_DIRECTORY", default="media/sample/", help="Directory containing the source media files")
parser.add_argument('-props', dest="PROPS", default="tsne,tsne2", help="Properties to sort x,y matrix by; only necessary for cloud type; will use gridX,gridY for grid type")
parser.add_argument('-sort', dest="SORT", default="clarity=desc=0.5&power=desc", help="Query string to filter and sort by")
parser.add_argument('-lim', dest="LIMIT", default=-1, type=int, help="Target total sample count, -1 for everything")
parser.add_argument('-width', dest="IMAGE_W", default=2048, type=int, help="Image width in pixels")
parser.add_argument('-height', dest="IMAGE_H", default=2048, type=int, help="Image height in pixels")
parser.add_argument('-cell', dest="CELL_DIMENSIONS", default="32x32", help="Dimensions of each cell")
parser.add_argument('-count', dest="FILE_COUNT", default=6, type=int, help="Number of audio files to produce")
parser.add_argument('-cdurmax', dest="CLIP_DURATION_MAX", default=300, type=int, help="Max duration of clip in milliseconds; -1 for no max")
parser.add_argument('-id', dest="UNIQUE_ID", default="sample", help="Key for naming files")
parser.add_argument('-type', dest="TYPE", default="grid", help="Grid or cloud")
# Fixed copy-pasted help text: this is the cache directory pattern, not the layout type.
parser.add_argument('-cached', dest="CACHE_DIR", default="tmp/sprite_{uid}_cache/", help="Directory pattern for cached pixel data")
parser.add_argument('-outaud', dest="OUT_AUDIO", default="ui/sprites/sprite/{uid}/{uid}.mp3", help="Output audio pattern")
parser.add_argument('-outdat', dest="OUT_DATA", default="ui/sprites/sprite/{uid}/{uid}.json", help="Output data pattern")
parser.add_argument('-outimg', dest="OUT_IMAGE", default="ui/sprites/sprite/{uid}/{uid}.png", help="Output image pattern")
parser.add_argument('-mode', dest="IMAGE_SPRITE_MODE", default="audio", help="Is this a video or audio collection?")
parser.add_argument('-fingerprints', dest="FINGERPRINTS_FILE", default="", help="Path to fingerprint file (generated by samples_to_fingerprints.py) for audio sprite mode")
parser.add_argument('-log', dest="LOG", default=0, type=int, help="Display using log?")
parser.add_argument('-overwrite', dest="OVERWRITE", action="store_true", help="Overwrite existing?")
parser.add_argument('-probe', dest="PROBE", action="store_true", help="Just display durations?")
parser.add_argument('-image', dest="IMAGE_ONLY", action="store_true", help="Just output image with no audio?")
parser.add_argument('-colorful', dest="COLORFUL_IMAGES", action="store_true", help="Add background color to images?")
args = parser.parse_args()
# Parse arguments into module-level constants
INPUT_FILE = args.INPUT_FILE
MEDIA_DIRECTORY = args.MEDIA_DIRECTORY
SORT = args.SORT
# The two property names used as x/y axes for the "cloud" layout
PROP1, PROP2 = tuple([p for p in args.PROPS.strip().split(",")])
IMAGE_W = args.IMAGE_W
IMAGE_H = args.IMAGE_H
CELL_W, CELL_H = tuple([int(d) for d in args.CELL_DIMENSIONS.split("x")])
# Grid dimensions in cells, derived from the image and cell sizes
GRID_W, GRID_H = (int(IMAGE_W/CELL_W), int(IMAGE_H/CELL_H))
LIMIT = args.LIMIT
FILE_COUNT = args.FILE_COUNT
UNIQUE_ID = args.UNIQUE_ID
OVERWRITE = args.OVERWRITE
TYPE = args.TYPE
LOG = args.LOG
# Frame rate used when loading video pixel data for the sprite image
FPS = 30
# A grid layout with no explicit limit is capped at one sample per cell
if TYPE == "grid" and LIMIT < 0:
    LIMIT = GRID_W * GRID_H
    print("Limiting grid to %s x %s = %s" % (GRID_W, GRID_H, LIMIT))
# Substitute the unique id into the output file patterns
AUDIO_FILE = args.OUT_AUDIO.format(uid=UNIQUE_ID)
MANIFEST_FILE = args.OUT_DATA.format(uid=UNIQUE_ID)
IMAGE_FILE = args.OUT_IMAGE.format(uid=UNIQUE_ID)
CACHE_DIR = args.CACHE_DIR.format(uid=UNIQUE_ID)
# Read the sample metadata
fieldNames, rows = readCsv(INPUT_FILE)
rowCount = len(rows)
print("Found %s rows" % rowCount)

# Sort by the query string and truncate when a sample limit applies
if LIMIT > 0 and len(rows) > LIMIT:
    rows = sortByQueryString(rows, SORT)
    rows = rows[:LIMIT]
    rowCount = len(rows)

rows = addIndices(rows)
rows = prependAll(rows, ("filename", MEDIA_DIRECTORY))
# Cap clip durations and record each clip's midpoint time "t"
for i, row in enumerate(rows):
    dur = row["dur"] if args.CLIP_DURATION_MAX <= 0 else min(args.CLIP_DURATION_MAX, row["dur"])
    rows[i]["dur"] = dur
    rows[i]["t"] = row["start"] + roundInt(dur*0.5)

# Optionally map the layout properties onto a logarithmic scale
# (LOG=1 uses the natural log; LOG>1 is used as the base)
if LOG > 0:
    for i, row in enumerate(rows):
        base = LOG if LOG > 1 else math.e
        rows[i][PROP1] = math.log(row[PROP1], base)
        rows[i][PROP2] = math.log(row[PROP2], base)

# Report expected durations
totalDur = sum([r["dur"] for r in rows])
print("Total duration: %s" % formatSeconds(totalDur/1000.0))
print("Each file will be about %s" % formatSeconds(totalDur/1000.0/FILE_COUNT))

# (Removed a duplicated block of commented-out fingerprint debugging code
# that previously sat here.)
if args.PROBE:
    sys.exit()

# sort rows by filename to reduce number of file reads
rows = sorted(rows, key=lambda r: r["filename"])

# Make sure output dirs exist
makeDirectories([AUDIO_FILE, MANIFEST_FILE, IMAGE_FILE, CACHE_DIR])
# Split the samples evenly across FILE_COUNT audio sprite files;
# the last file absorbs any rounding remainder.
samplesPerFile = ceilInt(1.0 * rowCount / FILE_COUNT)
audioSpriteFiles = []
# Per-sample sprite metadata, indexed by the sample's original row index.
# Each entry starts as [audioFileIndex, offsetMsInSprite, sourceStartMs, durMs]
# and is extended with layout/metadata fields later in the script.
sprites = [None for i in range(rowCount)]
for file in range(FILE_COUNT):
    iStart = file * samplesPerFile
    iEnd = iStart + samplesPerFile
    fileRows = rows[iStart:iEnd]
    # The last file takes all remaining rows
    if file >= (FILE_COUNT-1):
        fileRows = rows[iStart:]
    # build the audio: lay the clips end-to-end in one sprite file
    instructions = []
    ms = 0
    for row in fileRows:
        instructions.append({
            "ms": ms,
            "filename": row["filename"],
            "start": row["start"],
            "dur": row["dur"]
        })
        sprites[row["index"]] = [file, ms, row["start"], row["dur"]]
        ms += row["dur"]
    # e.g. sprite.mp3 -> sprite.1.mp3, sprite.2.mp3, ... (zero-padded)
    outfilename = AUDIO_FILE.replace(".mp3", ".%s.mp3" % zeroPad(file+1, FILE_COUNT))
    if not os.path.isfile(outfilename) or OVERWRITE:
        if not args.IMAGE_ONLY:
            # Mix with one extra second of trailing silence
            mixAudio(instructions, ms+1000, outfilename)
    else:
        print("Already created %s" % outfilename)
    audioSpriteFiles.append(os.path.basename(outfilename))
if TYPE == "grid":
    testRow = rows[0]
    if "gridX" not in testRow or "gridY" not in testRow:
        print("You must run samples_to_grid.py first")
        sys.exit()
    # Filter out rows without valid gridX/gridY
    # NOTE(review): rows dropped here keep their 4-element entry in
    # `sprites`, which would make the (s[5], s[4]) sort below raise an
    # IndexError — confirm inputs always have valid grid positions.
    rows = [row for row in rows if row["gridY"] != "" and row["gridX"] != ""]
    # Sort by grid position (row-major order)
    rows = sorted(rows, key=lambda s: (s["gridY"], s["gridX"]))
    rows = addGridPositions(rows, GRID_W, IMAGE_W, IMAGE_H)
    for i, row in enumerate(rows):
        # Append the sprite's normalized (x, y) position within the image
        sprites[row["index"]] += [round(1.0*row["x"]/IMAGE_W, 3), round(1.0*row["y"]/IMAGE_H, 3)]
# otherwise, just do a cloud: place each cell by two normalized properties
else:
    values1 = [row[PROP1] for row in rows]
    values2 = [row[PROP2] for row in rows]
    range1 = (min(values1), max(values1))
    range2 = (min(values2), max(values2))
    for i, row in enumerate(rows):
        nx = norm(row[PROP1], range1)
        # Flip y so larger PROP2 values appear toward the top of the image
        ny = 1.0 - norm(row[PROP2], range2)
        x = roundInt((IMAGE_W - CELL_W) * nx)
        y = roundInt((IMAGE_H - CELL_H) * ny)
        rows[i]["x"] = x
        rows[i]["y"] = y
        rows[i]["width"] = CELL_W
        rows[i]["height"] = CELL_H
        sprites[row["index"]] += [round(1.0*x/IMAGE_W, 3), round(1.0*y/IMAGE_H, 3)]
rows = addIndices(rows, "gridIndex")
# Lookup tables so each sprite stores compact indices instead of strings
filenames = sorted(unique([os.path.basename(row["filename"]) for row in rows]))
notes = sorted(unique([row["note"] for row in rows]))
for i, row in enumerate(rows):
    # add label
    # label = os.path.basename(row["filename"]) + " " + formatSeconds(row["start"]/1000.0) + ", index: %s" % row["gridIndex"]
    filename = os.path.basename(row["filename"])
    filenameIndex = filenames.index(filename)
    hz = round(row['hz'], 1)
    noteIndex = notes.index(row['note'])
    sprites[row["index"]] += [filenameIndex, hz, noteIndex]
    # kind of a hack: only take one frame at time
    rows[i]["start"] = row["t"]
    rows[i]["dur"] = 1
# sort sprites and remove positions: order by (y, x), then drop the
# position fields (indices 4-5), leaving
# [fileIndex, offsetMs, sourceStartMs, durMs, filenameIndex, hz, noteIndex]
sprites = sorted(sprites, key=lambda s: (s[5], s[4]))
sprites = [s[:4] + s[6:] for s in sprites]
print("Generating image...")
if OVERWRITE or not os.path.isfile(IMAGE_FILE):
    if args.IMAGE_SPRITE_MODE == "audio":
        # Audio collections: render each sample's audio fingerprint as its
        # sprite cell instead of a video frame.
        loaded, fingerprints = loadCacheFile(args.FINGERPRINTS_FILE)
        if loaded:
            # order the fingerprints according to row order
            sortedFingerprints = []
            bgcolors = None
            if args.COLORFUL_IMAGES:
                bgcolors = []
                rows = addNormalizedValues(rows, 'color', 'nr')
                rows = addNormalizedValues(rows, 'color2', 'ng')
                rows = addNormalizedValues(rows, 'color3', 'nb')
            for i, row in enumerate(rows):
                sortedFingerprints.append(fingerprints[row["index"]])
                if args.COLORFUL_IMAGES:
                    # Scale the normalized color components to 0-255 RGB
                    rgb = (roundInt(row['nr']*255), roundInt(row['ng']*255), roundInt(row['nb']*255))
                    bgcolors.append(rgb)
            audioFingerprintsToImage(sortedFingerprints, IMAGE_FILE, cols=GRID_W, rows=GRID_H, width=IMAGE_W, height=IMAGE_H, bgcolors=bgcolors)
        else:
            # Bug fix: this previously referenced args.CACHE_FILE, which is
            # not a defined argument and would itself raise AttributeError.
            print("Could not load cache file %s" % args.FINGERPRINTS_FILE)
    else:
        # Video collections: grab one frame per sample and composite them.
        clips = samplesToClips(rows)
        pixelData = loadVideoPixelData(clips, fps=FPS, cacheDir=CACHE_DIR, verifyData=False)
        clipsToFrame({
            "filename": IMAGE_FILE,
            "overwrite": OVERWRITE,
            "width": IMAGE_W,
            "height": IMAGE_H,
            "ms": 0,
            "verbose": True
        }, clips, pixelData)

# Write json sprite file (manifest consumed by the UI)
jsonData = {}
jsonData["audioSpriteFiles"] = audioSpriteFiles
jsonData["sprites"] = sprites
jsonData["filenames"] = filenames
jsonData["notes"] = notes
jsonData["image"] = os.path.basename(IMAGE_FILE)
jsonData["width"] = IMAGE_W
jsonData["height"] = IMAGE_H
jsonData["cellW"] = CELL_W
jsonData["cellH"] = CELL_H
writeJSON(MANIFEST_FILE, jsonData)
| 40.992157 | 173 | 0.671195 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.