hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf167990db65d81f29e03b6d6f97601659d690c | 6,287 | py | Python | libs/search_wiz.py | sebastian-code/wiznet-conf | 27b296759ad12a75fed783c74c59120b2f054fb1 | [
"CC-BY-3.0"
] | null | null | null | libs/search_wiz.py | sebastian-code/wiznet-conf | 27b296759ad12a75fed783c74c59120b2f054fb1 | [
"CC-BY-3.0"
] | null | null | null | libs/search_wiz.py | sebastian-code/wiznet-conf | 27b296759ad12a75fed783c74c59120b2f054fb1 | [
"CC-BY-3.0"
] | null | null | null | class WizSearch(object):
DEVICE_TYPES = {
"wiz1000": WIZ1000,
"wiz1x0sr": WIZ1x0SR,
}
def __init__(self, address="192.168.11.255",
broadcast=False,
bind_address="0.0.0.0",
device_type="wiz1000",
allowed_mac=None,
search_password="wiznet", timeout=2.0):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
if broadcast:
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, True)
s.bind((
bind_address,
WizSearch.DEVICE_TYPES[device_type].UDP_LOCAL_PORT,
))
self.device_s = s
self.search_password = search_password
self.timeout = timeout
self.address = (
address,
WizSearch.DEVICE_TYPES[device_type].UDP_REMOTE_PORT,
)
self.device_type = device_type
self._devices_list = []
self.allowed_mac = allowed_mac or []
self.broadcast = broadcast
    def sendto(self, data):
        """Send one raw command packet to the configured device address.

        Only the 4-character command header is logged, not the payload.
        """
        logger.debug("sendto %s" % (data[:4],))
        self.device_s.sendto(data, self.address)
def recvfrom(self, size=1500):
data, addr = self.device_s.recvfrom(size)
if not self.broadcast and addr != self.address:
raise WizSearchException(
"Unexpected packet recevied from %s, expected was %s" % (
addr, self.address))
logger.debug("recvfrom: %s" % (data[:4]))
return data
def get_devices(self):
devices = {}
for device in self._devices_list:
if device.mac in devices:
raise WizSearchException(
"Multiple devices found with mac '%s'" % (
device.mac,
))
devices[device.mac] = device
return devices
def update(self):
"""
Search devices. timeout is expressed in seconds.
"""
self._devices_list = []
self.sendto("FIND%-8s" % (self.search_password,))
start = time.time()
while start + self.timeout > time.time():
rfds, _, _ = select.select([self.device_s], [], [], 0.5)
for sock in rfds:
data = self.recvfrom()
if data[0:4] in ("IMIN", "SETC"):
try:
dev = WizSearch.DEVICE_TYPES[self.device_type](data[4:])
# devices.append(self.extract_IMIN(data, wiztype))
if not self.allowed_mac or dev.mac in self.allowed_mac:
self._devices_list.append(dev)
except:
logger.exception("parsing error.")
if not self._devices_list:
logger.error("Timeout, no devices found")
return self._devices_list
def send_config(self, device):
data = device.pack()
self.sendto("SETT%s" % (data,))
ack = self.recvfrom()
if ack[:4] != "SETC":
logger.error("Unexpected data '%s'" % (data[:4]))
if ack[4:] != data:
logger.error("ACK failed")
else:
logger.debug("ACK sucess")
def set_options(self, **kwargs):
devices = self.get_devices()
for dev in devices.values():
for opt, val in kwargs.items():
dev.set_option(opt, val)
if kwargs:
self.send_config(dev)
else:
dev.print_config()
    @staticmethod
    def main():
        """Command-line entry point: parse options, search for WIZnet devices
        and either print their configuration or push new option values.
        """
        parser = OptionParser()
        parser.add_option(
            "-b", dest="broadcast_address",
            action="store",
            help="Broadcast address",
        )
        parser.add_option(
            "-a", dest="address",
            help="Device IP address",
        )
        parser.add_option(
            "--device-type",
            choices=["wiz1000", "wiz1x0sr"],
            default="wiz1000",
            help="Device type",
        )
        # NOTE(review): help-text typo ("theses mac address") left as-is;
        # runtime strings are not changed in a documentation pass.
        parser.add_option(
            "-m", dest="mac_list",
            help="Limit actions to theses mac address",
        )
        parser.add_option(
            "-s", dest="device_search_password",
            default="wiznet",
            help="Search password",
        )
        # Generate options based on fields descriptions
        # Each field tuple is (name, type, ...) -- one CLI flag per field.
        fields = WIZ1000._basic_fields + WIZ1000._extended_fields
        for field in fields:
            option = "--%s" % (field[0].replace("_", "-"),)
            kwargs = {}
            if field[1] == "bool":
                kwargs["action"] = "store_true"
            if field[1] == "short":
                kwargs["type"] = "int"
            if field[1] == "dictvalues":
                # Enumerated field: restrict to the declared values and list
                # them in the --help output.
                choices = field[2].values()
                if isinstance(choices[0], int):
                    kwargs["type"] = "int"
                else:
                    kwargs["choices"] = choices
                kwargs["help"] = ",".join(["%s" % (v,) for v in choices])
            parser.add_option(option, dest=field[0], **kwargs)
            if field[1] == "bool":
                # For boolean field, add --no-option
                kwargs["action"] = "store_false"
                parser.add_option("--no-%s" % (field[0].replace("_", "-"),),
                                  dest=field[0], **kwargs)
        options, _ = parser.parse_args()
        # Collect only the field options that were explicitly set.
        kwargs = {}
        for field in fields:
            value = getattr(options, field[0])
            if value is not None:
                kwargs[field[0]] = value
        search_kwargs = {
            "broadcast": True,
            "address": "192.168.11.255",
            "device_type": options.device_type,
            "search_password": options.device_search_password,
        }
        if options.mac_list:
            search_kwargs["allowed_mac"] = options.mac_list.split(',')
        if options.broadcast_address:
            search_kwargs["address"] = options.broadcast_address
        if options.address:
            # An explicit device address implies unicast mode.
            search_kwargs["address"] = options.address
            search_kwargs["broadcast"] = False
        searcher = WizSearch(**search_kwargs)
        searcher.update()
        searcher.set_options(**kwargs)
        # | 34.927778 | 80 | 0.511373 |  (dataset-dump residue, not code)
acf167b17dd7e65e5e762abe3afb06349d4551c3 | 61,642 | py | Python | brian2/tests/test_morphology.py | SimonAltrogge/brian2 | 6463c368a8277041051bf5ae4816f0dd5b6e057c | [
"BSD-2-Clause"
] | 674 | 2015-01-14T11:05:39.000Z | 2022-03-29T04:53:50.000Z | brian2/tests/test_morphology.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 937 | 2015-01-05T13:24:22.000Z | 2022-03-25T13:10:13.000Z | brian2/tests/test_morphology.py | JongwanKim2090/brian2 | c212a57cb992b766786b5769ebb830ff12d8a8ad | [
"BSD-2-Clause"
] | 237 | 2015-01-05T13:54:16.000Z | 2022-03-15T22:16:32.000Z |
import pytest
from numpy.testing import assert_equal
import tempfile
import os
from brian2.spatialneuron import *
from brian2.units import um, cm, second, DimensionMismatchError
from brian2 import numpy as np
from brian2.tests.utils import assert_allclose
@pytest.mark.codegen_independent
def test_attributes_soma():
    """Check the geometrical attributes of an isolated spherical soma."""
    soma = Soma(diameter=10*um)
    assert isinstance(soma, Morphology)
    # Single compartment
    assert soma.n == 1
    assert soma.total_sections == 1
    assert soma.total_compartments == 1
    with pytest.raises(TypeError):
        len(soma)  # ambiguous
    # Compartment attributes
    assert_equal(soma.diameter, [10]*um)
    assert_equal(soma.length, [10]*um)
    assert_equal(soma.distance, [0]*um)
    assert_equal(soma.end_distance, 0 * um)
    # A soma should not contribute axial resistance along the cable
    assert soma.r_length_1 > 1*cm
    assert soma.r_length_2 > 1*cm
    # Sphere: surface pi*d^2, volume pi*d^3/6
    assert_equal(soma.area, np.pi*soma.diameter**2)
    assert_allclose(soma.volume, 1.0/6.0*np.pi*(10*um)**3)
    # No coordinates were specified
    assert soma.start_x is None
    assert soma.start_y is None
    assert soma.start_z is None
    assert soma.x is None
    assert soma.y is None
    assert soma.z is None
    assert soma.end_x is None
    assert soma.end_y is None
    assert soma.end_z is None
@pytest.mark.codegen_independent
def test_attributes_soma_coordinates():
    """A soma given coordinates keeps its size; coordinates are absolute."""
    # Specify only one of the coordinates
    xyz = {'x', 'y', 'z'}
    for coord in xyz:
        kwds = {coord: 5*um}
        soma = Soma(diameter=10*um, **kwds)
        # Length shouldn't change (not defined by coordinates but by the diameter)
        assert_equal(soma.length, [10]*um)
        assert_equal(soma.distance, [0]*um)
        # Coordinates should be specified now, with 0 values for the other
        # coordinates
        for other_coord in xyz - {coord}:
            assert_equal(getattr(soma, f"start_{other_coord}"), [0]*um)
            assert_equal(getattr(soma, other_coord), [0]*um)
            assert_equal(getattr(soma, f"end_{other_coord}"), [0]*um)
        assert_equal(getattr(soma, f"start_{coord}"), [5]*um)
        assert_equal(getattr(soma, coord), [5]*um)
        assert_equal(getattr(soma, f"end_{coord}"), [5]*um)
    # Specify all coordinates
    soma = Soma(diameter=10*um, x=1*um, y=2*um, z=3*um)
    # Length shouldn't change (not defined by coordinates but by the diameter)
    assert_equal(soma.length, [10]*um)
    assert_equal(soma.distance, [0]*um)
    # start/mid/end coincide for a point-like soma
    assert_equal(soma.start_x, 1*um)
    assert_equal(soma.x, 1*um)
    assert_equal(soma.end_x, 1*um)
    assert_equal(soma.start_y, 2*um)
    assert_equal(soma.y, 2*um)
    assert_equal(soma.end_y, 2*um)
    assert_equal(soma.start_z, 3*um)
    assert_equal(soma.z, 3*um)
    assert_equal(soma.end_z, 3*um)
@pytest.mark.codegen_independent
def test_attributes_cylinder():
    """Check per-compartment attributes of a 10-compartment cylinder."""
    n = 10
    cylinder = Cylinder(n=n, diameter=10*um, length=200*um)
    assert isinstance(cylinder, Morphology)
    # Single section with 10 compartments
    assert cylinder.n == n
    assert cylinder.total_sections == 1
    assert cylinder.total_compartments == n
    with pytest.raises(TypeError):
        len(cylinder)  # ambiguous
    # Compartment attributes: total length is divided evenly,
    # distance is measured at each compartment midpoint
    assert_equal(cylinder.diameter, np.ones(n)*10*um)
    assert_equal(cylinder.length, np.ones(n)*20*um)
    assert_equal(cylinder.distance, np.arange(n)*20*um + 10*um)
    assert_equal(cylinder.end_distance, 200 * um)
    # TODO: r_length
    # Lateral surface and volume of each cylindrical compartment
    assert_allclose(cylinder.area, np.pi*cylinder.diameter*cylinder.length)
    assert_allclose(cylinder.volume, 1.0/4.0*np.pi*cylinder.diameter**2*cylinder.length)
    # No coordinates were specified
    assert cylinder.start_x is None
    assert cylinder.start_y is None
    assert cylinder.start_z is None
    assert cylinder.x is None
    assert cylinder.y is None
    assert cylinder.z is None
    assert cylinder.end_x is None
    assert cylinder.end_y is None
    assert cylinder.end_z is None
@pytest.mark.codegen_independent
def test_attributes_cylinder_coordinates():
    """Cylinder defined by end-point coordinates instead of a length."""
    # Specify only the end-point of the section
    n = 10
    # Specify only one of the coordinates
    xyz = {'x', 'y', 'z'}
    for coord in xyz:
        kwds = {coord: [0, 200]*um}
        cylinder = Cylinder(n=n, diameter=10*um, **kwds)
        assert_equal(cylinder.diameter, np.ones(n)*10*um)
        assert_equal(cylinder.length, np.ones(n)*20*um)
        assert_equal(cylinder.distance, np.arange(n)*20*um + 10*um)
        assert_equal(cylinder.end_distance, 200 * um)
        # Coordinates should be specified now, with 0 values for the other
        # coordinates
        for other_coord in xyz - {coord}:
            assert_equal(getattr(cylinder, f"start_{other_coord}"), np.zeros(n)*um)
            assert_equal(getattr(cylinder, other_coord), np.zeros(n)*um)
            assert_equal(getattr(cylinder, f"end_{other_coord}"), np.zeros(n)*um)
        assert_equal(getattr(cylinder, f"start_{coord}"), np.arange(n)*20*um)
        assert_equal(getattr(cylinder, coord), np.arange(n)*20*um + 10*um)
        assert_equal(getattr(cylinder, f"end_{coord}"), np.arange(n)*20*um + 20*um)
    # Specify all coordinates: diagonal of total length 200 um
    val = [0, 200.0/np.sqrt(3.0)]*um
    cylinder = Cylinder(n=n, diameter=10*um, x=val, y=val, z=val)
    assert_equal(cylinder.diameter, np.ones(n)*10*um)
    assert_allclose(cylinder.length, np.ones(n)*20*um)
    assert_allclose(cylinder.distance, np.arange(n)*20*um + 10*um)
    assert_allclose(cylinder.end_distance, 200 * um)
    for coord in ['x', 'y', 'z']:
        assert_allclose(getattr(cylinder, f"start_{coord}"), np.arange(n)*val[1]/n)
        assert_allclose(getattr(cylinder, coord), np.arange(n)*val[1]/n + 0.5*val[1]/n)
        assert_allclose(getattr(cylinder, f"end_{coord}"), np.arange(n)*val[1]/n + val[1]/n)
@pytest.mark.codegen_independent
def test_attributes_section():
    """A Section with constant diameter behaves like a cylinder."""
    n = 10
    # No difference to a cylinder
    sec = Section(n=n, diameter=np.ones(n+1)*10*um, length=np.ones(n)*20*um)
    cyl = Cylinder(n=1, diameter=10*um, length=0*um)  # dummy cylinder
    # Attach the section so that it has a parent (required for sections)
    cyl.child = sec
    assert isinstance(sec, Morphology)
    # Single section with 10 compartments
    assert sec.n == n
    assert sec.total_sections == 1
    assert sec.total_compartments == n
    with pytest.raises(TypeError):
        len(sec)  # ambiguous
    # Compartment attributes
    assert_allclose(sec.diameter, np.ones(n)*10*um)
    assert_allclose(sec.length, np.ones(n)*20*um)
    assert_allclose(sec.distance, np.arange(n)*20*um + 10*um)
    assert_allclose(sec.end_distance, 200 * um)
    # TODO: r_length
    # Frustum lateral area reduces to the cylinder formula here
    assert_allclose(sec.area,
                    np.pi*0.5*(sec.start_diameter + sec.end_diameter)*sec.length)
    assert_allclose(sec.volume, 1.0/4.0*np.pi*sec.diameter**2*sec.length)
    # No coordinates were specified
    assert sec.start_x is None
    assert sec.start_y is None
    assert sec.start_z is None
    assert sec.x is None
    assert sec.y is None
    assert sec.z is None
    assert sec.end_x is None
    assert sec.end_y is None
    assert sec.end_z is None
@pytest.mark.codegen_independent
def test_attributes_section_coordinates_single():
    """Section defined by a single varying coordinate (like a cylinder)."""
    # Specify only the end-point of the section (no difference to cylinder)
    n = 10
    # Specify only one of the coordinates
    xyz = {'x', 'y', 'z'}
    for coord in xyz:
        kwds = {coord: np.linspace(0*um, 200*um, n+1)}
        sec = Section(n=n, diameter=np.ones(n+1)*10*um, **kwds)
        cyl = Cylinder(n=1, diameter=10*um, length=0*um)  # dummy cylinder
        cyl.child = sec
        assert_equal(sec.diameter, np.ones(n)*10*um)
        assert_equal(sec.length, np.ones(n)*20*um)
        assert_equal(sec.distance, np.arange(n)*20*um + 10*um)
        assert_equal(sec.end_distance, 200 * um)
        # Coordinates should be specified now, with 0 values for the other
        # coordinates
        for other_coord in xyz - {coord}:
            assert_equal(getattr(sec, f"start_{other_coord}"), np.zeros(n)*um)
            assert_equal(getattr(sec, other_coord), np.zeros(n)*um)
            assert_equal(getattr(sec, f"end_{other_coord}"), np.zeros(n)*um)
        assert_equal(getattr(sec, f"start_{coord}"), np.arange(n)*20*um)
        assert_equal(getattr(sec, coord), np.arange(n)*20*um + 10*um)
        assert_equal(getattr(sec, f"end_{coord}"), np.arange(n)*20*um + 20*um)
    # Specify all coordinates: diagonal of total length 200 um
    val = 200.0/np.sqrt(3.0)*um
    sec = Section(n=n, diameter=np.ones(n+1)*10*um,
                  x=np.linspace(0*um, val, n+1),
                  y=np.linspace(0*um, val, n+1),
                  z=np.linspace(0*um, val, n+1))
    cyl = Cylinder(n=1, diameter=10*um, length=0*um)
    cyl.child = sec
    assert_equal(sec.diameter, np.ones(n)*10*um)
    assert_allclose(sec.length, np.ones(n)*20*um)
    assert_allclose(sec.distance, np.arange(n)*20*um + 10*um)
    assert_allclose(sec.end_distance, 200 * um)
    for coord in ['x', 'y', 'z']:
        assert_allclose(getattr(sec, f"start_{coord}"), np.arange(n)*val/n)
        assert_allclose(getattr(sec, coord), np.arange(n)*val/n + 0.5*val/n)
        assert_allclose(getattr(sec, f"end_{coord}"), np.arange(n)*val/n + val/n)
@pytest.mark.codegen_independent
def test_attributes_section_coordinates_all():
    """Section with full 3D coordinates (one segment along each axis)."""
    n = 3
    # Specify all coordinates
    sec = Section(n=n, diameter=[10, 10, 10, 10]*um,
                  x=[10, 11, 11, 11]*um,
                  y=[100, 100, 101, 101]*um,
                  z=[1000, 1000, 1000, 1001]*um)
    assert_equal(sec.diameter, np.ones(n)*10*um)
    assert_allclose(sec.length, np.ones(n)*um)
    assert_allclose(sec.distance, np.arange(n)*um + .5*um)
    assert_allclose(sec.end_distance, 3 * um)
    # Each compartment moves 1 um along exactly one axis
    assert_allclose(sec.start_x, [10, 11, 11]*um)
    assert_allclose(sec.x, [10.5, 11, 11]*um)
    assert_allclose(sec.end_x, [11, 11, 11]*um)
    assert_allclose(sec.start_y, [100, 100, 101]*um)
    assert_allclose(sec.y, [100, 100.5, 101]*um)
    assert_allclose(sec.end_y, [100, 101, 101]*um)
    assert_allclose(sec.start_z, [1000, 1000, 1000]*um)
    assert_allclose(sec.z, [1000, 1000, 1000.5]*um)
    assert_allclose(sec.end_z, [1000, 1000, 1001]*um)
    # Specify varying diameters
    sec = Section(n=n, diameter=[20, 10, 5, 2.5]*um,
                  x=[0, 1, 1, 1]*um, y=[0, 0, 1, 1]*um, z=[0, 0, 0, 1]*um)
    assert_allclose(sec.start_diameter, [20, 10, 5]*um)
    # diameter at midpoint
    assert_allclose(sec.diameter, 0.5*(sec.start_diameter + sec.end_diameter))
    assert_allclose(sec.end_diameter, [10, 5, 2.5]*um)
    # TODO: Check area and volume
def _check_tree_cables(morphology, coordinates=False):
    """Verify the branched-cable tree built by the ``test_tree_cables_*`` tests.

    The tree is: a main cable ('2'-indexable children 'L'/'R' map to
    '1'/'2'), with the right branch splitting again into '21'/'22'.
    If ``coordinates`` is True, absolute 3D coordinates are checked as well.
    """
    # number of compartments per section
    assert morphology.n == 10
    assert morphology['1'].n == 5
    assert morphology['2'].n == 5
    assert morphology['21'].n == 5
    assert morphology['22'].n == 5
    # number of compartments per subtree
    assert morphology.total_compartments == 30
    assert morphology['1'].total_compartments == 5
    assert morphology['2'].total_compartments == 15
    assert morphology['21'].total_compartments == 5
    assert morphology['22'].total_compartments == 5
    # number of sections per subtree
    assert morphology.total_sections == 5
    assert morphology['1'].total_sections == 1
    assert morphology['2'].total_sections == 3
    assert morphology['21'].total_sections == 1
    assert morphology['22'].total_sections == 1
    # Check that distances (= distance to root at electrical midpoint)
    # correctly follow the tree structure
    assert_allclose(morphology.distance, np.arange(10) * 10 * um + 5 * um)
    assert_allclose(morphology['2'].distance,
                    100 * um + np.arange(5) * 10 * um + 5 * um)
    assert_allclose(morphology['21'].distance,
                    150 * um + np.arange(5) * 10 * um + 5 * um)
    assert_allclose(morphology.end_distance, 100 * um)
    assert_allclose(morphology['1'].end_distance, 200 * um)
    assert_allclose(morphology['2'].end_distance, 150 * um)
    assert_allclose(morphology['21'].end_distance, 200 * um)
    assert_allclose(morphology['22'].end_distance, 200 * um)
    # Check that section diameters are correctly inherited from the parent
    # sections
    assert_allclose(morphology['1'].start_diameter, [10, 8, 6, 4, 2] * um)
    assert_allclose(morphology['22'].start_diameter, [5, 4, 3, 2, 1] * um)
    if coordinates:
        # Coordinates should be absolute
        # section: cable
        assert_allclose(morphology.start_x, np.arange(10) * 10 * um)
        assert_allclose(morphology.x, np.arange(10) * 10 * um + 5 * um)
        assert_allclose(morphology.end_x, np.arange(10) * 10 * um + 10 * um)
        assert_allclose(morphology.y, np.zeros(10) * um)
        assert_allclose(morphology.z, np.zeros(10) * um)
        # section: cable['1'] -- runs diagonally in the x-y plane
        step = 20 / np.sqrt(2) * um
        assert_allclose(morphology['1'].start_x, 100 * um + np.arange(5) * step)
        assert_allclose(morphology['1'].x, 100 * um + np.arange(5) * step + step/2)
        assert_allclose(morphology['1'].end_x, 100 * um + np.arange(5) * step + step)
        assert_allclose(morphology['1'].start_y, np.arange(5) * step)
        assert_allclose(morphology['1'].y, np.arange(5) * step + step/2)
        assert_allclose(morphology['1'].end_y, np.arange(5) * step + step)
        assert_allclose(morphology['1'].z, np.zeros(5) * um)
        # section: cable['2'] -- diagonal with negative y direction
        step = 10 / np.sqrt(2) * um
        assert_allclose(morphology['2'].start_x, 100 * um + np.arange(5) * step)
        assert_allclose(morphology['2'].x, 100 * um + np.arange(5) * step + step / 2)
        assert_allclose(morphology['2'].end_x, 100 * um + np.arange(5) * step + step)
        assert_allclose(morphology['2'].start_y, -np.arange(5) * step)
        assert_allclose(morphology['2'].y, -(np.arange(5) * step + step / 2))
        assert_allclose(morphology['2'].end_y, -(np.arange(5) * step + step))
        assert_allclose(morphology['2'].z, np.zeros(5) * um)
        # section: cable ['21'] -- continues in x-z plane, y stays constant
        step = 10 / np.sqrt(2) * um
        assert_allclose(morphology['21'].start_x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step)
        assert_allclose(morphology['21'].x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(
                            5) * step + step / 2)
        assert_allclose(morphology['21'].end_x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(
                            5) * step + step)
        assert_allclose(morphology['21'].start_y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['21'].y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['21'].end_y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['21'].start_z, np.arange(5) * step)
        assert_allclose(morphology['21'].z, np.arange(5) * step + step / 2)
        assert_allclose(morphology['21'].end_z, np.arange(5) * step + step)
        # section: cable['22'] -- mirror of '21' with negative z direction
        step = 10 / np.sqrt(2) * um
        assert_allclose(morphology['22'].start_x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step)
        assert_allclose(morphology['22'].x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step/2)
        assert_allclose(morphology['22'].end_x,
                        100 * um + 50 / np.sqrt(2) * um + np.arange(
                            5) * step + step)
        assert_allclose(morphology['22'].start_y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['22'].y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['22'].end_y, -np.ones(5) * 50 / np.sqrt(2) * um)
        assert_allclose(morphology['22'].start_z, -np.arange(5) * step)
        assert_allclose(morphology['22'].z, -(np.arange(5) * step + step/2))
        assert_allclose(morphology['22'].end_z, -(np.arange(5) * step + step))
@pytest.mark.codegen_independent
def test_tree_cables_schematic():
    """Build the branched-cable tree schematically (lengths, no coordinates)."""
    cable = Cylinder(n=10, diameter=10*um, length=100*um)
    cable.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um, length=np.ones(5)*20*um)  # tapering truncated cones
    cable.R = Cylinder(n=5, diameter=5*um, length=50*um)
    cable.RL = Cylinder(n=5, diameter=2.5*um, length=50*um)
    cable.RR = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um, length=np.ones(5)*10*um)
    _check_tree_cables(cable)
@pytest.mark.codegen_independent
def test_tree_cables_coordinates():
    """Build the same branched-cable tree from 3D coordinates."""
    # The lengths of the sections should be identical to the previous test
    cable = Cylinder(n=10, x=[0, 100]*um, diameter=10*um)
    cable.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      x=np.linspace(0, 100, 6)/np.sqrt(2)*um,
                      y=np.linspace(0, 100, 6)/np.sqrt(2)*um)
    cable.R = Cylinder(n=5, diameter=5*um, x=[0, 50]*um/np.sqrt(2),
                       y=[0, -50]*um/np.sqrt(2))
    cable.RL = Cylinder(n=5, diameter=2.5*um,
                        x=[0, 50]*um/np.sqrt(2),
                        z=[0, 50]*um/np.sqrt(2))
    cable.RR = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                       x=np.linspace(0, 50, 6)*um/np.sqrt(2),
                       z=np.linspace(0, -50, 6)*um/np.sqrt(2))
    _check_tree_cables(cable, coordinates=True)
@pytest.mark.codegen_independent
def test_tree_cables_from_points():
    """Build the branched-cable tree from a list of SWC-like point tuples.

    Each tuple is (index, name, x, y, z, diameter, parent_index); a parent
    of -1 marks the root point.
    """
    # The coordinates should be identical to the previous test
    points = [  # cable
              (1, None, 0, 0, 0, 10, -1),
              (2, None, 10, 0, 0, 10, 1),
              (3, None, 20, 0, 0, 10, 2),
              (4, None, 30, 0, 0, 10, 3),
              (5, None, 40, 0, 0, 10, 4),
              (6, None, 50, 0, 0, 10, 5),
              (7, None, 60, 0, 0, 10, 6),
              (8, None, 70, 0, 0, 10, 7),
              (9, None, 80, 0, 0, 10, 8),
              (10, None, 90, 0, 0, 10, 9),
              (11, None, 100, 0, 0, 10, 10),
              # cable.L (using automatic names)
              (12, None, 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8, 11),
              (13, None, 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6, 12),
              (14, None, 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4, 13),
              (15, None, 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2, 14),
              (16, None, 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0, 15),
              # cable.R (using automatic names)
              (17, None, 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5, 11),
              (18, None, 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5, 17),
              (19, None, 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5, 18),
              (20, None, 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5, 19),
              (21, None, 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5, 20),
              # cable.RL (using explicit names)
              (22, 'L', 100+60/np.sqrt(2), -50/np.sqrt(2), 10/np.sqrt(2), 2.5, 21),
              (23, 'L', 100+70/np.sqrt(2), -50/np.sqrt(2), 20/np.sqrt(2), 2.5, 22),
              (24, 'L', 100+80/np.sqrt(2), -50/np.sqrt(2), 30/np.sqrt(2), 2.5, 23),
              (25, 'L', 100+90/np.sqrt(2), -50/np.sqrt(2), 40/np.sqrt(2), 2.5, 24),
              (26, 'L', 100+100/np.sqrt(2), -50/np.sqrt(2), 50/np.sqrt(2), 2.5, 25),
              # cable.RR (using explicit names)
              (27, 'R', 100+60/np.sqrt(2), -50/np.sqrt(2), -10/np.sqrt(2), 4, 21),
              (28, 'R', 100+70/np.sqrt(2), -50/np.sqrt(2), -20/np.sqrt(2), 3, 27),
              (29, 'R', 100+80/np.sqrt(2), -50/np.sqrt(2), -30/np.sqrt(2), 2, 28),
              (30, 'R', 100+90/np.sqrt(2), -50/np.sqrt(2), -40/np.sqrt(2), 1, 29),
              (31, 'R', 100+100/np.sqrt(2), -50/np.sqrt(2), -50/np.sqrt(2), 0, 30),
    ]
    cable = Morphology.from_points(points)
    # Check that the names are used
    assert cable.L.n == 5
    assert cable.R.n == 5
    assert cable.RL.n == 5
    assert cable.RR.n == 5
    _check_tree_cables(cable, coordinates=True)
def test_tree_cables_from_swc():
    """Load the branched-cable tree from an SWC file.

    The coordinates should be identical to ``test_tree_cables_coordinates``.
    Note that SWC stores the *radius*, hence the halved values.
    """
    swc_content = """
# Test file
1 0 0 0 0 5 -1
2 0 10 0 0 5 1
3 0 20 0 0 5 2
4 0 30 0 0 5 3
5 0 40 0 0 5 4
6 0 50 0 0 5 5
7 0 60 0 0 5 6
8 0 70 0 0 5 7
9 0 80 0 0 5 8
10 0 90 0 0 5 9
11 0 100 0 0 5 10
12 2 114.14213562373095 14.142135623730949 0 4 11
13 2 128.2842712474619 28.284271247461898 0 3 12
14 2 142.42640687119285 42.426406871192846 0 2 13
15 2 156.5685424949238 56.568542494923797 0 1 14
16 2 170.71067811865476 70.710678118654741 0 0 15
17 2 107.07106781186548 -7.0710678118654746 0 2.5 11
18 2 114.14213562373095 -14.142135623730949 0 2.5 17
19 2 121.21320343559643 -21.213203435596423 0 2.5 18
20 2 128.2842712474619 -28.284271247461898 0 2.5 19
21 2 135.35533905932738 -35.35533905932737 0 2.5 20
22 2 142.42640687119285 -35.35533905932737 7.0710678118654746 1.25 21
23 2 149.49747468305833 -35.35533905932737 14.142135623730949 1.25 22
24 2 156.5685424949238 -35.35533905932737 21.213203435596423 1.25 23
25 2 163.63961030678928 -35.35533905932737 28.284271247461898 1.25 24
26 2 170.71067811865476 -35.35533905932737 35.35533905932737 1.25 25
27 2 142.42640687119285 -35.35533905932737 -7.0710678118654746 2 21
28 2 149.49747468305833 -35.35533905932737 -14.142135623730949 1.5 27
29 2 156.5685424949238 -35.35533905932737 -21.213203435596423 1 28
30 2 163.63961030678928 -35.35533905932737 -28.284271247461898 0.5 29
31 2 170.71067811865476 -35.35533905932737 -35.35533905932737 0 30
"""
    # tempfile.mktemp is deprecated and race-prone; mkstemp atomically
    # creates the file and returns an open descriptor. Keep the '.swc'
    # suffix so Morphology.from_file dispatches on the extension.
    fd, tmp_filename = tempfile.mkstemp(suffix='cable_morphology.swc')
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(swc_content)
        cable = Morphology.from_file(tmp_filename)
    finally:
        os.remove(tmp_filename)
    _check_tree_cables(cable, coordinates=True)
def _check_tree_soma(morphology, coordinates=False, use_cylinders=True):
    """Verify the soma-with-two-branches tree built by ``test_tree_soma_*``.

    ``use_cylinders=False`` skips the midpoint-coordinate checks for the
    right branch (which is a Section, not a Cylinder, when built from
    points/SWC).
    """
    # number of compartments per section
    assert morphology.n == 1
    assert morphology['1'].n == 5
    assert morphology['2'].n == 5
    # number of compartments per subtree
    assert morphology.total_compartments == 11
    assert morphology['1'].total_compartments == 5
    assert morphology['2'].total_compartments == 5
    # number of sections per subtree
    assert morphology.total_sections == 3
    assert morphology['1'].total_sections == 1
    assert morphology['2'].total_sections == 1
    assert_allclose(morphology.diameter, [30]*um)
    # Check that distances (= distance to root at midpoint)
    # correctly follow the tree structure
    # Note that the soma does add nothing to the distance
    assert_equal(morphology.distance, 0 * um)
    assert_allclose(morphology['1'].distance, np.arange(5)*20*um + 10*um)
    assert_allclose(morphology['2'].distance, np.arange(5)*10*um + 5*um)
    assert_allclose(morphology.end_distance, 0 * um)
    assert_allclose(morphology['1'].end_distance, 100 * um)
    assert_allclose(morphology['2'].end_distance, 50 * um)
    assert_allclose(morphology.diameter, 30*um)
    assert_allclose(morphology['1'].start_diameter, [8, 8, 6, 4, 2]*um)
    assert_allclose(morphology['1'].diameter, [8, 7, 5, 3, 1]*um)
    assert_allclose(morphology['1'].end_diameter, [8, 6, 4, 2, 0]*um)
    assert_allclose(morphology['2'].start_diameter, np.ones(5) * 5*um)
    assert_allclose(morphology['2'].diameter, np.ones(5) * 5*um)
    assert_allclose(morphology['2'].end_diameter, np.ones(5) * 5*um)
    if coordinates:
        # Coordinates should be absolute
        # section: soma (a point at x=100um)
        assert_allclose(morphology.start_x, 100*um)
        assert_allclose(morphology.x, 100*um)
        assert_allclose(morphology.end_x, 100*um)
        assert_allclose(morphology.y, 0*um)
        assert_allclose(morphology.z, 0*um)
        # section: cable['1'] -- diagonal in the x-y plane
        step = 20 / np.sqrt(2) * um
        assert_allclose(morphology['1'].start_x, 100 * um + np.arange(5) * step)
        assert_allclose(morphology['1'].x, 100 * um + np.arange(5) * step + step/2)
        assert_allclose(morphology['1'].end_x, 100 * um + np.arange(5) * step + step)
        assert_allclose(morphology['1'].start_y, np.arange(5) * step)
        assert_allclose(morphology['1'].y, np.arange(5) * step + step/2)
        assert_allclose(morphology['1'].end_y, np.arange(5) * step + step)
        assert_allclose(morphology['1'].z, np.zeros(5) * um)
        # section: cable['2'] -- midpoint checks only for cylinder sections
        step = 10 / np.sqrt(2) * um
        assert_allclose(morphology['2'].start_x, 100 * um + np.arange(5) * step)
        if use_cylinders:
            assert_allclose(morphology['2'].x, 100 * um + np.arange(5) * step + step / 2)
        assert_allclose(morphology['2'].end_x, 100 * um + np.arange(5) * step + step)
        assert_allclose(morphology['2'].start_y, -np.arange(5) * step)
        if use_cylinders:
            assert_allclose(morphology['2'].y, -(np.arange(5) * step + step / 2))
        assert_allclose(morphology['2'].end_y, -(np.arange(5) * step + step))
        if use_cylinders:
            assert_allclose(morphology['2'].z, np.zeros(5) * um)
@pytest.mark.codegen_independent
def test_tree_soma_schematic():
    """Build the soma-with-two-branches tree schematically (no coordinates)."""
    soma = Soma(diameter=30*um)
    soma.L = Section(n=5, diameter=[8, 8, 6, 4, 2, 0]*um,
                     length=np.ones(5)*20*um)  # tapering truncated cones
    soma.R = Cylinder(n=5, diameter=5*um, length=50*um)
    _check_tree_soma(soma)
@pytest.mark.codegen_independent
def test_tree_soma_coordinates():
    """Build the same soma tree from 3D coordinates."""
    soma = Soma(diameter=30*um, x=100*um)
    soma.L = Section(n=5, diameter=[8, 8, 6, 4, 2, 0]*um,
                     x=np.linspace(0, 100, 6)/np.sqrt(2)*um,
                     y=np.linspace(0, 100, 6)/np.sqrt(2)*um)  # tapering truncated cones
    soma.R = Cylinder(n=5, diameter=5*um,
                      x=[0, 50]*um/np.sqrt(2), y=[0, -50]*um/np.sqrt(2))
    _check_tree_soma(soma, coordinates=True)
@pytest.mark.codegen_independent
def test_tree_soma_from_points():
    """Build the soma tree from point tuples (1-point soma)."""
    # The coordinates should be identical to the previous test
    points = [  # soma
              (1, 'soma', 100, 0, 0, 30, -1),
              # soma.L
              (2, 'L', 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8, 1),
              (3, 'L', 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6, 2),
              (4, 'L', 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4, 3),
              (5, 'L', 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2, 4),
              (6, 'L', 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0, 5),
              # soma.R
              (7, 'R', 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5, 1),
              (8, 'R', 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5, 7),
              (9, 'R', 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5, 8),
              (10, 'R', 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5, 9),
              (11, 'R', 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5, 10),
    ]
    cable = Morphology.from_points(points)
    _check_tree_soma(cable, coordinates=True, use_cylinders=False)
@pytest.mark.codegen_independent
def test_tree_soma_from_points_3_point_soma():
    """Build the soma tree from points using the 3-point soma convention."""
    # The coordinates should be identical to the previous test
    points = [  # soma: center point plus two points at +/- radius along y
              (1, 'soma', 100, 0, 0, 30, -1),
              (2, 'soma', 100, 15, 0, 30, 1),
              (3, 'soma', 100, -15, 0, 30, 1),
              # soma.L
              (4, 'L', 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8, 1),
              (5, 'L', 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6, 4),
              (6, 'L', 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4, 5),
              (7, 'L', 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2, 6),
              (8, 'L', 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0, 7),
              # soma.R
              (9, 'R', 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5, 1),
              (10, 'R', 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5, 9),
              (11, 'R', 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5, 10),
              (12, 'R', 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5, 11),
              (13, 'R', 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5, 12),
    ]
    cable = Morphology.from_points(points)
    _check_tree_soma(cable, coordinates=True, use_cylinders=False)
    # The first compartment should be a spherical soma!
    assert isinstance(cable, Soma)
@pytest.mark.codegen_independent
def test_tree_soma_from_points_3_point_soma_incorrect():
    """Malformed 3-point somata (wrong diameter/coordinates) must raise."""
    # Inconsistent diameters
    points = [  # soma
              (1, 'soma', 100, 0, 0, 30, -1),
              (2, 'soma', 100, 15, 0, 28, 1),
              (3, 'soma', 100, -15, 0, 30, 1),
              # soma.L
              (4, 'L', 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8, 1),
              (5, 'L', 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6, 4),
              (6, 'L', 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4, 5),
              (7, 'L', 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2, 6),
              (8, 'L', 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0, 7)
    ]
    with pytest.raises(ValueError):
        Morphology.from_points(points)
    # Inconsistent coordinates (second point not at distance radius)
    points = [  # soma
              (1, 'soma', 100, 0, 0, 30, -1),
              (2, 'soma', 100, 15, 0, 30, 1),
              (3, 'soma', 100, -16, 0, 30, 1),
              # soma.L
              (4, 'L', 100 + 20 / np.sqrt(2), 20 / np.sqrt(2), 0, 8, 1),
              (5, 'L', 100 + 40 / np.sqrt(2), 40 / np.sqrt(2), 0, 6, 4),
              (6, 'L', 100 + 60 / np.sqrt(2), 60 / np.sqrt(2), 0, 4, 5),
              (7, 'L', 100 + 80 / np.sqrt(2), 80 / np.sqrt(2), 0, 2, 6),
              (8, 'L', 100 + 100 / np.sqrt(2), 100 / np.sqrt(2), 0, 0, 7)
    ]
    with pytest.raises(ValueError):
        Morphology.from_points(points)
@pytest.mark.codegen_independent
def test_tree_soma_from_swc():
    """Load a soma with two processes from a SWC file and verify it with the
    shared ``_check_tree_soma`` helper."""
    swc_content = """
    # Test file
    1 1 100 0 0 15 -1
    2 2 114.14213562373095 14.142135623730949 0 4 1
    3 2 128.2842712474619 28.284271247461898 0 3 2
    4 2 142.42640687119285 42.426406871192846 0 2 3
    5 2 156.5685424949238 56.568542494923797 0 1 4
    6 2 170.71067811865476 70.710678118654741 0 0 5
    7 2 107.07106781186548 -7.0710678118654746 0 2.5 1
    8 2 114.14213562373095 -14.142135623730949 0 2.5 7
    9 2 121.21320343559643 -21.213203435596423 0 2.5 8
    10 2 128.2842712474619 -28.284271247461898 0 2.5 9
    11 2 135.35533905932738 -35.35533905932737 0 2.5 10
    """
    # tempfile.mktemp is deprecated and race-prone (another process can grab
    # the name before we create the file) -- mkstemp creates the file itself.
    fd, tmp_filename = tempfile.mkstemp(suffix='cable_morphology.swc')
    os.close(fd)  # the file is re-opened by name below
    try:
        with open(tmp_filename, 'w') as f:
            f.write(swc_content)
        soma = Morphology.from_file(tmp_filename)
    finally:
        # remove the temporary file even if parsing fails
        os.remove(tmp_filename)
    _check_tree_soma(soma, coordinates=True, use_cylinders=False)
@pytest.mark.codegen_independent
def test_tree_soma_from_swc_3_point_soma():
    """Load a morphology whose soma uses the 3-point convention (type 1
    sample plus two symmetric children) from a SWC file."""
    swc_content = """
    # Test file
    1 1 100 0 0 15 -1
    2 1 100 15 0 15 1
    3 1 100 -15 0 15 1
    4 2 114.14213562373095 14.142135623730949 0 4 1
    5 2 128.2842712474619 28.284271247461898 0 3 4
    6 2 142.42640687119285 42.426406871192846 0 2 5
    7 2 156.5685424949238 56.568542494923797 0 1 6
    8 2 170.71067811865476 70.710678118654741 0 0 7
    9 2 107.07106781186548 -7.0710678118654746 0 2.5 1
    10 2 114.14213562373095 -14.142135623730949 0 2.5 9
    11 2 121.21320343559643 -21.213203435596423 0 2.5 10
    12 2 128.2842712474619 -28.284271247461898 0 2.5 11
    13 2 135.35533905932738 -35.35533905932737 0 2.5 12
    """
    # tempfile.mktemp is deprecated and race-prone -- mkstemp creates the
    # file itself, so the name cannot be stolen by another process.
    fd, tmp_filename = tempfile.mkstemp(suffix='cable_morphology.swc')
    os.close(fd)  # the file is re-opened by name below
    try:
        with open(tmp_filename, 'w') as f:
            f.write(swc_content)
        soma = Morphology.from_file(tmp_filename)
    finally:
        # remove the temporary file even if parsing fails
        os.remove(tmp_filename)
    _check_tree_soma(soma, coordinates=True, use_cylinders=False)
@pytest.mark.codegen_independent
def test_construction_incorrect_arguments():
    """Constructors must reject arguments of the wrong type/shape/unit.

    Note: the 2-D array cases previously used ``np.ones(3, 2)`` --
    there the ``2`` is interpreted as a *dtype* argument, not as a second
    dimension, so numpy (not the tested constructor) raised the TypeError.
    They now use shape tuples, e.g. ``np.ones((3, 2))``, so the error
    actually comes from the constructor's argument validation.
    """
    ### Morphology
    dummy_self = Soma(10*um) # To allow testing of Morphology.__init__
    with pytest.raises(TypeError):
        Morphology.__init__(dummy_self, n=1.5)
    with pytest.raises(ValueError):
        Morphology.__init__(dummy_self, n=0)
    with pytest.raises(TypeError):
        Morphology.__init__(dummy_self, 'filename.swc')
    ### Soma
    with pytest.raises(DimensionMismatchError):
        Soma(10)
    with pytest.raises(TypeError):
        Soma([10, 20]*um)
    with pytest.raises(TypeError):
        Soma(x=[10, 20]*um)
    with pytest.raises(TypeError):
        Soma(y=[10, 20]*um)
    with pytest.raises(TypeError):
        Soma(z=[10, 20]*um)
    with pytest.raises(DimensionMismatchError):
        Soma(x=10)
    with pytest.raises(DimensionMismatchError):
        Soma(y=10)
    with pytest.raises(DimensionMismatchError):
        Soma(z=10)
    ### Cylinder
    # Diameter can only be single value
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=[10, 20]*um, length=100*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=[10, 20, 30]*um, length=100*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=np.ones((3, 2))*um, length=100*um)
    # Length can only be single value
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, length=[10, 20]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, length=[10, 20, 30]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, length=np.ones((3, 2))*um)
    # Coordinates have to be two values
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, x=[10]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, x=[10, 20, 30]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, y=[10]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, y=[10, 20, 30]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, z=[10]*um)
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, z=[10, 20, 30]*um)
    # Need either coordinates or lengths
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um)
    # But not both
    with pytest.raises(TypeError):
        Cylinder(n=3, diameter=10*um, length=30*um, x=[0, 30]*um)
    ### Section
    # Diameter have to be n+1 values
    with pytest.raises(TypeError):
        Section(n=3, diameter=10*um, length=np.ones(3)*10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=[10, 20, 30]*um, length=np.ones(3)*10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones((4, 2))*um, length=np.ones(3)*10*um)
    # Length have to be n values
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, length=10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, length=[10, 20]*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, length=np.ones((3, 2))*um)
    # Coordinates have to be n+1 values
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, x=10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, x=[10, 20, 30]*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, y=10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, y=[10, 20, 30]*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, z=10*um)
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, z=[10, 20, 30]*um)
    # Need either coordinates or lengths
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um)
    # But not both
    with pytest.raises(TypeError):
        Section(n=3, diameter=np.ones(4)*10*um, length=[10, 20, 30]*um,
                x=[0, 10, 20, 30]*um)
@pytest.mark.codegen_independent
def test_from_points_minimal():
    """A single soma point is enough to build a one-compartment morphology."""
    soma_point = (1, 'soma', 10, 20, 30, 30, -1)
    morph = Morphology.from_points([soma_point])
    assert morph.total_compartments == 1
    # Diameter and coordinates are taken directly from the point
    assert_allclose(morph.diameter, 30*um)
    for attribute, expected in [('x', 10*um), ('y', 20*um), ('z', 30*um)]:
        assert_allclose(getattr(morph, attribute), expected)
@pytest.mark.codegen_independent
def test_from_points_incorrect():
    """Structurally invalid point lists must raise a ``ValueError``."""
    # Duplicate index (index 2 is used twice)
    points = [
        (1, None, 0, 0, 0, 10, -1),
        (2, None, 10, 0, 0, 10, 1),
        (2, None, 20, 0, 0, 10, 2),
    ]
    # Point 3 refers to itself as its parent
    points2 = [
        (1, None, 0, 0, 0, 10, -1),
        (2, None, 10, 0, 0, 10, 1),
        (3, None, 20, 0, 0, 10, 3),
    ]
    # Point 3 refers to a non-existing parent (index 4)
    points3 = [
        (1, None, 0, 0, 0, 10, -1),
        (2, None, 10, 0, 0, 10, 1),
        (3, None, 20, 0, 0, 10, 4),
    ]
    # Tuples with 6 instead of 7 entries (type/name field is missing)
    points4 = [
        (1, 0, 0, 0, 10, -1),
        (2, 10, 0, 0, 10, 1),
        (3, 20, 0, 0, 10, 2),
    ]
    with pytest.raises(ValueError):
        Morphology.from_points(points)
    with pytest.raises(ValueError):
        Morphology.from_points(points2)
    with pytest.raises(ValueError):
        Morphology.from_points(points3)
    with pytest.raises(ValueError):
        Morphology.from_points(points4)
@pytest.mark.codegen_independent
def test_subtree_deletion():
    """Deleting a child (via attribute or item syntax) removes its whole
    subtree from the compartment count and from ``children``."""
    soma = Soma(diameter=30*um)
    first_dendrite = Cylinder(n=5, diameter=5*um, length=50*um)
    second_dendrite = Cylinder(n=5, diameter=5*um, length=50*um)
    second_dendrite.L = Cylinder(n=5, diameter=5*um, length=50*um)
    second_dendrite.R = Cylinder(n=5, diameter=5*um, length=50*um)
    soma.dend1 = first_dendrite
    soma.dend2 = second_dendrite
    soma.dend3 = Cylinder(n=5, diameter=5*um, length=50*um)
    soma.dend3.L = Cylinder(n=5, diameter=5*um, length=50*um)
    soma.dend3.L.L = Cylinder(n=5, diameter=5 * um, length=50 * um)
    # 1 (soma) + 7 cylinders with 5 compartments each
    assert soma.total_compartments == 36
    del soma.dend1
    assert soma.total_compartments == 31
    # Accessing or deleting the removed subtree again must fail
    with pytest.raises(AttributeError):
        soma.dend1
    with pytest.raises(AttributeError):
        delattr(soma, 'dend1')
    with pytest.raises(AttributeError):
        soma.__delitem__('dend1')
    assert first_dendrite not in soma.children
    # Deletion via item syntax removes dend2 and its two children (15 total)
    del soma['dend2']
    assert soma.total_compartments == 16
    with pytest.raises(AttributeError):
        soma.dend2
    assert second_dendrite not in soma.children
    # 'LL' is shorthand for the left child of the left child
    del soma.dend3.LL
    assert soma.total_compartments == 11
    with pytest.raises(AttributeError):
        soma.dend3.LL
    with pytest.raises(AttributeError):
        soma.dend3.L.L
@pytest.mark.codegen_independent
def test_subgroup_indices():
    """Indices of sub-sections, addressed by slice, position or plain index.

    The soma occupies index 0, ``L`` indices 1-10, ``LL`` indices 11-15.
    """
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15])
    # Slicing with quantities selects compartments by distance along the
    # section (each compartment of L is 1 um long)
    assert_equal(morpho.L.indices[3*um:5*um], [4, 5])
    assert_equal(morpho.L.indices[3*um:5*um],
                 morpho.L[3*um:5*um].indices[:])
    assert_equal(morpho.L.indices[:5*um], [1, 2, 3, 4, 5])
    assert_equal(morpho.L.indices[3*um:], [4, 5, 6, 7, 8, 9, 10])
    # A single position maps to the compartment containing it
    assert_equal(morpho.L.indices[3.5*um], 4)
    assert_equal(morpho.L.indices[3*um], 4)
    assert_equal(morpho.L.indices[3.9*um], 4)
    # Plain integers index compartments within the section
    assert_equal(morpho.L.indices[3], 4)
    assert_equal(morpho.L.indices[-1], 10)
    assert_equal(morpho.L.indices[3:5], [4, 5])
    assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10])
    assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5])
@pytest.mark.codegen_independent
def test_subgroup_attributes():
    """Attributes of a subgroup match the corresponding slice of the full
    section's attribute arrays; coordinates are None where not specified."""
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(x=[0, 5]*um, diameter=2*um, n=5)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    # Getting a single compartment by index
    assert_allclose(morpho.L[2].area, morpho.L.area[2])
    assert_allclose(morpho.L[2].volume, morpho.L.volume[2])
    assert_allclose(morpho.L[2].length, morpho.L.length[2])
    assert_allclose(morpho.L[2].r_length_1, morpho.L.r_length_1[2])
    assert_allclose(morpho.L[2].r_length_2, morpho.L.r_length_2[2])
    assert_allclose(morpho.L[2].distance, morpho.L.distance[2])
    assert_allclose(morpho.L[2].diameter, morpho.L.diameter[2])
    # L was created without coordinates, so none are available
    assert morpho.L[2].x is None
    assert morpho.L[2].y is None
    assert morpho.L[2].z is None
    assert morpho.L[2].start_x is None
    assert morpho.L[2].start_y is None
    assert morpho.L[2].start_z is None
    assert morpho.L[2].end_x is None
    assert morpho.L[2].end_y is None
    assert morpho.L[2].end_z is None
    # Getting a single compartment by position (LL has coordinates)
    assert_allclose(morpho.LL[1.5*um].area, morpho.LL.area[1])
    assert_allclose(morpho.LL[1.5*um].volume, morpho.LL.volume[1])
    assert_allclose(morpho.LL[1.5*um].length, morpho.LL.length[1])
    assert_allclose(morpho.LL[1.5*um].r_length_1, morpho.LL.r_length_1[1])
    assert_allclose(morpho.LL[1.5*um].r_length_2, morpho.LL.r_length_2[1])
    assert_allclose(morpho.LL[1.5*um].distance, morpho.LL.distance[1])
    assert_allclose(morpho.LL[1.5*um].diameter, morpho.LL.diameter[1])
    assert_allclose(morpho.LL[1.5*um].x, morpho.LL.x[1])
    assert_allclose(morpho.LL[1.5*um].y, morpho.LL.y[1])
    assert_allclose(morpho.LL[1.5*um].z, morpho.LL.z[1])
    assert_allclose(morpho.LL[1.5*um].start_x, morpho.LL.start_x[1])
    assert_allclose(morpho.LL[1.5*um].start_y, morpho.LL.start_y[1])
    assert_allclose(morpho.LL[1.5*um].start_z, morpho.LL.start_z[1])
    assert_allclose(morpho.LL[1.5*um].end_x, morpho.LL.end_x[1])
    assert_allclose(morpho.LL[1.5*um].end_y, morpho.LL.end_y[1])
    assert_allclose(morpho.LL[1.5*um].end_z, morpho.LL.end_z[1])
    # Getting several compartments by indices
    assert_allclose(morpho.right[3:6].area, morpho.right.area[3:6])
    assert_allclose(morpho.right[3:6].volume, morpho.right.volume[3:6])
    assert_allclose(morpho.right[3:6].length, morpho.right.length[3:6])
    assert_allclose(morpho.right[3:6].r_length_1, morpho.right.r_length_1[3:6])
    assert_allclose(morpho.right[3:6].r_length_2, morpho.right.r_length_2[3:6])
    assert_allclose(morpho.right[3:6].distance, morpho.right.distance[3:6])
    assert_allclose(morpho.right[3:6].diameter, morpho.right.diameter[3:6])
    # 'right' has no coordinates either
    assert morpho.right[3:6].x is None
    assert morpho.right[3:6].y is None
    assert morpho.right[3:6].z is None
    assert morpho.right[3:6].start_x is None
    assert morpho.right[3:6].start_y is None
    assert morpho.right[3:6].start_z is None
    assert morpho.right[3:6].end_x is None
    assert morpho.right[3:6].end_y is None
    assert morpho.right[3:6].end_z is None
    # Getting several compartments by position
    assert_allclose(morpho.L[3*um:5*um].distance, [3.5, 4.5]*um)
    assert_allclose(morpho.L[3.5*um:4.5*um].distance, [3.5, 4.5]*um)
@pytest.mark.codegen_independent
def test_subgroup_incorrect():
    """Invalid subgroup selections must raise the appropriate errors."""
    # Incorrect indexing
    morpho = Soma(diameter=30*um)
    morpho.L = Cylinder(length=10*um, diameter=1*um, n=10)
    morpho.LL = Cylinder(length=5*um, diameter=2*um, n=5)
    morpho.right = Cylinder(length=3*um, diameter=1*um, n=7)
    # Non-existing branch
    with pytest.raises(AttributeError):
        morpho.axon
    # Incorrect indexing
    #  wrong units or mixing units
    with pytest.raises(TypeError):
        morpho.L[3*second:5*second]
    with pytest.raises(TypeError):
        morpho.L[3.4:5.3]
    with pytest.raises(TypeError):
        morpho.L[3:5*um]
    with pytest.raises(TypeError):
        morpho.L[3*um:5]
    #  providing a step is not supported
    with pytest.raises(TypeError):
        morpho.L[3*um:5*um:2*um]
    with pytest.raises(TypeError):
        morpho.L[3:5:2]
    #  incorrect type
    with pytest.raises(TypeError):
        morpho.L[object()]
    #  out of range (positions and plain index)
    with pytest.raises(IndexError):
        morpho.L[-10*um]
    with pytest.raises(IndexError):
        morpho.L[15*um]
    with pytest.raises(IndexError):
        morpho.L[10]
@pytest.mark.codegen_independent
def test_topology():
    """``topology()`` lists one line per section, naming each section."""
    soma = Soma(diameter=30*um)
    soma.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                     length=np.ones(5)*20*um)  # tapering truncated cones
    soma.R = Cylinder(n=10, diameter=5*um, length=50*um)
    soma.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    soma.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                           length=np.ones(5)*10*um)
    str_topology = str(soma.topology())
    lines = [l for l in str_topology.split('\n') if len(l.strip())]
    assert len(lines) == 5  # one line for each section
    # The last entry previously was 'R.right' (missing the leading dot);
    # '.R.right' is the full name, consistent with the other entries.
    for line, name in zip(lines, ['root', '.L', '.R', '.R.left', '.R.right']):
        assert name in line
@pytest.mark.codegen_independent
def test_copy_section_soma():
    """``copy_section`` preserves a Soma's diameter, coordinates and type."""
    # Without coordinates, the copy has no coordinates either
    original = Soma(diameter=30*um)
    duplicate = original.copy_section()
    assert duplicate.diameter[0] == 30*um
    assert duplicate.x is None
    assert duplicate.y is None
    assert duplicate.z is None
    assert duplicate.type == 'soma'
    # With partial coordinates: given values are copied, the missing y is 0
    original = Soma(diameter=30*um, x=5*um, z=-10*um)
    duplicate = original.copy_section()
    assert duplicate.diameter[0] == 30*um
    assert_allclose(duplicate.x[0], 5*um)
    assert_allclose(duplicate.y[0], 0*um)
    assert_allclose(duplicate.z[0], -10*um)
    assert duplicate.type == 'soma'
@pytest.mark.codegen_independent
def test_copy_section_section():
    """``copy_section`` on a ``Section`` preserves diameters, lengths,
    compartment count, coordinates and type."""
    # No coordinates
    sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,
                  length=np.ones(5)*10*um, type='dend')
    sec_copy = sec.copy_section()
    assert_allclose(sec_copy.start_diameter, sec.start_diameter)
    assert_allclose(sec_copy.end_diameter, sec.end_diameter)
    assert_allclose(sec_copy.length, sec.length)
    assert sec_copy.n == sec.n
    assert sec_copy.x is None
    assert sec_copy.y is None
    assert sec_copy.z is None
    assert sec_copy.type == 'dend'
    # With coordinates (no type given, so the copy's type is None)
    sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,
                  x=[0, 1, 2, 3, 4, 5]*um,
                  y=[0, -1, -2, -3, -4, -5]*um)
    sec_copy = sec.copy_section()
    assert_allclose(sec_copy.start_diameter, sec.start_diameter)
    assert_allclose(sec_copy.end_diameter, sec.end_diameter)
    assert_allclose(sec_copy.length, sec.length)
    assert sec_copy.n == sec.n
    assert_allclose(sec_copy.x, sec.x)
    assert_allclose(sec_copy.y, sec.y)
    assert_allclose(sec_copy.z, sec.z)
    assert sec_copy.type is None
@pytest.mark.codegen_independent
def test_copy_section_cylinder():
    """``copy_section`` check that -- despite its name -- uses ``Section``.

    NOTE(review): this test constructs ``Section`` objects, nearly
    duplicating ``test_copy_section_section`` above; presumably ``Cylinder``
    was intended (copy-paste leftover) -- TODO confirm against upstream.
    """
    # no coordinates
    sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,
                  length=np.ones(5)*20*um, type='dend')
    sec_copy = sec.copy_section()
    assert_allclose(sec_copy.end_diameter, sec.end_diameter)
    assert_allclose(sec_copy.length, sec.length)
    assert sec_copy.n == sec.n
    assert sec_copy.x is None
    assert sec_copy.y is None
    assert sec_copy.z is None
    assert sec_copy.type == 'dend'
    # with coordinates (no type given, so the copy's type is None)
    sec = Section(diameter=[10, 5, 4, 3, 2, 1]*um, n=5,
                  x=[0, 1, 2, 3, 4, 5]*um, y=[0, -1, -2, -3, -4, -5]*um)
    sec_copy = sec.copy_section()
    assert_allclose(sec_copy.end_diameter, sec.end_diameter)
    assert_allclose(sec_copy.length, sec.length)
    assert sec_copy.n == sec.n
    assert_allclose(sec_copy.x, sec.x)
    assert_allclose(sec_copy.y, sec.y)
    assert_allclose(sec_copy.z, sec.z)
    assert sec_copy.type is None
def _check_length_coord_consistency(morph_with_coords):
    """Recursively verify that stored compartment lengths equal the
    Euclidean distances between consecutive coordinate points."""
    # A soma is a sphere -- the point-to-point length check does not apply
    if not isinstance(morph_with_coords, Soma):
        segment_vectors = np.diff(morph_with_coords.coordinates, axis=0)
        euclidean_lengths = np.sqrt((segment_vectors**2).sum(axis=1))
        assert_allclose(euclidean_lengths, morph_with_coords.length)
    for child in morph_with_coords.children:
        _check_length_coord_consistency(child)
@pytest.mark.codegen_independent
def test_generate_coordinates_deterministic():
    """Without randomness, ``generate_coordinates`` lays the morphology out
    in the x/y plane while preserving lengths and diameters."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates()
    assert morph_with_coords.total_compartments == morph.total_compartments
    assert morph_with_coords.total_sections == morph.total_sections
    for new, old in [(morph_with_coords, morph),
                     (morph_with_coords.L, morph.L),
                     (morph_with_coords.R, morph.R),
                     (morph_with_coords.R.left, morph.R.left),
                     (morph_with_coords.R.right, morph.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
        # The morphology should be in the x/y plane
        assert_equal(new.z, 0*um)
    _check_length_coord_consistency(morph_with_coords)
@pytest.mark.codegen_independent
def test_generate_coordinates_random_sections():
    """With section-level randomness, the generated coordinates must still be
    consistent with the stored lengths and diameters."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates(section_randomness=25)
    assert morph_with_coords.total_compartments == morph.total_compartments
    assert morph_with_coords.total_sections == morph.total_sections
    for new, old in [(morph_with_coords, morph),
                     (morph_with_coords.L, morph.L),
                     (morph_with_coords.R, morph.R),
                     (morph_with_coords.R.left, morph.R.left),
                     (morph_with_coords.R.right, morph.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
    _check_length_coord_consistency(morph_with_coords)
@pytest.mark.codegen_independent
def test_generate_coordinates_random_compartments():
    """With compartment-level randomness, the generated coordinates must
    still be consistent with the stored lengths and diameters."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates(compartment_randomness=15)
    assert morph_with_coords.total_compartments == morph.total_compartments
    assert morph_with_coords.total_sections == morph.total_sections
    for new, old in [(morph_with_coords, morph),
                     (morph_with_coords.L, morph.L),
                     (morph_with_coords.R, morph.R),
                     (morph_with_coords.R.left, morph.R.left),
                     (morph_with_coords.R.right, morph.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
    _check_length_coord_consistency(morph_with_coords)
@pytest.mark.codegen_independent
def test_generate_coordinates_random_all():
    """Section- and compartment-level randomness combined: coordinates must
    still be consistent with the stored lengths and diameters."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates(section_randomness=25,
                                                   compartment_randomness=15)
    assert morph_with_coords.total_compartments == morph.total_compartments
    assert morph_with_coords.total_sections == morph.total_sections
    for new, old in [(morph_with_coords, morph),
                     (morph_with_coords.L, morph.L),
                     (morph_with_coords.R, morph.R),
                     (morph_with_coords.R.left, morph.R.left),
                     (morph_with_coords.R.right, morph.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
    _check_length_coord_consistency(morph_with_coords)
@pytest.mark.codegen_independent
def test_generate_coordinates_no_overwrite():
    """Calling ``generate_coordinates`` on a morphology that already has
    coordinates must leave them untouched by default."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates(compartment_randomness=15)
    # This should not change anything because the morphology already has coordinates!
    morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,
                                                                compartment_randomness=15)
    for new, old in [(morph_with_coords2, morph_with_coords),
                     (morph_with_coords2.L, morph_with_coords.L),
                     (morph_with_coords2.R, morph_with_coords.R),
                     (morph_with_coords2.R.left, morph_with_coords.R.left),
                     (morph_with_coords2.R.right, morph_with_coords.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
        assert_allclose(new.x, old.x)
        assert_allclose(new.y, old.y)
        assert_allclose(new.z, old.z)
@pytest.mark.codegen_independent
def test_generate_coordinates_overwrite():
    """With ``overwrite_existing=True``, new random coordinates must replace
    the existing ones while lengths and diameters stay unchanged."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                            length=np.ones(5)*10*um)
    morph_with_coords = morph.generate_coordinates(compartment_randomness=15)
    # This should change things since we explicitly ask for it
    morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,
                                                                compartment_randomness=15,
                                                                overwrite_existing=True)
    for new, old in [# ignore the root compartment
                     (morph_with_coords2.L, morph_with_coords.L),
                     (morph_with_coords2.R, morph_with_coords.R),
                     (morph_with_coords2.R.left, morph_with_coords.R.left),
                     (morph_with_coords2.R.right, morph_with_coords.R.right)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
        # all coordinates differ from the previous random layout
        assert all(np.abs(new.x - old.x) > 0)
        assert all(np.abs(new.y - old.y) > 0)
        assert all(np.abs(new.z - old.z) > 0)
    _check_length_coord_consistency(morph_with_coords2)
@pytest.mark.codegen_independent
def test_generate_coordinates_mixed_overwrite():
    """Sections added after coordinate generation get coordinates on the
    next call, while already-placed sections keep theirs."""
    morph = Soma(diameter=30*um)
    morph.L = Section(n=5, diameter=[10, 8, 6, 4, 2, 0]*um,
                      length=np.ones(5)*20*um)  # tapering truncated cones
    morph.R = Cylinder(n=10, diameter=5*um, length=50*um)
    morph_with_coords = morph.generate_coordinates(section_randomness=25,
                                                   compartment_randomness=15)
    # The following just returns a copy, as all coordinates are already
    # specified
    morph_copy = morph_with_coords.generate_coordinates()
    # Add new sections that do not yet have coordinates
    morph_with_coords.R.left = Cylinder(n=10, diameter=2.5*um, length=50*um)
    morph_with_coords.R.right = Section(n=5, diameter=[5, 4, 3, 2, 1, 0]*um,
                                        length=np.ones(5)*10*um)
    # This only fills in coordinates for the new sections; the existing
    # sections keep their coordinates (checked below)
    morph_with_coords2 = morph_with_coords.generate_coordinates(section_randomness=25,
                                                                compartment_randomness=15)
    for new, old in [(morph_with_coords2, morph_with_coords),
                     (morph_with_coords2.L, morph_with_coords.L),
                     (morph_with_coords2.R, morph_with_coords.R)]:
        assert new.n == old.n
        assert_allclose(new.length, old.length)
        assert_allclose(new.diameter, old.diameter)
        assert_allclose(new.x, old.x)
        assert_allclose(new.y, old.y)
        assert_allclose(new.z, old.z)
    # The original morphology is untouched, the copy got coordinates
    assert morph_with_coords.R.left.x is None
    assert len(morph_with_coords2.R.left.x) == morph_with_coords2.R.left.n
    _check_length_coord_consistency(morph_with_coords2)
@pytest.mark.codegen_independent
def test_str_repr():
    """Smoke test: ``str``/``repr`` of morphologies (and of their children
    collections) return non-empty strings without raising."""
    examples = [Soma(diameter=30*um),
                Soma(diameter=30*um, x=5*um, y=10*um),
                Cylinder(n=5, diameter=10*um, length=50*um),
                Cylinder(n=5, diameter=10*um, x=[0, 50]*um),
                Section(n=5, diameter=[2.5, 5, 10, 5, 10, 5]*um, length=[10, 20, 5, 5, 10]*um),
                Section(n=5, diameter=[2.5, 5, 10, 5, 10, 5]*um, x=[0, 10, 30, 35, 40, 50]*um)]
    for example in examples:
        assert len(repr(example)) > 0
        assert len(str(example)) > 0
    # The children collection is printable before and after adding children
    parent = Soma(30*um)
    assert len(repr(parent.children)) > 0
    assert len(str(parent.children)) > 0
    parent.axon = Cylinder(1*um, n=10, length=100*um)
    parent.dend = Cylinder(1*um, n=10, length=50*um)
    assert len(repr(parent.children)) > 0
    assert len(str(parent.children)) > 0
# Allow running the whole test suite directly, without a test runner
if __name__ == '__main__':
    test_attributes_soma()
    test_attributes_soma_coordinates()
    test_attributes_cylinder()
    test_attributes_cylinder_coordinates()
    test_attributes_section()
    test_attributes_section_coordinates_single()
    test_attributes_section_coordinates_all()
    test_tree_cables_schematic()
    test_tree_cables_coordinates()
    test_tree_cables_from_points()
    test_tree_cables_from_swc()
    test_tree_soma_schematic()
    test_tree_soma_coordinates()
    test_tree_soma_from_points()
    test_tree_soma_from_points_3_point_soma()
    test_tree_soma_from_points_3_point_soma_incorrect()
    test_tree_soma_from_swc()
    test_tree_soma_from_swc_3_point_soma()
    test_construction_incorrect_arguments()
    test_from_points_minimal()
    test_from_points_incorrect()
    test_subtree_deletion()
    test_subgroup_indices()
    test_subgroup_attributes()
    test_subgroup_incorrect()
    test_topology()
    test_copy_section_soma()
    test_copy_section_section()
    test_copy_section_cylinder()
    test_generate_coordinates_deterministic()
    test_generate_coordinates_random_sections()
    test_generate_coordinates_random_compartments()
    test_generate_coordinates_random_all()
    test_generate_coordinates_no_overwrite()
    test_generate_coordinates_overwrite()
    test_generate_coordinates_mixed_overwrite()
    test_str_repr()
| 44.442682 | 112 | 0.603128 |
acf167f16fdc4e5477bddc0e565135dd68403778 | 1,350 | py | Python | examples/classifier.py | moi90/experitur | 47519b203f7558d6e2444aecdf01a180ee8a22ef | [
"MIT"
] | 3 | 2019-01-31T11:49:00.000Z | 2019-02-03T10:57:31.000Z | examples/classifier.py | moi90/experitur | 47519b203f7558d6e2444aecdf01a180ee8a22ef | [
"MIT"
] | 21 | 2019-06-24T14:01:04.000Z | 2021-03-28T10:11:30.000Z | examples/classifier.py | moi90/experitur | 47519b203f7558d6e2444aecdf01a180ee8a22ef | [
"MIT"
] | null | null | null | from sklearn import datasets, svm
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from experitur import Experiment, Trial
from experitur.parameters import Grid
@Grid({"svc_kernel": ["linear", "poly", "rbf", "sigmoid"]})
@Experiment()
def classifier_svm(trial: Trial):
    """Train an SVM digit classifier; the kernel is varied over a grid.

    Returns macro-averaged precision/recall/F-score and accuracy on the
    held-out second half of the digits dataset.
    """
    features, labels = datasets.load_digits(return_X_y=True)
    n_samples = len(features)
    # Flatten each image into a single feature vector
    features = features.reshape((n_samples, -1))
    # All trial parameters prefixed with "svc_" are forwarded to the SVC
    svc_parameters = trial.prefixed("svc_")
    classifier = svc_parameters.call(svm.SVC)
    # svc_parameters.call filled in unspecified parameters with defaults
    assert "svc_gamma" in trial
    assert trial["svc_gamma"] == "scale"
    print("Classifier:", classifier)
    # Fit on the first half of the data ...
    split = n_samples // 2
    classifier.fit(features[:split], labels[:split])
    # ... and evaluate on the second half
    expected = labels[split:]
    predicted = classifier.predict(features[split:])
    macro_prfs = precision_recall_fscore_support(expected, predicted, average="macro")
    result = dict(zip(("macro_precision", "macro_recall", "macro_f_score"), macro_prfs))
    result["accuracy"] = accuracy_score(expected, predicted)
    print(result)
    return result
| 27 | 88 | 0.696296 |
acf16ac55a8b698d54b89b749fd2bd7ebf39c0bc | 1,205 | py | Python | crptcmexchange/wsgi.py | ZhiHaoSun/GoogleHackthon | eba15ba236475a7b0fad297a22068f035bb389f9 | [
"MIT"
] | 1 | 2017-09-28T15:20:42.000Z | 2017-09-28T15:20:42.000Z | crptcmexchange/wsgi.py | ZhiHaoSun/GoogleHackthon | eba15ba236475a7b0fad297a22068f035bb389f9 | [
"MIT"
] | null | null | null | crptcmexchange/wsgi.py | ZhiHaoSun/GoogleHackthon | eba15ba236475a7b0fad297a22068f035bb389f9 | [
"MIT"
] | null | null | null | """
WSGI config for crptcmexchange project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# The settings module must be configured before any Django-dependent
# import below runs
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crptcmexchange.settings")
# Project-specific startup hook, executed once at WSGI load time
import crptcmexchange.startup as startup
startup.run()
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
acf16c2dc2b8ec61caaf11bb74bcaaeb450ad19b | 865 | py | Python | atom/nucleus/python/test/test_page_customer.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_page_customer.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | atom/nucleus/python/test/test_page_customer.py | sumit4-ttn/SDK | b3ae385e5415e47ac70abd0b3fdeeaeee9aa7cff | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.models.page_customer import PageCustomer # noqa: E501
from nucleus_api.rest import ApiException
class TestPageCustomer(unittest.TestCase):
    """Unit-test scaffolding for the ``PageCustomer`` model."""

    def setUp(self):
        """No fixtures are needed yet."""

    def tearDown(self):
        """Nothing to clean up."""

    def testPageCustomer(self):
        """Placeholder test for ``PageCustomer``.

        FIXME: construct the object with mandatory attributes using example
        values, e.g. ``nucleus_api.models.page_customer.PageCustomer()``.
        """
# Allow running this test module directly (instead of via a test runner)
if __name__ == '__main__':
    unittest.main()
| 21.097561 | 79 | 0.695954 |
acf16c6f04219aa3bd7eb9246edbe141e8cd1570 | 6,696 | py | Python | 3-ML-Ops/train-and-register-model.py | Pcoric/bert-stack-overflow | 2cec618b3c12aad72cf8d9921e1539945fd772bd | [
"MIT"
] | null | null | null | 3-ML-Ops/train-and-register-model.py | Pcoric/bert-stack-overflow | 2cec618b3c12aad72cf8d9921e1539945fd772bd | [
"MIT"
] | null | null | null | 3-ML-Ops/train-and-register-model.py | Pcoric/bert-stack-overflow | 2cec618b3c12aad72cf8d9921e1539945fd772bd | [
"MIT"
] | null | null | null | from azureml.pipeline.core.graph import PipelineParameter
from azureml.pipeline.steps import EstimatorStep, PythonScriptStep
from azureml.pipeline.core import Pipeline
from azureml.core.runconfig import RunConfiguration, CondaDependencies
from azureml.core import Dataset, Datastore
from azureml.train.dnn import TensorFlow
import os
import sys
from dotenv import load_dotenv
sys.path.insert(1, os.path.abspath("./3-ML-Ops/util")) # NOQA: E402
from workspace import get_workspace
from attach_compute import get_compute
from attach_aks import get_aks
def main():
    """Build, publish and submit the Azure ML train/evaluate pipeline.

    Reads all configuration (workspace, service-principal credentials,
    compute, script paths, experiment/pipeline names) from environment
    variables loaded via ``dotenv``, then:

    1. connects to the AML workspace and compute cluster,
    2. registers the blob datastore and file dataset (if needed),
    3. defines a TensorFlow Estimator training step and a Python
       evaluation step,
    4. publishes the pipeline under the current build id and submits it,
    5. looks up the AKS cluster used later for deployment.
    """
    load_dotenv()
    workspace_name = os.environ.get("BASE_NAME")+"-AML-WS"
    resource_group = "AML-RG-"+os.environ.get("BASE_NAME")
    subscription_id = os.environ.get("SUBSCRIPTION_ID")
    tenant_id = os.environ.get("TENANT_ID")
    app_id = os.environ.get("SP_APP_ID")
    app_secret = os.environ.get("SP_APP_SECRET")
    sources_directory_train = os.environ.get("SOURCES_DIR_TRAIN")
    train_script_path = os.environ.get("TRAIN_SCRIPT_PATH")
    evaluate_script_path = os.environ.get("EVALUATE_SCRIPT_PATH")
    vm_size = os.environ.get("AML_COMPUTE_CLUSTER_SKU")
    compute_name = os.environ.get("AML_COMPUTE_CLUSTER_NAME")
    aks_name = os.environ.get("AKS_CLUSTER_NAME")
    model_name = os.environ.get("MODEL_NAME")
    build_id = os.environ.get("BUILD_BUILDID")
    pipeline_name = os.environ.get("TRAINING_PIPELINE_NAME")
    experiment_name = os.environ.get("EXPERIMENT_NAME")

    # Get Azure machine learning workspace
    aml_workspace = get_workspace(
        workspace_name,
        resource_group,
        subscription_id,
        tenant_id,
        app_id,
        app_secret)
    print(aml_workspace)

    # Get Azure machine learning cluster
    aml_compute = get_compute(
        aml_workspace,
        compute_name,
        vm_size)
    if aml_compute is not None:
        print(aml_compute)

    # Run config used by the (non-estimator) evaluation step.
    run_config = RunConfiguration(conda_dependencies=CondaDependencies.create(
        conda_packages=['numpy', 'pandas',
                        'scikit-learn', 'keras'],
        pip_packages=['azure', 'azureml-sdk',
                      'azure-storage',
                      'azure-storage-blob',
                      'transformers>=2.1.1',
                      'tensorflow>=2.0.0',
                      'tensorflow-gpu>=2.0.0'])
    )
    run_config.environment.docker.enabled = True

    datastore_name = 'tfworld'
    container_name = 'azureml-blobstore-7c6bdd88-21fa-453a-9c80-16998f02935f'
    account_name = 'tfworld6818510241'
    # SECURITY NOTE(review): this SAS token is hard-coded and committed to
    # source control. It should be moved to an environment variable or a
    # secret store (e.g. Azure Key Vault) and the token rotated.
    sas_token = '?sv=2019-02-02&ss=bfqt&srt=sco&sp=rl&se=2020-06-01T14:18:31Z&st=2019-11-05T07:18:31Z&spr=https&sig=Z4JmM0V%2FQzoFNlWS3a3vJxoGAx58iCz2HAWtmeLDbGE%3D'  # noqa: E501

    # Reuse the datastore if it is already registered; otherwise register it.
    try:
        existing_datastore = Datastore.get(aml_workspace, datastore_name)
    except Exception:  # narrowed from a bare `except:` (was noqa: E722)
        existing_datastore = Datastore \
            .register_azure_blob_container(workspace=aml_workspace,
                                           datastore_name=datastore_name,
                                           container_name=container_name,
                                           account_name=account_name,
                                           sas_token=sas_token
                                           )

    azure_dataset = Dataset.File.from_files(
        path=(existing_datastore, 'azure-service-classifier/data'))
    azure_dataset = azure_dataset.register(
        workspace=aml_workspace,
        name='Azure Services Dataset',
        description='Dataset containing azure related posts on Stackoverflow',
        create_new_version=True)
    azure_dataset.to_path()
    input_data = azure_dataset.as_named_input('input_data1').as_mount(
        '/tmp/data')

    # Pipeline parameters (note: `model_name` is deliberately rebound from
    # the env-var string to a PipelineParameter with that string as default).
    model_name = PipelineParameter(
        name="model_name", default_value=model_name)
    max_seq_length = PipelineParameter(
        name="max_seq_length", default_value=128)
    learning_rate = PipelineParameter(
        name="learning_rate", default_value=3e-5)
    num_epochs = PipelineParameter(
        name="num_epochs", default_value=3)
    export_dir = PipelineParameter(
        name="export_dir", default_value="./outputs/exports")
    batch_size = PipelineParameter(
        name="batch_size", default_value=32)
    steps_per_epoch = PipelineParameter(
        name="steps_per_epoch", default_value=100)

    # initialize the TensorFlow estimator
    estimator = TensorFlow(
        source_directory=sources_directory_train,
        entry_script=train_script_path,
        compute_target=aml_compute,
        framework_version='2.0',
        use_gpu=True,
        pip_packages=[
            'transformers==2.0.0',
            'azureml-dataprep[fuse,pandas]==1.1.29'])

    train_step = EstimatorStep(
        name="Train Model",
        estimator=estimator,
        estimator_entry_script_arguments=[
            "--data_dir", input_data,
            "--max_seq_length", max_seq_length,
            "--learning_rate", learning_rate,
            "--num_epochs", num_epochs,
            "--export_dir", export_dir,
            "--batch_size", batch_size,
            "--steps_per_epoch", steps_per_epoch],
        compute_target=aml_compute,
        inputs=[input_data],
        allow_reuse=False,
    )
    print("Step Train created")

    evaluate_step = PythonScriptStep(
        name="Evaluate Model ",
        script_name=evaluate_script_path,
        compute_target=aml_compute,
        source_directory=sources_directory_train,
        arguments=[
            "--model_name", model_name,
            "--build_id", build_id,
        ],
        runconfig=run_config,
        allow_reuse=False,
    )
    print("Step Evaluate created")

    # Currently, the Evaluate step will automatically register
    # the model if it performs better. This step is based on a
    # previous version of the repo which utilized JSON files to
    # track evaluation results.
    evaluate_step.run_after(train_step)
    steps = [evaluate_step]

    train_pipeline = Pipeline(workspace=aml_workspace, steps=steps)
    train_pipeline.validate()
    published_pipeline = train_pipeline.publish(
        name=pipeline_name,
        description="Model training/retraining pipeline",
        version=build_id
    )
    print(f'Published pipeline: {published_pipeline.name}')
    print(f'for build {published_pipeline.version}')

    response = published_pipeline.submit(  # noqa: F841
        workspace=aml_workspace,
        experiment_name=experiment_name)

    # Get AKS cluster for deployment
    aks_compute = get_aks(
        aml_workspace,
        aks_name
    )
    if aks_compute is not None:
        print(aks_compute)


if __name__ == '__main__':
    main()
| 36.590164 | 179 | 0.658005 |
acf16da19ae49d2fffe13d4ca265c87d2220617a | 8,553 | py | Python | RoboFontProject.roboFontExt/lib/main.py | typemytype/roboFontProject | e45dd901c1de5e9c92fc898f9ecc1fe557b5a506 | [
"MIT"
] | 3 | 2015-09-10T18:31:58.000Z | 2018-12-31T04:53:54.000Z | RoboFontProject.roboFontExt/lib/main.py | typemytype/roboFontProject | e45dd901c1de5e9c92fc898f9ecc1fe557b5a506 | [
"MIT"
] | 2 | 2017-01-11T13:13:55.000Z | 2019-11-17T17:19:03.000Z | RoboFontProject.roboFontExt/lib/main.py | typemytype/projectRoboFontExtension | e45dd901c1de5e9c92fc898f9ecc1fe557b5a506 | [
"MIT"
] | null | null | null | from AppKit import *
import os
import vanilla.dialogs as dialogs
import vanilla
from mojo.events import addObserver
from mojo.UI import OpenGlyphWindow, OpenSpaceCenter, CurrentSpaceCenterWindow, OutputWindow
from lib.scripting.scriptTools import ScriptRunner
from lib.scripting.codeEditor import CodeEditor
from mojo.tools import CallbackWrapper
from plistlib import readPlist, writePlist
def OpenRoboFontProject(path):
    """Restore a saved RoboFont session from a ``.roboFontProject`` plist.

    Re-opens every document recorded in the project file, restores the
    frame of each of its windows (including Glyph and Space Center
    windows), restores the tool windows (Debug/Inspector), and finally
    runs the project's optional ``execute`` script.
    """
    root = os.path.dirname(path)
    project = readPlist(path)
    documentController = NSDocumentController.sharedDocumentController()
    delegate = NSApp().delegate()
    # Files that are already open are skipped below.
    openFileNames = [window.representedFilename() for window in NSApp().windows()]
    for fileName, data in project["documents"].items():
        # "untitled" is the sentinel key used by the save side for
        # windows that had no backing file.
        isUntitled = fileName == "untitled"
        if not isUntitled:
            # Paths may have been saved relative to the project file;
            # fall back to resolving them against its directory.
            if not os.path.exists(fileName):
                fileName = os.path.abspath(os.path.join(root, fileName))
                if not os.path.exists(fileName):
                    continue
            if fileName in openFileNames:
                continue
        # Open the FontWindow first so that glyph/space-center windows
        # can attach to an existing font.
        data.sort(key=lambda item: item.get("name") != "FontWindow")
        for windowData in data:
            name = windowData["windowName"]
            x, y, w, h = windowData["frame"]
            if isUntitled:
                # Recreate unsaved windows of the recorded kind.
                if name == "FontWindow":
                    RFont()
                elif name == "ScriptingWindow":
                    delegate.scriptingWindow_(None)
                elif name == "FeatureWindow":
                    delegate.newFeature_(None)
            else:
                url = NSURL.fileURLWithPath_(fileName)
                doc, error = documentController.openDocumentWithContentsOfURL_display_error_(url, True, None)
                if error:
                    # Fallback path when the document controller refuses
                    # the file; presumably lets RoboFont handle it itself.
                    delegate.application_openFile_(NSApp(), fileName)
                window = NSApp().mainWindow()
                vanillaWrapper = None
                if hasattr(window.delegate(), "vanillaWrapper"):
                    vanillaWrapper = window.delegate().vanillaWrapper()
                if vanillaWrapper:
                    font = CurrentFont()
                    if name == "GlyphWindow":
                        window = OpenGlyphWindow(font[windowData["glyphName"]], newWindow=True)
                        window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
                        continue
                    elif name == "SpaceCenter":
                        # Restore the Space Center text fields and size.
                        spaceCenter = OpenSpaceCenter(font)
                        spaceCenter.setPointSize(windowData["pointSize"])
                        spaceCenter.setPre(windowData["pre"])
                        spaceCenter.setAfter(windowData["after"])
                        spaceCenter.set(windowData["input"])
                        window = CurrentSpaceCenterWindow()
                        window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
                        continue
                window.setFrame_display_animate_(((x, y), (w, h)), True, False)
    # Restore floating tool windows.
    for windowData in project["toolWindows"]:
        name = windowData["windowName"]
        x, y, w, h = windowData["frame"]
        if name == "DebugWindow":
            window = OutputWindow()
            window.show()
            window.w.getNSWindow().setFrame_display_animate_(((x, y), (w, h)), True, False)
        elif name == "InspectorWindow":
            try:
                # a little bit hacky
                # will move to mojo.UI in the upcoming releases
                window = delegate._inspectorWindow.w.getNSWindow()
            except:
                window = None
            if window is None:
                delegate.openInspector_(None)
                window = delegate._inspectorWindow.w.getNSWindow()
            window.setFrame_display_animate_(((x, y), (w, h)), True, False)
    # Optional user script stored with the project; errors are reported
    # but must not abort the session restore.
    if "execute" in project:
        try:
            ScriptRunner(text=project["execute"])
        except:
            import traceback
            print(traceback.format_exc(5))
class SaveRoboFontProject(object):
    """Interactive "Save Project" command.

    Instantiating this class shows a save dialog (with an accessory view
    for options and an "execute on load" script editor), then serializes
    the current window layout to a ``.roboFontProject`` plist and stamps
    the file with a custom icon.
    """

    def __init__(self):
        # Build the accessory view shown inside the save panel.
        w, h = 550, 250
        self.view = vanilla.Group((0, 0, w, h))
        self.view.relative = vanilla.CheckBox((0, 3, 300, 22), "Use Relative Paths")
        self.view.info = vanilla.TextBox((0, 33, 300, 22), "Execute on load:")
        self.view.editor = CodeEditor((0, 60, w, h-70))
        view = self.view.getNSView()
        view.setFrame_(((0, 0), (w, h)))
        path = dialogs.putFile("Save RoboFont Project..", fileTypes=["roboFontProject"], accessoryView=view)
        if path:
            data = self.getData(path)
            writePlist(data, path)
            # Give the saved file the extension's custom icon.
            icon = NSImage.alloc().initByReferencingFile_(os.path.join(os.path.dirname(__file__), "roboFontProjectIcon.png"))
            ws = NSWorkspace.sharedWorkspace()
            ws.setIcon_forFile_options_(icon, path, 0)

    def getData(self, path):
        """Collect the serializable session state.

        Returns a dict with two keys: ``documents`` (window frames per
        open document, keyed by file path or ``"untitled"``) and
        ``toolWindows`` (Debug/Inspector frames), plus an optional
        ``execute`` script taken from the accessory editor.
        """
        toolWindows = list()
        documents = dict()
        untitled = list()
        relativePaths = self.view.relative.get()
        for document in NSApp().orderedDocuments():
            url = document.fileURL()
            fileName = None
            if url:
                fileName = url.path()
                if relativePaths and path:
                    # Store paths relative to the project file so the
                    # project survives moving the folder.
                    fileName = os.path.relpath(fileName, os.path.dirname(path))
            if fileName not in documents:
                documents[fileName] = []
            for windowController in document.windowControllers():
                window = windowController.window()
                (x, y), (w, h) = window.frame()
                data = dict()
                data["frame"] = x, y, w, h
                data["windowName"] = window.windowName()
                vanillaWrapper = None
                if hasattr(window.delegate(), "vanillaWrapper"):
                    vanillaWrapper = window.delegate().vanillaWrapper()
                if vanillaWrapper:
                    # Window-type specific extras needed to restore state.
                    if data["windowName"] == "GlyphWindow":
                        data["glyphName"] = vanillaWrapper.getGlyph().name
                    elif data["windowName"] == "SpaceCenter":
                        spaceCenter = vanillaWrapper.getSpaceCenter()
                        data["input"] = spaceCenter.get()
                        data["pre"] = spaceCenter.getPre()
                        data["after"] = spaceCenter.getAfter()
                        data["pointSize"] = spaceCenter.getPointSize()
                if fileName:
                    documents[fileName].append(data)
                else:
                    untitled.append(data)
        # Floating tool windows are not attached to documents.
        for window in NSApp().windows():
            if hasattr(window, "windowName"):
                if window.windowName() in ["DebugWindow", "InspectorWindow"]:
                    (x, y), (w, h) = window.frame()
                    data = dict()
                    data["frame"] = x, y, w, h
                    data["windowName"] = window.windowName()
                    toolWindows.append(data)
        documents["untitled"] = untitled
        info = dict(toolWindows=toolWindows, documents=documents)
        code = self.view.editor.get()
        if code:
            info["execute"] = code
        return info
# file handler
class ReadRoboFontProjectFile(object):
    """Subscribe to RoboFont's ``applicationOpenFile`` event so that
    opened/dropped ``.roboFontProject`` files are handled by this
    extension instead of RoboFont's default file handling."""

    def __init__(self):
        addObserver(self, "applicationOpenFile", "applicationOpenFile")

    def applicationOpenFile(self, notification):
        """Open the file when it is a RoboFont project.

        ``notification`` carries ``path``, ``ext`` and a mutable
        ``fileHandler`` dict; setting ``fileHandler["opened"]`` marks the
        file as consumed. Errors while restoring the project are printed
        but the file is still reported as handled.
        """
        path = notification["path"]
        ext = notification["ext"]
        fileHandler = notification["fileHandler"]
        if ext.lower() == ".robofontproject":
            try:
                OpenRoboFontProject(path)
            # Narrowed from a bare `except:` so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            except Exception:
                import traceback
                print(traceback.format_exc(5))
            fileHandler["opened"] = True


ReadRoboFontProjectFile()
# add to menu
class RoboFontProjectMenu(object):
    """Insert a "Save Project..." item into RoboFont's File menu.

    Instantiating the class performs the insertion once; it is a no-op
    if the File menu is missing or the item already exists (e.g. when
    the extension is reloaded).
    """

    def __init__(self):
        title = "Save Project..."
        mainMenu = NSApp().mainMenu()
        fileMenu = mainMenu.itemWithTitle_("File")
        if not fileMenu:
            return
        fileMenu = fileMenu.submenu()
        # Guard against double-insertion on extension reload.
        if fileMenu.itemWithTitle_(title):
            return
        index = fileMenu.indexOfItemWithTitle_("Revert to Saved")
        # Keep a reference to the target: NSMenuItem does not retain it.
        self.target = CallbackWrapper(self.callback)
        newItem = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(title, "action:", "")
        newItem.setTarget_(self.target)
        fileMenu.insertItem_atIndex_(newItem, index+1)

    def callback(self, sender):
        # Menu action: run the interactive save flow.
        SaveRoboFontProject()


RoboFontProjectMenu()
acf16dfcf1b122fa1de514b658fe68165f34ef93 | 1,139 | py | Python | beads_new.py | Forrest-HuYang/usaco | 97e1c8de3b3d08dcbef935849ca361550adf4c17 | [
"Apache-2.0"
] | 1 | 2019-10-04T08:30:36.000Z | 2019-10-04T08:30:36.000Z | beads_new.py | dazer-chen/usaco | 82673a2a235e7393b1f8f610925c1063149176dd | [
"Apache-2.0"
] | null | null | null | beads_new.py | dazer-chen/usaco | 82673a2a235e7393b1f8f610925c1063149176dd | [
"Apache-2.0"
] | 1 | 2019-10-04T08:30:26.000Z | 2019-10-04T08:30:26.000Z | """
ID: tony_hu1
PROG: beads
LANG: PYTHON3
"""
a = []
with open('beads.in') as filename:
for line in filename:
a.append(line.rstrip())
num_beads = int(a[0])
list_beads = a[1]*2
bl = [0]
rl = [0]
br = [0]
rr = [0]
for i in range(2*num_beads):
if list_beads[i] == 'w':
bl.append(bl[i]+1)
rl.append(rl[i]+1)
else:
if list_beads[i] == 'b':
bl.append(bl[i]+1)
else:
bl.append(0)
if list_beads[i] == 'r':
rl.append(rl[i]+1)
else:
rl.append(0)
if list_beads[2*num_beads-1-i] == 'w':
br.append(br[i]+1)
rr.append(rr[i]+1)
else:
if list_beads[2*num_beads-1-i] == 'b':
br.append(br[i]+1)
else:
br.append(0)
if list_beads[2*num_beads-1-i] == 'r':
rr.append(rr[i]+1)
else:
rr.append(0)
m = 0
for i in range(2*num_beads):
x = (max(bl[i+1],rl[i+1])+max(br[2*num_beads-1-i],rr[2*num_beads-1-i]))
m = max(x,m)
a = min(m,num_beads)
print(bl)
print(rl)
print(br)
print(rr)
fout = open ('beads.out', 'w')
fout.write(str(a) +'\n') | 21.903846 | 75 | 0.498683 |
acf16e2c325a5550782cd7295d240f61963e7022 | 11,706 | py | Python | wechatpy/work/client/api/tag.py | fuh/wechatpy | 83c8ca93acef4149c5e61e3726c89b82052f17c1 | [
"MIT"
] | 2,428 | 2015-07-04T08:55:29.000Z | 2020-03-16T03:11:22.000Z | wechatpy/work/client/api/tag.py | fuh/wechatpy | 83c8ca93acef4149c5e61e3726c89b82052f17c1 | [
"MIT"
] | 453 | 2015-06-18T10:39:34.000Z | 2020-03-16T05:12:37.000Z | wechatpy/work/client/api/tag.py | fuh/wechatpy | 83c8ca93acef4149c5e61e3726c89b82052f17c1 | [
"MIT"
] | 669 | 2015-06-18T10:08:12.000Z | 2020-03-14T15:35:34.000Z | # -*- coding: utf-8 -*-
from typing import Optional, List, Dict, Any
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatTag(BaseWeChatAPI):
    """WeChat Work (企业微信) contact-tag management API.

    Wraps the ``tag/*`` endpoints used to create, update and delete tags
    and to manage their members.  A tag belongs to the app that created
    it; only that app may add or remove members.

    https://developer.work.weixin.qq.com/document/path/90209
    """

    @staticmethod
    def _validate_tag_id(tag_id):
        """Raise ``ValueError`` if ``tag_id`` is negative."""
        if tag_id < 0:
            raise ValueError("tag id cannot be a negative integer")

    @staticmethod
    def _validate_tag_name(tag_name):
        """Raise ``ValueError`` if ``tag_name`` is longer than 32 characters."""
        if len(tag_name) > 32:
            raise ValueError("the length of the tag name cannot be more than 32 characters")

    def _build_members_data(
        self, tag_id: int, user_ids: Optional[List[str]], department_ids: Optional[List[int]]
    ) -> Dict[str, Any]:
        """Validate the arguments shared by :meth:`add_users` and
        :meth:`delete_users` and build the request payload.

        :raises ValueError: if ``tag_id`` is negative, if both lists are
            empty, or if a list exceeds the server limits (1000 users,
            100 departments per request).
        """
        self._validate_tag_id(tag_id)
        if not user_ids and not department_ids:
            raise ValueError("user_ids and department_ids cannot be empty at the same time")
        if user_ids is not None and len(user_ids) > 1000:
            raise ValueError("the length of the user_ids cannot be greater than 1000")
        if department_ids is not None and len(department_ids) > 100:
            raise ValueError("the length of the department_ids cannot be greater than 100")
        data: Dict[str, Any] = {"tagid": tag_id}
        if user_ids:
            data["userlist"] = user_ids
        if department_ids:
            data["partylist"] = department_ids
        return data

    def create(self, name: str, tag_id: Optional[int] = None) -> dict:
        """Create a tag.

        https://developer.work.weixin.qq.com/document/path/90210

        The created tag belongs to the calling app.  The total number of
        tags may not exceed 3000.

        :param name: tag name, at most 32 characters (CJK or latin),
            must not collide with an existing tag name.
        :param tag_id: optional non-negative tag id; when omitted the
            server assigns the next id automatically.
        :return: parsed response, e.g.
            ``{"errcode": 0, "errmsg": "created", "tagid": 12}``
        """
        if tag_id is not None:
            self._validate_tag_id(tag_id)
        self._validate_tag_name(name)
        data: Dict[str, Any] = {"tagname": name}
        # Bug fix: the previous `if tag_id:` silently dropped a valid
        # tag_id of 0 (which passes _validate_tag_id) from the payload.
        if tag_id is not None:
            data["tagid"] = tag_id
        return self._post("tag/create", data=data)

    def update(self, tag_id: int, name: str) -> dict:
        """Rename a tag.

        https://developer.work.weixin.qq.com/document/path/90211

        The calling app must be the tag's creator.

        :param tag_id: tag id, non-negative.
        :param name: new tag name, at most 32 characters, unique.
        :return: parsed response, e.g. ``{"errcode": 0, "errmsg": "updated"}``
        """
        self._validate_tag_id(tag_id)
        self._validate_tag_name(name)
        return self._post("tag/update", data={"tagid": tag_id, "tagname": name})

    def delete(self, tag_id: int) -> dict:
        """Delete a tag.

        https://developer.work.weixin.qq.com/document/path/90212

        The calling app must be the tag's creator.

        :param tag_id: tag id, non-negative.
        :return: parsed response, e.g. ``{"errcode": 0, "errmsg": "deleted"}``
        """
        self._validate_tag_id(tag_id)
        return self._get("tag/delete", params={"tagid": tag_id})

    def get_users(self, tag_id: int) -> dict:
        """Fetch the members of a tag.

        https://developer.work.weixin.qq.com/document/path/90213

        The returned list only contains members within the app's visible
        range; third-party apps only see tags they created.

        :param tag_id: tag id, non-negative.
        :return: parsed response containing ``tagname``, ``userlist``
            (dicts with ``userid`` and, for eligible apps, ``name``) and
            ``partylist`` (department ids).
        """
        self._validate_tag_id(tag_id)
        return self._get("tag/get", params={"tagid": tag_id})

    def add_users(
        self, tag_id: int, user_ids: Optional[List[str]] = None, department_ids: Optional[List[int]] = None
    ) -> dict:
        """Add members (users and/or departments) to a tag.

        https://developer.work.weixin.qq.com/document/path/90214

        The calling app must be the tag's creator and the members must be
        within its visible range.  A tag may hold at most 30000 members.
        Invalid ids are reported in ``invalidlist`` / ``invalidparty`` of
        the response.

        :param tag_id: tag id, non-negative.
        :param user_ids: member ids; at most 1000 per request.  Must not
            be empty together with ``department_ids``.
        :param department_ids: department ids; at most 100 per request.
        :return: parsed response.
        """
        data = self._build_members_data(tag_id, user_ids, department_ids)
        return self._post("tag/addtagusers", data=data)

    def delete_users(
        self, tag_id: int, user_ids: Optional[List[str]] = None, department_ids: Optional[List[int]] = None
    ) -> dict:
        """Remove members (users and/or departments) from a tag.

        https://developer.work.weixin.qq.com/document/path/90215

        The calling app must be the tag's creator and the members must be
        within its visible range.  Invalid ids are reported in
        ``invalidlist`` / ``invalidparty`` of the response.

        :param tag_id: tag id, non-negative.
        :param user_ids: member ids; at most 1000 per request.  Must not
            be empty together with ``department_ids``.
        :param department_ids: department ids; at most 100 per request.
        :return: parsed response.
        """
        data = self._build_members_data(tag_id, user_ids, department_ids)
        return self._post("tag/deltagusers", data=data)

    def list(self) -> List[dict]:
        """List all tags visible to the calling app.

        https://developer.work.weixin.qq.com/document/path/90216

        Self-built apps see every tag; third-party apps only see tags
        they created.

        :return: the ``taglist`` portion of the response — a list of
            ``{"tagid": ..., "tagname": ...}`` dicts (no ``errcode``).
        """
        res = self._get("tag/list")
        return res["taglist"]
| 32.337017 | 107 | 0.368785 |
acf16ea2d68c1d52f2f415f15422cc6e507455fb | 10,181 | py | Python | scenarios.py | e259f381/RTG | 8282c573d4cf0e83c001b4c1357ff1c4c897fbd7 | [
"Apache-2.0"
] | null | null | null | scenarios.py | e259f381/RTG | 8282c573d4cf0e83c001b4c1357ff1c4c897fbd7 | [
"Apache-2.0"
] | null | null | null | scenarios.py | e259f381/RTG | 8282c573d4cf0e83c001b4c1357ff1c4c897fbd7 | [
"Apache-2.0"
] | null | null | null | from ast import literal_eval
import numpy as np
class ScenarioSetting():
    """
    Scenario paired with optional scripted behaviour for teams
    """

    def __init__(self, scenario_name, strategies):
        self.scenario_name = scenario_name
        self.strategies = strategies

    @staticmethod
    def parse(input):
        """
        Converts text into an array of scenarios.

        A bare scenario name (no brackets) is treated as a single
        scenario with no scripted strategies; otherwise the text is a
        Python-literal list of ``[name, strat1, strat2, strat3]`` rows.

        :param input: scenario name or literal list text
        :return: list of ScenarioSetting instances
        """
        if '[' not in input:
            # Promote a bare name to a one-entry list with no strategies.
            input = f"[['{input}', None, None, None]]"
        return [
            ScenarioSetting(entry[0], entry[1:4])
            for entry in literal_eval(input)
        ]

    def __repr__(self):
        return str([self.scenario_name, *self.strategies])
class RescueTheGeneralScenario():
    """Configuration for one Rescue-the-General game variant.

    ``SCENARIOS`` maps scenario names to setting overrides; every base
    scenario also gets an auto-generated ``<name>_training`` variant
    with ``initial_random_kills`` enabled.  Constructing an instance
    applies the defaults, then the named scenario's overrides, then any
    keyword overrides.
    """

    SCENARIOS = {
        "rescue": {
            "description": "The main game",
            "map_width": 32,
            "map_height": 32,
            "team_counts": (1, 1, 4),
            "n_trees": 10,
            "reward_per_tree": 1,
            "hidden_roles": "default",
            "timeout_mean": 500,
            # this gives red enough time to find the general, otherwise blue might learn to force a draw.
            "max_view_distance": 6,
            "team_general_view_distance": (2, 5, 5),  # how close you need to be to the general to see them
            "team_shoot_damage": (2, 2, 10),  # blue can 1-shot other players, but red and green can not.
            "team_view_distance": (6, 5, 5),
            "team_shoot_range": (5, 5, 5),
            "help_distance": 4,
            "general_initial_health": 1,
            "players_to_move_general": 2,
            "blue_general_indicator": "direction",
            "starting_locations": "together",  # players start together
            "team_shoot_timeout": (10, 10, 10),
            "timeout_penalty": (5, 0, -5),  # blue must not fail to rescue the general.
        },

        "r2g2": {
            "description": "Two red players and two green players on a small map",
            "map_width": 24,
            "map_height": 24,
            "team_counts": (2, 2, 0),
            "n_trees": 10,
            "reward_per_tree": 1,
            "hidden_roles": "none",
            "max_view_distance": 5,  # makes things a bit faster
            "team_view_distance": (5, 5, 5),  # no bonus vision for red
            "team_shoot_damage": (5, 5, 5),  # 2 hits to kill
            "team_shoot_range": (4, 4, 4),
            "starting_locations": "random",  # random start locations
            "team_shoot_timeout": (5, 5, 5)  # green is much slower at shooting
        },

        "r2g2_hr": {
            "description": "Two red players and two green players on a small map",
            "map_width": 24,
            "map_height": 24,
            "team_counts": (2, 2, 0),
            "n_trees": 10,
            "reward_per_tree": 1,
            "hidden_roles": "all",
            "max_view_distance": 5,  # makes things a bit faster
            "team_view_distance": (5, 5, 5),  # no bonus vision for red
            "team_shoot_damage": (5, 5, 5),  # 2 hits to kill
            "team_shoot_range": (4, 4, 4),
            "starting_locations": "random",  # random start locations
            "team_shoot_timeout": (5, 5, 5)  # green is much slower at shooting
        },

        "wolf": {
            "description": "A wolf among the sheep",
            "map_width": 32,
            "map_height": 32,
            "team_counts": (1, 3, 0),
            "n_trees": 9,
            "reward_per_tree": 1,
            "hidden_roles": "all",
            "timeout_mean": 500,  # make sure games don't last too long, 400 is plenty of time for green
                                  # to harvest all the trees
            "max_view_distance": 5,  # makes things a bit faster having smaller vision
            "team_view_distance": (5, 5, 5),  # no bonus vision for red
            "team_shoot_range": (5, 5, 5),
            "starting_locations": "together",  # random start locations
            "team_shoot_timeout": (20, 20, 20),
            "team_shoot_damage": (10, 5, 5),
            "battle_royale": True,  # removes general, and ends game if all green players are killed, or
                                    # if green eliminates red players and harvests all trees
            "zero_sum": True,
            "points_for_kill": np.asarray((  # loose a point for self kill, gain one for other team kill
                (-1, +3.33, +1),
                (+1, -1, +1),
                (+1, +1, -1),
            ))
        },

        "red2": {
            "description": "Two red players must find and kill general on small map.",
            "map_width": 24,
            "map_height": 24,
            "team_counts": (2, 0, 0),
            "max_view_distance": 5,
            "team_view_distance": (5, 5, 5),
            "n_trees": 10,
            "reward_per_tree": 1,
        },

        "green2": {
            "description": "Two green players must harvest trees uncontested on a small map.",
            "map_width": 24,
            "map_height": 24,
            "team_counts": (0, 2, 0),
            "max_view_distance": 5,
            "team_view_distance": (5, 5, 5),
            "n_trees": 10,
            "reward_per_tree": 1,
        },

        "blue2": {
            "description": "Two blue players must rescue the general on a small map.",
            "map_width": 16,
            "map_height": 16,  # smaller to make it easier
            "team_counts": (0, 0, 2),
            "max_view_distance": 5,
            "team_view_distance": (5, 5, 5),
            "n_trees": 10,
            "reward_per_tree": 1,
            "timeout_mean": 1000
        },
    }

    # add training versions (each scenario gets a "<name>_training"
    # variant with initial_random_kills enabled)
    for k, v in SCENARIOS.copy().items():
        training_key = k + "_training"
        if training_key not in SCENARIOS:
            SCENARIOS[training_key] = {**v, **{"initial_random_kills": 0.5}}
    # Bug fix: without this `del` the loop variables leaked into the
    # class namespace as RescueTheGeneralScenario.k / .v / .training_key.
    del k, v, training_key

    def __init__(self, scenario_name=None, **kwargs):
        """Apply defaults, then the named scenario, then keyword overrides.

        :param scenario_name: optional key into ``SCENARIOS``.
        :param kwargs: individual setting overrides; each key must be an
            existing attribute (asserted), which guards against typos.
        """
        # defaults
        self.n_trees = 20
        self.reward_per_tree = 0.5
        self.map_width = 48
        self.map_height = 48
        self.max_view_distance = 7  # distance used for size of observational space, unused tiles are blanked out
        self.team_view_distance = (7, 5, 5)
        self.team_shoot_damage = (10, 10, 10)
        self.team_general_view_distance = (3, 5, 5)  # how close you need to be to the general to see him
        self.team_shoot_range = (4, 0, 0)
        self.team_counts = (4, 4, 4)
        self.team_shoot_timeout = (3, 3, 3)  # number of turns between shooting
        self.enable_voting = False  # enables the voting system
        self.auto_shooting = False  # shooting auto targets closest player
        self.zero_sum = False  # if enabled any points scored by one team will be counted as negative points for all other teams.

        self.timeout_mean = 500
        self.timeout_rnd = 0  # this helps make sure games are not always in sync, which can happen if lots of
                              # games timeout.

        self.general_initial_health = 10
        self.player_initial_health = 10
        self.battle_royale = False  # removes general from game, and teams can win by eliminating all others teams

        self.bonus_actions = False  # provides small reward for taking an action that is indicated on agents local
                                    # observation some time after the signal appeared
        self.bonus_actions_one_at_a_time = False
        self.bonus_actions_delay = 10

        self.enable_signals = False

        self.help_distance = 2  # how close another player must be to help the first player move the general.
        self.starting_locations = "together"
        self.voting_button = False  # creates a voting button near start

        # enables team colors on agents local observation. This can be useful if one policy plays all 3 teams,
        # however it could cause problems if you want to infer what a different team would have done in that situation
        self.local_team_colors = True

        self.frame_blanking = 0  # fraction of frames to zero out (tests memory)

        self.initial_random_kills = 0  # enables random killing of players at the start of the game, can be helpful to make winning viable for a team.

        self.blue_players_near_general_to_get_reward = 1
        self.players_to_move_general = 1  # number of players required to move the general
        self.red_wins_if_sees_general = False

        self.timeout_penalty = (0, 0, 0)  # score penality for each team if a timeout occurs.

        # how many point a player gets for killing a player
        # ordered as points_for_kill[shooting_player_team, killed_player_team]
        self.points_for_kill = np.asarray((
            (0, 0, 0),
            (0, 0, 0),
            (0, 0, 0)
        ))

        # number of times to soft reset game before a hard reset
        # during a soft reset, player positions and health are reset, but not their teams, or id_colors
        # this allows players to remember roles across soft resets
        # a done is issued only at the end of all resets
        self.rounds = 1

        # default is red knows red, but all others are hidden
        # all is all roles are hidden
        # none is all roles are visible
        self.hidden_roles = "default"

        self.blue_general_indicator = "direction"
        self.reveal_team_on_death = False

        self.description = "The full game"

        # scenario settings
        settings_to_apply = {}
        if scenario_name is not None:
            settings_to_apply = self.SCENARIOS[scenario_name].copy()
        settings_to_apply.update(**kwargs)

        # apply settings
        for k, v in settings_to_apply.items():
            assert hasattr(self, k), f"Invalid scenario attribute {k}"
            setattr(self, k, v)

    def __str__(self):
        """Human-readable dump of every setting, one per line."""
        result = []
        for k, v in vars(self).items():
            result.append(f"{k:<24} = {v}")
        return "\n".join(result)
acf16f145bfaf7b534e6c97df88d19855e8022cb | 2,278 | py | Python | models/va_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | 1 | 2021-11-20T12:31:24.000Z | 2021-11-20T12:31:24.000Z | models/va_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | null | null | null | models/va_net.py | CFM-MSG/MTSC_VSS | 9536f8dc8d0282c3d0e6e2beee6e2cac490d6cb1 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
class ResnetDilated(nn.Module):
    """ResNet trunk converted to a dilated backbone.

    Strided 3x3 convolutions in the last stage(s) of ``orig_resnet`` are
    rewritten in place to stride-1 dilated convolutions (output stride 8
    or 16), the final avgpool/fc pair is dropped, and a 5-D video tensor
    ``(B, C, T, H, W)`` can be encoded frame by frame via
    :meth:`forward_multiframe`.
    """

    def __init__(self, orig_resnet, fc_dim=64, pool_type='maxpool',
                 dilate_scale=16, conv_size=3, sa=False):
        super(ResnetDilated, self).__init__()
        from functools import partial

        self.pool_type = pool_type
        self.sa = sa

        # Rewrite strided convs to dilated ones, in place, on the
        # original backbone before we take its children.
        if dilate_scale == 8:
            orig_resnet.layer3.apply(partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=2))

        # Keep everything except the trailing (avgpool, fc) pair.
        trunk = list(orig_resnet.children())[:-2]
        self.features = nn.Sequential(*trunk)

        # Projection head; defined but not applied in forward_multiframe
        # (the call is intentionally left out there).
        self.fc = nn.Conv2d(
            512, fc_dim, kernel_size=conv_size, padding=conv_size // 2)
        self.soft = nn.Softmax(dim=2)

    def _nostride_dilate(self, m, dilate):
        """Turn a strided conv into a stride-1 (possibly dilated) conv."""
        if 'Conv' not in m.__class__.__name__:
            return
        if m.stride == (2, 2):
            # the convolution with stride: remove the stride and halve
            # the dilation to keep the receptive field consistent.
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            # other convolutions: dilate fully.
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward_multiframe(self, x, pool=True):
        """Encode a video tensor ``(B, C, T, H, W)``.

        Frames are folded into the batch dimension, passed through the
        backbone, then unfolded back to ``(B, C', T, H', W')``.  When
        ``sa`` is False and ``pool`` is True the features are globally
        pooled to ``(B, C')`` using ``pool_type``.
        """
        batch, channels, frames, height, width = x.size()
        frames_first = x.permute(0, 2, 1, 3, 4).contiguous()
        feats = self.features(
            frames_first.view(batch * frames, channels, height, width))

        _, feat_c, feat_h, feat_w = feats.size()
        feats = feats.view(batch, frames, feat_c, feat_h, feat_w)
        feats = feats.permute(0, 2, 1, 3, 4)

        if self.sa or not pool:
            return feats

        if self.pool_type == 'avgpool':
            feats = F.adaptive_avg_pool3d(feats, 1)
        elif self.pool_type == 'maxpool':
            feats = F.adaptive_max_pool3d(feats, 1)
        return feats.view(batch, feat_c)
| 32.084507 | 71 | 0.497805 |
acf16f2a8f22fd3b79aaa2acf42d3bc44d041095 | 20,533 | py | Python | catalyst/dl/experiment/config.py | Arquestro/catalyst | 682aba403e925fe7d1da8731065398b956e8e4c4 | [
"Apache-2.0"
] | 1 | 2019-11-26T06:41:33.000Z | 2019-11-26T06:41:33.000Z | catalyst/dl/experiment/config.py | Arquestro/catalyst | 682aba403e925fe7d1da8731065398b956e8e4c4 | [
"Apache-2.0"
] | null | null | null | catalyst/dl/experiment/config.py | Arquestro/catalyst | 682aba403e925fe7d1da8731065398b956e8e4c4 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Callable, Dict, List, Mapping, Union # isort:skip
from collections import OrderedDict
from copy import deepcopy
import safitty
import torch
from torch import nn
from torch.utils.data import ( # noqa F401
DataLoader, Dataset, DistributedSampler
)
from catalyst.data import (
Augmentor, AugmentorCompose, DistributedSamplerWrapper
)
from catalyst.dl import (
Callback, ConfusionMatrixCallback, Experiment, LoggerCallback, utils
)
from catalyst.dl.callbacks import (
CheckpointCallback, ConsoleLogger, CriterionCallback, OptimizerCallback,
PhaseWrapperCallback, RaiseExceptionCallback, SchedulerCallback,
TensorboardLogger, VerboseLogger
)
from catalyst.dl.registry import (
CALLBACKS, CRITERIONS, MODELS, OPTIMIZERS, SAMPLERS, SCHEDULERS,
TRANSFORMS
)
from catalyst.utils import get_rank
from catalyst.utils.tools.typing import Criterion, Model, Optimizer, Scheduler
class ConfigExperiment(Experiment):
"""
Experiment created from a configuration file
"""
STAGE_KEYWORDS = [
"criterion_params",
"optimizer_params",
"scheduler_params",
"data_params",
"transform_params",
"state_params",
"callbacks_params",
]
def __init__(self, config: Dict):
"""
Args:
config (dict): dictionary of parameters
"""
self._config = deepcopy(config)
self._initial_seed = self._config.get("args", {}).get("seed", 42)
self._verbose = safitty.get(
self._config, "args", "verbose", default=False
)
self.__prepare_logdir()
self._config["stages"]["state_params"] = utils.merge_dicts(
deepcopy(self._config["stages"].get("state_params", {})),
deepcopy(self._config.get("args", {})), {"logdir": self._logdir}
)
self.stages_config = self._get_stages_config(self._config["stages"])
def __prepare_logdir(self):
EXCLUDE_TAG = "none"
logdir = self._config.get("args", {}).get("logdir", None)
baselogdir = self._config.get("args", {}).get("baselogdir", None)
if logdir is not None and logdir.lower() != EXCLUDE_TAG:
self._logdir = logdir
elif baselogdir is not None and baselogdir.lower() != EXCLUDE_TAG:
logdir_postfix = self._get_logdir(self._config)
self._logdir = f"{baselogdir}/{logdir_postfix}"
else:
self._logdir = None
def _get_stages_config(self, stages_config):
stages_defaults = {}
stages_config_out = OrderedDict()
for key in self.STAGE_KEYWORDS:
stages_defaults[key] = deepcopy(stages_config.get(key, {}))
for stage in stages_config:
if stage in self.STAGE_KEYWORDS \
or stages_config.get(stage) is None:
continue
stages_config_out[stage] = {}
for key in self.STAGE_KEYWORDS:
stages_config_out[stage][key] = utils.merge_dicts(
deepcopy(stages_defaults.get(key, {})),
deepcopy(stages_config[stage].get(key, {})),
)
return stages_config_out
def _get_logdir(self, config: Dict) -> str:
timestamp = utils.get_utcnow_time()
config_hash = utils.get_short_hash(config)
logdir = f"{timestamp}.{config_hash}"
distributed_rank = get_rank()
if distributed_rank > -1:
logdir = f"{logdir}.rank{distributed_rank:02d}"
return logdir
@property
def initial_seed(self) -> int:
"""Experiment's initial seed value"""
return self._initial_seed
@property
def logdir(self):
"""Path to the directory where the experiment logs"""
return self._logdir
@property
def stages(self) -> List[str]:
"""Experiment's stage names"""
stages_keys = list(self.stages_config.keys())
return stages_keys
@property
def distributed_params(self) -> Dict:
"""Dict with the parameters for distributed and FP16 methond"""
return self._config.get("distributed_params", {})
@property
def monitoring_params(self) -> Dict:
"""Dict with the parameters for monitoring services"""
return self._config.get("monitoring_params", {})
def get_state_params(self, stage: str) -> Mapping[str, Any]:
"""Returns the state parameters for a given stage"""
return self.stages_config[stage].get("state_params", {})
def _preprocess_model_for_stage(self, stage: str, model: Model):
stage_index = self.stages.index(stage)
if stage_index > 0:
checkpoint_path = \
f"{self.logdir}/checkpoints/best.pth"
checkpoint = utils.load_checkpoint(checkpoint_path)
utils.unpack_checkpoint(checkpoint, model=model)
return model
def _postprocess_model_for_stage(self, stage: str, model: Model):
return model
@staticmethod
def _get_model(**params):
key_value_flag = params.pop("_key_value", False)
if key_value_flag:
model = {}
for key, params_ in params.items():
model[key] = ConfigExperiment._get_model(**params_)
model = nn.ModuleDict(model)
else:
model = MODELS.get_from_params(**params)
return model
def get_model(self, stage: str):
"""Returns the model for a given stage"""
model_params = self._config["model_params"]
model = self._get_model(**model_params)
model = self._preprocess_model_for_stage(stage, model)
model = self._postprocess_model_for_stage(stage, model)
return model
@staticmethod
def _get_criterion(**params):
key_value_flag = params.pop("_key_value", False)
if key_value_flag:
criterion = {}
for key, params_ in params.items():
criterion[key] = ConfigExperiment._get_criterion(**params_)
else:
criterion = CRITERIONS.get_from_params(**params)
if criterion is not None and torch.cuda.is_available():
criterion = criterion.cuda()
return criterion
def get_criterion(self, stage: str) -> Criterion:
"""Returns the criterion for a given stage"""
criterion_params = \
self.stages_config[stage].get("criterion_params", {})
criterion = self._get_criterion(**criterion_params)
return criterion
def _get_optimizer(
self,
stage: str,
model: Union[Model, Dict[str, Model]],
**params
) -> Optimizer:
# @TODO 1: refactoring; this method is too long
# @TODO 2: load state dicts for schedulers & criterion
layerwise_params = \
params.pop("layerwise_params", OrderedDict())
no_bias_weight_decay = \
params.pop("no_bias_weight_decay", True)
# linear scaling rule from https://arxiv.org/pdf/1706.02677.pdf
lr_scaling_params = params.pop("lr_linear_scaling", None)
if lr_scaling_params:
data_params = dict(self.stages_config[stage]["data_params"])
batch_size = data_params.get("batch_size")
per_gpu_scaling = data_params.get("per_gpu_scaling", False)
distributed_rank = get_rank()
distributed = distributed_rank > -1
if per_gpu_scaling and not distributed:
num_gpus = max(1, torch.cuda.device_count())
batch_size *= num_gpus
base_lr = lr_scaling_params.get("lr")
base_batch_size = lr_scaling_params.get("base_batch_size", 256)
lr_scaling = batch_size / base_batch_size
params["lr"] = base_lr * lr_scaling # scale default lr
else:
lr_scaling = 1.0
# getting model parameters
model_key = params.pop("_model", None)
if model_key is None:
assert isinstance(model, nn.Module), \
"model is keyvalue, but optimizer has no specified model"
model_params = utils.process_model_params(
model, layerwise_params, no_bias_weight_decay, lr_scaling
)
elif isinstance(model_key, str):
model_params = utils.process_model_params(
model[model_key], layerwise_params, no_bias_weight_decay,
lr_scaling
)
elif isinstance(model_key, (list, tuple)):
model_params = []
for model_key_ in model_key:
model_params_ = utils.process_model_params(
model[model_key_], layerwise_params, no_bias_weight_decay,
lr_scaling
)
model_params.extend(model_params_)
else:
raise ValueError("unknown type of model_params")
load_from_previous_stage = \
params.pop("load_from_previous_stage", False)
optimizer_key = params.pop("optimizer_key", None)
optimizer = OPTIMIZERS.get_from_params(**params, params=model_params)
if load_from_previous_stage and self.stages.index(stage) != 0:
checkpoint_path = f"{self.logdir}/checkpoints/best_full.pth"
checkpoint = utils.load_checkpoint(checkpoint_path)
dict2load = optimizer
if optimizer_key is not None:
dict2load = {optimizer_key: optimizer}
utils.unpack_checkpoint(checkpoint, optimizer=dict2load)
# move optimizer to device
device = utils.get_device()
for param in model_params:
param = param["params"][0]
state = optimizer.state[param]
for key, value in state.items():
state[key] = utils.any2device(value, device)
# update optimizer params
for key, value in params.items():
for pg in optimizer.param_groups:
pg[key] = value
return optimizer
def get_optimizer(
self,
stage: str,
model: Union[Model, Dict[str, Model]]
) -> Union[Optimizer, Dict[str, Optimizer]]:
"""
Returns the optimizer for a given stage
Args:
stage (str): stage name
model (Union[Model, Dict[str, Model]]): model or a dict of models
"""
optimizer_params = \
self.stages_config[stage].get("optimizer_params", {})
key_value_flag = optimizer_params.pop("_key_value", False)
if key_value_flag:
optimizer = {}
for key, params_ in optimizer_params.items():
# load specified optimizer from checkpoint
optimizer_key = "optimizer_key"
assert optimizer_key not in params_, "keyword reserved"
params_[optimizer_key] = key
optimizer[key] = self._get_optimizer(stage, model, **params_)
else:
optimizer = self._get_optimizer(stage, model, **optimizer_params)
return optimizer
@staticmethod
def _get_scheduler(*, optimizer, **params):
key_value_flag = params.pop("_key_value", False)
if key_value_flag:
scheduler = {}
for key, params_ in params.items():
scheduler[key] = ConfigExperiment._get_scheduler(
optimizer=optimizer, **params_
)
else:
scheduler = SCHEDULERS.get_from_params(
**params, optimizer=optimizer
)
return scheduler
def get_scheduler(self, stage: str, optimizer: Optimizer) -> Scheduler:
"""Returns the scheduler for a given stage"""
scheduler_params = \
self.stages_config[stage].get("scheduler_params", {})
scheduler = self._get_scheduler(
optimizer=optimizer, **scheduler_params
)
return scheduler
@staticmethod
def _get_transform(**params) -> Callable:
key_value_flag = params.pop("_key_value", False)
if key_value_flag:
transforms_composition = {
key: ConfigExperiment._get_transform(**params_)
for key, params_ in params.items()
}
transform = AugmentorCompose({
key: Augmentor(
dict_key=key,
augment_fn=transform,
input_key=key,
output_key=key,
)
for key, transform in transforms_composition.items()
})
else:
if "transforms" in params:
transforms_composition = [
ConfigExperiment._get_transform(**transform_params)
for transform_params in params["transforms"]
]
params.update(transforms=transforms_composition)
transform = TRANSFORMS.get_from_params(**params)
return transform
def get_transforms(
self, stage: str = None, dataset: str = None
) -> Callable:
"""
Returns transform for a given stage & mode
Args:
stage (str): stage name
dataset (str): dataset name (e.g. "train", "valid"),
will be used only if the value of `_key_value`` is ``True``
"""
transform_params = deepcopy(
self.stages_config[stage].get("transform_params", {})
)
key_value_flag = transform_params.pop("_key_value", False)
if key_value_flag:
transform_params = transform_params.get(dataset, {})
transform = self._get_transform(**transform_params)
if transform is None:
def transform(dict_):
return dict_
elif not isinstance(transform, AugmentorCompose):
transform_ = transform
def transform(dict_):
return transform_(**dict_)
return transform
def get_loaders(
self,
stage: str,
epoch: int = None,
) -> "OrderedDict[str, DataLoader]":
"""Returns the loaders for a given stage"""
data_params = dict(self.stages_config[stage]["data_params"])
batch_size = data_params.pop("batch_size", 1)
num_workers = data_params.pop("num_workers")
drop_last = data_params.pop("drop_last", False)
per_gpu_scaling = data_params.pop("per_gpu_scaling", False)
distributed_rank = get_rank()
distributed = distributed_rank > -1
datasets = self.get_datasets(stage=stage, **data_params)
overridden_loaders_params = data_params.pop("loaders_params", {})
assert isinstance(overridden_loaders_params, dict), (
f"`overridden_loaders_params` should be a Dict. "
f"Got: {overridden_loaders_params}"
)
samplers_params = data_params.pop("samplers_params", {})
assert isinstance(samplers_params, dict), \
f"`samplers_params` should be a Dict. Got: {samplers_params}"
loaders = OrderedDict()
for name, ds_ in datasets.items():
assert isinstance(ds_, (Dataset, dict)), \
f"{ds_} should be Dataset or Dict"
overridden_loader_params = overridden_loaders_params.pop(name, {})
assert isinstance(overridden_loader_params, dict), \
f"{overridden_loader_params} should be Dict"
sampler_params = samplers_params.pop(name, None)
if sampler_params is None:
if isinstance(ds_, dict) and "sampler" in ds_:
sampler = ds_.pop("sampler", None)
else:
sampler = None
else:
sampler = SAMPLERS.get_from_params(**sampler_params)
if isinstance(ds_, dict) and "sampler" in ds_:
ds_.pop("sampler", None)
batch_size = overridden_loader_params.pop("batch_size", batch_size)
num_workers = overridden_loader_params.\
pop("num_workers", num_workers)
if per_gpu_scaling and not distributed:
num_gpus = max(1, torch.cuda.device_count())
batch_size *= num_gpus
num_workers *= num_gpus
loader_params = {
"batch_size": batch_size,
"num_workers": num_workers,
"pin_memory": torch.cuda.is_available(),
"drop_last": drop_last,
**overridden_loader_params
}
if isinstance(ds_, Dataset):
loader_params["dataset"] = ds_
elif isinstance(ds_, dict):
assert "dataset" in ds_, \
"You need to specify dataset for dataloader"
loader_params = utils.merge_dicts(ds_, loader_params)
else:
raise NotImplementedError
if distributed:
if sampler is not None:
if not isinstance(sampler, DistributedSampler):
loader_params["sampler"] = \
DistributedSamplerWrapper(sampler=sampler)
else:
sampler = DistributedSampler(
dataset=loader_params["dataset"]
)
loader_params["shuffle"] = (
name.startswith("train") and sampler is None
)
loader_params["sampler"] = sampler
if "batch_sampler" in loader_params:
if distributed:
raise ValueError(
"batch_sampler option is mutually "
"exclusive with distributed"
)
for k in ("batch_size", "shuffle", "sampler", "drop_last"):
loader_params.pop(k, None)
if "worker_init_fn" not in loader_params:
loader_params["worker_init_fn"] = \
lambda x: utils.set_global_seed(self.initial_seed + x)
loaders[name] = DataLoader(**loader_params)
return loaders
@staticmethod
def _get_callback(**params):
wrapper_params = params.pop("_wrapper", None)
callback = CALLBACKS.get_from_params(**params)
if wrapper_params is not None:
wrapper_params["base_callback"] = callback
return ConfigExperiment._get_callback(**wrapper_params)
return callback
def get_callbacks(self, stage: str) -> "OrderedDict[Callback]":
"""Returns the callbacks for a given stage"""
callbacks_params = (
self.stages_config[stage].get("callbacks_params", {})
)
callbacks = OrderedDict()
for key, callback_params in callbacks_params.items():
callback = self._get_callback(**callback_params)
callbacks[key] = callback
# ! For compatibility with previous versions.
default_callbacks = []
if self._verbose:
default_callbacks.append(("verbose", VerboseLogger))
if not stage.startswith("infer"):
default_callbacks.append(("_criterion", CriterionCallback))
default_callbacks.append(("_optimizer", OptimizerCallback))
if self.stages_config[stage].get("scheduler_params", {}):
default_callbacks.append(("_scheduler", SchedulerCallback))
default_callbacks.append(("_saver", CheckpointCallback))
default_callbacks.append(("console", ConsoleLogger))
default_callbacks.append(("tensorboard", TensorboardLogger))
default_callbacks.append(("exception", RaiseExceptionCallback))
for callback_name, callback_fn in default_callbacks:
is_already_present = False
for x in callbacks.values():
if isinstance(x, PhaseWrapperCallback):
x = x.callback
if isinstance(x, callback_fn):
is_already_present = True
break
if not is_already_present:
callbacks[callback_name] = callback_fn()
# Remove LoggerCallback on worker nodes
if get_rank() > 0:
to_del = (LoggerCallback, ConfusionMatrixCallback)
for k in list(filter(
lambda c: isinstance(callbacks[c], to_del), callbacks
)):
del callbacks[k]
return callbacks
__all__ = ["ConfigExperiment"]
| 36.863555 | 79 | 0.592412 |
acf17003b68883cb2b2d79c885c376a3b7c977ac | 44 | py | Python | drf_admin/apps/cmdb/__init__.py | guohaihan/myproject | 0ec105d0bd48477faddf93bd62a8ede800419ae6 | [
"MIT"
] | 228 | 2020-06-20T10:07:03.000Z | 2022-03-29T07:11:01.000Z | drf_admin/apps/cmdb/__init__.py | guohaihan/myproject | 0ec105d0bd48477faddf93bd62a8ede800419ae6 | [
"MIT"
] | 25 | 2020-07-16T12:29:04.000Z | 2022-02-16T06:31:06.000Z | drf_admin/apps/cmdb/__init__.py | guohaihan/myproject | 0ec105d0bd48477faddf93bd62a8ede800419ae6 | [
"MIT"
] | 82 | 2020-10-26T07:14:15.000Z | 2022-03-29T07:53:23.000Z | default_app_config = 'cmdb.apps.CmdbConfig'
| 22 | 43 | 0.818182 |
acf170472da26d46cd3102d575e73ac61052a4fb | 10,073 | py | Python | homeassistant/components/motioneye/camera.py | DerFlob/hass-core | 19d8b8a6ff2e312b1698ce05a9bd543c6cbe2b1a | [
"Apache-2.0"
] | null | null | null | homeassistant/components/motioneye/camera.py | DerFlob/hass-core | 19d8b8a6ff2e312b1698ce05a9bd543c6cbe2b1a | [
"Apache-2.0"
] | 18 | 2021-11-24T06:26:13.000Z | 2022-03-31T06:25:15.000Z | homeassistant/components/motioneye/camera.py | DerFlob/hass-core | 19d8b8a6ff2e312b1698ce05a9bd543c6cbe2b1a | [
"Apache-2.0"
] | 3 | 2021-11-14T13:29:33.000Z | 2021-12-27T17:05:22.000Z | """The motionEye integration."""
from __future__ import annotations
from types import MappingProxyType
from typing import Any
import aiohttp
from jinja2 import Template
from motioneye_client.client import MotionEyeClient, MotionEyeClientURLParseError
from motioneye_client.const import (
DEFAULT_SURVEILLANCE_USERNAME,
KEY_ACTION_SNAPSHOT,
KEY_MOTION_DETECTION,
KEY_NAME,
KEY_STREAMING_AUTH_MODE,
KEY_TEXT_OVERLAY_CAMERA_NAME,
KEY_TEXT_OVERLAY_CUSTOM_TEXT,
KEY_TEXT_OVERLAY_CUSTOM_TEXT_LEFT,
KEY_TEXT_OVERLAY_CUSTOM_TEXT_RIGHT,
KEY_TEXT_OVERLAY_DISABLED,
KEY_TEXT_OVERLAY_LEFT,
KEY_TEXT_OVERLAY_RIGHT,
KEY_TEXT_OVERLAY_TIMESTAMP,
)
import voluptuous as vol
from homeassistant.components.mjpeg.camera import (
CONF_MJPEG_URL,
CONF_STILL_IMAGE_URL,
MjpegCamera,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import (
MotionEyeEntity,
get_camera_from_cameras,
is_acceptable_camera,
listen_for_new_cameras,
)
from .const import (
CONF_ACTION,
CONF_CLIENT,
CONF_COORDINATOR,
CONF_STREAM_URL_TEMPLATE,
CONF_SURVEILLANCE_PASSWORD,
CONF_SURVEILLANCE_USERNAME,
DOMAIN,
MOTIONEYE_MANUFACTURER,
SERVICE_ACTION,
SERVICE_SET_TEXT_OVERLAY,
SERVICE_SNAPSHOT,
TYPE_MOTIONEYE_MJPEG_CAMERA,
)
PLATFORMS = [Platform.CAMERA]
SCHEMA_TEXT_OVERLAY = vol.In(
[
KEY_TEXT_OVERLAY_DISABLED,
KEY_TEXT_OVERLAY_TIMESTAMP,
KEY_TEXT_OVERLAY_CUSTOM_TEXT,
KEY_TEXT_OVERLAY_CAMERA_NAME,
]
)
SCHEMA_SERVICE_SET_TEXT = vol.Schema(
vol.All(
cv.make_entity_service_schema(
{
vol.Optional(KEY_TEXT_OVERLAY_LEFT): SCHEMA_TEXT_OVERLAY,
vol.Optional(KEY_TEXT_OVERLAY_CUSTOM_TEXT_LEFT): cv.string,
vol.Optional(KEY_TEXT_OVERLAY_RIGHT): SCHEMA_TEXT_OVERLAY,
vol.Optional(KEY_TEXT_OVERLAY_CUSTOM_TEXT_RIGHT): cv.string,
},
),
cv.has_at_least_one_key(
KEY_TEXT_OVERLAY_LEFT,
KEY_TEXT_OVERLAY_CUSTOM_TEXT_LEFT,
KEY_TEXT_OVERLAY_RIGHT,
KEY_TEXT_OVERLAY_CUSTOM_TEXT_RIGHT,
),
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up motionEye from a config entry."""
entry_data = hass.data[DOMAIN][entry.entry_id]
@callback
def camera_add(camera: dict[str, Any]) -> None:
"""Add a new motionEye camera."""
async_add_entities(
[
MotionEyeMjpegCamera(
entry.entry_id,
entry.data.get(
CONF_SURVEILLANCE_USERNAME, DEFAULT_SURVEILLANCE_USERNAME
),
entry.data.get(CONF_SURVEILLANCE_PASSWORD, ""),
camera,
entry_data[CONF_CLIENT],
entry_data[CONF_COORDINATOR],
entry.options,
)
]
)
listen_for_new_cameras(hass, entry, camera_add)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SET_TEXT_OVERLAY,
SCHEMA_SERVICE_SET_TEXT,
"async_set_text_overlay",
)
platform.async_register_entity_service(
SERVICE_ACTION,
{vol.Required(CONF_ACTION): cv.string},
"async_request_action",
)
platform.async_register_entity_service(
SERVICE_SNAPSHOT,
{},
"async_request_snapshot",
)
class MotionEyeMjpegCamera(MotionEyeEntity, MjpegCamera):
"""motionEye mjpeg camera."""
_name: str
def __init__(
self,
config_entry_id: str,
username: str,
password: str,
camera: dict[str, Any],
client: MotionEyeClient,
coordinator: DataUpdateCoordinator,
options: MappingProxyType[str, str],
) -> None:
"""Initialize a MJPEG camera."""
self._surveillance_username = username
self._surveillance_password = password
self._motion_detection_enabled: bool = camera.get(KEY_MOTION_DETECTION, False)
# motionEye cameras are always streaming or unavailable.
self._attr_is_streaming = True
MotionEyeEntity.__init__(
self,
config_entry_id,
TYPE_MOTIONEYE_MJPEG_CAMERA,
camera,
client,
coordinator,
options,
)
MjpegCamera.__init__(
self,
verify_ssl=False,
**self._get_mjpeg_camera_properties_for_camera(camera),
)
@callback
def _get_mjpeg_camera_properties_for_camera(
self, camera: dict[str, Any]
) -> dict[str, Any]:
"""Convert a motionEye camera to MjpegCamera internal properties."""
auth = None
if camera.get(KEY_STREAMING_AUTH_MODE) in (
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
):
auth = camera[KEY_STREAMING_AUTH_MODE]
streaming_template = self._options.get(CONF_STREAM_URL_TEMPLATE, "").strip()
streaming_url = None
if streaming_template:
# Note: Can't use homeassistant.helpers.template as it requires hass
# which is not available during entity construction.
streaming_url = Template(streaming_template).render(**camera)
else:
try:
streaming_url = self._client.get_camera_stream_url(camera)
except MotionEyeClientURLParseError:
pass
return {
CONF_NAME: camera[KEY_NAME],
CONF_USERNAME: self._surveillance_username if auth is not None else None,
CONF_PASSWORD: self._surveillance_password if auth is not None else "",
CONF_MJPEG_URL: streaming_url or "",
CONF_STILL_IMAGE_URL: self._client.get_camera_snapshot_url(camera),
CONF_AUTHENTICATION: auth,
}
@callback
def _set_mjpeg_camera_state_for_camera(self, camera: dict[str, Any]) -> None:
"""Set the internal state to match the given camera."""
# Sets the state of the underlying (inherited) MjpegCamera based on the updated
# MotionEye camera dictionary.
properties = self._get_mjpeg_camera_properties_for_camera(camera)
self._name = properties[CONF_NAME]
self._username = properties[CONF_USERNAME]
self._password = properties[CONF_PASSWORD]
self._mjpeg_url = properties[CONF_MJPEG_URL]
self._still_image_url = properties[CONF_STILL_IMAGE_URL]
self._authentication = properties[CONF_AUTHENTICATION]
if (
self._authentication == HTTP_BASIC_AUTHENTICATION
and self._username is not None
):
self._auth = aiohttp.BasicAuth(self._username, password=self._password)
def _is_acceptable_streaming_camera(self) -> bool:
"""Determine if a camera is streaming/usable."""
return is_acceptable_camera(
self._camera
) and MotionEyeClient.is_camera_streaming(self._camera)
@property
def available(self) -> bool:
"""Return if entity is available."""
return super().available and self._is_acceptable_streaming_camera()
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self._camera = get_camera_from_cameras(self._camera_id, self.coordinator.data)
if self._camera and self._is_acceptable_streaming_camera():
self._set_mjpeg_camera_state_for_camera(self._camera)
self._motion_detection_enabled = self._camera.get(
KEY_MOTION_DETECTION, False
)
super()._handle_coordinator_update()
@property
def brand(self) -> str:
"""Return the camera brand."""
return MOTIONEYE_MANUFACTURER
@property
def motion_detection_enabled(self) -> bool:
"""Return the camera motion detection status."""
return self._motion_detection_enabled
async def async_set_text_overlay(
self,
left_text: str = None,
right_text: str = None,
custom_left_text: str = None,
custom_right_text: str = None,
) -> None:
"""Set text overlay for a camera."""
# Fetch the very latest camera config to reduce the risk of updating with a
# stale configuration.
camera = await self._client.async_get_camera(self._camera_id)
if not camera:
return
if left_text is not None:
camera[KEY_TEXT_OVERLAY_LEFT] = left_text
if right_text is not None:
camera[KEY_TEXT_OVERLAY_RIGHT] = right_text
if custom_left_text is not None:
camera[KEY_TEXT_OVERLAY_CUSTOM_TEXT_LEFT] = custom_left_text.encode(
"unicode_escape"
).decode("UTF-8")
if custom_right_text is not None:
camera[KEY_TEXT_OVERLAY_CUSTOM_TEXT_RIGHT] = custom_right_text.encode(
"unicode_escape"
).decode("UTF-8")
await self._client.async_set_camera(self._camera_id, camera)
async def async_request_action(self, action: str) -> None:
"""Call a motionEye action on a camera."""
await self._client.async_action(self._camera_id, action)
async def async_request_snapshot(self) -> None:
"""Request a motionEye snapshot be saved."""
await self.async_request_action(KEY_ACTION_SNAPSHOT)
| 33.465116 | 87 | 0.663953 |
acf1711bef54dfeda3d69536c8c7a4af3e00eec6 | 16,908 | py | Python | openstack_dashboard/test/test_data/sahara_data.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/test_data/sahara_data.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/test_data/sahara_data.py | maofutian/horizon | dab92e7d2f576caea8f81c8e22a516fb45633794 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.test_data import utils
from saharaclient.api import cluster_templates
from saharaclient.api import clusters
from saharaclient.api import data_sources
from saharaclient.api import job_binaries
from saharaclient.api import job_executions
from saharaclient.api import jobs
from saharaclient.api import node_group_templates
from saharaclient.api import plugins
def data(TEST):
TEST.plugins = utils.TestDataContainer()
TEST.plugins_configs = utils.TestDataContainer()
TEST.nodegroup_templates = utils.TestDataContainer()
TEST.cluster_templates = utils.TestDataContainer()
TEST.clusters = utils.TestDataContainer()
TEST.data_sources = utils.TestDataContainer()
TEST.job_binaries = utils.TestDataContainer()
TEST.jobs = utils.TestDataContainer()
TEST.job_executions = utils.TestDataContainer()
plugin1_dict = {
"description": "vanilla plugin",
"name": "vanilla",
"title": "Vanilla Apache Hadoop",
"versions": ["2.3.0", "1.2.1"]
}
plugin1 = plugins.Plugin(plugins.PluginManager(None), plugin1_dict)
TEST.plugins.add(plugin1)
plugin_config1_dict = {
"node_processes": {
"HDFS": [
"namenode",
"datanode",
"secondarynamenode"
],
"MapReduce": [
"tasktracker",
"jobtracker"
]
},
"description": "This plugin provides an ability to launch vanilla "
"Apache Hadoop cluster without any management "
"consoles.",
"versions": [
"1.2.1"
],
"required_image_tags": [
"vanilla",
"1.2.1"
],
"configs": [
{
"default_value": "/tmp/hadoop-${user.name}",
"name": "hadoop.tmp.dir",
"priority": 2,
"config_type": "string",
"applicable_target": "HDFS",
"is_optional": True,
"scope": "node",
"description": "A base for other temporary directories."
},
{
"default_value": True,
"name": "hadoop.native.lib",
"priority": 2,
"config_type": "bool",
"applicable_target": "HDFS",
"is_optional": True,
"scope": "node",
"description": "Should native hadoop libraries, if present, "
"be used."
},
],
"title": "Vanilla Apache Hadoop",
"name": "vanilla"
}
TEST.plugins_configs.add(plugins.Plugin(plugins.PluginManager(None),
plugin_config1_dict))
# Nodegroup_Templates.
ngt1_dict = {
"created_at": "2014-06-04 14:01:03.701243",
"description": None,
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"hadoop_version": "1.2.1",
"id": "c166dfcc-9cc7-4b48-adc9-f0946169bb36",
"image_id": None,
"name": "sample-template",
"node_configs": {},
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
"oozie"
],
"plugin_name": "vanilla",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
}
ngt1 = node_group_templates.NodeGroupTemplate(
node_group_templates.NodeGroupTemplateManager(None), ngt1_dict)
TEST.nodegroup_templates.add(ngt1)
# Cluster_templates.
ct1_dict = {
"anti_affinity": [],
"cluster_configs": {},
"created_at": "2014-06-04 14:01:06.460711",
"default_image_id": None,
"description": None,
"hadoop_version": "1.2.1",
"id": "a2c3743f-31a2-4919-8d02-792138a87a98",
"name": "sample-cluster-template",
"neutron_management_network": None,
"node_groups": [
{
"count": 1,
"created_at": "2014-06-04 14:01:06.462512",
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"image_id": None,
"name": "master",
"node_configs": {},
"node_group_template_id": "c166dfcc-9cc7-4b48-adc9",
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
"oozie"
],
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
},
{
"count": 2,
"created_at": "2014-06-04 14:01:06.463214",
"flavor_id": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
"floating_ip_pool": None,
"image_id": None,
"name": "workers",
"node_configs": {},
"node_group_template_id": "4eb5504c-94c9-4049-a440",
"node_processes": [
"datanode",
"tasktracker"
],
"updated_at": None,
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
}
],
"plugin_name": "vanilla",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None
}
ct1 = cluster_templates.ClusterTemplate(
cluster_templates.ClusterTemplateManager(None), ct1_dict)
TEST.cluster_templates.add(ct1)
# Clusters.
cluster1_dict = {
"anti_affinity": [],
"cluster_configs": {},
"cluster_template_id": "a2c3743f-31a2-4919-8d02-792138a87a98",
"created_at": "2014-06-04 20:02:14.051328",
"default_image_id": "9eb4643c-dca8-4ea7-92d2-b773f88a8dc6",
"description": "",
"hadoop_version": "1.2.1",
"id": "ec9a0d28-5cfb-4028-a0b5-40afe23f1533",
"info": {},
"is_transient": False,
"management_public_key": "fakekey",
"name": "cercluster",
"neutron_management_network": None,
"node_groups": [
{
"count": 1,
"created_at": "2014-06-04 20:02:14.053153",
"flavor_id": "0",
"floating_ip_pool": None,
"image_id": None,
"instances": [
{
"created_at": "2014-06-04 20:02:14.834529",
"id": "c3b8004b-7063-4b99-a082-820cdc6e961c",
"instance_id": "a45f5495-4a10-4f17-8fae",
"instance_name": "cercluster-master-001",
"internal_ip": None,
"management_ip": None,
"updated_at": None,
"volumes": []
}
],
"name": "master",
"node_configs": {},
"node_group_template_id": "c166dfcc-9cc7-4b48-adc9",
"node_processes": [
"namenode",
"jobtracker",
"secondarynamenode",
"hiveserver",
"oozie"
],
"updated_at": "2014-06-04 20:02:14.841760",
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
},
{
"count": 2,
"created_at": "2014-06-04 20:02:14.053849",
"flavor_id": "0",
"floating_ip_pool": None,
"image_id": None,
"instances": [
{
"created_at": "2014-06-04 20:02:15.097655",
"id": "6a8ae0b1-bb28-4de2-bfbb-bdd3fd2d72b2",
"instance_id": "38bf8168-fb30-483f-8d52",
"instance_name": "cercluster-workers-001",
"internal_ip": None,
"management_ip": None,
"updated_at": None,
"volumes": []
},
{
"created_at": "2014-06-04 20:02:15.344515",
"id": "17b98ed3-a776-467a-90cf-9f46a841790b",
"instance_id": "85606938-8e53-46a5-a50b",
"instance_name": "cercluster-workers-002",
"internal_ip": None,
"management_ip": None,
"updated_at": None,
"volumes": []
}
],
"name": "workers",
"node_configs": {},
"node_group_template_id": "4eb5504c-94c9-4049-a440",
"node_processes": [
"datanode",
"tasktracker"
],
"updated_at": "2014-06-04 20:02:15.355745",
"volume_mount_prefix": "/volumes/disk",
"volumes_per_node": 0,
"volumes_size": 0
}
],
"plugin_name": "vanilla",
"status": "Active",
"status_description": "",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"trust_id": None,
"updated_at": "2014-06-04 20:02:15.446087",
"user_keypair_id": "stackboxkp"
}
cluster1 = clusters.Cluster(
clusters.ClusterManager(None), cluster1_dict)
TEST.clusters.add(cluster1)
# Data Sources.
data_source1_dict = {
"created_at": "2014-06-04 14:01:10.371562",
"description": "sample output",
"id": "426fb01c-5c7e-472d-bba2-b1f0fe7e0ede",
"name": "sampleOutput",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"type": "swift",
"updated_at": None,
"url": "swift://example.sahara/output"
}
data_source2_dict = {
"created_at": "2014-06-05 15:01:12.331361",
"description": "second sample output",
"id": "ab3413-adfb-bba2-123456785675",
"name": "sampleOutput2",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"type": "hdfs",
"updated_at": None,
"url": "hdfs://example.sahara/output"
}
data_source1 = data_sources.DataSources(
data_sources.DataSourceManager(None), data_source1_dict)
data_source2 = data_sources.DataSources(
data_sources.DataSourceManager(None), data_source2_dict)
TEST.data_sources.add(data_source1)
TEST.data_sources.add(data_source2)
# Job Binaries.
job_binary1_dict = {
"created_at": "2014-06-05 18:15:15.581285",
"description": "",
"id": "3f3a07ac-7d6f-49e8-8669-40b25ee891b7",
"name": "example.pig",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None,
"url": "internal-db://80121dea-f8bd-4ad3-bcc7-096f4bfc722d"
}
job_binary1 = job_binaries.JobBinaries(
job_binaries.JobBinariesManager(None), job_binary1_dict)
TEST.job_binaries.add(job_binary1)
# Jobs.
job1_dict = {
"created_at": "2014-06-05 19:23:59.637165",
"description": "",
"id": "a077b851-46be-4ad7-93c3-2d83894546ef",
"libs": [
{
"created_at": "2014-06-05 19:23:42.742057",
"description": "",
"id": "ab140807-59f8-4235-b4f2-e03daf946256",
"name": "udf.jar",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None,
"url": "internal-db://d186e2bb-df93-47eb-8c0e-ce21e7ecb78b"
}
],
"mains": [
{
"created_at": "2014-06-05 18:15:15.581285",
"description": "",
"id": "3f3a07ac-7d6f-49e8-8669-40b25ee891b7",
"name": "example.pig",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": None,
"url": "internal-db://80121dea-f8bd-4ad3-bcc7-096f4bfc722d"
}
],
"name": "pigjob",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"type": "Pig",
"updated_at": None
}
job1 = jobs.Job(jobs.JobsManager(None), job1_dict)
TEST.jobs.add(job1)
# Job Executions.
jobex1_dict = {
"cluster_id": "ec9a0d28-5cfb-4028-a0b5-40afe23f1533",
"created_at": "2014-06-05 20:03:06.195937",
"end_time": None,
"id": "4b6c1cbf-c713-49d3-8025-808a87c514a6",
"info": {
"acl": None,
"actions": [
{
"consoleUrl": "-",
"cred": "None",
"data": None,
"endTime": "Thu,05 Jun 2014 20:03:32 GMT",
"errorCode": None,
"errorMessage": None,
"externalChildIDs": None,
"externalId": "-",
"externalStatus": "OK",
"id": "0000000-140604200538581-oozie-hado-W@:start:",
"name": ":start:",
"retries": 0,
"startTime": "Thu,05 Jun 2014 20:03:32 GMT",
"stats": None,
"status": "OK",
"toString": "Action name[:start:] status[OK]",
"trackerUri": "-",
"transition": "job-node",
"type": ":START:"
},
{
"consoleUrl": "fake://console.url",
"cred": "None",
"data": None,
"endTime": None,
"errorCode": None,
"errorMessage": None,
"externalChildIDs": None,
"externalId": "job_201406042004_0001",
"externalStatus": "RUNNING",
"id": "0000000-140604200538581-oozie-hado-W@job-node",
"name": "job-node",
"retries": 0,
"startTime": "Thu,05 Jun 2014 20:03:33 GMT",
"stats": None,
"status": "RUNNING",
"toString": "Action name[job-node] status[RUNNING]",
"trackerUri": "cercluster-master-001:8021",
"transition": None,
"type": "pig"
}
],
"appName": "job-wf",
"appPath": "hdfs://fakepath/workflow.xml",
"conf": "<configuration>fakeconfig</configuration>",
"consoleUrl": "fake://consoleURL",
"createdTime": "Thu,05 Jun 2014 20:03:32 GMT",
"endTime": None,
"externalId": None,
"group": None,
"id": "0000000-140604200538581-oozie-hado-W",
"lastModTime": "Thu,05 Jun 2014 20:03:35 GMT",
"parentId": None,
"run": 0,
"startTime": "Thu,05 Jun 2014 20:03:32 GMT",
"status": "RUNNING",
"toString": "Workflow ...status[RUNNING]",
"user": "hadoop"
},
"input_id": "85884883-3083-49eb-b442-71dd3734d02c",
"job_configs": {
"args": [],
"configs": {},
"params": {}
},
"job_id": "a077b851-46be-4ad7-93c3-2d83894546ef",
"oozie_job_id": "0000000-140604200538581-oozie-hado-W",
"output_id": "426fb01c-5c7e-472d-bba2-b1f0fe7e0ede",
"progress": None,
"return_code": None,
"start_time": "2014-06-05T16:03:32",
"tenant_id": "429ad8447c2d47bc8e0382d244e1d1df",
"updated_at": "2014-06-05 20:03:46.438248"
}
jobex1 = job_executions.JobExecution(
job_executions.JobExecutionsManager(None), jobex1_dict)
TEST.job_executions.add(jobex1)
| 36.518359 | 77 | 0.496037 |
acf171c4a507e1dae27f8db918d2db7865b3f387 | 14,054 | py | Python | cryptoapis/model/list_transactions_by_block_hash_ribsz_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 5 | 2021-05-17T04:45:03.000Z | 2022-03-23T12:51:46.000Z | cryptoapis/model/list_transactions_by_block_hash_ribsz_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | null | null | null | cryptoapis/model/list_transactions_by_block_hash_ribsz_vin.py | Crypto-APIs/Crypto_APIs_2.0_SDK_Python | c59ebd914850622b2c6500c4c30af31fb9cecf0e | [
"MIT"
] | 2 | 2021-06-02T07:32:26.000Z | 2022-02-12T02:36:23.000Z | """
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: developers@cryptoapis.io
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from cryptoapis.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from cryptoapis.exceptions import ApiAttributeError
def lazy_import():
    """Import the ScriptSig model lazily and publish it at module scope."""
    from cryptoapis.model.list_transactions_by_block_hash_ribsz_script_sig import ListTransactionsByBlockHashRIBSZScriptSig as _script_sig
    globals()['ListTransactionsByBlockHashRIBSZScriptSig'] = _script_sig
class ListTransactionsByBlockHashRIBSZVin(ModelNormal):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """
    # No enum-constrained values and no range/length validations for this model.
    allowed_values = {
    }
    validations = {
    }
    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501
    _nullable = False
    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'addresses': ([str],),  # noqa: E501
            'coinbase': (str,),  # noqa: E501
            'script_sig': (ListTransactionsByBlockHashRIBSZScriptSig,),  # noqa: E501
            'sequence': (int,),  # noqa: E501
            'txid': (str,),  # noqa: E501
            'txinwitness': ([str],),  # noqa: E501
            'value': (str,),  # noqa: E501
            'vout': (int,),  # noqa: E501
        }
    @cached_property
    def discriminator():
        return None
    # Maps python attribute names to the JSON keys of the API payload
    # (only script_sig differs from its JSON spelling).
    attribute_map = {
        'addresses': 'addresses',  # noqa: E501
        'coinbase': 'coinbase',  # noqa: E501
        'script_sig': 'scriptSig',  # noqa: E501
        'sequence': 'sequence',  # noqa: E501
        'txid': 'txid',  # noqa: E501
        'txinwitness': 'txinwitness',  # noqa: E501
        'value': 'value',  # noqa: E501
        'vout': 'vout',  # noqa: E501
    }
    read_only_vars = {
    }
    _composed_schemas = {}
    # NOTE: _from_openapi_data and __init__ below are near-duplicates by
    # design (generated code); only __init__ enforces read_only_vars.
    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, addresses, coinbase, script_sig, sequence, txid, txinwitness, value, vout, *args, **kwargs):  # noqa: E501
        """ListTransactionsByBlockHashRIBSZVin - a model defined in OpenAPI
        Args:
            addresses ([str]):
            coinbase (str): Represents the coinbase hex.
            script_sig (ListTransactionsByBlockHashRIBSZScriptSig):
            sequence (int): Represents the script sequence number.
            txid (str): Represents the reference transaction identifier.
            txinwitness ([str]):
            value (str): Defines the specific amount.
            vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.addresses = addresses
        self.coinbase = coinbase
        self.script_sig = script_sig
        self.sequence = sequence
        self.txid = txid
        self.txinwitness = txinwitness
        self.value = value
        self.vout = vout
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self
    # Internal bookkeeping attributes assigned by both constructors.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])
    @convert_js_args_to_python_args
    def __init__(self, addresses, coinbase, script_sig, sequence, txid, txinwitness, value, vout, *args, **kwargs):  # noqa: E501
        """ListTransactionsByBlockHashRIBSZVin - a model defined in OpenAPI
        Args:
            addresses ([str]):
            coinbase (str): Represents the coinbase hex.
            script_sig (ListTransactionsByBlockHashRIBSZScriptSig):
            sequence (int): Represents the script sequence number.
            txid (str): Represents the reference transaction identifier.
            txinwitness ([str]):
            value (str): Defines the specific amount.
            vout (int): It refers to the index of the output address of this transaction. The index starts from 0.
        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        self.addresses = addresses
        self.coinbase = coinbase
        self.script_sig = script_sig
        self.sequence = sequence
        self.txid = txid
        self.txinwitness = txinwitness
        self.value = value
        self.vout = vout
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")
| 45.189711 | 484 | 0.587804 |
acf1723283c25d3b6ee6743251a0ce80c60063e7 | 1,251 | py | Python | packages/basemap/doc/make.py | DWesl/basemap | 8e9a37e09a65b16429b699f7c12fcab754e1a85a | [
"MIT"
] | 1 | 2021-12-26T14:13:11.000Z | 2021-12-26T14:13:11.000Z | packages/basemap/doc/make.py | DWesl/basemap | 8e9a37e09a65b16429b699f7c12fcab754e1a85a | [
"MIT"
] | null | null | null | packages/basemap/doc/make.py | DWesl/basemap | 8e9a37e09a65b16429b699f7c12fcab754e1a85a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import fileinput
import glob
import os
import shutil
import sys
def html():
    """Build the HTML documentation with sphinx-build into build/html."""
    command = 'sphinx-build -b html -d build/doctrees . build/html'
    os.system(command)
def latex():
    """Build the LaTeX docs and compile them to PDF (not supported on Windows)."""
    if sys.platform == 'win32':
        print('latex build has not been tested on windows')
        return
    # Generate the LaTeX sources with Sphinx.
    os.system('sphinx-build -b latex -d build/doctrees . build/latex')
    os.chdir('build/latex')
    # Run pdflatex/makeindex repeatedly so cross-references and the two
    # indices resolve, mirroring the Makefile produced by Sphinx.
    for command in ('pdflatex Basemap.tex',
                    'pdflatex Basemap.tex',
                    'makeindex -s python.ist Basemap.idx',
                    'makeindex -s python.ist modBasemap.idx',
                    'pdflatex Basemap.tex'):
        os.system(command)
    os.chdir('../..')
def clean():
    """Remove the build directory.

    Uses ignore_errors so that cleaning is idempotent: the original
    shutil.rmtree('build') raised FileNotFoundError when the directory
    did not exist (e.g. running clean twice in a row).
    """
    shutil.rmtree('build', ignore_errors=True)
def all():
    """Run every build step: HTML first, then LaTeX/PDF.

    NOTE: the name shadows the builtin all(); kept because the command-line
    dispatch table below exposes it as the 'all' target.
    """
    for build_step in (html, latex):
        build_step()
# Command-line dispatch: each argument names a build step; no arguments
# means "build everything".
funcd = {
    'html': html,
    'latex': latex,
    'clean': clean,
    'all': all,
    }
if len(sys.argv) > 1:
    for arg in sys.argv[1:]:
        func = funcd.get(arg)
        if func is None:
            # BUG FIX: the original format string had a single %s for two
            # values, which raised TypeError instead of this message.
            raise SystemExit('Do not know how to handle %s; valid args are %s' % (
                arg, list(funcd.keys())))
        func()
else:
    all()
| 22.339286 | 77 | 0.57554 |
acf17259394a232f21a694973605e3b557e15eef | 3,125 | py | Python | python/ml_assignment/formal/regular/nn.py | freestyletime/Practices4Python | 95bcd5211182d59d7f4e5856eecdef24ef2b278c | [
"Unlicense",
"MIT"
] | null | null | null | python/ml_assignment/formal/regular/nn.py | freestyletime/Practices4Python | 95bcd5211182d59d7f4e5856eecdef24ef2b278c | [
"Unlicense",
"MIT"
] | null | null | null | python/ml_assignment/formal/regular/nn.py | freestyletime/Practices4Python | 95bcd5211182d59d7f4e5856eecdef24ef2b278c | [
"Unlicense",
"MIT"
] | null | null | null | import random
import numpy as np
def sigmoid(z):
    """Elementwise logistic function: 1 / (1 + exp(-z))."""
    denominator = 1.0 + np.exp(-z)
    return 1.0 / denominator
def sigmoid_prime(z):
    """Derivative of the sigmoid: s(z) * (1 - s(z)).

    Evaluates sigmoid once instead of twice as the original did; the
    result is numerically identical.
    """
    s = sigmoid(z)
    return s * (1 - s)
class NeuralNetwork(object):
    """Fully-connected feedforward network trained with SGD and backprop.

    ``sizes`` lists the neuron count per layer; weights and biases are
    drawn from a standard normal distribution.
    """
    def __init__(self, sizes):
        self.num_layers = len(sizes)
        self.sizes = sizes
        # One bias column vector per non-input layer.
        self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
        # weights[i] has shape (sizes[i+1], sizes[i]).
        self.weights = [np.random.randn(y, x) for x, y in zip(sizes[:-1], sizes[1:])]
    def feedforward(self, input):
        # NOTE: the parameter name shadows the builtin ``input``.
        for b, w in zip(self.biases, self.weights):
            input = sigmoid(np.dot(w, input) + b)
        return input
    def update_mini_batch(self, mini_batch, eta):
        # Accumulate the gradient over the whole batch, then take one
        # gradient-descent step of size eta averaged over the batch.
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        for x, y in mini_batch:
            delta_nabla_b, delta_nabla_w = self.backprop(x, y)
            nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
            nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
        self.weights = [w-(eta/len(mini_batch))*nw for w, nw in zip(self.weights, nabla_w)]
        self.biases = [b-(eta/len(mini_batch))*nb for b, nb in zip(self.biases, nabla_b)]
    def backprop(self, x, y):
        """Return (nabla_b, nabla_w): per-layer gradients of the cost."""
        nabla_b = [np.zeros(b.shape) for b in self.biases]
        nabla_w = [np.zeros(w.shape) for w in self.weights]
        # feedforward: record every pre-activation (zs) and activation.
        activation = x
        activations = [x]
        zs = []
        for b, w in zip(self.biases, self.weights):
            z = np.dot(w, activation)+b
            zs.append(z)
            activation = sigmoid(z)
            activations.append(activation)
        # backward pass: output-layer error first.
        delta = self.cost_derivative(activations[-1], y) * sigmoid_prime(zs[-1])
        nabla_b[-1] = delta
        nabla_w[-1] = np.dot(delta, activations[-2].transpose())
        # Propagate the error backwards; l counts layers from the end
        # (l = 2 is the second-to-last layer).
        for l in range(2, self.num_layers):
            z = zs[-l]
            sp = sigmoid_prime(z)
            delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
            nabla_b[-l] = delta
            nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
        return (nabla_b, nabla_w)
    def evaluate(self, test_data):
        # Predicted class = index of the strongest output activation.
        # Assumes each y in test_data is an integer label — TODO confirm.
        test_results = [(np.argmax(self.feedforward(x)), y) for (x, y) in test_data]
        return sum(int(x == y) for (x, y) in test_results)
    def cost_derivative(self, output_activations, y):
        # Gradient of the quadratic cost 0.5*||a - y||^2 w.r.t. the activations.
        return (output_activations-y)
    def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        """Train with mini-batch stochastic gradient descent.

        Shuffles ``training_data`` in place each epoch; when ``test_data``
        is given, prints the evaluation score after every epoch.
        """
        if test_data: n_test = len(test_data)
        n = len(training_data)
        for j in range(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k+mini_batch_size]
                for k in range(0, n, mini_batch_size)]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test))
            else:
                print("Epoch {0} complete".format(j))
acf1732c830e94c6bdff7ef232be02dfa75990af | 6,785 | py | Python | src/rl_with_teachers/teachers/path.py | NagisaZj/ac-teach | 481811d5c80d0dbee54f16c063b4ea3262b82050 | [
"MIT"
] | 19 | 2019-10-19T09:05:08.000Z | 2022-01-27T13:36:37.000Z | src/rl_with_teachers/teachers/path.py | NagisaZj/ac-teach | 481811d5c80d0dbee54f16c063b4ea3262b82050 | [
"MIT"
] | 13 | 2019-12-07T12:47:20.000Z | 2022-01-13T01:44:35.000Z | src/rl_with_teachers/teachers/path.py | NagisaZj/ac-teach | 481811d5c80d0dbee54f16c063b4ea3262b82050 | [
"MIT"
] | 8 | 2019-10-24T23:36:58.000Z | 2022-01-27T13:36:39.000Z | from rl_with_teachers.teachers.base import TeacherPolicy
class OptimalPathAgent(TeacherPolicy):
"""
A path agent that just goes straight to current goal at maximum seed.
"""
def __init__(self, env, noise=None, adversarial=False):
self.env = env
self.noise = noise
self.adversarial = adversarial
def __call__(self, obs):
loc = obs[:self.env.dims]
goal = self.env.current_goal
if self.env.current_goal is not None:
action = np.clip((self.env.current_goal - loc) / self.env.max_action_val, -1.0 , 1.0)
if self.adversarial:
action *= -1. # move away from target instead of towards target
else:
action = self.env.action_space.sample()
return self.apply_noise(action)
class OptimalPathHalfwayAgent(OptimalPathAgent):
"""
For Path environment.
This agent always reaches for the midpoint between the start and the current goal.
"""
def __init__(self, env, noise=None):
self.env = env
self.noise = noise
self.adversarial = False
def __call__(self, obs):
if self.env.current_goal_idx >= len(self.env.points_ordering):
path_id = len(self.env.points_ordering) - 1
else:
path_id = self.env.current_goal_idx
if path_id == 0:
start = np.zeros(self.env.dims)
else:
start = self.env.path_points[self.env.points_ordering[path_id - 1]]
goal = self.env.path_points[self.env.points_ordering[path_id]]
halfway = (start + goal) / 2.
offset = goal - start
loc = obs[:self.env.dims]
action = np.clip((halfway - loc) / self.env.max_action_val, -1.0 , 1.0)
# action = np.array([0., 0., 0., 1.])
# action[:3] = (self.halfway + self.offset * 0.05 - obs[0:3]) * 3.0
return self.apply_noise(action)
class OptimalPathSwitchAgent(OptimalPathAgent):
"""
For Path environment.
This agent reaches for the start position or the current goal position, depending on
which is closer.
"""
def __init__(self, env, noise=None):
self.env = env
self.noise = noise
self.adversarial = False
def __call__(self, obs):
if self.env.current_goal_idx >= len(self.env.points_ordering):
path_id = len(self.env.points_ordering) - 1
else:
path_id = self.env.current_goal_idx
if path_id == 0:
start = np.zeros(self.env.dims)
else:
start = self.env.path_points[self.env.points_ordering[path_id - 1]]
goal = self.env.path_points[self.env.points_ordering[path_id]]
offset = goal - start
loc = obs[:self.env.dims]
if np.linalg.norm(loc - goal) < np.linalg.norm(loc - start):
to_reach = goal
# goal_now = self.goal - self.offset * 0.15 # 85% of the way to the true goal
else:
to_reach = start
# goal_now = self.start + self.offset * 0.05 # 5% of the way from the start
action = np.clip((to_reach - loc) / self.env.max_action_val, -1.0 , 1.0)
return self.apply_noise(action)
class AxisAlignedPathAgent(OptimalPathAgent):
"""
For Path environment.
A suboptimal agent that only moves along one axis towards goal (and so never gets to it,
if any other axis requires movement).
"""
def __init__(self, env, axis, noise=None, adversarial=False):
self.env = env
self.axis = axis
self.noise = noise
self.adversarial = adversarial
def __call__(self, obs):
action = super().__call__(obs)
for i in range(len(action)):
if i!=self.axis:
action[i]=0
return self.apply_noise(action)
class OneGoalPathAgent(OptimalPathAgent):
"""
For Path environment.
A suboptimal agent that only gets to one location in env, and then does nothing.
"""
def __init__(self, env, goal, noise=None, adversarial=False):
self.env = env
self.goal = goal
self.noise = noise
self.adversarial = adversarial
def is_callable(self, obs):
loc = obs[:self.env.dims]
return np.sum(np.abs(loc-self.goal)) > 0.01
def __call__(self, obs):
loc = obs[:self.env.dims]
action = np.clip((self.goal - loc) / self.env.max_action_val, -1.0 , 1.0)
if self.adversarial:
action *= -1. # move away from target instead of towards target
return self.apply_noise(action)
class OneGoalPathHalfwayAgent(OptimalPathAgent):
"""
For Path environment with single start and goal (subtask). This agent
always reaches for the midpoint between the start and the goal.
"""
def __init__(self, env, path_id, noise=None):
self.env = env
self.path_id = path_id # which section of path this teacher is responsible for
self.noise = noise
self.adversarial = False
def __call__(self, obs):
if self.path_id == 0:
start = np.zeros(self.env.dims)
else:
start = self.env.path_points[self.env.points_ordering[self.path_id - 1]]
goal = self.env.path_points[self.env.points_ordering[self.path_id]]
halfway = (start + goal) / 2.
offset = goal - start
loc = obs[:self.env.dims]
action = np.clip((halfway - loc) / self.env.max_action_val, -1.0 , 1.0)
# action = np.array([0., 0., 0., 1.])
# action[:3] = (self.halfway + self.offset * 0.05 - obs[0:3]) * 3.0
return self.apply_noise(action)
class OneGoalPathSwitchAgent(OptimalPathAgent):
"""
For Path environment with single start and goal (subtask). This agent
reaches for the start position or the goal position, depending on
which is closer.
"""
def __init__(self, env, path_id, noise=None):
self.env = env
self.path_id = path_id
self.noise = noise
self.adversarial = False
def __call__(self, obs):
if self.path_id == 0:
start = np.zeros(self.env.dims)
else:
start = self.env.path_points[self.env.points_ordering[self.path_id - 1]]
goal = self.env.path_points[self.env.points_ordering[self.path_id]]
offset = goal - start
loc = obs[:self.env.dims]
if np.linalg.norm(loc - goal) < np.linalg.norm(loc - start):
to_reach = goal
# goal_now = self.goal - self.offset * 0.15 # 85% of the way to the true goal
else:
to_reach = start
# goal_now = self.start + self.offset * 0.05 # 5% of the way from the start
action = np.clip((to_reach - loc) / self.env.max_action_val, -1.0 , 1.0)
return self.apply_noise(action)
| 36.283422 | 97 | 0.60899 |
acf173d9ec5e066e6307aebe15d355178e959cbc | 871 | py | Python | prometheus-monitoring/python-application/src/server.py | kumarshivam12/cicd-pileline | 8aaef55e7e13f5e4c9e12695af67af3b3706cd90 | [
"MIT"
] | null | null | null | prometheus-monitoring/python-application/src/server.py | kumarshivam12/cicd-pileline | 8aaef55e7e13f5e4c9e12695af67af3b3706cd90 | [
"MIT"
] | null | null | null | prometheus-monitoring/python-application/src/server.py | kumarshivam12/cicd-pileline | 8aaef55e7e13f5e4c9e12695af67af3b3706cd90 | [
"MIT"
] | null | null | null | from flask import Response, Flask, request
import prometheus_client
from prometheus_client.core import CollectorRegistry
from prometheus_client import Summary, Counter, Histogram, Gauge
import time
app = Flask(__name__)
_INF = float("inf")
graphs = {}
graphs['c'] = Counter('python_request_operations_total', 'The total number of processed requests')
graphs['h'] = Histogram('python_request_duration_seconds', 'Histogram for the duration in seconds.', buckets=(1, 2, 5, 6, 10, _INF))
@app.route("/")
def hello():
start = time.time()
graphs['c'].inc()
time.sleep(0.600)
end = time.time()
graphs['h'].observe(end - start)
return "Hello World!"
@app.route("/metrics")
def requests_count():
res = []
for k,v in graphs.items():
res.append(prometheus_client.generate_latest(v))
return Response(res, mimetype="text/plain")
| 27.21875 | 132 | 0.699196 |
acf17484b80891c34ad38c2b5f4345b7dee7a1cf | 317 | py | Python | cesar_decryptage(base).py | alexandre-o/XOR-isn-projet-python- | 0e2bf4f7ba91f7f615710bcacc2275882765128f | [
"CC0-1.0"
] | null | null | null | cesar_decryptage(base).py | alexandre-o/XOR-isn-projet-python- | 0e2bf4f7ba91f7f615710bcacc2275882765128f | [
"CC0-1.0"
] | null | null | null | cesar_decryptage(base).py | alexandre-o/XOR-isn-projet-python- | 0e2bf4f7ba91f7f615710bcacc2275882765128f | [
"CC0-1.0"
] | null | null | null | message_decrypter=input("Votre message a decrypter:")
cle=int(input("Nombre de decalage ?:"))
longueur=len(message_decrypter)
i=0
alph=""
resultat=""
for i in range(longueur):
asc=ord(message_decrypter[i])
if asc>=65 or asc<=90:
asc=asc-cle
resultat=resultat+chr(asc)
print (resultat) | 24.384615 | 54 | 0.675079 |
acf1772f637775a281d27d80d1752424e69246b5 | 385 | py | Python | python/ABC/166/e.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/166/e.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | python/ABC/166/e.py | KATO-Hiro/atcoder-1 | c2cbfcfd5c3d46ac9810ba330a37d437aa2839c2 | [
"MIT"
] | null | null | null | import sys
input = sys.stdin.readline
def main():
N = int(input())
A = list(map(int, input().split()))
ans = 0
b = {}
for i in range(N):
l = A[i] + (i+1)
r = (i+1) - A[i]
if r in b:
ans += b[r]
if l in b:
b[l] += 1
else:
b[l] = 1
print(ans)
if __name__ == "__main__":
main()
| 14.807692 | 39 | 0.392208 |
acf1788a2e4686b07447ce24ed5a2d9d105604b2 | 1,872 | py | Python | voc_annotation_medical.py | qzpzd/unet-tf2 | 3a9796908b6198804071d3e71a8959a13526ca5e | [
"MIT"
] | 281 | 2021-03-12T02:05:56.000Z | 2022-03-31T08:48:55.000Z | voc_annotation_medical.py | voilet-zkl/unet-tf2 | 059d4723472b451e5633727492bf6712f3107ac3 | [
"MIT"
] | 21 | 2021-04-16T03:10:50.000Z | 2022-03-26T16:05:20.000Z | voc_annotation_medical.py | voilet-zkl/unet-tf2 | 059d4723472b451e5633727492bf6712f3107ac3 | [
"MIT"
] | 89 | 2021-03-12T02:43:40.000Z | 2022-03-30T08:54:14.000Z | import os
import random
#----------------------------------------------------------------------#
# 医药数据集的例子没有验证集
#----------------------------------------------------------------------#
trainval_percent = 1
train_percent = 1
#-------------------------------------------------------#
# 指向医药数据集所在的文件夹
# 默认指向根目录下的Medical_Datasets
#-------------------------------------------------------#
VOCdevkit_path = 'Medical_Datasets'
if __name__ == "__main__":
    random.seed(0)  # fixed seed -> reproducible split across runs
    print("Generate txt in ImageSets.")
    segfilepath     = os.path.join(VOCdevkit_path, 'Labels')
    saveBasePath    = os.path.join(VOCdevkit_path, 'ImageSets/Segmentation')

    # Every .png file in Labels/ is one annotated sample id.
    total_seg = [seg for seg in os.listdir(segfilepath) if seg.endswith(".png")]

    num = len(total_seg)
    indices = range(num)  # renamed from 'list' to stop shadowing the builtin
    tv = int(num * trainval_percent)  # size of the trainval split
    tr = int(tv * train_percent)      # size of the train split within trainval
    trainval = random.sample(indices, tv)
    train = random.sample(trainval, tr)

    print("train and val size", tv)
    print("train size", tr)  # fixed typo: was "traub suze"

    # Sets give O(1) membership tests; 'with' guarantees the four index
    # files are closed even if a write fails (original leaked them on error).
    trainval_set = set(trainval)
    train_set = set(train)
    with open(os.path.join(saveBasePath, 'trainval.txt'), 'w') as ftrainval, \
         open(os.path.join(saveBasePath, 'test.txt'), 'w') as ftest, \
         open(os.path.join(saveBasePath, 'train.txt'), 'w') as ftrain, \
         open(os.path.join(saveBasePath, 'val.txt'), 'w') as fval:
        for i in indices:
            name = total_seg[i][:-4] + '\n'  # strip ".png", keep newline
            if i in trainval_set:
                ftrainval.write(name)
                if i in train_set:
                    ftrain.write(name)
                else:
                    fval.write(name)
            else:
                ftest.write(name)
    print("Generate txt in ImageSets done.")
| 32.842105 | 77 | 0.475962 |
acf178bb409259a7cefeb96bdd555535104841e6 | 893 | py | Python | erpnext_ocr/tests/test_config_docs.py | Compres/erpnext_ocr | 7c9398ab4f5b81a416f8d05edd12b341c080557c | [
"MIT"
] | 1 | 2020-12-05T01:41:42.000Z | 2020-12-05T01:41:42.000Z | erpnext_ocr/tests/test_config_docs.py | mohsinalimat/erpnext_ocr | 3f78ceb60896aee80219bae3d003203f7ec7f0ae | [
"MIT"
] | null | null | null | erpnext_ocr/tests/test_config_docs.py | mohsinalimat/erpnext_ocr | 3f78ceb60896aee80219bae3d003203f7ec7f0ae | [
"MIT"
] | 1 | 2021-04-25T02:43:33.000Z | 2021-04-25T02:43:33.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Monogramm and Contributors
# For license information, please see license.txt
import unittest
from erpnext_ocr.config.docs import get_context
class TestDocs(unittest.TestCase):
    """Checks that get_context fills in every docs-page attribute."""

    def test_get_context(self):
        attrs = {
            'brand_html': None,
            'source_link': None,
            'docs_base_url': None,
            'headline': None,
            'sub_heading': None,
        }
        # Build a throwaway object whose attributes start out as None.
        context = type('obj', (object,), dict(attrs))

        get_context(context)

        self.assertIsNotNone(context)
        # Every attribute must have been populated by get_context.
        for attr_name in attrs:
            self.assertIsNotNone(getattr(context, attr_name))
| 35.72 | 65 | 0.577828 |
acf179ab5ff14056590b2ac120acba4d5d6b3577 | 1,236 | py | Python | setup.py | Levantado/Flask-Argonaut | 7c998123786721ec710613bb432a7a2692a6f7c2 | [
"BSD-3-Clause"
] | 2 | 2017-09-26T20:59:04.000Z | 2019-01-09T17:48:08.000Z | setup.py | Levantado/Flask-Argonaut | 7c998123786721ec710613bb432a7a2692a6f7c2 | [
"BSD-3-Clause"
] | 1 | 2019-02-08T15:20:36.000Z | 2019-02-08T16:51:54.000Z | setup.py | Levantado/Flask-Argonaut | 7c998123786721ec710613bb432a7a2692a6f7c2 | [
"BSD-3-Clause"
] | null | null | null | """
Flask-Argonaut
------------
Argon2 hashing for your Flask.
"""
from setuptools import setup
import re, io
# Read the package version out of flask_argonaut/__init__.py without
# importing the package (which would require its dependencies).
# A 'with' block closes the file handle (the original leaked it).
with io.open('flask_argonaut/__init__.py', encoding='utf_8_sig') as init_file:
    __version__ = re.search(
        r'__version__\s*=\s*[\'"]([^\'"]*)[\'"]',  # excludes inline comments too
        init_file.read()
    ).group(1)

setup(name='Flask-Argonaut',
      version=__version__,
      description='Flask extension use hashing data with Argon2',
      author='Anton Oleynik',
      author_email='levantado@me.com',
      license='BSD',
      # Fixed: 'urls' is not a setup() keyword and was silently ignored;
      # the correct keyword for the project home page is 'url'.
      url='https://github.com/Levantado/Flask-Argonaut',
      download_url = 'https://github.com/Levantado/Flask-Argonaut/tarball/master',
      packages=['flask_argonaut'],
      platforms='any',
      # NOTE(review): the common PyPI distribution for Argon2 hashing is
      # 'argon2-cffi'; confirm the bare 'argon2' package is really intended.
      install_requires=['Flask', 'argon2'],
      classifiers=[
          'Environment :: Web Environment',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 3.6',
          'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
          'Topic :: Software Development :: Libraries :: Python Modules'
      ],
      zip_safe=False,
      test_suite='test')
| 33.405405 | 82 | 0.60356 |
acf179ff5d99bf7448a00e61737a029c826e656f | 3,333 | py | Python | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | src/unv/deploy/components/app/__init__.py | c137digital/unv_deploy | 7cb8e6902f4b3eff7e095f83e71d10582f023ab9 | [
"MIT"
] | null | null | null | import asyncio
from pathlib import Path
from typing import Generator
from watchgod import awatch
from ...tasks import DeployTasks, nohost, register
from ...settings import DeployComponentSettings
from ..systemd import SystemdTasksMixin
class AppSettings(DeployComponentSettings):
    """Deploy settings for the 'app' component.

    SCHEMA validates user-provided settings; DEFAULT supplies fallbacks.
    Both are interpreted by the DeployComponentSettings base class
    (not visible here), which also provides ``self._data`` and paths
    such as ``home_abs`` and ``local_root``.
    """
    NAME = 'app'
    SCHEMA = {
        'instance': {'type': 'integer'},
        'bin': {'type': 'string', 'required': True},
        'user': {'type': 'string', 'required': False},
        'settings': {'type': 'string'},
        # Delegates validation of the 'systemd' subsection to the mixin.
        'systemd': SystemdTasksMixin.SCHEMA,
        # List of {local, remote, exclude} directory-sync descriptors.
        'watch': {
            'type': 'list',
            'schema': {'type': 'dict', 'schema': {
                'local': {'type': 'string'},
                'remote': {'type': 'string'},
                'exclude': {'type': 'list', 'schema': {'type': 'string'}}
            }}
        },
    }
    DEFAULT = {
        'bin': 'app.sh',
        'instance': 1,
        'settings': '',
        'systemd': {
            'template': 'app.service',
            # Placeholders are filled in elsewhere (presumably by the
            # systemd mixin) with this settings object and instance number.
            'name': '{settings.NAME}_{instance}.service',
            'boot': True,
            'type': 'simple',
            'instances': {'count': 0, 'percent': 0},
            'context': {
                'limit_nofile': 2000,
                'description': "Application description",
            }
        },
        'watch': [
            {'local': './somedir', 'remote': './some'}
        ]
    }

    @property
    def bin(self) -> Path:
        """Absolute path to the app launcher script.

        Relative paths are resolved against the component home directory.
        """
        bin_path = self._data['bin'].format(settings=self)
        if not bin_path.startswith('/'):
            return self.home_abs / bin_path
        return Path(bin_path)

    @property
    def module(self) -> str:
        """Settings module string from the 'settings' key (may be empty)."""
        return self._data['settings']

    @property
    def instance(self) -> int:
        """Configured instance number (default 1)."""
        return self._data['instance']

    @property
    def watch_dirs(self):
        """Yield normalized watch descriptors: local Path, remote string,
        and an (optionally missing) exclude list."""
        for info in self._data['watch']:
            yield {
                'local': Path(info['local']),
                'remote': info['remote'],
                'exclude': info.get('exclude', [])
            }


# Module-level singleton used by AppTasks and external consumers.
SETTINGS = AppSettings()
class AppTasks(DeployTasks, SystemdTasksMixin):
    """Deploy tasks for the 'app' component: build, sync, watch, setup."""
    SETTINGS = AppSettings

    @register
    @nohost
    async def watch(self):
        """Watch every configured local dir and re-sync all manager tasks.

        Runs one watcher per (watch dir, manager task) combination
        concurrently; @nohost presumably marks this as a local-only task.
        """
        await asyncio.gather(*[
            self._watch_and_sync_dir(dir_info, task)
            for dir_info in self.settings.watch_dirs
            for task in self.get_all_manager_tasks(self.get_namespace())
        ])

    async def _watch_and_sync_dir(self, dir_info, task):
        """On every local change, upload the dir to the task's host and restart."""
        async for _ in awatch(dir_info['local']):
            # Temporarily switch to the remote task's host/user for the upload.
            with self._set_host(task.host), self._set_user(task.user):
                await self._upload(
                    dir_info['local'], dir_info['remote'],
                    exclude=dir_info['exclude']
                )
                await self.restart()

    async def build(self):
        """Define build instructions for your app"""
        await self._apt_install('rsync')
        await self._create_user()

    @register
    async def sync(self, type_=''):
        """Upload the launcher script, mark it executable, sync systemd units."""
        await self._upload(
            self.settings.local_root / self.settings.bin.name,
            self.settings.home_abs
        )
        await self._run(f'chmod +x {self.settings.bin}')
        await self._sync_systemd_units()

    @register
    async def setup(self):
        """Full provisioning: build, then sync files, then start the service."""
        await self.build()
        await self.sync()
        await self.start()
| 28.008403 | 73 | 0.533753 |
acf17ad6c27ff443c50eb39b54b22859eb02abd7 | 1,002 | py | Python | isicarchive/vars.py | neuroelf/isic-archive | 3250009693bbfa1457a3df2d647a17a977af52dd | [
"MIT"
] | 2 | 2020-08-05T13:21:11.000Z | 2020-11-06T18:23:16.000Z | isicarchive/vars.py | neuroelf/isic-archive | 3250009693bbfa1457a3df2d647a17a977af52dd | [
"MIT"
] | null | null | null | isicarchive/vars.py | neuroelf/isic-archive | 3250009693bbfa1457a3df2d647a17a977af52dd | [
"MIT"
] | null | null | null | """
isicarchive variables
constants
---------
ISIC_API_URI : str
current API URI
ISIC_BASE_URL : str
hostname of ISIC Archive, including https:// protocol id
"""
from .version import __version__
# IsicApi: default URL/URI of the ISIC Archive REST API
ISIC_API_URI = '/api/v1'
ISIC_API_TIMEOUT = 30.0  # request timeout (presumably seconds -- confirm at call sites)
ISIC_BASE_URL = 'https://isic-archive.com'

# IsicApi: dataset cache settings
ISIC_DATASET_GRACE_PERIOD = 7 * 86400  # 7 days, expressed in seconds

# IsicApi: image cache settings
ISIC_IMAGE_CACHE_UPDATE_LASTS = 3600.0 # minimum time between updates in seconds
ISIC_IMAGES_PER_CACHING = 3000 # number of image detail items per get(...) call

# IsicApi: segmentation cache settings
ISIC_SEG_SAVE_EVERY = 50  # persist the segmentation cache every N items
ISIC_SEG_GRACE_PERIOD = 30 * 86400  # 30 days, expressed in seconds

# IsicApi: study cache settings
ISIC_STUDY_GRACE_PERIOD = 7 * 86400  # 7 days, expressed in seconds

# func: screen settings
ISIC_FUNC_PPI = 72  # pixels per inch assumed for on-screen rendering

# Image: default DICE resampling size
ISIC_DICE_SHAPE = (512,512)

# Image: display settings
ISIC_IMAGE_DISPLAY_SIZE_MAX = 480  # max display edge length in pixels

# Study: load_images settings
ISIC_IMAGE_DETAILS_PER_REQUEST = 250
| 22.266667 | 80 | 0.770459 |
acf17b306554ca5c16360863f659ced32e5e14b5 | 10,209 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_quota_by_period_keys_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 1 | 2020-03-05T18:10:35.000Z | 2020-03-05T18:10:35.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_quota_by_period_keys_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | 2 | 2020-03-03T23:11:13.000Z | 2020-03-30T18:50:55.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/aio/operations/_quota_by_period_keys_operations.py | adewaleo/azure-sdk-for-python | 169457edbea5e3c5557246cfcf8bd635d528bae4 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QuotaByPeriodKeysOperations:
    """QuotaByPeriodKeysOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.apimanagement.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.

    .. note::
       This class is auto-generated (see file header); hand edits will be
       lost on regeneration.
    """

    models = models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        quota_counter_key: str,
        quota_period_key: str,
        **kwargs
    ) -> "models.QuotaCounterContract":
        """Gets the value of the quota counter associated with the counter-key in the policy for the
        specific period in service instance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param quota_counter_key: Quota counter key identifier.This is the result of expression defined
         in counter-key attribute of the quota-by-key policy.For Example, if you specify counter-
         key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined as
         counter-key="@("b"+"a")" then it will be accessible by "ba" key.
        :type quota_counter_key: str
        :param quota_period_key: Quota period key identifier.
        :type quota_period_key: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: QuotaCounterContract, or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.QuotaCounterContract"]
        # Map auth/not-found/conflict status codes to typed azure-core errors.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-06-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
            'quotaCounterKey': self._serialize.url("quota_counter_key", quota_counter_key, 'str'),
            'quotaPeriodKey': self._serialize.url("quota_period_key", quota_period_key, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('QuotaCounterContract', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/quotas/{quotaCounterKey}/periods/{quotaPeriodKey}'} # type: ignore

    async def update(
        self,
        resource_group_name: str,
        service_name: str,
        quota_counter_key: str,
        quota_period_key: str,
        calls_count: Optional[int] = None,
        kb_transferred: Optional[float] = None,
        **kwargs
    ) -> "models.QuotaCounterContract":
        """Updates an existing quota counter value in the specified service instance.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the API Management service.
        :type service_name: str
        :param quota_counter_key: Quota counter key identifier.This is the result of expression defined
         in counter-key attribute of the quota-by-key policy.For Example, if you specify counter-
         key="boo" in the policy, then it’s accessible by "boo" counter key. But if it’s defined as
         counter-key="@("b"+"a")" then it will be accessible by "ba" key.
        :type quota_counter_key: str
        :param quota_period_key: Quota period key identifier.
        :type quota_period_key: str
        :param calls_count: Number of times Counter was called.
        :type calls_count: int
        :param kb_transferred: Data Transferred in KiloBytes.
        :type kb_transferred: float
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: QuotaCounterContract, or the result of cls(response)
        :rtype: ~azure.mgmt.apimanagement.models.QuotaCounterContract
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.QuotaCounterContract"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Request body wrapping the two optional counter fields.
        _parameters = models.QuotaCounterValueUpdateContract(calls_count=calls_count, kb_transferred=kb_transferred)
        api_version = "2020-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.update.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str', max_length=50, min_length=1, pattern=r'^[a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?$'),
            'quotaCounterKey': self._serialize.url("quota_counter_key", quota_counter_key, 'str'),
            'quotaPeriodKey': self._serialize.url("quota_period_key", quota_period_key, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(_parameters, 'QuotaCounterValueUpdateContract')
        body_content_kwargs['content'] = body_content
        # PATCH: partial update of the quota counter for the given period.
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('QuotaCounterContract', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ApiManagement/service/{serviceName}/quotas/{quotaCounterKey}/periods/{quotaPeriodKey}'} # type: ignore
| 52.086735 | 221 | 0.684396 |
acf17ba623f4bd97e39db5480c09bc07da42b87d | 3,571 | py | Python | synapse/streams/events.py | chagai95/synapse | 115da16824fce92684a31d4d2c13d4ff56670b94 | [
"Apache-2.0"
] | null | null | null | synapse/streams/events.py | chagai95/synapse | 115da16824fce92684a31d4d2c13d4ff56670b94 | [
"Apache-2.0"
] | null | null | null | synapse/streams/events.py | chagai95/synapse | 115da16824fce92684a31d4d2c13d4ff56670b94 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Iterator, Tuple
import attr
from synapse.handlers.account_data import AccountDataEventSource
from synapse.handlers.presence import PresenceEventSource
from synapse.handlers.receipts import ReceiptEventSource
from synapse.handlers.room import RoomEventSource
from synapse.handlers.typing import TypingNotificationEventSource
from synapse.streams import EventSource
from synapse.types import StreamToken
if TYPE_CHECKING:
from synapse.server import HomeServer
@attr.s(frozen=True, slots=True, auto_attribs=True)
class _EventSourcesInner:
    """Immutable container of the per-stream event sources, one per field."""
    room: RoomEventSource
    presence: PresenceEventSource
    typing: TypingNotificationEventSource
    receipt: ReceiptEventSource
    account_data: AccountDataEventSource

    def get_sources(self) -> Iterator[Tuple[str, EventSource]]:
        """Yield (name, source) pairs in attribute-declaration order."""
        for attribute in attr.fields(_EventSourcesInner):
            yield attribute.name, getattr(self, attribute.name)
class EventSources:
    """Aggregates all event sources and builds composite StreamTokens."""

    def __init__(self, hs: "HomeServer"):
        self.sources = _EventSourcesInner(
            # mypy thinks attribute.type is `Optional`, but we know it's never `None` here since
            # all the attributes of `_EventSourcesInner` are annotated.
            *(attribute.type(hs) for attribute in attr.fields(_EventSourcesInner))  # type: ignore[misc]
        )
        self.store = hs.get_datastores().main

    def get_current_token(self) -> StreamToken:
        """Build a StreamToken from the current position of every stream."""
        push_rules_key = self.store.get_max_push_rules_stream_id()
        to_device_key = self.store.get_to_device_stream_token()
        device_list_key = self.store.get_device_stream_token()
        groups_key = self.store.get_group_stream_token()

        token = StreamToken(
            room_key=self.sources.room.get_current_key(),
            presence_key=self.sources.presence.get_current_key(),
            typing_key=self.sources.typing.get_current_key(),
            receipt_key=self.sources.receipt.get_current_key(),
            account_data_key=self.sources.account_data.get_current_key(),
            push_rules_key=push_rules_key,
            to_device_key=to_device_key,
            device_list_key=device_list_key,
            groups_key=groups_key,
        )
        return token

    async def get_current_token_for_pagination(self, room_id: str) -> StreamToken:
        """Get the current token for a given room to be used to paginate
        events.

        The returned token does not have the current values for fields other
        than `room`, since they are not used during pagination.

        Returns:
            The current token for pagination.
        """
        # Only the room position matters for pagination; all other
        # stream positions are zeroed out.
        token = StreamToken(
            room_key=await self.sources.room.get_current_key_for_room(room_id),
            presence_key=0,
            typing_key=0,
            receipt_key=0,
            account_data_key=0,
            push_rules_key=0,
            to_device_key=0,
            device_list_key=0,
            groups_key=0,
        )
        return token
| 37.989362 | 104 | 0.706525 |
acf17bb0bca3e18f5858ea5cba422b302762a60a | 1,051 | py | Python | square.py | atishbits/101 | 4b4a8e56d82fe2706f065ded7877deebe8f6164f | [
"MIT"
] | null | null | null | square.py | atishbits/101 | 4b4a8e56d82fe2706f065ded7877deebe8f6164f | [
"MIT"
] | null | null | null | square.py | atishbits/101 | 4b4a8e56d82fe2706f065ded7877deebe8f6164f | [
"MIT"
] | null | null | null | #https://www.hackerearth.com/practice/algorithms/searching/binary-search/practice-problems/algorithm/square-transaction-20/
#1 2 1 3 4
#[1, 3, 4, 7, 11]
def getsum(arr):
    """Return the running (prefix) sums of arr; elements may be strings."""
    prefix_sums = []
    running = 0
    for value in arr:
        running += int(value)
        prefix_sums.append(running)
    return prefix_sums
def binsearch(arr, start, end, req):
    """Binary search a sorted prefix-sum array for the first index i
    with arr[i] >= req.

    Callers must guarantee arr[start] <= req <= arr[end] (the main block
    pre-checks both boundaries before calling).
    """
    # '//' keeps the midpoint an integer on both Python 2 and Python 3;
    # the original '/' produced a float index under Python 3.
    mid = (start + end) // 2
    if arr[mid] == req:
        return mid
    if arr[mid] > req:
        # arr[mid] overshoots; if the previous entry undershoots, mid is
        # the first index reaching req, otherwise keep searching left.
        if arr[mid - 1] < req:
            return mid
        return binsearch(arr, start, mid - 1, req)
    # arr[mid] < req: answer lies strictly to the right.
    return binsearch(arr, mid + 1, end, req)
# Python 2 script (print statement / raw_input): reads T items, builds their
# prefix sums, then answers Q queries: the 1-based index of the first prefix
# sum >= req, or -1 if req exceeds the total.
if __name__ == "__main__":
    T = int(raw_input())
    arr = raw_input().split()
    sum_arr = getsum(arr)
    print sum_arr
    Q = int(raw_input())
    while (Q):
        req = int(raw_input())
        if sum_arr[len(sum_arr)-1] < req:
            # req is larger than the total sum: no valid index.
            print -1
        elif sum_arr[0] > req:
            # The very first prefix sum already covers req.
            print 1
        else:
            # Convert binsearch's 0-based index to a 1-based answer.
            print ((binsearch(sum_arr, 0, len(sum_arr) - 1, req)) + 1)
        Q -= 1
acf17cc346b72785fa0c407b4b96edd4c4331a4b | 3,733 | py | Python | tests_python/tests_009/conftest.py | callistonianembrace/tezos | d7c25242bd6b0b368c731fc264a112e00f126818 | [
"MIT"
] | null | null | null | tests_python/tests_009/conftest.py | callistonianembrace/tezos | d7c25242bd6b0b368c731fc264a112e00f126818 | [
"MIT"
] | null | null | null | tests_python/tests_009/conftest.py | callistonianembrace/tezos | d7c25242bd6b0b368c731fc264a112e00f126818 | [
"MIT"
] | null | null | null | """Protocol-specific hooks and fixtures"""
import tempfile
from typing import Optional, Iterator, List
import pytest
from launchers.sandbox import Sandbox
from tools import constants, utils
from tools.client_regression import ClientRegression
from client.client import Client
from client.client_output import CreateMockupResult
from . import protocol
@pytest.fixture(scope="class")
def client(sandbox: Sandbox) -> Iterator[Client]:
    """One node with protocol 009.

    Activate protocol 009 one year in the past. This avoids waiting
    when baking blocks manually from the client using `bake for`.
    """
    # Node 0 is the single sandbox node for this test class.
    sandbox.add_node(0, params=constants.NODE_PARAMS)
    client = sandbox.client(0)
    protocol.activate(client, activate_in_the_past=True)
    yield client
@pytest.fixture(scope="class")
def client_regtest_bis(sandbox: Sandbox) -> Iterator[Client]:
    """One node with protocol 009, regression test enabled.

    Activate protocol 009 one year in the past. (see fixture client).
    """

    def reg_client_factory(
        client_path: str,
        admin_client_path: str,
        host: Optional[str] = None,
        base_dir: Optional[str] = None,
        rpc_port: Optional[int] = None,
        use_tls: Optional[bool] = None,
        endpoint: Optional[str] = 'http://127.0.0.1:8732',
        mode: str = None,
        disable_disclaimer: bool = True,
    ) -> ClientRegression:
        # Same construction as the default client factory, but produces a
        # ClientRegression so command outputs can be regression-checked.
        client = ClientRegression(
            client_path=client_path,
            admin_client_path=admin_client_path,
            host=host,
            base_dir=base_dir,
            rpc_port=rpc_port,
            use_tls=use_tls,
            endpoint=endpoint,
            mode=mode,
            disable_disclaimer=disable_disclaimer,
        )
        return client

    # Node 1, to avoid clashing with the plain `client` fixture's node 0.
    sandbox.add_node(
        1, client_factory=reg_client_factory, params=constants.NODE_PARAMS
    )
    client = sandbox.client(1)
    protocol.activate(client, activate_in_the_past=True)
    yield client
@pytest.fixture(scope="class")
def clients(sandbox: Sandbox, request) -> Iterator[List[Client]]:
    """N node with protocol 009. Parameterized by the number of nodes.

    Number of nodes is specified as a class annotation.
    @pytest.mark.parametrize('clients', [N], indirect=True)

    Activate protocol 009 one year in the past. (see fixture client).
    """
    assert request.param is not None
    num_nodes = request.param
    for i in range(num_nodes):
        # A large node count may increase peer connection time
        sandbox.add_node(i, params=constants.NODE_PARAMS)
    # Activating on node 0 is enough; the protocol propagates to peers.
    protocol.activate(sandbox.client(0), activate_in_the_past=True)
    clients = sandbox.all_clients()
    for client in clients:
        proto = protocol.HASH
        assert utils.check_protocol(client, proto)
    yield clients
@pytest.fixture
def mockup_client(sandbox: Sandbox) -> Iterator[Client]:
    """
    Returns a mockup client with its persistent directory created

    This is done in two steps, because we want to create the mockup
    with a client that doesn't have "--mode mockup" (as per
    the public documentation) but we want to return a
    client that has "--mode mockup" and uses the base-dir created
    in the first step.

    There is no way around this pattern. If you want to create
    a mockup using custom arguments; you MUST do the same
    as this method.
    """
    # The temporary base_dir lives for the duration of the test only.
    with tempfile.TemporaryDirectory(prefix='tezos-client.') as base_dir:
        # Step 1: create the mockup state with a plain (non-mockup) client.
        unmanaged_client = sandbox.create_client(base_dir=base_dir)
        res = unmanaged_client.create_mockup(
            protocol=protocol.HASH
        ).create_mockup_result
        assert res == CreateMockupResult.OK
        # Step 2: hand back a client running in mockup mode on that state.
        yield sandbox.create_client(base_dir=base_dir, mode="mockup")
| 33.630631 | 74 | 0.691937 |
acf17d824ebc33fb7f859ef7f680f0c59251c34b | 2,362 | py | Python | test/014-test-issue-34.py | o19s/hon-lucene-synonyms | 55ba18f89dd130a6adb408e57435875016995de2 | [
"Apache-2.0"
] | null | null | null | test/014-test-issue-34.py | o19s/hon-lucene-synonyms | 55ba18f89dd130a6adb408e57435875016995de2 | [
"Apache-2.0"
] | null | null | null | test/014-test-issue-34.py | o19s/hon-lucene-synonyms | 55ba18f89dd130a6adb408e57435875016995de2 | [
"Apache-2.0"
] | null | null | null | #
# Basic unit tests for HON-Lucene-Synonyms
#
# Test that synonyms.disablePhraseQueries is working properly
#
import unittest, solr, urllib, time
# Python 2 integration test (print statement, urllib.urlencode): requires a
# local Solr instance at localhost:8983 with the synonym filter configured.
class TestBasic(unittest.TestCase):

    #
    # We have the synonyms:
    #
    # dog, pooch, hound, canis familiaris
    #
    url = 'http://localhost:8983/solr'
    test_data = [ \
            {'id': '1', 'name': "I have a dog."}, \
            {'id': '2', 'name': "I have a pooch."}, \
            {'id': '3', 'name': "I have a hound."}, \
            {'id': '4', 'name': "I have a canis."}, \
            ]
    solr_connection = None

    def setUp(self):
        # Start from a clean index and load the four test documents.
        self.solr_connection = solr.SolrConnection(self.url)
        self.solr_connection.delete_query('*:*')
        self.solr_connection.add_many(self.test_data)
        self.solr_connection.commit()

    def tearDown(self):
        # Remove all documents so tests stay independent.
        self.solr_connection.delete_query('*:*')
        self.solr_connection.commit()

    def test_queries(self):
        # Phrase queries expand to synonyms unless disablePhraseQueries=true.
        self.tst_query('"dog"', False, False, 3)
        self.tst_query('"pooch"', False, False, 3)
        self.tst_query('"hound"', False, False, 3)
        self.tst_query('"canis familiaris"', False, False, 3)
        self.tst_query('"dog"', True, False, 1)
        self.tst_query('"pooch"', True, False, 1)
        self.tst_query('"hound"', True, False, 1)
        self.tst_query('"canis familiaris"', True, False, 0)
        # Non-phrase queries always expand; "canis familiaris" also matches
        # the bare "canis" document, hence 4 hits.
        self.tst_query('dog', False, True, 3)
        self.tst_query('pooch', False, True, 3)
        self.tst_query('hound', False, True, 3)
        self.tst_query('canis familiaris', False, True, 4)

    def tst_query(self, query, disable_phrase_queries, construct_phrases, expected_num_docs):
        # Run one query through the synonym_edismax parser and assert the
        # number of matching documents.
        params = {'q': query, 'fl' : '*,score', 'qf' : 'name', 'mm' : '1%', 'defType' : 'synonym_edismax', 'synonyms' : 'true', \
                'synonyms.disablePhraseQueries' : str(disable_phrase_queries).lower(), \
                'synonyms.constructPhrases' : str(construct_phrases).lower()}
        response = self.solr_connection.query(**params)
        results = response.results
        print '\ntesting ',self.url + '/select?' + urllib.urlencode(params),\
                '\n',map(lambda x: x['name'],results),'\nActual: %s, Expected: %s' % (len(results), expected_num_docs)
        self.assertEqual(len(results), expected_num_docs)

if __name__ == '__main__':
    unittest.main()
| 34.735294 | 129 | 0.596952 |
acf17f4260ab7c4b85a38a4320377f345e4fd2d0 | 4,641 | py | Python | scripts/clean_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:53.000Z | 2022-02-06T13:00:14.000Z | scripts/clean_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 80 | 2020-10-31T09:14:46.000Z | 2021-01-12T23:38:15.000Z | scripts/clean_test.py | Tim810306/oppia | 6f90044d12dbe0979c999265cbe46f267c4c592d | [
"Apache-2.0"
] | 1 | 2020-10-02T13:28:26.000Z | 2020-10-02T13:28:26.000Z | # coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for scripts/clean_test.py."""
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import shutil
from core.tests import test_utils
from . import clean
class CleanTests(test_utils.GenericTestBase):
"""Test the methods for clean script."""
def test_delete_directory_with_missing_dir(self):
check_function_calls = {
'rmtree_is_called': False
}
expected_check_function_calls = {
'rmtree_is_called': False
}
def mock_rmtree(unused_path):
check_function_calls['rmtree_is_called'] = True
def mock_exists(unused_path):
return False
rmtree_swap = self.swap(shutil, 'rmtree', mock_rmtree)
exists_swap = self.swap(os.path, 'exists', mock_exists)
with rmtree_swap, exists_swap:
clean.delete_directory_tree('dir_path')
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_delete_directory_with_existing_dir(self):
check_function_calls = {
'rmtree_is_called': False
}
expected_check_function_calls = {
'rmtree_is_called': True
}
def mock_rmtree(unused_path):
check_function_calls['rmtree_is_called'] = True
def mock_exists(unused_path):
return True
rmtree_swap = self.swap(shutil, 'rmtree', mock_rmtree)
exists_swap = self.swap(os.path, 'exists', mock_exists)
with rmtree_swap, exists_swap:
clean.delete_directory_tree('dir_path')
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_delete_file_with_missing_file(self):
check_function_calls = {
'remove_is_called': False
}
expected_check_function_calls = {
'remove_is_called': False
}
def mock_remove(unused_path):
check_function_calls['remove_is_called'] = True
def mock_isfile(unused_path):
return False
remove_swap = self.swap(os, 'remove', mock_remove)
isfile_swap = self.swap(os.path, 'isfile', mock_isfile)
with remove_swap, isfile_swap:
clean.delete_file('file_path')
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_delete_file_with_existing_file(self):
check_function_calls = {
'remove_is_called': False
}
expected_check_function_calls = {
'remove_is_called': True
}
def mock_remove(unused_path):
check_function_calls['remove_is_called'] = True
def mock_isfile(unused_path):
return True
remove_swap = self.swap(os, 'remove', mock_remove)
isfile_swap = self.swap(os.path, 'isfile', mock_isfile)
with remove_swap, isfile_swap:
clean.delete_file('file_path')
self.assertEqual(check_function_calls, expected_check_function_calls)
def test_function_calls(self):
check_function_calls = {
'delete_directory_tree_is_called': 0,
'delete_file_is_called': 0
}
expected_check_function_calls = {
'delete_directory_tree_is_called': 9,
'delete_file_is_called': 4
}
def mock_delete_dir(unused_path):
check_function_calls['delete_directory_tree_is_called'] += 1
def mock_delete_file(unused_path):
check_function_calls['delete_file_is_called'] += 1
def mock_listdir(unused_path):
return ['tmpcompiledjs_dir']
delete_dir_swap = self.swap(
clean, 'delete_directory_tree', mock_delete_dir)
delete_file_swap = self.swap(clean, 'delete_file', mock_delete_file)
listdir_swap = self.swap(os, 'listdir', mock_listdir)
with delete_dir_swap, delete_file_swap, listdir_swap:
clean.main(args=[])
self.assertEqual(check_function_calls, expected_check_function_calls)
| 36.257813 | 77 | 0.672053 |
acf17f9d459686ddb44e9a968a3599443fc914cc | 6,313 | py | Python | run_scripts/evaluate_policy.py | zbzhu99/ILSwiss | 9be4ff89a0005cd404014696aacc0eefd7596b86 | [
"MIT"
] | 46 | 2021-07-28T03:15:23.000Z | 2022-03-31T22:19:22.000Z | run_scripts/evaluate_policy.py | zbzhu99/ILSwiss | 9be4ff89a0005cd404014696aacc0eefd7596b86 | [
"MIT"
] | 3 | 2021-07-30T09:55:01.000Z | 2022-03-17T17:21:26.000Z | run_scripts/evaluate_policy.py | zbzhu99/ILSwiss | 9be4ff89a0005cd404014696aacc0eefd7596b86 | [
"MIT"
] | 5 | 2021-07-28T16:45:14.000Z | 2022-03-11T01:39:19.000Z | import yaml
import argparse
import joblib
import numpy as np
import os, sys, inspect
import pickle, random
from pathlib import Path
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
print(sys.path)
from gym.spaces import Dict
from rlkit.envs import get_env
import rlkit.torch.utils.pytorch_util as ptu
from rlkit.launchers.launcher_util import setup_logger, set_seed
from rlkit.core import eval_util
from rlkit.envs.wrappers import ScaledEnv
from rlkit.samplers import PathSampler
from rlkit.torch.common.policies import (
MakeDeterministic,
ReparamTanhMultivariateGaussianLfOPolicy,
)
from .video import save_video
def experiment(variant):
env_specs = variant["env_specs"]
env = get_env(env_specs)
env.seed(env_specs["eval_env_seed"])
print("\n\nEnv: {}".format(env_specs["env_name"]))
print("kwargs: {}".format(env_specs["env_kwargs"]))
print("Obs Space: {}".format(env.observation_space))
print("Act Space: {}\n\n".format(env.action_space))
obs_space = env.observation_space
act_space = env.action_space
obs_dim = obs_space.shape[0]
action_dim = act_space.shape[0]
if variant["scale_env_with_demo_stats"]:
with open("expert_demos_listing.yaml", "r") as f:
listings = yaml.load(f.read())
expert_demos_path = listings[variant["expert_name"]]["file_paths"][
variant["expert_idx"]
]
buffer_save_dict = joblib.load(expert_demos_path)
env = ScaledEnv(
env,
obs_mean=buffer_save_dict["obs_mean"],
obs_std=buffer_save_dict["obs_std"],
acts_mean=buffer_save_dict["acts_mean"],
acts_std=buffer_save_dict["acts_std"],
)
net_size = variant["policy_net_size"]
num_hidden = variant["policy_num_hidden_layers"]
policy = joblib.load(variant["policy_checkpoint"])["exploration_policy"][0]
if variant["eval_deterministic"]:
policy = MakeDeterministic(policy)
policy.to(ptu.device)
eval_sampler = PathSampler(
env,
policy,
variant["num_eval_steps"],
variant["max_path_length"],
no_terminal=variant["no_terminal"],
render=variant["render"],
render_kwargs=variant["render_kwargs"],
render_mode=variant["render_mode"],
)
test_paths = eval_sampler.obtain_samples()
average_returns = eval_util.get_average_returns(test_paths)
std_returns = eval_util.get_std_returns(test_paths)
print(average_returns, std_returns)
if variant["render"] and variant["render_mode"] == "rgb_array":
video_path = variant["video_path"]
video_path = os.path.join(video_path, variant["env_specs"]["env_name"])
print("saving videos...")
for i, test_path in enumerate(test_paths):
images = np.stack(test_path["image"], axis=0)
fps = 1 // getattr(env, "dt", 1 / 30)
video_save_path = os.path.join(video_path, f"episode_{i}.mp4")
save_video(images, video_save_path, fps=fps)
return average_returns, std_returns, test_paths
if __name__ == "__main__":
# Arguments
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--experiment", help="experiment specification file")
parser.add_argument("-g", "--gpu", help="gpu id", type=int, default=0)
parser.add_argument(
"-s", "--save_res", help="save result to file", type=int, default=1
)
args = parser.parse_args()
with open(args.experiment, "r") as spec_file:
spec_string = spec_file.read()
exp_specs = yaml.load(spec_string)
# make all seeds the same.
exp_specs["env_specs"]["eval_env_seed"] = exp_specs["env_specs"][
"training_env_seed"
] = exp_specs["seed"]
if exp_specs["using_gpus"] > 0:
print("\n\nUSING GPU\n\n")
ptu.set_gpu_mode(True)
exp_id = exp_specs["exp_id"]
exp_prefix = exp_specs["exp_name"]
seed = exp_specs["seed"]
set_seed(seed)
# setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
train_file = (
exp_specs["method"] + "-" + exp_specs["env_specs"]["env_name"]
)
pkl_name = "/best.pkl"
train_files = [train_file]
save_path = "./final_performance/"
for train_file in train_files:
res_files = os.listdir("./logs/" + train_file)
test_paths_all = []
for file_ in res_files:
exp_specs["policy_checkpoint"] = (
"./logs/" + train_file + "/" + file_ + pkl_name
)
flag = False
if "_lfo" in file_:
flag = True
average_returns, std_returns, test_paths = experiment(exp_specs, flag)
test_paths_all.extend(test_paths)
if args.save_res:
save_dir = Path(save_path + train_file)
save_dir.mkdir(exist_ok=True, parents=True)
file_dir = save_dir.joinpath(
exp_specs["method"], exp_specs["env_specs"]["env_name"]
)
file_dir.mkdir(exist_ok=True, parents=True)
if not os.path.exists(file_dir.joinpath("res.csv")):
with open(
save_dir.joinpath(
exp_specs["method"],
exp_specs["env_specs"]["env_name"],
"res.csv",
),
"w",
) as f:
f.write("avg,std\n")
with open(
save_dir.joinpath(
exp_specs["method"],
exp_specs["env_specs"]["env_name"],
"res.csv",
),
"a",
) as f:
f.write("{},{}\n".format(average_returns, std_returns))
if exp_specs["save_samples"]:
with open(
Path(save_path).joinpath(
exp_specs["method"],
exp_specs["env_specs"]["env_name"],
"samples.pkl",
),
"wb",
) as f:
pickle.dump(test_paths_all, f)
| 33.94086 | 86 | 0.593696 |
acf17feef913d09df7d11ee517388b7e6aa45003 | 3,299 | py | Python | sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_10_31/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_10_31/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/digitaltwins/azure-mgmt-digitaltwins/azure/mgmt/digitaltwins/v2020_10_31/_configuration.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
class AzureDigitalTwinsManagementClientConfiguration(Configuration):
"""Configuration for AzureDigitalTwinsManagementClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The subscription identifier.
:type subscription_id: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
super(AzureDigitalTwinsManagementClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.subscription_id = subscription_id
self.api_version = "2020-10-31"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-digitaltwins/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 45.819444 | 129 | 0.6893 |
acf1807bf8600766c3e30d5ee33dc3618cfacfba | 3,917 | py | Python | write_fault_shp.py | griffij/eq_hazmap_tests | db9d6202937b08b543758b8e190566116dbcf83b | [
"Apache-2.0"
] | 1 | 2021-12-03T16:55:03.000Z | 2021-12-03T16:55:03.000Z | write_fault_shp.py | griffij/eq_hazmap_tests | db9d6202937b08b543758b8e190566116dbcf83b | [
"Apache-2.0"
] | null | null | null | write_fault_shp.py | griffij/eq_hazmap_tests | db9d6202937b08b543758b8e190566116dbcf83b | [
"Apache-2.0"
] | 1 | 2019-10-12T09:55:31.000Z | 2019-10-12T09:55:31.000Z | """Write a fault or rupture geometry to shapefile
"""
import gdal, osr, ogr
import os
from numpy import mean
def fault2shp(corner_lons, corner_lats, output_shp, corner_depths=None, vertice_array=False):
"""Function for writing a fault geometry to a shapefile
"""
# Create a Polygon from the extent tuple
ring = ogr.Geometry(ogr.wkbLinearRing)
#for i in range(len(corner_lons)):
# need to get in right order
if vertice_array:
# Assume corner_lons, corner_lats are 2 1D array
# giving the corrdinates of the polygon boundary
for i in range(len(corner_lons)):
ring.AddPoint(corner_lons[i], corner_lats[i])
ring.AddPoint(corner_lons[0], corner_lats[0]) # close polygon
else:
ring.AddPoint(corner_lons[0],corner_lats[0])
ring.AddPoint(corner_lons[1],corner_lats[1])
ring.AddPoint(corner_lons[3],corner_lats[3])
ring.AddPoint(corner_lons[2],corner_lats[2])
ring.AddPoint(corner_lons[0],corner_lats[0]) # close polygon
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
drv = ogr.GetDriverByName('ESRI Shapefile')
# Remove output shapefile if it already exists
if os.path.exists(output_shp):
drv.DeleteDataSource(output_shp)
# Create the output shapefile
outDataSource = drv.CreateDataSource(output_shp)
outLayer = outDataSource.CreateLayer("Fault_geom", geom_type=ogr.wkbPolygon)
# Add an ID field
idField = ogr.FieldDefn("id", ogr.OFTInteger)
outLayer.CreateField(idField)
# Add a depth field
depthField = ogr.FieldDefn("mean_depth", ogr.OFTReal)
outLayer.CreateField(depthField)
# Create the feature and set values
featureDefn = outLayer.GetLayerDefn()
feature = ogr.Feature(featureDefn)
feature.SetGeometry(poly)
feature.SetField("id", 1)
feature.SetField("mean_depth", mean(corner_depths))
outLayer.CreateFeature(feature)
feature = None
# Save and close
outDataSource = None
drv = None
# Now write upper trace to line shapefile
line = ogr.Geometry(ogr.wkbLineString)
corner_depths = list(corner_depths)
min_dep = min(corner_depths)
min_dep_index = corner_depths.index(min(corner_depths))
print min_dep_index
line.AddPoint(corner_lons[min_dep_index],corner_lats[min_dep_index])
corner_depths[min_dep_index] = 1e10
min_dep_2 = min(corner_depths)
min_dep_index_2 = corner_depths.index(min(corner_depths))
print min_dep_index_2
line.AddPoint(corner_lons[min_dep_index_2],corner_lats[min_dep_index_2])
corner_depths[min_dep_index] = min_dep
print min_dep, min_dep_2
mean_upper_depth = mean([min_dep, min_dep_2])
print mean_upper_depth
drv = ogr.GetDriverByName('ESRI Shapefile')
output_shp = output_shp.rstrip('.shp') + '_upper_edge.shp'
# Remove output shapefile if it already exists
if os.path.exists(output_shp):
drv.DeleteDataSource(output_shp)
# Create the output shapefile
outDataSource = drv.CreateDataSource(output_shp)
outLayer = outDataSource.CreateLayer("Fault_geom", geom_type=ogr.wkbLineString)
# Add a depth field
depthField = ogr.FieldDefn("mean_depth", ogr.OFTReal)
outLayer.CreateField(depthField)
# Create the feature and set values
featureDefn = outLayer.GetLayerDefn()
feature = ogr.Feature(featureDefn)
feature.SetGeometry(line)
feature.SetField("mean_depth", mean_upper_depth)
outLayer.CreateFeature(feature)
feature = None
# Save and close
outDataSource = None
drv = None
if __name__=="__main__":
corner_lons = [ 110.80301631, 110.14964724, 110.86646785, 110.21405031]
corner_lats = [-9.39397613, -9.31704483, -8.86453769, -8.78772005]
corner_depths = [ 32.81350292, 32.81350292, 74.01549338, 74.01549338]
fault2shp(corner_lons, corner_lats, 'test_fault.shp', corner_depths)
| 39.17 | 93 | 0.714322 |
acf1812671a62269c0bc0d0e3523ac15d10511bb | 2,427 | py | Python | data/executed/project_70/scripts/script.py | mossadhelali/AL-public | 94cfc11b434a56cf1d0593069b97173c9a18930b | [
"MIT"
] | null | null | null | data/executed/project_70/scripts/script.py | mossadhelali/AL-public | 94cfc11b434a56cf1d0593069b97173c9a18930b | [
"MIT"
] | null | null | null | data/executed/project_70/scripts/script.py | mossadhelali/AL-public | 94cfc11b434a56cf1d0593069b97173c9a18930b | [
"MIT"
] | null | null | null | # This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
from __future__ import division
import os
print('*'*50 , os.getcwd())
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.svm import OneClassSVM
# Input data files are available in the "" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
from subprocess import check_output
# print(check_output(["ls", "../input"]).decode("utf8"))
# Any results you write to the current directory are saved as output.
# load data
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
# remove constant columns
remove = []
for col in df_train.columns:
if df_train[col].std() == 0:
remove.append(col)
df_train.drop(remove, axis=1, inplace=True)
df_test.drop(remove, axis=1, inplace=True)
# remove duplicated columns
remove = []
c = df_train.columns
for i in range(len(c)-1):
v = df_train[c[i]].values
for j in range(i+1,len(c)):
if np.array_equal(v,df_train[c[j]].values):
remove.append(c[j])
df_train.drop(remove, axis=1, inplace=True)
df_test.drop(remove, axis=1, inplace=True)
y_train = df_train['TARGET'].values
X_train = df_train.drop(['ID','TARGET'], axis=1).values
id_test = df_test['ID']
X_test = df_test.drop(['ID'], axis=1).values
# length of dataset
len_train = len(X_train)
len_test = len(X_test)
# classifier
clf = xgb.XGBClassifier(missing=np.nan, max_depth=5, n_estimators=350, learning_rate=0.03, nthread=4, subsample=0.95, colsample_bytree=0.85, seed=4242)
X_fit, X_eval, y_fit, y_eval= train_test_split(X_train, y_train, test_size=0.3)
# fitting
clf.fit(X_train, y_train, early_stopping_rounds=20, eval_metric="auc", eval_set=[(X_eval, y_eval)])
print('Overall AUC:', roc_auc_score(y_train, clf.predict_proba(X_train)[:,1]))
# predicting
y_pred= clf.predict_proba(X_test)[:,1]
submission = pd.DataFrame({"ID":id_test, "TARGET":y_pred})
submission.to_csv("submission.csv", index=False)
print('Completed!') | 30.721519 | 151 | 0.743717 |
acf181982e15f72a6684750ff2538a1bc336500a | 18,737 | py | Python | tests/test_lnarray.py | subhylahiri/numpy_linalg_extras | dfc875c43c5b0c734b0df2fcf14cd415406682bb | [
"BSD-3-Clause"
] | 5 | 2019-01-16T01:47:00.000Z | 2021-08-04T08:22:03.000Z | tests/test_lnarray.py | subhylahiri/numpy_linalg_extras | dfc875c43c5b0c734b0df2fcf14cd415406682bb | [
"BSD-3-Clause"
] | null | null | null | tests/test_lnarray.py | subhylahiri/numpy_linalg_extras | dfc875c43c5b0c734b0df2fcf14cd415406682bb | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test lnarray class
"""
import hypothesis as hy
import numpy as np
import numpy.linalg as nl
import numpy_linalg as la
import numpy_linalg.gufuncs as gf
import numpy_linalg.testing.unittest_numpy as utn
import numpy_linalg.testing.hypothesis_numpy as hn
from numpy_linalg.testing import main, TestCaseNumpy
# =============================================================================
# pylint: disable=missing-function-docstring
hy.settings.register_profile("slow",
suppress_health_check=(hy.HealthCheck.too_slow,))
hy.settings.load_profile('slow')
np.set_printoptions(precision=2, threshold=10, edgeitems=2)
# =============================================================================
__all__ = ['TestArray', 'TestPinvarray']
# =============================================================================
# new class helper
# =============================================================================
def view_as(*arrays: np.ndarray, kind: type = la.lnarray) -> la.lnarray:
"""Convert array types
Parameters
----------
arrays : np.ndarray
Arrays to convert.
kind : type, optional
Number of core dimensions to leave, by default `la.lnarray`.
Returns
-------
views : la.lnarray
Converted arrays.
"""
result = tuple(arr.view(kind) for arr in arrays)
return result[0] if len(result) == 1 else result
def insert(shape, axis=-1):
"""Shape -> shape with one axis inserted"""
return shape[:axis] + (1,) + shape[axis:]
# =============================================================================
# Test python classes
# =============================================================================
class TestArray(TestCaseNumpy):
"""Testing lnarray"""
@hy.given(hn.broadcastable('(a,b),(b,a),(a,a),(b,b)', 'd'))
def test_return_array_types(self, arrays):
m_sb_n, m_bs_n = arrays[:2]
m_sb, m_bs, m_ss, m_bb = view_as(*arrays)
m_bs_m, m_ss_m, m_bb_m = hn.core_only(m_bs, m_ss, m_bb)
hy.assume(hn.all_well_behaved(m_ss, m_bb))
hy.assume(m_sb.ndim != m_ss.ndim - 1) # np..solve's broadcasting issue
self.assertIsInstance(m_sb @ m_bs, la.lnarray)
self.assertIsInstance(m_sb_n @ m_bs, la.lnarray)
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_bs, m_sb)
tw_o = np.empty(expect, m_bs.dtype)
tw_r = la.matmul(m_bs, m_sb_n, tw_o)
self.assertIsInstance(tw_r, np.ndarray)
self.assertIsInstance(tw_o, np.ndarray)
self.assertIsInstance(np.matmul(m_bs, m_sb_n), np.ndarray)
self.assertIsInstance(la.solve(m_ss, m_sb_n), la.lnarray)
self.assertIsInstance(nl.solve(m_ss, m_sb_n), np.ndarray)
self.assertIsInstance(la.lstsq(m_bs, m_bb), la.lnarray)
self.assertIsInstance(nl.lstsq(m_bs_m, m_bb_m, rcond=None)[0],
np.ndarray)
self.assertIsInstance(la.lu(m_ss)[0], la.lnarray)
self.assertIsInstance(la.lu(m_bs_n)[0], np.ndarray)
self.assertIsInstance(la.qr(m_ss)[0], la.lnarray)
self.assertIsInstance(la.qr(m_bs_n)[0], np.ndarray)
self.assertIsInstance(la.lq(m_ss)[0], la.lnarray)
self.assertIsInstance(la.lq(m_bs_n)[0], np.ndarray)
self.assertIsInstance(la.lqr(m_ss)[0], la.lnarray)
self.assertIsInstance(la.lqr(m_bs_n)[0], np.ndarray)
self.assertIsInstance(nl.qr(m_ss_m)[0], np.ndarray)
@hy.given(hn.broadcastable('(a,b),(b,b)', 'D'))
def test_lnarray_shape_methods(self, arrays):
m_bs, m_ss = view_as(*arrays)
tall, smol = m_bs.shape, m_ss.shape
hy.assume(hn.tall(m_bs))
hy.assume(np.max(np.abs(m_bs.imag)) > .01)
hy.assume(np.max(np.abs(m_bs.real)) / np.max(np.abs(m_bs.imag)) < 1e3)
expect = utn.trnsp(tall)
self.assertArrayShape(m_bs.t, expect)
self.assertArrayShape(m_bs.h, expect)
self.assertArrayNotAllClose(m_bs.t, m_bs.h)
self.assertArrayShape(m_ss.c, smol + (1,))
self.assertArrayShape(m_bs.c.uc, tall)
expect = insert(smol)
self.assertArrayShape(m_ss.r, expect)
self.assertArrayShape(m_bs.r.ur, tall)
self.assertArrayShape(m_ss.s, smol + (1, 1))
self.assertArrayShape(m_bs.s.us, tall)
# expect = smol[:1] + (1,) + smol[1:2] + (1,) + smol[2:]
expect = insert(insert(smol, 2), 1)
self.assertArrayShape(m_ss.expand_dims((1, 3)), expect)
expect = tall[:1] + (np.prod(tall[1:4]),) + tall[4:]
self.assertArrayShape(m_bs.flattish(1, 4), expect)
with self.assertRaisesRegex(ValueError, "repeated axis"):
m_bs.expand_dims((m_bs.ndim - 1, -3))
half = (m_bs.ndim + 2) // 2 + 1
with self.assertRaises(ValueError):
(m_bs.s).flattish(half, -half)
@hy.given(hn.broadcastable('(a,b),(b,b),(b)', None))
def test_lnarray_operations_return_expected_values(self, arrays):
m_bs, m_ss, vec = view_as(*arrays)
m_bs_m = hn.core_only(m_bs)
vec = hn.core_only(vec, dims=1)
hy.assume(hn.tall(m_bs))
hy.assume(m_ss.ndim != 3) # causes np..solve's broadcasting issue
hy.assume(hn.all_well_behaved(m_ss, m_bs_m))
expect = utn.array_return_shape('(a,b),(b,c)->(a,c)', m_bs, m_ss)
ts_o = np.empty(expect, m_ss.dtype)
ts_r = la.matmul(m_bs, m_ss, ts_o)
self.assertArrayAllClose(ts_r, ts_o)
self.assertArrayAllClose(m_bs @ m_ss, np.matmul(m_bs, m_ss))
self.assertArrayAllClose(m_bs @ m_ss, np.matmul(m_bs, m_ss))
self.assertArrayAllClose(m_bs @ vec, np.matmul(m_bs, vec))
cond = np.linalg.cond(m_ss).max()
self.assertArrayAllClose(gf.solve(m_ss, vec), nl.solve(m_ss, vec.c).uc,
cond=cond)
cond = np.linalg.cond(m_bs_m).max()
self.assertArrayAllClose(gf.lstsq(m_bs_m.t, vec),
nl.lstsq(m_bs_m.t, vec, rcond=None)[0],
cond=cond)
self.assertArrayAllClose(gf.rmatmul(m_ss, m_bs), np.matmul(m_bs, m_ss))
# m_bs @= m_ss
# self.assertArrayAllClose(ts_r, m_bs)
class TestPinvarray(TestCaseNumpy):
"""test pinvarray & invarray classes
"""
@hy.given(hn.broadcastable('(a,a),(b,a)', ['d', 'D']))
def test_pinvarray_attribute_types(self, arrays):
m_ss, m_bs = view_as(*arrays)
hy.assume(hn.all_well_behaved(m_ss))
self.assertIsInstance(m_ss.pinv, la.pinvarray)
self.assertIsInstance(m_ss.inv, la.invarray)
self.assertIs(m_ss.pinv.dtype, m_bs.dtype)
self.assertIsInstance(m_ss.pinv.pinv, la.lnarray)
self.assertIsInstance(m_ss.inv.inv, la.lnarray)
self.assertIsInstance(m_ss.pinv(), la.lnarray)
self.assertIsInstance(m_ss.inv(), la.lnarray)
m_bs_p = la.pinvarray(m_bs)
self.assertIsInstance(m_bs_p, la.pinvarray)
self.assertIsInstance(m_bs_p.pinv, la.lnarray)
self.assertIsInstance(2 * m_bs_p, la.pinvarray)
self.assertIsInstance((2 * m_bs_p).pinv, la.lnarray)
pout = la.pinvarray(np.empty_like(m_bs))
np.multiply(2, m_bs_p, pout)
self.assertIsInstance(pout, la.pinvarray)
self.assertIsInstance(pout.pinv, la.lnarray)
with self.assertRaises(AttributeError):
m_bs_p.inv # pylint: disable=no-member,pointless-statement
with self.assertRaises(TypeError):
m_ss.inv.pinv # pylint: disable=pointless-statement
@hy.given(hn.matrices_b)
def test_pinvarray_shape_methods(self, array):
m_bs = array.view(la.lnarray)
hy.assume(hn.nonsquare(m_bs))
hy.assume(hn.all_well_behaved(m_bs))
m_bs_p = m_bs.pinv
expect = utn.trnsp(m_bs.shape)
self.assertEqual(m_bs_p.ndim, len(expect))
self.assertEqual(m_bs_p.shape, expect)
self.assertEqual(m_bs_p.size, np.prod(expect))
self.assertArrayShape(m_bs_p(), expect)
with self.assertRaises(ValueError):
m_bs.inv # pylint: disable=pointless-statement
m_bs_p = m_bs.c.pinv
expect = insert(m_bs.shape)
now_expect = expect[1::-1] + expect[2:]
self.assertArrayShape(m_bs_p.swapaxes(0, 1), now_expect)
now_expect = expect[2::-1] + expect[3:]
self.assertArrayShape(m_bs_p.swapaxes(0, 2), now_expect)
now_expect = utn.trnsp(expect)
self.assertArrayShape(m_bs_p.swapaxes(-1, -2), now_expect)
@hy.given(hn.broadcastable('(a,b),(b,a),(b,a)', None))
def test_pinvarray_in_functions(self, arrays):
m_sb, high, m_bs = view_as(*arrays)
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_bs))
cond = np.linalg.cond(m_bs).max()
self.assertArrayAllClose(gf.matmul(m_bs.pinv, high),
gf.lstsq(m_bs, high), cond=cond)
self.assertArrayAllClose(gf.matmul(m_sb, m_bs.pinv.t),
gf.rlstsq(m_sb, m_bs.t), cond=cond)
xpout = la.pinvarray(np.empty_like(m_bs))
m_bs_p = np.multiply(m_bs.pinv, 2, out=xpout)
self.assertArrayAllClose(m_bs_p.pinv, xpout.pinv)
self.assertArrayAllClose(m_bs_p.pinv, m_bs / 2)
with self.assertRaises(TypeError):
gf.matmul(m_bs.pinv, m_sb.pinv)
self.assertArrayAllClose(gf.lstsq(m_sb.pinv, high),
gf.matmul(m_sb, high))
with self.assertRaises(TypeError):
gf.lstsq(high, m_sb.pinv)
self.assertArrayAllClose(gf.rlstsq(high.t, m_sb.t.pinv),
gf.matmul(high.t, m_sb.t))
with self.assertRaises(TypeError):
gf.rlstsq(m_sb.t.pinv, high.t)
with self.assertRaises(TypeError):
gf.rmatmul(m_sb.pinv, m_bs.pinv)
with self.assertRaises(TypeError):
gf.solve(m_sb.pinv, high)
with self.assertRaises(TypeError):
gf.rsolve(m_sb, m_bs.pinv)
@hy.given(hn.broadcastable('(a,a),(b,a),(a,b),(a,a)', None))
def test_invarray_in_functions(self, arrays):
m_ss, m_bs, m_sb, mini = view_as(*arrays)
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_ss, mini))
cond = np.linalg.cond(m_ss).max()
self.assertArrayAllClose(gf.matmul(m_ss.inv, m_sb),
gf.solve(m_ss, m_sb), cond=cond)
self.assertArrayAllClose(gf.matmul(m_bs, m_ss.inv),
gf.rsolve(m_bs, m_ss), cond=cond)
self.assertArrayAllClose(gf.matmul(m_ss.inv, mini.inv).inv,
mini @ m_ss)
self.assertArrayAllClose(gf.solve(m_ss.inv, m_sb),
gf.matmul(m_ss, m_sb))
self.assertArrayAllClose(gf.solve(mini, m_ss.inv).inv,
gf.matmul(m_ss, mini))
self.assertArrayAllClose(gf.solve(mini.inv, m_ss.inv),
gf.rsolve(mini, m_ss), cond=cond)
self.assertArrayAllClose(gf.rsolve(m_ss, mini.inv),
gf.matmul(m_ss, mini))
self.assertArrayAllClose(gf.rsolve(mini.inv, m_ss).inv,
gf.matmul(m_ss, mini))
cond = np.linalg.cond(mini).max()
self.assertArrayAllClose(gf.rsolve(mini.inv, m_ss.inv),
gf.solve(mini, m_ss), cond=cond)
self.assertArrayAllClose(gf.rmatmul(m_ss, mini.inv),
gf.solve(mini, m_ss), cond=cond)
self.assertArrayAllClose(gf.rmatmul(mini.inv, m_ss),
gf.rsolve(m_ss, mini), cond=cond)
self.assertArrayAllClose(gf.rmatmul(mini.inv, m_ss.inv).inv,
mini @ m_ss)
@hy.given(hn.broadcastable('(a,a),(b,a),(a,b),(a,a)', None))
def test_bad_p_invarray_combos_in_functions(self, arrays):
m_ss, m_bs, m_sb, mini = view_as(*arrays)
# hy.assume(hn.tall(m_bs))
with self.assertRaises(TypeError):
la.solve(m_sb.pinv, mini)
with self.assertRaises(TypeError):
la.rsolve(mini, m_bs.pinv)
with self.assertRaises(TypeError):
la.solve(mini, m_bs.pinv)
with self.assertRaises(TypeError):
la.rsolve(m_sb.pinv, mini)
with self.assertRaises(TypeError):
la.solve(m_sb.pinv, m_bs.pinv)
with self.assertRaises(TypeError):
la.rsolve(m_sb.pinv, m_bs.pinv)
with self.assertRaises(TypeError):
la.matmul(m_ss.inv, m_bs.pinv)
with self.assertRaises(TypeError):
la.matmul(m_sb.pinv, mini.inv)
with self.assertRaises(TypeError):
la.solve(m_bs.pinv, mini.inv)
with self.assertRaises(TypeError):
la.rsolve(mini.inv, m_sb.pinv)
@hy.given(hn.broadcastable('(a,a),(b,a),(a,b),(a,a)', None))
def test_good_p_invarray_combos_in_lstsq(self, arrays):
m_ss, m_bs, m_sb, mini = view_as(*arrays)
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_ss, mini, m_bs, m_sb))
self.assertArrayAllClose(la.lstsq(mini.inv, m_sb),
la.matmul(mini, m_sb))
self.assertArrayAllClose(la.rlstsq(m_bs, mini.inv),
la.matmul(m_bs, mini))
cond = np.linalg.cond(m_bs).max()
self.assertArrayAllClose(la.lstsq(mini.inv, m_bs.pinv),
la.rlstsq(mini, m_bs), cond=cond)
cond = np.linalg.cond(mini).max()
self.assertArrayAllClose(la.rlstsq(mini.inv, m_sb.pinv),
la.solve(mini, m_sb), cond=cond)
cond = np.linalg.cond(m_ss).max()
self.assertArrayAllClose(la.lstsq(mini.inv, m_ss.inv),
la.rsolve(mini, m_ss), cond=cond)
cond = np.linalg.cond(mini).max()
self.assertArrayAllClose(la.rlstsq(mini.inv, m_ss.inv),
la.solve(mini, m_ss), cond=cond)
cond = np.linalg.cond(m_ss).max()
self.assertArrayAllClose(la.lstsq(m_bs.pinv, m_ss.inv),
la.rsolve(m_bs, m_ss), cond=cond)
cond = np.linalg.cond(m_sb).max()
self.assertArrayAllClose(la.rlstsq(m_sb.pinv, m_ss.inv),
la.lstsq(m_sb, m_ss), cond=cond)
@hy.given(hn.broadcastable('(a,a),(b,a),(a,b),(a,a)', None))
def test_good_p_invarray_combos_in_solve(self, arrays):
m_ss, m_bs, m_sb, mini = view_as(*arrays)
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_bs, m_sb))
# hy.assume(hn.all_well_behaved(m_ss, mini))
cond = np.linalg.cond(m_bs).max()
self.assertArrayAllClose(la.solve(m_ss.inv, m_bs.pinv),
la.rlstsq(m_ss, m_bs), cond=cond)
cond = np.linalg.cond(m_sb).max()
self.assertArrayAllClose(la.rsolve(m_sb.pinv, mini.inv),
la.lstsq(m_sb, mini), cond=cond)
@hy.given(hn.broadcastable('(a,b),(b,a),(b,a),()', None))
def test_pinvarray_operators(self, arrays):
m_sb, high, m_bs, scal = view_as(*arrays)
scal[np.abs(scal) < 1e-5] += 1.
scal = scal.s
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_bs))
cond = np.linalg.cond(m_bs).max()
self.assertArrayAllClose(m_bs.pinv @ high, gf.lstsq(m_bs, high),
cond=cond)
self.assertArrayAllClose(m_bs.pinv() @ high, gf.lstsq(m_bs, high),
cond=cond)
self.assertArrayAllClose(m_sb @ m_bs.pinv.t, gf.rlstsq(m_sb, m_bs.t),
cond=cond)
with self.assertRaises(TypeError):
m_bs.pinv @ m_sb.pinv # pylint: disable=pointless-statement
self.assertArrayAllClose((m_bs.pinv * 3.5).pinv, m_bs / 3.5)
self.assertArrayAllClose((2.4 * m_bs.pinv).pinv, m_bs / 2.4)
self.assertArrayAllClose((m_bs.pinv / 3.564).pinv, m_bs * 3.564)
with self.assertRaises(TypeError):
65 / m_bs.pinv # pylint: disable=pointless-statement
self.assertArrayAllClose((m_bs.pinv * scal).pinv, m_bs / scal)
self.assertArrayAllClose((scal * m_bs.pinv).pinv, m_bs / scal)
self.assertArrayAllClose((m_bs.pinv / scal).pinv, m_bs * scal)
with self.assertRaises(TypeError):
scal / m_bs.pinv # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
scal.pinv * m_bs.pinv # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
m_bs.pinv + m_sb # pylint: disable=pointless-statement
told = 1. * m_bs
m_bs_p = m_bs.pinv
m_bs_p *= 2
self.assertArrayAllClose(m_bs, told / 2)
@hy.given(hn.broadcastable('(a,a),(b,a),(a,b),(a,a),()', None))
def test_invarray_operators(self, arrays):
m_ss, m_bs, m_sb, mini, scal = view_as(*arrays)
scal[np.abs(scal) < 1e-5] += 1.
scal = scal.s
# hy.assume(hn.tall(m_bs))
hy.assume(hn.all_well_behaved(m_ss, mini))
cond = np.linalg.cond(m_ss).max()
self.assertArrayAllClose(m_ss.inv @ m_sb, gf.solve(m_ss, m_sb),
cond=cond)
self.assertArrayAllClose(m_ss.inv() @ m_sb, gf.solve(m_ss, m_sb),
cond=cond)
self.assertArrayAllClose(m_bs @ m_ss.inv, gf.rsolve(m_bs, m_ss),
cond=cond)
self.assertArrayAllClose((m_ss.inv @ mini.inv).inv, mini @ m_ss)
self.assertArrayAllClose((m_ss.inv * 3.5).inv, m_ss / 3.5)
self.assertArrayAllClose((2.4 * m_ss.inv).inv, m_ss / 2.4)
self.assertArrayAllClose((m_ss.inv / 3.564).inv, m_ss * 3.564)
with self.assertRaises(TypeError):
45.564 / m_ss.inv # pylint: disable=pointless-statement
self.assertArrayAllClose((mini.inv * scal).inv, mini / scal)
self.assertArrayAllClose((scal * mini.inv).inv, mini / scal)
self.assertArrayAllClose((mini.inv / scal).inv, mini * scal)
with self.assertRaises(TypeError):
scal / mini.inv # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
scal.inv * mini.inv # pylint: disable=pointless-statement
with self.assertRaises(TypeError):
m_ss + mini.inv # pylint: disable=pointless-statement
mini_ss = m_ss @ mini
mini_i = mini.inv
mini_i @= m_ss.inv
self.assertArrayAllClose(mini, mini_ss)
# Run the test suite with verbose output when executed as a script.
if __name__ == '__main__':
    main(verbosity=2)
| 45.811736 | 79 | 0.584192 |
acf182389582845ec4b79bd7106039a9afe3bb48 | 49,293 | py | Python | cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py | maydaycc/cinder | 2da0a68ea478913b20ecd1bafe0bde42ea18d840 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py | maydaycc/cinder | 2da0a68ea478913b20ecd1bafe0bde42ea18d840 | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/volume/drivers/dell_emc/powermax/powermax_data.py | maydaycc/cinder | 2da0a68ea478913b20ecd1bafe0bde42ea18d840 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import six
from cinder import context
from cinder.objects import fields
from cinder.objects import group
from cinder.objects import group_snapshot
from cinder.objects import volume_attachment
from cinder.objects import volume_type
from cinder.tests.unit import fake_group
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.volume.drivers.dell_emc.powermax import utils
# Directory containing Cinder configuration files on the host
# (referenced by the PowerMax driver tests).
CINDER_EMC_CONFIG_DIR = '/etc/cinder/'
class PowerMaxData(object):
# array info
array = '000197800123'
uni_array = u'000197800123'
array_herc = '000197900123'
srp = 'SRP_1'
srp2 = 'SRP_2'
slo = 'Diamond'
workload = 'DSS'
port_group_name_f = 'OS-fibre-PG'
port_group_name_i = 'OS-iscsi-PG'
masking_view_name_f = 'OS-HostX-F-OS-fibre-PG-MV'
masking_view_name_i = 'OS-HostX-SRP_1-I-OS-iscsi-PG-MV'
initiatorgroup_name_f = 'OS-HostX-F-IG'
initiatorgroup_name_i = 'OS-HostX-I-IG'
parent_sg_f = 'OS-HostX-F-OS-fibre-PG-SG'
parent_sg_i = 'OS-HostX-I-OS-iscsi-PG-SG'
storagegroup_name_f = 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'
storagegroup_name_i = 'OS-HostX-SRP_1-Diamond-DSS-OS-iscsi-PG'
defaultstoragegroup_name = 'OS-SRP_1-Diamond-DSS-SG'
storagegroup_list = [defaultstoragegroup_name]
default_sg_no_slo = 'OS-no_SLO-SG'
default_sg_compr_disabled = 'OS-SRP_1-Diamond-DSS-CD-SG'
default_sg_re_enabled = 'OS-SRP_1-Diamond-DSS-RE-SG'
failed_resource = 'OS-failed-resource'
fake_host = 'HostX@Backend#Diamond+DSS+SRP_1+000197800123'
new_host = 'HostX@Backend#Silver+OLTP+SRP_1+000197800123'
none_host = 'HostX@Backend#Diamond+None+SRP_1+000197800123'
version = '3.1.0'
volume_wwn = '600000345'
remote_array = '000197800124'
device_id = '00001'
device_id2 = '00002'
device_id3 = '00003'
device_id4 = '00004'
rdf_group_name = '23_24_007'
rdf_group_no = '70'
u4v_version = '91'
storagegroup_name_source = 'Grp_source_sg'
storagegroup_name_target = 'Grp_target_sg'
group_snapshot_name = 'Grp_snapshot'
target_group_name = 'Grp_target'
storagegroup_name_with_id = 'GrpId_group_name'
rdf_managed_async_grp = 'OS-%s-Asynchronous-rdf-sg' % rdf_group_name
volume_id = '2b06255d-f5f0-4520-a953-b029196add6a'
no_slo_sg_name = 'OS-HostX-No_SLO-OS-fibre-PG'
temp_snapvx = 'temp-00001-snapshot_for_clone'
next_gen_ucode = 5978
# connector info
wwpn1 = '123456789012345'
wwpn2 = '123456789054321'
wwnn1 = '223456789012345'
initiator = 'iqn.1993-08.org.debian: 01: 222'
ip, ip2 = u'123.456.7.8', u'123.456.7.9'
iqn = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000001,t,0x0001'
iqn2 = u'iqn.1992-04.com.emc:600009700bca30c01e3e012e00000002,t,0x0001'
connector = {'ip': ip,
'initiator': initiator,
'wwpns': [wwpn1, wwpn2],
'wwnns': [wwnn1],
'host': 'HostX'}
fabric_name_prefix = 'fakeFabric'
end_point_map = {connector['wwpns'][0]: [wwnn1],
connector['wwpns'][1]: [wwnn1]}
target_wwns = [wwnn1]
zoning_mappings = {
'array': u'000197800123',
'init_targ_map': end_point_map,
'initiator_group': initiatorgroup_name_f,
'port_group': port_group_name_f,
'target_wwns': target_wwns}
zoning_mappings_metro = deepcopy(zoning_mappings)
zoning_mappings_metro.update({'metro_port_group': port_group_name_f,
'metro_ig': initiatorgroup_name_f,
'metro_array': remote_array})
device_map = {}
for wwn in connector['wwpns']:
fabric_name = ''.join([fabric_name_prefix,
wwn[-2:]])
target_wwn = wwn[::-1]
fabric_map = {'initiator_port_wwn_list': [wwn],
'target_port_wwn_list': [target_wwn]
}
device_map[fabric_name] = fabric_map
iscsi_device_info = {'maskingview': masking_view_name_i,
'ip_and_iqn': [{'ip': ip,
'iqn': initiator}],
'is_multipath': True,
'array': array,
'controller': {'host': '10.00.00.00'},
'hostlunid': 3}
iscsi_device_info_metro = deepcopy(iscsi_device_info)
iscsi_device_info_metro['metro_ip_and_iqn'] = [{'ip': ip2, 'iqn': iqn2}]
iscsi_device_info_metro['metro_hostlunid'] = 2
fc_device_info = {'maskingview': masking_view_name_f,
'array': array,
'controller': {'host': '10.00.00.00'},
'hostlunid': 3}
# snapshot info
snapshot_id = '390eeb4d-0f56-4a02-ba14-167167967014'
snapshot_display_id = 'my_snap'
managed_snap_id = 'OS-390eeb4d-0f56-4a02-ba14-167167967014'
test_snapshot_snap_name = 'OS-' + snapshot_id[:6] + snapshot_id[-9:]
snap_location = {'snap_name': test_snapshot_snap_name,
'source_id': device_id}
# cinder volume info
ctx = context.RequestContext('admin', 'fake', True)
provider_location = {'array': array,
'device_id': device_id}
provider_location2 = {'array': six.text_type(array),
'device_id': device_id2}
provider_location3 = {'array': six.text_type(remote_array),
'device_id': device_id2}
provider_location4 = {'array': six.text_type(uni_array),
'device_id': device_id}
provider_location_clone = {'array': array,
'device_id': device_id,
'snap_name': temp_snapvx,
'source_device_id': device_id}
provider_location_snapshot = {'array': array,
'device_id': device_id,
'snap_name': test_snapshot_snap_name,
'source_device_id': device_id}
provider_location5 = {'array': remote_array,
'device_id': device_id}
legacy_provider_location = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000197800123',
'DeviceID': device_id,
'SystemCreationClassName': u'Symm_StorageSystem'}}
legacy_provider_location2 = {
'classname': 'Symm_StorageVolume',
'keybindings': {'CreationClassName': u'Symm_StorageVolume',
'SystemName': u'SYMMETRIX+000197800123',
'DeviceID': device_id2,
'SystemCreationClassName': u'Symm_StorageSystem'}}
test_volume_type = fake_volume.fake_volume_type_obj(
context=ctx
)
test_volume = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location),
volume_type=test_volume_type, host=fake_host,
replication_driver_data=six.text_type(provider_location3))
test_attached_volume = fake_volume.fake_volume_obj(
id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220', context=ctx, name='vol1',
size=0, provider_auth=None, attach_status='attached',
provider_location=six.text_type(provider_location), host=fake_host,
volume_type=test_volume_type,
replication_driver_data=six.text_type(provider_location3))
test_legacy_vol = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(legacy_provider_location),
replication_driver_data=six.text_type(legacy_provider_location2),
host=fake_host, volume_type=test_volume_type)
test_clone_volume = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location2),
host=fake_host, source_volid=test_volume.id,
snapshot_id=snapshot_id, _name_id=test_volume.id)
test_volume_snap_manage = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
display_name='vol1',
provider_location=six.text_type(provider_location),
volume_type=test_volume_type, host=fake_host,
replication_driver_data=six.text_type(provider_location4))
test_snapshot = fake_snapshot.fake_snapshot_obj(
context=ctx, id=snapshot_id,
name='my_snap', size=2,
provider_location=six.text_type(snap_location),
host=fake_host, volume=test_volume)
test_legacy_snapshot = fake_snapshot.fake_snapshot_obj(
context=ctx, id=test_volume.id, name='my_snap', size=2,
provider_location=six.text_type(legacy_provider_location),
host=fake_host, volume=test_volume)
test_failed_snap = fake_snapshot.fake_snapshot_obj(
context=ctx,
id='4732de9b-98a4-4b6d-ae4b-3cafb3d34220',
name=failed_resource,
size=2,
provider_location=six.text_type(snap_location),
host=fake_host, volume=test_volume)
test_snapshot_manage = fake_snapshot.fake_snapshot_obj(
context=ctx, id=snapshot_id,
name='my_snap', size=2,
provider_location=six.text_type(snap_location),
host=fake_host, volume=test_volume_snap_manage,
display_name='my_snap')
test_volume_attachment = volume_attachment.VolumeAttachment(
id='2b06255d-f5f0-4520-a953-b029196add6b', volume_id=test_volume.id,
connector=connector)
location_info = {'location_info': '000197800123#SRP_1#Diamond#DSS',
'storage_protocol': 'FC'}
test_host = {'capabilities': location_info,
'host': fake_host}
# extra-specs
vol_type_extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123'}
vol_type_extra_specs_compr_disabled = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'storagetype:disablecompression': 'true'}
vol_type_extra_specs_rep_enabled = {
'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'replication_enabled': '<is> True'}
extra_specs = {'pool_name': u'Diamond+DSS+SRP_1+000197800123',
'slo': slo,
'workload': workload,
'srp': srp,
'array': array,
'interval': 3,
'retries': 120}
extra_specs_migrate = deepcopy(extra_specs)
extra_specs_migrate[utils.PORTGROUPNAME] = port_group_name_f
extra_specs_disable_compression = deepcopy(extra_specs)
extra_specs_disable_compression[utils.DISABLECOMPRESSION] = 'true'
extra_specs_intervals_set = deepcopy(extra_specs)
extra_specs_intervals_set['interval'] = 1
extra_specs_intervals_set['retries'] = 1
extra_specs_rep_enabled = deepcopy(extra_specs)
extra_specs_rep_enabled['replication_enabled'] = True
rep_extra_specs = deepcopy(extra_specs_rep_enabled)
rep_extra_specs['array'] = remote_array
rep_extra_specs['interval'] = 1
rep_extra_specs['retries'] = 1
rep_extra_specs['srp'] = srp2
rep_extra_specs['rep_mode'] = 'Synchronous'
rep_extra_specs2 = deepcopy(rep_extra_specs)
rep_extra_specs2[utils.PORTGROUPNAME] = port_group_name_f
rep_extra_specs3 = deepcopy(rep_extra_specs)
rep_extra_specs3['slo'] = slo
rep_extra_specs3['workload'] = workload
rep_extra_specs4 = deepcopy(rep_extra_specs3)
rep_extra_specs4['rdf_group_label'] = rdf_group_name
rep_extra_specs5 = deepcopy(rep_extra_specs2)
rep_extra_specs5['target_array_model'] = 'VMAX250F'
rep_extra_specs_ode = deepcopy(rep_extra_specs2)
rep_extra_specs_ode['array'] = array
rep_extra_specs_ode.pop('rep_mode')
rep_extra_specs_ode['mode'] = 'Metro'
rep_extra_specs_legacy = deepcopy(rep_extra_specs_ode)
rep_extra_specs_legacy['mode'] = 'Synchronous'
test_volume_type_1 = volume_type.VolumeType(
id='2b06255d-f5f0-4520-a953-b029196add6a', name='abc',
extra_specs=extra_specs)
test_volume_type_list = volume_type.VolumeTypeList(
objects=[test_volume_type_1])
test_vol_grp_name_id_only = 'ec870a2f-6bf7-4152-aa41-75aad8e2ea96'
test_vol_grp_name = 'Grp_source_sg_%s' % test_vol_grp_name_id_only
test_fo_vol_group = 'fo_vol_group_%s' % test_vol_grp_name_id_only
test_group_1 = group.Group(
context=None, name=storagegroup_name_source,
group_id='abc', size=1,
id=test_vol_grp_name_id_only, status='available',
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
host=fake_host, provider_location=six.text_type(provider_location))
test_group_failed = group.Group(
context=None, name=failed_resource,
group_id='14b8894e-54ec-450a-b168-c172a16ed166',
size=1,
id='318c721c-51ad-4160-bfe1-ebde2273836f',
status='available',
provider_auth=None, volume_type_ids=['abc'],
group_type_id='grptypeid',
volume_types=test_volume_type_list,
host=fake_host, provider_location=six.text_type(provider_location),
replication_status=fields.ReplicationStatus.DISABLED)
test_rep_group = fake_group.fake_group_obj(
context=ctx, name=storagegroup_name_source,
id=test_vol_grp_name_id_only, host=fake_host,
replication_status=fields.ReplicationStatus.ENABLED)
test_group = fake_group.fake_group_obj(
context=ctx, name=storagegroup_name_source,
id=test_vol_grp_name_id_only, host=fake_host)
test_group_without_name = fake_group.fake_group_obj(
context=ctx, name=None,
id=test_vol_grp_name_id_only, host=fake_host)
test_group_snapshot_1 = group_snapshot.GroupSnapshot(
context=None, id='6560405d-b89a-4f79-9e81-ad1752f5a139',
group_id='876d9fbb-de48-4948-9f82-15c913ed05e7',
name=group_snapshot_name,
group_type_id='c6934c26-dde8-4bf8-a765-82b3d0130e9f',
status='available',
group=test_group_1)
test_group_snapshot_failed = group_snapshot.GroupSnapshot(
context=None, id='0819dd5e-9aa1-4ec7-9dda-c78e51b2ad76',
group_id='1fc735cb-d36c-4352-8aa6-dc1e16b5a0a7',
name=failed_resource,
group_type_id='6b70de13-98c5-46b2-8f24-e4e96a8988fa',
status='available',
group=test_group_failed)
test_volume_group_member = fake_volume.fake_volume_obj(
context=ctx, name='vol1', size=2, provider_auth=None,
provider_location=six.text_type(provider_location),
volume_type=test_volume_type, host=fake_host,
replication_driver_data=six.text_type(provider_location3),
group_id=test_vol_grp_name_id_only)
# masking view dict
masking_view_dict = {
'array': array,
'connector': connector,
'device_id': device_id,
'init_group_name': initiatorgroup_name_f,
'initiator_check': None,
'maskingview_name': masking_view_name_f,
'parent_sg_name': parent_sg_f,
'srp': srp,
'storagetype:disablecompression': False,
utils.PORTGROUPNAME: port_group_name_f,
'slo': slo,
'storagegroup_name': storagegroup_name_f,
'volume_name': test_volume.name,
'workload': workload,
'replication_enabled': False}
masking_view_dict_no_slo = deepcopy(masking_view_dict)
masking_view_dict_no_slo.update(
{'slo': None, 'workload': None,
'storagegroup_name': no_slo_sg_name})
masking_view_dict_compression_disabled = deepcopy(masking_view_dict)
masking_view_dict_compression_disabled.update(
{'storagetype:disablecompression': True,
'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-CD'})
masking_view_dict_replication_enabled = deepcopy(masking_view_dict)
masking_view_dict_replication_enabled.update(
{'replication_enabled': True,
'storagegroup_name': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG-RE'})
masking_view_dict_multiattach = deepcopy(masking_view_dict)
masking_view_dict_multiattach.update(
{utils.EXTRA_SPECS: extra_specs, utils.IS_MULTIATTACH: True,
utils.OTHER_PARENT_SG: parent_sg_i, utils.FAST_SG:
storagegroup_name_i, utils.NO_SLO_SG: no_slo_sg_name})
# vmax data
# sloprovisioning
compression_info = {'symmetrixId': ['000197800128']}
inititiatorgroup = [{'initiator': [wwpn1],
'hostId': initiatorgroup_name_f,
'maskingview': [masking_view_name_f]},
{'initiator': [initiator],
'hostId': initiatorgroup_name_i,
'maskingview': [masking_view_name_i]}]
initiator_list = [{'host': initiatorgroup_name_f,
'initiatorId': wwpn1,
'maskingview': [masking_view_name_f]},
{'host': initiatorgroup_name_i,
'initiatorId': initiator,
'maskingview': [masking_view_name_i]},
{'initiatorId': [
'FA-1D:4:' + wwpn1,
'SE-4E:0:' + initiator]}]
maskingview = [{'maskingViewId': masking_view_name_f,
'portGroupId': port_group_name_f,
'storageGroupId': storagegroup_name_f,
'hostId': initiatorgroup_name_f,
'maskingViewConnection': [
{'host_lun_address': '0003'}]},
{'maskingViewId': masking_view_name_i,
'portGroupId': port_group_name_i,
'storageGroupId': storagegroup_name_i,
'hostId': initiatorgroup_name_i,
'maskingViewConnection': [
{'host_lun_address': '0003'}]},
{}]
portgroup = [{'portGroupId': port_group_name_f,
'symmetrixPortKey': [
{'directorId': 'FA-1D',
'portId': '4'}],
'maskingview': [masking_view_name_f]},
{'portGroupId': port_group_name_i,
'symmetrixPortKey': [
{'directorId': 'SE-4E',
'portId': '0'}],
'maskingview': [masking_view_name_i]}]
port_list = [
{'symmetrixPort': {'num_of_masking_views': 1,
'maskingview': [masking_view_name_f],
'identifier': wwnn1,
'symmetrixPortKey': {
'directorId': 'FA-1D',
'portId': '4'},
'portgroup': [port_group_name_f]}},
{'symmetrixPort': {'identifier': initiator,
'symmetrixPortKey': {
'directorId': 'SE-4E',
'portId': '0'},
'ip_addresses': [ip],
'num_of_masking_views': 1,
'maskingview': [masking_view_name_i],
'portgroup': [port_group_name_i]}}]
sg_details = [{'srp': srp,
'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': defaultstoragegroup_name,
'slo': slo,
'workload': workload},
{'srp': srp,
'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': storagegroup_name_f,
'slo': slo,
'workload': workload,
'maskingview': [masking_view_name_f],
'parent_storage_group': [parent_sg_f]},
{'srp': srp,
'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': storagegroup_name_i,
'slo': slo,
'workload': workload,
'maskingview': [masking_view_name_i],
'parent_storage_group': [parent_sg_i]},
{'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': parent_sg_f,
'num_of_child_sgs': 1,
'child_storage_group': [storagegroup_name_f],
'maskingview': [masking_view_name_f]},
{'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': parent_sg_i,
'num_of_child_sgs': 1,
'child_storage_group': [storagegroup_name_i],
'maskingview': [masking_view_name_i], },
{'srp': srp,
'num_of_vols': 2,
'cap_gb': 2,
'storageGroupId': no_slo_sg_name,
'slo': None,
'workload': None,
'maskingview': [masking_view_name_i],
'parent_storage_group': [parent_sg_i]}
]
sg_details_rep = [{'childNames': [],
'numDevicesNonGk': 2,
'isLinkTarget': False,
'rdf': True,
'capacityGB': 2.0,
'name': storagegroup_name_source,
'snapVXSnapshots': ['6560405d-752f5a139'],
'symmetrixId': array,
'numSnapVXSnapshots': 1}]
sg_rdf_details = [{'storageGroupName': test_vol_grp_name,
'symmetrixId': array,
'modes': ['Synchronous'],
'rdfGroupNumber': rdf_group_no,
'states': ['Synchronized']},
{'storageGroupName': test_fo_vol_group,
'symmetrixId': array,
'modes': ['Synchronous'],
'rdfGroupNumber': rdf_group_no,
'states': ['Failed Over']}]
sg_list = {'storageGroupId': [storagegroup_name_f,
defaultstoragegroup_name]}
sg_list_rep = [storagegroup_name_with_id]
srp_details = {'srp_capacity': {u'subscribed_total_tb': 93.52,
u'usable_used_tb': 8.62,
u'usable_total_tb': 24.45,
u'snapshot_modified_tb': 0.0,
u'subscribed_allocated_tb': 18.77,
u'snapshot_total_tb': 1.58},
'srpId': srp,
'reserved_cap_percent': 10}
array_info_wl = {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448,
'RestUserName': 'smc', 'RestPassword': 'smc',
'SSLVerify': False, 'SerialNumber': array,
'srpName': 'SRP_1', 'PortGroup': port_group_name_i,
'SLO': 'Diamond', 'Workload': 'OLTP'}
array_info_no_wl = {'RestServerIp': '1.1.1.1', 'RestServerPort': 3448,
'RestUserName': 'smc', 'RestPassword': 'smc',
'SSLVerify': False, 'SerialNumber': array,
'srpName': 'SRP_1', 'PortGroup': port_group_name_i,
'SLO': 'Diamond'}
volume_details = [{'cap_gb': 2,
'num_of_storage_groups': 1,
'volumeId': device_id,
'volume_identifier': 'OS-%s' % test_volume.id,
'wwn': volume_wwn,
'snapvx_target': 'false',
'snapvx_source': 'false',
'storageGroupId': [defaultstoragegroup_name,
storagegroup_name_f]},
{'cap_gb': 1,
'num_of_storage_groups': 1,
'volumeId': device_id2,
'volume_identifier': 'OS-%s' % test_volume.id,
'wwn': '600012345',
'storageGroupId': [defaultstoragegroup_name,
storagegroup_name_f]},
{'cap_gb': 1,
'num_of_storage_groups': 0,
'volumeId': device_id3,
'volume_identifier': '123',
'wwn': '600012345'},
{'cap_gb': 1,
'num_of_storage_groups': 1,
'volumeId': device_id4,
'volume_identifier': 'random_name',
'wwn': '600012345',
'storageGroupId': ['random_sg_1',
'random_sg_2']},
]
volume_details_attached = {'cap_gb': 2,
'num_of_storage_groups': 1,
'volumeId': device_id,
'volume_identifier': 'OS-%s' % test_volume.id,
'wwn': volume_wwn,
'snapvx_target': 'false',
'snapvx_source': 'false',
'storageGroupId': [storagegroup_name_f]}
volume_details_no_sg = {'cap_gb': 2,
'num_of_storage_groups': 1,
'volumeId': device_id,
'volume_identifier': 'OS-%s' % test_volume.id,
'wwn': volume_wwn,
'snapvx_target': 'false',
'snapvx_source': 'false',
'storageGroupId': []}
volume_list = [
{'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
'count': 2,
'maxPageSize': 1,
'resultList': {'result': [{'volumeId': device_id}],
'from': 0, 'to': 1}},
{'resultList': {'result': [{'volumeId': device_id2}]}},
{'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
'count': 2,
'maxPageSize': 1,
'resultList': {'result': [{'volumeId': device_id},
{'volumeId': device_id2}],
'from': 0, 'to': 1}}]
private_vol_details = {
'id': '6b70de13-98c5-46b2-8f24-e4e96a8988fa',
'count': 2,
'maxPageSize': 1,
'resultList': {
'result': [{
'timeFinderInfo': {
'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'snapshotName': 'temp-1',
'device': device_id,
'generation': '0'},
'lnkSnapshotGenInfo': [
{'targetDevice': device_id2,
'state': 'Copied'}]}]},
{'tgtSrcSnapshotGenInfo': {
'snapshotName': 'temp-1',
'targetDevice': device_id2,
'sourceDevice': device_id,
'generation': '0',
'state': 'Copied'}}],
'snapVXSrc': 'true',
'snapVXTgt': 'true'},
'rdfInfo': {'RDFSession': [
{'SRDFStatus': 'Ready',
'pairState': 'Synchronized',
'remoteDeviceID': device_id2,
'remoteSymmetrixID': remote_array}]}}],
'from': 0, 'to': 1}}
# Service Levels / Workloads
workloadtype = {'workloadId': ['OLTP', 'OLTP_REP', 'DSS', 'DSS_REP']}
srp_slo_details = {'serviceLevelDemand': [
{'serviceLevelId': 'None'}, {'serviceLevelId': 'Diamond'},
{'serviceLevelId': 'Gold'}, {'serviceLevelId': 'Optimized'}]}
slo_details = ['None', 'Diamond', 'Gold', 'Optimized']
powermax_slo_details = {'sloId': ['Bronze', 'Diamond', 'Gold',
'Optimized', 'Platinum', 'Silver']}
powermax_model_details = {'symmetrixId': array,
'model': 'PowerMax_2000',
'ucode': '5978.1091.1092'}
vmax_slo_details = {'sloId': ['Diamond', 'Optimized']}
vmax_model_details = {'model': 'VMAX450F'}
# replication
volume_snap_vx = {'snapshotLnks': [],
'snapshotSrcs': [
{'generation': 0,
'linkedDevices': [
{'targetDevice': device_id2,
'percentageCopied': 100,
'state': 'Copied',
'copy': True,
'defined': True,
'linked': True}],
'snapshotName': test_snapshot_snap_name,
'state': 'Established'}]}
capabilities = {'symmetrixCapability': [{'rdfCapable': True,
'snapVxCapable': True,
'symmetrixId': '0001111111'},
{'symmetrixId': array,
'snapVxCapable': True,
'rdfCapable': True}]}
group_snap_vx = {'generation': 0,
'isLinked': False,
'numUniqueTracks': 0,
'isRestored': False,
'name': group_snapshot_name,
'numStorageGroupVolumes': 1,
'state': ['Established'],
'timeToLiveExpiryDate': 'N/A',
'isExpired': False,
'numSharedTracks': 0,
'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100',
'numSourceVolumes': 1
}
group_snap_vx_1 = {'generation': 0,
'isLinked': False,
'numUniqueTracks': 0,
'isRestored': False,
'name': group_snapshot_name,
'numStorageGroupVolumes': 1,
'state': ['Copied'],
'timeToLiveExpiryDate': 'N/A',
'isExpired': False,
'numSharedTracks': 0,
'timestamp': '00:30:50 Fri, 02 Jun 2017 IST +0100',
'numSourceVolumes': 1,
'linkedStorageGroup':
{'name': target_group_name,
'percentageCopied': 100},
}
grp_snapvx_links = [{'name': target_group_name,
'percentageCopied': 100},
{'name': 'another-target',
'percentageCopied': 90}]
rdf_group_list = {'rdfGroupID': [{'rdfgNumber': rdf_group_no,
'label': rdf_group_name}]}
rdf_group_details = {'modes': ['Synchronous'],
'remoteSymmetrix': remote_array,
'label': rdf_group_name,
'type': 'Dynamic',
'numDevices': 1,
'remoteRdfgNumber': rdf_group_no,
'rdfgNumber': rdf_group_no}
rdf_group_vol_details = {'remoteRdfGroupNumber': rdf_group_no,
'localSymmetrixId': array,
'volumeConfig': 'RDF1+TDEV',
'localRdfGroupNumber': rdf_group_no,
'localVolumeName': device_id,
'rdfpairState': 'Synchronized',
'remoteVolumeName': device_id2,
'localVolumeState': 'Ready',
'rdfMode': 'Synchronous',
'remoteVolumeState': 'Write Disabled',
'remoteSymmetrixId': remote_array}
# system
job_list = [{'status': 'SUCCEEDED',
'jobId': '12345',
'result': 'created',
'resourceLink': 'storagegroup/%s' % storagegroup_name_f},
{'status': 'RUNNING', 'jobId': '55555'},
{'status': 'FAILED', 'jobId': '09999'}]
symmetrix = [{'symmetrixId': array,
'model': 'VMAX250F',
'ucode': '5977.1091.1092'},
{'symmetrixId': array_herc,
'model': 'PowerMax 2000',
'ucode': '5978.1091.1092'}]
version_details = {'version': 'V9.1.0.1'}
headroom = {'headroom': [{'headroomCapacity': 20348.29}]}
ucode_5978_foxtail = {'ucode': '5978.435.435'}
p_vol_rest_response_single = {
'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 1,
'expirationTime': 1521650650793, 'maxPageSize': 1000,
'resultList': {'to': 1, 'from': 1, 'result': [
{'volumeHeader': {
'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001',
'status': 'Ready', 'configuration': 'TDEV'}}]}}
p_vol_rest_response_none = {
'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 0,
'expirationTime': 1521650650793, 'maxPageSize': 1000,
'resultList': {'to': 0, 'from': 0, 'result': []}}
p_vol_rest_response_iterator_1 = {
'id': 'f3aab01c-a5a8-4fb4-af2b-16ae1c46dc9e_0', 'count': 1500,
'expirationTime': 1521650650793, 'maxPageSize': 1000,
'resultList': {'to': 1, 'from': 1, 'result': [
{'volumeHeader': {
'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002',
'status': 'Ready', 'configuration': 'TDEV'}}]}}
p_vol_rest_response_iterator_2 = {
'to': 2000, 'from': 1001, 'result': [
{'volumeHeader': {
'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001',
'status': 'Ready', 'configuration': 'TDEV'}}]}
rest_iterator_resonse_one = {
'to': 1000, 'from': 1, 'result': [
{'volumeHeader': {
'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00001',
'status': 'Ready', 'configuration': 'TDEV'}}]}
rest_iterator_resonse_two = {
'to': 1500, 'from': 1001, 'result': [
{'volumeHeader': {
'capGB': 1.0, 'capMB': 1026.0, 'volumeId': '00002',
'status': 'Ready', 'configuration': 'TDEV'}}]}
# COMMON.PY
priv_vol_func_response_single = [
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 1026.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00001', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00001',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {
'mirror': False, 'snapVXTgt': False,
'cloneTarget': False, 'cloneSrc': False,
'snapVXSrc': True, 'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'timestamp': 1512763278000, 'expired': False,
'secured': False, 'snapshotName': 'testSnap1',
'device': '00001', 'generation': 0, 'timeToLive': 0
}}]}]}}]
priv_vol_func_response_multi = [
{'volumeHeader': {
'private': False, 'capGB': 100.0, 'capMB': 102400.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00001', 'status': 'Ready', 'numStorageGroups': 0,
'reservationInfo': {'reserved': False}, 'mapped': False,
'encapsulated': False, 'formattedName': '00001',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'maskingInfo': {'masked': False},
'timeFinderInfo': {
'mirror': False, 'snapVXTgt': False,
'cloneTarget': False, 'cloneSrc': False,
'snapVXSrc': True, 'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'timestamp': 1512763278000, 'expired': False,
'secured': False, 'snapshotName': 'testSnap1',
'device': '00001', 'generation': 0, 'timeToLive': 0
}}]}]}},
{'volumeHeader': {
'private': False, 'capGB': 200.0, 'capMB': 204800.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00002', 'status': 'Ready', 'numStorageGroups': 0,
'reservationInfo': {'reserved': False}, 'mapped': False,
'encapsulated': False, 'formattedName': '00002',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'maskingInfo': {'masked': False},
'timeFinderInfo': {
'mirror': False, 'snapVXTgt': False,
'cloneTarget': False, 'cloneSrc': False,
'snapVXSrc': True, 'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'timestamp': 1512763278000, 'expired': False,
'secured': False, 'snapshotName': 'testSnap2',
'device': '00002', 'generation': 0, 'timeToLive': 0
}}]}]}},
{'volumeHeader': {
'private': False, 'capGB': 300.0, 'capMB': 307200.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00003', 'status': 'Ready', 'numStorageGroups': 0,
'reservationInfo': {'reserved': False}, 'mapped': False,
'encapsulated': False, 'formattedName': '00003',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'maskingInfo': {'masked': False},
'timeFinderInfo': {
'mirror': False, 'snapVXTgt': False,
'cloneTarget': False, 'cloneSrc': False,
'snapVXSrc': True, 'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'timestamp': 1512763278000, 'expired': False,
'secured': False, 'snapshotName': 'testSnap3',
'device': '00003', 'generation': 0, 'timeToLive': 0
}}]}]}},
{'volumeHeader': {
'private': False, 'capGB': 400.0, 'capMB': 409600.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00004', 'status': 'Ready', 'numStorageGroups': 0,
'reservationInfo': {'reserved': False}, 'mapped': False,
'encapsulated': False, 'formattedName': '00004',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'maskingInfo': {'masked': False},
'timeFinderInfo': {
'mirror': False, 'snapVXTgt': False,
'cloneTarget': False, 'cloneSrc': False,
'snapVXSrc': True, 'snapVXSession': [
{'srcSnapshotGenInfo': [
{'snapshotHeader': {
'timestamp': 1512763278000, 'expired': False,
'secured': False, 'snapshotName': 'testSnap4',
'device': '00004', 'generation': 0, 'timeToLive': 0
}}]}]}}]
priv_vol_func_response_multi_invalid = [
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 10.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00001', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00001',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}},
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 1026.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00002', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00002',
'system_resource': False, 'numSymDevMaskingViews': 1,
'nameModifier': "", 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}},
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 1026.0,
'serviceState': 'Normal', 'emulationType': 'CKD',
'volumeId': '00003', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00003',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}},
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 1026.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00004', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00004',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': "", 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': True, 'snapVXSrc': False}},
{'volumeHeader': {
'private': False, 'capGB': 1.0, 'capMB': 1026.0,
'serviceState': 'Normal', 'emulationType': 'FBA',
'volumeId': '00005', 'status': 'Ready', 'mapped': False,
'numStorageGroups': 0, 'reservationInfo': {'reserved': False},
'encapsulated': False, 'formattedName': '00005',
'system_resource': False, 'numSymDevMaskingViews': 0,
'nameModifier': 'OS-vol', 'configuration': 'TDEV'},
'maskingInfo': {'masked': False},
'rdfInfo': {
'dynamicRDF': False, 'RDF': False,
'concurrentRDF': False,
'getDynamicRDFCapability': 'RDF1_Capable', 'RDFA': False},
'timeFinderInfo': {'snapVXTgt': False, 'snapVXSrc': False}}]
volume_info_dict = {
'volume_id': volume_id,
'service_level': 'Diamond',
'masking_view': 'OS-HostX-F-OS-fibre-PG-MV',
'host': fake_host,
'display_name': 'attach_vol_name',
'volume_updated_time': '2018-03-05 20:32:41',
'port_group': 'OS-fibre-PG',
'operation': 'attach', 'srp': 'SRP_1',
'initiator_group': 'OS-HostX-F-IG',
'serial_number': '000197800123',
'parent_storage_group': 'OS-HostX-F-OS-fibre-PG-SG',
'workload': 'DSS',
'child_storage_group': 'OS-HostX-SRP_1-DiamondDSS-OS-fibre-PG'}
add_volume_sg_info_dict = {
"storageGroupId": defaultstoragegroup_name,
"slo": "Optimized",
"service_level": "Optimized",
"base_slo_name": "Optimized",
"srp": "SRP_1",
"slo_compliance": "NONE",
"num_of_vols": 39,
"num_of_child_sgs": 0,
"num_of_parent_sgs": 0,
"num_of_masking_views": 0,
"num_of_snapshots": 0,
"cap_gb": 109.06,
"device_emulation": "FBA",
"type": "Standalone",
"unprotected": "true",
"compression": "true",
"compressionRatio": "1.0:1",
"compression_ratio_to_one": 1,
"vp_saved_percent": 99.9
}
data_dict = {volume_id: volume_info_dict}
platform = 'Linux-4.4.0-104-generic-x86_64-with-Ubuntu-16.04-xenial'
unisphere_version = u'V9.1.0.1'
openstack_release = '12.0.0.0b3.dev401'
openstack_version = '12.0.0'
python_version = '2.7.12'
vmax_driver_version = '4.1'
vmax_firmware_version = u'5977.1125.1125'
vmax_model = u'VMAX250F'
version_dict = {
'unisphere_for_powermax_version': unisphere_version,
'openstack_release': openstack_release,
'openstack_version': openstack_version,
'python_version': python_version,
'powermax_cinder_driver_version': vmax_driver_version,
'openstack_platform': platform,
'storage_firmware_version': vmax_firmware_version,
'serial_number': array,
'storage_model': vmax_model}
u4p_failover_config = {
'u4p_failover_backoff_factor': '2',
'u4p_failover_retries': '3',
'u4p_failover_timeout': '10',
'u4p_primary': '10.10.10.10',
'u4p_failover_autofailback': 'True',
'u4p_failover_targets': [
{'san_ip': '10.10.10.11',
'san_api_port': '8443',
'san_login': 'test',
'san_password': 'test',
'driver_ssl_cert_verify': '/path/to/cert',
'driver_ssl_cert_path': 'True'},
{'san_ip': '10.10.10.12',
'san_api_port': '8443',
'san_login': 'test',
'san_password': 'test',
'driver_ssl_cert_verify': 'True'},
{'san_ip': '10.10.10.11',
'san_api_port': '8443',
'san_login': 'test',
'san_password': 'test',
'driver_ssl_cert_verify': '/path/to/cert',
'driver_ssl_cert_path': 'False'}]}
u4p_failover_target = [{
'RestServerIp': '10.10.10.11',
'RestServerPort': '8443',
'RestUserName': 'test',
'RestPassword': 'test',
'SSLVerify': '/path/to/cert'},
{'RestServerIp': '10.10.10.12',
'RestServerPort': '8443',
'RestUserName': 'test',
'RestPassword': 'test',
'SSLVerify': 'True'}]
| 45.389503 | 79 | 0.538048 |
acf182e61abafe579eba45e09cec94436dd9ec3d | 4,239 | py | Python | tests/fixtures_utils.py | CuriBio/pulse3d | 16ebe6edea0a01a5729940e6ed4d19c7e1e66d02 | [
"MIT"
] | null | null | null | tests/fixtures_utils.py | CuriBio/pulse3d | 16ebe6edea0a01a5729940e6ed4d19c7e1e66d02 | [
"MIT"
] | 7 | 2021-12-20T03:28:20.000Z | 2022-03-09T19:16:55.000Z | tests/fixtures_utils.py | CuriBio/sdk_refactor | dedcd0d6dcb53622dbd7bbe042e251557bf7d04c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import csv
import os
from typing import List
from typing import Optional
from typing import Tuple
from h5py import File
import matplotlib
import numpy as np
from pulse3D.constants import TISSUE_SENSOR_READINGS
from pulse3D.plate_recording import WellFile
import pytest
from stdlib_utils import get_current_file_abs_directory
matplotlib.use("Agg")
PATH_OF_CURRENT_FILE = get_current_file_abs_directory()
PATH_TO_DATASETS = os.path.join(PATH_OF_CURRENT_FILE, "datasets")
PATH_TO_MAGNET_FINDING_FILES = os.path.join(PATH_OF_CURRENT_FILE, "magnet_finding")
# PATH_TO_PNGS = os.path.join(PATH_OF_CURRENT_FILE, "pngs")
def _load_file(file_path: str) -> Tuple[List[str], List[str]]:
time = []
v = []
header_placer = [] # used to get rid of the header
with open(file_path, "r") as file_name:
file_reader = csv.reader(file_name, delimiter=",")
header = next(file_reader)
header_placer.append(header)
for row in file_reader:
# row variable is a list that represents a row in csv
time.append(row[0])
v.append(row[1])
return time, v
def _load_file_tsv(file_path: str) -> Tuple[List[str], List[str]]:
time = []
v = []
with open(file_path, "r") as file_name:
file_reader = csv.reader(file_name, delimiter="\t")
for row in file_reader:
time.append(row[0])
v.append(row[1])
return time, v
def _load_file_h5(
    file_path: str, sampling_rate_construct: int, x_range: Optional[Tuple[int, int]]
) -> Tuple[List[str], List[str]]:
    """Load raw tissue magnetic data from a well H5 file.

    When ``x_range`` is given as ``(start, stop)`` (presumably seconds --
    confirm against callers), only samples between
    ``start * sampling_rate_construct`` and
    ``stop * sampling_rate_construct`` are returned.
    """
    tissue_data = WellFile(file_path).raw_tissue_magnetic_data
    times, readings = tissue_data[0], tissue_data[1]
    if x_range is None:
        return times, readings
    window = slice(
        x_range[0] * sampling_rate_construct,
        x_range[1] * sampling_rate_construct,
    )
    return times[window], readings[window]
def create_numpy_array_of_raw_gmr_from_python_arrays(time_array, gmr_array):
    """Stack a time series and GMR readings into a 2xN int32 numpy array.

    Row 0 holds the timepoints, row 1 the GMR values.
    """
    rows = [
        np.array(time_array, dtype=np.int32),
        np.array(gmr_array, dtype=np.int32),
    ]
    return np.array(rows, dtype=np.int32)
def assert_percent_diff(actual, expected, threshold=0.0006):
    """Assert ``actual`` is within ``threshold`` relative difference of ``expected``.

    Dividing by ``abs(expected)`` keeps the ratio positive for negative
    reference values; dividing by ``expected`` directly would make the
    ratio negative and the assertion vacuously true.

    Args:
        actual: measured value.
        expected: non-zero reference value.
        threshold: maximum allowed ``|actual - expected| / |expected|``.
    """
    percent_diff = abs(actual - expected) / abs(expected)
    assert percent_diff < threshold
@pytest.fixture(scope="function", name="raw_generic_well_a1")
def fixture_raw_generic_well_a1():
    """Raw GMR data for well A1 loaded from the bundled TSV dataset.

    The time row is scaled by 10 -- presumably a unit/resolution
    conversion; confirm against consumers of this fixture.
    """
    time, gmr = _load_file_tsv(os.path.join(PATH_TO_DATASETS, "new_A1_tsv.tsv"))
    raw_gmr_data = create_numpy_array_of_raw_gmr_from_python_arrays(time, gmr)
    raw_gmr_data[0] *= 10
    return raw_gmr_data
@pytest.fixture(scope="function", name="raw_generic_well_a2")
def fixture_raw_generic_well_a2():
    """Raw GMR data for well A2 loaded from the bundled TSV dataset."""
    time, gmr = _load_file_tsv(os.path.join(PATH_TO_DATASETS, "new_A2_tsv.tsv"))
    raw_gmr_data = create_numpy_array_of_raw_gmr_from_python_arrays(time, gmr)
    return raw_gmr_data
@pytest.fixture(scope="function", name="sample_tissue_reading")
def fixture_sample_tissue_reading():
    """2xN int32 array of a sample tissue sensor reading (time, value)."""
    time, gmr = _load_file_tsv(os.path.join(PATH_TO_DATASETS, "sample_tissue_reading.tsv"))
    raw_gmr_data = create_numpy_array_of_raw_gmr_from_python_arrays(time, gmr)
    return raw_gmr_data
@pytest.fixture(scope="function", name="sample_reference_reading")
def fixture_sample_reference_reading():
    """2xN int32 array of a sample reference sensor reading (time, value)."""
    time, gmr = _load_file_tsv(os.path.join(PATH_TO_DATASETS, "sample_reference_reading.tsv"))
    raw_gmr_data = create_numpy_array_of_raw_gmr_from_python_arrays(time, gmr)
    return raw_gmr_data
def load_h5_folder_as_array(recording_name):
    """Assemble per-module H5 tissue readings into a (24, 3, 3, N) array.

    Reads modules 1..24 of ``recording_name`` from the magnet-finding test
    data folder; N is taken from the first module's sample count.
    """
    # TODO move this to magnet finding repo and then remove module ID mentions
    plate_data = None
    recording_dir = os.path.join(PATH_TO_MAGNET_FINDING_FILES, recording_name)
    for module_id in range(1, 25):
        module_path = os.path.join(
            recording_dir, f"{recording_name}__module_{module_id}.h5"
        )
        with File(module_path, "r") as well_file:
            tissue_data = well_file[TISSUE_SENSOR_READINGS][:]
        if plate_data is None:
            num_samples = tissue_data.shape[-1]
            plate_data = np.empty((24, 3, 3, num_samples))
        plate_data[module_id - 1] = tissue_data.reshape((3, 3, num_samples))
    return plate_data
| 34.185484 | 100 | 0.721397 |
acf184f0d99f8a1e212f8b7744abfa263a32e828 | 190 | py | Python | awards/admin.py | Nobella-Nyarari-Ejiofor/Awwards | d92f93757018eba7a4a9389b76c129639c99a3b7 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | awards/admin.py | Nobella-Nyarari-Ejiofor/Awwards | d92f93757018eba7a4a9389b76c129639c99a3b7 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | awards/admin.py | Nobella-Nyarari-Ejiofor/Awwards | d92f93757018eba7a4a9389b76c129639c99a3b7 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | from django.contrib import admin
from django.forms import models
from .models import Profile, Project
# Register your models here.
admin.site.register(Profile)
admin.site.register(Project)
| 23.75 | 36 | 0.815789 |
acf18668ac9152db828d29994468735070aa5ddc | 6,024 | py | Python | striplog/component.py | kinverarity1/striplog | 7c6ae22a130ad2ba3d351c31574778ec121571d3 | [
"Apache-2.0"
] | 1 | 2020-06-19T13:24:11.000Z | 2020-06-19T13:24:11.000Z | striplog/component.py | kinverarity1/striplog | 7c6ae22a130ad2ba3d351c31574778ec121571d3 | [
"Apache-2.0"
] | null | null | null | striplog/component.py | kinverarity1/striplog | 7c6ae22a130ad2ba3d351c31574778ec121571d3 | [
"Apache-2.0"
] | 1 | 2021-11-05T16:49:29.000Z | 2021-11-05T16:49:29.000Z | # -*- coding: utf-8 -*-
"""
Defines components for holding properties of rocks or samples or whatevers.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
import json
from .utils import CustomFormatter
class ComponentError(Exception):
    """Generic error raised for problems building or summarizing components."""
class Component(object):
    """
    A container for the properties of a rock (or sample, or whatever).

    Initialize with a dictionary of properties. You can use any
    properties you want e.g.:

        - lithology: a simple one-word rock type
        - colour, e.g. 'grey'
        - grainsize or range, e.g. 'vf-f'
        - modifier, e.g. 'rippled'
        - quantity, e.g. '35%', or 'stringers'
        - description, e.g. from cuttings

    Values that parse as numbers are stored as floats; ``None`` values
    are skipped entirely.
    """
    def __init__(self, properties=None):
        if properties is not None:
            for k, v in properties.items():
                try:  # To treat as number...
                    setattr(self, k, float(v))
                except ValueError:  # It's a string.
                    setattr(self, k, v)
                except TypeError:  # It's probably None; skip the property.
                    continue

    def __str__(self):
        return self.__dict__.__str__()

    def __repr__(self):
        return "Component({0})".format(str(self))

    def __len__(self):
        return len(self.__dict__)

    def __iter__(self):
        return iter(self.__dict__)

    def __getitem__(self, key):
        # dict.get: missing keys give None rather than raising.
        return self.__dict__.get(key)

    def __setitem__(self, key, value):
        self.__dict__[key] = value
        return

    def __delitem__(self, key):
        del self.__dict__[key]
        return

    def __bool__(self):
        # A Component is truthy iff it has at least one property.
        return bool(self.__dict__)

    # For Python 2
    __nonzero__ = __bool__

    def __eq__(self, other):
        """
        Case-insensitive comparison of properties. String-valued
        properties are lower-cased; empty values are ignored. If any
        value is not a string, only the string-valued properties are
        compared.
        """
        if not isinstance(other, self.__class__):
            return False
        ds = self.__dict__.items()
        do = other.__dict__.items()
        try:
            strobj = basestring  # Python 2 only.
        except NameError:  # Python 3: bare `except` here would mask real bugs.
            strobj = str
        # Weed out empty elements and case-desensitize.
        try:
            s = {k.lower(): v.lower() for k, v in ds if v}
            o = {k.lower(): v.lower() for k, v in do if v}
        except (AttributeError, ValueError):  # Dealing with numbers.
            s = {k.lower(): v for k, v in ds if isinstance(v, strobj)}
            o = {k.lower(): v for k, v in do if isinstance(v, strobj)}
        return s == o

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        """
        If we define __eq__ we also need __hash__ otherwise the object
        becomes unhashable. All this does is hash the frozenset of the
        keys. (You can only hash immutables.)
        """
        return hash(frozenset(self.__dict__.keys()))

    def keys(self):
        """
        Needed for double-star behaviour, along with __getitem__().
        """
        return self.__dict__.keys()

    def _repr_html_(self):
        """
        Jupyter Notebook magic repr function.
        """
        rows = ''
        s = '<tr><td><strong>{k}</strong></td><td>{v}</td></tr>'
        for k, v in self.__dict__.items():
            rows += s.format(k=k, v=v)
        html = '<table>{}</table>'.format(rows)
        return html

    def json(self):
        """
        Returns a JSON dump of the dictionary representation of the instance.
        """
        return json.dumps(self.__dict__)

    @classmethod
    def from_text(cls, text, lexicon, required=None, first_only=True):
        """
        Generate a Component from a text string, using a Lexicon.

        Args:
            text (str): The text string to parse.
            lexicon (Lexicon): The dictionary to use for the
                categories and lexemes.
            required (str): An attribute that we must have. If a required
                attribute is missing from the component, then None is returned.
            first_only (bool): Whether to only take the first
                match of a lexeme against the text string.

        Returns:
            Component: A Component object, or None if there was no
                must-have field.
        """
        component = lexicon.get_component(text, first_only=first_only)
        if required and (required not in component):
            return None
        return cls(component)

    def summary(self, fmt=None, initial=True, default=''):
        """
        Given a format string, return a summary description of a component.

        Args:
            fmt (str): Describes the format with a string. If no format is
                given, you will just get a list of attributes. If you give the
                empty string (''), you'll get `default` back. By default this
                gives you the empty string, effectively suppressing the
                summary.
            initial (bool): Whether to capitialize the first letter. Default is
                True.
            default (str): What to give if there's no component defined.

        Returns:
            str: A summary string.

        Example:
            r = Component({'colour': 'Red',
                           'grainsize': 'VF-F',
                           'lithology': 'Sandstone'})

            r.summary() --> 'Red, vf-f, sandstone'
        """
        if default and not self.__dict__:
            return default
        if fmt == '':
            return default
        # Equality test here: the original `v is not ''` relied on CPython's
        # interning of the empty string, which is not guaranteed behaviour.
        keys = [k for k, v in self.__dict__.items() if v != '']
        f = fmt or '{' + '}, {'.join(keys) + '}'
        try:
            summary = CustomFormatter().format(f, **self.__dict__)
        except KeyError as e:
            raise ComponentError("Error building summary, " + str(e))
        if summary and initial and not fmt:
            summary = summary[0].upper() + summary[1:]
        return summary
| 29.385366 | 79 | 0.549801 |
acf1867a30b914d6603bb4c0d97a5a4f08c3fa29 | 14,288 | py | Python | tests/steembase/test_base_account.py | Netherdrake/steem-python | 06d084c0b784556fec1210c8d37bc0fb612d4426 | [
"MIT"
] | 24 | 2017-05-01T20:31:32.000Z | 2020-04-09T02:32:02.000Z | tests/steembase/test_base_account.py | Netherdrake/steem-python | 06d084c0b784556fec1210c8d37bc0fb612d4426 | [
"MIT"
] | 7 | 2017-05-31T22:47:57.000Z | 2018-02-10T22:21:25.000Z | tests/steembase/test_base_account.py | Netherdrake/steem-python | 06d084c0b784556fec1210c8d37bc0fb612d4426 | [
"MIT"
] | 25 | 2017-05-31T22:48:11.000Z | 2020-07-19T11:23:34.000Z | import unittest
from steembase.base58 import Base58
from steembase.account import BrainKey, Address, PublicKey, PrivateKey, PasswordKey
class Testcases(unittest.TestCase):
    """
    Known-answer tests for the Base58/key primitives (Base58, Address,
    PublicKey, PrivateKey, BrainKey, PasswordKey).

    Every expected value is a fixed vector: a hex seed paired with its
    WIF/BTS/STM encoding, or a brainkey phrase paired with its derived
    keys.  Do not edit the literals -- they are the test oracle.
    """

    # Round-trip hex seeds <-> WIF strings via the Base58 wrapper.
    def test_B85hexgetb58_btc(self):
        self.assertEqual(["5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd",
                          "5JWcdkhL3w4RkVPcZMdJsjos22yB5cSkPExerktvKnRNZR5gx1S",
                          "5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq",
                          "5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R",
                          "5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7",
                          "02b52e04a0acfe611a4b6963462aca94b6ae02b24e321eda86507661901adb49",
                          "5b921f7051be5e13e177a0253229903c40493df410ae04f4a450c85568f19131",
                          "0e1bfc9024d1f55a7855dc690e45b2e089d2d825a4671a3c3c7e4ea4e74ec00e",
                          "6e5cc4653d46e690c709ed9e0570a2c75a286ad7c1bc69a648aae6855d919d3e",
                          ],
                         [format(Base58("02b52e04a0acfe611a4b6963462aca94b6ae02b24e321eda86507661901adb49"), "WIF"),
                          format(Base58("5b921f7051be5e13e177a0253229903c40493df410ae04f4a450c85568f19131"), "WIF"),
                          format(Base58("0e1bfc9024d1f55a7855dc690e45b2e089d2d825a4671a3c3c7e4ea4e74ec00e"), "WIF"),
                          format(Base58("6e5cc4653d46e690c709ed9e0570a2c75a286ad7c1bc69a648aae6855d919d3e"), "WIF"),
                          format(Base58("b84abd64d66ee1dd614230ebbe9d9c6d66d78d93927c395196666762e9ad69d8"), "WIF"),
                          repr(Base58("5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd")),
                          repr(Base58("5JWcdkhL3w4RkVPcZMdJsjos22yB5cSkPExerktvKnRNZR5gx1S")),
                          repr(Base58("5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq")),
                          repr(Base58("5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R")),
                          ])

    # Hex seed formatted with the "BTS" prefix.
    def test_B85hexgetb58(self):
        self.assertEqual(['BTS2CAbTi1ZcgMJ5otBFZSGZJKJenwGa9NvkLxsrS49Kr8JsiSGc',
                          'BTShL45FEyUVSVV1LXABQnh4joS9FsUaffRtsdarB5uZjPsrwMZF',
                          'BTS7DQR5GsfVaw4wJXzA3TogDhuQ8tUR2Ggj8pwyNCJXheHehL4Q',
                          'BTSqc4QMAJHAkna65i8U4b7nkbWk4VYSWpZebW7JBbD7MN8FB5sc',
                          'BTS2QAVTJnJQvLUY4RDrtxzX9jS39gEq8gbqYMWjgMxvsvZTJxDSu'
                          ],
                         [format(Base58("02b52e04a0acfe611a4b6963462aca94b6ae02b24e321eda86507661901adb49"), "BTS"),
                          format(Base58("5b921f7051be5e13e177a0253229903c40493df410ae04f4a450c85568f19131"), "BTS"),
                          format(Base58("0e1bfc9024d1f55a7855dc690e45b2e089d2d825a4671a3c3c7e4ea4e74ec00e"), "BTS"),
                          format(Base58("6e5cc4653d46e690c709ed9e0570a2c75a286ad7c1bc69a648aae6855d919d3e"), "BTS"),
                          format(Base58("b84abd64d66ee1dd614230ebbe9d9c6d66d78d93927c395196666762e9ad69d8"), "BTS")])

    # Address parsing/formatting is the identity on valid addresses.
    def test_Address(self):
        self.assertEqual([format(Address("BTSFN9r6VYzBK8EKtMewfNbfiGCr56pHDBFi", prefix="BTS"), "BTS"),
                          format(Address("BTSdXrrTXimLb6TEt3nHnePwFmBT6Cck112", prefix="BTS"), "BTS"),
                          format(Address("BTSJQUAt4gz4civ8gSs5srTK4r82F7HvpChk", prefix="BTS"), "BTS"),
                          format(Address("BTSFPXXHXXGbyTBwdKoJaAPXRnhFNtTRS4EL", prefix="BTS"), "BTS"),
                          format(Address("BTS3qXyZnjJneeAddgNDYNYXbF7ARZrRv5dr", prefix="BTS"), "BTS"),
                          ],
                         ["BTSFN9r6VYzBK8EKtMewfNbfiGCr56pHDBFi",
                          "BTSdXrrTXimLb6TEt3nHnePwFmBT6Cck112",
                          "BTSJQUAt4gz4civ8gSs5srTK4r82F7HvpChk",
                          "BTSFPXXHXXGbyTBwdKoJaAPXRnhFNtTRS4EL",
                          "BTS3qXyZnjJneeAddgNDYNYXbF7ARZrRv5dr",
                          ])

    # Public key -> derived address.
    def test_PubKey(self):
        self.assertEqual([format(PublicKey("BTS6UtYWWs3rkZGV8JA86qrgkG6tyFksgECefKE1MiH4HkLD8PFGL", prefix="BTS").address, "BTS"),
                          format(PublicKey("BTS8YAMLtNcnqGNd3fx28NP3WoyuqNtzxXpwXTkZjbfe9scBmSyGT", prefix="BTS").address, "BTS"),
                          format(PublicKey("BTS7HUo6bm7Gfoi3RqAtzwZ83BFCwiCZ4tp37oZjtWxGEBJVzVVGw", prefix="BTS").address, "BTS"),
                          format(PublicKey("BTS6676cZ9qmqPnWMrm4McjCuHcnt6QW5d8oRJ4t8EDH8DdCjvh4V", prefix="BTS").address, "BTS"),
                          format(PublicKey("BTS7u8m6zUNuzPNK1tPPLtnipxgqV9mVmTzrFNJ9GvovvSTCkVUra", prefix="BTS").address, "BTS")
                          ],
                         ["BTS66FCjYKzMwLbE3a59YpmFqA9bwporT4L3",
                          "BTSKNpRuPX8KhTBsJoFp1JXd7eQEsnCpRw3k",
                          "BTS838ENJargbUrxXWuE2xD9HKjQaS17GdCd",
                          "BTSNsrLFWTziSZASnNJjWafFtGBfSu8VG8KU",
                          "BTSDjAGuXzk3WXabBEgKKc8NsuQM412boBdR"
                          ])

    # Private key (WIF) -> compressed BTS address.
    def test_btsprivkey(self):
        self.assertEqual([format(PrivateKey("5HqUkGuo62BfcJU5vNhTXKJRXuUi9QSE6jp8C3uBJ2BVHtB8WSd").address, "BTS"),
                          format(PrivateKey("5JWcdkhL3w4RkVPcZMdJsjos22yB5cSkPExerktvKnRNZR5gx1S").address, "BTS"),
                          format(PrivateKey("5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq").address, "BTS"),
                          format(PrivateKey("5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R").address, "BTS"),
                          format(PrivateKey("5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7").address, "BTS")
                          ],
                         ["BTSFN9r6VYzBK8EKtMewfNbfiGCr56pHDBFi",
                          "BTSdXrrTXimLb6TEt3nHnePwFmBT6Cck112",
                          "BTSJQUAt4gz4civ8gSs5srTK4r82F7HvpChk",
                          "BTSFPXXHXXGbyTBwdKoJaAPXRnhFNtTRS4EL",
                          "BTS3qXyZnjJneeAddgNDYNYXbF7ARZrRv5dr",
                          ])

    # Private key (WIF) -> uncompressed BTC address.
    def test_btcprivkey(self):
        self.assertEqual([format(PrivateKey("5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq").uncompressed.address, "BTC"),
                          format(PrivateKey("5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R").uncompressed.address, "BTC"),
                          format(PrivateKey("5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7").uncompressed.address, "BTC"),
                          ],
                         ["1G7qw8FiVfHEFrSt3tDi6YgfAdrDrEM44Z",
                          "12c7KAAZfpREaQZuvjC5EhpoN6si9vekqK",
                          "1Gu5191CVHmaoU3Zz3prept87jjnpFDrXL",
                          ])

    # str(PublicKey) is the identity on valid keys.
    def test_PublicKey(self):
        self.assertEqual([str(PublicKey("BTS6UtYWWs3rkZGV8JA86qrgkG6tyFksgECefKE1MiH4HkLD8PFGL", prefix="BTS")),
                          str(PublicKey("BTS8YAMLtNcnqGNd3fx28NP3WoyuqNtzxXpwXTkZjbfe9scBmSyGT", prefix="BTS")),
                          str(PublicKey("BTS7HUo6bm7Gfoi3RqAtzwZ83BFCwiCZ4tp37oZjtWxGEBJVzVVGw", prefix="BTS")),
                          str(PublicKey("BTS6676cZ9qmqPnWMrm4McjCuHcnt6QW5d8oRJ4t8EDH8DdCjvh4V", prefix="BTS")),
                          str(PublicKey("BTS7u8m6zUNuzPNK1tPPLtnipxgqV9mVmTzrFNJ9GvovvSTCkVUra", prefix="BTS"))
                          ],
                         ["BTS6UtYWWs3rkZGV8JA86qrgkG6tyFksgECefKE1MiH4HkLD8PFGL",
                          "BTS8YAMLtNcnqGNd3fx28NP3WoyuqNtzxXpwXTkZjbfe9scBmSyGT",
                          "BTS7HUo6bm7Gfoi3RqAtzwZ83BFCwiCZ4tp37oZjtWxGEBJVzVVGw",
                          "BTS6676cZ9qmqPnWMrm4McjCuHcnt6QW5d8oRJ4t8EDH8DdCjvh4V",
                          "BTS7u8m6zUNuzPNK1tPPLtnipxgqV9mVmTzrFNJ9GvovvSTCkVUra"
                          ])

    # str() round-trips WIF; repr() exposes the raw hex seed.
    def test_Privatekey(self):
        self.assertEqual([str(PrivateKey("5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq")),
                          str(PrivateKey("5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R")),
                          str(PrivateKey("5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7")),
                          repr(PrivateKey("5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq")),
                          repr(PrivateKey("5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R")),
                          repr(PrivateKey("5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7")),
                          ],
                         ["5HvVz6XMx84aC5KaaBbwYrRLvWE46cH6zVnv4827SBPLorg76oq",
                          "5Jete5oFNjjk3aUMkKuxgAXsp7ZyhgJbYNiNjHLvq5xzXkiqw7R",
                          "5KDT58ksNsVKjYShG4Ls5ZtredybSxzmKec8juj7CojZj6LPRF7",
                          '0e1bfc9024d1f55a7855dc690e45b2e089d2d825a4671a3c3c7e4ea4e74ec00e',
                          '6e5cc4653d46e690c709ed9e0570a2c75a286ad7c1bc69a648aae6855d919d3e',
                          'b84abd64d66ee1dd614230ebbe9d9c6d66d78d93927c395196666762e9ad69d8'
                          ])

    # Brainkey phrase -> derived private key (WIF).
    def test_BrainKey(self):
        self.assertEqual([str(BrainKey("COLORER BICORN KASBEKE FAERIE LOCHIA GOMUTI SOVKHOZ Y GERMAL AUNTIE PERFUMY TIME FEATURE GANGAN CELEMIN MATZO").get_private()),
                          str(BrainKey("NAK TILTING MOOTING TAVERT SCREENY MAGIC BARDIE UPBORNE CONOID MAUVE CARBON NOTAEUM BITUMEN HOOEY KURUMA COWFISH").get_private()),
                          str(BrainKey("CORKITE CORDAGE FONDISH UNDER FORGET BEFLEA OUTBUD ZOOGAMY BERLINE ACANTHA STYLO YINCE TROPISM TUNKET FALCULA TOMENT").get_private()),
                          str(BrainKey("MURZA PREDRAW FIT LARIGOT CRYOGEN SEVENTH LISP UNTAWED AMBER CRETIN KOVIL TEATED OUTGRIN POTTAGY KLAFTER DABB").get_private()),
                          str(BrainKey("VERDICT REPOUR SUNRAY WAMBLY UNFILM UNCOUS COWMAN REBUOY MIURUS KEACORN BENZOLE BEMAUL SAXTIE DOLENT CHABUK BOUGHED").get_private()),
                          str(BrainKey("HOUGH TRUMPH SUCKEN EXODY MAMMATE PIGGIN CRIME TEPEE URETHAN TOLUATE BLINDLY CACOEPY SPINOSE COMMIE GRIECE FUNDAL").get_private()),
                          str(BrainKey("OERSTED ETHERIN TESTIS PEGGLE ONCOST POMME SUBAH FLOODER OLIGIST ACCUSE UNPLAT OATLIKE DEWTRY CYCLIZE PIMLICO CHICOT").get_private()),
                          ],
                         ["5JfwDztjHYDDdKnCpjY6cwUQfM4hbtYmSJLjGd9KTpk9J4H2jDZ",
                          "5JcdQEQjBS92rKqwzQnpBndqieKAMQSiXLhU7SFZoCja5c1JyKM",
                          "5JsmdqfNXegnM1eA8HyL6uimHp6pS9ba4kwoiWjjvqFC1fY5AeV",
                          "5J2KeFptc73WTZPoT1Sd59prFep6SobGobCYm7T5ZnBKtuW9RL9",
                          "5HryThsy6ySbkaiGK12r8kQ21vNdH81T5iifFEZNTe59wfPFvU9",
                          "5Ji4N7LSSv3MAVkM3Gw2kq8GT5uxZYNaZ3d3y2C4Ex1m7vshjBN",
                          "5HqSHfckRKmZLqqWW7p2iU18BYvyjxQs2sksRWhXMWXsNEtxPZU",
                          ])

    # Whitespace variants of the same phrase normalise to one brainkey.
    def test_BrainKey_normalize(self):
        b = "COLORER BICORN KASBEKE FAERIE LOCHIA GOMUTI SOVKHOZ Y GERMAL AUNTIE PERFUMY TIME FEATURE GANGAN CELEMIN MATZO"
        self.assertEqual([BrainKey(b + "").get_brainkey(),
                          BrainKey(b + " ").get_brainkey(),
                          BrainKey(b + "  ").get_brainkey(),
                          BrainKey(b + "\t").get_brainkey(),
                          BrainKey(b + "\t\t").get_brainkey(),
                          BrainKey(b.replace(" ", "\t")).get_brainkey(),
                          BrainKey(b.replace(" ", "  ")).get_brainkey(),
                          ],
                         [b, b, b, b, b, b, b])

    # next_sequence() yields a deterministic chain of derived keys.
    def test_BrainKey_sequences(self):
        b = BrainKey("COLORER BICORN KASBEKE FAERIE LOCHIA GOMUTI SOVKHOZ Y GERMAL AUNTIE PERFUMY TIME FEATURE GANGAN CELEMIN MATZO")
        keys = ["5Hsbn6kXio4bb7eW5bX7kTp2sdkmbzP8kGWoau46Cf7en7T1RRE",
                "5K9MHEyiSye5iFL2srZu3ZVjzAZjcQxUgUvuttcVrymovFbU4cc",
                "5JBXhzDWQdYPAzRxxuGtzqM7ULLKPK7GZmktHTyF9foGGfbtDLT",
                "5Kbbfbs6DmJFNddWiP1XZfDKwhm5dkn9KX5AENQfQke2RYBBDcz",
                "5JUqLwgxn8f7myNz4gDwo5e77HZgopHMDHv4icNVww9Rxu1GDG5",
                "5JNBVj5QVh86N8MUUwY3EVUmsZwChZftxnuJx22DzEtHWC4rmvK",
                "5JdvczYtxPPjQdXMki1tpNvuSbvPMxJG5y4ndEAuQsC5RYMQXuC",
                "5HsUSesU2YB4EA3dmpGtHh8aPAwEdkdhidG8hcU2Nd2tETKk85t",
                "5JpveiQd1mt91APyQwvsCdAXWJ7uag3JmhtSxpGienic8vv1k2W",
                "5KDGhQUqQmwcGQ9tegimSyyT4vmH8h2fMzoNe1MT9bEGvRvR6kD"]
        for i in keys:
            p = b.next_sequence().get_private()
            self.assertEqual(str(p), i)

    # (account, password, role) -> derived STM public key.
    def test_PasswordKey(self):
        a = ["Aang7foN3oz1Ungai2qua5toh3map8ladei1eem2ohsh2shuo8aeji9Thoseo7ah",
             "iep1Mees9eghiifahwei5iidi0Sazae9aigaeT7itho3quoo2dah5zuvobaelau5",
             "ohBeuyoothae5aer9odaegh5Eeloh1fi7obei9ahSh0haeYuas1sheehaiv5LaiX",
             "geiQuoo9NeeLoaZee0ain3Ku1biedohsesien4uHo1eib1ahzaesh5shae3iena7",
             "jahzeice6Ix8ohBo3eik9pohjahgeegoh9sahthai1aeMahs8ki7Iub1oojeeSuo",
             "eiVahHoh2hi4fazah9Tha8loxeeNgequaquuYee6Shoopo3EiWoosheeX6yohg2o",
             "PheeCh3ar8xoofoiphoo4aisahjiiPah4vah0eeceiJ2iyeem9wahyupeithah9T",
             "IuyiibahNgieshei2eeFu8aic1IeMae9ooXi9jaiwaht4Wiengieghahnguang0U",
             "Ipee1quee7sheughemae4eir8pheix3quac3ei0Aquo9ohieLaeseeh8AhGeM2ew",
             "Tech5iir0aP6waiMeiHoph3iwoch4iijoogh0zoh9aSh6Ueb2Dee5dang1aa8IiP"
             ]
        b = ["STM5NyCrrXHmdikC6QPRAPoDjSHVQJe3WC5bMZuF6YhqhSsfYfjhN",
             "STM8gyvJtYyv5ZbT2ZxbAtgufQ5ovV2bq6EQp4YDTzQuSwyg7Ckry",
             "STM7yE71iVPSpaq8Ae2AmsKfyFxA8pwYv5zgQtCnX7xMwRUQMVoGf",
             "STM5jRgWA2kswPaXsQNtD2MMjs92XfJ1TYob6tjHtsECg2AusF5Wo",
             "STM6XHwVxcP6zP5NV1jUbG6Kso9m8ZG9g2CjDiPcZpAxHngx6ATPB",
             "STM59X1S4ofTAeHd1iNHDGxim5GkLo2AdcznksUsSYGU687ywB5WV",
             "STM6BPPL4iSRbFVVN8v3BEEEyDsC1STRK7Ba9ewQ4Lqvszn5J8VAe",
             "STM7cdK927wj95ptUrCk6HKWVeF74LG5cTjDTV22Z3yJ4Xw8xc9qp",
             "STM7VNFRjrE1hs1CKpEAP9NAabdFpwvzYXRKvkrVBBv2kTQCbNHz7",
             "STM7ZZFhEBjujcKjkmY31i1spPMx6xDSRhkursZLigi2HKLuALe5t",
             ]
        for i, pwd in enumerate(a):
            p = format(PasswordKey("xeroc", pwd, "posting").get_public(), "STM")
            self.assertEqual(p, b[i])
| 72.897959 | 174 | 0.641797 |
acf187c204cdadafa46ecbd4bd74047f442263aa | 6,369 | py | Python | src/NeuralNetwork.py | pkouvaros/venus2_vnncomp21 | 57e9608041d230b5d78c4f2afb890b81035436a1 | [
"BSD-2-Clause"
] | null | null | null | src/NeuralNetwork.py | pkouvaros/venus2_vnncomp21 | 57e9608041d230b5d78c4f2afb890b81035436a1 | [
"BSD-2-Clause"
] | null | null | null | src/NeuralNetwork.py | pkouvaros/venus2_vnncomp21 | 57e9608041d230b5d78c4f2afb890b81035436a1 | [
"BSD-2-Clause"
] | null | null | null | # ************
# File: NeuralNetwork.py
# Top contributors (to current version):
# Panagiotis Kouvaros (panagiotis.kouvaros@gmail.com)
# This file is part of the Venus project.
# Copyright: 2019-2021 by the authors listed in the AUTHORS file in the
# top-level directory.
# License: BSD 2-Clause (see the file LICENSE in the top-level directory).
# Description: Neural Network class.
# ************
from src.input.ONNXParser import ONNXParser
# from src.input.KerasParser import KerasParser
from src.Specification import Specification
from src.Layers import FullyConnected
from src.Formula import TrueFormula
from src.Layers import Input, FullyConnected, Conv2D
from src.SIP import SIP
# from src.utils.Logger import get_logger
from src.utils.Activations import Activations
from src.Parameters import SIP as SIP_PARAMS
import numpy as np
import os
import itertools
class NeuralNetwork():
# logger = None
def __init__(self, model_path, logfile):
"""
Arguments:
model_path: str of path to neural network model.
name: name of the neural network.
"""
self.model_path = model_path
self.logfile = logfile
# if NeuralNetwork.logger is None:
# NeuralNetwork.logger = get_logger(__name__, logfile)
self.mean = 0
self.std = 1
def load(self):
"""
Loads a neural network into Venus.
Arguments:
model_path: str of path to neural network model.
Returns
None
"""
_,model_format = os.path.splitext(self.model_path)
if not model_format in ['.h5', '.onnx', '.onnx.gz']:
raise Exception('only .h5 and .onnx models are supported')
if model_format == '.h5':
keras_parser = KerasParser()
self.layers = keras_parser.load(self.model_path)
else:
onnx_parser = ONNXParser()
self.layers = onnx_parser.load(self.model_path)
self.mean = onnx_parser.mean
self.std = onnx_parser.std
def copy(self):
"""
Returns:
a copy of the calling object
"""
nn = NeuralNetwork(self.model_path, self.logfile)
nn.layers = [layer.copy() for layer in self.layers]
return nn
def clean_vars(self):
"""
Nulls out all MILP variables associate with the network.
Returns
None
"""
for layer in self.layers + [self.input] + [self.output]:
layer.clean_vars()
def predict(self, input, mean=0, std=1):
"""
Computes the output of the network on a given input.
Arguments:
input: input vector to the network.
mean: normalisation mean.
std: normalisation standard deviation.
Returns
vector of the network's output on input
"""
nn = self.copy()
input_layer = Input(input,input)
spec = Specification(input_layer,TrueFormula())
spec.normalise(mean, std)
params = SIP_PARAMS()
params.OSIP_CONV = False
params.OSIP_FC = False
sip = SIP([input_layer] + nn.layers, params, self.logfile)
sip.set_bounds()
return nn.layers[-1].post_bounds.lower
def classify(self, input, mean=0, std=1):
"""
Computes the classification of a given input by the network.
Arguments:
input: input vector to the network.
mean: normalisation mean.
std: normalisation standard deviation.
Returns
int of the class of the input
"""
pred = self.predict(input, mean, std)
return np.argmax(pred)
def is_fc(self):
for layer in self.layers:
if not isinstance(layer, FullyConnected):
return False
return True
def get_n_relu_nodes(self):
"""
Computes the number of ReLU nodes in the network.
Returns:
int of the number of ReLU nodes.
"""
count = 0
for layer in self.layers:
if layer.activation == Activations.relu:
count += len(layer.outputs())
return count
def get_n_stabilised_nodes(self):
"""
Computes the number of stabilised ReLU nodes in the network.
Returns:
int of the number of stabilised ReLU nodes.
"""
count = 0
for layer in self.layers:
if layer.activation == Activations.relu:
count += len(layer.get_stable())
return count
def get_stability_ratio(self):
"""
Computes the ratio of stabilised ReLU nodes to the total number of ReLU nodes.
Returns:
float of the ratio.
"""
relu_count = self.get_n_relu_nodes()
return self.get_n_stabilised_nodes() / relu_count if relu_count > 0 else 0
def get_output_range(self):
"""
Computes the output range of the network.
Returns:
float of the range.
"""
diff = self.layers[-1].post_bounds.upper - self.layers[-1].post_bounds.lower
rng = np.average(diff)
return rng
    def neighbours_from_p_layer(self, depth, node):
        """
        Determines the neighbouring nodes to the given node from the previous
        layer.

        Arguments:
            depth: the depth of the layer of the node.
            node: the index of the node.

        Returns:
            a list of neighbouring nodes.
        """
        # l is the layer containing `node`; p_l is the layer feeding into it.
        l, p_l = self.layers[depth-1], self.layers[depth-2]
        if isinstance(l, FullyConnected):
            # Fully connected: every node of the previous layer is a neighbour.
            return p_l.outputs()
        elif isinstance(l, Conv2D):
            # Convolution: neighbours are the nodes inside the kernel's
            # receptive field, clipped to the input's spatial extent.
            # NOTE(review): assumes node = (row, col, ...) and
            # kernels.shape = (k_rows, k_cols, in_channels, ...) -- confirm.
            x_start = node[0] * l.strides[0] - l.padding[0]
            x_rng = range(x_start, x_start + l.kernels.shape[0])
            x = [i for i in x_rng if i >= 0 and i < l.input_shape[0]]
            y_start = node[1] * l.strides[1] - l.padding[1]
            y_rng = range(y_start, y_start + l.kernels.shape[1])
            y = [i for i in y_rng if i>=0 and i < l.input_shape[1]]
            # Every input channel contributes to the convolution output.
            z = [i for i in range(l.kernels.shape[2])]
            # Cartesian product of valid rows, columns and channels.
            return [i for i in itertools.product(*[x,y,z])]
| 27.691304 | 86 | 0.578584 |
acf188e18ff0c9e8b4d4d02eb631d86218f97594 | 6,731 | py | Python | fairseq/criterions/drop_label_smoothed_cross_entropy.py | violet-zct/fairseq-detect-hallucination | 7382452ed87cbdaa7080fc065e3efaea34d8a12b | [
"MIT"
] | 13 | 2021-06-08T21:07:12.000Z | 2022-02-28T16:47:10.000Z | fairseq/criterions/drop_label_smoothed_cross_entropy.py | violet-zct/fairseq-detect-hallucination | 7382452ed87cbdaa7080fc065e3efaea34d8a12b | [
"MIT"
] | 1 | 2021-12-28T13:04:23.000Z | 2022-03-19T17:39:57.000Z | fairseq/criterions/drop_label_smoothed_cross_entropy.py | violet-zct/fairseq-detect-hallucination | 7382452ed87cbdaa7080fc065e3efaea34d8a12b | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from loss_dropper import LossDropper
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
    """Label-smoothed negative log-likelihood loss.

    Args:
        lprobs: log-probabilities, shape (..., vocab).
        target: gold indices, shape (...) or (..., 1).
        epsilon: smoothing mass spread uniformly over the vocabulary.
        ignore_index: target value whose positions contribute zero loss
            (e.g. padding); when None, the trailing dim is squeezed instead.
        reduce: if True, sum per-position losses to scalar tensors.

    Returns:
        Tuple of (smoothed loss, unsmoothed nll loss).
    """
    # Align target with lprobs for gather: (...,) -> (..., 1).
    if target.dim() + 1 == lprobs.dim():
        target = target.unsqueeze(-1)
    nll = -lprobs.gather(dim=-1, index=target)
    # Sum over the vocabulary: the uniform-smoothing component.
    smooth = -lprobs.sum(dim=-1, keepdim=True)
    if ignore_index is None:
        nll = nll.squeeze(-1)
        smooth = smooth.squeeze(-1)
    else:
        pad = target.eq(ignore_index)
        if pad.any():
            nll.masked_fill_(pad, 0.)
            smooth.masked_fill_(pad, 0.)
    if reduce:
        nll = nll.sum()
        smooth = smooth.sum()
    per_class_eps = epsilon / lprobs.size(-1)
    smoothed = (1. - epsilon) * nll + per_class_eps * smooth
    return smoothed, nll
@register_criterion('drop_loss_label_smoothed_cross_entropy')
class Drop_Loss_LabelSmoothedCrossEntropyCriterion(FairseqCriterion):
    """Label-smoothed cross entropy with two optional training-time variants:
    a token-level word filter (masks loss on tokens labelled as noise) and
    loss truncation (drops the highest-loss sentences; Kang & Hashimoto 2020).
    """
    def __init__(self, task, sentence_avg, label_smoothing, word_filter, drop_c):
        super().__init__(task)
        self.sentence_avg = sentence_avg
        self.eps = label_smoothing
        self.word_filter = word_filter
        self.drop_c = drop_c
        # LossDropper keeps a running loss distribution and drops the
        # top drop_c fraction of sentences; only built when enabled.
        if drop_c > 0:
            self.dropper = LossDropper(dropc=drop_c)
    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        # fmt: off
        parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
                            help='epsilon for label smoothing, 0 means no label smoothing')
        parser.add_argument('--word-filter', default=0, type=int, help="set to 1 to turn on our word filter loss")
        parser.add_argument('--drop-c', default=0, type=float, help="truncation loss from (Kang and Hashimoto, 2020)")
        # fmt: on
    def forward(self, model, sample, reduce=True):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        # target_labels marks noisy tokens (1 = noisy); the model also
        # receives the complementary mask through net_input.
        if self.training and "target_labels" in sample:
            sample['net_input']["target_label_mask"] = 1. - sample["target_labels"]
        net_output = model(**sample['net_input'])
        if self.training and "target_labels" in sample and self.word_filter:
            # Word-filter branch: loss only on tokens not labelled noisy.
            loss, nll_loss = self.compute_token_loss(model, net_output, sample, sample['target_labels'])
            sample_size = sample['target'].size(0) if self.sentence_avg else (1 - sample['target_labels']).sum().item()
        elif self.training and self.drop_c > 0:
            # Loss-truncation branch: per-sentence losses, highest dropped.
            loss, nll_loss, pad_mask = self.compute_drop_loss(model, net_output, sample)
            mask = self.dropper(loss)  # The dropper returns a mask of 0s where data should be dropped.
            loss *= mask
            loss = loss.sum()
            sample_size = mask.sum()
            # sample_size = mask.sum() if self.sentence_avg else (pad_mask*mask).sum()
        else:
            # Plain label-smoothed cross entropy (also the eval path).
            loss, nll_loss = self.compute_loss(model, net_output, sample, reduce=reduce)
            sample_size = sample['target'].size(0) if self.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': loss.data,
            'nll_loss': nll_loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['target'].size(0),
            'sample_size': sample_size,
        }
        return loss, sample_size, logging_output
    def compute_token_loss(self, model, net_output, sample, target_weights):
        """Sum of per-token losses weighted by (1 - target_weights),
        i.e. tokens labelled noisy (weight 1) contribute zero loss."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        # mask = (sample['target'] != self.padding_idx).float()
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=False,
        )
        token_loss = loss.reshape_as(sample['target']) * (1.0 - target_weights)
        nll_loss = nll_loss.reshape_as(sample['target']) * (1.0 - target_weights)
        return token_loss.sum(), nll_loss.sum()
    def compute_drop_loss(self, model, net_output, sample):
        """Per-sentence mean token loss (for the dropper), plus the summed
        nll loss and the per-sentence non-pad token counts."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        # mask = (sample['target'] != self.padding_idx).float()
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=False,
        )
        mask = (sample['target'] != self.padding_idx).float()
        loss = loss.reshape_as(sample['target'])
        # Average over non-pad tokens so sentence length does not bias
        # which sentences the dropper truncates.
        token_loss = (loss * mask).sum(1) / mask.sum(1)
        nll_loss = nll_loss.reshape_as(sample['target'])
        return token_loss, nll_loss.sum(), mask.sum(1)
    def compute_loss(self, model, net_output, sample, reduce=True):
        """Standard label-smoothed cross entropy over all tokens."""
        lprobs = model.get_normalized_probs(net_output, log_probs=True)
        lprobs = lprobs.view(-1, lprobs.size(-1))
        target = model.get_targets(sample, net_output).view(-1, 1)
        loss, nll_loss = label_smoothed_nll_loss(
            lprobs, target, self.eps, ignore_index=self.padding_idx, reduce=reduce,
        )
        return loss, nll_loss
    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        nll_loss_sum = sum(log.get('nll_loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        # log(2) converts nats to bits for reporting.
        metrics.log_scalar('loss', loss_sum / sample_size / math.log(2), sample_size, round=3)
        metrics.log_scalar('nll_loss', nll_loss_sum / ntokens / math.log(2), ntokens, round=3)
        metrics.log_derived('ppl', lambda meters: utils.get_perplexity(meters['nll_loss'].avg))
    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improves distributed training speed.
        """
        return True
| 46.42069 | 119 | 0.653692 |
acf189444720f4d3fe3faaf5460dc525978da065 | 10,520 | py | Python | pyVideoDatasets/BackgroundSubtraction.py | colincsl/RGBD-Dataset-Reader | bafa7c74770f6e91a41312e009fbb4176af23e77 | [
"BSD-2-Clause-FreeBSD"
] | 6 | 2019-06-06T03:56:18.000Z | 2020-11-28T07:32:37.000Z | pyVideoDatasets/BackgroundSubtraction.py | colincsl/RGBD-Dataset-Reader | bafa7c74770f6e91a41312e009fbb4176af23e77 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pyVideoDatasets/BackgroundSubtraction.py | colincsl/RGBD-Dataset-Reader | bafa7c74770f6e91a41312e009fbb4176af23e77 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2019-07-04T08:51:02.000Z | 2020-12-10T16:13:59.000Z | '''
Implements methods for background subtraction
--Adaptive Mixture of Gaussians
--Median Model
--Mean Model
--Static Model
--Box Model
--other utility functions
Colin Lea 2013
'''
import os, time, sys
import numpy as np
import scipy.ndimage as nd
from IPython import embed
def constrain(img, mini=-1, maxi=-1):  # typical depth range: 500, 4000
    """Normalise a depth image into uint8 [0, 255].

    Values are clipped to [mini, maxi] (defaulting to the smallest non-zero
    value and the maximum of the image), shifted to start at zero and scaled
    to 255. Pixels that land on the maximum output value are zeroed, which
    suppresses saturated/invalid depth readings.

    Arguments:
        img: depth image (numeric numpy array).
        mini: lower clip bound; -1 means use the minimum non-zero value.
        maxi: upper clip bound; -1 means use the image maximum.

    Returns:
        uint8 array of the same shape as img.
    """
    if mini == -1:
        min_ = np.min(img[np.nonzero(img)])
    else:
        min_ = mini
    if maxi == -1:
        max_ = img.max()
    else:
        max_ = maxi
    img = np.clip(img, min_, max_)
    img -= min_
    if max_ == 0:
        max_ = 1
    # Bug fix: a degenerate range (max_ == min_, e.g. a constant image or
    # mini == maxi) previously caused a divide-by-zero below.
    if max_ == min_:
        max_ = min_ + 1
    img = np.array((img * (255.0 / (max_-min_))), dtype=np.uint8)
    # Zero out the brightest value (typically saturated/invalid pixels).
    img[img==img.max()] = 0
    return img
def extract_people(im, minPersonPixThresh=5000, gradThresh=None):
    '''
    Segment candidate people from a depth image via connected components.

    ---Parameters---
    im : depth image; zero pixels are treated as invalid/background.
    minPersonPixThresh : minimum pixel count for a component to be a person.
    gradThresh : if set, pixels whose depth-gradient magnitude exceeds this
        value are removed before labelling (suppresses depth edges).

    ---Returns---
    mask : image with each accepted person relabelled 1..userCount.
    userBoundingBoxes : slice tuples (from nd.find_objects), one per person.
    userLabels : original connected-component labels of accepted persons.
    px_count : pixel counts (see NOTE below about the no-user case).
    '''
    mask = im!=0
    if gradThresh == None:
        grad_bin = mask  # (unused)
    else:
        # Remove strong depth discontinuities before labelling.
        gradients = np.gradient(im)
        mag = np.sqrt(gradients[0]**2+gradients[1]**2)
        mask *= mag < gradThresh
    labelIm, maxLabel = nd.label(im*mask)
    connComps = nd.find_objects(labelIm, maxLabel)
    # Only extract if there are sufficient pixels and it is within a valid height/width ratio
    px_count = [nd.sum(labelIm[c]==l) for c,l in zip(connComps,range(1, maxLabel+1))]
    # width / height of each component's bounding box
    ratios = [(c[1].stop-c[1].start)/float(c[0].stop-c[0].start)for c in connComps]
    du = [float(c[0].stop-c[0].start) for c in connComps]  # heights (unused)
    dv = [float(c[1].stop-c[1].start) for c in connComps]  # widths (unused)
    areas = [float(c[0].stop-c[0].start)*float(c[1].stop-c[1].start)for c in connComps]
    # Filter: wider-than-tall components rejected (ratio < 2), enough
    # pixels, and at least 20% of the bounding box filled.
    usrTmp = [(c,l,px) for c,l,px,ratio,area in zip(connComps,range(1, maxLabel+1), px_count, ratios, areas)
                if ratio < 2 and
                px > minPersonPixThresh and
                px/area > 0.2
                ]
    if len(usrTmp) > 0:
        userBoundingBoxes, userLabels, px_count = zip(*usrTmp)
    else:
        # NOTE(review): when no component passes the filter, px_count is
        # left holding ALL components' counts -- confirm this is intended.
        userBoundingBoxes = []
        userLabels = []
    userCount = len(userLabels)
    # Relabel foreground mask with consecutive labels 1..userCount.
    mask = im.astype(np.uint8)*0
    for i,i_new in zip(userLabels, range(1, userCount+1)):
        mask[labelIm==i] = i_new
    return mask, userBoundingBoxes, userLabels, px_count
def get_mean_image(depthImgs):
    """Build a background depth image as the per-pixel mean of a stack.

    The zero-valued (invalid) region of the mean is dilated by 3 iterations
    and discarded, then every remaining pixel below 500 is replaced by the
    value of its nearest valid neighbour (Euclidean distance transform).

    Arguments:
        depthImgs: array of shape (H, W, N) holding N depth frames.

    Returns:
        (H, W) float array: the hole-filled mean depth image.
    """
    background = np.mean(depthImgs, 2)
    # Invalidate pixels near any zero (unknown-depth) region.
    background = background * (~nd.binary_dilation(background == 0, iterations=3))
    # Fill low/invalid pixels from their nearest valid neighbour.
    nearest = nd.distance_transform_edt(
        background < 500, return_distances=False, return_indices=True)
    holes = np.nonzero(background < 500)
    src = nearest[:, holes[0], holes[1]]
    background[holes] = background[src[0], src[1]]
    return background
def fill_image(im, tol=None):
    """Fill zero-valued pixels in-place with their nearest non-zero value.

    Arguments:
        im: 2D numeric array; modified in place.
        tol: unused (kept for interface compatibility).

    Returns:
        The same array, with zero holes filled.
    """
    # Indices of the nearest non-zero pixel for every position.
    nearest = nd.distance_transform_edt(
        im == 0, return_distances=False, return_indices=True)
    holes = np.nonzero(im == 0)
    src = nearest[:, holes[0], holes[1]]
    im[holes] = im[src[0], src[1]]
    return im
class BaseBackgroundModel(object):
    """Common interface for depth background-subtraction models."""

    # Class-level defaults; instances override them in __init__/update.
    backgroundModel = None
    fill_image = False

    def __init__(self, depthIm=None, fill_image=False):
        """Optionally seed the background with a copy of depthIm."""
        if depthIm is not None:
            self.backgroundModel = depthIm.copy()
        self.fill_image = fill_image

    def update(self, depthIm):
        """Record a copy of the newest depth frame."""
        self.currentIm = depthIm.copy()

    def get_model(self):
        """Return the current background estimate."""
        return self.backgroundModel

    def get_foreground(self, thresh=500):
        """Boolean mask of pixels deviating from the background by > thresh.

        Zero (invalid) pixels and pixels at or beyond the background's
        maximum depth are excluded from the foreground.
        """
        background = self.backgroundModel
        current = self.currentIm
        valid = (current != 0) * (current < background.max())
        return (np.abs(background - current) * valid) > thresh
class MedianModel(BaseBackgroundModel):
    """Background model: per-pixel median of the last n_images frames."""

    def __init__(self, depthIm, n_images=50, **kwargs):
        super(MedianModel, self).__init__(**kwargs)
        first = fill_image(depthIm.copy()) if self.fill_image else depthIm.copy()
        self.prevDepthIms = first[:, :, None]
        self.backgroundModel = self.prevDepthIms[:, :, 0]
        self.n_images = n_images

    def update(self, depthIm):
        """Append the newest frame and recompute the median background."""
        frame = fill_image(depthIm.copy()) if self.fill_image else depthIm.copy()
        self.currentIm = frame
        self.prevDepthIms = np.dstack([self.prevDepthIms, self.currentIm])
        # Keep only the most recent n_images frames.
        if self.prevDepthIms.shape[2] > self.n_images:
            self.prevDepthIms = self.prevDepthIms[:, :, -self.n_images:]
        self.backgroundModel = np.median(self.prevDepthIms, -1)
class MeanModel(BaseBackgroundModel):
    """Background model: per-pixel mean of the last n_images frames."""

    def __init__(self, depthIm, n_images=50, **kwargs):
        super(MeanModel, self).__init__(**kwargs)
        first = fill_image(depthIm.copy()) if self.fill_image else depthIm.copy()
        self.prevDepthIms = first[:, :, None]
        self.backgroundModel = self.prevDepthIms[:, :, 0]
        self.n_images = n_images

    def update(self, depthIm):
        """Append the newest frame and recompute the mean background."""
        frame = fill_image(depthIm.copy()) if self.fill_image else depthIm.copy()
        self.currentIm = frame
        self.prevDepthIms = np.dstack([self.prevDepthIms, self.currentIm])
        # Keep only the most recent n_images frames.
        if self.prevDepthIms.shape[2] > self.n_images:
            self.prevDepthIms = self.prevDepthIms[:, :, -self.n_images:]
        self.backgroundModel = np.mean(self.prevDepthIms, -1)
class StaticModel(BaseBackgroundModel):
    """Background model fixed at construction time.

    Updates only track the current frame; the background never changes.
    """

    def __init__(self, **kwargs):
        super(StaticModel, self).__init__(**kwargs)

    def update(self, depthIm):
        # Background stays as seeded; just remember the latest frame.
        self.currentIm = depthIm
class BoxModel(BaseBackgroundModel):
    """Background model bounded by a maximum depth.

    Anything farther than max_depth is treated as background; everything
    nearer can be detected as foreground.
    """

    def __init__(self, max_depth=3000, **kwargs):
        super(BoxModel, self).__init__(**kwargs)
        self.max_depth = max_depth

    def update(self, depthIm):
        self.currentIm = depthIm
        # Keep only pixels beyond max_depth as "background".
        self.backgroundModel = depthIm * (depthIm > self.max_depth)
''' Adaptive Mixture of Gaussians '''
class AdaptiveMixtureOfGaussians(BaseBackgroundModel):
    """Per-pixel adaptive mixture-of-Gaussians background model.

    Each pixel keeps up to maxGaussians (mean, variance, weight) components;
    new observations either update the best-matching component or spawn /
    replace a component when no component explains the observation.
    """
    def __init__(self, im, maxGaussians=5, learningRate=0.05, decayRate=0.25, variance=100**2, **kwargs):
        super(AdaptiveMixtureOfGaussians, self).__init__(**kwargs)
        xRez, yRez = im.shape
        self.MaxGaussians = maxGaussians
        self.LearningRate = learningRate
        self.DecayRate = decayRate
        self.VarianceInit = variance
        self.CurrentGaussianCount = 1
        # Per-pixel component parameters, stacked along the last axis.
        self.Means = np.zeros([xRez,yRez,self.MaxGaussians])
        # NOTE(review): Variances/Weights are np.empty and only slice 0 is
        # initialised below; the first Deviations computation divides by the
        # uninitialised slices -- confirm this is benign.
        self.Variances = np.empty([xRez,yRez,self.MaxGaussians])
        self.Weights = np.empty([xRez,yRez,self.MaxGaussians])
        self.Deltas = np.empty([xRez,yRez,self.MaxGaussians])
        # Number of active components per pixel (starts at 1).
        self.NumGaussians = np.ones([xRez,yRez], dtype=np.uint8)
        self.Deltas = np.zeros([xRez,yRez,self.MaxGaussians]) + np.inf
        # Seed the first component with the initial frame.
        self.Means[:,:,0] = im
        self.Weights[:,:,0] = self.LearningRate
        self.Variances[:,:,0] = self.VarianceInit
        self.Deviations = ((self.Means - im[:,:,np.newaxis])**2 / self.Variances)
        self.backgroundModel = im
        self.currentIm = im
    # @profile
    def update(self, im):
        """Fold a new frame into the mixture and refresh backgroundModel."""
        self.currentIm = im
        # Zero (invalid) pixels are pushed to the far plane.
        self.currentIm[im == 0] = im.max()
        mask = im != 0
        ''' Check deviations '''
        # Squared Mahalanobis-style deviation of the frame from each component.
        self.Deviations = ((self.Means - im[:,:,np.newaxis])**2 / self.Variances) * mask[:,:,None]
        for m in range(self.CurrentGaussianCount):
            # Inactive component slots must never win the argmin below.
            # NOTE(review): 'm > self.NumGaussians' compares a scalar to the
            # per-pixel count array; verify the intended comparison direction.
            self.Deviations[m > self.NumGaussians,m] = np.inf
        Ownership = np.argmin(self.Deviations, -1)
        deviationMin = np.min(self.Deviations, -1)
        # A pixel needs a new component when no component is within 3 units.
        createNewMixture = deviationMin > 3
        createNewMixture[np.isinf(deviationMin)] = False
        # Pixels already at capacity replace their weakest component instead.
        replaceLowestMixture = np.logical_and(createNewMixture, self.NumGaussians>=self.MaxGaussians)
        createNewMixture = np.logical_and(createNewMixture, self.NumGaussians<self.MaxGaussians)
        ''' Create new mixture using existing indices'''
        if np.any(createNewMixture):
            activeset_x, activeset_y = np.nonzero(createNewMixture)
            activeset_z = self.NumGaussians[activeset_x, activeset_y].ravel()
            # print "-------New Mixture------", len(activeset_x)
            self.Means[activeset_x,activeset_y,activeset_z] = im[activeset_x,activeset_y]
            self.Weights[activeset_x,activeset_y,activeset_z] = self.LearningRate
            self.Variances[activeset_x,activeset_y,activeset_z] = self.VarianceInit
            self.NumGaussians[activeset_x,activeset_y] += 1
            Ownership[activeset_x,activeset_y] = activeset_z
        ''' Replace lowest weighted mixture '''
        if np.any(replaceLowestMixture):
            activeset_x, activeset_y = np.nonzero(replaceLowestMixture)
            activeset_z = np.argmin(self.Weights[activeset_x, activeset_y,:], -1)
            # print "-------Replace Mixture------", len(activeset_x)
            self.Means[activeset_x,activeset_y,activeset_z] = im[activeset_x,activeset_y]
            self.Weights[activeset_x,activeset_y,activeset_z] = self.LearningRate
            self.Variances[activeset_x,activeset_y,activeset_z] = self.VarianceInit
            Ownership[activeset_x,activeset_y] = activeset_z
        self.CurrentGaussianCount = self.NumGaussians.max()
        # print "Gaussians: ", self.NumGaussians.max()
        ''' Update gaussians'''
        for m in range(self.CurrentGaussianCount):
            self.Deltas[:,:,m] = im - self.Means[:,:,m]
            tmpOwn = Ownership==m
            # print "Own:",np.sum(tmpOwn)
            # Exponential weight update: owners grow, all components decay.
            self.Weights[:,:,m] = self.Weights[:,:,m] + self.LearningRate*(tmpOwn - self.Weights[:,:,m]) - self.LearningRate*self.DecayRate
            tmpWeight = tmpOwn*(self.LearningRate/self.Weights[:,:,m])
            tmpMask = (self.Weights[:,:,m]<=0.001)
            tmpWeight[tmpMask] = 0
            # Means move toward the observation only for owning pixels.
            self.Means[:,:,m] = self.Means[:,:,m] + tmpWeight * self.Deltas[:,:,m]
            # self.Variances[:,:,m] = self.Variances[:,:,m] + tmpWeight * (self.Deltas[:,:,m]**2 - self.Variances[:,:,m])
            ''' If the weight is zero, reset '''
            # NOTE(review): 'm < np.any(tmpMask)' compares an int with a
            # bool and is only true when m == 0 and the mask is non-empty;
            # probably meant 'np.any(tmpMask)' -- confirm before changing.
            if m < np.any(tmpMask):
                if self.CurrentGaussianCount > 1:
                    activeset_x, activeset_y = np.nonzero(tmpMask * (self.NumGaussians > m))
                    try:
                        # Shift the remaining components down over slot m.
                        # self.Variances[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = self.Variances[activeset_x, activeset_y, m+1:self.CurrentGaussianCount]
                        self.Means[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = self.Means[activeset_x, activeset_y, m+1:self.CurrentGaussianCount]
                        self.Weights[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = self.Means[activeset_x, activeset_y, m+1:self.CurrentGaussianCount]
                        # self.Variances[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = 0
                        self.Means[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = 0
                        self.Weights[activeset_x, activeset_y, m:self.CurrentGaussianCount-1] = 0
                        self.NumGaussians[activeset_x, activeset_y] -= 1
                        # print "Reduce gaussians on slice", m, "max:", self.NumGaussians.max()
                    except:
                        embed()
        # Background estimate: the largest mean per pixel.
        self.backgroundModel = np.max(self.Means, 2)
        # self.backgroundModel = np.nanmax(self.Means, 2)
        # tmp = np.argmax(self.Weights,2).ravel()
        # self.backgroundModel = self.Means[:,:,tmp]
    def get_model(self):
        """Return the current background estimate."""
        return self.backgroundModel
| 31.216617 | 153 | 0.705513 |
acf189c0accd562ab36fe5004027b2e02f17270e | 19,571 | py | Python | eventsourcing/persistence.py | Skyross/eventsourcing | 30da69f6a17e9575cadf0bb211d3b7c77d239ffb | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/persistence.py | Skyross/eventsourcing | 30da69f6a17e9575cadf0bb211d3b7c77d239ffb | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/persistence.py | Skyross/eventsourcing | 30da69f6a17e9575cadf0bb211d3b7c77d239ffb | [
"BSD-3-Clause"
] | null | null | null | import json
import os
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from decimal import Decimal
from distutils.util import strtobool
from typing import (
Any,
Dict,
Generic,
Iterator,
List,
Mapping,
Optional,
Type,
cast,
)
from uuid import UUID
from eventsourcing.domain import DomainEvent, TDomainEvent
from eventsourcing.utils import get_topic, resolve_topic
class Transcoding(ABC):
    # noinspection SpellCheckingInspection
    """
    Abstract base class for custom transcodings.

    A transcoding teaches a :class:`Transcoder` how to represent one
    Python type in a serialisable form and how to reconstruct it.
    """

    @property
    @abstractmethod
    def type(self) -> type:
        # noinspection SpellCheckingInspection
        """Object type of transcoded object."""

    @property
    @abstractmethod
    def name(self) -> str:
        """Name of transcoding (stored with the data so the decoder can be found)."""

    @abstractmethod
    def encode(self, obj: Any) -> Any:
        """Encodes given object."""

    @abstractmethod
    def decode(self, data: Any) -> Any:
        """Decodes encoded object."""
class Transcoder(ABC):
    """
    Abstract base class for transcoders.

    Maintains a registry of :class:`Transcoding` objects, keyed both by
    Python type (used when encoding) and by name (used when decoding).
    """

    def __init__(self) -> None:
        self.types: Dict[type, Transcoding] = {}  # type -> transcoding (encode)
        self.names: Dict[str, Transcoding] = {}  # name -> transcoding (decode)

    def register(self, transcoding: Transcoding) -> None:
        """
        Registers given transcoding with the transcoder.
        """
        self.types[transcoding.type] = transcoding
        self.names[transcoding.name] = transcoding

    @abstractmethod
    def encode(self, obj: Any) -> bytes:
        """Encodes obj as bytes."""

    @abstractmethod
    def decode(self, data: bytes) -> Any:
        """Decodes obj from bytes."""
class JSONTranscoder(Transcoder):
    """
    Transcoder built on the standard :mod:`json` module, extensible with
    registered transcodings for types JSON cannot represent natively.
    """

    def __init__(self) -> None:
        super().__init__()
        # Hook the registry into the JSON machinery: unknown types are
        # routed through _encode_obj, and decoded dicts through _decode_obj.
        self.encoder = json.JSONEncoder(default=self._encode_obj)
        self.decoder = json.JSONDecoder(object_hook=self._decode_obj)

    def encode(self, obj: Any) -> bytes:
        """
        Encodes given object as a bytes array.
        """
        text = self.encoder.encode(obj)
        return text.encode("utf8")

    def decode(self, data: bytes) -> Any:
        """
        Decodes bytes array as previously encoded object.
        """
        text = data.decode("utf8")
        return self.decoder.decode(text)

    def _encode_obj(self, o: Any) -> Dict[str, Any]:
        # Represent a non-native object as a two-key envelope dict.
        try:
            transcoding = self.types[type(o)]
        except KeyError:
            raise TypeError(
                f"Object of type {type(o)} is not serializable. "
                "Please define and register a custom "
                "transcoding for this type."
            )
        return {
            "_type_": transcoding.name,
            "_data_": transcoding.encode(o),
        }

    def _decode_obj(self, d: Dict[str, Any]) -> Any:
        # Only dicts that are exactly the transcoding envelope are decoded;
        # any other dict passes through unchanged.
        if set(d.keys()) != {"_type_", "_data_"}:
            return d
        name = cast(str, d["_type_"])
        try:
            transcoding = self.names[name]
        except KeyError:
            raise TypeError(
                f"Data serialized with name '{name}' is not deserializable. "
                "Please register a custom transcoding for this type."
            )
        return transcoding.decode(d["_data_"])
class UUIDAsHex(Transcoding):
    """
    Transcoding that represents :class:`UUID` objects as hex values.
    """

    type = UUID
    name = "uuid_hex"

    def encode(self, obj: UUID) -> str:
        """Return the 32-character hex form of the UUID."""
        return obj.hex

    def decode(self, data: str) -> UUID:
        """Rebuild a UUID from its hex representation."""
        assert isinstance(data, str)
        return UUID(data)
class DecimalAsStr(Transcoding):
    """
    Transcoding that represents :class:`Decimal` objects as strings.
    """

    type = Decimal
    name = "decimal_str"

    def encode(self, obj: Decimal) -> str:
        """Return the exact string form of the decimal."""
        return str(obj)

    def decode(self, data: str) -> Decimal:
        """Rebuild a Decimal from its string form."""
        return Decimal(data)
class DatetimeAsISO(Transcoding):
    """
    Transcoding that represents :class:`datetime` objects as ISO strings.
    """

    type = datetime
    name = "datetime_iso"

    def encode(self, obj: datetime) -> str:
        """Return the ISO-8601 form of the datetime (timezone preserved)."""
        return obj.isoformat()

    def decode(self, data: str) -> datetime:
        """Rebuild a datetime from its ISO-8601 form."""
        assert isinstance(data, str)
        return datetime.fromisoformat(data)
@dataclass(frozen=True)
class StoredEvent:
    # noinspection PyUnresolvedReferences
    """
    Frozen dataclass that represents :class:`~eventsourcing.domain.DomainEvent`
    objects, such as aggregate :class:`~eventsourcing.domain.Aggregate.Event`
    objects and :class:`~eventsourcing.domain.Snapshot` objects.

    Constructor parameters:

    :param UUID originator_id: ID of the originating aggregate
    :param int originator_version: version of the originating aggregate
    :param str topic: topic of the domain event object class
    :param bytes state: serialised state of the domain event object
    """

    # frozen=True makes instances immutable and hashable.
    originator_id: uuid.UUID
    originator_version: int
    topic: str
    state: bytes
class Compressor(ABC):
    """
    Base class for compressors.

    Implementations must satisfy decompress(compress(b)) == b.
    """

    @abstractmethod
    def compress(self, data: bytes) -> bytes:
        """
        Compress bytes.
        """

    @abstractmethod
    def decompress(self, data: bytes) -> bytes:
        """
        Decompress bytes.
        """
class Cipher(ABC):
    """
    Base class for ciphers.

    Implementations must satisfy decrypt(encrypt(b)) == b for the same key.
    """

    # noinspection PyUnusedLocal
    @abstractmethod
    def __init__(self, cipher_key: str):
        """
        Initialises cipher with given key.
        """

    @abstractmethod
    def encrypt(self, plaintext: bytes) -> bytes:
        """
        Return ciphertext for given plaintext.
        """

    @abstractmethod
    def decrypt(self, ciphertext: bytes) -> bytes:
        """
        Return plaintext for given ciphertext.
        """
class Mapper(Generic[TDomainEvent]):
    """
    Converts between domain event objects and :class:`StoredEvent` objects.

    Uses a :class:`Transcoder`, and optionally a cryptographic cipher and
    compressor. The state pipeline is: transcode -> compress -> encrypt
    (and the reverse when reading).
    """

    def __init__(
        self,
        transcoder: Transcoder,
        compressor: Optional[Compressor] = None,
        cipher: Optional[Cipher] = None,
    ):
        self.transcoder = transcoder
        self.compressor = compressor
        self.cipher = cipher

    def from_domain_event(self, domain_event: TDomainEvent) -> StoredEvent:
        """
        Converts the given domain event to a :class:`StoredEvent` object.
        """
        topic: str = get_topic(domain_event.__class__)
        event_state = domain_event.__dict__.copy()
        # ID and version are stored as dedicated columns, not in the state.
        originator_id = event_state.pop("originator_id")
        originator_version = event_state.pop("originator_version")
        # Record the class version only when > 1, so v1 events stay compact.
        class_version = getattr(type(domain_event), "class_version", 1)
        if class_version > 1:
            event_state["class_version"] = class_version
        stored_state: bytes = self.transcoder.encode(event_state)
        # Compress before encrypting (ciphertext does not compress well).
        if self.compressor:
            stored_state = self.compressor.compress(stored_state)
        if self.cipher:
            stored_state = self.cipher.encrypt(stored_state)
        return StoredEvent(
            originator_id=originator_id,
            originator_version=originator_version,
            topic=topic,
            state=stored_state,
        )

    def to_domain_event(self, stored: StoredEvent) -> TDomainEvent:
        """
        Converts the given :class:`StoredEvent` to a domain event object.
        """
        stored_state: bytes = stored.state
        # Reverse the write pipeline: decrypt, then decompress, then decode.
        if self.cipher:
            stored_state = self.cipher.decrypt(stored_state)
        if self.compressor:
            stored_state = self.compressor.decompress(stored_state)
        event_state: dict = self.transcoder.decode(stored_state)
        event_state["originator_id"] = stored.originator_id
        event_state["originator_version"] = stored.originator_version
        cls = resolve_topic(stored.topic)
        assert issubclass(cls, DomainEvent)
        # Upcast the stored state step-by-step through the class's
        # upcast_vN_vN+1 static methods until it matches the current version.
        class_version = getattr(cls, "class_version", 1)
        from_version = event_state.pop("class_version", 1)
        while from_version < class_version:
            getattr(cls, f"upcast_v{from_version}_v{from_version + 1}")(event_state)
            from_version += 1
        # Bypass __init__: reconstruct the event directly from its state.
        domain_event = object.__new__(cls)
        domain_event.__dict__.update(event_state)
        return domain_event
# Exception hierarchy for the persistence layer, following PEP 249.
class RecordConflictError(Exception):
    """
    Legacy exception, replaced with IntegrityError.
    """


class PersistenceError(Exception):
    """
    The base class of the other exceptions in this module.

    Exception class names follow https://www.python.org/dev/peps/pep-0249/#exceptions
    """


class InterfaceError(PersistenceError):
    """
    Exception raised for errors that are related to the database
    interface rather than the database itself.
    """


class DatabaseError(PersistenceError):
    """
    Exception raised for errors that are related to the database.
    """


class DataError(DatabaseError):
    """
    Exception raised for errors that are due to problems with the
    processed data like division by zero, numeric value out of range, etc.
    """


class OperationalError(DatabaseError):
    """
    Exception raised for errors that are related to the database's
    operation and not necessarily under the control of the programmer,
    e.g. an unexpected disconnect occurs, the data source name is not
    found, a transaction could not be processed, a memory allocation
    error occurred during processing, etc.
    """


# Also subclasses RecordConflictError for backward compatibility.
class IntegrityError(DatabaseError, RecordConflictError):
    """
    Exception raised when the relational integrity of the
    database is affected, e.g. a foreign key check fails.
    """


class InternalError(DatabaseError):
    """
    Exception raised when the database encounters an internal
    error, e.g. the cursor is not valid anymore, the transaction
    is out of sync, etc.
    """


class ProgrammingError(DatabaseError):
    """
    Exception raised for programming errors, e.g. table not
    found or already exists, syntax error in the SQL statement,
    wrong number of parameters specified, etc.
    """


class NotSupportedError(DatabaseError):
    """
    Exception raised in case a method or database API was used
    which is not supported by the database, e.g. calling the
    rollback() method on a connection that does not support
    transaction or has transactions turned off.
    """
class Recorder(ABC):
    """
    Abstract base class for stored event recorders.
    """


class AggregateRecorder(Recorder):
    """
    Abstract base class for recorders that record and
    retrieve stored events for domain model aggregates.
    """

    @abstractmethod
    def insert_events(self, stored_events: List[StoredEvent], **kwargs: Any) -> None:
        """
        Writes stored events into database.
        """

    # Todo: Change the implementations to get in batches, in case lots of events.
    @abstractmethod
    def select_events(
        self,
        originator_id: UUID,
        gt: Optional[int] = None,
        lte: Optional[int] = None,
        desc: bool = False,
        limit: Optional[int] = None,
    ) -> List[StoredEvent]:
        """
        Reads stored events from database.
        """


@dataclass(frozen=True)
class Notification(StoredEvent):
    """
    Frozen dataclass that represents domain event notifications.
    """

    # Position of the event in the application's total order.
    id: int


class ApplicationRecorder(AggregateRecorder):
    """
    Abstract base class for recorders that record and
    retrieve stored events for domain model aggregates.

    Extends the behaviour of aggregate recorders by
    recording aggregate events in a total order that
    allows the stored events also to be retrieved
    as event notifications.
    """

    @abstractmethod
    def select_notifications(self, start: int, limit: int) -> List[Notification]:
        """
        Returns a list of event notifications
        from 'start', limited by 'limit'.
        """

    @abstractmethod
    def max_notification_id(self) -> int:
        """
        Returns the maximum notification ID.
        """


class ProcessRecorder(ApplicationRecorder):
    """
    Abstract base class for recorders that record and
    retrieve stored events for domain model aggregates.

    Extends the behaviour of applications recorders by
    recording aggregate events with tracking information
    that records the position of a processed event
    notification in a notification log.
    """

    @abstractmethod
    def max_tracking_id(self, application_name: str) -> int:
        """
        Returns the last recorded notification ID from given application.
        """
class EventStore(Generic[TDomainEvent]):
    """
    Stores and retrieves domain events, converting to and from stored
    events with a mapper and persisting them with a recorder.
    """

    def __init__(
        self,
        mapper: Mapper[TDomainEvent],
        recorder: AggregateRecorder,
    ):
        self.mapper = mapper
        self.recorder = recorder

    def put(self, events: List[TDomainEvent], **kwargs: Any) -> None:
        """
        Stores domain events in aggregate sequence.
        """
        stored_events = [self.mapper.from_domain_event(e) for e in events]
        self.recorder.insert_events(stored_events, **kwargs)

    def get(
        self,
        originator_id: UUID,
        gt: Optional[int] = None,
        lte: Optional[int] = None,
        desc: bool = False,
        limit: Optional[int] = None,
    ) -> Iterator[TDomainEvent]:
        """
        Retrieves domain events from aggregate sequence.
        """
        stored_events = self.recorder.select_events(
            originator_id=originator_id,
            gt=gt,
            lte=lte,
            desc=desc,
            limit=limit,
        )
        # Convert lazily; callers receive an iterator of domain events.
        return (self.mapper.to_domain_event(s) for s in stored_events)
class InfrastructureFactory(ABC):
    """
    Abstract base class for infrastructure factories.

    Concrete factories construct mappers, event stores and recorders for a
    named application, configured through environment variables.
    """

    # Environment variable names recognised by the factory.
    TOPIC = "INFRASTRUCTURE_FACTORY"
    MAPPER_TOPIC = "MAPPER_TOPIC"
    CIPHER_TOPIC = "CIPHER_TOPIC"
    CIPHER_KEY = "CIPHER_KEY"
    COMPRESSOR_TOPIC = "COMPRESSOR_TOPIC"
    IS_SNAPSHOTTING_ENABLED = "IS_SNAPSHOTTING_ENABLED"

    @classmethod
    def construct(
        cls,
        application_name: str = "",
        env: Optional[Mapping] = None,
    ) -> "InfrastructureFactory":
        """
        Constructs concrete infrastructure factory for given
        named application. Reads and resolves infrastructure
        factory class topic from environment variable 'INFRASTRUCTURE_FACTORY'.
        """
        # noinspection SpellCheckingInspection
        env = env if env is not None else os.environ
        # Default to the "plain old Python objects" (in-memory) factory.
        topic = env.get(
            cls.TOPIC,
            "eventsourcing.popo:Factory",
        )
        try:
            factory_cls = resolve_topic(topic)
        except (ModuleNotFoundError, AttributeError):
            raise EnvironmentError(
                "Failed to resolve "
                "infrastructure factory topic: "
                f"'{topic}' from environment "
                f"variable '{cls.TOPIC}'"
            )
        if not issubclass(factory_cls, InfrastructureFactory):
            raise AssertionError(f"Not an infrastructure factory: {topic}")
        return factory_cls(application_name=application_name, env=env)

    def __init__(self, application_name: str, env: Mapping):
        """
        Initialises infrastructure factory object with given application name.
        """
        self.application_name = application_name
        self.env = env

    # noinspection SpellCheckingInspection
    def getenv(
        self, key: str, default: Optional[str] = None, application_name: str = ""
    ) -> Optional[str]:
        """
        Returns value of environment variable defined by given key.

        The application-prefixed form ('APPNAME_KEY') takes precedence
        over the plain key.
        """
        if not application_name:
            application_name = self.application_name
        keys = [
            application_name.upper() + "_" + key,
            key,
        ]
        for key in keys:
            value = self.env.get(key)
            if value is not None:
                return value
        return default

    def mapper(
        self,
        transcoder: Transcoder,
        application_name: str = "",
    ) -> Mapper:
        """
        Constructs a mapper, with cipher and compressor configured
        from the environment.
        """
        return Mapper(
            transcoder=transcoder,
            cipher=self.cipher(application_name),
            compressor=self.compressor(application_name),
        )

    def cipher(self, application_name: str) -> Optional[Cipher]:
        """
        Reads environment variables 'CIPHER_TOPIC'
        and 'CIPHER_KEY' to decide whether or not
        to construct a cipher.
        """
        cipher_topic = self.getenv(self.CIPHER_TOPIC, application_name=application_name)
        cipher_key = self.getenv(self.CIPHER_KEY, application_name=application_name)
        cipher: Optional[Cipher] = None
        if cipher_topic:
            # A topic without a key is a configuration error.
            if not cipher_key:
                raise EnvironmentError(
                    f"'{self.CIPHER_KEY}' not set in env, "
                    f"although '{self.CIPHER_TOPIC}' was set"
                )
        elif cipher_key:
            # A key alone implies the default AES cipher.
            cipher_topic = "eventsourcing.cipher:AESCipher"
        if cipher_topic and cipher_key:
            cipher_cls: Type[Cipher] = resolve_topic(cipher_topic)
            cipher = cipher_cls(cipher_key=cipher_key)
        return cipher

    def compressor(self, application_name: str) -> Optional[Compressor]:
        """
        Reads environment variable 'COMPRESSOR_TOPIC' to
        decide whether or not to construct a compressor.
        """
        compressor: Optional[Compressor] = None
        compressor_topic = self.getenv(
            self.COMPRESSOR_TOPIC, application_name=application_name
        )
        if compressor_topic:
            compressor_cls: Type[Compressor] = resolve_topic(compressor_topic)
            # The topic may resolve to a class (instantiate) or a module-like
            # object with compress/decompress functions (use directly).
            if callable(compressor_cls):
                compressor = compressor_cls()
            else:
                compressor = compressor_cls
        return compressor

    @staticmethod
    def event_store(**kwargs: Any) -> EventStore:
        """
        Constructs an event store.
        """
        return EventStore(**kwargs)

    @abstractmethod
    def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
        """
        Constructs an aggregate recorder.
        """

    @abstractmethod
    def application_recorder(self) -> ApplicationRecorder:
        """
        Constructs an application recorder.
        """

    @abstractmethod
    def process_recorder(self) -> ProcessRecorder:
        """
        Constructs a process recorder.
        """

    def is_snapshotting_enabled(self) -> bool:
        """
        Decides whether or not snapshotting is enabled by
        reading environment variable 'IS_SNAPSHOTTING_ENABLED'.
        Snapshotting is not enabled by default.
        """
        default = "no"
        return bool(
            strtobool(self.getenv(self.IS_SNAPSHOTTING_ENABLED, default) or default)
        )
@dataclass(frozen=True)
class Tracking:
    """
    Frozen dataclass representing the position of a domain
    event :class:`Notification` in an application's notification log.
    """
    # Name of the upstream application whose notification log is tracked.
    application_name: str
    # ID of the notification within that application's log.
    notification_id: int
| 28.240981 | 88 | 0.619079 |
acf189f3b53684339c67a9e9335dc08b71716b1b | 2,597 | py | Python | analysis-master/tra_analysis/SVM.py | titanscouting/tra-analysis | 5153fc3f82b0ca3aa43d4e926127da10d84234dd | [
"BSD-3-Clause"
] | 2 | 2020-09-19T22:27:16.000Z | 2021-04-01T08:30:15.000Z | analysis-master/tra_analysis/SVM.py | titanscouting/red-alliance-analysis | 5153fc3f82b0ca3aa43d4e926127da10d84234dd | [
"BSD-3-Clause"
] | 23 | 2020-09-20T02:02:20.000Z | 2021-04-01T08:03:09.000Z | analysis-master/tra_analysis/SVM.py | titanscouting/red-alliance-analysis | 5153fc3f82b0ca3aa43d4e926127da10d84234dd | [
"BSD-3-Clause"
] | 4 | 2020-10-09T04:57:59.000Z | 2020-10-14T18:46:45.000Z | # Titan Robotics Team 2022: SVM submodule
# Written by Arthur Lu
# Notes:
# this should be imported as a python module using 'from tra_analysis import SVM'
# setup:
__version__ = "1.0.2"
__changelog__ = """changelog:
1.0.2:
- fixed __all__
1.0.1:
- removed unessasary self calls
- removed classness
1.0.0:
- ported analysis.SVM() here
"""
__author__ = (
"Arthur Lu <learthurgo@gmail.com>",
)
__all__ = [
"CustomKernel",
"StandardKernel",
"PrebuiltKernel",
"fit",
"eval_classification",
"eval_regression",
]
import sklearn
from sklearn import svm
from . import ClassificationMetric, RegressionMetric
class CustomKernel:
	"""Fully parameterised SVC factory: every SVC argument must be supplied.

	Note: the third parameter is (historically) misspelled ``degre``; the
	name is kept for backward compatibility with keyword callers, but it is
	now actually forwarded to ``sklearn.svm.SVC`` as ``degree`` — previously
	it was accepted and silently ignored, so polynomial kernels always used
	sklearn's default degree.
	"""
	def __new__(cls, C, kernel, degre, gamma, coef0, shrinking, probability, tol, cache_size, class_weight, verbose, max_iter, decision_function_shape, random_state):
		return sklearn.svm.SVC(C = C, kernel = kernel, degree = degre, gamma = gamma, coef0 = coef0, shrinking = shrinking, probability = probability, tol = tol, cache_size = cache_size, class_weight = class_weight, verbose = verbose, max_iter = max_iter, decision_function_shape = decision_function_shape, random_state = random_state)
class StandardKernel:
	"""SVC factory mirroring sklearn.svm.SVC defaults, with *kernel* required.

	Fix: ``degree`` is now forwarded to ``sklearn.svm.SVC``; previously it
	was accepted but never passed on, so a caller-supplied degree had no
	effect for polynomial kernels.
	"""
	def __new__(cls, kernel, C=1.0, degree=3, gamma='auto_deprecated', coef0=0.0, shrinking=True, probability=False, tol=0.001, cache_size=200, class_weight=None, verbose=False, max_iter=-1, decision_function_shape='ovr', random_state=None):
		return sklearn.svm.SVC(C = C, kernel = kernel, degree = degree, gamma = gamma, coef0 = coef0, shrinking = shrinking, probability = probability, tol = tol, cache_size = cache_size, class_weight = class_weight, verbose = verbose, max_iter = max_iter, decision_function_shape = decision_function_shape, random_state = random_state)
class PrebuiltKernel:
	"""Namespace of factory classes for the standard sklearn SVC kernels."""
	class Linear:
		def __new__(cls):
			return sklearn.svm.SVC(kernel = 'linear')
	class Polynomial:
		def __new__(cls, power, r_bias):
			# sklearn's identifier for the polynomial kernel is 'poly';
			# the previous value 'polynomial' is rejected by SVC with a
			# ValueError at fit time.
			return sklearn.svm.SVC(kernel = 'poly', degree = power, coef0 = r_bias)
	class RBF:
		def __new__(cls, gamma):
			return sklearn.svm.SVC(kernel = 'rbf', gamma = gamma)
	class Sigmoid:
		def __new__(cls, r_bias):
			return sklearn.svm.SVC(kernel = 'sigmoid', coef0 = r_bias)
def fit(kernel, train_data, train_outputs): # expects *2d data, 1d labels or outputs
	"""Train *kernel* on the given samples and targets; returns whatever
	the kernel's own fit() returns (for sklearn estimators, the estimator)."""
	trained = kernel.fit(train_data, train_outputs)
	return trained
def eval_classification(kernel, test_data, test_outputs):
	"""Predict labels for *test_data* with a trained kernel and wrap the
	predictions together with the true outputs in a ClassificationMetric."""
	return ClassificationMetric(kernel.predict(test_data), test_outputs)
def eval_regression(kernel, test_data, test_outputs):
	"""Predict values for *test_data* with a trained kernel and wrap the
	predictions together with the true outputs in a RegressionMetric."""
	return RegressionMetric(kernel.predict(test_data), test_outputs)
acf18a06c8873ffa1be71a0e86a69c12b1b80d9c | 3,470 | py | Python | running_modes/automated_curriculum_learning/scoring_table.py | truehanwj/REINVENT | b36b9d206e76590c7d584683fc45de8a74ce6033 | [
"Apache-2.0"
] | null | null | null | running_modes/automated_curriculum_learning/scoring_table.py | truehanwj/REINVENT | b36b9d206e76590c7d584683fc45de8a74ce6033 | [
"Apache-2.0"
] | null | null | null | running_modes/automated_curriculum_learning/scoring_table.py | truehanwj/REINVENT | b36b9d206e76590c7d584683fc45de8a74ce6033 | [
"Apache-2.0"
] | null | null | null | from typing import List, Any, Dict
import pandas as pd
from running_modes.dto.scoring_table_entry_dto import ScoringTableEntryDTO
from running_modes.enums.scoring_table_enum import ScoringTableEnum
class ScoringTable:
    """Bookkeeping for automated curriculum learning.

    Records each agent's score together with the scoring-function
    components that produced it, and keeps a separate table of constant
    (always-active) components.
    """
    def __init__(self):
        self._scoring_table_enum = ScoringTableEnum()
        self.scoring_table = pd.DataFrame(columns=[self._scoring_table_enum.AGENTS,
                                                   self._scoring_table_enum.SCORES,
                                                   self._scoring_table_enum.SCORING_FUNCTIONS,
                                                   self._scoring_table_enum.COMPONENT_NAMES])
        self.constant_component_table = pd.DataFrame(columns=[self._scoring_table_enum.SCORING_FUNCTIONS,
                                                              self._scoring_table_enum.COMPONENT_NAMES])
    def add_score_for_agent(self, entry: ScoringTableEntryDTO):
        """Append one (agent, score, components) record to the scoring table."""
        new_row = pd.DataFrame(data={self._scoring_table_enum.AGENTS: entry.agent,
                                     self._scoring_table_enum.SCORES: entry.score,
                                     self._scoring_table_enum.SCORING_FUNCTIONS: [entry.scoring_function_components],
                                     self._scoring_table_enum.COMPONENT_NAMES:
                                         entry.scoring_function_components.get('name', "unknown_name")})
        # DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat(..., ignore_index=True) is the supported equivalent.
        self.scoring_table = pd.concat([self.scoring_table, new_row], ignore_index=True)
    def add_constant_component(self, entry: Dict):
        """Register a component that is always part of the scoring function."""
        new_row = pd.DataFrame(data={self._scoring_table_enum.SCORING_FUNCTIONS: [entry],
                                     self._scoring_table_enum.COMPONENT_NAMES:
                                         entry.get('name', "unknown_name")})
        self.constant_component_table = pd.concat([self.constant_component_table, new_row],
                                                  ignore_index=True)
    def rank_by_score(self) -> pd.DataFrame:
        """Group rows by score (tied scores are merged) and sort best-first."""
        grouped_scoring_table = self.scoring_table\
            .groupby(self._scoring_table_enum.SCORES)\
            .agg({self._scoring_table_enum.SCORING_FUNCTIONS: list,
                  self._scoring_table_enum.AGENTS: lambda x: list(x)[0],
                  self._scoring_table_enum.COMPONENT_NAMES: list})\
            .reset_index()
        return grouped_scoring_table.sort_values(self._scoring_table_enum.SCORES,
                                                 ascending=False).reset_index(drop=True)
    def get_top_sf_components(self, number: int = -1) -> List:
        """Return the scoring-function components of the *number* best scores
        (all of them when number is -1)."""
        table = self.rank_by_score().head(number)
        components = table[self._scoring_table_enum.SCORING_FUNCTIONS].tolist()
        return sum(components, [])  # flattening the list in case of tied ranks
    def get_top_agent(self) -> Any:
        """Return the agent associated with the highest score."""
        table = self.rank_by_score()
        agent = table.loc[0][self._scoring_table_enum.AGENTS]
        return agent
    def get_sf_components_by_name(self, names: List[str]) -> List:
        """Look components up by name across both the scored and constant tables."""
        df = pd.concat([self.scoring_table, self.constant_component_table], axis=0, sort=False)
        components_df = [df[df[self._scoring_table_enum.COMPONENT_NAMES] == name] for name in names]
        components = [component[self._scoring_table_enum.SCORING_FUNCTIONS].item() for component in components_df]
        return components
    def get_sf_components_by_rank(self, rank: int = 0) -> List:
        """Return the components tied at the given rank (0 = best)."""
        components = self.rank_by_score().loc[rank][self._scoring_table_enum.SCORING_FUNCTIONS]
        return components
| 55.079365 | 117 | 0.642939 |
acf18a8644c2ff62418e366e08db790fe757b898 | 988 | py | Python | kubernetes/test/test_v2alpha1_cron_job_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 1 | 2018-10-20T19:37:57.000Z | 2018-10-20T19:37:57.000Z | kubernetes/test/test_v2alpha1_cron_job_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | null | null | null | kubernetes/test/test_v2alpha1_cron_job_list.py | reymont/python | 02a3a31c630c305527b328af49724f348fbdae15 | [
"Apache-2.0"
] | 2 | 2018-07-27T19:39:34.000Z | 2020-12-25T02:48:27.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.11.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v2alpha1_cron_job_list import V2alpha1CronJobList
class TestV2alpha1CronJobList(unittest.TestCase):
    """Unit test stubs for the V2alpha1CronJobList model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testV2alpha1CronJobList(self):
        """Stub test for V2alpha1CronJobList construction."""
        # FIXME: construct object with mandatory attributes with example values
        #model = kubernetes.client.models.v2alpha1_cron_job_list.V2alpha1CronJobList()
if __name__ == '__main__':
    # Run the stub tests when this module is executed directly.
    unittest.main()
| 21.955556 | 105 | 0.716599 |
acf18a92728136b4aaafb14fb4c9ece407b61a42 | 1,210 | py | Python | day3/main.py | schuppentier/adventofcode | b67026b1a8fb61ea61ec9f93ba544fffd44450a8 | [
"MIT"
] | null | null | null | day3/main.py | schuppentier/adventofcode | b67026b1a8fb61ea61ec9f93ba544fffd44450a8 | [
"MIT"
] | null | null | null | day3/main.py | schuppentier/adventofcode | b67026b1a8fb61ea61ec9f93ba544fffd44450a8 | [
"MIT"
] | null | null | null | import math
def get_increased_modulo_row(start, inc, limit):
    """Endless generator: yields *start* first (unreduced), then keeps
    advancing by *inc* modulo *limit*."""
    value = start
    yield value
    while True:
        value = (value + inc) % limit
        yield value
def get_hit_trees_for_slope(field, row_inc, col_inc):
    """Count trees ('#') hit while descending *field* on the given slope.

    Generalisation: the column now wraps at the actual row width instead of
    the hard-coded 31, so the function works for any map size.  A trailing
    newline character left by readlines() is excluded from the width, which
    keeps the behaviour identical for the original puzzle input.

    args:
        field: list of rows (each a sequence of single-character squares)
        row_inc: rows moved down per step
        col_inc: columns moved right per step (wraps around)
    returns:
        number of '#' squares encountered
    """
    trees_hit_count = 0
    if field:
        first_row = field[0]
        # Effective width: ignore a trailing '\n' kept by readlines().
        width = len(first_row) - (1 if first_row and first_row[-1] == "\n" else 0)
        col = 0
        for row in range(0, len(field), row_inc):
            if field[row][col] == "#":
                trees_hit_count += 1
            col = (col + col_inc) % width
    print(f"[Slope: {row_inc} down, {col_inc} right] Trees hit: {trees_hit_count}")
    return trees_hit_count
def part1(field):
    """Solve part 1: trees hit on the (1 down, 3 right) slope."""
    hits = get_hit_trees_for_slope(field, 1, 3)
    print(f"[PART 1] Trees hit: {hits}")
def part2(field):
    """Solve part 2: product of tree counts over the five required slopes."""
    slopes = ((1, 1), (1, 3), (1, 5), (1, 7), (2, 1))
    slope_hit_count = [
        get_hit_trees_for_slope(field, down, right) for down, right in slopes
    ]
    print(f"[PART 2] Trees hit: {math.prod(slope_hit_count)}")
if __name__ == "__main__":
    with open("input.txt", "r") as input_file:
        # NOTE(review): each row keeps its trailing '\n' as a final "square";
        # the slope logic must never index that last column — confirm widths.
        field = [[square for square in row] for row in input_file.readlines()]
    part1(field)
    part2(field)
| 27.5 | 83 | 0.636364 |
acf18b3ecd67d3eebbb4fd7df87f914fa2b2cf19 | 1,846 | py | Python | preprocess-ml100k.py | Sandy4321/ifm | 72fbe446a373afb27cdfd93b701fb60ad907645e | [
"MIT"
] | 1 | 2021-10-03T21:55:39.000Z | 2021-10-03T21:55:39.000Z | preprocess-ml100k.py | Sandy4321/ifm | 72fbe446a373afb27cdfd93b701fb60ad907645e | [
"MIT"
] | 1 | 2021-10-03T21:55:30.000Z | 2021-10-04T07:22:46.000Z | preprocess-ml100k.py | Sandy4321/ifm | 72fbe446a373afb27cdfd93b701fb60ad907645e | [
"MIT"
] | 1 | 2021-10-03T21:55:32.000Z | 2021-10-03T21:55:32.000Z | #!/usr/bin/python3
import numpy as np
import datetime
from collections import defaultdict
# Load the MovieLens-100k metadata files (pipe-delimited).
occupations = list(np.loadtxt('u.occupation', dtype=bytes))
# Column converters for u.user: gender -> 0/1, occupation -> index.
converters = {
    2: lambda s: 0 if s == b'M' else 1,
    3: lambda s: occupations.index(s)
}
demographics = np.loadtxt('u.user', delimiter='|', usecols=(0, 1, 2, 3), converters=converters, dtype=int)
# Map user id -> (age, gender, occupation) row.
demographics_dict = dict(zip(demographics[:, 0], demographics[:, 1:]))
# Item id plus the 19 genre flag columns (cols 5..23).
item_info = np.loadtxt('u.item.utf-8', delimiter='|', usecols=tuple([0] + list(range(5, 24))), dtype=int)
item_info_dict = dict(zip(item_info[:, 0], item_info[:, 1:]))
genres = np.loadtxt('u.genre', delimiter='|', usecols=0, dtype=bytes)
ratings = np.loadtxt('u.data', dtype=int)
num_users = len(demographics)
num_items = len(item_info)
num_genres = len(genres)
# Feature layout: user one-hot + demographics + item one-hot + item genres
# + last-rated genres + weekday one-hot + last-rated-weekday one-hot.
num_features = num_users + len(demographics[0]) - 1 + num_items + num_genres * 2 + 7 * 2
def to_one_hot(i, size):
    """Return a length-*size* int vector that is zero except for a 1 at *i*."""
    vector = np.zeros(size, dtype=int)
    vector[i] = 1
    return vector
# Per-user running state: genre flags and weekday of the previous rating.
last_rated_genre = defaultdict(lambda: np.zeros(num_genres))
last_rated_day = defaultdict(lambda: None)
data = np.ndarray((len(ratings), num_features), dtype=int)
# Process ratings in chronological order so "last rated" is well defined.
for (i, (user_id, item_id, rating, timestamp)) in enumerate(sorted(ratings, key=lambda x: x[3])):
    weekday = datetime.datetime.fromtimestamp(timestamp).weekday()
    # NOTE(review): for a user's first rating, last_rated_day defaults to
    # None and to_one_hot(None, 7) sets the whole 7-vector to 1 via numpy
    # broadcasting — confirm this all-ones "unknown" encoding is intended.
    data[i] = np.hstack([to_one_hot(user_id - 1, num_users),
                         demographics_dict[user_id],
                         to_one_hot(item_id - 1, num_items),
                         item_info_dict[item_id],
                         last_rated_genre[user_id],
                         to_one_hot(weekday, 7),
                         to_one_hot(last_rated_day[user_id], 7)])
    last_rated_genre[user_id] = item_info_dict[item_id]
    last_rated_day[user_id] = weekday
# Persist features and targets (the rating column) for training.
np.save('data.npy', data)
np.save('targets.npy', ratings[:, 2])
| 37.673469 | 106 | 0.651679 |
acf18c5c2916648cbae7e705016ca1c36892dec9 | 3,023 | py | Python | resources/usr/local/lib/python2.7/dist-packages/bx/align/tools/tile.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 2 | 2019-07-29T04:12:46.000Z | 2020-06-01T00:35:51.000Z | resources/usr/local/lib/python2.7/dist-packages/bx/align/tools/tile.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | null | null | null | resources/usr/local/lib/python2.7/dist-packages/bx/align/tools/tile.py | edawson/parliament2 | 2632aa3484ef64c9539c4885026b705b737f6d1e | [
"Apache-2.0"
] | 1 | 2019-07-29T04:12:50.000Z | 2019-07-29T04:12:50.000Z | """
Tools for tiling / projecting alignments onto an interval of a sequence.
"""
import bx.align as align
from bx import misc
import bx.seq.nib
import os
import string
import sys
def tile_interval( sources, index, ref_src, start, end, seq_db=None ):
    """
    Tile maf blocks onto an interval. The resulting block will span the interval
    exactly and contain the column from the highest scoring alignment at each
    position.

    `sources`: list of sequence source names to include in final block
    `index`: an instance that can return maf blocks overlapping intervals
    `ref_src`: source name of the interval (ie, hg17.chr7)
    `start`: start of interval
    `end`: end of interval
    `seq_db`: a mapping for source names in the reference species to nib files
    """
    # First entry in sources should also be on the reference species
    assert sources[0].split('.')[0] == ref_src.split('.')[0], \
        "%s != %s" % ( sources[0].split('.')[0], ref_src.split('.')[0] )
    base_len = end - start
    blocks = index.get( ref_src, start, end )
    # From low to high score
    blocks.sort(key=lambda t: t.score)
    # mask[i] = index of the highest-scoring block covering reference
    # position start+i, or -1 where no block covers it.
    mask = [ -1 ] * base_len
    ref_src_size = None
    for i, block in enumerate( blocks ):
        ref = block.get_component_by_src_start( ref_src )
        ref_src_size = ref.src_size
        assert ref.strand == "+"
        # Clip the block's reference span to the requested interval.
        slice_start = max( start, ref.start )
        slice_end = min( end, ref.end )
        # Because blocks are sorted low-to-high, later (higher scoring)
        # blocks overwrite earlier ones in the mask.
        for j in range( slice_start, slice_end ):
            mask[j-start] = i
    # One output text accumulator per requested source.
    tiled = []
    for i in range( len( sources ) ):
        tiled.append( [] )
    # NOTE(review): the loop variable 'index' shadows the 'index' parameter
    # from here on; the parameter is no longer needed, but the shadowing is
    # easy to misread.
    for ss, ee, index in intervals_from_mask( mask ):
        # Interval with no covering alignments
        if index < 0:
            # Get sequence if available, otherwise just use 'N'
            if seq_db:
                tiled[0].append( bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( start+ss, ee-ss ) )
            else:
                tiled[0].append( "N" * (ee-ss) )
            # Gaps in all other species
            for row in tiled[1:]:
                row.append( "-" * ( ee - ss ) )
        else:
            # Slice the winning block down to this sub-interval and keep
            # only the requested species.
            slice_start = start + ss
            slice_end = start + ee
            block = blocks[index]
            ref = block.get_component_by_src_start( ref_src )
            sliced = block.slice_by_component( ref, slice_start, slice_end )
            sliced = sliced.limit_to_species( sources )
            sliced.remove_all_gap_columns()
            for i, src in enumerate( sources ):
                comp = sliced.get_component_by_src_start( src )
                if comp:
                    tiled[i].append( comp.text )
                else:
                    # Species missing from this block: emit all-gap text.
                    tiled[i].append( "-" * sliced.text_size )
    return [ "".join( t ) for t in tiled ]
def intervals_from_mask( mask ):
    """Yield (start, end, value) runs of consecutive equal values in *mask*.

    End indices are exclusive.  Fix: an empty mask now yields nothing
    instead of raising IndexError on mask[0].
    """
    if not mask:
        return
    start = 0
    last = mask[0]
    for i in range( 1, len( mask ) ):
        if mask[i] != last:
            yield start, i, last
            start = i
            last = mask[i]
    # Emit the final (possibly only) run.
    yield start, len(mask), last
| 36.865854 | 105 | 0.581872 |
acf18c93e6729e30b523b3d278e28d484f5999d3 | 4,704 | py | Python | 15 - fixes audio/code/enemy.py | aldrinbrillante/Zelda | 83d74beca1e1d352a17fc4218cf1e2226d5788c3 | [
"CC0-1.0"
] | null | null | null | 15 - fixes audio/code/enemy.py | aldrinbrillante/Zelda | 83d74beca1e1d352a17fc4218cf1e2226d5788c3 | [
"CC0-1.0"
] | null | null | null | 15 - fixes audio/code/enemy.py | aldrinbrillante/Zelda | 83d74beca1e1d352a17fc4218cf1e2226d5788c3 | [
"CC0-1.0"
] | null | null | null | import pygame
from settings import *
from entity import Entity
from support import *
class Enemy(Entity):
	"""Monster sprite: a small state machine (idle/move/attack) driven by the
	distance to the player, with attack/invincibility cooldowns and sounds.

	Inherits movement, animation speed and frame_index handling from Entity
	(presumably — Entity is defined elsewhere; confirm attribute origins).
	"""
	def __init__(self,monster_name,pos,groups,obstacle_sprites,damage_player,trigger_death_particles,add_exp):
		# general setup
		super().__init__(groups)
		self.sprite_type = 'enemy'
		# graphics setup
		self.import_graphics(monster_name)
		self.status = 'idle'
		self.image = self.animations[self.status][self.frame_index]
		# movement
		self.rect = self.image.get_rect(topleft = pos)
		# Shrink the vertical hitbox so the sprite overlaps obstacles nicely.
		self.hitbox = self.rect.inflate(0,-10)
		self.obstacle_sprites = obstacle_sprites
		# stats (looked up from the shared monster_data table in settings)
		self.monster_name = monster_name
		monster_info = monster_data[self.monster_name]
		self.health = monster_info['health']
		self.exp = monster_info['exp']
		self.speed = monster_info['speed']
		self.attack_damage = monster_info['damage']
		self.resistance = monster_info['resistance']
		self.attack_radius = monster_info['attack_radius']
		self.notice_radius = monster_info['notice_radius']
		self.attack_type = monster_info['attack_type']
		# player interaction (callbacks injected by the level)
		self.can_attack = True
		self.attack_time = None
		self.attack_cooldown = 400
		self.damage_player = damage_player
		self.trigger_death_particles = trigger_death_particles
		self.add_exp = add_exp
		# invincibility timer (milliseconds)
		self.vulnerable = True
		self.hit_time = None
		self.invincibility_duration = 300
		# sounds
		self.death_sound = pygame.mixer.Sound('../audio/death.wav')
		self.hit_sound = pygame.mixer.Sound('../audio/hit.wav')
		self.attack_sound = pygame.mixer.Sound(monster_info['attack_sound'])
		self.death_sound.set_volume(0.6)
		self.hit_sound.set_volume(0.6)
		self.attack_sound.set_volume(0.6)
	def import_graphics(self,name):
		"""Load the idle/move/attack animation frames for this monster."""
		self.animations = {'idle':[],'move':[],'attack':[]}
		main_path = f'../graphics/monsters/{name}/'
		for animation in self.animations.keys():
			self.animations[animation] = import_folder(main_path + animation)
	def get_player_distance_direction(self,player):
		"""Return (distance, unit direction vector) from this enemy to the player."""
		enemy_vec = pygame.math.Vector2(self.rect.center)
		player_vec = pygame.math.Vector2(player.rect.center)
		distance = (player_vec - enemy_vec).magnitude()
		if distance > 0:
			direction = (player_vec - enemy_vec).normalize()
		else:
			# Same position: zero vector avoids normalize() on zero length.
			direction = pygame.math.Vector2()
		return (distance,direction)
	def get_status(self, player):
		"""Choose attack/move/idle based on distance and attack cooldown."""
		distance = self.get_player_distance_direction(player)[0]
		if distance <= self.attack_radius and self.can_attack:
			# Restart the attack animation on a fresh attack.
			if self.status != 'attack':
				self.frame_index = 0
			self.status = 'attack'
		elif distance <= self.notice_radius:
			self.status = 'move'
		else:
			self.status = 'idle'
	def actions(self,player):
		"""Act on the current status: damage the player, chase, or stand still."""
		if self.status == 'attack':
			self.attack_time = pygame.time.get_ticks()
			self.damage_player(self.attack_damage,self.attack_type)
			self.attack_sound.play()
		elif self.status == 'move':
			self.direction = self.get_player_distance_direction(player)[1]
		else:
			self.direction = pygame.math.Vector2()
	def animate(self):
		"""Advance the current animation; flicker alpha while invincible."""
		animation = self.animations[self.status]
		self.frame_index += self.animation_speed
		if self.frame_index >= len(animation):
			# An attack animation only plays once per cooldown.
			if self.status == 'attack':
				self.can_attack = False
			self.frame_index = 0
		self.image = animation[int(self.frame_index)]
		self.rect = self.image.get_rect(center = self.hitbox.center)
		if not self.vulnerable:
			# wave_value() (from Entity) alternates alpha for a hit flicker.
			alpha = self.wave_value()
			self.image.set_alpha(alpha)
		else:
			self.image.set_alpha(255)
	def cooldowns(self):
		"""Re-enable attacking / vulnerability once their timers elapse."""
		current_time = pygame.time.get_ticks()
		if not self.can_attack:
			if current_time - self.attack_time >= self.attack_cooldown:
				self.can_attack = True
		if not self.vulnerable:
			if current_time - self.hit_time >= self.invincibility_duration:
				self.vulnerable = True
	def get_damage(self,player,attack_type):
		"""Take a hit from the player (weapon or magic) unless invincible."""
		if self.vulnerable:
			self.hit_sound.play()
			# Knockback direction: away-from/towards player is applied in
			# hit_reaction() via the negative resistance multiplier.
			self.direction = self.get_player_distance_direction(player)[1]
			if attack_type == 'weapon':
				self.health -= player.get_full_weapon_damage()
			else:
				self.health -= player.get_full_magic_damage()
			self.hit_time = pygame.time.get_ticks()
			self.vulnerable = False
	def check_death(self):
		"""Remove the sprite, spawn particles and grant exp when health runs out."""
		if self.health <= 0:
			self.kill()
			self.trigger_death_particles(self.rect.center,self.monster_name)
			self.add_exp(self.exp)
			self.death_sound.play()
	def hit_reaction(self):
		"""Push the enemy back while it is in its post-hit invincible state."""
		if not self.vulnerable:
			self.direction *= -self.resistance
	def update(self):
		"""Per-frame update independent of the player (sprite group hook)."""
		self.hit_reaction()
		self.move(self.speed)
		self.animate()
		self.cooldowns()
		self.check_death()
	def enemy_update(self,player):
		"""Per-frame update that needs the player (called by the level)."""
		self.get_status(player)
		self.actions(player)
acf18cae54e288e4db5d0e9829c7ac372b1783c2 | 385 | py | Python | boke/boke/wsgi.py | hippoleslie/person_web | a63e05765193f67ced4f57d0cda4525720bb5faa | [
"MIT"
] | null | null | null | boke/boke/wsgi.py | hippoleslie/person_web | a63e05765193f67ced4f57d0cda4525720bb5faa | [
"MIT"
] | null | null | null | boke/boke/wsgi.py | hippoleslie/person_web | a63e05765193f67ced4f57d0cda4525720bb5faa | [
"MIT"
] | null | null | null | """
WSGI config for boke project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default the settings module; a pre-set DJANGO_SETTINGS_MODULE wins.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'boke.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| 22.647059 | 78 | 0.781818 |
acf18e6ab514f2d5d898011439adc6dcaad36398 | 2,123 | py | Python | Spider-code Template/main.py | Arshianb/Web-Crawling | 5f9fea01bd5eceb903b55c5956ef5a2c0bf6c190 | [
"MIT"
] | 1 | 2022-02-25T00:09:19.000Z | 2022-02-25T00:09:19.000Z | Spider-code Template/main.py | Arshianb/Web-Crawling | 5f9fea01bd5eceb903b55c5956ef5a2c0bf6c190 | [
"MIT"
] | null | null | null | Spider-code Template/main.py | Arshianb/Web-Crawling | 5f9fea01bd5eceb903b55c5956ef5a2c0bf6c190 | [
"MIT"
] | null | null | null | import threading
from queue import Queue
from spider import Spider
from domain import *
from general import *
# 52
def s (i):
    """Crawl one bizrate category per call, then recurse on the next.

    Reads category *i* from HELP/crawled.txt, starts a small daemon thread
    pool that drains that category's queue file, and recurses on i+1 once
    the queue is empty.
    """
    if i<=category_len('HELP/crawled.txt') :
        print("_____________________________________________________{}________________________________________________________________".format(i) )
        # Category name is the 4th path segment of the stored URL.
        x1 = aaaaa('HELP/crawled.txt' , i).split('/')
        x=x1[3]
        PROJECT_NAME = x
        HOMEPAGE = 'http://www.bizrate.com/{}/ratings_guide/listing/'.format(x)
        DOMAIN_NAME = get_domain_name(HOMEPAGE)
        QUEUE_FILE = PROJECT_NAME + '/queue.txt'
        CRAWLED_FILE = PROJECT_NAME + '/crawled.txt'
        NUMBER_OF_THREADS = 5
        queue = Queue()
        # Side effect only: constructing Spider initialises its class state.
        Spider(PROJECT_NAME, HOMEPAGE, DOMAIN_NAME)
        # Create worker threads (will die when main exits)
        def create_workers():
            for _ in range(NUMBER_OF_THREADS):
                t = threading.Thread(target=work)
                t.daemon = True
                t.start()
        # Do the next job in the queue
        def work():
            while True:
                url = queue.get()
                Spider.crawl_page(threading.current_thread().name, url)
                queue.task_done()
        # Each queued link is a new job
        def create_jobs(i):
            for link in file_to_set(QUEUE_FILE):
                queue.put(link)
            queue.join()
            crawl(i)
        # Check if there are items in the queue, if so crawl them
        def crawl(i):
            queued_links = file_to_set(QUEUE_FILE)
            if len(queued_links) > 0:
                print(str(len(queued_links)) + ' links in the queue')
                create_jobs(i)
            else:
                # NOTE(review): the next two lines only access private Thread
                # attributes without calling them — they have no effect, and
                # _stop/_delete are CPython internals anyway.  Confirm whether
                # worker shutdown was intended here; recursion into s(i+1)
                # also keeps growing the Python call stack per category.
                for _ in range(NUMBER_OF_THREADS):
                    threading.current_thread()._stop
                    threading.current_thread()._delete
                s(i+1)
            # for _ in range(NUMBER_OF_THREADS):
            #     t = threading.Thread(target=work)
            #     t._stop
        create_workers()
        crawl(i)
# Kick off the crawl at the first category.
s(0)
acf18f0636e01c904818e775118e5900903e86c5 | 3,791 | py | Python | 01-python/source code/07_pytorch/03_rnn_detectimg.py | lizhangjie316/ComputerVision | 86d82358bd160074d154773df0284e1154a6d077 | [
"Apache-2.0"
] | 1 | 2019-11-05T08:38:03.000Z | 2019-11-05T08:38:03.000Z | 01-python/source code/07_pytorch/03_rnn_detectimg.py | lizhangjie316/ComputerVision | 86d82358bd160074d154773df0284e1154a6d077 | [
"Apache-2.0"
] | 6 | 2020-11-18T22:13:33.000Z | 2022-03-12T00:04:02.000Z | 01-python/source code/07_pytorch/03_rnn_detectimg.py | lizhangjie316/ComputerVision | 86d82358bd160074d154773df0284e1154a6d077 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
@File : 03_rnn_detectimg.py
@Time : 2019/7/25 14:11
@Author : Keen
@Software: PyCharm
"""
import matplotlib.pyplot as plt
import torch
import torch.utils.data as Data
import torchvision
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch import nn
torch.manual_seed(1)    # reproducible
# Hyper Parameters
EPOCH = 1           # how many passes over the training data; only 1 to save time
BATCH_SIZE = 64
TIME_STEP = 28      # rnn time steps / image height
INPUT_SIZE = 28     # rnn input per step / pixels per image row
LR = 0.01           # learning rate
DOWNLOAD_MNIST = True   # set to False once the mnist data has been downloaded
# MNIST handwritten digits
train_data = torchvision.datasets.MNIST(
    root='./mnist/',    # where to save / load the data
    train=True,  # this is training data
    transform=torchvision.transforms.ToTensor(),    # converts PIL.Image or numpy.ndarray to
    # torch.FloatTensor (C x H x W), normalized into [0.0, 1.0] for training
    download=DOWNLOAD_MNIST,  # download only if not already present
)
test_data = torchvision.datasets.MNIST(root='./mnist/', train=False)
# Batched training: 64 samples, 1 channel, 28x28 -> (64, 1, 28, 28).
# NOTE(review): this requires `import torch.utils.data as Data` at the top
# of the file.
train_loader = Data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
# To save time, only the first 2000 test samples are used.
test_x = torch.unsqueeze(test_data.test_data, dim=1).type(torch.FloatTensor)[:2000]/255.  # shape from (2000, 28, 28) to (2000, 1, 28, 28), value in range(0,1)
test_y = test_data.test_labels[:2000]
class RNN(nn.Module):
    """LSTM classifier for 28x28 MNIST digits.

    Each image is read as a sequence of 28 rows of 28 pixels; the LSTM
    output at the final time step is mapped to 10 class logits.
    """
    def __init__(self):
        super(RNN, self).__init__()
        # Single-layer LSTM over image rows; batch dimension comes first,
        # i.e. inputs/outputs are (batch, time_step, feature).
        self.rnn = nn.LSTM(
            input_size=28,
            hidden_size=64,
            num_layers=1,
            batch_first=True,
        )
        # Final classification layer: hidden state -> 10 digit classes.
        self.out = nn.Linear(64, 10)
    def forward(self, x):
        """x: (batch, 28, 28) -> logits of shape (batch, 10)."""
        # None = zero-initialised hidden and cell states.
        sequence_out, _ = self.rnn(x, None)
        # Keep only the last time step (equals the final hidden state h_n).
        last_step = sequence_out[:, -1, :]
        return self.out(last_step)
rnn = RNN()
print(rnn)
"""
RNN (
  (rnn): LSTM(28, 64, batch_first=True)
  (out): Linear (64 -> 10)
)
"""
optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all parameters
# Loss over the 10 digit classes.
loss_func = nn.CrossEntropyLoss()   # the target label is not one-hotted
# https://blog.csdn.net/google19890102/article/details/44039761
# training and testing
for epoch in range(EPOCH):
    for step, (x, b_y) in enumerate(train_loader):   # gives batch data
        b_x = x.view(-1, 28, 28)   # reshape x to (batch, time_step, input_size)
        output = rnn(b_x)   # rnn output
        loss = loss_func(output, b_y)   # cross entropy loss
        optimizer.zero_grad()   # clear gradients for this training step
        loss.backward()   # backpropagation, compute gradients
        optimizer.step()   # apply gradients
"""
...
Epoch: 0 | train loss: 0.0945 | test accuracy: 0.94
Epoch: 0 | train loss: 0.0984 | test accuracy: 0.94
Epoch: 0 | train loss: 0.0332 | test accuracy: 0.95
Epoch: 0 | train loss: 0.1868 | test accuracy: 0.96
"""
# Predict the first 10 test images and compare with the ground truth.
test_output = rnn(test_x[:10].view(-1, 28, 28))
pred_y = torch.max(test_output, 1)[1].data.numpy().squeeze()
print(pred_y, 'prediction number')
print(test_y[:10], 'real number')
"""
[7 2 1 0 4 1 4 9 5 9] prediction number
[7 2 1 0 4 1 4 9 5 9] real number
"""
| 31.857143 | 160 | 0.628066 |
acf18f08fc4c3caee37d6797cb04dee87a7a9796 | 9,453 | py | Python | SmartScope/smartscope/source/position.py | calebsanfo/IncubatorImaging | 720a11353b0fa9928792d7ed91fe1f02000dca9a | [
"MIT"
] | 1 | 2019-12-02T15:48:16.000Z | 2019-12-02T15:48:16.000Z | SmartScope/smartscope/source/position.py | yellenlab/IncubatorImaging | 90a63c18eb200a208daa834a5b692bdd114d87b8 | [
"MIT"
] | null | null | null | SmartScope/smartscope/source/position.py | yellenlab/IncubatorImaging | 90a63c18eb200a208daa834a5b692bdd114d87b8 | [
"MIT"
] | null | null | null | """
SmartScope
Position related functions and classes.
Duke University - 2019
Licensed under the MIT License (see LICENSE for details)
Written by Caleb Sanford
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import json
from collections import defaultdict
from collections import OrderedDict
import time
import tifffile as tif
import os
import skimage.io
import scipy.misc
import cv2
from smartscope.source import chip
from smartscope.source import sc_utils
class PositionList:
    """Ordered collection of StagePosition objects with list-like access,
    3D/2D plotting, stage imaging, and json persistence."""
    def __init__(self, sp=None, positions=None):
        if positions is not None:
            self.positions = positions
        else:
            self.positions = []
        # Optionally seed the list with a single StagePosition.
        if sp is not None and isinstance(sp, StagePosition):
            self.append(sp)
    def __len__(self):
        return len(self.positions)
    def __add__(self, other):
        # Concatenation returns a new PositionList; operands are unchanged.
        posits = self.positions + other.positions
        return PositionList(positions=posits)
    def __iter__(self):
        return iter(self.positions)
    def __getitem__(self, key):
        return self.positions[key]
    def __setitem__(self, key, val):
        self.positions[key] = val
    def __delitem__(self, key):
        del self.positions[key]
    def __str__(self):
        # One position per line.
        string = ''
        for p in self.positions:
            string = string + str(p) + '\n'
        return string
    def append(self, item):
        self.positions.append(item)
    def insert(self, item, idx):
        # NOTE(review): argument order is (item, idx) — the reverse of
        # list.insert(idx, item); confirm callers expect this.
        self.positions.insert(idx, item)
    def visualize(self, xy=False):
        ''' Plots a 3D PositionList
        arg:
            xy: bool - if True plot x vs y in 2D
        '''
        if xy is False:
            fig = plt.figure()
            plot = fig.add_subplot(111,projection='3d')
            xpos = [i.x for i in self.positions]
            ypos = [i.y for i in self.positions]
            zpos = [i.z for i in self.positions]
            plot.scatter(xpos,ypos,zpos)
            plot.set_xlabel('X')
            plot.set_ylabel('Y')
            plot.set_zlabel('Z')
        else:
            x = [p.x for p in self.positions]
            y = [p.y for p in self.positions]
            plt.scatter(x,y)
            plt.title('Position List')
            plt.xlabel('X')
            plt.ylabel('Y')
    def image(self, mmc, save_dir, naming_scheme, save_jpg=False, rotation=0, exposure=1, output_pixels=[2688,2200]):
        ''' Images the positions in the PositionList
        args:
            mmc: Micro-manager instance
            save_dir: Directory to save tiff files
        '''
        # NOTE(review): output_pixels is a mutable default argument; it is
        # only read here, but sharing across calls is a known pitfall.
        # Make the directory to save to and change into it
        orig_dir = os.getcwd()
        dir_name = save_dir+'\\'+naming_scheme
        os.makedirs(dir_name)
        os.chdir(dir_name)
        cam = sc_utils.start_cam()
        for ctr, pos in enumerate(self.positions):
            # set position and wait
            set_pos(mmc, pos.x, pos.y, z=pos.z)
            sc_utils.before_every_image()
            # Get image and save
            frame = sc_utils.get_live_frame(cam, exposure)
            sc_utils.after_every_image()
            frame = np.flipud(frame)
            # Apply the requested rotation in 90-degree increments.
            if rotation >= 90:
                frame = np.rot90(frame)
            if rotation >= 180:
                frame = np.rot90(frame)
            if rotation >= 270:
                frame = np.rot90(frame)
            convert_and_save(frame, save_jpg, pos, naming_scheme, output_pixels, convert_to_16bit=True)
            time.sleep(0.01)
        sc_utils.close_cam(cam)
        # Restore the caller's working directory.
        os.chdir(orig_dir)
    def save(self, filename, path):
        ''' Save PositionList() as a json file
        '''
        # Convert to dict form
        data = defaultdict(dict)
        for i, val in enumerate(self.positions):
            data[i]['x'] = val.x
            data[i]['y'] = val.y
            data[i]['z'] = val.z
            data[i]['theta'] = val.theta
            data[i]['numAxes'] = val.numAxes
        # Write to file
        with open(path + '/' + filename + '.json', 'w') as outfile:
            json.dump(data, outfile)
def convert_and_save(frame, save_jpg, pos, naming_scheme, output_pixels, convert_to_16bit=True):
    ''' Post-process one captured frame and write it to disk (relative to
    the current working directory, which image() has already changed).
    args:
        frame: image array from the camera
        save_jpg: also save a jpg copy under ./jpg
        pos: StagePosition whose name is embedded in the filename
        naming_scheme: filename prefix
        output_pixels: [width, height]; the frame is resized unless this
                       equals the native [2688, 2200]
        convert_to_16bit: rescale pixel values via sc_utils.bytescale
    NOTE(review): despite the parameter name, "bytescale" traditionally
    rescales to 8-bit - confirm sc_utils.bytescale's actual output depth.
    NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2, so the
    jpg branch needs a replacement (e.g. imageio) on modern SciPy.
    '''
    if convert_to_16bit:
        frame = sc_utils.bytescale(frame)
    if output_pixels != [2688, 2200]:
        frame = cv2.resize(frame, tuple(output_pixels), interpolation = cv2.INTER_AREA)
    tif.imwrite(naming_scheme + pos.name + time.strftime("%Y%m%d%H%M") + '.tif', frame)
    if save_jpg:
        os.makedirs('jpg', exist_ok=True)
        scipy.misc.imsave('jpg/'+naming_scheme + pos.name + time.strftime("%Y%m%d%H%M") + '.jpg', frame)
def load(filename, path):
    ''' Load PositionList() from json file
    args:
        filename: string (without the '.json' extension)
        path: directory holding the file
    returns:
        PositionList()
    Note: 'numAxes' is not read back (StagePosition recomputes it) and
    position names are not persisted by save().
    '''
    with open(path + '/' + filename + '.json') as handle:
        records = json.load(handle, object_pairs_hook=OrderedDict)
    positions = [
        StagePosition(x=rec['x'], y=rec['y'],
                      z=rec['z'], theta=rec['theta'])
        for rec in records.values()
    ]
    return PositionList(positions=positions)
def current(stage_controller, axis='xyz'):
    ''' Gets the current stage position
    arg:
        stage_controller: Micromanager instance
        axis: which axes to read ('x', 'y', 'z', 'xy'; any other value
              returns all three)
    returns:
        StagePosition with the requested axes populated
    '''
    single_axis_getters = {
        'x': sc_utils.get_x_pos,
        'y': sc_utils.get_y_pos,
        'z': sc_utils.get_z_pos,
    }
    if axis in single_axis_getters:
        value = single_axis_getters[axis](stage_controller)
        return StagePosition(**{axis: value})
    if axis == 'xy':
        return StagePosition(x=sc_utils.get_x_pos(stage_controller),
                             y=sc_utils.get_y_pos(stage_controller))
    # Default: full x/y/z readout.
    return StagePosition(x=sc_utils.get_x_pos(stage_controller),
                         y=sc_utils.get_y_pos(stage_controller),
                         z=sc_utils.get_z_pos(stage_controller))
def set_pos(stage_controller, x=None, y=None, z=None):
    ''' Sets a microscope position
    args:
        - stage_controller: mmc instance
        - x (float)
        - y (float)
        - z (float) (default is None - keeps previous focus)
    The stage moves first in xy, then in z, and the call blocks until the
    system reports ready.
    '''
    if z is None:
        # No focus change requested: xy move only.
        sc_utils.set_xy_pos(stage_controller, x, y)
    elif x is None and y is None:
        # Focus-only move.
        sc_utils.set_z_pos(stage_controller, z)
    else:
        sc_utils.set_xy_pos(stage_controller, x, y)
        sc_utils.set_z_pos(stage_controller, z)
    sc_utils.wait_for_system(stage_controller)
class StagePosition:
    ''' Stores the data of one instantaneous stage position
    args:
        x: x position (optional)
        y: y position (optional)
        z: z position (optional)
        theta: theta position (optional)
        name: label embedded in saved image filenames (optional)
    numAxes counts how many of x/y/z/theta were supplied.
    '''
    def __init__(self, x=None, y=None, z=None, theta=None, name=None):
        self.x = x
        self.y = y
        self.z = z
        self.theta = theta
        self.numAxes = 0
        self.name = name
        for val in [x, y, z, theta]:
            if val is not None:
                self.numAxes = self.numAxes + 1
    def __eq__(self, other):
        ''' Allows use of == operator on two StagePositions
        '''
        return (self.x == other.x and
                self.y == other.y and
                self.z == other.z and
                self.theta == other.theta and
                self.numAxes == other.numAxes)
    def __str__(self):
        ''' Allows for print(StagePosition()) to see values
        NOTE(review): formatting assumes axes were filled in x, y, z,
        theta order; a position created with only y set still prints x.
        '''
        if self.numAxes == 0:
            return 'No vals'
        if self.numAxes == 1:
            return "(" + str(self.x) + ")"
        elif self.numAxes == 2:
            return "(" + str(self.x) + "," + str(self.y) + ")"
        elif self.numAxes == 3:
            return ("(" + str(self.x) + "," + str(self.y) +
                    "," + str(self.z) + ")")
        else:
            return ("(" + str(self.x) + "," + str(self.y) +
                    "," + str(self.z) + "," + str(self.theta) + ")")
    def dist(self, other):
        ''' l2 distance between two stage postions. eg stage1.dist(stage2)
        args:
            other: StagePosition()
        returns:
            distance between points
        For positions with 3 or 4 axes the spatial (x, y, z) distance is
        returned; theta is excluded because it is an angle, not a length.
        (Previously a 4-axis position fell through every branch and the
        method silently returned None.)
        '''
        if self.numAxes == 0:
            raise ValueError('StagePosition does not have any values')
        if self.numAxes == 1:
            return np.sqrt(np.square(self.x - other.x))
        elif self.numAxes == 2:
            return np.sqrt(np.square(self.x - other.x) +
                           np.square(self.y - other.y))
        else:
            return np.sqrt(np.square(self.x - other.x) +
                           np.square(self.y - other.y) +
                           np.square(self.z - other.z))
    def goto(self, mmc, xy_only=False):
        ''' Goes to the stage position
        args:
            mmc: Micro-Manager instance
            xy_only: ignore the z axis
        '''
        if xy_only:
            mmc.setXYPosition(self.x,self.y)
            mmc.waitForSystem()
        else:
            mmc.setXYPosition(self.x,self.y)
            mmc.setPosition(self.z)
            mmc.waitForSystem()
| 31.721477 | 117 | 0.554533 |
acf18f5249ef2c184599148439087bc9c42473a0 | 13,520 | py | Python | azurelinuxagent/common/osutil/bigip.py | koifans/WALinuxAgent | 236c6c12d89757589411651ae015640d371251a4 | [
"Apache-2.0"
] | null | null | null | azurelinuxagent/common/osutil/bigip.py | koifans/WALinuxAgent | 236c6c12d89757589411651ae015640d371251a4 | [
"Apache-2.0"
] | null | null | null | azurelinuxagent/common/osutil/bigip.py | koifans/WALinuxAgent | 236c6c12d89757589411651ae015640d371251a4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import array
import fcntl
import os
import platform
import re
import socket
import struct
import time
try:
# WAAgent > 2.1.3
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.shellutil as shellutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.osutil.default import DefaultOSUtil
except ImportError:
# WAAgent <= 2.1.3
import azurelinuxagent.logger as logger
import azurelinuxagent.utils.shellutil as shellutil
from azurelinuxagent.exception import OSUtilError
from azurelinuxagent.distro.default.osutil import DefaultOSUtil
class BigIpOSUtil(DefaultOSUtil):
    """OS utilities for F5 BIG-IP: route provisioning through tmsh/mcpd
    instead of the stock Linux tooling."""
    def __init__(self):
        super(BigIpOSUtil, self).__init__()
    def _wait_until_mcpd_is_initialized(self):
        """Wait for mcpd to become available
        All configuration happens in mcpd so we need to wait that this is
        available before we go provisioning the system. I call this method
        at the first opportunity I have (during the DVD mounting call).
        This ensures that the rest of the provisioning does not need to wait
        for mcpd to be available unless it absolutely wants to.
        :return bool: Returns True upon success
        :raises OSUtilError: Raises exception if mcpd does not come up within
        roughly 50 minutes (100 * 30 seconds)
        """
        for retries in range(1, 100):
            # Retry until mcpd completes startup:
            logger.info("Checking to see if mcpd is up")
            rc = shellutil.run("/usr/bin/tmsh -a show sys mcp-state field-fmt 2>/dev/null | grep phase | grep running", chk_err=False)
            if rc == 0:
                logger.info("mcpd is up!")
                break
            time.sleep(30)
        # '==' not 'is': identity comparison with an int literal is
        # implementation-dependent (and a SyntaxWarning on modern Python).
        if rc == 0:
            return True
        raise OSUtilError(
            "mcpd hasn't completed initialization! Cannot proceed!"
        )
    def _save_sys_config(self):
        # Persist the running configuration; a failure is logged but not fatal.
        cmd = "/usr/bin/tmsh save sys config"
        rc = shellutil.run(cmd)
        if rc != 0:
            logger.error("WARNING: Cannot save sys config on 1st boot.")
        return rc
    def restart_ssh_service(self):
        return shellutil.run("/usr/bin/bigstart restart sshd", chk_err=False)
    def stop_agent_service(self):
        return shellutil.run("/sbin/service {0} stop".format(self.service_name), chk_err=False)
    def start_agent_service(self):
        return shellutil.run("/sbin/service {0} start".format(self.service_name), chk_err=False)
    def register_agent_service(self):
        return shellutil.run("/sbin/chkconfig --add {0}".format(self.service_name), chk_err=False)
    def unregister_agent_service(self):
        return shellutil.run("/sbin/chkconfig --del {0}".format(self.service_name), chk_err=False)
    def get_dhcp_pid(self):
        # pidof prints the pid(s) on stdout; None when dhclient is not running.
        ret = shellutil.run_get_output("/sbin/pidof dhclient")
        return ret[1] if ret[0] == 0 else None
    def set_hostname(self, hostname):
        """Set the static hostname of the device
        Normally, tmsh is used to set the hostname for the system. For our
        purposes at this time though, I would hesitate to trust this function.
        Azure(Stack) uses the name that you provide in the Web UI or ARM (for
        example) as the value of the hostname argument to this method. The
        problem is that there is nowhere in the UI that specifies the
        restrictions and checks that tmsh has for the hostname.
        For example, if you set the name "bigip1" in the Web UI, Azure(Stack)
        considers that a perfectly valid name. When WAAgent gets around to
        running though, tmsh will reject that value because it is not a fully
        qualified domain name. The proper value should have been bigip.xxx.yyy
        WAAgent will not fail if this command fails, but the hostname will not
        be what the user set either. Currently we do not set the hostname when
        WAAgent starts up, so I am passing on setting it here too.
        :param hostname: The hostname to set on the device
        """
        return None
    def set_dhcp_hostname(self, hostname):
        """Sets the DHCP hostname
        See `set_hostname` for an explanation of why I pass here
        :param hostname: The hostname to set on the device
        """
        return None
    def useradd(self, username, expiration=None, comment=None):
        """Create user account using tmsh
        Our policy is to create two accounts when booting a BIG-IP instance.
        The first account is the one that the user specified when they did
        the instance creation. The second one is the admin account that is,
        or should be, built in to the system.
        :param username: The username that you want to add to the system
        :param expiration: The expiration date to use. We do not use this
        value.
        :param comment: description of the account. We do not use this value.
        """
        if self.get_userentry(username):
            logger.info("User {0} already exists, skip useradd", username)
            return None
        cmd = "/usr/bin/tmsh create auth user %s partition-access add { all-partitions { role admin } } shell bash" % (username)
        retcode, out = shellutil.run_get_output(cmd, log_cmd=True, chk_err=True)
        if retcode != 0:
            raise OSUtilError(
                "Failed to create user account:{0}, retcode:{1}, output:{2}".format(username, retcode, out)
            )
        self._save_sys_config()
        return retcode
    def chpasswd(self, username, password, crypt_id=6, salt_len=10):
        """Change a user's password with tmsh
        Since we are creating the user specified account and additionally
        changing the password of the built-in 'admin' account, both must
        be modified in this method.
        Note that the default method also checks for a "system level" of the
        user; based on the value of UID_MIN in /etc/login.defs. In our env,
        all user accounts have the UID 0. So we can't rely on this value.
        :param username: The username whose password to change
        :param password: The unencrypted password to set for the user
        :param crypt_id: If encrypting the password, the crypt_id that was used
        :param salt_len: If encrypting the password, the length of the salt
        value used to do it.
        """
        # Start by setting the password of the user provided account
        cmd = "/usr/bin/tmsh modify auth user {0} password '{1}'".format(username, password)
        ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True)
        if ret != 0:
            raise OSUtilError(
                "Failed to set password for {0}: {1}".format(username, output)
            )
        # Next, set the password of the built-in 'admin' account to be have
        # the same password as the user provided account
        userentry = self.get_userentry('admin')
        if userentry is None:
            raise OSUtilError("The 'admin' user account was not found!")
        cmd = "/usr/bin/tmsh modify auth user 'admin' password '{0}'".format(password)
        ret, output = shellutil.run_get_output(cmd, log_cmd=False, chk_err=True)
        if ret != 0:
            raise OSUtilError(
                "Failed to set password for 'admin': {0}".format(output)
            )
        self._save_sys_config()
        return ret
    def del_account(self, username):
        """Deletes a user account.
        Note that the default method also checks for a "system level" of the
        user; based on the value of UID_MIN in /etc/login.defs. In our env,
        all user accounts have the UID 0. So we can't rely on this value.
        We also don't use sudo, so we remove that method call as well.
        :param username:
        :return:
        """
        shellutil.run("> /var/run/utmp")
        shellutil.run("/usr/bin/tmsh delete auth user " + username)
    def get_dvd_device(self, dev_dir='/dev'):
        """Find BIG-IP's CD/DVD device
        This device is almost certainly /dev/cdrom so I added the ? to this pattern.
        Note that this method will return upon the first device found, but in my
        tests with 12.1.1 it will also find /dev/sr0 on occasion. This is NOT the
        correct CD/DVD device though.
        :todo: Consider just always returning "/dev/cdrom" here if that device device
        exists on all platforms that are supported on Azure(Stack)
        :param dev_dir: The root directory from which to look for devices
        """
        pattern = r'(sr[0-9]|hd[c-z]|cdrom[0-9]?)'
        for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]:
            if dvd is not None:
                return "/dev/{0}".format(dvd.group(0))
        raise OSUtilError("Failed to get dvd device")
    def mount_dvd(self, **kwargs):
        """Mount the DVD containing the provisioningiso.iso file
        This is the _first_ hook that WAAgent provides for us, so this is the
        point where we should wait for mcpd to load. I am just overloading
        this method to add the mcpd wait. Then I proceed with the stock code.
        :param max_retry: Maximum number of retries waagent will make when
        mounting the provisioningiso.iso DVD
        :param chk_err: Whether to check for errors or not in the mounting
        commands
        """
        self._wait_until_mcpd_is_initialized()
        return super(BigIpOSUtil, self).mount_dvd(**kwargs)
    def eject_dvd(self, chk_err=True):
        """Runs the eject command to eject the provisioning DVD
        BIG-IP does not include an eject command. It is sufficient to just
        umount the DVD disk. But I will log that we do not support this for
        future reference.
        :param chk_err: Whether or not to check for errors raised by the eject
        command
        """
        logger.warn("Eject is not supported on this platform")
    def get_first_if(self):
        """Return the interface name, and ip addr of the management interface.
        We need to add a struct_size check here because, curiously, our 64bit
        platform is identified by python in Azure(Stack) as 32 bit and without
        adjusting the struct_size, we can't get the information we need.
        I believe this may be caused by only python i686 being shipped with
        BIG-IP instead of python x86_64??
        """
        iface = ''
        expected = 16  # how many devices should I expect...
        python_arc = platform.architecture()[0]
        if python_arc == '64bit':
            struct_size = 40  # for 64bit the size is 40 bytes
        else:
            struct_size = 32  # for 32bit the size is 32 bytes
        sock = socket.socket(socket.AF_INET,
                             socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        buff = array.array('B', b'\0' * (expected * struct_size))
        param = struct.pack('iL',
                            expected*struct_size,
                            buff.buffer_info()[0])
        # 0x8912 is SIOCGIFCONF: fills buff with ifreq structs in place.
        ret = fcntl.ioctl(sock.fileno(), 0x8912, param)
        retsize = (struct.unpack('iL', ret)[0])
        if retsize == (expected * struct_size):
            logger.warn(('SIOCGIFCONF returned more than {0} up '
                         'network interfaces.'), expected)
        # NOTE(review): array.tostring() was removed in Python 3.9;
        # tobytes() is the modern equivalent (tostring is kept here for the
        # Python 2.6+ compatibility this module declares).
        sock = buff.tostring()
        for i in range(0, struct_size * expected, struct_size):
            iface = self._format_single_interface_name(sock, i)
            # Azure public was returning "lo:1" when deploying WAF
            if b'lo' in iface:
                continue
            else:
                break
        return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24])
    def _format_single_interface_name(self, sock, offset):
        # First 16 bytes of an ifreq struct hold the NUL-padded name.
        return sock[offset:offset+16].split(b'\0', 1)[0]
    def route_add(self, net, mask, gateway):
        """Add specified route using tmsh.
        :param net:
        :param mask:
        :param gateway:
        :return:
        """
        cmd = ("/usr/bin/tmsh create net route "
               "{0}/{1} gw {2}").format(net, mask, gateway)
        return shellutil.run(cmd, chk_err=False)
    def device_for_ide_port(self, port_id):
        """Return device name attached to ide port 'n'.
        Include a wait in here because BIG-IP may not have yet initialized
        this list of devices.
        :param port_id:
        :return:
        """
        for retries in range(1, 100):
            # Retry until devices are ready
            if os.path.exists("/sys/bus/vmbus/devices/"):
                break
            else:
                time.sleep(10)
        return super(BigIpOSUtil, self).device_for_ide_port(port_id)
| 40.358209 | 134 | 0.635281 |
acf18f95773b13744260d6c358e6be535d671f29 | 2,282 | py | Python | player/subscriber.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 3 | 2015-06-16T11:12:29.000Z | 2019-05-03T09:09:21.000Z | player/subscriber.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 16 | 2015-08-18T14:35:55.000Z | 2021-06-10T17:31:04.000Z | player/subscriber.py | Amoki/Amoki-Music | 77b0e426fe9cc6c9cd12346a5e5e81a62362bb83 | [
"MIT"
] | 1 | 2016-10-19T14:48:52.000Z | 2016-10-19T14:48:52.000Z | # -*- coding: utf-8 -*-
from django.conf import settings
from ws4redis.subscriber import RedisSubscriber
from player.models import Room
from ws4redis.publisher import RedisPublisher
from ws4redis.redis_store import RedisMessage
import json
class CustomSubscriber(RedisSubscriber):
    """
    Subscriber class, used by the websocket code to listen for subscribed
    channels, and to keep each Room's listener count in sync with the
    number of open websockets.
    """
    subscription_channels = ['subscribe-session', 'subscribe-group', 'subscribe-user', 'subscribe-broadcast']
    publish_channels = ['publish-session', 'publish-group', 'publish-user', 'publish-broadcast']
    def __init__(self, connection):
        # The request is stashed so release() can recompute the listener
        # count of the room this websocket belonged to.
        self.request = None
        super(CustomSubscriber, self).__init__(connection)
    def set_pubsub_channels(self, request, channels):
        """
        Initialize the channels used for publishing and subscribing messages through the message queue.
        """
        super(CustomSubscriber, self).set_pubsub_channels(request, channels)
        self.request = request
        self.update_room_listeners(self.request)
    def release(self):
        """
        New implementation to free up Redis subscriptions when websockets close. This prevents
        memory sap when Redis Output Buffer and Output Lists build when websockets are abandoned.
        """
        super(CustomSubscriber, self).release()
        self.update_room_listeners(self.request)
    def update_room_listeners(self, request):
        """
        Recount the subscribers of the room's broadcast channel via
        'PUBSUB NUMSUB', persist the count on the Room, and broadcast a
        'listeners_updated' message to the room.
        Raises Exception when the websocket path does not map to a known
        room token.
        """
        facility = request.path_info.replace(settings.WEBSOCKET_URL, '', 1)
        # Let the database test membership instead of materialising every
        # Room just to build a list of tokens.
        if not Room.objects.filter(token=facility).exists():
            raise Exception("Unknown room")
        prefix = self.get_prefix()
        key = prefix + 'broadcast:' + facility
        query = self._connection.execute_command('PUBSUB', 'NUMSUB', key)
        room_to_update = Room.objects.get(token=facility)
        # NUMSUB replies [channel, count, ...]; an empty reply means no
        # subscribers remain.
        room_to_update.listeners = int(query[1]) if len(query) > 1 else 0
        room_to_update.save()
        redis_publisher = RedisPublisher(facility=room_to_update.token, broadcast=True)
        message = {
            'action': 'listeners_updated',
            'listeners': room_to_update.listeners
        }
        listenersMessage = RedisMessage(json.dumps(message))
        redis_publisher.publish_message(listenersMessage)
| 38.677966 | 109 | 0.694566 |
acf1901afeca266d1384d52e1d61ec2b6f29f41d | 707 | py | Python | 7-assets/past-student-repos/LambdaSchool-master/m6/61c1/src/oop/test_oop2.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | null | null | null | 7-assets/past-student-repos/LambdaSchool-master/m6/61c1/src/oop/test_oop2.py | eengineergz/Lambda | 1fe511f7ef550aed998b75c18a432abf6ab41c5f | [
"MIT"
] | 8 | 2020-03-24T17:47:23.000Z | 2022-03-12T00:33:21.000Z | cs/lambda_cs/01_intro_python/Sprint-Challenge--Intro-Python/src/oop/test_oop2.py | tobias-fyi/vela | b0b3d3c6dc3fa397c8c7a492098a02cf75e0ff82 | [
"MIT"
] | null | null | null | import unittest
from oop2 import *
class Oop2Tests(unittest.TestCase):
def setUp(self):
self.ground_vehicle = GroundVehicle()
self.motorcycle = Motorcycle()
def test_motorcycle_inheritance(self):
self.assertTrue(isinstance(self.motorcycle, GroundVehicle))
def test_ground_vehicle_num_wheels(self):
self.assertEqual(self.ground_vehicle.num_wheels, 4)
def test_motocycle_num_wheels(self):
self.assertEqual(self.motorcycle.num_wheels, 2)
def test_ground_vehicle_drive(self):
self.assertEqual(self.ground_vehicle.drive(), "vroooom")
def test_motorcyle_drive(self):
self.assertEqual(self.motorcycle.drive(), "BRAAAP!!")
if __name__ == '__main__':
unittest.main() | 27.192308 | 63 | 0.759547 |
acf1911c813ac9220ac53090ccc806b75cf55c6a | 384 | py | Python | test/time_oper.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | test/time_oper.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | test/time_oper.py | sunwenquan/iSurvey | bc14a102e56b33fb4b3612b3bef98183f6e46612 | [
"Apache-2.0"
] | null | null | null | import time,datetime
import pytz
from tzlocal import get_localzone # $ pip install tzlocal
# 1. 如何设置时区
# set timezone
tz = pytz.timezone('Asia/Shanghai')
# get local timezone
local_tz = get_localzone()
# get all_timezones
all_timezones = pytz.all_timezones
print(all_timezones)
print(tz)
print(local_tz)
# 2. 获取时间
print(datetime.datetime.now())
print(datetime.datetime.utcnow())
| 22.588235 | 57 | 0.770833 |
acf191a3ce4734ca41e068b33ebc1c34f547969a | 6,774 | py | Python | bot/cogs/kick.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 4 | 2020-11-25T16:31:41.000Z | 2021-08-28T21:35:01.000Z | bot/cogs/kick.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 12 | 2020-12-21T09:42:13.000Z | 2021-05-16T06:17:49.000Z | bot/cogs/kick.py | phantom0174/HSQCC_bot | 93d4b40f7d8885bcf927590926370d67e05a5760 | [
"MIT"
] | 2 | 2021-04-13T08:28:12.000Z | 2021-07-11T02:41:35.000Z | import discord
from discord.ext import commands
from ..core.cog_config import CogExtension
from ..core.db.jsonstorage import JsonApi
from ..core.db.mongodb import Mongo
from ..core.fluctlight_ext import Fluct
from typing import Union
class KickMember(CogExtension):
    """Cog implementing the 'kick' command group: maintain a MongoDB-backed
    queue ('ReadyToKick') of members to remove, and carry out the kicks.
    Command docstrings double as the Discord help text for each command.
    """
    @commands.group()
    @commands.has_any_role('總召', 'Administrator')
    async def kick(self, ctx):
        # Group entry point; only the subcommands below do any work.
        pass
    @kick.command()
    async def list(self, ctx):
        """cmd
        List the members queued to be kicked.
        """
        await ctx.send(content=':hourglass_flowing_sand: 尋找中...', delete_after=3.0)
        kick_cursor = Mongo('sqcs-bot').get_cur('ReadyToKick')
        data = kick_cursor.find({})
        if data.count() == 0:
            return await ctx.send(':x: 待踢除名單為空!')
        kick_member_list = ''
        for member in data:
            member_info: str = (
                f'Id: {member["_id"]},'
                f'Name: {member["name"]},'
                f'Contrib: {member["contrib"]},'
                f'lvl_ind: {member["lvl_ind"]}\n'
            )
            kick_member_list += member_info
            # Flush the buffer early so each message stays well under
            # Discord's message length cap.
            if len(kick_member_list) > 1600:
                await ctx.send(kick_member_list)
                kick_member_list = ''
        if len(kick_member_list) > 0:
            await ctx.send(kick_member_list)
        await ctx.send(':white_check_mark: 記錄尋找完畢!')
    @kick.command(aliases=['insert'])
    async def add(self, ctx, target_member: Union[discord.Member, int]):
        """cmd
        Add member <target_member> to the kick queue.
        .target_member: a member mention, or the member's Discord id
        """
        # Accept either a resolved Member (from a mention) or a raw id.
        if isinstance(target_member, discord.Member):
            member_id = target_member.id
        else:
            member_id = target_member
        # The member must have a fluctlight (profile) record to be queued.
        fluctlight_cursor = Mongo('LightCube').get_cur('MainFluctlights')
        data = fluctlight_cursor.find_one({"_id": member_id})
        if not data:
            return await ctx.send(f':x: 沒有成員 {target_member} 的搖光資料!')
        member_info = {
            "_id": member_id,
            "name": data["name"],
            "contrib": data["contrib"],
            "lvl_ind": data["lvl_ind"]
        }
        kick_cursor = Mongo('sqcs-bot').get_cur('ReadyToKick')
        kick_cursor.insert_one(member_info)
        await ctx.send(f':white_check_mark: 成員 {data["name"]} - {member_id} 已被加到待踢除名單!')
    @kick.command(aliases=['delete', 'del'])
    async def remove(self, ctx, target_member: Union[discord.Member, int]):
        """cmd
        Remove member <target_member> from the kick queue.
        .target_member: a member mention, or the member's Discord id
        """
        if isinstance(target_member, discord.Member):
            member_id = target_member.id
        else:
            member_id = target_member
        kick_cursor = Mongo('sqcs-bot').get_cur('ReadyToKick')
        data = kick_cursor.find_one({"_id": member_id})
        if not data:
            return await ctx.send(f':x: 成員 {member_id} 不在待踢除名單中!')
        kick_cursor.delete_one({"_id": member_id})
        await ctx.send(f':white_check_mark: 已將成員 {data["name"]} - {member_id} 從待踢除名單中移除!')
    @kick.command(aliases=['single'])
    async def kick_single(self, ctx, target_member: Union[discord.Member, int], kick_reason: str):
        """cmd
        Kick member <target_member> (must already be in the kick queue).
        .target_member: a member mention, or the member's Discord id
        .kick_reason: reason for the kick; 'default' uses the stored
                      violation index
        """
        if isinstance(target_member, discord.Member):
            member_id = target_member.id
        else:
            member_id = target_member
        kick_cursor = Mongo('sqcs-bot').get_cur('ReadyToKick')
        data = kick_cursor.find_one({"_id": member_id})
        if not data:
            return await ctx.send(f':x: 成員 {member_id} 不在待踢除名單中!')
        kick_user = ctx.guild.get_member(member_id)
        if kick_reason == 'default':
            kick_reason = f':skull_crossbones: 違反指數達到了 {data["lvl_ind"]}'
        msg = await JsonApi.get_humanity('kick/kick_single', '\n')
        msg += f'> {kick_reason}\n'
        msg += await JsonApi.get_humanity('kick/re_join')
        # no perm to send msg to user via server
        try:
            await kick_user.send(msg)
        except BaseException:
            pass
        try:
            # Kick, then purge the member's fluctlight records and the
            # queue entry so all three stay consistent.
            await kick_user.kick(reason=kick_reason)
            fluct_ext = Fluct(member_id=member_id)
            await fluct_ext.delete_main()
            await fluct_ext.delete_vice()
            kick_cursor.delete_one({"_id": member_id})
            await ctx.send(f':white_check_mark: 成員 {data["name"]} - {data["_id"]} 已被踢除!')
        except Exception as e:
            await ctx.send(f':x: 踢除 {data["name"]} - {data["_id"]} 時發生了錯誤!')
            await ctx.send(content=e, delete_after=5.0)
    @kick.command(aliases=['all'])
    async def kick_all(self, ctx):
        """cmd
        Kick every member currently in the kick queue.
        """
        kick_cursor = Mongo('sqcs-bot').get_cur('ReadyToKick')
        data = kick_cursor.find({})
        if data.count() == 0:
            return await ctx.send(':x: 待踢除名單為空!')
        fluct_ext = Fluct()
        for member in data:
            kick_user = ctx.guild.get_member(member["_id"])
            msg = await JsonApi.get_humanity('kick/kick_all', '\n')
            msg += f'> Levelling index reached {member["lvl_ind"]}.\n'
            msg += await JsonApi.get_humanity('kick/re_join')
            # no perm to send msg to user via server
            try:
                await kick_user.send(msg)
            except BaseException:
                pass
            try:
                await kick_user.kick(reason=f'違反指數達到了 {member["lvl_ind"]}')
                await fluct_ext.delete_main(member["_id"])
                await fluct_ext.delete_vice(member["_id"])
            except Exception as e:
                await ctx.send(f':x: 踢除 {member["name"]} - {member["_id"]} 時發生了錯誤!')
                await ctx.send(content=e, delete_after=5.0)
        # The whole queue is cleared even if individual kicks failed above.
        kick_cursor.delete_many({})
        await ctx.send(':white_check_mark: 所有在待踢除名單中的成員已被踢除!')
class NT(CogExtension):
    """Cog for the JSON-backed blacklist ('NT') commands.
    NOTE(review): unlike KickMember, `list` and `add` are registered with
    @commands.command() instead of @nt.command(), so they are standalone
    commands rather than subcommands of the 'nt' group - confirm this is
    intentional.
    """
    @commands.group()
    @commands.has_any_role('總召', 'Administrator')
    async def nt(self, ctx):
        # Group entry point; currently has no registered subcommands.
        pass
    @commands.command()
    async def list(self, ctx):
        """cmd
        List the blacklist.
        """
        id_list = JsonApi.get('NT')["id_list"]
        await ctx.send(id_list)
    @commands.command(aliases=['push', 'insert'])
    async def add(self, ctx, user_id: int = None):
        """cmd
        Add member <user_id> to the blacklist.
        .user_id: the member's Discord id
        """
        nt_json = JsonApi.get('NT')
        # Silently do nothing when no id was supplied.
        if user_id is None:
            return
        nt_json['id_list'].append(user_id)
        JsonApi.put('NT', nt_json)
        await ctx.send(':white_check_mark: 指令執行完畢!')
def setup(bot):
    # discord.py extension entry point: register both cogs on load.
    bot.add_cog(KickMember(bot))
    bot.add_cog(NT(bot))
| 30.790909 | 98 | 0.575583 |
acf191ad6d9f8818dae37db275bbd461162963d9 | 241 | py | Python | tests/test_reporting.py | emaric/poor-trader-py | 77b38938d55ad223cf1f1e87637c8f2035fbbf3d | [
"MIT"
] | 3 | 2018-05-26T00:57:58.000Z | 2019-03-28T08:24:16.000Z | tests/test_reporting.py | emaric/poor-trader-py | 77b38938d55ad223cf1f1e87637c8f2035fbbf3d | [
"MIT"
] | null | null | null | tests/test_reporting.py | emaric/poor-trader-py | 77b38938d55ad223cf1f1e87637c8f2035fbbf3d | [
"MIT"
] | 1 | 2018-12-31T06:02:36.000Z | 2018-12-31T06:02:36.000Z | import unittest
class TestReporting(unittest.TestCase):
def test_create_trades_csv(self):
self.fail('TODO')
def test_create_performance_csv(self):
self.fail('TODO')
if __name__ == '__main__':
unittest.main()
| 17.214286 | 42 | 0.684647 |
acf191b9841e9395d3339a359bcba689d68ddfd3 | 860 | py | Python | q2_mystery_stew/transformers.py | qiime2/q2-mystery-stew | 20cfd76bd5158e208abd6e26e99be3bc9353d7d1 | [
"BSD-3-Clause"
] | 1 | 2020-03-02T18:46:18.000Z | 2020-03-02T18:46:18.000Z | q2_mystery_stew/transformers.py | qiime2/q2-mystery-stew | 20cfd76bd5158e208abd6e26e99be3bc9353d7d1 | [
"BSD-3-Clause"
] | 13 | 2020-03-26T18:42:28.000Z | 2021-12-16T21:31:50.000Z | q2_mystery_stew/transformers.py | qiime2/q2-mystery-stew | 20cfd76bd5158e208abd6e26e99be3bc9353d7d1 | [
"BSD-3-Clause"
] | 5 | 2020-03-04T15:36:57.000Z | 2021-08-29T22:54:32.000Z | # ----------------------------------------------------------------------------
# Copyright (c) 2020-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import qiime2
from q2_mystery_stew.format import SingleIntFormat, MetadataLikeFormat
def to_single_int_format(data: int) -> SingleIntFormat:
    """Write *data* as a single newline-terminated integer into a new
    SingleIntFormat file object."""
    fmt = SingleIntFormat()
    with fmt.open() as handle:
        handle.write('%d\n' % data)
    return fmt
def transform_to_metadata(ff: MetadataLikeFormat) -> qiime2.Metadata:
    # QIIME 2 transformer: parse a MetadataLikeFormat file from disk into a
    # qiime2.Metadata object (str(ff) yields the file's path).
    return qiime2.Metadata.load(str(ff))
def transform_from_metatadata(data: qiime2.Metadata) -> MetadataLikeFormat:
    # QIIME 2 transformer: write a Metadata object into a fresh
    # MetadataLikeFormat file. ext=None presumably keeps Metadata.save from
    # appending a file extension to the format's path - confirm against
    # the qiime2.Metadata.save API.
    # NOTE(review): the function name misspells "metadata" ("metatadata");
    # left unchanged because the transformer may be registered under it.
    ff = MetadataLikeFormat()
    data.save(str(ff), ext=None)
    return ff
| 30.714286 | 78 | 0.601163 |
acf192f836dad5d336edf55c344ad370bfe1b7c9 | 13,705 | py | Python | 271120201642.py | O8pen/PhraseTranslate | 62e657d1e58ab36df27f181f51410840526e939f | [
"Apache-2.0"
] | null | null | null | 271120201642.py | O8pen/PhraseTranslate | 62e657d1e58ab36df27f181f51410840526e939f | [
"Apache-2.0"
] | null | null | null | 271120201642.py | O8pen/PhraseTranslate | 62e657d1e58ab36df27f181f51410840526e939f | [
"Apache-2.0"
] | null | null | null | import clipboard
import win32api, win32con
import time
from pyautogui import *
import pyautogui
from pynput.keyboard import Listener, Key
# --- Screen coordinates (pixels), tuned to one specific desktop layout ---
# "Next" button of the phrase-reader window (used by nextbutton()).
next_x = 612
next_y = 562
# "Previous" button of the phrase-reader window (used by prevbutton()).
prev_x = 359
prev_y = 562
# Blank area of the phrase-reader window - presumably clicked to focus it.
phrasereader_blank_x = 480
phrasereader_blank_y = 840
# Text box of the translator window (clicked before select-all / delete).
translate_text_x = 1356
translate_text_y = 352
# Blank area of the translator window (clicked before the listen hotkey).
translate_blank_x = 1392
translate_blank_y = 222
# Most recently captured phrase text (set elsewhere in the script).
text = ""
# Captured words; the number keys copy x[vk-49] to the clipboard
# (vk 49 == '1' maps to x[0]).
x = []
# Whether a capture has been performed yet.
hasbeencaptured = False
# Virtual-key code of the last hotkey handled by on_press().
last_key = 0
# Debounce flags: set on key-down; presumably cleared by a key-release
# handler outside this excerpt so a held key only fires once - confirm.
was_pressed_next = False
was_pressed_prev = False
was_pressed_typing = False
was_pressed_listen = False
was_pressed_one = False
was_pressed_two = False
was_pressed_three = False
was_pressed_four = False
was_pressed_five = False
was_pressed_six = False
was_pressed_seven = False
was_pressed_eight = False
was_pressed_nine = False
was_pressed_ten = False
was_pressed_allwords = False
def _replay_translation():
    """Click the translator's blank area and press Alt+J to replay TTS."""
    click(translate_blank_x, translate_blank_y)
    time.sleep(0.05)
    pyautogui.hotkey('alt', 'j')


def _speak_word(vk):
    """Capture the current phrase and speak the word selected by a digit key.

    ``vk`` is the virtual-key code of the pressed digit ('1' == 49, ...).
    A repeated press of the same digit replays the previous translation
    instead of re-capturing and re-copying the word.
    """
    global last_key
    if last_key == vk:
        last_key = vk
        _replay_translation()
    else:
        last_key = vk
        capture()
        word_index = vk - 49  # digit '1' (vk 49) selects word 0
        if len(x) > word_index:
            clipboard.copy(x[word_index])
            playsound()


def on_press(key):
    """pynput key-press handler driving the phrase reader and translator.

    Numpad 5/4 page the phrase reader forward/back, Numpad 6 clears the
    translator input, Numpad + replays the translation, digit keys pick
    the n-th word of the captured phrase, and Numpad 0 speaks the whole
    phrase.  Each key is edge-triggered via its ``was_pressed_*`` flag
    (cleared in ``on_release``) so holding a key does not repeat.
    """
    global was_pressed_next
    global was_pressed_prev
    global was_pressed_typing
    global was_pressed_listen
    global was_pressed_one
    global was_pressed_two
    global was_pressed_three
    global was_pressed_four
    global was_pressed_five
    global was_pressed_six
    global was_pressed_seven
    global was_pressed_eight
    global was_pressed_nine
    global was_pressed_ten
    global was_pressed_allwords
    global last_key
    if not hasattr(key, 'vk'):
        return  # special keys (shift, ctrl, ...) carry no vk code
    vk = key.vk
    if vk == 101:  # Numpad 5: advance the phrase reader
        if not was_pressed_next:
            was_pressed_next = True
            last_key = 101
            nextbutton()
    elif vk == 100:  # Numpad 4: go back one phrase
        if not was_pressed_prev:
            was_pressed_prev = True
            last_key = 100
            prevbutton()
    elif vk == 102:  # Numpad 6: clear the translator input box
        if not was_pressed_typing:
            was_pressed_typing = True
            last_key = 102
            click(translate_text_x, translate_text_y)
            pyautogui.hotkey('ctrl', 'a')
            time.sleep(0.1)
            pyautogui.hotkey('backspace')
    elif vk == 107:  # Numpad +: (re)play the translation via Alt+J
        if not was_pressed_listen:
            was_pressed_listen = True
            if last_key == 107:
                last_key = 107
                _replay_translation()
            else:
                last_key = 107
                pyautogui.hotkey('backspace')
                time.sleep(0.05)
                click(translate_blank_x, translate_blank_y)
                time.sleep(0.25)
                pyautogui.hotkey('alt', 'j')
    elif vk == 49:  # '1'
        if not was_pressed_one:
            was_pressed_one = True
            _speak_word(49)
    elif vk == 50:  # '2'
        if not was_pressed_two:
            was_pressed_two = True
            _speak_word(50)
    elif vk == 51:  # '3'
        if not was_pressed_three:
            was_pressed_three = True
            _speak_word(51)
    elif vk == 52:  # '4'
        if not was_pressed_four:
            was_pressed_four = True
            _speak_word(52)
    elif vk == 53:  # '5'
        if not was_pressed_five:
            was_pressed_five = True
            _speak_word(53)
    elif vk == 54:  # '6'
        if not was_pressed_six:
            was_pressed_six = True
            _speak_word(54)
    elif vk == 55:  # '7'
        if not was_pressed_seven:
            was_pressed_seven = True
            _speak_word(55)
    elif vk == 56:  # '8'
        if not was_pressed_eight:
            was_pressed_eight = True
            _speak_word(56)
    elif vk == 57:  # '9'
        if not was_pressed_nine:
            was_pressed_nine = True
            _speak_word(57)
    elif vk == 58:
        # NOTE(review): vk 58 is not the '0' key on standard layouts
        # (that is vk 48) -- confirm which physical key this targets.
        if not was_pressed_ten:
            was_pressed_ten = True
            _speak_word(58)
    elif vk == 96:  # Numpad 0: speak the whole captured phrase
        if not was_pressed_allwords:
            was_pressed_allwords = True
            if last_key == 96:
                last_key = 96
                _replay_translation()
            else:
                last_key = 96
                capture()
                clipboard.copy(text)
                playsound()
def on_release(key):
    """pynput key-release handler: clear the edge-trigger flag for the
    released key so its on_press action can fire again."""
    global was_pressed_next
    global was_pressed_prev
    global was_pressed_typing
    global was_pressed_listen
    global was_pressed_one
    global was_pressed_two
    global was_pressed_three
    global was_pressed_four
    global was_pressed_five
    global was_pressed_six
    global was_pressed_seven
    global was_pressed_eight
    global was_pressed_nine
    global was_pressed_ten
    global was_pressed_allwords
    # print("Key released: {0}".format(key))
    if hasattr(key, 'vk') and key.vk == 101: # Numpad 5
        was_pressed_next = False
    elif hasattr(key, 'vk') and key.vk == 100: # Numpad 4
        was_pressed_prev = False
    elif hasattr(key, 'vk') and key.vk == 102: # Numpad 6
        was_pressed_typing = False
    elif hasattr(key, 'vk') and key.vk == 107: # Numpad + (matches on_press vk 107)
        was_pressed_listen = False
    elif hasattr(key, 'vk') and key.vk == 49: # 1
        was_pressed_one = False
    elif hasattr(key, 'vk') and key.vk == 50: # 2
        was_pressed_two = False
    elif hasattr(key, 'vk') and key.vk == 51: # 3
        was_pressed_three = False
    elif hasattr(key, 'vk') and key.vk == 52: # 4
        was_pressed_four = False
    elif hasattr(key, 'vk') and key.vk == 53: # 5
        was_pressed_five = False
    elif hasattr(key, 'vk') and key.vk == 54: # 6
        was_pressed_six = False
    elif hasattr(key, 'vk') and key.vk == 55: # 7
        was_pressed_seven = False
    elif hasattr(key, 'vk') and key.vk == 56: # 8
        was_pressed_eight = False
    elif hasattr(key, 'vk') and key.vk == 57: # 9
        was_pressed_nine = False
    elif hasattr(key, 'vk') and key.vk == 58: # vk 58 ('0' here; standard '0' is vk 48 -- TODO confirm)
        was_pressed_ten = False
    elif hasattr(key, 'vk') and key.vk == 96: # Numpad 0
        was_pressed_allwords = False
def click(x,y):
    """Move the mouse cursor to screen position (x, y) and left-click."""
    win32api.SetCursorPos((x,y))
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    # Brief hold between button-down and button-up so the target
    # application registers the click.
    time.sleep(0.05)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
def nextbutton():
    """Click the phrase reader's Next button and mark the (new) phrase
    as not yet captured."""
    global hasbeencaptured
    global next_x
    global next_y
    click(next_x,next_y)
    hasbeencaptured = False
def prevbutton():
    """Click the phrase reader's Prev button and mark the (new) phrase
    as not yet captured."""
    global hasbeencaptured
    global prev_x
    global prev_y
    click(prev_x,prev_y)
    hasbeencaptured = False
def playsound():
    """Paste the clipboard into the translator's input box and trigger
    text-to-speech playback (Alt+J)."""
    global translate_text_x
    global translate_text_y
    global translate_blank_x
    global translate_blank_y
    # Focus the input box, replace its content with the clipboard text.
    click(translate_text_x,translate_text_y)
    # click(translate_blank_x,translate_blank_y)
    pyautogui.hotkey('ctrl', 'a')
    time.sleep(0.1)
    pyautogui.hotkey('ctrl', 'v')
    time.sleep(0.05)
    pyautogui.hotkey('alt', 'j')
    # Give playback time to start before clicking away from the box.
    time.sleep(0.55)
    click(translate_blank_x,translate_blank_y)
def capture():
    """Copy the current phrase out of the reader window and tokenize it.

    Focuses the phrase-reader window, selects and copies its whole
    content, trims the leading 2-character prefix and everything after
    the first line, strips trailing punctuation, and splits the result
    into the module-level word list ``x`` (full phrase kept in ``text``).
    Guarded by ``hasbeencaptured`` so it runs at most once per phrase.
    """
    global text
    global x
    global hasbeencaptured
    if hasbeencaptured:
        return
    # Focus the phrase-reader window and copy its whole content.
    click(phrasereader_blank_x, phrasereader_blank_y)
    time.sleep(0.25)
    pyautogui.hotkey('ctrl', 'a')
    time.sleep(0.05)
    pyautogui.hotkey('ctrl', 'c')
    time.sleep(0.05)
    click(phrasereader_blank_x, phrasereader_blank_y)
    text = clipboard.paste()
    # Drop the 2-character prefix and keep only the first line.
    # NOTE(review): when no newline is present find() returns -1, so this
    # slice chops the last two characters -- confirm that is intended.
    text = text[2:]
    end_index = text.find('\n') - 1
    text = text[0:end_index]
    # Strip any run of trailing punctuation in a single pass.  The
    # original chained per-character rstrip() calls, which failed on
    # mixed trailing punctuation such as 'word\n@' or 'word?!'.
    text = text.rstrip("\n@#$%^&*()=+/[]{};:><|?,.")
    x = text.split(" ")
    hasbeencaptured = True
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join() | 32.865707 | 93 | 0.528128 |
acf195866227915f68f94bae1801525024528ee5 | 15,194 | py | Python | photutils/aperture/circle.py | shivangeerathi/photutils | 446b9701b14ab80a307a7da04d1c1609cc24e569 | [
"BSD-3-Clause"
] | null | null | null | photutils/aperture/circle.py | shivangeerathi/photutils | 446b9701b14ab80a307a7da04d1c1609cc24e569 | [
"BSD-3-Clause"
] | null | null | null | photutils/aperture/circle.py | shivangeerathi/photutils | 446b9701b14ab80a307a7da04d1c1609cc24e569 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module defines circular and circular-annulus apertures in both
pixel and sky coordinates.
"""
import math
import numpy as np
from .attributes import (AngleOrPixelScalarQuantity, PixelPositions,
PositiveScalar, SkyCoordPositions)
from .core import PixelAperture, SkyAperture
from .mask import ApertureMask
from ..geometry import circular_overlap_grid
# Public API of this module.
__all__ = ['CircularMaskMixin', 'CircularAperture', 'CircularAnnulus',
           'SkyCircularAperture', 'SkyCircularAnnulus']
class CircularMaskMixin:
    """
    Mixin class to create masks for circular and circular-annulus
    aperture objects.
    """
    def to_mask(self, method='exact', subpixels=5):
        """
        Return a mask for the aperture.
        Parameters
        ----------
        method : {'exact', 'center', 'subpixel'}, optional
            The method used to determine the overlap of the aperture on
            the pixel grid. Not all options are available for all
            aperture types. Note that the more precise methods are
            generally slower. The following methods are available:
            * ``'exact'`` (default):
                The exact fractional overlap of the aperture and
                each pixel is calculated. The returned mask will
                contain values between 0 and 1.
            * ``'center'``:
                A pixel is considered to be entirely in or out of the
                aperture depending on whether its center is in or out
                of the aperture. The returned mask will contain
                values only of 0 (out) and 1 (in).
            * ``'subpixel'``:
                A pixel is divided into subpixels (see the
                ``subpixels`` keyword), each of which are considered
                to be entirely in or out of the aperture depending on
                whether its center is in or out of the aperture. If
                ``subpixels=1``, this method is equivalent to
                ``'center'``. The returned mask will contain values
                between 0 and 1.
        subpixels : int, optional
            For the ``'subpixel'`` method, resample pixels by this factor
            in each dimension. That is, each pixel is divided into
            ``subpixels ** 2`` subpixels.
        Returns
        -------
        mask : `~photutils.aperture.ApertureMask` or list of `~photutils.aperture.ApertureMask`
            A mask for the aperture. If the aperture is scalar then a
            single `~photutils.aperture.ApertureMask` is returned,
            otherwise a list of `~photutils.aperture.ApertureMask` is
            returned.
        """
        use_exact, subpixels = self._translate_mask_mode(method, subpixels)
        # The mixin serves both plain circles (attribute ``r``) and
        # annuli (attribute ``r_out``): use the outermost radius.
        if hasattr(self, 'r'):
            radius = self.r
        elif hasattr(self, 'r_out'):  # annulus
            radius = self.r_out
        else:
            raise ValueError('Cannot determine the aperture radius.')
        # One mask per aperture position; edges are the bounding-box
        # pixel edges expressed relative to the aperture center.
        masks = []
        for bbox, edges in zip(np.atleast_1d(self.bbox),
                               self._centered_edges):
            ny, nx = bbox.shape
            mask = circular_overlap_grid(edges[0], edges[1], edges[2],
                                         edges[3], nx, ny, radius, use_exact,
                                         subpixels)
            # subtract the inner circle for an annulus
            if hasattr(self, 'r_in'):
                mask -= circular_overlap_grid(edges[0], edges[1], edges[2],
                                              edges[3], nx, ny, self.r_in,
                                              use_exact, subpixels)
            masks.append(ApertureMask(mask, bbox))
        if self.isscalar:
            return masks[0]
        else:
            return masks
class CircularAperture(CircularMaskMixin, PixelAperture):
    """
    A circular aperture defined in pixel coordinates.

    The aperture shape (radius) is fixed, but any number of center
    positions may be supplied.

    Parameters
    ----------
    positions : array_like or `~astropy.units.Quantity`
        Pixel coordinates of the aperture center(s): a single ``(x, y)``
        pair (tuple, list, or `~numpy.ndarray`), a sequence of such
        pairs, or a `~astropy.units.Quantity` of ``(x, y)`` pairs in
        pixel units.
    r : float
        Radius of the circle in pixels.

    Raises
    ------
    ValueError : `ValueError`
        If the input radius, ``r``, is negative.

    Examples
    --------
    >>> from photutils import CircularAperture
    >>> aper = CircularAperture((10., 20.), 3.)
    >>> aper = CircularAperture([(10., 20.), (30., 40.), (50., 60.)], 3.)
    """
    _shape_params = ('r',)
    positions = PixelPositions('positions')
    r = PositiveScalar('r')

    def __init__(self, positions, r):
        self.positions = positions
        self.r = r

    @property
    def _xy_extents(self):
        # A circle extends by its radius equally in x and y.
        return self.r, self.r

    @property
    def area(self):
        # Analytic area, pi * r**2.
        return self.r ** 2 * math.pi

    def _to_patch(self, origin=(0, 0), **kwargs):
        """
        Return `~matplotlib.patches.Circle` patch(es) for the aperture.

        Parameters
        ----------
        origin : array_like, optional
            The ``(x, y)`` position of the origin of the displayed
            image.
        kwargs : `dict`
            Any keyword arguments accepted by
            `matplotlib.patches.Patch`.

        Returns
        -------
        patch : `~matplotlib.patches.patch` or list of `~matplotlib.patches.patch`
            A single patch when the aperture is scalar, otherwise a
            list of patches (one per position).
        """
        import matplotlib.patches as mpatches
        xy_positions, patch_kwargs = self._define_patch_params(origin=origin,
                                                               **kwargs)
        patches = [mpatches.Circle(xy_position, self.r, **patch_kwargs)
                   for xy_position in xy_positions]
        return patches[0] if self.isscalar else patches

    def to_sky(self, wcs):
        """
        Convert the aperture to a `SkyCircularAperture` object defined
        in celestial coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation supporting
            the astropy shared WCS interface (e.g., `astropy.wcs.WCS`,
            `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `SkyCircularAperture` object
            The equivalent sky aperture.
        """
        sky_params = self._to_sky_params(wcs)
        return SkyCircularAperture(**sky_params)
class CircularAnnulus(CircularMaskMixin, PixelAperture):
    """
    A circular annulus aperture defined in pixel coordinates.

    The aperture shape (inner and outer radii) is fixed, but any number
    of center positions may be supplied.

    Parameters
    ----------
    positions : array_like or `~astropy.units.Quantity`
        Pixel coordinates of the aperture center(s): a single ``(x, y)``
        pair (tuple, list, or `~numpy.ndarray`), a sequence of such
        pairs, or a `~astropy.units.Quantity` of ``(x, y)`` pairs in
        pixel units.
    r_in : float
        Inner radius of the circular annulus in pixels.
    r_out : float
        Outer radius of the circular annulus in pixels; must be larger
        than ``r_in``.

    Raises
    ------
    ValueError : `ValueError`
        If the inner radius (``r_in``) is greater than the outer radius
        (``r_out``), or if the inner radius is negative.

    Examples
    --------
    >>> from photutils import CircularAnnulus
    >>> aper = CircularAnnulus((10., 20.), 3., 5.)
    >>> aper = CircularAnnulus([(10., 20.), (30., 40.), (50., 60.)], 3., 5.)
    """
    _shape_params = ('r_in', 'r_out')
    positions = PixelPositions('positions')
    r_in = PositiveScalar('r_in')
    r_out = PositiveScalar('r_out')

    def __init__(self, positions, r_in, r_out):
        if not r_out > r_in:
            raise ValueError('r_out must be greater than r_in')
        self.positions = positions
        self.r_in = r_in
        self.r_out = r_out

    @property
    def _xy_extents(self):
        # The outer circle bounds the annulus in both x and y.
        return self.r_out, self.r_out

    @property
    def area(self):
        # Area between the outer and inner circles.
        return (self.r_out ** 2 - self.r_in ** 2) * math.pi

    def _to_patch(self, origin=(0, 0), **kwargs):
        """
        Return annulus `~matplotlib.patches.PathPatch` patch(es) for the
        aperture.

        Parameters
        ----------
        origin : array_like, optional
            The ``(x, y)`` position of the origin of the displayed
            image.
        kwargs : `dict`
            Any keyword arguments accepted by
            `matplotlib.patches.Patch`.

        Returns
        -------
        patch : `~matplotlib.patches.patch` or list of `~matplotlib.patches.patch`
            A single patch when the aperture is scalar, otherwise a
            list of patches (one per position).
        """
        import matplotlib.patches as mpatches
        xy_positions, patch_kwargs = self._define_patch_params(origin=origin,
                                                               **kwargs)
        patches = []
        for xy_position in xy_positions:
            inner = mpatches.Circle(xy_position, self.r_in)
            outer = mpatches.Circle(xy_position, self.r_out)
            annulus_path = self._make_annulus_path(inner, outer)
            patches.append(mpatches.PathPatch(annulus_path, **patch_kwargs))
        return patches[0] if self.isscalar else patches

    def to_sky(self, wcs):
        """
        Convert the aperture to a `SkyCircularAnnulus` object defined
        in celestial coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation supporting
            the astropy shared WCS interface (e.g., `astropy.wcs.WCS`,
            `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `SkyCircularAnnulus` object
            The equivalent sky aperture.
        """
        sky_params = self._to_sky_params(wcs)
        return SkyCircularAnnulus(**sky_params)
class SkyCircularAperture(SkyAperture):
    """
    A circular aperture defined in sky coordinates.

    The aperture shape (radius) is fixed, but any number of center
    positions may be supplied.

    Parameters
    ----------
    positions : `~astropy.coordinates.SkyCoord`
        Celestial coordinates of the aperture center(s); either scalar
        coordinates or an array of coordinates.
    r : scalar `~astropy.units.Quantity`
        The radius of the circle, either in angular or pixel units.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import astropy.units as u
    >>> from photutils import SkyCircularAperture
    >>> positions = SkyCoord(ra=[10., 20.], dec=[30., 40.], unit='deg')
    >>> aper = SkyCircularAperture(positions, 0.5*u.arcsec)
    """
    _shape_params = ('r',)
    positions = SkyCoordPositions('positions')
    r = AngleOrPixelScalarQuantity('r')

    def __init__(self, positions, r):
        self.positions = positions
        self.r = r

    def to_pixel(self, wcs):
        """
        Convert the aperture to a `CircularAperture` object defined in
        pixel coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation supporting
            the astropy shared WCS interface (e.g., `astropy.wcs.WCS`,
            `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `CircularAperture` object
            The equivalent pixel aperture.
        """
        pixel_params = self._to_pixel_params(wcs)
        return CircularAperture(**pixel_params)
class SkyCircularAnnulus(SkyAperture):
    """
    A circular annulus aperture defined in sky coordinates.

    The aperture has a single fixed size/shape, but it can have multiple
    positions (see the ``positions`` input).

    Parameters
    ----------
    positions : `~astropy.coordinates.SkyCoord`
        The celestial coordinates of the aperture center(s). This can be
        either scalar coordinates or an array of coordinates.
    r_in : scalar `~astropy.units.Quantity`
        The inner radius of the circular annulus, either in angular or
        pixel units.
    r_out : scalar `~astropy.units.Quantity`
        The outer radius of the circular annulus, either in angular or
        pixel units.  Must be larger than ``r_in``.

    Raises
    ------
    ValueError : `ValueError`
        If ``r_in`` and ``r_out`` are not both angles or both in pixels,
        or if ``r_out`` is not greater than ``r_in``.

    Examples
    --------
    >>> from astropy.coordinates import SkyCoord
    >>> import astropy.units as u
    >>> from photutils import SkyCircularAnnulus
    >>> positions = SkyCoord(ra=[10., 20.], dec=[30., 40.], unit='deg')
    >>> aper = SkyCircularAnnulus(positions, 0.5*u.arcsec, 1.0*u.arcsec)
    """
    _shape_params = ('r_in', 'r_out')
    positions = SkyCoordPositions('positions')
    r_in = AngleOrPixelScalarQuantity('r_in')
    r_out = AngleOrPixelScalarQuantity('r_out')

    def __init__(self, positions, r_in, r_out):
        if r_in.unit.physical_type != r_out.unit.physical_type:
            raise ValueError("r_in and r_out should either both be angles "
                             "or in pixels.")
        # Consistency with CircularAnnulus: the annulus must have a
        # positive width.  The physical types match at this point, so
        # Quantity comparison handles any unit conversion.
        if not r_out > r_in:
            raise ValueError('r_out must be greater than r_in')
        self.positions = positions
        self.r_in = r_in
        self.r_out = r_out

    def to_pixel(self, wcs):
        """
        Convert the aperture to a `CircularAnnulus` object defined in
        pixel coordinates.

        Parameters
        ----------
        wcs : WCS object
            A world coordinate system (WCS) transformation that
            supports the `astropy shared interface for WCS
            <https://docs.astropy.org/en/stable/wcs/wcsapi.html>`_
            (e.g., `astropy.wcs.WCS`, `gwcs.wcs.WCS`).

        Returns
        -------
        aperture : `CircularAnnulus` object
            A `CircularAnnulus` object.
        """
        return CircularAnnulus(**self._to_pixel_params(wcs))
| 32.74569 | 95 | 0.579308 |
acf195dfc54a4a12a05d4fc50cf1e2d0be17a1c5 | 979 | py | Python | bin/split.py | kieranhj/nova-invite | 937bb6530f36bd87be2e79ce78c310be5cece9be | [
"MIT"
] | 1 | 2021-01-26T16:52:31.000Z | 2021-01-26T16:52:31.000Z | bin/split.py | kieranhj/nova-invite | 937bb6530f36bd87be2e79ce78c310be5cece9be | [
"MIT"
] | null | null | null | bin/split.py | kieranhj/nova-invite | 937bb6530f36bd87be2e79ce78c310be5cece9be | [
"MIT"
] | null | null | null | # Split our large binary
import sys

# Disk geometry: 10 sectors of 256 bytes per track.
TRACK_SIZE = 10 * 256
# Payload size of each follow-on disk image (79 tracks).
SPLIT_SIZE = 79 * TRACK_SIZE
# Payload size of the first disk image (50 tracks).
FIRST_SIZE = 50 * TRACK_SIZE

if __name__ == '__main__':
    # Read the whole input binary (path given as the first argument).
    with open(sys.argv[1], 'rb') as f:
        data = f.read()
    size = len(data)
    # The first chunk must cover a whole number of tracks: round up.
    first_chunk = FIRST_SIZE
    if first_chunk % TRACK_SIZE != 0:
        first_chunk += TRACK_SIZE - (first_chunk % TRACK_SIZE)
    print("size=", size, " first=", first_chunk, " split=", SPLIT_SIZE)
    # Chunk 0 holds the first first_chunk bytes; chunks 1-3 hold
    # SPLIT_SIZE bytes each.  Files are closed deterministically via
    # the with-statement (the original leaked the input handle).
    offsets = [0,
               first_chunk,
               first_chunk + 1 * SPLIT_SIZE,
               first_chunk + 2 * SPLIT_SIZE,
               first_chunk + 3 * SPLIT_SIZE]
    for i in range(4):
        with open('scene1_disk.%02d.bin' % i, 'wb') as nf:
            nf.write(data[offsets[i]:offsets[i + 1]])
| 23.878049 | 73 | 0.634321 |
acf198349e26d0895db6cd43f13c88c765b8eade | 584 | py | Python | Exercicios/EX014.py | rodrigojleandro/PYTHON | 6fc49693576398d4e97f5695788939f8c50deb2a | [
"MIT"
] | null | null | null | Exercicios/EX014.py | rodrigojleandro/PYTHON | 6fc49693576398d4e97f5695788939f8c50deb2a | [
"MIT"
] | null | null | null | Exercicios/EX014.py | rodrigojleandro/PYTHON | 6fc49693576398d4e97f5695788939f8c50deb2a | [
"MIT"
] | null | null | null |
###############################################################################################################
# CHALLENGE: 014
# TITLE: Temperature Converter
# LESSON: 07
# EXERCISE: Write a program that reads a temperature in Celsius and converts it to Fahrenheit (and Kelvin).
###############################################################################################################
# float() instead of int() so decimal temperatures are accepted too.
temp = float(input("Digite a temperatura em ºC: "))
f = (1.8 * temp) + 32  # Fahrenheit
k = temp + 273.15      # Kelvin (0 degC = 273.15 K; the original used 273)
print(" Celsius: {:.1f} ºc \n Fahrenheit: {:.0f} ºf \n Kelvin: {:.0f} K.".format(temp, f, k))
| 38.933333 | 111 | 0.414384 |
acf1988545c4f62fe37a66651a82b8ac9e37b952 | 128 | py | Python | source/blockchain_backup/version.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | source/blockchain_backup/version.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | source/blockchain_backup/version.py | denova-com/blockchain-backup | a445bcbd67bd6485a4969dc1e24d51fbffc43cff | [
"OLDAP-2.6",
"OLDAP-2.4"
] | null | null | null | '''
Blockchain Backup version.
Copyright 2020-2022 DeNova
Last modified: 2022-02-01
'''
# Version string surfaced by the application; bump on every release.
CURRENT_VERSION = '1.3.5'
| 14.222222 | 30 | 0.65625 |
acf198b4417c7c3770f3999ac6da2e469e4d95fe | 2,179 | py | Python | pyutils/test_grow_sim.py | eltrompetero/forests | 32f38a22ec305ef08d01ee72f2656d57228f6579 | [
"MIT"
] | 2 | 2021-04-20T07:34:15.000Z | 2021-04-25T23:22:37.000Z | pyutils/test_grow_sim.py | eltrompetero/forests | 32f38a22ec305ef08d01ee72f2656d57228f6579 | [
"MIT"
] | 1 | 2021-04-25T09:43:02.000Z | 2021-04-25T09:43:02.000Z | pyutils/test_grow_sim.py | eltrompetero/forests | 32f38a22ec305ef08d01ee72f2656d57228f6579 | [
"MIT"
] | null | null | null | # ====================================================================================== #
# Automata compartment model for forest growth.
# Author : Eddie Lee, edlee@santafe.edu
#
#
# MIT License
#
# Copyright (c) 2021 Edward D. Lee
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ====================================================================================== #
from .grow_sim import *
def test_overlap_area():
    """Tangent circles do not overlap; coincident unit circles overlap fully.

    The original test asserted ``overlap_area(2, 1, 1) == 0`` twice; the
    duplicate has been removed.
    """
    # Circles whose centers are separated by r1 + r2 are tangent: zero overlap.
    assert overlap_area(2, 1, 1) == 0
    # Fully coincident unit circles overlap over the whole disk.
    assert overlap_area(0, 1, 1) == np.pi
def test_delete_flat_dist_rowcol():
    """Deleting point i from the condensed distance vector must match
    deleting row/column i from the equivalent square matrix."""
    np.random.seed(0)
    n_points = 10
    flat = np.random.rand(n_points * (n_points - 1) // 2)
    square = squareform(flat)
    for idx in range(n_points):
        reduced_flat = delete_flat_dist_rowcol(flat, idx, n_points)
        expected = np.delete(np.delete(square, idx, axis=0), idx, axis=1)
        assert np.array_equal(squareform(reduced_flat), expected)
def test_append_flat_dist_rowcol(n=10):
    """Appending a row/column and then deleting it must round-trip to the
    original condensed distance vector.

    Leftover debug ``print()`` calls from the original test were removed.
    """
    np.random.seed(0)
    d = np.random.rand(n * (n-1) // 2)
    roundtrip = delete_flat_dist_rowcol(append_flat_dist_rowcol(d, -1, n), n, n + 1)
    assert np.array_equal(d, roundtrip)
| 38.22807 | 90 | 0.64525 |
acf199c303ea0c9b69b1541e9e2e2f4093281597 | 7,887 | py | Python | bentoml/_internal/utils/formparser.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 1 | 2021-06-12T17:04:07.000Z | 2021-06-12T17:04:07.000Z | bentoml/_internal/utils/formparser.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | 4 | 2021-05-16T08:06:25.000Z | 2021-11-13T08:46:36.000Z | bentoml/_internal/utils/formparser.py | francoisserra/BentoML | 213e9e9b39e055286f2649c733907df88e6d2503 | [
"Apache-2.0"
] | null | null | null | import io
import uuid
import typing as t
import multipart.multipart as multipart
from starlette.requests import Request
from starlette.responses import Response
from starlette.formparsers import MultiPartMessage
from starlette.datastructures import Headers
from starlette.datastructures import MutableHeaders
from ...exceptions import BentoMLException
# One (field_name, raw part headers, raw body) triple per multipart part.
_ItemsBody = t.List[t.Tuple[str, t.List[t.Tuple[bytes, bytes]], bytes]]
def user_safe_decode(src: bytes, codec: str) -> str:
    """Decode *src* with *codec*, falling back to latin-1 when the codec
    is unknown (LookupError) or the bytes are invalid for it
    (UnicodeDecodeError); latin-1 can decode any byte sequence."""
    try:
        decoded = src.decode(codec)
    except (UnicodeDecodeError, LookupError):
        decoded = src.decode("latin-1")
    return decoded
class MultiPartParser:
    """
    A modified version of the starlette ``MultiPartParser``.

    Differences from starlette: a part's field name may also be supplied
    via the custom ``bentoml-payload-field`` header, and ``parse()``
    returns raw ``(field_name, headers, body)`` triples rather than a
    ``FormData`` object.
    """
    def __init__(self, headers: Headers, stream: t.AsyncGenerator[bytes, None]) -> None:
        assert (
            multipart is not None
        ), "The `python-multipart` library must be installed to use form parsing."
        self.headers: Headers = headers
        self.stream = stream
        # Event buffer filled by the python-multipart callbacks below and
        # drained chunk-by-chunk inside parse().
        self.messages: t.List[t.Tuple[MultiPartMessage, bytes]] = list()
    # --- python-multipart callbacks: each event is buffered as a
    # (message type, payload bytes) tuple for later processing. ---
    def on_part_begin(self) -> None:
        message = (MultiPartMessage.PART_BEGIN, b"")
        self.messages.append(message)
    def on_part_data(self, data: bytes, start: int, end: int) -> None:
        message = (MultiPartMessage.PART_DATA, data[start:end])
        self.messages.append(message)
    def on_part_end(self) -> None:
        message = (MultiPartMessage.PART_END, b"")
        self.messages.append(message)
    def on_header_field(self, data: bytes, start: int, end: int) -> None:
        message = (MultiPartMessage.HEADER_FIELD, data[start:end])
        self.messages.append(message)
    def on_header_value(self, data: bytes, start: int, end: int) -> None:
        message = (MultiPartMessage.HEADER_VALUE, data[start:end])
        self.messages.append(message)
    def on_header_end(self) -> None:
        message = (MultiPartMessage.HEADER_END, b"")
        self.messages.append(message)
    def on_headers_finished(self) -> None:
        message = (MultiPartMessage.HEADERS_FINISHED, b"")
        self.messages.append(message)
    def on_end(self) -> None:
        message = (MultiPartMessage.END, b"")
        self.messages.append(message)
    async def parse(self) -> _ItemsBody:
        # Parse the Content-Type header to get the multipart boundary.
        _, params = multipart.parse_options_header(self.headers["Content-Type"])
        params = t.cast(t.Dict[bytes, bytes], params)
        charset = params.get(b"charset", b"utf-8")
        charset = charset.decode("latin-1")
        boundary = params.get(b"boundary")
        # Callbacks dictionary.
        callbacks = {
            "on_part_begin": self.on_part_begin,
            "on_part_data": self.on_part_data,
            "on_part_end": self.on_part_end,
            "on_header_field": self.on_header_field,
            "on_header_value": self.on_header_value,
            "on_header_end": self.on_header_end,
            "on_headers_finished": self.on_headers_finished,
            "on_end": self.on_end,
        }
        # Create the parser.
        parser = multipart.MultipartParser(boundary, callbacks)
        header_field = b""
        header_value = b""
        field_name = ""
        data = b""
        items: _ItemsBody = []
        headers: t.List[t.Tuple[bytes, bytes]] = []
        # Feed the parser with data from the request.
        async for chunk in self.stream:
            parser.write(chunk)
            # Snapshot and clear the events emitted while writing this
            # chunk, then replay them to assemble the parts.
            messages = list(self.messages)
            self.messages.clear()
            for message_type, message_bytes in messages:
                if message_type == MultiPartMessage.PART_BEGIN:
                    field_name = ""
                    data = b""
                    headers = list()
                elif message_type == MultiPartMessage.HEADER_FIELD:  # type: ignore
                    header_field += message_bytes
                elif message_type == MultiPartMessage.HEADER_VALUE:  # type: ignore
                    header_value += message_bytes
                elif message_type == MultiPartMessage.HEADER_END:  # type: ignore
                    field = header_field.lower()
                    # The field name comes from Content-Disposition or from
                    # the custom bentoml-payload-field header; any other
                    # header is preserved verbatim on the part.
                    if field == b"content-disposition":
                        _, options = multipart.parse_options_header(header_value)
                        options = t.cast(t.Dict[bytes, bytes], options)
                        field_name = user_safe_decode(options[b"name"], charset)
                    elif field == b"bentoml-payload-field":
                        field_name = user_safe_decode(header_value, charset)
                    else:
                        headers.append((field, header_value))
                    header_field = b""
                    header_value = b""
                elif message_type == MultiPartMessage.HEADERS_FINISHED:  # type: ignore
                    assert (
                        field_name
                    ), "`Content-Disposition` is not available in headers"
                elif message_type == MultiPartMessage.PART_DATA:  # type: ignore
                    data += message_bytes
                elif message_type == MultiPartMessage.PART_END:  # type: ignore
                    items.append((field_name, headers, data))
        parser.finalize()
        return items
async def populate_multipart_requests(request: Request) -> t.Dict[str, Request]:
    """Split a multipart request into one sub-``Request`` per form field.

    Each part's headers are merged over the parent request's headers, the
    part body becomes the sub-request body, and the result is keyed by the
    part's field name.

    Raises:
        BentoMLException: if the multipart body cannot be parsed.
    """
    media_type, _ = multipart.parse_options_header(
        request.headers.get("Content-Type")
    )
    assert media_type in (b"multipart/form-data", b"multipart/mixed")
    body_stream = t.cast(t.AsyncGenerator[bytes, None], request.stream())
    parser = MultiPartParser(request.headers, body_stream)
    try:
        parts = await parser.parse()
    except multipart.MultipartParseError:
        raise BentoMLException("Invalid multipart requests")
    sub_requests: t.Dict[str, Request] = {}
    for field_name, part_headers, part_body in parts:
        # Clone the parent scope, overlaying this part's headers on top of
        # the parent's headers.
        part_scope = dict(request.scope)
        merged_headers = t.cast(
            t.Dict[bytes, bytes], dict(part_scope.get("headers", list()))
        )
        merged_headers.update(dict(part_headers))
        part_scope["headers"] = list(merged_headers.items())
        sub_request = Request(part_scope)
        sub_request._body = part_body
        sub_requests[field_name] = sub_request
    return sub_requests
def _get_disp_filename(headers: MutableHeaders) -> t.Optional[bytes]:
    """Return the ``filename`` option of a Content-Disposition header.

    Returns ``None`` when the header is absent or carries no filename.
    """
    if "content-disposition" not in headers:
        return None
    _, options = multipart.parse_options_header(headers["content-disposition"])
    filename = options.get(b"filename")
    return None if filename is None else t.cast(bytes, filename)
async def concat_to_multipart_responses(
    responses: t.Mapping[str, Response]
) -> Response:
    """Combine named responses into a single multipart/form-data response.

    Each entry becomes one part: a Content-Disposition line carrying the
    field name (and the original filename, when one is present), the
    response's remaining headers, then its body.
    """
    boundary = uuid.uuid4().hex
    headers = {"content-type": f"multipart/form-data; boundary={boundary}"}
    boundary_bytes = boundary.encode("latin1")
    chunks: t.List[bytes] = []
    for field_name, resp in responses.items():
        chunks.append(b"--%b\r\n" % boundary_bytes)
        # part headers
        filename = _get_disp_filename(resp.headers)
        if filename:
            chunks.append(
                b'Content-Disposition: form-data; name="%b"; filename="%b"\r\n'
                % (field_name.encode("latin1"), filename)
            )
        else:
            chunks.append(
                b'Content-Disposition: form-data; name="%b"\r\n'
                % field_name.encode("latin1")
            )
        for header_key, header_value in resp.raw_headers:
            # The part's disposition line was written above; skip the
            # original header so it is not duplicated.
            if header_key == b"content-disposition":
                continue
            chunks.append(b"%b: %b\r\n" % (header_key, header_value))
        chunks.append(b"\r\n")
        # part body
        chunks.append(resp.body)
        chunks.append(b"\r\n")
    chunks.append(b"--%b--\r\n" % boundary_bytes)
    return Response(b"".join(chunks), headers=headers)
| 37.736842 | 88 | 0.614556 |
acf19a04d80e7ca24ec3b33ed829bf7a5fdb3e48 | 1,337 | py | Python | config.py | tianlinzhong/python-mysql-connector-demo | 72dbae14ca6bb04dea486192afeb77da6206e8ee | [
"MIT"
] | null | null | null | config.py | tianlinzhong/python-mysql-connector-demo | 72dbae14ca6bb04dea486192afeb77da6206e8ee | [
"MIT"
] | null | null | null | config.py | tianlinzhong/python-mysql-connector-demo | 72dbae14ca6bb04dea486192afeb77da6206e8ee | [
"MIT"
] | null | null | null | import mysql.connector
import delete
import insert
import select
import update
import logging
from logging.handlers import RotatingFileHandler
# Connection parameters for the demo MySQL database (mysql.connector style).
# SECURITY NOTE(review): real-looking credentials are hard-coded in source
# control here; they should be moved to environment variables or an
# untracked config file.
myems_demo_db = {
    'user': 'sql12382808',
    'password': 'CvugQvJ8pr',
    'host': 'sql12.freemysqlhosting.net',
    'database': 'sql12382808',
    'port': 3306,
}
# if __name__ == "__main__":
# """main"""
# # create logger
# logger = logging.getLogger('mysql-connector-demo1')
# # specifies the lowest-severity log message a logger will handle,
# # where debug is the lowest built-in severity level and critical is the highest built-in severity.
# # For example, if the severity level is INFO, the logger will handle only INFO, WARNING, ERROR, and CRITICAL
# # messages and will ignore DEBUG messages.
# logger.setLevel(logging.ERROR)
# # create file handler which logs messages
# fh = RotatingFileHandler('mysql-connector-demo.log', maxBytes=1024 * 1024, backupCount=1)
# # create formatter and add it to the handlers
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# fh.setFormatter(formatter)
# # add the handlers to logger
# logger.addHandler(fh)
# delete.main(logger)
# select.main(logger)
# insert.main(logger)
# update.main(logger) | 38.2 | 115 | 0.673149 |
acf19a29e14cadbb0535e3bf37b3e8bd34aeb6d8 | 36 | py | Python | tests/scruples/demos/__init__.py | allenai/scruples | 9a43459c507e57d89ab8442a4f3985cedecb8710 | [
"Apache-2.0"
] | 29 | 2020-05-09T10:55:45.000Z | 2022-03-28T16:18:02.000Z | tests/scruples/demos/__init__.py | allenai/scruples | 9a43459c507e57d89ab8442a4f3985cedecb8710 | [
"Apache-2.0"
] | null | null | null | tests/scruples/demos/__init__.py | allenai/scruples | 9a43459c507e57d89ab8442a4f3985cedecb8710 | [
"Apache-2.0"
] | 6 | 2020-10-05T12:24:28.000Z | 2021-12-06T19:51:06.000Z | """Tests for ``scruples.demos``."""
| 18 | 35 | 0.583333 |
acf19a94af3403cf983799b3db1b3c3003808497 | 8,026 | py | Python | tests/test_schema_editor_partitioning.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 529 | 2017-03-20T08:16:30.000Z | 2022-03-31T13:23:09.000Z | tests/test_schema_editor_partitioning.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 137 | 2017-06-08T07:59:22.000Z | 2022-02-07T08:34:38.000Z | tests/test_schema_editor_partitioning.py | adamchainz/django-postgres-extra | c11dbb5b75e16f7bd8fd336cc051806cf587269f | [
"MIT"
] | 67 | 2017-06-21T10:01:13.000Z | 2022-02-24T21:23:24.000Z | import pytest
from django.core.exceptions import ImproperlyConfigured
from django.db import connection, models
from psqlextra.backend.schema import PostgresSchemaEditor
from psqlextra.types import PostgresPartitioningMethod
from . import db_introspection
from .fake_model import define_fake_partitioned_model
def test_schema_editor_create_delete_partitioned_model_range():
    """Tests whether creating a partitioned model and adding a range partition
    to it using the :see:PostgresSchemaEditor works."""
    method = PostgresPartitioningMethod.RANGE
    key = ["timestamp"]
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": method, "key": key},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_range_partition(model, "pt1", "2019-01-01", "2019-02-01")
    # The introspected table must reflect the requested partitioning
    # configuration and contain the single partition that was added.
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
    schema_editor.delete_partitioned_model(model)
    # Deleting the partitioned model removes the table and its partitions.
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table
    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0
def test_schema_editor_create_delete_partitioned_model_list():
    """Tests whether creating a partitioned model and adding a list partition
    to it using the :see:PostgresSchemaEditor works."""
    method = PostgresPartitioningMethod.LIST
    key = ["category"]
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "category": models.TextField()},
        {"method": method, "key": key},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_list_partition(model, "pt1", ["car", "boat"])
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_pt1"
    schema_editor.delete_partitioned_model(model)
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table
    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0
def test_schema_editor_create_delete_partitioned_model_default():
    """Tests whether creating a partitioned model and adding a default
    partition to it using the :see:PostgresSchemaEditor works."""
    method = PostgresPartitioningMethod.LIST
    key = ["category"]
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "category": models.TextField()},
        {"method": method, "key": key},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_default_partition(model, "default")
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert table.name == model._meta.db_table
    assert table.method == method
    assert table.key == key
    assert table.partitions[0].full_name == model._meta.db_table + "_default"
    schema_editor.delete_partitioned_model(model)
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert not table
    partitions = db_introspection.get_partitions(model._meta.db_table)
    assert len(partitions) == 0
def test_schema_editor_create_partitioned_model_no_method():
    """Verify a partitioned model can be created without an explicit
    partitioning method.

    Range partitioning is the default, so omitting ``method`` must still
    yield a range-partitioned table with no partitions.
    """
    partitioned_model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"key": ["timestamp"]},
    )
    editor = PostgresSchemaEditor(connection)
    editor.create_partitioned_model(partitioned_model)
    introspected = db_introspection.get_partitioned_table(
        partitioned_model._meta.db_table
    )
    assert introspected.method == PostgresPartitioningMethod.RANGE
    assert len(introspected.partitions) == 0
def test_schema_editor_create_partitioned_model_no_key():
    """Verify that creating a partitioned model without a partitioning key
    raises :see:ImproperlyConfigured.

    There is no sane default for the key, so configuration must fail loudly.
    """
    keyless_model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": PostgresPartitioningMethod.RANGE},
    )
    editor = PostgresSchemaEditor(connection)
    with pytest.raises(ImproperlyConfigured):
        editor.create_partitioned_model(keyless_model)
def test_schema_editor_add_range_partition():
    """Tests whether adding a range partition works."""
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"key": ["timestamp"]},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_range_partition(
        model,
        name="mypartition",
        from_values="2019-1-1",
        to_values="2019-2-1",
        comment="test",
    )
    # The partition's full name is the parent table name plus the partition
    # name, and the comment must round-trip through the database.
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"
    schema_editor.delete_partition(model, "mypartition")
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0
def test_schema_editor_add_list_partition():
    """Tests whether adding a list partition works."""
    model = define_fake_partitioned_model(
        {"name": models.TextField()},
        {"method": PostgresPartitioningMethod.LIST, "key": ["name"]},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_list_partition(
        model, name="mypartition", values=["1"], comment="test"
    )
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"
    schema_editor.delete_partition(model, "mypartition")
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0
@pytest.mark.parametrize(
    "method,key",
    [
        (PostgresPartitioningMethod.RANGE, ["timestamp"]),
        (PostgresPartitioningMethod.LIST, ["name"]),
    ],
)
def test_schema_editor_add_default_partition(method, key):
    """Tests whether adding a default partition works for both range- and
    list-partitioned models."""
    model = define_fake_partitioned_model(
        {"name": models.TextField(), "timestamp": models.DateTimeField()},
        {"method": method, "key": key},
    )
    schema_editor = PostgresSchemaEditor(connection)
    schema_editor.create_partitioned_model(model)
    schema_editor.add_default_partition(
        model, name="mypartition", comment="test"
    )
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 1
    assert table.partitions[0].name == "mypartition"
    assert (
        table.partitions[0].full_name == f"{model._meta.db_table}_mypartition"
    )
    assert table.partitions[0].comment == "test"
    schema_editor.delete_partition(model, "mypartition")
    table = db_introspection.get_partitioned_table(model._meta.db_table)
    assert len(table.partitions) == 0
| 33.722689 | 79 | 0.72614 |
acf19b91adc13228b62532409c74f984aeb50ef5 | 17,885 | py | Python | sympy/solvers/bivariate.py | brandondavid/sympy | f1769a2a515e801ef4354ad7f0e85ea3bb8ee4d9 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/bivariate.py | brandondavid/sympy | f1769a2a515e801ef4354ad7f0e85ea3bb8ee4d9 | [
"BSD-3-Clause"
] | null | null | null | sympy/solvers/bivariate.py | brandondavid/sympy | f1769a2a515e801ef4354ad7f0e85ea3bb8ee4d9 | [
"BSD-3-Clause"
] | null | null | null | from sympy.core.add import Add
from sympy.core.exprtools import factor_terms
from sympy.core.function import expand_log, _mexpand
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.core.sorting import ordered
from sympy.core.symbol import Dummy
from sympy.functions.elementary.exponential import (LambertW, exp, log)
from sympy.functions.elementary.miscellaneous import root
from sympy.polys.polyroots import roots
from sympy.polys.polytools import Poly, factor
from sympy.simplify.simplify import separatevars
from sympy.simplify.radsimp import collect
from sympy.simplify.simplify import powsimp
from sympy.solvers.solvers import solve, _invert
from sympy.utilities.iterables import uniq
def _filtered_gens(poly, symbol):
    """Return the generators of ``poly`` that involve ``symbol``.

    When two generators are inverses of each other, only the one whose
    denominator is 1 is kept.

    Examples
    ========

    >>> from sympy.solvers.bivariate import _filtered_gens
    >>> from sympy import Poly, exp
    >>> from sympy.abc import x
    >>> _filtered_gens(Poly(x + 1/x + exp(x)), x)
    {x, exp(x)}

    """
    # TODO it would be good to pick the smallest divisible power
    # instead of the base for something like x**4 + x**2 -->
    # return x**2 not x
    filtered = {gen for gen in poly.gens if symbol in gen.free_symbols}
    for gen in list(filtered):
        inverse = 1/gen
        if gen not in filtered or inverse not in filtered:
            continue
        # Drop whichever member of the inverse pair has a denominator.
        if inverse.as_numer_denom()[1] is not S.One:
            filtered.remove(inverse)
        else:
            filtered.remove(gen)
    return filtered
def _mostfunc(lhs, func, X=None):
    """Returns the term in lhs which contains the most of the
    func-type things e.g. log(log(x)) wins over log(x) if both terms appear.

    ``func`` can be a function (exp, log, etc...) or any other SymPy object,
    like Pow.

    If ``X`` is not ``None``, then the function returns the term composed with the
    most ``func`` having the specified variable.

    Examples
    ========

    >>> from sympy.solvers.bivariate import _mostfunc
    >>> from sympy import exp
    >>> from sympy.abc import x, y
    >>> _mostfunc(exp(x) + exp(exp(x) + 2), exp)
    exp(exp(x) + 2)
    >>> _mostfunc(exp(x) + exp(exp(y) + 2), exp)
    exp(exp(y) + 2)
    >>> _mostfunc(exp(x) + exp(exp(y) + 2), exp, x)
    exp(x)
    >>> _mostfunc(x, exp, x) is None
    True
    >>> _mostfunc(exp(x) + exp(x*y), exp, x)
    exp(x)
    """
    # Collect the func-type atoms; when X is given, keep only those
    # involving X (free-symbol test for a Symbol, .has otherwise).
    fterms = [tmp for tmp in lhs.atoms(func) if (not X or
        X.is_Symbol and X in tmp.free_symbols or
        not X.is_Symbol and tmp.has(X))]
    if len(fterms) == 1:
        return fterms[0]
    elif fterms:
        # ``ordered`` yields a canonical order so ties break
        # deterministically; ``max`` consumes the iterator directly — no
        # need to materialize it into a list first.
        return max(ordered(fterms), key=lambda x: x.count(func))
    return None
def _linab(arg, symbol):
    """Return ``a, b, X`` assuming ``arg`` can be written as ``a*X + b``
    where ``X`` is a symbol-dependent factor and ``a`` and ``b`` are
    independent of ``symbol``.

    Examples
    ========

    >>> from sympy.solvers.bivariate import _linab
    >>> from sympy.abc import x, y
    >>> from sympy import exp, S
    >>> _linab(S(2), x)
    (2, 0, 1)
    >>> _linab(2*x, x)
    (2, 0, x)
    >>> _linab(y + y*x + 2*x, x)
    (y + 2, y, x)
    >>> _linab(3 + 2*exp(x), x)
    (2, 3, exp(x))
    """
    arg = factor_terms(arg.expand())
    ind, dep = arg.as_independent(symbol)
    if arg.is_Mul and dep.is_Add:
        # Distribute the independent factor over the recursive split.
        coeff, const, xdep = _linab(dep, symbol)
        return ind*coeff, ind*const, xdep
    if arg.is_Add:
        const = ind
        coeff, xdep = separatevars(dep).as_independent(symbol, as_Add=False)
    else:
        const = 0
        coeff, xdep = ind, dep
    if xdep.could_extract_minus_sign():
        # Canonicalize the sign of the symbol-dependent factor.
        coeff = -coeff
        xdep = -xdep
    return coeff, const, xdep
def _lambert(eq, x):
    """
    Given an expression assumed to be in the form
        ``F(X, a..f) = a*log(b*X + c) + d*X + f = 0``
    where X = g(x) and x = g^-1(X), return the Lambert solution,
        ``x = g^-1(-c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(-f/a)))``.

    Returns an empty list whenever the expression violates the assumed
    form.
    """
    eq = _mexpand(expand_log(eq))
    mainlog = _mostfunc(eq, log, x)
    if not mainlog:
        return []  # violated assumptions
    other = eq.subs(mainlog, 0)
    if isinstance(-other, log):
        # peel one level of log off both the main term and the remainder
        eq = (eq - other).subs(mainlog, mainlog.args[0])
        mainlog = mainlog.args[0]
        if not isinstance(mainlog, log):
            return []  # violated assumptions
        other = -(-other).args[0]
        eq += other
    if x not in other.free_symbols:
        return []  # violated assumptions
    d, f, X2 = _linab(other, x)
    logterm = collect(eq - other, mainlog)
    a = logterm.as_coefficient(mainlog)
    if a is None or x in a.free_symbols:
        return []  # violated assumptions
    logarg = mainlog.args[0]
    b, c, X1 = _linab(logarg, x)
    if X1 != X2:
        return []  # violated assumptions
    # invert the generator X1 so we have x(u)
    u = Dummy('rhs')
    xusolns = solve(X1 - u, x)
    # There are infinitely many branches for LambertW
    # but only branches for k = -1 and 0 might be real. The k = 0
    # branch is real and the k = -1 branch is real if the LambertW argument
    # is in range [-1/e, 0]. Since `solve` does not return infinite
    # solutions we will only include the -1 branch if it tests as real.
    # Otherwise, inclusion of any LambertW in the solution indicates to
    # the user that there are imaginary solutions corresponding to
    # different k values.
    lambert_real_branches = [-1, 0]
    sol = []
    # solution of the given Lambert equation is like
    # sol = -c/b + (a/d)*LambertW(arg, k),
    # where arg = d/(a*b)*exp((c*d-b*f)/a/b) and k in lambert_real_branches.
    # Instead of considering the single arg, `d/(a*b)*exp((c*d-b*f)/a/b)`,
    # the individual `p` roots obtained when writing `exp((c*d-b*f)/a/b)`
    # as `exp(A/p) = exp(A)**(1/p)`, where `p` is an Integer, are used.
    # calculating args for LambertW
    num, den = ((c*d-b*f)/a/b).as_numer_denom()
    p, den = den.as_coeff_Mul()
    e = exp(num/den)
    t = Dummy('t')
    args = [d/(a*b)*t for t in roots(t**p - e, t).keys()]
    # calculating solutions from args
    for arg in args:
        for k in lambert_real_branches:
            w = LambertW(arg, k)
            if k and not w.is_real:
                continue
            rhs = -c/b + (a/d)*w
            # map each solution for u back through the inverted generator
            for xu in xusolns:
                sol.append(xu.subs(u, rhs))
    return sol
def _solve_lambert(f, symbol, gens):
    """Return solution to ``f`` if it is a Lambert-type expression
    else raise NotImplementedError.

    For ``f(X, a..f) = a*log(b*X + c) + d*X - f = 0`` the solution
    for ``X`` is ``X = -c/b + (a/d)*W(d/(a*b)*exp(c*d/a/b)*exp(f/a))``.
    There are a variety of forms for `f(X, a..f)` as enumerated below:

    1a1)
      if B**B = R for R not in [0, 1] (since those cases would already
      be solved before getting here) then log of both sides gives
      log(B) + log(log(B)) = log(log(R)) and
      X = log(B), a = 1, b = 1, c = 0, d = 1, f = log(log(R))
    1a2)
      if B*(b*log(B) + c)**a = R then log of both sides gives
      log(B) + a*log(b*log(B) + c) = log(R) and
      X = log(B), d=1, f=log(R)
    1b)
      if a*log(b*B + c) + d*B = R and
      X = B, f = R
    2a)
      if (b*B + c)*exp(d*B + g) = R then log of both sides gives
      log(b*B + c) + d*B + g = log(R) and
      X = B, a = 1, f = log(R) - g
    2b)
      if g*exp(d*B + h) - b*B = c then the log form is
      log(g) + d*B + h - log(b*B + c) = 0 and
      X = B, a = -1, f = -h - log(g)
    3)
      if d*p**(a*B + g) - b*B = c then the log form is
      log(d) + (a*B + g)*log(p) - log(b*B + c) = 0 and
      X = B, a = -1, d = a*log(p), f = -log(d) - g*log(p)
    """
    def _solve_even_degree_expr(expr, t, symbol):
        """Return the unique solutions of equations derived from
        ``expr`` by replacing ``t`` with ``+/- symbol``.

        Parameters
        ==========

        expr : Expr
            The expression which includes a dummy variable t to be
            replaced with +symbol and -symbol.

        symbol : Symbol
            The symbol for which a solution is being sought.

        Returns
        =======

        List of unique solution of the two equations generated by
        replacing ``t`` with positive and negative ``symbol``.

        Notes
        =====

        If ``expr = 2*log(t) + x/2`` then solutions for
        ``2*log(x) + x/2 = 0`` and ``2*log(-x) + x/2 = 0`` are
        returned by this function. Though this may seem
        counter-intuitive, one must note that the ``expr`` being
        solved here has been derived from a different expression. For
        an expression like ``eq = x**2*g(x) = 1``, if we take the
        log of both sides we obtain ``log(x**2) + log(g(x)) = 0``. If
        x is positive then this simplifies to
        ``2*log(x) + log(g(x)) = 0``; the Lambert-solving routines will
        return solutions for this, but we must also consider the
        solutions for ``2*log(-x) + log(g(x))`` since those must also
        be a solution of ``eq`` which has the same value when the ``x``
        in ``x**2`` is negated. If `g(x)` does not have even powers of
        symbol then we do not want to replace the ``x`` there with
        ``-x``. So the role of the ``t`` in the expression received by
        this function is to mark where ``+/-x`` should be inserted
        before obtaining the Lambert solutions.
        """
        nlhs, plhs = [
            expr.xreplace({t: sgn*symbol}) for sgn in (-1, 1)]
        sols = _solve_lambert(nlhs, symbol, gens)
        if plhs != nlhs:
            sols.extend(_solve_lambert(plhs, symbol, gens))
        # uniq is needed for a case like
        # 2*log(t) - log(-z**2) + log(z + log(x) + log(z))
        # where substituting t with +/-x gives all the same solution;
        # uniq, rather than list(set()), is used to maintain canonical
        # order
        return list(uniq(sols))
    nrhs, lhs = f.as_independent(symbol, as_Add=True)
    rhs = -nrhs
    lamcheck = [tmp for tmp in gens
                if (tmp.func in [exp, log] or
                (tmp.is_Pow and symbol in tmp.exp.free_symbols))]
    if not lamcheck:
        raise NotImplementedError()
    if lhs.is_Add or lhs.is_Mul:
        # replacing all even_degrees of symbol with dummy variable t
        # since these will need special handling; non-Add/Mul do not
        # need this handling
        t = Dummy('t', **symbol.assumptions0)
        lhs = lhs.replace(
            lambda i:  # find symbol**even
                i.is_Pow and i.base == symbol and i.exp.is_even,
            lambda i:  # replace t**even
                t**i.exp)
        if lhs.is_Add and lhs.has(t):
            t_indep = lhs.subs(t, 0)
            t_term = lhs - t_indep
            _rhs = rhs - t_indep
            if not t_term.is_Add and _rhs and not (
                    t_term.has(S.ComplexInfinity, S.NaN)):
                eq = expand_log(log(t_term) - log(_rhs))
                return _solve_even_degree_expr(eq, t, symbol)
        elif lhs.is_Mul and rhs:
            # this needs to happen whether t is present or not
            lhs = expand_log(log(lhs), force=True)
            rhs = log(rhs)
            if lhs.has(t) and lhs.is_Add:
                # it expanded from Mul to Add
                eq = lhs - rhs
                return _solve_even_degree_expr(eq, t, symbol)
        # restore symbol in lhs
        lhs = lhs.xreplace({t: symbol})
    lhs = powsimp(factor(lhs, deep=True))
    # make sure we have inverted as completely as possible
    r = Dummy()
    i, lhs = _invert(lhs - r, symbol)
    rhs = i.xreplace({r: rhs})
    # For the first forms:
    #
    # 1a1) B**B = R will arrive here as B*log(B) = log(R)
    #      lhs is Mul so take log of both sides:
    #        log(B) + log(log(B)) = log(log(R))
    # 1a2) B*(b*log(B) + c)**a = R will arrive unchanged so
    #      lhs is Mul, so take log of both sides:
    #        log(B) + a*log(b*log(B) + c) = log(R)
    # 1b) d*log(a*B + b) + c*B = R will arrive unchanged so
    #      lhs is Add, so isolate c*B and expand log of both sides:
    #        log(c) + log(B) = log(R - d*log(a*B + b))
    # Each of the three pattern sections below only runs when no earlier
    # section produced a solution.
    soln = []
    if not soln:
        mainlog = _mostfunc(lhs, log, symbol)
        if mainlog:
            if lhs.is_Mul and rhs != 0:
                soln = _lambert(log(lhs) - log(rhs), symbol)
            elif lhs.is_Add:
                other = lhs.subs(mainlog, 0)
                if other and not other.is_Add and [
                        tmp for tmp in other.atoms(Pow)
                        if symbol in tmp.free_symbols]:
                    if not rhs:
                        diff = log(other) - log(other - lhs)
                    else:
                        diff = log(lhs - other) - log(rhs - other)
                    soln = _lambert(expand_log(diff), symbol)
                else:
                    # it's ready to go
                    soln = _lambert(lhs - rhs, symbol)
    # For the next forms,
    #
    # collect on main exp
    # 2a) (b*B + c)*exp(d*B + g) = R
    #      lhs is mul, so take log of both sides:
    #        log(b*B + c) + d*B = log(R) - g
    # 2b) g*exp(d*B + h) - b*B = R
    #      lhs is add, so add b*B to both sides,
    #      take the log of both sides and rearrange to give
    #        log(R + b*B) - d*B = log(g) + h
    if not soln:
        mainexp = _mostfunc(lhs, exp, symbol)
        if mainexp:
            lhs = collect(lhs, mainexp)
            if lhs.is_Mul and rhs != 0:
                soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
            elif lhs.is_Add:
                # move all but mainexp-containing term to rhs
                other = lhs.subs(mainexp, 0)
                mainterm = lhs - other
                rhs = rhs - other
                if (mainterm.could_extract_minus_sign() and
                        rhs.could_extract_minus_sign()):
                    mainterm *= -1
                    rhs *= -1
                diff = log(mainterm) - log(rhs)
                soln = _lambert(expand_log(diff), symbol)
    # For the last form:
    #
    # 3) d*p**(a*B + g) - b*B = c
    #      collect on main pow, add b*B to both sides,
    #      take log of both sides and rearrange to give
    #        a*B*log(p) - log(b*B + c) = -log(d) - g*log(p)
    if not soln:
        mainpow = _mostfunc(lhs, Pow, symbol)
        if mainpow and symbol in mainpow.exp.free_symbols:
            lhs = collect(lhs, mainpow)
            if lhs.is_Mul and rhs != 0:
                # b*B = 0
                soln = _lambert(expand_log(log(lhs) - log(rhs)), symbol)
            elif lhs.is_Add:
                # move all but mainpow-containing term to rhs
                other = lhs.subs(mainpow, 0)
                mainterm = lhs - other
                rhs = rhs - other
                diff = log(mainterm) - log(rhs)
                soln = _lambert(expand_log(diff), symbol)
    if not soln:
        raise NotImplementedError('%s does not appear to have a solution in '
                                  'terms of LambertW' % f)
    return list(ordered(soln))
def bivariate_type(f, x, y, *, first=True):
    """Given an expression, f, 3 tests will be done to see what type
    of composite bivariate it might be, options for u(x, y) are::

        x*y
        x+y
        x*y+x
        x*y+y

    If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy
    variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and
    equating the solutions to ``u(x, y)`` and then solving for ``x`` or
    ``y`` is equivalent to solving the original expression for ``x`` or
    ``y``. If ``x`` and ``y`` represent two functions in the same
    variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``
    can be solved for ``t`` then these represent the solutions to
    ``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.

    Only positive values of ``u`` are considered.

    Examples
    ========

    >>> from sympy import solve
    >>> from sympy.solvers.bivariate import bivariate_type
    >>> from sympy.abc import x, y
    >>> eq = (x**2 - 3).subs(x, x + y)
    >>> bivariate_type(eq, x, y)
    (x + y, _u**2 - 3, _u)
    >>> uxy, pu, u = _
    >>> usol = solve(pu, u); usol
    [sqrt(3)]
    >>> [solve(uxy - s) for s in solve(pu, u)]
    [[{x: -y + sqrt(3)}]]
    >>> all(eq.subs(s).equals(0) for sol in _ for s in sol)
    True

    """
    u = Dummy('u', positive=True)
    if first:
        # NOTE(review): the first pass re-runs the test with plain Dummy
        # symbols in place of x and y — presumably to strip any assumptions
        # carried by x and y; confirm before changing.
        p = Poly(f, x, y)
        f = p.as_expr()
        _x = Dummy()
        _y = Dummy()
        rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False)
        if rv:
            reps = {_x: x, _y: y}
            return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2]
        return
    p = f
    f = p.as_expr()
    # f(x*y)
    args = Add.make_args(p.as_expr())
    new = []
    for a in args:
        a = _mexpand(a.subs(x, u/y))
        free = a.free_symbols
        if x in free or y in free:
            break
        new.append(a)
    else:
        # for/else: every term reduced to a function of u alone
        return x*y, Add(*new), u
    def ok(f, v, c):
        # substitution result, or None if x or y survives the substitution
        new = _mexpand(f.subs(v, c))
        free = new.free_symbols
        return None if (x in free or y in free) else new
    # f(a*x + b*y)
    new = []
    d = p.degree(x)
    if p.degree(y) == d:
        a = root(p.coeff_monomial(x**d), d)
        b = root(p.coeff_monomial(y**d), d)
        new = ok(f, x, (u - b*y)/a)
        if new is not None:
            return a*x + b*y, new, u
    # f(a*x*y + b*y)
    new = []
    d = p.degree(x)
    if p.degree(y) == d:
        for itry in range(2):
            a = root(p.coeff_monomial(x**d*y**d), d)
            b = root(p.coeff_monomial(y**d), d)
            new = ok(f, x, (u - b*y)/a/y)
            if new is not None:
                return a*x*y + b*y, new, u
            # retry with the roles of x and y swapped
            x, y = y, x
| 35 | 86 | 0.541403 |
acf19c6ad1cb80de16d9d21f49550c2c620f5c31 | 14,290 | py | Python | tests/tagulous_tests_app/models.py | viaregio/django-tagulous | 41b836aa96b29d29fbd80a7d05d633d188c71785 | [
"Apache-2.0"
] | null | null | null | tests/tagulous_tests_app/models.py | viaregio/django-tagulous | 41b836aa96b29d29fbd80a7d05d633d188c71785 | [
"Apache-2.0"
] | null | null | null | tests/tagulous_tests_app/models.py | viaregio/django-tagulous | 41b836aa96b29d29fbd80a7d05d633d188c71785 | [
"Apache-2.0"
] | null | null | null | """
Test models
"""
from __future__ import unicode_literals
from django.db import models
from django.utils import six
import tagulous
###############################################################################
####### Models for testing TagModel
###############################################################################
class TagMetaAbstractModel(tagulous.models.TagModel):
    """
    An abstract tag model with TagMeta definition
    """
    class Meta:
        abstract = True
    class TagMeta:
        # Base tag options; TagMetaModel below overrides/extends these
        initial = 'Adam, Brian, Chris'
        force_lowercase = True
        max_count = 5
class TagMetaModel(TagMetaAbstractModel):
    """
    A tag model which inherits from TagMetaAbstractModel, with new and changed
    TagMeta values
    """
    class TagMeta:
        max_count = 10
        case_sensitive = True
class TagSlugShorterModel(tagulous.models.BaseTagModel):
    """
    A tag model with a slug field shorter than the name

    Used to exercise slug truncation: a 20-char name must fit a 10-char slug.
    """
    name = models.CharField(max_length=20, unique=True)
    slug = models.SlugField(max_length=10)
    # Other fields we're not interested in testing but need to have anyway
    count = models.IntegerField(default=0)
    protected = models.BooleanField(default=False)
    class Meta:
        ordering = ('name',)
        unique_together = (('slug',),)
class TagMetaUser(models.Model):
    """
    A tagged model which uses the TagMetaModel
    """
    name = models.CharField(blank=True, max_length=100)
    two = tagulous.models.TagField(TagMetaModel, blank=True)
###############################################################################
####### Models for testing SingleTagField
###############################################################################
class SingleTagFieldModel(models.Model):
    """
    For testing simple single tag fields
    """
    name = models.CharField(blank=True, max_length=100)
    title = tagulous.models.SingleTagField(blank=True, null=True)
class SingleTagFieldOptionalModel(models.Model):
    """
    Test optional single tag fields
    """
    name = models.CharField(blank=True, max_length=100)
    tag = tagulous.models.SingleTagField(blank=True, null=True)
class SingleTagFieldRequiredModel(models.Model):
    """
    Test required single tag fields
    """
    name = models.CharField(blank=True, max_length=100)
    tag = tagulous.models.SingleTagField(blank=False, null=False)
class SingleTagFieldMultipleModel(models.Model):
    """
    For testing multiple single tag fields
    """
    name = models.CharField(blank=True, max_length=100)
    tag1 = tagulous.models.SingleTagField(blank=False, null=False)
    tag2 = tagulous.models.SingleTagField(blank=False, null=False)
    tag3 = tagulous.models.SingleTagField(blank=False, null=False)
class SingleTagFieldOptionsModel(models.Model):
    """
    For testing model and form SingleTagField options

    Each tag option under test gets its own field; paired ``*_true`` /
    ``*_false`` fields exercise both settings of a boolean option.
    """
    name = models.CharField(blank=True, max_length=100)
    initial_string = tagulous.models.SingleTagField(
        blank=True, null=True, initial='Mr, Mrs, Ms',
    )
    initial_list = tagulous.models.SingleTagField(
        blank=True, null=True, initial=['Mr', 'Mrs', 'Ms'],
        autocomplete_initial=True,
    )
    protect_initial_true = tagulous.models.SingleTagField(
        blank=True, null=True, protect_initial=True, initial='Mr',
    )
    protect_initial_false = tagulous.models.SingleTagField(
        blank=True, null=True, protect_initial=False, initial='Mr',
    )
    protect_all_true = tagulous.models.SingleTagField(
        blank=True, null=True, protect_all=True,
    )
    protect_all_false = tagulous.models.SingleTagField(
        blank=True, null=True, protect_all=False,
    )
    case_sensitive_true = tagulous.models.SingleTagField(
        blank=True, null=True, case_sensitive=True, initial='Mr',
    )
    case_sensitive_false = tagulous.models.SingleTagField(
        blank=True, null=True, case_sensitive=False, initial='Mr',
    )
    force_lowercase_true = tagulous.models.SingleTagField(
        blank=True, null=True, force_lowercase=True,
    )
    force_lowercase_false = tagulous.models.SingleTagField(
        blank=True, null=True, force_lowercase=False,
    )
    # max_count doesn't apply to SingleTagField
    autocomplete_view = tagulous.models.SingleTagField(
        blank=True, null=True,
        autocomplete_view='tagulous_tests_app-null',
    )
    autocomplete_limit = tagulous.models.SingleTagField(
        blank=True, null=True,
        autocomplete_limit=3,
        # Limit only takes effect when there's a view
        autocomplete_view='tagulous_tests_app-null',
    )
    autocomplete_settings = tagulous.models.SingleTagField(
        blank=True, null=True, autocomplete_settings={
            'setting1': 1,
            'setting2': True,
            'setting3': 'example',
        }
    )
    class Meta:
        # Must set a short verbose name - tagulous auto-generated model
        # verbose names will be too long otherwise
        verbose_name = 'STFOM'
        ordering = ('name',)
###############################################################################
####### Models for testing TagField
###############################################################################
class TagFieldModel(models.Model):
    """
    For testing basic tags

    A TagField with all default options, plus an optional name field.
    """
    name = models.CharField(blank=True, max_length=100)
    tags = tagulous.models.TagField()
class TagFieldOptionalModel(models.Model):
    """
    Test optional tag fields

    The tag field sets blank=True, so an empty value is accepted.
    """
    name = models.CharField(blank=True, max_length=100)
    tag = tagulous.models.TagField(blank=True)
class TagFieldRequiredModel(models.Model):
    """
    Test required tag fields

    blank=False and null=False make the tag field mandatory.
    """
    name = models.CharField(blank=True, max_length=100)
    tag = tagulous.models.TagField(blank=False, null=False)
class TagFieldMultipleModel(models.Model):
    """
    For testing multiple tag fields

    Three independent required TagFields on a single model.
    """
    name = models.CharField(blank=True, max_length=100)
    tags1 = tagulous.models.TagField(blank=False, null=False)
    tags2 = tagulous.models.TagField(blank=False, null=False)
    tags3 = tagulous.models.TagField(blank=False, null=False)
class TagFieldOptionsModel(models.Model):
    """
    For testing model and form TagField options

    Each field name describes the tagulous option value(s) it sets, so
    tests can pick the field matching the option under test.
    """
    name = models.CharField(blank=True, max_length=100)
    # initial given as a comma-separated string
    initial_string = tagulous.models.TagField(
        blank=True, initial='Adam, Brian, Chris',
    )
    # initial given as a list, with autocomplete_initial enabled
    initial_list = tagulous.models.TagField(
        blank=True, initial=['Adam', 'Brian', 'Chris'],
        autocomplete_initial=True,
    )
    protect_initial_true = tagulous.models.TagField(
        blank=True, protect_initial=True, initial='Adam',
    )
    protect_initial_false = tagulous.models.TagField(
        blank=True, protect_initial=False, initial='Adam',
    )
    protect_all_true = tagulous.models.TagField(
        blank=True, protect_all=True,
    )
    protect_all_false = tagulous.models.TagField(
        blank=True, protect_all=False,
    )
    case_sensitive_true = tagulous.models.TagField(
        blank=True, case_sensitive=True, initial='Adam',
    )
    case_sensitive_false = tagulous.models.TagField(
        blank=True, case_sensitive=False, initial='Adam',
    )
    force_lowercase_true = tagulous.models.TagField(
        blank=True, force_lowercase=True,
    )
    force_lowercase_false = tagulous.models.TagField(
        blank=True, force_lowercase=False,
    )
    # case_sensitive_true_force_lowercase_true - abbreviated to avoid problems
    # with databases that have field name length limits
    cs_true_fl_true = tagulous.models.TagField(
        blank=True, case_sensitive=False, force_lowercase=True,
        verbose_name_singular='case sensitive test',
    )
    max_count = tagulous.models.TagField(
        blank=True, max_count=3,
    )
    autocomplete_view = tagulous.models.TagField(
        blank=True,
        autocomplete_view='tagulous_tests_app-unlimited',
    )
    autocomplete_limit = tagulous.models.TagField(
        blank=True,
        autocomplete_limit=3,
        # Limit only takes effect when there's a view
        autocomplete_view='tagulous_tests_app-limited',
    )
    autocomplete_settings = tagulous.models.TagField(
        blank=True, autocomplete_settings={
            'setting1': 1,
            'setting2': True,
            'setting3': 'example',
        }
    )

    class Meta:
        # Set a short verbose name for tagulous auto-generated verbose name
        verbose_name = 'TFOM'
        ordering = ('name',)
###############################################################################
####### Models for testing a mix of fields
###############################################################################
class SimpleMixedTest(models.Model):
    """
    For tests which need a SingleTagField and TagField

    Both fields are optional and no explicit tag model is specified.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(blank=True)
    tags = tagulous.models.TagField(blank=True)
class MixedTestTagModel(tagulous.models.TagModel):
    """Custom tag model whose TagMeta supplies get_absolute_url."""
    class TagMeta:
        def get_absolute_url(self):
            return 'url for %s' % self
class MixedTest(models.Model):
    """
    For tests where it's useful for the SingleTagField and TagField to share
    a tag model

    Both fields point at MixedTestTagModel, with distinct related_names.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(
        MixedTestTagModel, related_name='mixed_singletag',
        blank=True,
    )
    tags = tagulous.models.TagField(
        MixedTestTagModel, related_name='mixed_tags',
        blank=True,
    )

    class Meta:
        ordering = ('name',)
class MixedRefTest(models.Model):
    """
    Multiple models referencing tag tables

    Reuses the tag models behind MixedTest's fields via their
    ``tag_model`` descriptor attribute.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(
        MixedTest.singletag.tag_model, related_name='mixed_ref_singletag',
        blank=True,
    )
    tags = tagulous.models.TagField(
        MixedTest.tags.tag_model, related_name='mixed_ref_tags',
        blank=True,
    )
class NonTagRefTest(models.Model):
    """
    ForeignKeys and ManyToManyFields directly referencing a tag model

    Uses plain Django relation fields (not tagulous fields) against the
    tag models behind MixedTest's fields.
    """
    name = models.CharField(max_length=10)
    fk = models.ForeignKey(
        MixedTest.singletag.tag_model, related_name='non_tag_fk',
        blank=True, on_delete=models.CASCADE,
    )
    mm = models.ManyToManyField(
        MixedTest.tags.tag_model, related_name='non_tag_mm',
        blank=True,
    )
class MixedNonTagModel(tagulous.models.TagModel):
    """Plain custom tag model, shared by tag fields and FK/M2M below."""
    pass
class MixedNonTagRefTest(models.Model):
    """
    Tag fields and conventional relationships referencing a tag model

    All four fields point at the same MixedNonTagModel, each with its own
    related_name.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(MixedNonTagModel, blank=True, related_name='singletags')
    tags = tagulous.models.TagField(MixedNonTagModel, blank=True, related_name='tags')
    fk = models.ForeignKey(MixedNonTagModel, blank=True, null=True, related_name='fk', on_delete=models.CASCADE)
    mm = models.ManyToManyField(MixedNonTagModel, blank=True, related_name='mm')
class MixedOrderTest(models.Model):
    """
    For testing ordering of a SingleTagField and TagField when next to other
    M2M and non-M2M fields

    Char, FK, M2M and tag fields are deliberately interleaved so tests can
    check field ordering is preserved.
    """
    char1 = models.CharField(blank=True, max_length=10)
    fk1 = models.ForeignKey(MixedTest, related_name="order_fk1", on_delete=models.CASCADE)
    char2 = models.CharField(blank=True, max_length=10)
    single1 = tagulous.models.SingleTagField()
    char3 = models.CharField(blank=True, max_length=10)
    m2m1 = models.ManyToManyField(MixedTest, related_name="order_m2m1")
    char4 = models.CharField(blank=True, max_length=10)
    multi1 = tagulous.models.TagField()
    char5 = models.CharField(blank=True, max_length=10)
    m2m2 = models.ManyToManyField(MixedTest, related_name="order_m2m2")
    char6 = models.CharField(blank=True, max_length=10)
    fk2 = models.ForeignKey(MixedTest, related_name="order_fk2", on_delete=models.CASCADE)
    char7 = models.CharField(blank=True, max_length=10)
class MixedStringTagModel(tagulous.models.TagModel):
    """Tag model referenced by string name (see MixedStringTo below)."""
    pass
class MixedStringTo(models.Model):
    """
    A tagged model with fields which refers to a tag model by string, rather
    than by class
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(
        'MixedStringTagModel', related_name='tag_meta_string_singletag',
        blank=True,
    )
    tags = tagulous.models.TagField(
        'MixedStringTagModel', related_name='tag_meta_string_tags',
        blank=True,
    )
class MixedSelfTo(tagulous.models.TagModel):
    """
    A tagged tag model, with tag fields which refers itself using 'self'
    """
    alternate = tagulous.models.SingleTagField('self', blank=True)
    related = tagulous.models.TagField('self', blank=True)

    class TagMeta:
        # Tag options applied to this model's own tags
        force_lowercase = True
class TreeTest(models.Model):
    """
    For testing tag trees

    Both fields set tree=True.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(tree=True, blank=True)
    tags = tagulous.models.TagField(tree=True, blank=True)
class CustomTagTree(tagulous.models.TagTreeModel):
    """
    Custom tag tree model

    Used (by string reference) by CustomTreeTest below.
    """
    pass
class CustomTreeTest(models.Model):
    """
    For testing custom tag trees

    References CustomTagTree by string name.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(
        'CustomTagTree', blank=True, related_name='custom_singletag',
    )
    tags = tagulous.models.TagField(
        'CustomTagTree', blank=True, related_name='custom_tags',
    )
class ManyToOneTest(models.Model):
    """
    Add a reverse FK to MixedRefTest for serialization tests
    """
    name = models.CharField(max_length=10)
    # on_delete made explicit (CASCADE was the implicit default) for
    # consistency with every other ForeignKey in this module, and because
    # Django 2.0+ makes the argument mandatory.
    mixed_ref_test = models.ForeignKey(
        MixedRefTest, related_name='many_to_one',
        on_delete=models.CASCADE,
    )
class CustomTagBase(tagulous.models.TagModel):
    """
    For testing custom tag base models for auto-generated tag models

    Abstract; ``is_custom`` is a marker attribute tests can check for on
    models generated from this base.
    """
    is_custom = True

    class Meta:
        abstract = True
class CustomTagBaseTest(models.Model):
    """
    Test custom tag base model

    Passes to_base so the auto-generated tag model inherits CustomTagBase.
    """
    name = models.CharField(max_length=10)
    singletag = tagulous.models.SingleTagField(
        to_base=CustomTagBase, blank=True, related_name='custom_singletag',
    )
| 32.926267 | 112 | 0.656963 |
acf19c7d669b97020e3b689c4099017115436882 | 103,225 | py | Python | lib/python2.7/site-packages/django/test/_doctest.py | Shailendre/simpleproject | cd7319636d0569be06bb9dab4c5546c1e9542b07 | [
"BSD-2-Clause"
] | 2 | 2016-09-27T09:30:19.000Z | 2016-10-17T01:47:43.000Z | env/lib/python2.7/site-packages/django/test/_doctest.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 10 | 2019-12-26T17:31:31.000Z | 2022-03-21T22:17:33.000Z | env/lib/python2.7/site-packages/django/test/_doctest.py | luiscarlosgph/nas | e5acee61e8bbf12c34785fe971ce7df8dee775d4 | [
"MIT"
] | 1 | 2015-02-21T07:59:08.000Z | 2015-02-21T07:59:08.000Z | # This is a slightly modified version of the doctest.py that shipped with Python 2.5
# It incorporates changes that have been submitted to the Python ticket tracker
# as ticket #1521051. These changes allow for a DoctestRunner and Doctest base
# class to be specified when constructing a DoctestSuite.
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
import warnings
from django.utils.deprecation import RemovedInDjango18Warning
warnings.warn(
"The django.test._doctest module is deprecated; "
"use the doctest module from the Python standard library instead.",
RemovedInDjango18Warning)
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re
import unittest, difflib, pdb, tempfile
import warnings
from django.utils import six
from django.utils.six.moves import StringIO
if sys.platform.startswith('java'):
    # On Jython, isclass() reports some modules as classes. Patch it.
    def patch_isclass(isclass):
        # Wrap the original isclass so it additionally requires a
        # __module__ attribute, which the misreported modules lack.
        def patched_isclass(obj):
            return isclass(obj) and hasattr(obj, '__module__')
        return patched_isclass
    inspect.isclass = patch_isclass(inspect.isclass)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Return the flag bit for `name`, allocating a fresh bit if new."""
    try:
        return OPTIONFLAGS_BY_NAME[name]
    except KeyError:
        flag = 1 << len(OPTIONFLAGS_BY_NAME)
        OPTIONFLAGS_BY_NAME[name] = flag
        return flag
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
SKIP |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`.  In particular:
      - If `module` is a module, then return module.
      - If `module` is a string, then import and return the
        module with that name.
      - If `module` is None, then return the calling module.
        The calling module is assumed to be the module of
        the stack frame at the given depth in the call stack.

    Raises TypeError for any other argument type.
    """
    if inspect.ismodule(module):
        return module
    elif isinstance(module, six.string_types):
        return __import__(module, globals(), locals(), ["*"])
    elif module is None:
        # Walk `depth` frames up the stack and look the caller's module up
        # by its __name__; depth=2 skips this function plus its caller.
        return sys.modules[sys._getframe(depth).f_globals['__name__']]
    else:
        raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative):
    # Load a doctest file and return (contents, resolved filename).
    # When module_relative is true, `filename` is resolved relative to
    # `package` (a module, module name, or None meaning the caller's
    # module), and a PEP 302 loader's get_data() is preferred if present.
    if module_relative:
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        if hasattr(package, '__loader__'):
            if hasattr(package.__loader__, 'get_data'):
                file_contents = package.__loader__.get_data(filename)
                # get_data() opens files as 'rb', so one must do the equivalent
                # conversion as universal newlines would do.
                return file_contents.replace(os.linesep, '\n'), filename
    with open(filename) as fp:
        return fp.read(), filename
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    # Stand-in for sys.stdout while doctest examples execute: normalizes
    # the captured value and clears Python 2 print-statement softspace.
    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        # Prevent softspace from screwing up the next test case, in
        # case they used print with a trailing comma in an example.
        if hasattr(self, "softspace"):
            del self.softspace
        return result

    def truncate(self, size=None):
        # Delegate, then drop any leftover softspace state as above.
        StringIO.truncate(self, size)
        if hasattr(self, "softspace"):
            del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Worst-case linear-time match of `got` against `want`, where `want`
    may contain ELLIPSIS_MARKER ('...') wildcards matching any text.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # Find "the real" strings.
    ws = want.split(ELLIPSIS_MARKER)
    assert len(ws) >= 2

    # Deal with exact matches possibly needed at one or both ends.
    startpos, endpos = 0, len(got)
    w = ws[0]
    if w:   # starts with exact match
        if got.startswith(w):
            startpos = len(w)
            del ws[0]
        else:
            return False
    w = ws[-1]
    if w:   # ends with exact match
        if got.endswith(w):
            endpos -= len(w)
            del ws[-1]
        else:
            return False

    if startpos > endpos:
        # Exact end matches required more characters than we have, as in
        # _ellipsis_match('aa...aa', 'aaa')
        return False

    # For the rest, we only need to find the leftmost non-overlapping
    # match for each piece.  If there's no overall match that way alone,
    # there's no overall match period.
    for w in ws:
        # w may be '' at times, if there are consecutive ellipses, or
        # due to an ellipsis at the start or end of `want`.  That's OK.
        # Search for an empty string succeeds, and doesn't change startpos.
        startpos = got.find(w, startpos, endpos)
        if startpos < 0:
            return False
        startpos += len(w)

    return True
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` receives the debugger's own interactive output.
        self.__out = out
        self.__debugger_used = False
        pdb.Pdb.__init__(self, stdout=out)

    def set_trace(self, frame=None):
        self.__debugger_used = True
        if frame is None:
            # Default to the caller's frame, not this method's own.
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore the real stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError('Expected a module: %r' % module)
if path.startswith('/'):
raise ValueError('Module-relative files may not have absolute paths')
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    One doctest example: a fragment of source code plus its expected
    output.  Attributes:

      - source: the Python statement(s); always newline-terminated.
      - want: expected output (stdout or a traceback); newline-terminated
        unless empty, in which case it is the empty string.
      - exc_msg: expected exception message (newline-terminated), or None
        when no exception is expected.  Compared against the output of
        `traceback.format_exception_only()`.
      - lineno: zero-based line offset of this example within its DocTest
        string.
      - indent: number of space characters before the example's first
        prompt.
      - options: mapping {option flag: bool} of per-example overrides of
        the runner's default optionflags; unset flags keep their default.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Guarantee trailing newlines on the textual fields, then store.
        self.source = source if source.endswith('\n') else source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        self.want = want
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options
class DocTest:
    """
    A collection of doctest examples that should be run in a single
    namespace.  Each `DocTest` defines the following attributes:

      - examples: the list of examples.

      - globs: The namespace (aka globals) that the examples should
        be run in.

      - name: A name identifying the DocTest (typically, the name of
        the object whose docstring this DocTest was extracted from).

      - filename: The name of the file that this DocTest was extracted
        from, or `None` if the filename is unknown.

      - lineno: The line number within filename where this DocTest
        begins, or `None` if the line number is unavailable.  This
        line number is zero-based, with respect to the beginning of
        the file.

      - docstring: The string that the examples were extracted from,
        or `None` if the string is unavailable.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest containing the given examples.  The
        DocTest's globals are initialized with a copy of `globs`.
        """
        assert not isinstance(examples, six.string_types), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        # Copy so examples can mutate their namespace without affecting
        # the caller's dict.
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        if len(self.examples) == 0:
            examples = 'no examples'
        elif len(self.examples) == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % len(self.examples)
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    # This lets us sort tests by name:
    def _cmpkey(self):
        return (self.name, self.filename, self.lineno, id(self))

    # __cmp__ is only consulted on Python 2; the `cmp` builtin does not
    # exist on Python 3, where the rich comparisons below are used instead.
    def __cmp__(self, other):
        if not isinstance(other, DocTest):
            return -1
        return cmp(self._cmpkey(), other._cmpkey())

    def __lt__(self, other):
        return self._cmpkey() < other._cmpkey()

    def __le__(self, other):
        return self._cmpkey() <= other._cmpkey()

    def __gt__(self, other):
        return self._cmpkey() > other._cmpkey()

    def __ge__(self, other):
        return self._cmpkey() >= other._cmpkey()

    def __eq__(self, other):
        return self._cmpkey() == other._cmpkey()

    def __ne__(self, other):
        return self._cmpkey() != other._cmpkey()
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value if its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join(l[min_indent:] for l in string.split('\n'))
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append(Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options))
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        # Delegate example extraction to get_examples; the DocTest keeps
        # the original string as its docstring attribute.
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where `source`
        is the matched example's source code (with prompts and
        indentation stripped); `options` is the dict of option-directive
        overrides; `want` is the example's expected output (with
        indentation stripped); and `exc_msg` is the expected exception
        message, or None.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        # indent+4 skips the indentation plus the 4-char '>>> '/'... ' prompt.
        source = '\n'.join(sl[indent+4:] for sl in source_lines)

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join(wl[indent:] for wl in want_lines)

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example.  Option directives are comments
# starting with "doctest:".  Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:".  Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                  re.MULTILINE)

def _find_options(self, source, name, lineno):
    """
    Return a dictionary containing option overrides extracted from
    option directives in the given source string.

    The returned dict maps option flags (ints from
    OPTIONFLAGS_BY_NAME) to booleans: True for "+NAME", False for
    "-NAME".

    `name` is the string's name, and `lineno` is the line number
    where the example starts; both are used for error messages.
    Raises ValueError for an unknown/malformed option, or for a
    directive that appears on a line with no actual example code.
    """
    options = {}
    # (note: with the current regexp, this will match at most once:)
    for m in self._OPTION_DIRECTIVE_RE.finditer(source):
        # Directives may be comma- and/or space-separated.
        option_strings = m.group(1).replace(',', ' ').split()
        for option in option_strings:
            # Each option must be "+NAME" or "-NAME" where NAME is a
            # registered option flag.
            if (option[0] not in '+-' or
                option[1:] not in OPTIONFLAGS_BY_NAME):
                raise ValueError('line %r of the doctest for %s '
                                 'has an invalid option: %r' %
                                 (lineno+1, name, option))
            flag = OPTIONFLAGS_BY_NAME[option[1:]]
            # "+" enables the flag for this example, "-" disables it.
            options[flag] = (option[0] == '+')
    # A directive attached to a blank/comment-only "example" is an error.
    if options and self._IS_BLANK_OR_COMMENT(source):
        raise ValueError('line %r of the doctest for %s has an option '
                         'directive on a line with no example: %r' %
                         (lineno, name, source))
    return options
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects.  Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.
    """

    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty

    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
            source_lines = linecache.getlines(file)
            if not source_lines:
                source_lines = None
        except TypeError:
            # Built-in objects raise TypeError from inspect.getfile.
            source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        return tests

    def _from_module(self, module, object):
        """
        Return true if the given object is defined in the given
        module.
        """
        if module is None:
            return True
        elif inspect.isfunction(object):
            return module.__dict__ is six.get_function_globals(object)
        elif inspect.isclass(object):
            return module.__name__ == object.__module__
        elif inspect.getmodule(object) is not None:
            return module is inspect.getmodule(object)
        elif hasattr(object, '__module__'):
            return module.__name__ == object.__module__
        elif isinstance(object, property):
            return True # [XX] no way not be sure.
        else:
            raise ValueError("object must be a class or function")

    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.  `seen` maps id(obj) -> 1 to break cycles
        and avoid duplicate extraction.
        """
        if self._verbose:
            print('Finding tests in %s' % name)

        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1

        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)

        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                if ((inspect.isfunction(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, six.string_types):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, six.string_types)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)

        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod: unwrap
                # the descriptor so we recurse into the real function.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__

                # Recurse to methods, properties, and nested classes.
                if ((inspect.isfunction(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)

    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        # Extract the object's docstring.  If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, six.string_types):
            docstring = obj
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, six.string_types):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''

        # Find the docstring's location in the file.
        lineno = self._find_lineno(obj, source_lines)

        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None

        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            if filename[-4:] in (".pyc", ".pyo"):
                # Point at the .py source, not the compiled file.
                filename = filename[:-1]
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)

    def _find_lineno(self, obj, source_lines):
        """
        Return a line number of the given object's docstring.  Note:
        this method assumes that the object has a docstring.
        Returns None when the line number cannot be determined.
        """
        lineno = None

        # Find the line number for modules.
        if inspect.ismodule(obj):
            lineno = 0

        # Find the line number for classes.
        # Note: this could be fooled if a class is defined multiple
        # times in a single file.
        if inspect.isclass(obj):
            if source_lines is None:
                return None
            pat = re.compile(r'^\s*class\s*%s\b' %
                             getattr(obj, '__name__', '-'))
            for i, line in enumerate(source_lines):
                if pat.match(line):
                    lineno = i
                    break

        # Find the line number for functions & methods.
        if inspect.ismethod(obj): obj = obj.__func__
        if inspect.isfunction(obj): obj = six.get_function_code(obj)
        if inspect.istraceback(obj): obj = obj.tb_frame
        if inspect.isframe(obj): obj = obj.f_code
        if inspect.iscode(obj):
            # Guard the getattr default so a missing co_firstlineno
            # leaves lineno as-is instead of computing None - 1.
            co_firstlineno = getattr(obj, 'co_firstlineno', None)
            if co_firstlineno is not None:
                lineno = co_firstlineno - 1

        # Find the line number where the docstring starts.  Assume
        # that it's the first line that begins with a quote mark.
        # Note: this could be fooled by a multiline function
        # signature, where a continuation line begins with a quote
        # mark.
        if lineno is not None:
            if source_lines is None:
                return lineno+1
            # Raw string: '\s'/'\w' in a non-raw literal are invalid
            # escape sequences on modern CPython.
            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
            for lineno in range(lineno, len(source_lines)):
                if pat.match(source_lines[lineno]):
                    return lineno

        # We couldn't find the line number.
        return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.

        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> for test in tests:
        ...     print(runner.run(test))
        (0, 2)
        (0, 1)
        (0, 2)
        (0, 2)

    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:

        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        (0, 7)

    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:

        >>> runner.tries
        7
        >>> runner.failures
        0

    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.

    The test runner's display output can be controlled in two ways.
    First, an output function (`out`) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70

    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.

        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.

        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.

        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags

        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (failures, tries) for summarize()/merge().
        self._name2ft = {}

        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()

    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////

    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')

    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")

    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))

    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))

    def _failure_header(self, test, example):
        """Build the common 'File ..., line ...' header for failure reports."""
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)

    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////

    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.

        NOTE: expects `self.debugger` to have been set up by `run()`
        before this method is called.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0

        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags

        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state

        check = self._checker.check_output

        # Process each example.
        for examplenum, example in enumerate(test.examples):

            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)

            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag

            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue

            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)

            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)

            # Doctest and Py3 issue:
            # If the current example that we wish to run is going to fail
            # because it expects a leading u"", then use an alternate displayhook
            # NOTE(review): this hook strips *every* 'u'/'b' character from
            # both repr and expected text, not just literal prefixes -- a
            # known limitation of this backport (see TODO below); confirm
            # before relying on repr-based comparisons.
            original_displayhook = sys.displayhook

            if six.PY3:
                # only set alternate displayhook if Python 3.x or after
                lines = []
                def py3_displayhook(value):
                    if value is None:
                        # None should not be considered at all
                        return original_displayhook(value)

                    # Collect the repr output in one variable
                    s = repr(value)
                    # Strip b"" and u"" prefixes from the repr and expected output
                    # TODO: better way of stripping the prefixes?
                    expected = example.want
                    expected = expected.strip()  # be wary of newlines
                    s = s.replace("u", "")
                    s = s.replace("b", "")
                    expected = expected.replace("u", "")
                    expected = expected.replace("b", "")
                    # single quote vs. double quote should not matter
                    # default all quote marks to double quote
                    s = s.replace("'", '"')
                    expected = expected.replace("'", '"')

                    # In case of multi-line expected result
                    lines.append(s)

                    # let them match
                    if s == expected:     # be wary of false positives here
                        # they should be the same, print expected value
                        sys.stdout.write("%s\n" % example.want.strip())

                    # multi-line expected output, doctest uses loop
                    elif len(expected.split("\n")) == len(lines):
                        if "\n".join(lines) == expected:
                            sys.stdout.write("%s\n" % example.want.strip())
                        else:
                            sys.stdout.write("%s\n" % repr(value))
                    elif len(expected.split("\n")) != len(lines):
                        # we are not done looping yet, do not print anything!
                        pass
                    else:
                        sys.stdout.write("%s\n" % repr(value))

                sys.displayhook = py3_displayhook

            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                six.exec_(compile(example.source, filename, "single",
                                  compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            finally:
                # restore the original displayhook
                sys.displayhook = original_displayhook

            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)

            # Python 3.1 requires seek after truncate
            self._fakeout.seek(0)

            outcome = FAILURE   # guilty until proved innocent or insane

            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS

            # The example raised an exception:  check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if six.PY3:
                    # module name will be in group(1) and the expected
                    # exception message will be in group(2)
                    m = re.match(r'(.*)\.(\w+:.+\s)', exc_msg)
                    # make sure there's a match
                    if m is not None:
                        f_name = m.group(1)
                        # check to see if m.group(1) contains the module name
                        if f_name == exception[0].__module__:
                            # strip the module name from exc_msg
                            exc_msg = m.group(2)
                if not quiet:
                    got += _exception_traceback(exception)

                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM

                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS

                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    m1 = re.match(r'[^:]*:', example.exc_msg)
                    m2 = re.match(r'[^:]*:', exc_msg)
                    if m1 and m2 and check(m1.group(0), m2.group(0),
                                           self.optionflags):
                        outcome = SUCCESS

            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)

        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags

        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return failures, tries

    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t

    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>[\w\.]+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        """Serve example source for our synthetic <doctest ...> filenames;
        delegate everything else to the saved linecache.getlines."""
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(True)
        else:
            return self.save_linecache_getlines(filename, module_globals)

    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.

        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.

        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.

        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test

        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)

        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout

        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace

        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines

        try:
            return self.__run(test, compileflags, out)
        finally:
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()

    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.

        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append((name, t))
            else:
                failed.append(x)
        if verbose:
            if notests:
                print("%d items had no tests:" % len(notests))
                notests.sort()
                for thing in notests:
                    print(" %s" % thing)
            if passed:
                print("%d items passed all tests:" % len(passed))
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print("%d items had failures:" % len(failed))
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            # Bug fix: the original printed the arguments swapped
            # ("%d tests in % d items" % (len(self._name2ft), totalt)),
            # which contradicts the expected output in this class's own
            # docstring ("7 tests in 4 items.").
            print("%d tests in %d items." % (totalt, len(self._name2ft)))
            print("%d passed and %d failed." % (totalt - totalf, totalf))
        if totalf:
            print("***Test Failed*** %d failures." % totalf)
        elif verbose:
            print("Test passed.")
        return totalf, totalt

    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """Fold `other`'s per-test (failures, tries) tallies into this
        runner, summing counts for names present in both."""
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                print("*** DocTestRunner.merge: '" + name + "' in both"
                      " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True

        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True

        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        # (Patterns are raw strings: '\s' in a non-raw literal is an
        # invalid escape sequence on modern CPython.)
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True

        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True

        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True

        # We didn't find any match; return false.
        return False

    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        """Return True if `output_difference` should render a difflib
        diff rather than the plain Expected/Got listing."""
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False

        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False

        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True

        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2

    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(True)  # True == keep line ends
            got_lines = got.splitlines(True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))

        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised, in debugging mode, when a DocTest example fails.

    Instance attributes:

      - test:    the DocTest object being run
      - example: the Example object that failed
      - got:     the actual output
    """
    def __init__(self, test, example, got):
        self.test, self.example, self.got = test, example, got

    def __str__(self):
        # Render the failure as the test it belongs to.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised when a DocTest example hits an exception it did not expect.

    Instance attributes:

      - test:     the DocTest object being run
      - example:  the Example object that failed
      - exc_info: the exception info (as from sys.exc_info())
    """
    def __init__(self, test, example, exc_info):
        self.test, self.example, self.exc_info = test, example, exc_info

    def __str__(self):
        # Render the failure as the test it belongs to.
        return str(self.test)
class DebugRunner(DocTestRunner):
    # NOTE(review): the doctest examples in this docstring use Python 2
    # raise syntax ("raise exc_info[0], exc_info[1], exc_info[2]") and
    # would not parse under a Python 3 doctest run -- confirm intended.
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
    >>> runner = DebugRunner(verbose=False)
    >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
    ... {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except UnexpectedException as e:
    ... failure = e
    >>> failure.test is test
    True
    >>> failure.example.want
    '42\n'
    >>> exc_info = failure.exc_info
    >>> raise exc_info[0], exc_info[1], exc_info[2]
    Traceback (most recent call last):
    ...
    KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 1
    ... >>> x
    ... 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> try:
    ... runner.run(test)
    ... except DocTestFailure as e:
    ... failure = e
    DocTestFailure objects provide access to the test:
    >>> failure.test is test
    True
    As well as to the example:
    >>> failure.example.want
    '2\n'
    and the actual output:
    >>> failure.got
    '1\n'
    If a failure or error occurs, the globals are left intact:
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 1}
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... >>> raise KeyError
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    Traceback (most recent call last):
    ...
    UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
    >>> del test.globs['__builtins__']
    >>> test.globs
    {'x': 2}
    But the globals are cleared if there is no error:
    >>> test = DocTestParser().get_doctest('''
    ... >>> x = 2
    ... ''', {}, 'foo', 'foo.py', 0)
    >>> runner.run(test)
    (0, 1)
    >>> test.globs
    {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Always run the base class with clear_globs=False so the globals
        # survive if one of the report_* hooks below raises; clear them
        # here only when the run finished without raising.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately, wrapping the original exc_info.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort the run immediately on the first output mismatch.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
    optionflags=0, extraglobs=None, raise_on_error=False,
    exclude_empty=False
    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.
    Also test examples reachable from dict m.__test__ if it exists and is
    not None. m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.
    Return (#failures, #tests).
    See doctest.__doc__ for an overview.
    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__. A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used. This is new in 2.4.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. This is new in 2.3. Possible values (see the
    docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    SKIP
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # Default to __main__ so command-line invocations test the script
    # being run. m stays None only when not invoked that way, in which
    # case the TypeError below is about as good an error as any.
    if m is None:
        m = sys.modules.get('__main__')
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    if name is None:
        name = m.__name__

    # Collect every doctest in the module and run them all through one
    # runner so the statistics accumulate in a single place.
    finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
        runner.run(test)

    if report:
        runner.summarize()

    # Fold this run's results into the module-global "master" runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).
    Optional keyword arg "module_relative" specifies how filenames
    should be interpreted:
    - If "module_relative" is True (the default), then "filename"
    specifies a module-relative path. By default, this path is
    relative to the calling module's directory; but if the
    "package" argument is specified, then it is relative to that
    package. To ensure os-independence, "filename" should use
    "/" characters to separate path segments, and should not
    be an absolute path (i.e., it may not begin with "/").
    - If "module_relative" is False, then "filename" specifies an
    os-specific path. The path may be absolute or relative (to
    the current working directory).
    Optional keyword arg "name" gives the name of the test; by default
    use the file's basename.
    Optional keyword argument "package" is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify "package" if "module_relative" is False.
    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.
    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.
    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.
    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).
    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0. Possible values (see the docs for details):
    DONT_ACCEPT_TRUE_FOR_1
    DONT_ACCEPT_BLANKLINE
    NORMALIZE_WHITESPACE
    ELLIPSIS
    SKIP
    IGNORE_EXCEPTION_DETAIL
    REPORT_UDIFF
    REPORT_CDIFF
    REPORT_NDIFF
    REPORT_ONLY_FIRST_FAILURE
    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.
    Optional keyword arg "parser" specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.
    Optional keyword arg "encoding" specifies an encoding that should
    be used to convert the file to unicode.
    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Resolve the (possibly module-relative) path and read the file.
    text, filename = _load_testfile(filename, package, module_relative)

    if name is None:
        name = os.path.basename(filename)

    # Assemble the globals: a private copy, optionally extended.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)

    runner_class = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_class(verbose=verbose, optionflags=optionflags)

    if encoding is not None:
        text = text.decode(encoding)

    # Convert the file's text to a single test and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Fold this run's results into the module-global "master" runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Test examples in the given object's docstring (`f`), using `globs`
    as globals. Optional argument `name` is used in failure messages.
    If the optional argument `verbose` is true, then generate output
    even if there are no failures.
    `compileflags` gives the set of flags that should be used by the
    Python compiler when running the examples. If not specified, then
    it will default to the set of future-import flags that apply to
    `globs`.
    Optional keyword arg `optionflags` specifies options for the
    testing and output. See the documentation for `testmod` for more
    information.
    """
    # Only look at f's own docstring -- do not recurse into members.
    finder = DocTestFinder(verbose=verbose, recurse=False)
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for found in finder.find(f, name, globs=globs):
        runner.run(found, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
    """Deprecated pre-2.4 doctest interface, kept only for backwards
    compatibility. Use DocTestRunner / DocTestFinder directly instead.
    """
    def __init__(self, mod=None, globs=None, verbose=None, optionflags=0):
        warnings.warn("class Tester is deprecated; "
                      "use class doctest.DocTestRunner instead",
                      RemovedInDjango18Warning, stacklevel=2)
        if mod is None and globs is None:
            raise TypeError("Tester.__init__: must specify mod or globs")
        if mod is not None and not inspect.ismodule(mod):
            raise TypeError("Tester.__init__: mod must be a module; %r" %
                            (mod,))
        if globs is None:
            globs = mod.__dict__
        self.globs = globs
        self.verbose = verbose
        self.optionflags = optionflags
        self.testfinder = DocTestFinder()
        self.testrunner = DocTestRunner(verbose=verbose,
                                        optionflags=optionflags)

    def runstring(self, s, name):
        # Parse the string as a standalone doctest and run it.
        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
        if self.verbose:
            print("Running string %s" % name)
        failures, tries = self.testrunner.run(test)
        if self.verbose:
            print("%s of %s examples failed in string %s" % (failures, tries, name))
        return (failures, tries)

    def rundoc(self, object, name=None, module=None):
        # Accumulate (failures, tries) over every doctest found in object.
        failures = tries = 0
        for test in self.testfinder.find(object, name, module=module,
                                         globs=self.globs):
            more_failures, more_tries = self.testrunner.run(test)
            failures += more_failures
            tries += more_tries
        return (failures, tries)

    def rundict(self, d, name, module=None):
        # Wrap the dict in a synthetic module so rundoc can walk it.
        import new
        mod = new.module(name)
        mod.__dict__.update(d)
        return self.rundoc(mod, name, False if module is None else module)

    def run__test__(self, d, name):
        import new
        mod = new.module(name)
        mod.__test__ = d
        return self.rundoc(mod, name)

    def summarize(self, verbose=None):
        return self.testrunner.summarize(verbose)

    def merge(self, other):
        self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.
    The old flag is returned so that a runner could restore the old
    value if it wished to:
    >>> old = _unittest_reportflags
    >>> set_unittest_reportflags(REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE) == old
    True
    >>> import doctest
    >>> doctest._unittest_reportflags == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    Only reporting flags can be set:
    >>> set_unittest_reportflags(ELLIPSIS)
    Traceback (most recent call last):
    ...
    ValueError: ('Only reporting flags allowed', 8)
    >>> set_unittest_reportflags(old) == (REPORT_NDIFF |
    ... REPORT_ONLY_FIRST_FAILURE)
    True
    """
    global _unittest_reportflags

    # Reject any bit that is not one of the reporting flags.
    if flags != (flags & REPORTING_FLAGS):
        raise ValueError("Only reporting flags allowed", flags)
    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    # Adapter that wraps one DocTest object as a unittest TestCase.
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, runner=DocTestRunner):
        # `test` is the DocTest to run; the other arguments configure how
        # it runs (option flags, output checker, runner class, and optional
        # per-test setUp/tearDown hooks that receive the DocTest object).
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown
        self._dt_runner = runner
    def setUp(self):
        # Invoke the user-supplied hook (if any) with the DocTest object.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)
    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Globals are cleared here rather than by the runner (which is run
        # with clear_globs=False below) so the tearDown hook can see them.
        test.globs.clear()
    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = self._dt_runner(optionflags=optionflags,
                                 checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            # Failure reports are captured in `new` via out=new.write.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            # NOTE(review): sys.stdout is never reassigned in this method;
            # presumably the runner swaps sys.stdout internally and this
            # restore is defensive -- confirm against DocTestRunner.run.
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))
    def format_failure(self, err):
        # Build a unittest-style failure message pointing at the doctest.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        # Last dotted component of the test name, e.g. "method" of "M.C.method".
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )
    def debug(self):
        r"""Run the test case without results and without catching exceptions
        The unit test framework includes a debug method on test cases
        and test suites to support post-mortem debugging. The test code
        is run in such a way that errors are not caught. This way a
        caller can catch the errors and initiate post-mortem debugging.
        The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
        exception:
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ... {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ... case.debug()
        ... except UnexpectedException as e:
        ... failure = e
        The UnexpectedException contains the test, the example, and
        the original exception:
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[0], exc_info[1], exc_info[2]
        Traceback (most recent call last):
        ...
        KeyError
        If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ... >>> x = 1
        ... >>> x
        ... 2
        ... ''', {}, 'foo', 'foo.py', 0)
        >>> case = DocTestCase(test)
        >>> try:
        ... case.debug()
        ... except DocTestFailure as e:
        ... failure = e
        DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
        As well as to the example:
        >>> failure.example.want
        '2\n'
        and the actual output:
        >>> failure.got
        '1\n'
        """
        # Use DebugRunner so the first failure/exception propagates to the
        # caller (wrapped as DocTestFailure / UnexpectedException).
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test)
        self.tearDown()
    def id(self):
        return self._dt_test.name
    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__
    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 test_class=DocTestCase, **options):
    """
    Convert doctest tests for a module to a unittest test suite.
    This converts each documentation string in a module that
    contains doctest tests to a unittest test case. If any of the
    tests in a doc string fail, then the test case fails. An exception
    is raised showing the name of the file containing the test and a
    (sometimes approximate) line number.
    The `module` argument provides the module to be tested. The argument
    can be either a module or a module name.
    If no argument is given, the calling module is used.
    A number of options may be provided as keyword arguments:
    setUp
    A set-up function. This is called before running the
    tests in each file. The setUp function will be passed a DocTest
    object. The setUp function can access the test globals as the
    globs attribute of the test passed.
    tearDown
    A tear-down function. This is called after running the
    tests in each file. The tearDown function will be passed a DocTest
    object. The tearDown function can access the test globals as the
    globs attribute of the test passed.
    globs
    A dictionary containing initial global variables for the tests.
    optionflags
    A set of doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
    if globs is None:
        globs = module.__dict__
    if not tests:
        # Raising here (rather than returning an empty suite) reveals a
        # bug that might otherwise be hidden.
        raise ValueError(module, "has no tests")

    tests.sort()
    suite = unittest.TestSuite()
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            # Fall back to the module's source file, mapping .pyc/.pyo
            # back to the .py it was compiled from.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(test_class(test, **options))
    return suite
class DocFileCase(DocTestCase):
    """A DocTestCase whose doctest was loaded from a standalone text file."""

    def id(self):
        # Use underscores so the id reads as one identifier-like token.
        return self._dt_test.name.replace('.', '_')

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        message = ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                   % (self._dt_test.name, self._dt_test.filename, err))
        return message
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Build a DocFileCase for the doctest file at `path`."""
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the file (relative to `package` when module-relative).
    doc, path = _load_testfile(path, package, module_relative)
    globs.setdefault("__file__", path)

    # The test is named after the file's basename.
    name = os.path.basename(path)

    # If an encoding is specified, use it to convert the file to unicode.
    if encoding is not None:
        doc = doc.decode(encoding)

    # Convert the text to a test and wrap it in a DocFileCase.
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.
    The path to each doctest file is given as a string; the
    interpretation of that string depends on the keyword argument
    "module_relative".
    A number of options may be provided as keyword arguments:
    module_relative
    If "module_relative" is True, then the given file paths are
    interpreted as os-independent module-relative paths. By
    default, these paths are relative to the calling module's
    directory; but if the "package" argument is specified, then
    they are relative to that package. To ensure os-independence,
    "filename" should use "/" characters to separate path
    segments, and may not be an absolute path (i.e., it may not
    begin with "/").
    If "module_relative" is False, then the given file paths are
    interpreted as os-specific paths. These paths may be absolute
    or relative (to the current working directory).
    package
    A Python package or the name of a Python package whose directory
    should be used as the base directory for module relative paths.
    If "package" is not specified, then the calling module's
    directory is used as the base directory for module relative
    filenames. It is an error to specify "package" if
    "module_relative" is False.
    setUp
    A set-up function. This is called before running the
    tests in each file. The setUp function will be passed a DocTest
    object. The setUp function can access the test globals as the
    globs attribute of the test passed.
    tearDown
    A tear-down function. This is called after running the
    tests in each file. The tearDown function will be passed a DocTest
    object. The tearDown function can access the test globals as the
    globs attribute of the test passed.
    globs
    A dictionary containing initial global variables for the tests.
    optionflags
    A set of doctest option flags expressed as an integer.
    parser
    A DocTestParser (or subclass) that should be used to extract
    tests from the files.
    encoding
    An encoding that will be used to convert the files to unicode.
    """
    suite = unittest.TestSuite()

    # _normalize_module must be called at THIS level (not inside
    # DocFileTest) so the package is guessed from our caller, not from
    # DocFileTest's caller.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))
    return suite
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.
    Converts text with examples to a Python script. Example input is
    converted to regular code. Example output and all other words
    are converted to comments:
    >>> text = '''
    ... Here are examples of simple math.
    ...
    ... Python has super accurate integer addition
    ...
    ... >>> 2 + 2
    ... 5
    ...
    ... And very friendly error messages:
    ...
    ... >>> 1/0
    ... To Infinity
    ... And
    ... Beyond
    ...
    ... You can use logic if you want:
    ...
    ... >>> if 0:
    ... ... blah
    ... ... blah
    ... ...
    ...
    ... Ho hum
    ... '''
    >>> print(script_from_examples(text))
    # Here are examples of simple math.
    #
    # Python has super accurate integer addition
    #
    2 + 2
    # Expected:
    ## 5
    #
    # And very friendly error messages:
    #
    1/0
    # Expected:
    ## To Infinity
    ## And
    ## Beyond
    #
    # You can use logic if you want:
    #
    if 0:
        blah
        blah
    #
    # Ho hum
    """
    lines = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Example source becomes real code (drop its trailing newline).
            lines.append(piece.source[:-1])
            want = piece.want
            if want:
                # Expected output becomes double-commented text.
                lines.append('# Expected:')
                lines.extend('## ' + text for text in want.split('\n')[:-1])
        else:
            # Narrative text becomes single-commented text.
            lines.extend(_comment_line(text)
                         for text in piece.split('\n')[:-1])
    # Trim bare "#" markers from both ends.
    while lines and lines[-1] == '#':
        lines.pop()
    while lines and lines[0] == '#':
        lines.pop(0)
    return '\n'.join(lines)
def testsource(module, name):
    """Extract the test sources from a doctest docstring as a script.
    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the doc string with tests to be debugged.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    # Use the first match; names are normally unique within a module.
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring given as the string `src`."""
    debug_script(script_from_examples(src), pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script. `src` is the script, as a string."
    import pdb

    # tempfile.NamedTemporaryFile() cannot be used here: as the docs say,
    # a file created that way cannot be opened by name a second time on
    # modern Windows boxes, and execfile() needs to open it.
    srcfilename = tempfile.mktemp(".py", "doctestdebug")
    with open(srcfilename, 'w') as script_file:
        script_file.write(src)
    try:
        # Work on a private copy of the globals (or a fresh dict).
        globs = globs.copy() if globs else {}
        if pm:
            try:
                execfile(srcfilename, globs, globs)
            except:
                print(sys.exc_info()[1])
                pdb.post_mortem(sys.exc_info()[2])
        else:
            # %r is vital here: '%s' could let backslashes be treated
            # as metacharacters on Windows.
            pdb.run("execfile(%r)" % srcfilename, globs, globs)
    finally:
        os.remove(srcfilename)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.
    Provide the module (or dotted name of the module) containing the
    test to be debugged and the name (within the module) of the object
    with the docstring with tests to be debugged.
    """
    module = _normalize_module(module)
    # Extract the named doctest as a script and run it under the debugger
    # with the module's own namespace as globals.
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print(t.get())
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print(x.get())
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(range(1000)) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(xrange(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    # Run this module's own doctests through the unittest bridge.
    runner = unittest.TextTestRunner()
    runner.run(DocTestSuite())


if __name__ == "__main__":
    _test()
| 37.427484 | 84 | 0.578939 |
acf19caf97e806c40529dd1d913ad6081e706d60 | 873 | py | Python | operator-pipeline-images/operatorcert/entrypoints/pipelinerun_summary.py | mgreczi/operator-pipelines | 2fd5cfbb702cecce98b80a307decf9d27e337416 | [
"Apache-2.0"
] | null | null | null | operator-pipeline-images/operatorcert/entrypoints/pipelinerun_summary.py | mgreczi/operator-pipelines | 2fd5cfbb702cecce98b80a307decf9d27e337416 | [
"Apache-2.0"
] | null | null | null | operator-pipeline-images/operatorcert/entrypoints/pipelinerun_summary.py | mgreczi/operator-pipelines | 2fd5cfbb702cecce98b80a307decf9d27e337416 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
import sys
from operatorcert.tekton import PipelineRun
def parse_args() -> argparse.Namespace:
    """Parse command-line arguments for the PipelineRun summary tool.

    Returns:
        argparse.Namespace with ``pr_path``, ``trs_path`` and
        ``include_final_tasks`` attributes.
    """
    # Note: the original annotation claimed argparse.ArgumentParser, but
    # the function returns the parsed Namespace, not the parser itself.
    parser = argparse.ArgumentParser(
        description="Construct a markdown summary for a Tekton PipelineRun."
    )
    parser.add_argument("pr_path", help="File path to a PipelineRun object")
    parser.add_argument("trs_path", help="File path to a JSON list of TaskRun objects")
    parser.add_argument(
        "--include-final-tasks",
        help="Include final tasks in the output",
        action="store_true",
    )
    return parser.parse_args()
def main() -> None:
    """Entry point: log a markdown summary of a PipelineRun to stdout."""
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
    cli_args = parse_args()
    run = PipelineRun.from_files(cli_args.pr_path, cli_args.trs_path)
    logging.info(run.markdown_summary(include_final_tasks=cli_args.include_final_tasks))
| 29.1 | 87 | 0.718213 |
acf19cdb5702c1ad4832a3c5de8006fba8010e58 | 54,612 | py | Python | src/sentry/south_migrations/0175_auto__del_pendingteammember__del_unique_pendingteammember_team_email.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 2 | 2019-03-04T12:45:54.000Z | 2019-03-04T12:45:55.000Z | src/sentry/south_migrations/0175_auto__del_pendingteammember__del_unique_pendingteammember_team_email.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 196 | 2019-06-10T08:34:10.000Z | 2022-02-22T01:26:13.000Z | src/sentry/south_migrations/0175_auto__del_pendingteammember__del_unique_pendingteammember_team_email.py | uandco/sentry | 5b8d45cb71c6617dac8e64265848623fbfce9c99 | [
"BSD-3-Clause"
] | 1 | 2017-02-09T06:36:57.000Z | 2017-02-09T06:36:57.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Drop the PendingTeamMember table (unique constraint first)."""
        # Removing unique constraint on 'PendingTeamMember', fields ['team', 'email']
        db.delete_unique(u'sentry_pendingteammember', ['team_id', 'email'])
        # Deleting model 'PendingTeamMember'
        db.delete_table(u'sentry_pendingteammember')
    def backwards(self, orm):
        """Reverse the migration: re-create the PendingTeamMember table.

        Rebuilds the table with the same columns it had before deletion and
        restores the (team, email) unique constraint afterwards.
        """
        # Adding model 'PendingTeamMember'
        db.create_table(
            u'sentry_pendingteammember', (
                (
                    'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(
                        primary_key=True
                    )
                ), (
                    'team', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(
                        related_name='pending_member_set', to=orm['sentry.Team']
                    )
                ), (
                    'date_added',
                    self.gf('django.db.models.fields.DateTimeField')()
                ), (
                    'type',
                    self.gf('sentry.db.models.fields.bounded.BoundedIntegerField')(default=50)
                ), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75)),
            )
        )
        # Notify South listeners that the model was (re-)created.
        db.send_create_signal('sentry', ['PendingTeamMember'])

        # Adding unique constraint on 'PendingTeamMember', fields ['team', 'email']
        db.create_unique(u'sentry_pendingteammember', ['team_id', 'email'])
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'audit_actors'",
'to': "orm['sentry.User']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('sentry.db.models.fields.jsonfield.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together': "(('organization', 'user'), ('organization', 'email'))",
'object_name': 'OrganizationMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| 36.456609 | 94 | 0.398978 |
acf19d5ce4da9b3bb14d9e17d2385057abaa4e88 | 798 | py | Python | toDoList/urls.py | SagnikH/ToDoList | 1fe4047c60c7ebe3af815c64091002aa5b501f35 | [
"MIT"
] | null | null | null | toDoList/urls.py | SagnikH/ToDoList | 1fe4047c60c7ebe3af815c64091002aa5b501f35 | [
"MIT"
] | null | null | null | toDoList/urls.py | SagnikH/ToDoList | 1fe4047c60c7ebe3af815c64091002aa5b501f35 | [
"MIT"
] | null | null | null | """toDoList URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Route the Django admin site; every other URL is delegated to the records app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('records.urls')),
]
| 34.695652 | 77 | 0.703008 |
acf19eec19a21cd7ffb9ddf05539f736f991747f | 2,235 | py | Python | lcnn/postprocess.py | basemprince/court_detection | e5fb9549234b0f3c8fd350ea9abc1c7daa1c2306 | [
"MIT"
] | null | null | null | lcnn/postprocess.py | basemprince/court_detection | e5fb9549234b0f3c8fd350ea9abc1c7daa1c2306 | [
"MIT"
] | null | null | null | lcnn/postprocess.py | basemprince/court_detection | e5fb9549234b0f3c8fd350ea9abc1c7daa1c2306 | [
"MIT"
] | null | null | null | import numpy as np
def pline(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the infinite line through
    (x1, y1) and (x2, y2).

    The squared direction length is floored at 1e-9 so a degenerate
    (zero-length) line cannot divide by zero.
    """
    dir_x, dir_y = x2 - x1, y2 - y1
    sq_len = dir_x * dir_x + dir_y * dir_y
    # Projection parameter of (x, y) onto the (unclamped) line.
    t = ((x - x1) * dir_x + (y - y1) * dir_y) / max(1e-9, float(sq_len))
    off_x = x1 + t * dir_x - x
    off_y = y1 + t * dir_y - y
    return off_x * off_x + off_y * off_y
def psegment(x1, y1, x2, y2, x, y):
    """Squared distance from point (x, y) to the *segment* (x1, y1)-(x2, y2).

    The projection parameter is clamped to [0, 1] so the closest point lies
    on the segment. The squared length is floored at 1e-9 — the same guard
    used by ``pline``/``plambda`` — so a degenerate zero-length segment no
    longer raises ZeroDivisionError (bug fix: this guard was missing here).
    """
    px = x2 - x1
    py = y2 - y1
    dd = px * px + py * py
    u = max(min(((x - x1) * px + (y - y1) * py) / max(1e-9, float(dd)), 1), 0)
    dx = x1 + u * px - x
    dy = y1 + u * py - y
    return dx * dx + dy * dy
def plambda(x1, y1, x2, y2, x, y):
    """Projection parameter of point (x, y) onto the line (x1, y1)-(x2, y2).

    Returns 0 at (x1, y1) and 1 at (x2, y2); values outside [0, 1] fall
    beyond the endpoints. The squared length is floored at 1e-9 to avoid
    dividing by zero for degenerate lines.
    """
    dir_x, dir_y = x2 - x1, y2 - y1
    sq_len = dir_x * dir_x + dir_y * dir_y
    return ((x - x1) * dir_x + (y - y1) * dir_y) / max(1e-9, float(sq_len))
def postprocess(lines, scores, threshold=0.01, tol=1e9, do_clip=False):
    """Merge near-duplicate line segments, keeping only non-overlapping parts.

    Each candidate segment (p, q) is compared against segments already kept
    in ``nlines``. When it lies within ``threshold`` of an accepted segment,
    the overlapping portion of its parameter interval [start, end] (measured
    in ``plambda`` coordinates along the candidate) is trimmed away; a
    segment trimmed to nothing is dropped.

    NOTE(review): assumes ``lines`` iterates as endpoint pairs (p, q) that
    support numpy arithmetic — presumably shape (N, 2, 2); confirm at call
    sites. ``do_clip`` is accepted but never used. With the huge default
    tol=1e9, any nearby accepted segment covers the whole interval, so
    near-duplicates are removed entirely rather than clipped.

    Returns a tuple ``(kept_lines, kept_scores)`` as numpy arrays.
    """
    nlines, nscores = [], []
    for (p, q), score in zip(lines, scores):
        # [start, end] is the surviving portion of this segment in its own
        # parameterization (0 at p, 1 at q).
        start, end = 0, 1
        for a, b in nlines:
            if (
                min(
                    max(pline(*p, *q, *a), pline(*p, *q, *b)),
                    max(pline(*a, *b, *p), pline(*a, *b, *q)),
                )
                > threshold ** 2
            ):
                # Segments are not close to collinear/overlapping; ignore.
                continue
            # Project the accepted segment's endpoints onto this segment.
            lambda_a = plambda(*p, *q, *a)
            lambda_b = plambda(*p, *q, *b)
            if lambda_a > lambda_b:
                lambda_a, lambda_b = lambda_b, lambda_a
            # Widen the accepted interval by the tolerance on both sides.
            lambda_a -= tol
            lambda_b += tol
            # case 1: skip (if not do_clip)
            if start < lambda_a and lambda_b < end:
                continue
            # not intersect
            if lambda_b < start or lambda_a > end:
                continue
            # cover
            if lambda_a <= start and end <= lambda_b:
                start = 10  # sentinel > end marks the segment as fully covered
                break
            # case 2 & 3:
            if lambda_a <= start and start <= lambda_b:
                start = lambda_b
            if lambda_a <= end and end <= lambda_b:
                end = lambda_a
            if start >= end:
                break
        if start >= end:
            continue
        nlines.append(np.array([p + (q - p) * start, p + (q - p) * end]))
        nscores.append(score)
    return np.array(nlines), np.array(nscores)
| 28.653846 | 74 | 0.412528 |
acf1a1640a880a418298a263b085928b7079a7a5 | 283 | py | Python | hacker-rank/implementation/repeated_string.py | denisrmp/hacker-rank | db490b1b2d41ed6913b4cacee1b4bb40e15186b7 | [
"MIT"
] | null | null | null | hacker-rank/implementation/repeated_string.py | denisrmp/hacker-rank | db490b1b2d41ed6913b4cacee1b4bb40e15186b7 | [
"MIT"
] | null | null | null | hacker-rank/implementation/repeated_string.py | denisrmp/hacker-rank | db490b1b2d41ed6913b4cacee1b4bb40e15186b7 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/repeated-string
LETTER = 'a'
def repeated_string(s, n):
    """Count occurrences of 'a' in the first n characters of s repeated forever."""
    if not s:
        return 0
    full_repeats, remainder = divmod(n, len(s))
    return s.count('a') * full_repeats + s[:remainder].count('a')
# Read the base string and the prefix length from stdin, then print the count.
s = input().strip()
n = int(input().strip())
print(repeated_string(s, n))
| 17.6875 | 69 | 0.575972 |
acf1a38c7a39d42e470f0163a34626d98015b396 | 4,712 | py | Python | libs/google_sheet_helpers.py | ConsultingMD/covid-data-model | f3d23f98eccca1ce33c7a11ab546e9aab5d0aa4d | [
"MIT"
] | 155 | 2020-03-18T13:23:42.000Z | 2022-03-19T23:57:10.000Z | libs/google_sheet_helpers.py | ConsultingMD/covid-data-model | f3d23f98eccca1ce33c7a11ab546e9aab5d0aa4d | [
"MIT"
] | 418 | 2020-03-20T02:22:01.000Z | 2022-03-31T23:05:08.000Z | libs/google_sheet_helpers.py | ConsultingMD/covid-data-model | f3d23f98eccca1ce33c7a11ab546e9aab5d0aa4d | [
"MIT"
] | 63 | 2020-03-19T07:04:23.000Z | 2022-02-25T13:51:26.000Z | from typing import Optional
import datetime
import base64
import os
import tempfile
import structlog
import gspread
import pathlib
from libs.datasets import dataset_utils
from libs.datasets import dataset_pointer
from libs.datasets.dataset_utils import DatasetType
from libs.datasets.dataset_pointer import DatasetPointer
_logger = structlog.getLogger(__name__)
# base 64 encoded service account json file.
SERVICE_ACCOUNT_DATA_ENV_NAME = "GOOGLE_SHEETS_SERVICE_ACCOUNT_DATA"
def init_client() -> gspread.Client:
    """Build a gspread client.

    Prefers base64-encoded service-account JSON supplied via the
    environment variable named by SERVICE_ACCOUNT_DATA_ENV_NAME; otherwise
    falls back to gspread's default service-account discovery.
    """
    encoded = os.environ.get(SERVICE_ACCOUNT_DATA_ENV_NAME)
    if not encoded:
        return gspread.service_account()
    _logger.info("Loading service account from env variable data.")
    decoded = base64.b64decode(encoded)
    # gspread only accepts a filename, so stage the JSON in a temp file.
    with tempfile.NamedTemporaryFile() as handle:
        handle.write(decoded)
        handle.flush()
        return gspread.service_account(filename=handle.name)
def create_or_replace_worksheet(
    spreadsheet: gspread.Spreadsheet, worksheet_name: str
) -> gspread.Worksheet:
    """Creates or replaces a worksheet with name `worksheet_name`.

    Note(chris): Taking the approach of deleting worksheet to make sure that
    state of worksheet is totally clean. Other methods of clearing worksheet
    using gspread did not clear conditional formatting rules.

    Args:
        spreadsheet: Spreadsheet to create/replace the worksheet in.
        worksheet_name: Name of worksheet.

    Returns: Newly created Worksheet.
    """
    try:
        worksheet = spreadsheet.worksheet(worksheet_name)
        try:
            spreadsheet.del_worksheet(worksheet)
        except Exception:
            # Deleting can fail when this is the spreadsheet's only worksheet:
            # If worksheet name exists but is the only worksheet, need to add a new tmp
            # sheet first then delete the old one, then rename the tmp sheet.
            new_worksheet = spreadsheet.add_worksheet("tmp", 100, 100)
            spreadsheet.del_worksheet(worksheet)
            new_worksheet.update_title(worksheet_name)
            return new_worksheet
    except gspread.WorksheetNotFound:
        # No worksheet with this name yet; fall through and create it fresh.
        pass
    return spreadsheet.add_worksheet(worksheet_name, 100, 100)
def create_or_clear_worksheet(sheet: gspread.Spreadsheet, worksheet_name: str) -> gspread.Worksheet:
    """Return the worksheet named `worksheet_name`, creating it if absent.

    An existing worksheet has its contents cleared before being returned.

    Args:
        sheet: Spreadsheet to look in.
        worksheet_name: Name of worksheet.

    Returns: Worksheet with name `worksheet_name`.
    """
    try:
        found = sheet.worksheet(worksheet_name)
        found.clear()
        return found
    except gspread.WorksheetNotFound:
        pass
    return sheet.add_worksheet(worksheet_name, 100, 100)
def open_spreadsheet(
    sheet_id: str, gspread_client: Optional[gspread.Client] = None,
) -> gspread.Spreadsheet:
    """Opens an existing spreadsheet by its key/ID.

    Args:
        sheet_id: Key (ID) of the spreadsheet to open.
        gspread_client: Optional pre-built client; a new one is created
            when omitted.

    Returns: Spreadsheet.
    """
    gspread_client = gspread_client or init_client()
    return gspread_client.open_by_key(sheet_id)
def open_or_create_spreadsheet(
    sheet_name: str,
    share_email: Optional[str] = None,
    gspread_client: Optional[gspread.Client] = None,
) -> gspread.Spreadsheet:
    """Open the spreadsheet named `sheet_name`, creating it when missing.

    A newly created spreadsheet is optionally shared with `share_email`
    as a writer; an existing spreadsheet is returned unchanged.

    Args:
        sheet_name: Name of sheet to open or create.
        share_email: Email to share a newly created sheet with.
        gspread_client: Optional pre-built client.

    Returns: Spreadsheet.
    """
    client = gspread_client or init_client()
    try:
        return client.open(sheet_name)
    except gspread.SpreadsheetNotFound:
        _logger.info("Sheet not found, creating.", sheet_name=sheet_name)
    sheet = client.create(sheet_name)
    if share_email:
        _logger.info("Sharing sheet", email=share_email)
        sheet.share(share_email, perm_type="user", role="writer")
    return sheet
def update_info_sheet(
    sheet: gspread.Spreadsheet,
    sheet_name: str = "Update Info",
    pointer_directory: pathlib.Path = dataset_utils.DATA_DIRECTORY,
):
    """Rewrite the info worksheet with the update timestamp and git SHAs.

    Reads the MULTI_REGION dataset pointer from `pointer_directory` and
    replaces the worksheet named `sheet_name` with a small field/value table.
    """
    pointer_file = pointer_directory / dataset_pointer.form_filename(DatasetType.MULTI_REGION)
    pointer = DatasetPointer.parse_raw(pointer_file.read_text())
    rows = [
        ("Field", "Value"),
        ("Updated at", datetime.datetime.utcnow().isoformat()),
        ("Covid Data Model SHA", pointer.model_git_info.sha),
        ("Covid Data Public SHA", pointer.data_git_info.sha),
    ]
    worksheet = create_or_replace_worksheet(sheet, sheet_name)
    worksheet.update(rows)
    _logger.info("Successfully updated Info worksheet")
    return worksheet
| 31.205298 | 100 | 0.714983 |
acf1a46bf162cdfb336ba5d86c2aefb4130bb529 | 3,213 | py | Python | evernote/controller/storage.py | lbb4511/python | d2716f1d9c29d1797ede294ae1f9c19c18c77827 | [
"MIT"
] | null | null | null | evernote/controller/storage.py | lbb4511/python | d2716f1d9c29d1797ede294ae1f9c19c18c77827 | [
"MIT"
] | null | null | null | evernote/controller/storage.py | lbb4511/python | d2716f1d9c29d1797ede294ae1f9c19c18c77827 | [
"MIT"
] | null | null | null | #coding=utf8
import sys
import evernote.edam.type.ttypes as Types
import evernote.edam.notestore.NoteStore as NoteStore
# Data Structure
# notebookName:{
# 'notebook': notebook
# 'notes': {
# noteName: note
# ...
# }
# }
# noteDictFormat: {
# 'notebookName':[('note1', timeStamp), ..],
# }
class Storage():
    """In-memory cache of Evernote notebooks and notes.

    Layout of ``storage``::

        notebookName: {
            'notebook': notebook,
            'notes': {noteTitle: note, ...},
        }

    NOTE(review): ``storage`` is a class attribute, so all instances share
    one cache — confirm this is intentional before moving it to __init__.
    This module targets Python 2 (``iteritems`` is used below).
    """
    storage = {}

    def __init__(self):
        # Becomes meaningful once update() has populated the cache.
        self.available = False

    def update(self, token, noteStore):
        """Refresh the cache from the Evernote note store."""
        for nb in noteStore.listNotebooks():
            self.storage[nb.name] = {'notebook': nb, 'notes': {}}
            f = NoteStore.NoteFilter()
            f.notebookGuid = nb.guid
            for ns in noteStore.findNotes(token, f, 0, 999).notes:
                self.storage[nb.name]['notes'][ns.title] = ns
        self.defaultNotebook = noteStore.getDefaultNotebook(token).name

    def create_note(self, note, notebookName=None):
        """Add *note* to *notebookName* (default notebook when omitted)."""
        if notebookName is None: notebookName = self.defaultNotebook
        self.storage[notebookName]['notes'][note.title] = note
        return True

    def create_notebook(self, notebook):
        """Add *notebook* to the cache; return False if it already exists."""
        # Bug fix: the guard was inverted ("is None: return False"), which
        # rejected new notebooks and silently overwrote existing ones.
        if self.storage.get(notebook.name) is not None: return False
        self.storage[notebook.name] = {'notebook': notebook, 'notes': {}}
        return True

    def copy_note(self, fullNotePath, _to=None):
        """Copy the note at 'notebook/title' into notebook *_to*."""
        if _to is None: _to = self.defaultNotebook
        note = self.get(fullNotePath)
        if note is None: return False
        self.storage[_to]['notes'][note.title] = note
        return True

    def move_note(self, fullNotePath, _to=None):
        """Copy the note to *_to*, then remove it from its source notebook."""
        moved = self.copy_note(fullNotePath, _to)
        if not moved: return False
        # Bug fix: previously referenced an undefined name ``note`` here
        # (NameError on every call); derive notebook and title from the path.
        notebookName = fullNotePath.split('/')[0]
        noteTitle = fullNotePath.split('/')[1]
        del self.storage[notebookName]['notes'][noteTitle]
        return True

    def delete_note(self, fullNotePath):
        """Remove the note at 'notebook/title'; False when it does not exist."""
        if self.get(fullNotePath) is None: return False
        del self.storage[fullNotePath.split('/')[0]]['notes'][
            fullNotePath.split('/')[1]]
        return True

    def delete_notebook(self, notebook):
        """Remove the notebook named *notebook*; False when unknown."""
        if self.get(notebook) is None: return False
        del self.storage[notebook]
        return True

    def get(self, s):
        """Look up 'notebook' or 'notebook/title'; None when missing."""
        f = s.split('/')
        r = self.storage.get(f[0])
        if r is None: return
        if '/' in s: return r['notes'].get(f[1])
        return r.get('notebook')

    def get_note_dict(self):
        """Return {notebookName: [(noteTitle, updatedEpochSeconds), ...]}."""
        noteDict = {}
        # iteritems: Python 2 API, kept for compatibility with this module.
        for nbName, nb in self.storage.iteritems():
            noteDict[nbName] = []
            for nName, n in nb['notes'].iteritems():
                noteDict[nbName].append((nName, n.updated / 1000))
        return noteDict

    def show_notebook(self):
        """Print every notebook name."""
        for bn, nb in self.storage.items():
            print_line(bn)

    def show_notes(self, notebook=None):
        """Print notes of *notebook*, or all notebooks' notes when None."""
        for bn, nb in self.storage.items():
            if not notebook: print_line(bn + ':')
            if not notebook or bn == notebook:
                for nn, ns in nb['notes'].items():
                    print_line(('' if notebook else ' ') + nn)
def print_line(s):
    # Re-encode a UTF-8 byte string to the local filesystem encoding before
    # printing. Python 2 only: uses the ``print`` statement and str.decode.
    t = sys.getfilesystemencoding()
    print s.decode('UTF-8').encode(t)
| 31.5 | 73 | 0.588235 |
acf1a57db0569080a878fac7a071ee2a701023be | 12,440 | py | Python | cppwg/writers/class_writer.py | StefanBoca/cppwg | b41ce191be5b8d45607faaa032af8cfb3ead15fd | [
"MIT"
] | null | null | null | cppwg/writers/class_writer.py | StefanBoca/cppwg | b41ce191be5b8d45607faaa032af8cfb3ead15fd | [
"MIT"
] | null | null | null | cppwg/writers/class_writer.py | StefanBoca/cppwg | b41ce191be5b8d45607faaa032af8cfb3ead15fd | [
"MIT"
] | null | null | null | import ntpath
from pygccxml import declarations
from cppwg.writers import base_writer
from cppwg.writers import method_writer
from cppwg.writers import constructor_writer
class CppClassWrapperWriter(base_writer.CppBaseWrapperWriter):
"""
This class generates wrapper code for Cpp classes
"""
def __init__(self, class_info, wrapper_templates):
super(CppClassWrapperWriter, self).__init__(wrapper_templates)
self.hpp_string = ""
self.cpp_string = ""
self.class_info = class_info
self.class_decls = []
self.exposed_class_full_names = []
self.class_full_names = self.class_info.get_full_names()
self.class_short_names = self.class_info.get_short_names()
self.has_shared_ptr = True
self.is_abstract = False
if len(self.class_full_names) != len(self.class_short_names):
message = "Full and short name lists should be the same length"
raise ValueError(message)
def write_files(self, work_dir, class_short_name):
"""
Write the hpp and cpp wrapper codes to file
"""
path = work_dir + "/" + class_short_name
hpp_file = open(path + ".cppwg.hpp", "w")
hpp_file.write(self.hpp_string)
hpp_file.close()
cpp_file = open(path + ".cppwg.cpp", "w")
cpp_file.write(self.cpp_string)
cpp_file.close()
def add_hpp(self, class_short_name):
"""
Add the class wrapper hpp file
"""
wrapper_dict = {"class_short_name": class_short_name}
self.hpp_string += self.wrapper_templates["class_hpp_header"].format(
**wrapper_dict
)
def add_cpp_header(self, class_full_name, class_short_name):
"""
Add the 'top' of the class wrapper cpp file
"""
header = "wrapper_header_collection"
# Check for custom smart pointers
smart_ptr_handle = ""
smart_pointer_handle = self.class_info.hierarchy_attribute("smart_ptr_type")
if smart_pointer_handle != None:
smart_ptr_template = self.wrapper_templates["smart_pointer_holder"]
smart_ptr_handle = (
"\n" + smart_ptr_template.format(smart_pointer_handle) + ";"
)
header_dict = {
"wrapper_header_collection": header,
"class_short_name": class_short_name,
"class_full_name": class_full_name,
"smart_ptr_handle": smart_ptr_handle,
"includes": '#include "' + header + '.hpp"\n',
}
extra_include_string = ""
common_include_file = self.class_info.hierarchy_attribute("common_include_file")
source_includes = self.class_info.hierarchy_attribute_gather("source_includes")
if not common_include_file:
for eachInclude in source_includes:
if eachInclude[0] != "<":
extra_include_string += '#include "' + eachInclude + '"\n'
else:
extra_include_string += "#include " + eachInclude + "\n"
if self.class_info.source_file is not None:
extra_include_string += (
'#include "' + self.class_info.source_file + '"\n'
)
else:
include_name = ntpath.basename(self.class_info.decl.location.file_name)
extra_include_string += '#include "' + include_name + '"\n'
header_dict["includes"] = extra_include_string
header_string = self.wrapper_templates["class_cpp_header"].format(**header_dict)
self.cpp_string += header_string
for eachLine in self.class_info.prefix_code:
self.cpp_string += eachLine + "\n"
# Any custom generators
if self.class_info.custom_generator is not None:
self.cpp_string += self.class_info.custom_generator.get_class_cpp_pre_code(
class_short_name
)
def add_virtual_overides(self, class_decl, short_class_name):
"""
Virtual over-rides if neeeded
"""
# Identify any methods needing over-rides, i.e. any that are virtual
# here or in a parent.
methods_needing_override = []
return_types = []
for eachMemberFunction in class_decl.member_functions(allow_empty=True):
is_pure_virtual = eachMemberFunction.virtuality == "pure virtual"
is_virtual = eachMemberFunction.virtuality == "virtual"
if is_pure_virtual or is_virtual:
methods_needing_override.append(eachMemberFunction)
return_types.append(eachMemberFunction.return_type.decl_string)
if is_pure_virtual:
self.is_abstract = True
for eachReturnString in return_types:
if eachReturnString != self.tidy_name(eachReturnString):
typdef_string = "typedef {full_name} {tidy_name};\n"
typdef_dict = {
"full_name": eachReturnString,
"tidy_name": self.tidy_name(eachReturnString),
}
self.cpp_string += typdef_string.format(**typdef_dict)
self.cpp_string += "\n"
needs_override = len(methods_needing_override) > 0
if needs_override:
over_ride_dict = {
"class_short_name": short_class_name,
"class_base_name": self.class_info.name,
}
override_template = self.wrapper_templates["class_virtual_override_header"]
self.cpp_string += override_template.format(**over_ride_dict)
for eachMethod in methods_needing_override:
writer = method_writer.CppMethodWrapperWriter(
self.class_info,
eachMethod,
class_decl,
self.wrapper_templates,
short_class_name,
)
self.cpp_string = writer.add_override(self.cpp_string)
self.cpp_string += "\n};\n"
return methods_needing_override
def write(self, work_dir):
if len(self.class_decls) != len(self.class_full_names):
message = "Not enough class decls added to do write."
raise ValueError(message)
for idx, full_name in enumerate(self.class_full_names):
short_name = self.class_short_names[idx]
class_decl = self.class_decls[idx]
self.hpp_string = ""
self.cpp_string = ""
# Add the cpp file header
self.add_cpp_header(full_name, short_name)
# Check for struct-enum pattern
if declarations.is_struct(class_decl):
enums = class_decl.enumerations(allow_empty=True)
if len(enums) == 1:
replacements = {"class": class_decl.name, "enum": enums[0].name}
self.cpp_string += (
"void register_{class}_class(py::module &m){{\n".format(
**replacements
)
)
self.cpp_string += (
' py::class_<{class}> myclass(m, "{class}");\n'.format(
**replacements
)
)
self.cpp_string += (
' py::enum_<{class}::{enum}>(myclass, "{enum}")\n'.format(
**replacements
)
)
for eachval in enums[0].values:
replacements = {
"class": class_decl.name,
"enum": enums[0].name,
"val": eachval[0],
}
self.cpp_string += (
' .value("{val}", {class}::{enum}::{val})\n'.format(
**replacements
)
)
self.cpp_string += " .export_values();\n}\n"
# Set up the hpp
self.add_hpp(short_name)
# Do the write
self.write_files(work_dir, short_name)
continue
# Define any virtual function overloads
methods_needing_override = self.add_virtual_overides(class_decl, short_name)
# Add overrides if needed
overrides_string = ""
if len(methods_needing_override) > 0:
overrides_string = ", " + short_name + "_Overloads"
# Add smart ptr support if needed
smart_pointer_handle = self.class_info.hierarchy_attribute("smart_ptr_type")
ptr_support = ""
if self.has_shared_ptr and smart_pointer_handle is not None:
ptr_support = ", " + smart_pointer_handle + "<" + short_name + " > "
# Add base classes if needed
bases = ""
for eachBase in class_decl.bases:
cleaned_base = eachBase.related_class.name.replace(" ", "")
exposed = any(
cleaned_base in t.replace(" ", "")
for t in self.exposed_class_full_names
)
public = not eachBase.access_type == "private"
if exposed and public:
bases += ", " + eachBase.related_class.name + " "
# Add the class refistration
class_definition_dict = {
"short_name": short_name,
"overrides_string": overrides_string,
"ptr_support": ptr_support,
"bases": bases,
}
class_definition_template = self.wrapper_templates["class_definition"]
self.cpp_string += class_definition_template.format(**class_definition_dict)
# Add constructors
# if not self.is_abstract and not class_decl.is_abstract:
# No constructors for classes with private pure virtual methods!
ppv_class = False
for eachMemberFunction in class_decl.member_functions(allow_empty=True):
if (
eachMemberFunction.virtuality == "pure virtual"
and eachMemberFunction.access_type == "private"
):
ppv_class = True
break
if not ppv_class:
query = declarations.access_type_matcher_t("public")
for eachConstructor in class_decl.constructors(
function=query, allow_empty=True
):
writer = constructor_writer.CppConsturctorWrapperWriter(
self.class_info,
eachConstructor,
class_decl,
self.wrapper_templates,
short_name,
)
self.cpp_string = writer.add_self(self.cpp_string)
# Add public member functions
query = declarations.access_type_matcher_t("public")
for eachMemberFunction in class_decl.member_functions(
function=query, allow_empty=True
):
exlcuded = False
if self.class_info.excluded_methods is not None:
exlcuded = (
eachMemberFunction.name in self.class_info.excluded_methods
)
if not exlcuded:
writer = method_writer.CppMethodWrapperWriter(
self.class_info,
eachMemberFunction,
class_decl,
self.wrapper_templates,
short_name,
)
self.cpp_string = writer.add_self(self.cpp_string)
# Any custom generators
if self.class_info.custom_generator is not None:
self.cpp_string += (
self.class_info.custom_generator.get_class_cpp_def_code(short_name)
)
# Close the class definition
self.cpp_string += " ;\n}\n"
# Set up the hpp
self.add_hpp(short_name)
# Do the write
self.write_files(work_dir, short_name)
| 39.242902 | 88 | 0.547749 |
acf1a5d36198f398ae5b209f37a0f353fdfdce76 | 254 | py | Python | jobsp/wsgi.py | rds0751/opensource-job-portal | 4d72161e67e01ee5a9e295f1f76e5d734a05cb22 | [
"MIT"
] | null | null | null | jobsp/wsgi.py | rds0751/opensource-job-portal | 4d72161e67e01ee5a9e295f1f76e5d734a05cb22 | [
"MIT"
] | null | null | null | jobsp/wsgi.py | rds0751/opensource-job-portal | 4d72161e67e01ee5a9e295f1f76e5d734a05cb22 | [
"MIT"
] | null | null | null | import os
import sys
from django.core.wsgi import get_wsgi_application
# Make the project directory importable even when the WSGI server does not
# set up sys.path. Bug fix: abspath(__file__) is the wsgi.py *file* path;
# sys.path entries must be directories, so take dirname first.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(PROJECT_DIR)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jobsp.settings_server")
# Django's WSGI callable, used by the application server.
application = get_wsgi_application()
| 21.166667 | 72 | 0.822835 |
acf1a63fd5d521ad7f598a33a941b114b7d8ab0f | 4,037 | py | Python | functions.py | oudoubleyang/TGBot | dc4e4ac2592ae313056307c31ad6764633b3c648 | [
"MIT"
] | null | null | null | functions.py | oudoubleyang/TGBot | dc4e4ac2592ae313056307c31ad6764633b3c648 | [
"MIT"
] | null | null | null | functions.py | oudoubleyang/TGBot | dc4e4ac2592ae313056307c31ad6764633b3c648 | [
"MIT"
] | null | null | null | import info
import json
import time
from title import title # noqa
from session import kuma
from tools import trimmer, trim_key
from pyrogram.enums.parse_mode import ParseMode
def debug(client, message):
    """Reply with a trimmed JSON dump of the message (or of the message it replies to)."""
    payload = json.loads(str(message))
    if message.reply_to_message:
        payload = payload['reply_to_message']
    payload = trim_key(trimmer(payload))
    return message.reply(f'`{payload}`', parse_mode=ParseMode.MARKDOWN)
def delay(client, message):
    """Measure the round trip of sending a message and report link quality."""
    started = time.perf_counter()
    probe = message.reply('Checking delay...')
    elapsed = time.perf_counter() - started
    elapsed_text = '{:.3f} ms'.format(1000 * elapsed)
    # Map the measured delay onto a coarse quality label.
    if elapsed < 0.1:
        status = 'excellent'
    elif elapsed < 0.5:
        status = 'good'
    elif elapsed < 1:
        status = 'ok'
    else:
        status = 'bad'
    return kuma.edit_message_text(
        message.chat.id, probe.id,
        f'Delay is {elapsed_text}.\nThe connectivity is {status}.')
def repeat(client, message):
    """Echo the text after the command, or repeat the replied-to message/media.

    Without an argument: repeats the quoted message (text is prefixed with
    the quoted sender's name; media is re-sent by file_id). With neither an
    argument nor a quote, the command itself is echoed back.
    """
    text = message.text
    space_at = text.find(' ')
    if space_at != -1:
        return message.reply(text[space_at + 1:])
    quoted = message.reply_to_message
    if not quoted:
        return message.reply(text)
    if quoted.text:
        sender = quoted.from_user.first_name
        if quoted.from_user.last_name:
            sender += ' ' + quoted.from_user.last_name
        return message.reply(sender + ': \n' + quoted.text)
    # Re-send whichever media type the quoted message carries.
    for kind in ('sticker', 'photo', 'animation', 'video', 'document'):
        media = getattr(quoted, kind)
        if media:
            return getattr(message, 'reply_' + kind)(media.file_id)
    return None
def private_start(client, message):
    """Send the bot's start/welcome message in a private chat."""
    return message.reply(info.start_message)
def private_help(client, message):
    """Send the help text followed by the bot's version information."""
    version_line = f'I\'m in my {info.version} ({info.channel}) version.'
    return message.reply(f'{info.help_message}\n\n{version_line}')
def private_forward(client, message):
    """Forward the text after the command to the bot creator, tagged with sender info."""
    text = message.text
    space_at = text.find(' ')
    if space_at == -1:
        return message.reply('You haven\'t type in your message!')
    sender = message.from_user
    name = sender.first_name
    if sender.last_name:
        name += ' ' + sender.last_name
    # Header line: "First Last (@username, 12345)" — username part optional.
    tag = ('@' + sender.username + ', ') if sender.username else ''
    header = name + ' (' + tag + str(sender.id) + ')'
    kuma.send_message(info.creator, header + '\n\n' + text[space_at + 1:])
    return message.reply('Message successfully sent.')
def private_get_file_id(client, message):
    """Reply with the file_id of the received media (plain text is echoed back).

    Messages from the bot itself are ignored to avoid reply loops.
    """
    if message.from_user.id == info.self_id:
        return None
    if message.text:
        return message.reply(message.text)
    for kind in ('sticker', 'photo', 'animation', 'video', 'document'):
        media = getattr(message, kind)
        if media:
            return message.reply(media.file_id)
    return message.reply('Unknown type of media.')
def private_unknown(client, message):
    """Fallback reply for unrecognized commands or messages."""
    fallback = 'I can\'t understand your message or command. You may try /help.'
    return message.reply(fallback)
| 33.090164 | 119 | 0.634382 |
acf1ab20272e3e6a68d8a7e572199a9b4941a7eb | 667 | py | Python | __init__.py | billyeatcookies/tkterminal | 8fd4b7c0fb75c85158232cf35a2918122ff0ece6 | [
"Apache-2.0"
] | null | null | null | __init__.py | billyeatcookies/tkterminal | 8fd4b7c0fb75c85158232cf35a2918122ff0ece6 | [
"Apache-2.0"
] | null | null | null | __init__.py | billyeatcookies/tkterminal | 8fd4b7c0fb75c85158232cf35a2918122ff0ece6 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Saad Mairaj
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vendor.tkterminal.terminal import Terminal
| 41.6875 | 77 | 0.721139 |
acf1ab36b11ff9957e34f2b077ee4708091d8aec | 208 | py | Python | zachsFolderBecauseHeIsTooLazyToDownloadAtom/prettify.py | Jason-Paprocki/hacknjit | e4dae18b76da4234728173eb06e939f50a7970fa | [
"MIT"
] | null | null | null | zachsFolderBecauseHeIsTooLazyToDownloadAtom/prettify.py | Jason-Paprocki/hacknjit | e4dae18b76da4234728173eb06e939f50a7970fa | [
"MIT"
] | null | null | null | zachsFolderBecauseHeIsTooLazyToDownloadAtom/prettify.py | Jason-Paprocki/hacknjit | e4dae18b76da4234728173eb06e939f50a7970fa | [
"MIT"
] | null | null | null | import json
with open("metadata_dump.json", 'r') as in_file:
with open("cleaned_metadata.json", 'w') as out_file:
parsed = json.load(in_file)
out_file.write(json.dumps(parsed, indent=4))
| 29.714286 | 56 | 0.673077 |
acf1ab6b8911fc14512f46bfc1c5b26fe7654276 | 2,703 | py | Python | vendor-local/lib/python/celery/security/certificate.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 4 | 2015-05-08T16:58:53.000Z | 2019-09-06T05:30:59.000Z | vendor-local/lib/python/celery/security/certificate.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 2 | 2019-02-17T17:44:53.000Z | 2019-03-28T03:54:39.000Z | vendor-local/lib/python/celery/security/certificate.py | Mozilla-GitHub-Standards/54c69db06ef83bda60e995a6c34ecfd168ca028994e40ce817295415bb409f0c | f80e7c0cff97a1e9b301aa04015db983c7645778 | [
"BSD-3-Clause"
] | 7 | 2015-05-21T15:38:29.000Z | 2019-10-28T23:39:06.000Z | from __future__ import absolute_import
from __future__ import with_statement
import glob
import os
import sys
try:
from OpenSSL import crypto
except ImportError:
crypto = None # noqa
from ..exceptions import SecurityError
class Certificate(object):
    """X.509 certificate wrapped around a pyOpenSSL certificate object.

    Python 2 only: uses the ``except E, exc`` and three-part ``raise``
    syntax to re-raise SecurityError with the original traceback.
    """
    def __init__(self, cert):
        # cert: PEM-encoded certificate string.
        assert crypto is not None
        try:
            self._cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
        except crypto.Error, exc:
            raise SecurityError, SecurityError(
                "Invalid certificate: %r" % (exc, )), sys.exc_info()[2]
    def has_expired(self):
        """Check if the certificate has expired."""
        return self._cert.has_expired()
    def get_serial_number(self):
        """Returns the certificate's serial number."""
        return self._cert.get_serial_number()
    def get_issuer(self):
        """Returns issuer (CA) as a space-joined string of component values."""
        return ' '.join(x[1] for x in
                        self._cert.get_issuer().get_components())
    def get_id(self):
        """Serial number/issuer pair uniquely identifies a certificate."""
        return "%s %s" % (self.get_issuer(), self.get_serial_number())
    def verify(self, data, signature, digest):
        """Verifies the signature for string containing data."""
        try:
            crypto.verify(self._cert, signature, data, digest)
        except crypto.Error, exc:
            raise SecurityError, SecurityError(
                "Bad signature: %r" % (exc, )), sys.exc_info()[2]
class CertStore(object):
    """Base class for certificate stores.

    Certificates are keyed by their id string ("<issuer> <serial>").
    """
    def __init__(self):
        # Maps certificate id -> Certificate.
        self._certs = {}
    def itercerts(self):
        """An iterator over the stored certificates."""
        # itervalues: Python 2 API, kept for compatibility with this module.
        for c in self._certs.itervalues():
            yield c
    def __getitem__(self, id):
        """Get certificate by id; raises SecurityError when unknown."""
        try:
            return self._certs[id]
        except KeyError:
            raise SecurityError("Unknown certificate: %r" % (id, ))
    def add_cert(self, cert):
        """Register *cert*, rejecting duplicates by certificate id."""
        cert_id = cert.get_id()
        if cert_id in self._certs:
            # Bug fix: the duplicate message previously formatted the builtin
            # ``id`` function instead of the certificate's id.
            raise SecurityError("Duplicate certificate: %r" % (cert_id, ))
        self._certs[cert_id] = cert
class FSCertStore(CertStore):
    """Certificate store populated from PEM files on the file system.

    *path* may be a directory (all files in it are loaded) or a glob
    pattern / single file. Loading an expired certificate raises
    SecurityError immediately.
    """
    def __init__(self, path):
        CertStore.__init__(self)
        if os.path.isdir(path):
            path = os.path.join(path, '*')
        for cert_path in glob.glob(path):
            with open(cert_path) as cert_file:
                cert = Certificate(cert_file.read())
            if cert.has_expired():
                raise SecurityError(
                    "Expired certificate: %r" % (cert.get_id(), ))
            self.add_cert(cert)
| 29.703297 | 75 | 0.589345 |
acf1ab6cd09d4d2bd6b1836d33550914d5dc6056 | 4,100 | py | Python | tests/test_pipeline/components/feature_preprocessing/test_nystroem_sampler.py | jianzhnie/AutoTabular | fb407300adf97532a26d33f7442d2a606fa30512 | [
"Apache-2.0"
] | 48 | 2021-09-06T08:09:26.000Z | 2022-03-28T13:02:54.000Z | tests/test_pipeline/components/feature_preprocessing/test_nystroem_sampler.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | null | null | null | tests/test_pipeline/components/feature_preprocessing/test_nystroem_sampler.py | Fanxingye/Autotabular | d630c78290a52f8c73885afb16884e18135c34f6 | [
"Apache-2.0"
] | 7 | 2021-09-23T07:28:46.000Z | 2021-10-02T21:15:18.000Z | import unittest
import numpy as np
import sklearn.preprocessing
from autotabular.pipeline.components.feature_preprocessing.nystroem_sampler import Nystroem
from autotabular.pipeline.util import _test_preprocessing, get_dataset
class NystroemComponentTest(unittest.TestCase):
    # Tests for the Nystroem kernel-approximation preprocessing component.
    def test_default_configuration(self):
        """Default-configured Nystroem keeps row count and yields 100 components."""
        transformation, original = _test_preprocessing(Nystroem)
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 100)
        self.assertFalse((transformation == 0).all())
        # Custom preprocessing test to check if clipping to zero works
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
        original_X_train = X_train.copy()
        ss = sklearn.preprocessing.StandardScaler()
        X_train = ss.fit_transform(X_train)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(
            random_state=1,
            **{
                hp_name: default[hp_name]
                for hp_name in default if default[hp_name] is not None
            },
        )
        transformer = preprocessor.fit(X_train, Y_train)
        transformation, original = transformer.transform(
            X_train), original_X_train
        self.assertEqual(transformation.shape[0], original.shape[0])
        self.assertEqual(transformation.shape[1], 100)
    # Disabled (leading underscore) pending the dtype issue noted below.
    # @unittest.skip("Right now, the RBFSampler returns a float64 array!")
    def _test_preprocessing_dtype(self):
        """Check that input dtype (float32/float64) is preserved, dense and sparse."""
        # Dense
        # np.float32
        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        self.assertEqual(X_train.dtype, np.float32)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(
            random_state=1,
            **{
                hp.hyperparameter.name: hp.value
                for hp in default.values.values()
            },
        )
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float32)
        # np.float64
        X_train, Y_train, X_test, Y_test = get_dataset('iris')
        X_train = X_train.astype(np.float64)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(
            random_state=1,
            **{
                hp.hyperparameter.name: hp.value
                for hp in default.values.values()
            },
        )
        preprocessor.fit(X_train, Y_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float64)
        # Sparse
        # np.float32
        X_train, Y_train, X_test, Y_test = get_dataset(
            'iris', make_sparse=True)
        self.assertEqual(X_train.dtype, np.float32)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(
            random_state=1,
            **{
                hp.hyperparameter.name: hp.value
                for hp in default.values.values()
            },
        )
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float32)
        # np.float64
        X_train, Y_train, X_test, Y_test = get_dataset(
            'iris', make_sparse=True)
        X_train = X_train.astype(np.float64)
        configuration_space = Nystroem.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        preprocessor = Nystroem(
            random_state=1,
            **{
                hp.hyperparameter.name: hp.value
                for hp in default.values.values()
            },
        )
        preprocessor.fit(X_train)
        Xt = preprocessor.transform(X_train)
        self.assertEqual(Xt.dtype, np.float64)
| 37.614679 | 91 | 0.637317 |
acf1ac0ed370e6be2eb2c62845b9e5e46eb0c5bd | 5,360 | py | Python | tests/control_strategies/test_month_of_modes_of_operation.py | Jipje/local_smart_grid_simulation | ca11bcf349c51bd24e0a8dffd21ca82e35c8255a | [
"MIT"
] | null | null | null | tests/control_strategies/test_month_of_modes_of_operation.py | Jipje/local_smart_grid_simulation | ca11bcf349c51bd24e0a8dffd21ca82e35c8255a | [
"MIT"
] | null | null | null | tests/control_strategies/test_month_of_modes_of_operation.py | Jipje/local_smart_grid_simulation | ca11bcf349c51bd24e0a8dffd21ca82e35c8255a | [
"MIT"
] | null | null | null | import unittest
import os
import datetime as dt
from unittest.mock import MagicMock
import dateutil.tz
from helper_objects.strategies.CsvStrategy import CsvStrategy
from network_objects.Battery import Battery
from network_objects.control_strategies.ModesOfOperationController import ModesOfOperationController
from network_objects.control_strategies.MonthOfModesOfOperationController import MonthOfModesOfOperationController
from network_objects.control_strategies.SolveCongestionAndLimitedChargeControlTower import \
SolveCongestionAndLimitedChargeControlTower
from network_objects.control_strategies.StrategyWithLimitedChargeCapacityControlTower import \
StrategyWithLimitedChargeCapacityControlTower
ams = dateutil.tz.gettz('Europe/Amsterdam')
utc = dateutil.tz.tzutc()
class TestMonthOfModesOfOperationController(unittest.TestCase):
    """Tests for MonthOfModesOfOperationController's per-month dispatching."""

    @classmethod
    def setUpClass(cls) -> None:
        """Resolve strategy CSV paths, probing both test-run layouts."""
        strategy_one_path = '..{0}..{0}data{0}strategies{0}cleaner_simplified_passive_imbalance_1.csv'.format(os.path.sep)
        greedy_discharge_path = '..{0}..{0}data{0}strategies{0}greedy_discharge_60.csv'.format(os.path.sep)
        always_discharge_path = '..{0}..{0}data{0}strategies{0}always_discharge.csv'.format(os.path.sep)
        try:
            # Probe the two-levels-up layout; on failure fall back to the
            # one-level-up layout used by a different working directory.
            CsvStrategy('Rhino strategy 1', strategy_csv=strategy_one_path)
        except FileNotFoundError:
            strategy_one_path = '..{0}data{0}strategies{0}cleaner_simplified_passive_imbalance_1.csv'.format(os.path.sep)
            greedy_discharge_path = '..{0}data{0}strategies{0}greedy_discharge_60.csv'.format(os.path.sep)
            always_discharge_path = '..{0}data{0}strategies{0}always_discharge.csv'.format(os.path.sep)
        cls.strategy_one_path = strategy_one_path
        cls.greedy_discharge_path = greedy_discharge_path
        cls.always_discharge_path = always_discharge_path

    @staticmethod
    def _build_tower(battery, n_controllers):
        """Create a month controller with n_controllers mocked sub-controllers.

        Mock i answers ('CHARGE', i * 500) from determine_step. Returns the
        tower and the list of mocks so tests can assert on dispatching.
        This replaces the identical setup loop previously duplicated in
        every test method.
        """
        month_names = ['January', 'February', 'March', 'April', 'May', 'June',
                       'July', 'August', 'September', 'October', 'November', 'December']
        tower = MonthOfModesOfOperationController('test_base', battery)
        mocks = []
        for i in range(n_controllers):
            moo = ModesOfOperationController(month_names[i], battery)
            moo.determine_step = MagicMock(name='determine_step')
            moo.determine_step.return_value = 'CHARGE', i * 500
            mocks.append(moo)
            tower.add_controller(moo)
        return tower, mocks

    def test_base_workings(self):
        """Steps must delegate to the controller of the timestamp's month
        (May -> list index 4, June -> 5, July -> 6)."""
        rhino = Battery('test_simple_congestion', 7500, 12000, starting_soc_kwh=3000, verbose_lvl=4)
        tower, month_moo = self._build_tower(rhino, 12)
        tower.determine_step([-200, -200, 12000, dt.datetime(2021, 5, 6, 1, 30, tzinfo=utc)], [0, 1, 2, 3])
        month_moo[4].determine_step.assert_called_once_with([-200, -200, 12000, dt.datetime(2021, 5, 6, 1, 30, tzinfo=utc)], [0, 1, 2, 3])
        tower.determine_step([500, 500, 8000, dt.datetime(2021, 6, 6, 1, 31, tzinfo=utc)], [0, 1, 2, 3])
        month_moo[5].determine_step.assert_called_once_with([500, 500, 8000, dt.datetime(2021, 6, 6, 1, 31, tzinfo=utc)], [0, 1, 2, 3])
        tower.take_step([-200, -200, 6000, dt.datetime(2021, 7, 6, 1, 32, tzinfo=utc)], [0, 1, 2, 3])
        month_moo[6].determine_step.assert_called_once_with([-200, -200, 6000, dt.datetime(2021, 7, 6, 1, 32, tzinfo=utc)], [0, 1, 2, 3])
        # One step driven by July's mock ('CHARGE', 3000) ends with the
        # battery at 3045 kWh (step conversion applied by Battery).
        self.assertEqual(3045, rhino.state_of_charge_kwh)

    def test_faulty_initialization(self):
        """Adding None as a sub-controller must raise AttributeError."""
        rhino = Battery('test_simple_congestion', 7500, 12000, starting_soc_kwh=3000, verbose_lvl=4)
        tower, _ = self._build_tower(rhino, 12)
        self.assertRaises(AttributeError, tower.add_controller, None)

    def test_not_ready_yet(self):
        """With fewer than 12 monthly controllers, determine_step must fail."""
        rhino = Battery('test_simple_congestion', 7500, 12000, starting_soc_kwh=3000, verbose_lvl=4)
        tower, _ = self._build_tower(rhino, 10)
        self.assertRaises(AttributeError, tower.determine_step, [], [])
| 54.141414 | 143 | 0.704478 |
acf1ac6c6d9a52bbd7895bfe3095d878367489c3 | 8,008 | py | Python | contrib/seeds/makeseeds.py | poriun/PoriunCoin | 7d675d4d163702eb7072cafda52e7a58ef6e169c | [
"MIT"
] | 2 | 2021-01-15T14:10:55.000Z | 2021-05-07T04:23:11.000Z | contrib/seeds/makeseeds.py | poriun/PoriunCoin | 7d675d4d163702eb7072cafda52e7a58ef6e169c | [
"MIT"
] | null | null | null | contrib/seeds/makeseeds.py | poriun/PoriunCoin | 7d675d4d163702eb7072cafda52e7a58ef6e169c | [
"MIT"
] | 1 | 2022-02-23T21:59:34.000Z | 2022-02-23T21:59:34.000Z | #!/usr/bin/env python3
# Copyright (c) 2013-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
# Per-network cap on emitted seeds; passed to filterbyasn() as max_per_net
# in main().
NSEEDS=512
# At most this many seeds may come from a single autonomous system.
MAX_SEEDS_PER_ASN=4
# Nodes reporting a block height below this are filtered out in main().
MIN_BLOCKS = 1530000
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
with open("suspicious_hosts.txt", mode="r", encoding="utf-8") as f:
    SUSPICIOUS_HOSTS = {s.strip() for s in f if s.strip()}
# Address formats produced by the seeder dump: "a.b.c.d:port",
# "[ipv6]:port" and "<16-char-base32>.onion:port".
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
# User agents considered recent enough to be listed as seeds.
PATTERN_AGENT = re.compile(
    r"^/PoriunCore:("
    r"4.0.(0|1|2|99|99.1|99.2)|"
    r"4.1.(0|99)"
    r")")


def parseline(line):
    """Parse one line of the seeder's dump output.

    Returns a dict describing the node (net, ip, port, ipnum, uptime,
    lastsuccess, version, agent, service, blocks, sortkey) or None when
    the line is malformed, the address is invalid (0.0.0.0 / ::), or the
    seeder flagged the node as bad.
    """
    sline = line.split()
    # A complete record carries at least 12 whitespace-separated fields:
    # field 11 (the user agent) is read unconditionally below, so shorter
    # lines would raise IndexError instead of being skipped.
    # BUGFIX: was `< 11`, which admitted 11-field lines that crashed later.
    if len(sline) < 12:
        return None
    m = PATTERN_IPV4.match(sline[0])
    sortkey = None
    ip = None
    if m is None:
        m = PATTERN_IPV6.match(sline[0])
        if m is None:
            m = PATTERN_ONION.match(sline[0])
            if m is None:
                return None
            else:
                net = 'onion'
                ipstr = sortkey = m.group(1)
                port = int(m.group(2))
        else:
            net = 'ipv6'
            if m.group(1) in ['::']:  # Not interested in localhost
                return None
            ipstr = m.group(1)
            sortkey = ipstr  # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
            port = int(m.group(2))
    else:
        # Do IPv4 sanity check: each octet in range, and reject 0.0.0.0.
        ip = 0
        for i in range(0, 4):
            if int(m.group(i + 2)) < 0 or int(m.group(i + 2)) > 255:
                return None
            ip = ip + (int(m.group(i + 2)) << (8 * (3 - i)))
        if ip == 0:
            return None
        net = 'ipv4'
        sortkey = ip
        ipstr = m.group(1)
        port = int(m.group(6))
    # Skip entries the seeder flagged as not "good" (field 1).
    # BUGFIX: the original compared the *string* field to the int 0, which
    # never matched, so bad results were not filtered at all.
    if int(sline[1]) == 0:
        return None
    # Extract uptime % (strip the trailing '%').
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent. The agent is quoted; an agent containing one
    # space is split across fields 11 and 12 and must be rejoined.
    # BUGFIX: was `> 11`, which sent every normal 12-field line into the
    # two-field branch and raised IndexError on sline[12].
    if len(sline) > 12:
        agent = sline[11][1:] + sline[12][:-1]
    else:
        agent = sline[11][1:-1]
    # Extract service flags (hex bitmask).
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'net': net,
        'ip': ipstr,
        'port': port,
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
        'sortkey': sortkey,
    }
def dedup(ips):
    """Remove duplicate (address, port) entries from *ips*.

    Later entries overwrite earlier ones for the same key, and the
    first-seen key order is preserved (plain dict insertion order).
    """
    unique = {(entry['ip'], entry['port']): entry for entry in ips}
    return list(unique.values())
def filtermultiport(ips):
    """Drop hosts that appear with more than one entry per 'sortkey'.

    Entries are grouped by their sort key (the host part of the address);
    any group with multiple members is discarded entirely, keeping only
    hosts that expose a single node.
    """
    by_host = collections.defaultdict(list)
    for entry in ips:
        by_host[entry['sortkey']].append(entry)
    return [group[0] for group in by_host.values() if len(group) == 1]
def lookup_asn(net, ip):
    '''
    Look up the asn for an IP (4 or 6) address by querying cymru.com, or None
    if it could not be found.
    '''
    try:
        if net == 'ipv4':
            ipaddr = ip
            prefix = '.origin'
        else:  # http://www.team-cymru.com/IP-ASN-mapping.html
            # Expand the first four 16-bit groups into single hex digits,
            # e.g. 2001:4860:b002:23 -> 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
            digits = []
            for group in ip.split(':')[:4]:
                digits.extend(group.zfill(4))
            ipaddr = '.'.join(digits)
            prefix = '.origin6'
        # Reverse the dotted form and query the cymru TXT record; the ASN
        # is the first token inside the quoted answer.
        query = '.'.join(reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com'
        answer = dns.resolver.query(query, 'TXT').response.answer
        first_txt = [record.to_text() for record in answer][0]
        return int(first_txt.split('\"')[1].split(' ')[0])
    except Exception:
        # Best-effort lookup: any failure (DNS error, parse error) is
        # reported and treated as "unknown ASN".
        sys.stderr.write('ERR: Could not resolve ASN for "' + ip + '"\n')
        return None
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_per_net):
    """Limit results per origin AS and per network type.

    IPv4/IPv6 entries are capped at max_per_asn per autonomous system and
    max_per_net per network; entries whose ASN cannot be resolved are
    dropped. Onion entries skip the ASN lookup and are simply truncated
    to max_per_net, appended after the clearnet results.
    """
    clearnet = [entry for entry in ips if entry['net'] in ('ipv4', 'ipv6')]
    onions = [entry for entry in ips if entry['net'] == 'onion']

    result = []
    per_net = collections.defaultdict(int)
    per_asn = collections.defaultdict(int)
    for entry in clearnet:
        if per_net[entry['net']] == max_per_net:
            continue
        asn = lookup_asn(entry['net'], entry['ip'])
        if asn is None or per_asn[asn] == max_per_asn:
            continue
        per_asn[asn] += 1
        per_net[entry['net']] += 1
        result.append(entry)
    # Onions are exempt from ASN mapping; just cap the count.
    result.extend(onions[:max_per_net])
    return result
def ip_stats(ips):
    """Format counts of ipv4/ipv6/onion entries as three aligned columns.

    None entries (unparseable lines) are ignored; missing networks count
    as zero.
    """
    counts = collections.Counter(
        entry['net'] for entry in ips if entry is not None)
    return '%6d %6d %6d' % (counts['ipv4'], counts['ipv6'], counts['onion'])
def main():
    """Read a seeder dump on stdin, filter it, and print seeds on stdout.

    Progress for each filtering pass is reported on stderr as a table of
    per-network counts.
    """
    peers = [parseline(line) for line in sys.stdin.readlines()]

    def report(stage):
        # One progress row on stderr: per-net counts, then the pass name.
        print('%s %s' % (ip_stats(peers), stage), file=sys.stderr)

    print('\x1b[7m IPv4 IPv6 Onion Pass \x1b[0m', file=sys.stderr)
    report('Initial')
    # Skip entries with invalid address.
    peers = [p for p in peers if p is not None]
    report('Skip entries with invalid address')
    # Skip duplicates (in case multiple seeds files were concatenated)
    peers = dedup(peers)
    report('After removing duplicates')
    # Skip entries from suspicious hosts.
    peers = [p for p in peers if p['ip'] not in SUSPICIOUS_HOSTS]
    report('Skip entries from suspicious hosts')
    # Enforce minimal number of blocks.
    peers = [p for p in peers if p['blocks'] >= MIN_BLOCKS]
    report('Enforce minimal number of blocks')
    # Require service bit 1.
    peers = [p for p in peers if (p['service'] & 1) == 1]
    report('Require service bit 1')
    # Require at least 50% 30-day uptime for clearnet, 10% for onion.
    req_uptime = {
        'ipv4': 50,
        'ipv6': 50,
        'onion': 10,
    }
    peers = [p for p in peers if p['uptime'] > req_uptime[p['net']]]
    report('Require minimum uptime')
    # Require a known and recent user agent.
    peers = [p for p in peers if PATTERN_AGENT.match(p['agent'])]
    report('Require a known and recent user agent')
    # Sort by availability (and use last success as tie breaker)
    peers.sort(key=lambda p: (p['uptime'], p['lastsuccess'], p['ip']), reverse=True)
    # Filter out hosts with multiple poriun ports, these are likely abusive
    peers = filtermultiport(peers)
    report('Filter out hosts with multiple poriun ports')
    # Look up ASNs and limit results, both per ASN and globally.
    peers = filterbyasn(peers, MAX_SEEDS_PER_ASN, NSEEDS)
    report('Look up ASNs and limit results per ASN and per net')
    # Sort the results by IP address (for deterministic output).
    peers.sort(key=lambda p: (p['net'], p['sortkey']))
    for p in peers:
        if p['net'] == 'ipv6':
            print('[%s]:%i' % (p['ip'], p['port']))
        else:
            print('%s:%i' % (p['ip'], p['port']))


if __name__ == '__main__':
    main()
| 35.433628 | 116 | 0.568681 |
acf1acf610834e09eb0768e2a525fae4d9e658c6 | 25,575 | py | Python | Tests/Infobanner_2/main.py | yupasik/AT | 8f4f6535cef8b7714cb325a32dadf36f6f8664ee | [
"Apache-2.0"
] | null | null | null | Tests/Infobanner_2/main.py | yupasik/AT | 8f4f6535cef8b7714cb325a32dadf36f6f8664ee | [
"Apache-2.0"
] | null | null | null | Tests/Infobanner_2/main.py | yupasik/AT | 8f4f6535cef8b7714cb325a32dadf36f6f8664ee | [
"Apache-2.0"
] | null | null | null | # Test name = Infobanner_2
# Script dir = R:\Stingray\Tests\Infobanner_2\main\main.py
from time import sleep
from device import handler, updateTestResult
import RC
import UART
import DO
import GRAB
import MOD
import os
from DO import status
import OPER
def runTest():
status("active")
TestName = "Infobanner_2"
ScriptName = "main"
ScriptIndex = "1"
Grabber = DO.grab_define()
platform = DO.load_platform()
Modulation = "DVBS"
FEC = "3/4"
SR = "27500000"
Stream = "\\X_0000_00000_MUX_32000_EPG_Software_20130328a.ts"
Stream_1 = "\\DRE Services\\X_0000_00000_MUX_38000_DRE4_Infocas_1.ts"
Stream_2 = "\\DRE Services\\X_0000_00000_MUX_38000_DRE4_TVMail_1.ts"
Frequency = 1476
Modulator = "1"
COM = "COM7"
settings = [ScriptName, ScriptIndex, Grabber, Modulation, FEC, SR, Stream, Frequency, Modulator, COM]
DO.save_settings(settings)
GRAB.start_capture()
MOD.stop(Modulator)
# macros
searching_from_wizard_general_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_E501 = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_E501 = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_general_english_ALL = ["up 2 3400", "right 1 1000", "down 2 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "ok 1 5000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_centre_english_ALL = ["up 3 3400", "right 1 1000", "down 3 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200", "down 1 1000", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
searching_from_wizard_south_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200", "down", "down", "ok 1 15000", "ok 1 10000", "exit 2 3000"]
load_regions_E501 = ["ok 1 3400", "ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_E501 = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
load_regions_ALL = ["ok 1 3400", "ok 1 3400", "right 1 3400", "ok 1 3400", "ok 1 22200"]
load_regions_english_ALL = ["up 2 2400", "right 1 1000", "down 2 2400", "ok 1 3400", "ok 1 3400", "right", "ok 1 3400", "ok 1 22200"]
############################ TestCase 1 ##########################################
testcase = 1
status("active")
MOD.play_stream(Modulation, FEC, SR, Stream, Frequency, Modulator)
UART.default_settings()
RC.push(["exit 1 3000", "red 1 3000"]) # RC macros from remote_control.ini file
GRAB.compare(testcase)
############################ TestCase 2 ##########################################
testcase = 2
status("active")
UART.default_settings()
OPER.search()
MOD.stop(Modulator)
MOD.play(Modulator)
RC.push(["exit 1 3000", "red 1 3000"])
GRAB.compare(testcase)
############################ TestCase 3 ##########################################
testcase = 3
status("active")
RC.push(["exit 1 3000", "green 1 3000"])
GRAB.compare(testcase)
############################ TestCase 4 ##########################################
testcase = 4
status("active")
RC.push(["exit 1 3000", "6 1 5000", "yellow 1 3000"])
GRAB.compare(testcase)
############################ TestCase 5 ##########################################
testcase = 5
status("active")
RC.push(["exit 2 5000", "0 1 5000", "play/pause 1 50000", "play/pause 1 5000", "red 1 5000"])
GRAB.compare(testcase)
############################ TestCase 6 ##########################################
testcase = 6
status("active")
RC.push(["exit 1 5000", "green 1 5000"])
GRAB.compare(testcase)
############################ TestCase 7 ##########################################
testcase = 7
status("active")
RC.push(["exit 1 5000", "yellow 1 5000"])
GRAB.compare(testcase)
############################ TestCase 8 ##########################################
testcase = 8
status("active")
RC.push(["stop 1 5000", "exit 2 5000", "red 1 5000", "up"])
GRAB.compare(testcase)
############################ TestCase 9 ##########################################
testcase = 9
status("active")
RC.push(["down 1 3000"])
GRAB.compare(testcase)
############################ TestCase 10 ##########################################
testcase = 10
status("active")
RC.push(["up 1 3000", "left 1 3000"])
GRAB.compare(testcase)
############################ TestCase 11 ##########################################
testcase = 11
status("active")
RC.push(["right 2 3000"])
GRAB.compare(testcase)
############################ TestCase 12 ##########################################
testcase = 12
status("active")
RC.push(["red 1 3000"])
GRAB.compare(testcase)
############################ TestCase 13 ##########################################
testcase = 13
status("active")
RC.push(["red 1 3000", "mute"])
GRAB.compare(testcase)
############################ TestCase 14 ##########################################
testcase = 14
status("active")
RC.push(["exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 15 ##########################################
testcase = 15
status("active")
RC.push(["red 1 3000", "standby 1 5000", "standby 1 10000"])
GRAB.compare(testcase)
############################ TestCase 16 ##########################################
testcase = 16
status("active")
RC.push(["exit 1 3000", "red 1 5000", "menu 1 3000"])
GRAB.compare(testcase)
############################ TestCase 17 ##########################################
testcase = 17
status("active")
RC.push(["exit 1 3000", "red 1 5000", "VolUp 1 2500"])
GRAB.compare(testcase)
############################ TestCase 18 ##########################################
testcase = 18
status("active")
RC.push(["VolDown 1 2500"])
GRAB.compare(testcase)
############################ TestCase 19 ##########################################
testcase = 19
status("active")
RC.push(["ChUp 1 2500"])
GRAB.compare(testcase)
############################ TestCase 20 ##########################################
testcase = 20
status("active")
RC.push(["ChDown 1 2500"])
GRAB.compare(testcase)
############################ TestCase 21 ##########################################
testcase = 21
status("active")
RC.push(["last 1 3000"])
GRAB.compare(testcase)
############################ TestCase 22 ##########################################
testcase = 22
status("active")
RC.push(["red 1 5000", "1 1 3000"])
GRAB.compare(testcase)
############################ TestCase 23 ##########################################
testcase = 23
status("active")
RC.push(["left 1 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 24 ##########################################
testcase = 24
status("active")
RC.push(["green 1 3000"])
GRAB.compare(testcase)
############################ TestCase 25 ##########################################
testcase = 25
status("active")
RC.push(["exit 1 3000", "6 1 5000", "red 1 3000", "yellow 1 5000"])
GRAB.compare(testcase)
############################ TestCase 26 ##########################################
testcase = 26
status("active")
RC.push(["blue 1 3000"])
GRAB.compare(testcase)
############################ TestCase 27 ##########################################
testcase = 27
status("active")
RC.push(["exit 2 3000", "rec 1 3000"])
GRAB.compare(testcase)
############################ TestCase 28 ##########################################
testcase = 28
status("active")
RC.push(["red 1 3000", "rec 1 3000"])
GRAB.compare(testcase)
############################ TestCase 29 ##########################################
testcase = 29
status("active")
RC.push(["left 1 3000", "ok 1 3000", "exit 2 5000", "play/pause 1 40000"])
GRAB.compare(testcase)
############################ TestCase 30 ##########################################
testcase = 30
status("active")
RC.push(["red 1 5000"])
GRAB.compare(testcase)
############################ TestCase 31 ##########################################
testcase = 31
status("active")
RC.push(["play/pause 1 5000"])
GRAB.compare(testcase)
############################ TestCase 32 ##########################################
testcase = 32
status("active")
GRAB.compare(testcase)
############################ TestCase 33 ##########################################
testcase = 33
status("active")
RC.push(["stop 1 5000"])
GRAB.compare(testcase)
############################ TestCase 34 ##########################################
testcase = 34
status("active")
RC.push(["exit 2 5000", "0 1 5000", "red 1 3000", "down 1 3000", "up 1 3000"])
GRAB.compare(testcase)
############################ TestCase 35 ##########################################
testcase = 35
status("active")
RC.push(["down 2 3000"])
GRAB.compare(testcase)
############################ TestCase 36 ##########################################
testcase = 36
status("active")
RC.push(["exit 2 5000", "6 1 5000", "red 1 3000", "down 2 3000"])
GRAB.compare(testcase)
############################ TestCase 37 ##########################################
testcase = 37
status("active")
RC.push(["left 1 3000"])
GRAB.compare(testcase)
############################ TestCase 38 ##########################################
testcase = 38
status("active")
RC.push(["right 1 3000"])
GRAB.compare(testcase)
############################ TestCase 39 ##########################################
testcase = 39
status("active")
RC.push(["ChUp 1 3000"])
GRAB.compare(testcase)
############################ TestCase 40 ##########################################
testcase = 40
status("active")
RC.push(["ChDown 1 3000"])
GRAB.compare(testcase)
############################ TestCase 41 ##########################################
testcase = 41
status("active")
RC.push(["exit 2 5000", "6 1 5000", "red 1 3000", "down 1 3000", "right 3 2000", "ok 1 5000"])
GRAB.compare(testcase)
############################ TestCase 42 ##########################################
testcase = 42
status("active")
RC.push(["exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 43 ##########################################
testcase = 43
status("active")
RC.push(["up 1 3000", "left 1 3000", "ok 1 5000", "down 1 2000", "right 1 2000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 44 ##########################################
testcase = 44
status("active")
GRAB.compare(testcase)
############################ TestCase 45 ##########################################
testcase = 45
status("active")
RC.push(["down 2 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 46 ##########################################
testcase = 46
status("active")
RC.push(["exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 47 ##########################################
testcase = 47
status("active")
RC.push(["exit 2 3000", "6 1 5000", "yellow 1 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 48 ##########################################
testcase = 48
status("active")
GRAB.compare(testcase)
############################ TestCase 49 ##########################################
testcase = 49
status("active")
RC.push(["down 2 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 50 ##########################################
testcase = 50
status("active")
RC.push(["exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 51 ##########################################
testcase = 51
status("active")
RC.push(["exit 1 6000"])
GRAB.compare(testcase)
############################ TestCase 52 ##########################################
testcase = 52
status("active")
RC.push(["yellow 1 3000", "ok 1 3000", "up 2 3000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 53 ##########################################
testcase = 53
status("active")
RC.push(["exit 2 6000"])
GRAB.compare(testcase)
############################ TestCase 54 ##########################################
testcase = 54
status("active")
RC.push(["0 1 6000", "red 1 3000", "down 2 3000"])
GRAB.compare(testcase)
############################ TestCase 55 ##########################################
testcase = 55
status("active")
RC.push(["ChDown 1 3000"])
GRAB.compare(testcase)
############################ TestCase 56 ##########################################
testcase = 56
status("active")
RC.push(["ChDown 12 3000"])
GRAB.compare(testcase)
############################ TestCase 57 ##########################################
testcase = 57
status("active")
RC.push(["ChUp 1 3000"])
GRAB.compare(testcase)
############################ TestCase 58 ##########################################
testcase = 58
status("active")
RC.push(["ChUp 12 3000"])
GRAB.compare(testcase)
############################ TestCase 59 ##########################################
testcase = 59
status("active")
RC.push(["down 1 3000"])
GRAB.compare(testcase)
############################ TestCase 60 ##########################################
testcase = 60
status("active")
RC.push(["down 12 3000"])
GRAB.compare(testcase)
############################ TestCase 61 ##########################################
testcase = 61
status("active")
RC.push(["up 1 3000"])
GRAB.compare(testcase)
############################ TestCase 62 ##########################################
testcase = 62
status("active")
RC.push(["up 12 3000"])
GRAB.compare(testcase)
############################ TestCase 63 ##########################################
testcase = 63
status("active")
RC.push(["exit 1 3000", "ok 1 3000", "down 1 2000", "ok 1 3500"])
GRAB.compare(testcase)
############################ TestCase 64 ##########################################
testcase = 64
status("active")
RC.push(["id 1 3000", "exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 65 ##########################################
testcase = 65
status("active")
RC.push(["0 1 3500"])
GRAB.compare(testcase)
############################ TestCase 66 ##########################################
testcase = 66
status("active")
RC.push(["6 1 3500"])
GRAB.compare(testcase)
############################ TestCase 67 ##########################################
testcase = 67
status("active")
RC.push(["0 1 4000"])
GRAB.compare(testcase)
############################ TestCase 68 ##########################################
testcase = 68
status("active")
RC.push(["red 1 3000", "exit 1 1500"])
GRAB.compare(testcase)
############################ TestCase 69 ##########################################
testcase = 69
status("active")
RC.push(["red 1 3000", "right 1 2000", "ok 1 3000", "exit 1 1500"])
GRAB.compare(testcase)
############################ TestCase 70 ##########################################
testcase = 70
status("active")
RC.push(["red 1 3000", "left 3 2000"])
GRAB.compare(testcase)
############################ TestCase 71 ##########################################
testcase = 71
status("active")
RC.push(["ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 72 ##########################################
testcase = 72
status("active")
RC.push(["left 1 2000"])
GRAB.compare(testcase)
############################ TestCase 73 ##########################################
testcase = 73
status("active")
RC.push(["exit 1 3000", "0 1 5000", "red 1 5000"])
sleep(60)
GRAB.compare(testcase)
############################ TestCase 74 ##########################################
testcase = 74
status("active")
UART.reboot()
RC.push(["exit 1 3000", "red 1 5000", "right 1 2000"])
GRAB.compare(testcase)
############################ TestCase 75 ##########################################
testcase = 75
status("active")
RC.push(["ok 1 5000"])
GRAB.compare(testcase)
############################ TestCase 76 ##########################################
testcase = 76
status("active")
sleep(120)
GRAB.compare(testcase)
############################ TestCase 77 ##########################################
testcase = 77
status("active")
RC.push(["right 1 3000"])
GRAB.compare(testcase)
############################ TestCase 78 ##########################################
testcase = 78
status("active")
RC.push(["ok 1 5000"])
GRAB.compare(testcase)
############################ TestCase 79 ##########################################
testcase = 79
status("active")
RC.push(["right 3 5000"])
GRAB.compare(testcase)
############################ TestCase 80 ##########################################
testcase = 80
status("active")
GRAB.compare(testcase)
############################ TestCase 81 ##########################################
testcase = 81
status("manual")
GRAB.compare(testcase)
############################ TestCase 82 ##########################################
testcase = 82
status("active")
RC.push(["ok 1 5000"])
GRAB.compare(testcase)
############################ TestCase 83 ##########################################
testcase = 83
status("active")
RC.push(["right 1 5000"])
sleep(120)
GRAB.compare(testcase)
############################ TestCase 84 ##########################################
testcase = 84
status("active")
RC.push(["exit 2 3000", "0 1 5000", "red 1 3000"])
GRAB.compare(testcase)
############################ TestCase 85 ##########################################
testcase = 85
status("active")
GRAB.compare(testcase)
############################ TestCase 86 ##########################################
testcase = 86
status("manual")
GRAB.compare(testcase)
############################ TestCase 87 ##########################################
testcase = 87
status("active")
RC.push(["exit 2 3000", "red 1 5000", "play/pause 1 5000"])
GRAB.compare(testcase)
############################ TestCase 88 ##########################################
testcase = 88
status("active")
RC.push(["stop 1 5000", "rec 1 5000"])
GRAB.compare(testcase)
############################ TestCase 89 ##########################################
testcase = 89
status("active")
RC.push(["stop 1 10000", "left 1 2000", "ok 1 3000"])
GRAB.compare(testcase)
############################ TestCase 90 ##########################################
testcase = 90
status("active")
GRAB.compare(testcase)
############################ TestCase 91 ##########################################
testcase = 91
status("active")
GRAB.compare(testcase)
############################ TestCase 92 ##########################################
testcase = 92
status("manual")
GRAB.compare(testcase)
############################ TestCase 93 ##########################################
testcase = 93
status("active")
RC.push(["exit 2 5000", "6 1 5000", "red 1 3000"])
GRAB.compare(testcase)
############################ TestCase 94 ##########################################
testcase = 94
status("active")
RC.push(["exit 2 5000", "0 1 5000", "red 1 3000"])
GRAB.compare(testcase)
############################ TestCase 95 ##########################################
testcase = 95
status("manual")
GRAB.compare(testcase)
############################ TestCase 96 ##########################################
testcase = 96
status("active")
GRAB.compare(testcase)
############################ TestCase 97 ##########################################
testcase = 97
status("active")
RC.push(["blue 1 3000"])
GRAB.compare(testcase)
############################ TestCase 98 ##########################################
testcase = 98
status("active")
GRAB.compare(testcase)
############################ TestCase 99 ##########################################
testcase = 99
status("active")
MOD.stop(Modulator)
GRAB.compare(testcase)
############################ TestCase 100 ##########################################
testcase = 100
status("active")
MOD.play(Modulator)
RC.push(["right 1 3000"])
GRAB.compare(testcase)
############################ TestCase 101 ##########################################
testcase = 101
status("active")
RC.push(["left 1 3000"])
GRAB.compare(testcase)
############################ TestCase 102 ##########################################
testcase = 102
status("active")
RC.push(["ChDown 1 3000"])
GRAB.compare(testcase)
############################ TestCase 103 ##########################################
testcase = 103
status("active")
RC.push(["ChUp 1 3000"])
GRAB.compare(testcase)
############################ TestCase 104 ##########################################
testcase = 104
status("active")
RC.push(["exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 105 ##########################################
testcase = 105
if platform == "E212":
status("active")
else:
status("inactive")
GRAB.compare(testcase)
############################ TestCase 106 ##########################################
testcase = 106
status("active")
RC.push(["exit 2 4000", "red 1 3000", "cinemahalls 1 3000"])
GRAB.compare(testcase)
############################ TestCase 107 ##########################################
testcase = 107
status("active")
RC.push(["help 1 3000"])
GRAB.compare(testcase)
############################ TestCase 108 ##########################################
testcase = 108
status("active")
UART.start_app("scheduler")
RC.push(["red", "ok 1 3000", "down 2 1000", "ok", "ok 1 2000", "right 1 1000", "up 2 2000", "ok", "ok", "exit 5 2400", "6 1 5000", "red 1 5000"])
sleep(120)
GRAB.compare(testcase)
############################ TestCase 109 ##########################################
testcase = 109
status("manual")
GRAB.compare(testcase)
############################ TestCase 110 ##########################################
testcase = 110
status("active")
UART.default_settings()
MOD.play_stream(Modulation, FEC, SR, Stream_1, Frequency, Modulator)
OPER.search()
RC.push(["exit 3 3000"])
MOD.stop(Modulator)
MOD.play(Modulator)
RC.push(["red 1 40000", "exit 1 3000"])
GRAB.compare(testcase)
############################ TestCase 111 ##########################################
testcase = 111
status("active")
UART.default_settings()
MOD.play_stream(Modulation, FEC, SR, Stream_2, Frequency, Modulator)
OPER.search()
RC.push(["exit 3 3000"])
MOD.stop(Modulator)
MOD.play(Modulator)
RC.push(["red 1 34000"])
GRAB.compare(testcase)
############################ TestCase 112 ##########################################
testcase = 112
status("active")
RC.push(["tv/mail 1 3000"])
GRAB.compare(testcase)
############################ TestCase 113 ##########################################
testcase = 113
status("active")
RC.push(["sharp 1 3000"])
GRAB.compare(testcase)
############################ TestCase 114 ##########################################
testcase = 114
status("active")
RC.push(["star 1 3000"])
GRAB.compare(testcase)
###################################################################################
status("active")
MOD.stop(Modulator)
GRAB.stop_capture()
| 40.023474 | 218 | 0.421896 |
acf1ad0022c248b4286c3c299be8bfa027380b67 | 5,273 | py | Python | tensorflow_probability/python/distributions/lognormal_test.py | renatoviolin/probability | 557351a2b79e4bd5a9025e791c81b612ab17bf38 | [
"Apache-2.0"
] | 1 | 2020-11-08T17:03:46.000Z | 2020-11-08T17:03:46.000Z | tensorflow_probability/python/distributions/lognormal_test.py | etarakci-hvl/probability | 7a0ce5e5beff91051028258dfbc7bc6cf0c4998d | [
"Apache-2.0"
] | null | null | null | tensorflow_probability/python/distributions/lognormal_test.py | etarakci-hvl/probability | 7a0ce5e5beff91051028258dfbc7bc6cf0c4998d | [
"Apache-2.0"
] | 1 | 2020-05-27T19:42:06.000Z | 2020-05-27T19:42:06.000Z | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for LogNormal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_util
@test_util.test_all_tf_execution_regimes
class LogNormalTest(test_util.TestCase):
def setUp(self):
self._rng = np.random.RandomState(123)
def testLogNormalStats(self):
loc = np.float32([3., 1.5])
scale = np.float32([0.4, 1.1])
dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)
self.assertAllClose(self.evaluate(dist.mean()),
np.exp(loc + scale**2 / 2))
self.assertAllClose(self.evaluate(dist.variance()),
(np.exp(scale**2) - 1) * np.exp(2 * loc + scale**2))
self.assertAllClose(self.evaluate(dist.stddev()),
np.sqrt(self.evaluate(dist.variance())))
self.assertAllClose(self.evaluate(dist.mode()),
np.exp(loc - scale**2))
self.assertAllClose(self.evaluate(dist.entropy()),
np.log(scale * np.exp(loc + 0.5) * np.sqrt(2 * np.pi)))
def testLogNormalSample(self):
loc, scale = 1.5, 0.4
dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)
samples = self.evaluate(dist.sample(6000, seed=test_util.test_seed()))
self.assertAllClose(np.mean(samples),
self.evaluate(dist.mean()),
atol=0.1)
self.assertAllClose(np.std(samples),
self.evaluate(dist.stddev()),
atol=0.1)
def testLogNormalPDF(self):
loc, scale = 1.5, 0.4
dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)
x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)
log_pdf = dist.log_prob(x)
analytical_log_pdf = -np.log(x * scale * np.sqrt(2 * np.pi)) - (
np.log(x) - loc)**2 / (2. * scale**2)
self.assertAllClose(self.evaluate(log_pdf), analytical_log_pdf)
def testLogNormalCDF(self):
loc, scale = 1.5, 0.4
dist = tfd.LogNormal(loc=loc, scale=scale, validate_args=True)
x = np.array([1e-4, 1.0, 2.0], dtype=np.float32)
cdf = dist.cdf(x)
analytical_cdf = .5 + .5 * tf.math.erf(
(np.log(x) - loc) / (scale * np.sqrt(2)))
self.assertAllClose(self.evaluate(cdf),
self.evaluate(analytical_cdf))
def testLogNormalLogNormalKL(self):
batch_size = 6
mu_a = np.array([3.0] * batch_size)
sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
mu_b = np.array([-3.0] * batch_size)
sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])
ln_a = tfd.LogNormal(loc=mu_a, scale=sigma_a, validate_args=True)
ln_b = tfd.LogNormal(loc=mu_b, scale=sigma_b, validate_args=True)
kl = tfd.kl_divergence(ln_a, ln_b)
kl_val = self.evaluate(kl)
normal_a = tfd.Normal(loc=mu_a, scale=sigma_a, validate_args=True)
normal_b = tfd.Normal(loc=mu_b, scale=sigma_b, validate_args=True)
kl_expected_from_normal = tfd.kl_divergence(normal_a, normal_b)
kl_expected_from_formula = ((mu_a - mu_b)**2 / (2 * sigma_b**2) + 0.5 * (
(sigma_a**2 / sigma_b**2) - 1 - 2 * np.log(sigma_a / sigma_b)))
x = ln_a.sample(int(2e5), seed=test_util.test_seed())
kl_sample = tf.reduce_mean(ln_a.log_prob(x) - ln_b.log_prob(x), axis=0)
kl_sample_ = self.evaluate(kl_sample)
self.assertEqual(kl.shape, (batch_size,))
self.assertAllClose(kl_val, kl_expected_from_normal)
self.assertAllClose(kl_val, kl_expected_from_formula)
self.assertAllClose(
kl_expected_from_formula, kl_sample_, atol=0.0, rtol=1e-2)
# TODO(b/144948687) Avoid `nan` at boundary. Ideally we'd do this test:
# def testPdfAtBoundary(self):
# dist = tfd.LogNormal(loc=5., scale=2.)
# pdf = self.evaluate(dist.prob(0.))
# log_pdf = self.evaluate(dist.log_prob(0.))
# self.assertEqual(pdf, 0.)
# self.assertAllNegativeInf(log_pdf)
def testAssertValidSample(self):
dist = tfd.LogNormal(loc=[-3., 1., 4.], scale=2., validate_args=True)
with self.assertRaisesOpError('Sample must be non-negative.'):
self.evaluate(dist.cdf([3., -0.2, 1.]))
def testSupportBijectorOutsideRange(self):
dist = tfd.LogNormal(loc=1., scale=2., validate_args=True)
with self.assertRaisesOpError('must be greater than 0'):
dist._experimental_default_event_space_bijector().inverse(
[-4.2, -1e-6, -1.3])
if __name__ == '__main__':
tf.test.main()
| 38.210145 | 79 | 0.650294 |
acf1ad42c916420665d5fa882fd6cb14be091a81 | 4,068 | py | Python | psana/psana/tests/test_xtc.py | AntoineDujardin/lcls2 | 8b9d2815497fbbabb4d37800fd86a7be1728b552 | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/tests/test_xtc.py | AntoineDujardin/lcls2 | 8b9d2815497fbbabb4d37800fd86a7be1728b552 | [
"BSD-3-Clause-LBNL"
] | null | null | null | psana/psana/tests/test_xtc.py | AntoineDujardin/lcls2 | 8b9d2815497fbbabb4d37800fd86a7be1728b552 | [
"BSD-3-Clause-LBNL"
] | null | null | null | import os, shutil
import subprocess
import sys
import pytest
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from xtc import xtc
from det import det
import hashlib
from psana import DataSource
import dgramCreate as dc
from setup_input_files import setup_input_files
class Test:
# Use pytest fixtures for creating test folders.
# Test data are in /tmp/pytest-of-username
@staticmethod
@pytest.fixture(scope='function')
def xtc_file(tmp_path):
fname = str(tmp_path / 'data.xtc2')
subprocess.call(['xtcwriter', '-f', fname])
return fname
# cpo: remove this test because cydgram doesn't support charstr/enum
# datatypes which are produced by xtcwriter. also, cydgram is tested
# in the test_py2xtc.py test (without these datatypes).
"""
def test_cydgram(self, xtc_file, tmp_path):
fname = str(tmp_path / 'data_cydgram.xtc2')
# read in an old xtc file
ds = DataSource(files=xtc_file)
for run in ds.runs():
pyxtc = dc.parse_xtc(run.configs[0])
for evt in run.events():
pyxtc.parse_event(evt)
# put the dictionaries in a new xtc file
cydgram = dc.CyDgram()
pyxtc.write_events(fname, cydgram)
# test that the values in the new file are correct
xtc(fname, nsegments=1, cydgram=True)
"""
def test_xtcdata(self, xtc_file):
xtc(xtc_file, nsegments=2)
def test_serial(self, tmp_path):
setup_input_files(tmp_path)
env = dict(list(os.environ.items()) + [
('TEST_XTC_DIR', str(tmp_path)),
])
loop_based = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'user_loops.py')
subprocess.check_call(['python',loop_based], env=env)
callback_based = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'user_callbacks.py')
subprocess.check_call(['python',callback_based], env=env)
callback_based = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'parallelreader.py')
subprocess.check_call(['python',callback_based], env=env)
callback_based = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ds.py')
subprocess.check_call(['python',callback_based], env=env)
@pytest.mark.skipif(sys.platform == 'darwin', reason="psana with legion not supported on mac")
def test_legion(self, tmp_path):
setup_input_files(tmp_path)
# Legion script mode.
env = dict(list(os.environ.items()) + [
('PS_PARALLEL', 'legion'),
('TEST_XTC_DIR', str(tmp_path)),
])
callback_based = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'user_callbacks.py')
subprocess.check_call(['legion_python', callback_based, '-ll:py', '1'], env=env)
# Legion module mode.
python_path = os.environ.get('PYTHONPATH', '').split(':')
python_path.append(os.path.dirname(os.path.realpath(__file__)))
env.update({
'PYTHONPATH': ':'.join(python_path),
})
subprocess.check_call(['legion_python', 'user_callbacks', '-ll:py', '1'], env=env)
def test_run_pickle(self, tmp_path):
# Test that run is pickleable
setup_input_files(tmp_path)
import run_pickle
run_pickle.test_run_pickle(tmp_path)
@pytest.mark.skipif(sys.platform == 'darwin', reason="psana with legion not supported on mac")
def test_legion_pickle(self, tmp_path):
# Again, in Legion
setup_input_files(tmp_path)
python_path = os.environ.get('PYTHONPATH', '').split(':')
python_path.append(os.path.dirname(os.path.realpath(__file__)))
env = dict(list(os.environ.items()) + [
('PYTHONPATH', ':'.join(python_path)),
('PS_PARALLEL', 'legion'),
('TEST_XTC_DIR', str(tmp_path)),
])
subprocess.check_call(['legion_python', 'run_pickle', '-ll:py', '1'], env=env)
def test_det(self, xtc_file):
det(xtc_file)
| 36 | 103 | 0.638397 |
acf1ad4c8b45b059a0f8dd5718b01dd57b16e84c | 141 | py | Python | setup.py | iilei/identify | d82b4a26cfb2512d4bf4e1be4d18c0bba6887448 | [
"MIT"
] | 1 | 2020-11-20T12:22:25.000Z | 2020-11-20T12:22:25.000Z | setup.py | iilei/identify | d82b4a26cfb2512d4bf4e1be4d18c0bba6887448 | [
"MIT"
] | 3 | 2020-11-20T14:40:17.000Z | 2020-11-27T00:57:50.000Z | setup.py | iilei/identify | d82b4a26cfb2512d4bf4e1be4d18c0bba6887448 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
from setuptools import setup
setup()
| 20.142857 | 39 | 0.787234 |
acf1ad5d9d7d0dff3234e0f2d25abf3e45ede77e | 7,463 | py | Python | sentry_sdk/integrations/django/__init__.py | honestica/sentry-python | 42a3df0dd5e4a1982a82d60d7213fe732c985005 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/__init__.py | honestica/sentry-python | 42a3df0dd5e4a1982a82d60d7213fe732c985005 | [
"BSD-2-Clause"
] | null | null | null | sentry_sdk/integrations/django/__init__.py | honestica/sentry-python | 42a3df0dd5e4a1982a82d60d7213fe732c985005 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import sys
import weakref
from django import VERSION as DJANGO_VERSION
from django.core import signals
try:
from django.urls import resolve
except ImportError:
from django.core.urlresolvers import resolve
from sentry_sdk import Hub
from sentry_sdk.hub import _should_send_default_pii
from sentry_sdk.utils import (
capture_internal_exceptions,
event_from_exception,
safe_repr,
format_and_strip,
)
from sentry_sdk.integrations import Integration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations._wsgi import RequestExtractor, run_wsgi_app
from sentry_sdk.integrations.django.transactions import (
LEGACY_RESOLVER,
transaction_from_function,
)
if DJANGO_VERSION < (1, 10):
def is_authenticated(request_user):
return request_user.is_authenticated()
else:
def is_authenticated(request_user):
return request_user.is_authenticated
class DjangoIntegration(Integration):
identifier = "django"
transaction_style = None
def __init__(self, transaction_style="url"):
TRANSACTION_STYLE_VALUES = ("function_name", "url")
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
@staticmethod
def setup_once():
install_sql_hook()
# Patch in our custom middleware.
from django.core.handlers.wsgi import WSGIHandler
old_app = WSGIHandler.__call__
def sentry_patched_wsgi_handler(self, environ, start_response):
if Hub.current.get_integration(DjangoIntegration) is None:
return old_app(self, environ, start_response)
return run_wsgi_app(
lambda *a, **kw: old_app(self, *a, **kw), environ, start_response
)
WSGIHandler.__call__ = sentry_patched_wsgi_handler
# patch get_response, because at that point we have the Django request
# object
from django.core.handlers.base import BaseHandler
old_get_response = BaseHandler.get_response
def sentry_patched_get_response(self, request):
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is not None:
with hub.configure_scope() as scope:
scope.add_event_processor(
_make_event_processor(weakref.ref(request), integration)
)
return old_get_response(self, request)
BaseHandler.get_response = sentry_patched_get_response
signals.got_request_exception.connect(_got_request_exception)
def _make_event_processor(weak_request, integration):
def event_processor(event, hint):
# if the request is gone we are fine not logging the data from
# it. This might happen if the processor is pushed away to
# another thread.
request = weak_request()
if request is None:
return event
if "transaction" not in event:
try:
if integration.transaction_style == "function_name":
event["transaction"] = transaction_from_function(
resolve(request.path).func
)
elif integration.transaction_style == "url":
event["transaction"] = LEGACY_RESOLVER.resolve(request.path)
except Exception:
pass
with capture_internal_exceptions():
DjangoRequestExtractor(request).extract_into_event(event)
if _should_send_default_pii():
with capture_internal_exceptions():
_set_user_info(request, event)
return event
return event_processor
def _got_request_exception(request=None, **kwargs):
hub = Hub.current
integration = hub.get_integration(DjangoIntegration)
if integration is not None:
event, hint = event_from_exception(
sys.exc_info(),
client_options=hub.client.options,
mechanism={"type": "django", "handled": False},
)
hub.capture_event(event, hint=hint)
class DjangoRequestExtractor(RequestExtractor):
def url(self):
return self.request.build_absolute_uri(self.request.path)
def env(self):
return self.request.META
def cookies(self):
return self.request.COOKIES
def raw_data(self):
return self.request.body
def form(self):
return self.request.POST
def files(self):
return self.request.FILES
def size_of_file(self, file):
return file.size
def _set_user_info(request, event):
user_info = event.setdefault("user", {})
user = getattr(request, "user", None)
if user is None or not is_authenticated(user):
return
if "email" not in user_info:
try:
user_info["email"] = user.email
except Exception:
pass
if "username" not in user_info:
try:
user_info["username"] = user.get_username()
except Exception:
pass
class _FormatConverter(object):
def __init__(self, param_mapping):
self.param_mapping = param_mapping
self.params = []
def __getitem__(self, val):
self.params.append(self.param_mapping.get(val))
return "%s"
def format_sql(sql, params):
rv = []
if isinstance(params, dict):
# convert sql with named parameters to sql with unnamed parameters
conv = _FormatConverter(params)
if params:
sql = sql % conv
params = conv.params
else:
params = ()
for param in params or ():
if param is None:
rv.append("NULL")
param = safe_repr(param)
rv.append(param)
return sql, rv
def record_sql(sql, params):
hub = Hub.current
if hub.get_integration(DjangoIntegration) is None:
return
real_sql, real_params = format_sql(sql, params)
if real_params:
try:
real_sql = format_and_strip(real_sql, real_params)
except Exception:
pass
hub.add_breadcrumb(message=real_sql, category="query")
def install_sql_hook():
"""If installed this causes Django's queries to be captured."""
try:
from django.db.backends.utils import CursorWrapper
except ImportError:
from django.db.backends.util import CursorWrapper
try:
real_execute = CursorWrapper.execute
real_executemany = CursorWrapper.executemany
except AttributeError:
# This won't work on Django versions < 1.6
return
def record_many_sql(sql, param_list):
for params in param_list:
record_sql(sql, params)
def execute(self, sql, params=None):
try:
return real_execute(self, sql, params)
finally:
record_sql(sql, params)
def executemany(self, sql, param_list):
try:
return real_executemany(self, sql, param_list)
finally:
record_many_sql(sql, param_list)
CursorWrapper.execute = execute
CursorWrapper.executemany = executemany
ignore_logger("django.db.backends")
| 28.162264 | 81 | 0.647461 |
acf1ae20a98edde0c6c1233ede91136d0f7c91fc | 9,799 | py | Python | ffmpeg-split.py | onlyeat3/video-splitter | a7e7cc4e616c9b857b9a505a370018514b3af35e | [
"Apache-2.0"
] | null | null | null | ffmpeg-split.py | onlyeat3/video-splitter | a7e7cc4e616c9b857b9a505a370018514b3af35e | [
"Apache-2.0"
] | null | null | null | ffmpeg-split.py | onlyeat3/video-splitter | a7e7cc4e616c9b857b9a505a370018514b3af35e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
from multiprocessing import Process,Pool
import csv
import json
import math
import os
import shlex
import subprocess
import datetime
from optparse import OptionParser
def split_by_manifest(filename, manifest, vcodec="copy", acodec="copy",
extra="", **kwargs):
""" Split video into segments based on the given manifest file.
Arguments:
filename (str) - Location of the video.
manifest (str) - Location of the manifest file.
vcodec (str) - Controls the video codec for the ffmpeg video
output.
acodec (str) - Controls the audio codec for the ffmpeg video
output.
extra (str) - Extra options for ffmpeg.
"""
if not os.path.exists(manifest):
print("File does not exist: %s" % manifest)
raise SystemExit
with open(manifest) as manifest_file:
manifest_type = manifest.split(".")[-1]
if manifest_type == "json":
config = json.load(manifest_file)
elif manifest_type == "csv":
config = csv.DictReader(manifest_file)
else:
print("Format not supported. File must be a csv or json file")
raise SystemExit
split_cmd = ["ffmpeg", "-i", filename, "-vcodec", vcodec,
"-acodec", acodec, "-y"] + shlex.split(extra)
try:
fileext = filename.split(".")[-1]
except IndexError as e:
raise IndexError("No . in filename. Error: " + str(e))
for video_config in config:
split_args = []
try:
split_start = video_config["start_time"]
split_length = video_config.get("end_time", None)
if not split_length:
split_length = video_config["length"]
filebase = video_config["rename_to"]
if fileext in filebase:
filebase = ".".join(filebase.split(".")[:-1])
split_args += ["-ss", str(split_start), "-t",
str(split_length), filebase + "." + fileext]
print("########################################################")
print("About to run: " + " ".join(split_cmd + split_args))
print("########################################################")
subprocess.check_output(split_cmd + split_args)
except KeyError as e:
print("############# Incorrect format ##############")
if manifest_type == "json":
print("The format of each json array should be:")
print("{start_time: <int>, length: <int>, rename_to: <string>}")
elif manifest_type == "csv":
print("start_time,length,rename_to should be the first line ")
print("in the csv file.")
print("#############################################")
print(e)
raise SystemExit
def get_video_length(filename):
output = subprocess.check_output(("ffprobe", "-v", "error", "-show_entries", "format=duration", "-of",
"default=noprint_wrappers=1:nokey=1", filename)).strip()
video_length = int(float(output))
print("Video length in seconds: " + str(video_length))
return video_length
def ceildiv(a, b):
return int(math.ceil(a / float(b)))
def exec_task(task_cmd):
subprocess.check_output(task_cmd)
def split_by_seconds(filename, split_length, vcodec="copy", acodec="copy",
extra="", video_length=None, **kwargs):
if split_length and split_length <= 0:
print("Split length can't be 0")
raise SystemExit
if not video_length:
video_length = get_video_length(filename)
split_count = ceildiv(video_length, split_length)
if split_count == 1:
print("Video length is less then the target split length.")
raise SystemExit
split_cmd = ["ffmpeg", "-i", filename, "-vcodec", vcodec, "-acodec", acodec] + shlex.split(extra)
try:
filebase = ".".join(filename.split(".")[:-1])
fileext = filename.split(".")[-1]
except IndexError as e:
raise IndexError("No . in filename. Error: " + str(e))
task_start_time = datetime.datetime.now()
process_list = []
pool = Pool(split_count+1)
for n in range(0, split_count):
split_args = []
if n == 0:
split_start = 0
else:
split_start = split_length * n
index = str(n + 1)
target_file_name = filebase + "-" + index + "-of-" + str(split_count) + "." + fileext
split_args += ["-ss", str(split_start), "-t", str(split_length),
target_file_name]
print("About to run: " + " ".join(split_cmd + split_args))
subprocess.check_output(split_cmd + split_args)
transcode_cmd = "ffmpeg -i "+target_file_name + " output-"+ index +"-of-"+str(split_count)+".mp4"
print(transcode_cmd)
pool.apply_async(func=exec_task,args=(transcode_cmd,))
# subprocess.check_output(transcode_cmd)
pool.close()
pool.join()
files = []
for n in range(1,split_count):
files.append("output-{n}-of-{split_count}.mp4".format(n=n,split_count=split_count))
files_str = "|".join(files)
concat_cmd = "ffmpeg -i \"concat:{files_str}\" -c copy output.mp4".format(files_str=files_str)
print(concat_cmd)
subprocess.check_output(concat_cmd)
print('耗时:'+str((datetime.datetime.now() - task_start_time).seconds))
def main():
parser = OptionParser()
parser.add_option("-f", "--file",
dest="filename",
help="File to split, for example sample.avi",
type="string",
action="store"
)
parser.add_option("-s", "--split-size",
dest="split_length",
help="Split or chunk size in seconds, for example 10",
type="int",
action="store"
)
parser.add_option("-c", "--split-chunks",
dest="split_chunks",
help="Number of chunks to split to",
type="int",
action="store"
)
parser.add_option("-S", "--split-filesize",
dest="split_filesize",
help="Split or chunk size in bytes (approximate)",
type="int",
action="store"
)
parser.add_option("--filesize-factor",
dest="filesize_factor",
help="with --split-filesize, use this factor in time to"
" size heuristics [default: %default]",
type="float",
action="store",
default=0.95
)
parser.add_option("--chunk-strategy",
dest="chunk_strategy",
help="with --split-filesize, allocate chunks according to"
" given strategy (eager or even)",
type="choice",
action="store",
choices=['eager', 'even'],
default='eager'
)
parser.add_option("-m", "--manifest",
dest="manifest",
help="Split video based on a json manifest file. ",
type="string",
action="store"
)
parser.add_option("-v", "--vcodec",
dest="vcodec",
help="Video codec to use. ",
type="string",
default="copy",
action="store"
)
parser.add_option("-a", "--acodec",
dest="acodec",
help="Audio codec to use. ",
type="string",
default="copy",
action="store"
)
parser.add_option("-e", "--extra",
dest="extra",
help="Extra options for ffmpeg, e.g. '-e -threads 8'. ",
type="string",
default="",
action="store"
)
(options, args) = parser.parse_args()
def bailout():
parser.print_help()
raise SystemExit
if not options.filename:
bailout()
if options.manifest:
split_by_manifest(**options.__dict__)
else:
video_length = None
if not options.split_length:
video_length = get_video_length(options.filename)
file_size = os.stat(options.filename).st_size
split_filesize = None
if options.split_filesize:
split_filesize = int(options.split_filesize * options.filesize_factor)
if split_filesize and options.chunk_strategy == 'even':
options.split_chunks = ceildiv(file_size, split_filesize)
if options.split_chunks:
options.split_length = ceildiv(video_length, options.split_chunks)
if not options.split_length and split_filesize:
options.split_length = int(split_filesize / float(file_size) * video_length)
if not options.split_length:
bailout()
split_by_seconds(video_length=video_length, **options.__dict__)
if __name__ == '__main__':
main()
| 39.039841 | 106 | 0.511277 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.