max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
features/steps/ps_sdf_state_msg.py | PolySync/core-python-api | 0 | 6621951 | # WARNING: Auto-generated file. Any changes are subject to being overwritten
# by setup.py build script.
#!/usr/bin/python
import time
from behave import given
from behave import when
from behave import then
from hamcrest import assert_that, equal_to
try:
import polysync.node as ps_node
from polysync.data_model.types import Py_ps_sdf_state_msg
from polysync.data_model._internal.compare import ps_sdf_state_msg_type_convert_testable, Py_ps_sdf_state_msg_initialize_random
from polysync.data_model.message_support.ps_sdf_state_msg import publish, subscribe
except ImportError:
raise ImportError(
'Py_ps_sdf_state_msg module dependencies \
missing for tests, is the project built?')
@given('I have a Py_ps_sdf_state_msg object')
def step_impl(context):
pass
@when('I convert it to its C API equivalent a ps_sdf_state_msg')
def step_impl(context):
pass
@when('I convert the ps_sdf_state_msg back to a Py_ps_sdf_state_msg')
def step_impl(context):
pass
@then('the ps_sdf_state_msg values are equivalent to each Py_ps_sdf_state_msg value')
def step_impl(context):
msg = Py_ps_sdf_state_msg_initialize_random()
result = ps_sdf_state_msg_type_convert_testable(msg)
assert not result, result
@given('a ps_sdf_state_msg.publish function exists')
def step_impl(context):
assert callable(publish)
@when('I try to publish something that is not of type Py_ps_sdf_state_msg')
def step_impl(context):
bad_obj = "not the right type of object!"
context.exception = None
try:
publish(bad_obj)
except Exception as e:
context.exception = e
@then('a {exeption} indicates the type was not Py_ps_sdf_state_msg')
def step_impl(context, exeption):
assert isinstance(context.exception, eval(exeption)), \
"Invalid exception %s - expected %s" \
% (type(context.exception).__name__, exeption)
GLOBAL_TIMESTAMP = None
GLOBAL_GUID = None
def Py_ps_sdf_state_msg_handler(msg):
if msg.header.src_guid == GLOBAL_GUID:
global GLOBAL_TIMESTAMP
GLOBAL_TIMESTAMP = msg.header.timestamp
@given(u'I have a licensed PsNode for publishing Py_ps_sdf_state_msg')
def step_impl(context):
assert context.node_ref
global GLOBAL_GUID
GLOBAL_GUID = context.my_guid
@given(u'I have a Py_ps_sdf_state_msg')
def step_impl(context):
context.msg = Py_ps_sdf_state_msg()
context.msg.header.timestamp = 0xFFFF
@given(u'I have a handler for Py_ps_sdf_state_msg subscription')
def step_impl(context):
assert Py_ps_sdf_state_msg_handler
subscribe(handler=Py_ps_sdf_state_msg_handler)
@when(u'I publish my Py_ps_sdf_state_msg')
def step_impl(context):
publish(context.msg)
@then(u'I receive the corresponding Py_ps_sdf_state_msg in my handler')
def step_impl(context):
global GLOBAL_TIMESTAMP
while not GLOBAL_TIMESTAMP:
time.sleep(1)
assert_that(context.msg.header.timestamp, equal_to(GLOBAL_TIMESTAMP))
| # WARNING: Auto-generated file. Any changes are subject to being overwritten
# by setup.py build script.
#!/usr/bin/python
import time
from behave import given
from behave import when
from behave import then
from hamcrest import assert_that, equal_to
try:
import polysync.node as ps_node
from polysync.data_model.types import Py_ps_sdf_state_msg
from polysync.data_model._internal.compare import ps_sdf_state_msg_type_convert_testable, Py_ps_sdf_state_msg_initialize_random
from polysync.data_model.message_support.ps_sdf_state_msg import publish, subscribe
except ImportError:
raise ImportError(
'Py_ps_sdf_state_msg module dependencies \
missing for tests, is the project built?')
@given('I have a Py_ps_sdf_state_msg object')
def step_impl(context):
pass
@when('I convert it to its C API equivalent a ps_sdf_state_msg')
def step_impl(context):
pass
@when('I convert the ps_sdf_state_msg back to a Py_ps_sdf_state_msg')
def step_impl(context):
pass
@then('the ps_sdf_state_msg values are equivalent to each Py_ps_sdf_state_msg value')
def step_impl(context):
msg = Py_ps_sdf_state_msg_initialize_random()
result = ps_sdf_state_msg_type_convert_testable(msg)
assert not result, result
@given('a ps_sdf_state_msg.publish function exists')
def step_impl(context):
assert callable(publish)
@when('I try to publish something that is not of type Py_ps_sdf_state_msg')
def step_impl(context):
bad_obj = "not the right type of object!"
context.exception = None
try:
publish(bad_obj)
except Exception as e:
context.exception = e
@then('a {exeption} indicates the type was not Py_ps_sdf_state_msg')
def step_impl(context, exeption):
assert isinstance(context.exception, eval(exeption)), \
"Invalid exception %s - expected %s" \
% (type(context.exception).__name__, exeption)
GLOBAL_TIMESTAMP = None
GLOBAL_GUID = None
def Py_ps_sdf_state_msg_handler(msg):
if msg.header.src_guid == GLOBAL_GUID:
global GLOBAL_TIMESTAMP
GLOBAL_TIMESTAMP = msg.header.timestamp
@given(u'I have a licensed PsNode for publishing Py_ps_sdf_state_msg')
def step_impl(context):
assert context.node_ref
global GLOBAL_GUID
GLOBAL_GUID = context.my_guid
@given(u'I have a Py_ps_sdf_state_msg')
def step_impl(context):
context.msg = Py_ps_sdf_state_msg()
context.msg.header.timestamp = 0xFFFF
@given(u'I have a handler for Py_ps_sdf_state_msg subscription')
def step_impl(context):
assert Py_ps_sdf_state_msg_handler
subscribe(handler=Py_ps_sdf_state_msg_handler)
@when(u'I publish my Py_ps_sdf_state_msg')
def step_impl(context):
publish(context.msg)
@then(u'I receive the corresponding Py_ps_sdf_state_msg in my handler')
def step_impl(context):
global GLOBAL_TIMESTAMP
while not GLOBAL_TIMESTAMP:
time.sleep(1)
assert_that(context.msg.header.timestamp, equal_to(GLOBAL_TIMESTAMP))
| en | 0.82422 | # WARNING: Auto-generated file. Any changes are subject to being overwritten # by setup.py build script. #!/usr/bin/python | 1.799874 | 2 |
Project/python/PySpy/sp1.py | RayleighChen/Improve | 1 | 6621952 | import urllib2
response = urllib2.urlopen("http://www.baidu.com")
print response.read() | import urllib2
response = urllib2.urlopen("http://www.baidu.com")
print response.read() | none | 1 | 2.719876 | 3 | |
maxima/src/maxima/share/pytranslate/cantorr.py | nilqed/spadlib | 1 | 6621953 | from pytranslate import *
#########################
### Cantorr2 Function ###
#########################
def block17340(v):
v = Stack({}, v)
if ((v["x"] > 0) and (v["x"] <= (1 / 3))):
v["ret"] = cantorr2((3 * v["x"]), (v["n"] + (-1)))
if (((1 / 3) < v["x"]) and (v["x"] < (2 / 3))):
v["ret"] = 1
if ((v["x"] >= (2 / 3)) and (v["x"] < 1)):
v["ret"] = (1 + cantorr2(((3 * v["x"]) + (-2)), (v["n"] + (-1))))
return((v["ret"] / 2))
def cantorr2(x, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : n})
v.ins({"ret" : 0, "k" : 0})
if not(f["numberp"](v["x"])):
return(f["cantorr2"](v["x"], v["n"]))
if (v["x"] == 0):
return(0)
if (v["x"] == 1):
return(1)
f["print"](v["x"], v["n"])
return((block17340(v) if (v["n"] > 0) else v["x"]))
f["cantorr2"] = cantorr2
#########################
### Cantorri Function ###
#########################
def block34784(v):
v = Stack({}, v)
v["ret"] = 0
return(v["ret"])
def cantorri(x, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : n})
v.ins({"ret" : 1, "q" : None})
if not(f["numberp"](v["x"])):
return(f["cantorri"](v["x"], v["n"]))
if ((v["x"] == 0) or (v["x"] == 1)):
return(1)
v["x"] = f["mod"](v["x"], 1)
for v["i"] in range(1, (v["n"] + 1)):
v["x"] = (3 * v["x"])
v["q"] = math.floor(v["x"])
if (v["q"] == 1):
return(block34784(v))
v["x"] = (v["x"] + (-v["q"]))
return(v["ret"])
f["cantorri"] = cantorri
#########################
### Cantorrd Function ###
#########################
def block47665(v):
v = Stack({}, v)
if ((v["x"] > 0) and (v["x"] <= (1 / 3))):
v["ret"] = cantorrd((3 * v["x"]), (v["n"] + (-1)))
if (((1 / 3) < v["x"]) and (v["x"] < (2 / 3))):
v["ret"] = 0
if (((2 / 3) <= v["x"]) and (v["x"] < 1)):
v["ret"] = cantorrd(((3 * v["x"]) + (-2)), (v["n"] + (-1)))
return(v["ret"])
def cantorrd(x, *n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : list(n)})
v.ins({"ret" : 0})
if f["emptyp"](v["n"]):
v["n"] = v["fpprec"]
else:
v["n"] = f["first"](v["n"])
if not(f["numberp"](v["x"])):
return(f["cantorrd"](v["x"], v["n"]))
if (v["x"] == 0):
return(1)
if (v["x"] == 1):
return(1)
v["x"] = f["mod"](v["x"], 1)
return((block47665(v) if (v["n"] > 0) else 1))
f["cantorrd"] = cantorrd
########################
## Cantorr_p Function ##
########################
def block71117(v):
v = Stack({}, v)
v["p"] = f["denom"](v["b"])
v["q"] = f["num"](v["b"])
if ((v["x"] > 0) and (v["x"] < v["b"])):
v["ret"] = cantorr_p((v["p"] * v["x"]), v["b"], (v["n"] + (-1)))
if ((v["b"] <= v["x"]) and (v["x"] <= (1 + (-v["b"])))):
v["ret"] = 1
if ((v["x"] > (1 + (-v["b"]))) and (v["x"] < 1)):
v["ret"] = (1 + cantorr_p(((v["p"] * v["x"]) + (-(v["p"] + (-v["q"])))), v["b"], (v["n"] + (-1))))
return((v["ret"] / 2))
def cantorr_p(x, b, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "b" : b, "n" : n})
v.ins({"ret" : 0, "p" : None, "q" : None, "d" : None})
if not(f["numberp"](v["x"])):
return(f["cantorr_p"](v["x"], v["b"], v["n"]))
if (v["x"] == 0):
return(0)
if (v["x"] == 1):
return(1)
if (v["b"] > (1 / 2)):
v["b"] = (1 + (-f["mod"](v["b"], 1)))
return((block71117(v) if (v["n"] > 0) else v["x"]))
#########################
## gcantorseq Function ##
#########################
def block85290(v):
v = Stack({}, v)
v["s"] = f["append"](gcantorseq(v["u"], (v["q"] + (-(v["p"] / 2))), (v["p"] * v["r"]), v["r"], (v["n"] + (-1))), gcantorseq((v["q"] + (v["p"] / 2)), v["w"], (v["p"] * v["r"]), v["r"], (v["n"] + (-1))))
return(v["s"])
def gcantorseq(u, w, p, r, n, v = v):
v = Stack({}, v)
v.ins({"u" : u, "w" : w, "p" : p, "r" : r, "n" : n})
v.ins({"s" : None, "q" : None})
if not(f["integerp"](v["n"])):
return(None)
v["q"] = ((v["u"] + v["w"]) / 2)
v["s"] = [v["u"], v["w"]]
return((block85290(v) if (v["n"] > 1) else v["s"]))
########################
## cantorseq Function ##
########################
def cantorseq(n, v = v):
v = Stack({}, v)
v.ins({"n" : n})
v.ins({"seq" : [0, 1], "l" : [], "r" : []})
if not(f["integerp"](v["n"])):
return(f["cantorseq"](v["n"]))
for v["k"] in range(1, ((v["n"] + (-1)) + 1)):
v["r"] = v["seq"]
# Map divide using Lambda in the following
v["l"] = (v["r"] / 3)
v["r"] = f["reverse"]((1 + (-v["l"])))
v["seq"] = f["append"](v["l"], v["r"])
return(v["seq"])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorr2"](x, 10), ['x', 0, 1])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorrd"](x, 10), ['x', 0, 1])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorri"](x, 10), ['x', 0, 1])
| from pytranslate import *
#########################
### Cantorr2 Function ###
#########################
def block17340(v):
v = Stack({}, v)
if ((v["x"] > 0) and (v["x"] <= (1 / 3))):
v["ret"] = cantorr2((3 * v["x"]), (v["n"] + (-1)))
if (((1 / 3) < v["x"]) and (v["x"] < (2 / 3))):
v["ret"] = 1
if ((v["x"] >= (2 / 3)) and (v["x"] < 1)):
v["ret"] = (1 + cantorr2(((3 * v["x"]) + (-2)), (v["n"] + (-1))))
return((v["ret"] / 2))
def cantorr2(x, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : n})
v.ins({"ret" : 0, "k" : 0})
if not(f["numberp"](v["x"])):
return(f["cantorr2"](v["x"], v["n"]))
if (v["x"] == 0):
return(0)
if (v["x"] == 1):
return(1)
f["print"](v["x"], v["n"])
return((block17340(v) if (v["n"] > 0) else v["x"]))
f["cantorr2"] = cantorr2
#########################
### Cantorri Function ###
#########################
def block34784(v):
v = Stack({}, v)
v["ret"] = 0
return(v["ret"])
def cantorri(x, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : n})
v.ins({"ret" : 1, "q" : None})
if not(f["numberp"](v["x"])):
return(f["cantorri"](v["x"], v["n"]))
if ((v["x"] == 0) or (v["x"] == 1)):
return(1)
v["x"] = f["mod"](v["x"], 1)
for v["i"] in range(1, (v["n"] + 1)):
v["x"] = (3 * v["x"])
v["q"] = math.floor(v["x"])
if (v["q"] == 1):
return(block34784(v))
v["x"] = (v["x"] + (-v["q"]))
return(v["ret"])
f["cantorri"] = cantorri
#########################
### Cantorrd Function ###
#########################
def block47665(v):
v = Stack({}, v)
if ((v["x"] > 0) and (v["x"] <= (1 / 3))):
v["ret"] = cantorrd((3 * v["x"]), (v["n"] + (-1)))
if (((1 / 3) < v["x"]) and (v["x"] < (2 / 3))):
v["ret"] = 0
if (((2 / 3) <= v["x"]) and (v["x"] < 1)):
v["ret"] = cantorrd(((3 * v["x"]) + (-2)), (v["n"] + (-1)))
return(v["ret"])
def cantorrd(x, *n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "n" : list(n)})
v.ins({"ret" : 0})
if f["emptyp"](v["n"]):
v["n"] = v["fpprec"]
else:
v["n"] = f["first"](v["n"])
if not(f["numberp"](v["x"])):
return(f["cantorrd"](v["x"], v["n"]))
if (v["x"] == 0):
return(1)
if (v["x"] == 1):
return(1)
v["x"] = f["mod"](v["x"], 1)
return((block47665(v) if (v["n"] > 0) else 1))
f["cantorrd"] = cantorrd
########################
## Cantorr_p Function ##
########################
def block71117(v):
v = Stack({}, v)
v["p"] = f["denom"](v["b"])
v["q"] = f["num"](v["b"])
if ((v["x"] > 0) and (v["x"] < v["b"])):
v["ret"] = cantorr_p((v["p"] * v["x"]), v["b"], (v["n"] + (-1)))
if ((v["b"] <= v["x"]) and (v["x"] <= (1 + (-v["b"])))):
v["ret"] = 1
if ((v["x"] > (1 + (-v["b"]))) and (v["x"] < 1)):
v["ret"] = (1 + cantorr_p(((v["p"] * v["x"]) + (-(v["p"] + (-v["q"])))), v["b"], (v["n"] + (-1))))
return((v["ret"] / 2))
def cantorr_p(x, b, n, v = v):
v = Stack({}, v)
v.ins({"x" : x, "b" : b, "n" : n})
v.ins({"ret" : 0, "p" : None, "q" : None, "d" : None})
if not(f["numberp"](v["x"])):
return(f["cantorr_p"](v["x"], v["b"], v["n"]))
if (v["x"] == 0):
return(0)
if (v["x"] == 1):
return(1)
if (v["b"] > (1 / 2)):
v["b"] = (1 + (-f["mod"](v["b"], 1)))
return((block71117(v) if (v["n"] > 0) else v["x"]))
#########################
## gcantorseq Function ##
#########################
def block85290(v):
v = Stack({}, v)
v["s"] = f["append"](gcantorseq(v["u"], (v["q"] + (-(v["p"] / 2))), (v["p"] * v["r"]), v["r"], (v["n"] + (-1))), gcantorseq((v["q"] + (v["p"] / 2)), v["w"], (v["p"] * v["r"]), v["r"], (v["n"] + (-1))))
return(v["s"])
def gcantorseq(u, w, p, r, n, v = v):
v = Stack({}, v)
v.ins({"u" : u, "w" : w, "p" : p, "r" : r, "n" : n})
v.ins({"s" : None, "q" : None})
if not(f["integerp"](v["n"])):
return(None)
v["q"] = ((v["u"] + v["w"]) / 2)
v["s"] = [v["u"], v["w"]]
return((block85290(v) if (v["n"] > 1) else v["s"]))
########################
## cantorseq Function ##
########################
def cantorseq(n, v = v):
v = Stack({}, v)
v.ins({"n" : n})
v.ins({"seq" : [0, 1], "l" : [], "r" : []})
if not(f["integerp"](v["n"])):
return(f["cantorseq"](v["n"]))
for v["k"] in range(1, ((v["n"] + (-1)) + 1)):
v["r"] = v["seq"]
# Map divide using Lambda in the following
v["l"] = (v["r"] / 3)
v["r"] = f["reverse"]((1 + (-v["l"])))
v["seq"] = f["append"](v["l"], v["r"])
return(v["seq"])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorr2"](x, 10), ['x', 0, 1])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorrd"](x, 10), ['x', 0, 1])
f["plot2d"](lambda x, v = Stack({}, v): f["cantorri"](x, 10), ['x', 0, 1])
| de | 0.708855 | ######################### ### Cantorr2 Function ### ######################### ######################### ### Cantorri Function ### ######################### ######################### ### Cantorrd Function ### ######################### ######################## ## Cantorr_p Function ## ######################## ######################### ## gcantorseq Function ## ######################### ######################## ## cantorseq Function ## ######################## # Map divide using Lambda in the following | 3.309628 | 3 |
cytoskeleton_analyser/plasma_membrane.py | vsukhor/cytoskeleton-analyser | 0 | 6621954 | <reponame>vsukhor/cytoskeleton-analyser
# Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
""" Plasma membrane class. Encapsulates a 3d mesh of cell boundary.
It defines volume available for the microtubules.
"""
from pathlib import Path
import meshio
import numpy as np
from scipy.spatial.distance import cdist
from .cells import CellType
class PlasmaMembrane:
"""A minimalistic cell plasma membrane.
I is used mostly as a 3d cell boundary to limit the volume
available for microtubules.
"""
def __init__(
self,
path: Path,
cell: CellType,
origin: np.ndarray = np.zeros(3)
):
"""
:param path: Path to the mesh file in .stl format.
:param cell: Cell type.
:param origin: Cell geometric origin point.
"""
#: meshio.Mesh object containing mesh representing the membrane.
self.mesh: meshio.Mesh = self.load(path, cell)
#: Minimal position of mesh points.
self.min_ = self.mesh.points.min(0)
#: Maxiimal position of mesh points.
self.max_ = self.mesh.points.max(0)
#: Point of ell geometric origin.
self.origin: np.ndarray = origin
@staticmethod
def load(
path: Path,
cell: CellType,
) -> meshio.Mesh:
"""Read in cell membrane from file into a meshio.Mesh object.
:param path: Path to the mesh file in .stl format.
:param cell: Cell type.
:return: Initialized mesh.
"""
fname = path / f"plasmaMesh_{cell.plmind}.stl"
return meshio.read(fname)
def radial_extent(self) -> float:
"""Max extents of the membrane mesh in xy plane.
:return: Distance to the furthest mesh node in xy plane.
"""
return max(cdist(self.mesh.points[:, :2],
np.array([self.origin[:2]])).T[0])
| # Copyright (c) 2021 <NAME>. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
""" Plasma membrane class. Encapsulates a 3d mesh of cell boundary.
It defines volume available for the microtubules.
"""
from pathlib import Path
import meshio
import numpy as np
from scipy.spatial.distance import cdist
from .cells import CellType
class PlasmaMembrane:
"""A minimalistic cell plasma membrane.
I is used mostly as a 3d cell boundary to limit the volume
available for microtubules.
"""
def __init__(
self,
path: Path,
cell: CellType,
origin: np.ndarray = np.zeros(3)
):
"""
:param path: Path to the mesh file in .stl format.
:param cell: Cell type.
:param origin: Cell geometric origin point.
"""
#: meshio.Mesh object containing mesh representing the membrane.
self.mesh: meshio.Mesh = self.load(path, cell)
#: Minimal position of mesh points.
self.min_ = self.mesh.points.min(0)
#: Maxiimal position of mesh points.
self.max_ = self.mesh.points.max(0)
#: Point of ell geometric origin.
self.origin: np.ndarray = origin
@staticmethod
def load(
path: Path,
cell: CellType,
) -> meshio.Mesh:
"""Read in cell membrane from file into a meshio.Mesh object.
:param path: Path to the mesh file in .stl format.
:param cell: Cell type.
:return: Initialized mesh.
"""
fname = path / f"plasmaMesh_{cell.plmind}.stl"
return meshio.read(fname)
def radial_extent(self) -> float:
"""Max extents of the membrane mesh in xy plane.
:return: Distance to the furthest mesh node in xy plane.
"""
return max(cdist(self.mesh.points[:, :2],
np.array([self.origin[:2]])).T[0]) | en | 0.713039 | # Copyright (c) 2021 <NAME>. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER ''AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- Plasma membrane class. Encapsulates a 3d mesh of cell boundary. It defines volume available for the microtubules. A minimalistic cell plasma membrane. I is used mostly as a 3d cell boundary to limit the volume available for microtubules. :param path: Path to the mesh file in .stl format. :param cell: Cell type. :param origin: Cell geometric origin point. 
#: meshio.Mesh object containing mesh representing the membrane. #: Minimal position of mesh points. #: Maxiimal position of mesh points. #: Point of ell geometric origin. Read in cell membrane from file into a meshio.Mesh object. :param path: Path to the mesh file in .stl format. :param cell: Cell type. :return: Initialized mesh. Max extents of the membrane mesh in xy plane. :return: Distance to the furthest mesh node in xy plane. | 1.408543 | 1 |
snooty/test_devhub.py | schmalliso/snooty-parser | 0 | 6621955 | <gh_stars>0
from pathlib import Path
from typing import cast, Any, Dict, List
from .types import BuildIdentifierSet, FileId, SerializableType
from .parser import Project
from .test_project import Backend
from .util_test import check_ast_testing_string
import pytest
@pytest.fixture
def backend() -> Backend:
backend = Backend()
build_identifiers: BuildIdentifierSet = {}
with Project(Path("test_data/test_devhub"), backend, build_identifiers) as project:
project.build()
return backend
def test_queryable_fields(backend: Backend) -> None:
page_id = FileId("includes/authors/lastname-firstname.rst")
page = backend.pages[page_id]
query_fields: Dict[str, SerializableType] = page.query_fields
assert len(page.static_assets) == 1
page_id = FileId("index.txt")
page = backend.pages[page_id]
query_fields = page.query_fields
assert len(page.static_assets) == 1
assert query_fields is not None
assert query_fields["author"] == {
"name": "<NAME>",
"image": "/images/bio-ken.jpg",
"checksum": "324b32910cb1080451f033fea7f916c6d33ac851b868b4bca829a4b900a809d6",
}
assert query_fields["tags"] == ["foo", "bar", "baz"]
assert query_fields["languages"] == ["nodejs", "java"]
assert query_fields["products"] == ["Realm", "MongoDB"]
# Incorrectly formatted date is omitted
assert query_fields.get("pubdate") is None
assert query_fields["updated-date"] == "2019-02-02"
assert query_fields["atf-image"] == "/images/atf-images/generic/pattern-green.png"
assert query_fields["type"] == "article, quickstart, how-to, video, live"
assert query_fields["level"] == "beginner, intermediate, advanced"
assert query_fields["slug"] == "/"
related = cast(Any, query_fields["related"])
check_ast_testing_string(
related[0], "<literal><text>list of related articles</text></literal>"
)
check_ast_testing_string(
related[1], """<role name="doc" target="/path/to/article"></role>"""
)
check_ast_testing_string(
related[2], """<literal><text>:doc:`/path/to/other/article`</text></literal>"""
)
meta_description = cast(Any, query_fields["meta-description"])
check_ast_testing_string(
meta_description[0],
"<paragraph><text>meta description (160 characters or fewer)</text></paragraph>",
)
title = cast(Any, query_fields["title"])
assert len(title) == 1
check_ast_testing_string(title[0], "<text>h1 Article Title</text>")
def test_page_groups(backend: Backend) -> None:
"""Test that page groups are correctly filtered and cleaned."""
page_groups: Dict[str, List[str]] = cast(Any, backend.metadata["pageGroups"])
assert page_groups == {"Group 1": ["index", "index"]}
| from pathlib import Path
from typing import cast, Any, Dict, List
from .types import BuildIdentifierSet, FileId, SerializableType
from .parser import Project
from .test_project import Backend
from .util_test import check_ast_testing_string
import pytest
@pytest.fixture
def backend() -> Backend:
backend = Backend()
build_identifiers: BuildIdentifierSet = {}
with Project(Path("test_data/test_devhub"), backend, build_identifiers) as project:
project.build()
return backend
def test_queryable_fields(backend: Backend) -> None:
page_id = FileId("includes/authors/lastname-firstname.rst")
page = backend.pages[page_id]
query_fields: Dict[str, SerializableType] = page.query_fields
assert len(page.static_assets) == 1
page_id = FileId("index.txt")
page = backend.pages[page_id]
query_fields = page.query_fields
assert len(page.static_assets) == 1
assert query_fields is not None
assert query_fields["author"] == {
"name": "<NAME>",
"image": "/images/bio-ken.jpg",
"checksum": "324b32910cb1080451f033fea7f916c6d33ac851b868b4bca829a4b900a809d6",
}
assert query_fields["tags"] == ["foo", "bar", "baz"]
assert query_fields["languages"] == ["nodejs", "java"]
assert query_fields["products"] == ["Realm", "MongoDB"]
# Incorrectly formatted date is omitted
assert query_fields.get("pubdate") is None
assert query_fields["updated-date"] == "2019-02-02"
assert query_fields["atf-image"] == "/images/atf-images/generic/pattern-green.png"
assert query_fields["type"] == "article, quickstart, how-to, video, live"
assert query_fields["level"] == "beginner, intermediate, advanced"
assert query_fields["slug"] == "/"
related = cast(Any, query_fields["related"])
check_ast_testing_string(
related[0], "<literal><text>list of related articles</text></literal>"
)
check_ast_testing_string(
related[1], """<role name="doc" target="/path/to/article"></role>"""
)
check_ast_testing_string(
related[2], """<literal><text>:doc:`/path/to/other/article`</text></literal>"""
)
meta_description = cast(Any, query_fields["meta-description"])
check_ast_testing_string(
meta_description[0],
"<paragraph><text>meta description (160 characters or fewer)</text></paragraph>",
)
title = cast(Any, query_fields["title"])
assert len(title) == 1
check_ast_testing_string(title[0], "<text>h1 Article Title</text>")
def test_page_groups(backend: Backend) -> None:
"""Test that page groups are correctly filtered and cleaned."""
page_groups: Dict[str, List[str]] = cast(Any, backend.metadata["pageGroups"])
assert page_groups == {"Group 1": ["index", "index"]} | en | 0.675505 | # Incorrectly formatted date is omitted <role name="doc" target="/path/to/article"></role> <literal><text>:doc:`/path/to/other/article`</text></literal> Test that page groups are correctly filtered and cleaned. | 2.283867 | 2 |
neutronpy/crystal/tools.py | neutronpy/neutronpy | 14 | 6621956 | <reponame>neutronpy/neutronpy
# -*- coding: utf-8 -*-
import numpy as np
def gram_schmidt(in_vecs, row_vecs=True, normalize=True):
r"""
"""
if not row_vecs:
in_vecs = in_vecs.T
out_vecs = in_vecs[0:1, :]
for i in range(1, in_vecs.shape[0]):
proj = np.diag((in_vecs[i, :].dot(out_vecs.T) / np.linalg.norm(out_vecs, axis=1) ** 2).flat).dot(out_vecs)
out_vecs = np.vstack((out_vecs, in_vecs[i, :] - proj.sum(0)))
if normalize:
out_vecs = np.diag(1 / np.linalg.norm(out_vecs, axis=1)).dot(out_vecs)
if not row_vecs:
out_vecs = out_vecs.T
return out_vecs
| # -*- coding: utf-8 -*-
import numpy as np
def gram_schmidt(in_vecs, row_vecs=True, normalize=True):
r"""
"""
if not row_vecs:
in_vecs = in_vecs.T
out_vecs = in_vecs[0:1, :]
for i in range(1, in_vecs.shape[0]):
proj = np.diag((in_vecs[i, :].dot(out_vecs.T) / np.linalg.norm(out_vecs, axis=1) ** 2).flat).dot(out_vecs)
out_vecs = np.vstack((out_vecs, in_vecs[i, :] - proj.sum(0)))
if normalize:
out_vecs = np.diag(1 / np.linalg.norm(out_vecs, axis=1)).dot(out_vecs)
if not row_vecs:
out_vecs = out_vecs.T
return out_vecs | en | 0.769321 | # -*- coding: utf-8 -*- | 2.681219 | 3 |
DPS_Huijben2020/CIFAR10_MNIST/trainingUpdate_callback.py | IamHuijben/Deep-Probabilistic-Subsampling | 12 | 6621957 | """
=============================================================================
Eindhoven University of Technology
==============================================================================
Source Name : trainingUpdate_callback.py
Callback which displays the training graph for the train
and validation set at the end of each X epochs.
If a save directory is provided, the graph is saved
Author : <NAME>
Date : 15/01/2019
Reference : <NAME>, <NAME>, and <NAME>,
"Deep probabilistic subsampling for task-adaptive compressed sensing", 2019
==============================================================================
"""
import keras
import numpy as np
import matplotlib.pyplot as plt
class training_callback(keras.callbacks.Callback):
    """Keras callback that records metrics and periodically plots the
    training/validation curves, optionally saving the figure to disk.

    Parameters
    ----------
    outputPerNepochs : int
        Plot every ``outputPerNepochs`` epochs.
    outputLastNepochs : sequence
        Two-element sequence ``(N, n_epochs)``: additionally plot during
        the last ``N`` of the ``n_epochs`` training epochs.
    savedir : str or None
        Directory in which the figure is written (svg + png); nothing is
        saved when falsy.
    reconVSclassif : str
        ``'recon'`` selects the reconstruction-metric plots; any other
        value selects the classification accuracy plot.
    """

    def __init__(self, outputPerNepochs, outputLastNepochs, savedir, reconVSclassif):
        self.outputPerNepochs = outputPerNepochs
        self.outputLastNepochs = outputLastNepochs[0]
        self.n_epochs = outputLastNepochs[1]
        self.savedir = savedir
        self.reconVSclassif = reconVSclassif
        # Per-epoch metric histories for the reconstruction task.
        self.train_MSE_im = []
        self.val_MSE_im = []
        self.train_PSNR_im = []
        self.val_PSNR_im = []
        self.train_SSIM_im = []
        self.val_SSIM_im = []
        self.train_MSE_feat = []
        self.val_MSE_feat = []
        # Per-epoch metric histories for the classification task.
        self.train_acc = []
        self.val_acc = []

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # NOTE: the log keys below must match the model's output/metric
        # names; they differ between keras versions -- confirm on upgrade.
        if self.reconVSclassif == 'recon':
            self.train_MSE_im.append(logs.get('ImageOutput_mean_squared_error'))
            self.val_MSE_im.append(logs.get('val_ImageOutput_mean_squared_error'))
            self.train_PSNR_im.append(logs.get('ImageOutput_PSNR'))
            self.val_PSNR_im.append(logs.get('val_ImageOutput_PSNR'))
            self.train_SSIM_im.append(logs.get('ImageOutput_SSIM'))
            self.val_SSIM_im.append(logs.get('val_ImageOutput_SSIM'))
            self.train_MSE_feat.append(logs.get('FeatureOutput_mean_squared_error'))
            self.val_MSE_feat.append(logs.get('val_FeatureOutput_mean_squared_error'))
        else:
            self.train_acc.append(logs.get('acc'))
            self.val_acc.append(logs.get('val_acc'))
        if (epoch+1) % self.outputPerNepochs == 0 or (epoch+1) > (self.n_epochs-self.outputLastNepochs):
            if self.reconVSclassif == 'recon':
                self._plot_recon(epoch)
            else:
                self._plot_classif(epoch)
            if self.savedir:
                import os  # local import keeps the module header untouched
                # os.path.join instead of '\\' concatenation so the figure
                # also lands in savedir on non-Windows platforms.
                plt.savefig(os.path.join(self.savedir, 'TrainingGraph.svg'), bbox_inches='tight')
                plt.savefig(os.path.join(self.savedir, 'TrainingGraph.png'), bbox_inches='tight')
            plt.pause(.1)
        return

    def _plot_recon(self, epoch):
        """Draw the 2x2 grid of reconstruction metrics (MSE/PSNR/SSIM)."""
        epochs = np.arange(epoch + 1)
        plt.figure(figsize=(10, 10))
        plt.gcf().clear()
        panels = [
            (self.train_MSE_im, self.val_MSE_im, 'MSE - images', 'MSE', 'upper right'),
            (self.train_PSNR_im, self.val_PSNR_im, 'PSNR - images', 'PSNR', 'lower right'),
            (self.train_SSIM_im, self.val_SSIM_im, 'SSIM - images', 'SSIM', 'lower right'),
            (self.train_MSE_feat, self.val_MSE_feat, 'MSE - features', 'MSE', 'upper right'),
        ]
        for idx, (train, val, title, ylabel, legend_loc) in enumerate(panels):
            plt.subplot(2, 2, idx + 1)
            plt.plot(epochs, train)
            plt.plot(epochs, val)
            plt.title(title)
            plt.xlabel('Epoch')
            plt.ylabel(ylabel)
            plt.legend(['Train', 'Val'], loc=legend_loc)
            plt.grid()

    def _plot_classif(self, epoch):
        """Draw the accuracy curve for the classification task."""
        epochs = np.arange(epoch + 1)
        plt.figure()
        plt.plot(epochs, self.train_acc)
        plt.plot(epochs, self.val_acc)
        plt.title('Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Acc')
        plt.legend(['Train', 'Val'], loc='lower right')
        plt.grid()
=============================================================================
Eindhoven University of Technology
==============================================================================
Source Name : trainingUpdate_callback.py
Callback which displays the training graph for the train
and validation set at the end of each X epochs.
If a save directory is provided, the graph is saved
Author : <NAME>
Date : 15/01/2019
Reference : <NAME>, <NAME>, and <NAME>,
"Deep probabilistic subsampling for task-adaptive compressed sensing", 2019
==============================================================================
"""
import keras
import numpy as np
import matplotlib.pyplot as plt
class training_callback(keras.callbacks.Callback):
    """Keras callback that records metrics and periodically plots the
    training/validation curves, optionally saving the figure to disk.

    Parameters
    ----------
    outputPerNepochs : int
        Plot every ``outputPerNepochs`` epochs.
    outputLastNepochs : sequence
        Two-element sequence ``(N, n_epochs)``: additionally plot during
        the last ``N`` of the ``n_epochs`` training epochs.
    savedir : str or None
        Directory in which the figure is written (svg + png); nothing is
        saved when falsy.
    reconVSclassif : str
        ``'recon'`` selects the reconstruction-metric plots; any other
        value selects the classification accuracy plot.
    """

    def __init__(self, outputPerNepochs, outputLastNepochs, savedir, reconVSclassif):
        self.outputPerNepochs = outputPerNepochs
        self.outputLastNepochs = outputLastNepochs[0]
        self.n_epochs = outputLastNepochs[1]
        self.savedir = savedir
        self.reconVSclassif = reconVSclassif
        # Per-epoch metric histories for the reconstruction task.
        self.train_MSE_im = []
        self.val_MSE_im = []
        self.train_PSNR_im = []
        self.val_PSNR_im = []
        self.train_SSIM_im = []
        self.val_SSIM_im = []
        self.train_MSE_feat = []
        self.val_MSE_feat = []
        # Per-epoch metric histories for the classification task.
        self.train_acc = []
        self.val_acc = []

    def on_train_begin(self, logs={}):
        return

    def on_train_end(self, logs={}):
        return

    def on_epoch_begin(self, epoch, logs={}):
        return

    def on_epoch_end(self, epoch, logs={}):
        # NOTE: the log keys below must match the model's output/metric
        # names; they differ between keras versions -- confirm on upgrade.
        if self.reconVSclassif == 'recon':
            self.train_MSE_im.append(logs.get('ImageOutput_mean_squared_error'))
            self.val_MSE_im.append(logs.get('val_ImageOutput_mean_squared_error'))
            self.train_PSNR_im.append(logs.get('ImageOutput_PSNR'))
            self.val_PSNR_im.append(logs.get('val_ImageOutput_PSNR'))
            self.train_SSIM_im.append(logs.get('ImageOutput_SSIM'))
            self.val_SSIM_im.append(logs.get('val_ImageOutput_SSIM'))
            self.train_MSE_feat.append(logs.get('FeatureOutput_mean_squared_error'))
            self.val_MSE_feat.append(logs.get('val_FeatureOutput_mean_squared_error'))
        else:
            self.train_acc.append(logs.get('acc'))
            self.val_acc.append(logs.get('val_acc'))
        if (epoch+1) % self.outputPerNepochs == 0 or (epoch+1) > (self.n_epochs-self.outputLastNepochs):
            if self.reconVSclassif == 'recon':
                self._plot_recon(epoch)
            else:
                self._plot_classif(epoch)
            if self.savedir:
                import os  # local import keeps the module header untouched
                # os.path.join instead of '\\' concatenation so the figure
                # also lands in savedir on non-Windows platforms.
                plt.savefig(os.path.join(self.savedir, 'TrainingGraph.svg'), bbox_inches='tight')
                plt.savefig(os.path.join(self.savedir, 'TrainingGraph.png'), bbox_inches='tight')
            plt.pause(.1)
        return

    def _plot_recon(self, epoch):
        """Draw the 2x2 grid of reconstruction metrics (MSE/PSNR/SSIM)."""
        epochs = np.arange(epoch + 1)
        plt.figure(figsize=(10, 10))
        plt.gcf().clear()
        panels = [
            (self.train_MSE_im, self.val_MSE_im, 'MSE - images', 'MSE', 'upper right'),
            (self.train_PSNR_im, self.val_PSNR_im, 'PSNR - images', 'PSNR', 'lower right'),
            (self.train_SSIM_im, self.val_SSIM_im, 'SSIM - images', 'SSIM', 'lower right'),
            (self.train_MSE_feat, self.val_MSE_feat, 'MSE - features', 'MSE', 'upper right'),
        ]
        for idx, (train, val, title, ylabel, legend_loc) in enumerate(panels):
            plt.subplot(2, 2, idx + 1)
            plt.plot(epochs, train)
            plt.plot(epochs, val)
            plt.title(title)
            plt.xlabel('Epoch')
            plt.ylabel(ylabel)
            plt.legend(['Train', 'Val'], loc=legend_loc)
            plt.grid()

    def _plot_classif(self, epoch):
        """Draw the accuracy curve for the classification task."""
        epochs = np.arange(epoch + 1)
        plt.figure()
        plt.plot(epochs, self.train_acc)
        plt.plot(epochs, self.val_acc)
        plt.title('Accuracy')
        plt.xlabel('Epoch')
        plt.ylabel('Acc')
        plt.legend(['Train', 'Val'], loc='lower right')
        plt.grid()
8th_exercise/so.py | jerluebke/comp_phys | 2 | 6621958 | <reponame>jerluebke/comp_phys
# -*- coding: utf-8 -*-
"""
harmonic oscillator with stochastic noise
ddX + ω**2 X dt = μ dW
as a 2nd order system:
dX = V dt
dV = μ dW - ω**2 * X dt
note: euler's method causes the system to 'gain' energy (growing amplitudes)
"""
# TODO:
# check/discuss results
# compare with undisturbed and exact analytic solution
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(100)  # fixed seed for reproducible noise
X0 = 1; V0 = 1;                  # initial position / velocity
ω = 10; μ = .2;                  # angular frequency / noise amplitude
T = 1; N = 2**10; dt = T / N;    # time horizon, number of steps, step size
t = np.arange(0, T+dt, dt)
# Wiener increments: dW ~ N(0, dt), i.e. sqrt(dt) * standard normals.
# (Previously plain standard normals were drawn, which overstates the
# noise in the Euler-Maruyama update below by a factor 1/sqrt(dt).)
dW = np.sqrt(dt) * np.random.randn(N)
X = np.zeros(N+1)
V = np.zeros(N+1)
X[0] = X0
V[0] = V0
# Explicit Euler-Maruyama step for the 2nd-order system (see module
# docstring: the explicit scheme makes the amplitude grow over time).
for i in range(1, N+1):
    X[i] = X[i-1] + V[i-1] * dt
    V[i] = V[i-1] - ω**2 * X[i-1] * dt + μ * dW[i-1]
# Plot trajectory and velocity against the shared time axis.
plt.plot(t, X, 'r-', label='X(t)')
plt.plot(t, V, 'g--', label='V(t)')
plt.xlabel('t')
plt.ylabel('Amplitude')
plt.legend()
| # -*- coding: utf-8 -*-
"""
harmonic oscillator with stochastic noise
ddX + ω**2 X dt = μ dW
as a 2nd order system:
dX = V dt
dV = μ dW - ω**2 * X dt
note: euler's method causes the system to 'gain' energy (growing amplitudes)
"""
# TODO:
# check/discuss results
# compare with undisturbed and exact analytic solution
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(100)  # fixed seed for reproducibility
X0 = 1; V0 = 1;                  # initial position / velocity
ω = 10; μ = .2;                  # angular frequency / noise amplitude
T = 1; N = 2**10; dt = T / N;    # time horizon, number of steps, step size
t = np.arange(0, T+dt, dt)
# NOTE(review): for a consistent Euler-Maruyama scheme the Wiener
# increments should be sqrt(dt)-scaled standard normals (dW ~ N(0, dt));
# as written the noise term is too large by 1/sqrt(dt) -- confirm intended.
dW = np.random.randn(N)
X = np.zeros(N+1)
V = np.zeros(N+1)
X[0] = X0
V[0] = V0
# Explicit Euler step (known to gain energy, see module docstring).
for i in range(1, N+1):
    X[i] = X[i-1] + V[i-1] * dt
    V[i] = V[i-1] - ω**2 * X[i-1] * dt + μ * dW[i-1]
# Plot trajectory and velocity against the shared time axis.
plt.plot(t, X, 'r-', label='X(t)')
plt.plot(t, V, 'g--', label='V(t)')
plt.xlabel('t')
plt.ylabel('Amplitude')
plt.legend() | en | 0.820624 | # -*- coding: utf-8 -*- harmonic oscillator with stochastic noise ddX + ω**2 X dt = μ dW as a 2nd order system: dX = V dt dV = μ dW - ω**2 * X dt note: euler's method causes the system to 'gain' energy (growing amplitudes) # TODO: # check/discuss results # compare with undisturbed and exact analytic solution | 3.418534 | 3 |
restservice/endpoints.py | CenturyLink/ExpertDHCP | 1 | 6621959 | <reponame>CenturyLink/ExpertDHCP<filename>restservice/endpoints.py
"""
REST API endpoints written in Flask
Only POST Calls for requests requiring data
"""
import sys
import json
import os
from flask import request, Blueprint
from flask_cors import cross_origin
from restservice import LOGGER
from restservice.config import SERVICE_CODE, REQUIRE_API_AUTHENTICATION
from restservice.utilities import response_generator, get_config, leases_hook, get_reservation_info, add_reservation, \
add_subnets, add_reservation_options, add_subnet_options, del_reservation, delete_subnet_options, modify_subnets, \
delete_reservation_options, delete_subnets, add_client_classes, delete_client_classes, modify_client_classes
from restservice.apikeyhandler import verify_api_key
BP = Blueprint('dhcp4', __name__, url_prefix='/dhcp4')
# Status markers embedded in every API response payload.
STATUS_OK = "OK"
STATUS_KO = "KO"
HTTP_200 = 200  # Success
# NOTE(review): value 404 (Not Found) does not match the name HTTP_400
# (Bad Request) or its comment -- confirm which status is intended.
HTTP_400 = 404  # Bad request
HTTP_401 = 401  # None or bad credentials sent
HTTP_500 = 500  # General internal server error
# API CODE FOR SERVICE 100
HOME_API_CODE = "001"
GET_CONFIG_API_CODE = "002"
GET_IP_FROM_MAC_ADDRESS_API_CODE = "003"
GET_ALL_RESERVATIONS_API_CODE = "004"
ADD_RESERVATION_API_CODE = "005"
DELETE_RESERVATION_API_CODE = "006"
ADD_SUBNET4_API_CODE = "007"
DELETE_SUBNET4_API_CODE = "008"
MODIFY_SUBNET4_API_CODE = "009"
ADD_SUBNET_OPTION_API_CODE = "010"
DELETE_SUBNET_OPTION_API_CODE = "011"
ADD_SUBNET_RESERVATION_OPTION_API_CODE = "012"
DELETE_SUBNET_RESERVATION_OPTION_API_CODE = "013"
GET_ALL_LEASES_API_CODE = "014"
ADD_LEASES_API_CODE = "015"
UPDATE_LEASES_API_CODE = "016"
DELETE_LEASES_API_CODE = "017"
WIPE_LEASES_API_CODE = "018"
ADD_CLIENT_CLASSES_API_CODE = "019"
DELETE_CLIENT_CLASSES_API_CODE = "020"
MODIFY_CLIENT_CLASSES_API_CODE = "021"
RESULT_SUCCESS = 0
RESULT_FAILURE = 1
RESULT_EMPTY = 3
# RESPONSE CODES FOR SERVICE 900 - FastDHCP
SUCCESS_1000 = "1000"
SUCCESS_1000_VALUE = "Command execution successful"
SUCCESS_1001 = "1001"
SUCCESS_1001_VALUE = "Leases are empty"
SUCCESS_1002 = "1002"
SUCCESS_1002_VALUE = "No lease found to delete"
SUCCESS_1003 = "1003"
SUCCESS_1003_VALUE = "No lease found to wipe"
ERROR_4000 = "4000"
ERROR_4000_VALUE = "Exception occured in the server. Command unsuccessful"
ERROR_4001 = "4001"
ERROR_4001_VALUE = "Could not find JSON key"
ERROR_4002 = "4002"
ERROR_4002_VALUE = "Unable to fetch server Configuration"
ERROR_4003 = "4003"
ERROR_4003_VALUE = "IP Address not found"
ERROR_4004 = '4004'
ERROR_4004_VALUE = 'Could not fetch leases'
ERROR_4005 = '4005'
ERROR_4005_VALUE = 'Unable to fetch reservations from server.'
ERROR_4006 = "4006"
ERROR_4006_VALUE = "Failed to add reservation due to an issue with KEA server response"
ERROR_4007 = "4007"
ERROR_4007_VALUE = "Failed to add reservation"
ERROR_4008 = "4008"
ERROR_4008_VALUE = "Failed to remove reservation due to an issue with KEA server response"
ERROR_4009 = "4009"
ERROR_4009_VALUE = "Failed to remove reservation"
ERROR_4010 = "4010"
ERROR_4010_VALUE = "Failed to add subnet due to an issue with KEA server response"
ERROR_4011 = "4011"
ERROR_4011_VALUE = "Failed to add subnet"
ERROR_4012 = "4012"
ERROR_4012_VALUE = "Failed to remove subnet due to an issue with KEA server response"
ERROR_4013 = "4013"
ERROR_4013_VALUE = "Failed to remove subnet"
ERROR_4014 = "4014"
ERROR_4014_VALUE = "Failed to add subnet option due to an issue with KEA server response"
ERROR_4015 = "4015"
ERROR_4015_VALUE = "Failed to add subnet option"
ERROR_4016 = "4016"
ERROR_4016_VALUE = "Failed to remove subnet option due to an issue with KEA server response"
ERROR_4017 = "4017"
ERROR_4017_VALUE = "Failed to remove subnet option"
ERROR_4018 = "4018"
ERROR_4018_VALUE = "Failed to add subnet reservation option due to an issue with KEA server response"
ERROR_4019 = "4019"
ERROR_4019_VALUE = "Failed to add subnet reservation option"
ERROR_4020 = "4020"
ERROR_4020_VALUE = "Failed to remove subnet reservation option due to an issue with KEA server response"
ERROR_4021 = "4021"
ERROR_4021_VALUE = "Failed to remove subnet reservation option"
ERROR_4022 = "4022"
ERROR_4022_VALUE = "No Leases found"
ERROR_4023 = "4023"
ERROR_4023_VALUE = "Could not add lease successfully"
ERROR_4024 = "4024"
ERROR_4024_VALUE = "Could not update lease successfully"
ERROR_4025 = "4025"
ERROR_4025_VALUE = "Could not delete lease successfully"
ERROR_4026 = "4026"
ERROR_4026_VALUE = "Could not wipe subnet leases successfully"
ERROR_4027 = "4027"
ERROR_4027_VALUE = "Failed to add client class due to an issue with KEA server response"
ERROR_4028 = "4028"
ERROR_4028_VALUE = "Failed to add client class"
ERROR_4029 = "4029"
ERROR_4029_VALUE = "Failed to remove client class due to an issue with KEA server response"
ERROR_4030 = "4030"
ERROR_4030_VALUE = "Failed to remove client class"
ERROR_4031 = "4031"
ERROR_4031_VALUE = "Failed to modify client class due to an issue with KEA server response"
ERROR_4032 = "4032"
ERROR_4032_VALUE = "Failed to modify client class"
ERROR_4033 = "4033"
ERROR_4033_VALUE = "Failed to modify subnet due to an issue with KEA server response"
ERROR_4034 = "4034"
ERROR_4034_VALUE = "Failed to modify subnet"
# Check for API keys if REQUIRE_API_AUTHENTICATION is enabled in
# API-key authentication: the 5xxx error codes (and the before_request
# hook defined below) only exist when authentication is enabled in config.
if REQUIRE_API_AUTHENTICATION:
    CHECK_AUTH_API = '022'
    # 5xxx codes are reserved for authentication failures.
    ERROR_5000 = "5000"
    ERROR_5000_VALUE = "Invalid request"
    ERROR_5001 = "5001"
    ERROR_5001_VALUE = "Could not find API key in request"
    ERROR_5002 = "5002"
    ERROR_5002_VALUE = "Incorrect API key"
# This function will be called every time a request is received. In case
# the request is a GET, then apikey is extracted from the GET request and
# checked for validity. Other requests are handled similarly.
@BP.before_request
def check_auth():
print("DEBUG: request.endpoint = " + str(request.endpoint))
if request.endpoint == "dhcp4.home":
return
# If request method is POST
if request.method == 'POST':
print("DEBUG: request is POST")
# Extract POST data, look for API key and handle verification
json_req_data = request.get_json()
# If no JSON POST request data is found, then return error
if not json_req_data:
LOGGER.info("Error - No JSON data")
print("Error - No JSON data")
response = response_generator(
STATUS_KO,
HTTP_400,
SERVICE_CODE + CHECK_AUTH_API + ERROR_5000,
ERROR_5000_VALUE,
{'error': ERROR_5000_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
return response
# If JSON POST request data is found, then ...
else:
# If API key is found in JSON data then ...
if "apikey" in json_req_data:
apikey = json_req_data['apikey']
print("DEBUG: apikey = " + str(apikey))
verify_value = verify_api_key(apikey)
print("DEBUG: verify_value = " + str(verify_value))
# If API key is incorrect, send an error back
if verify_value == False:
LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
return return_incorrect_api_key()
else:
print("DEBUG: Could not find API key in request")
LOGGER.error("JSON ERROR - > %s", ERROR_5001_VALUE)
return return_no_api_key_found()
if request.method == 'GET':
print("DEBUG: request is GET")
# Extract GET arguments, look for API key and handle verification
api_key = request.args.get('apikey')
print("DEBUG: api_key = " + str(api_key))
# If no apikey is found then return error
if api_key is None:
return return_no_api_key_found()
else:
print("DEBUG: api_key = " + str(api_key))
verify_value = verify_api_key(api_key)
print("DEBUG: check_auth(): verify_value = " + str(verify_value))
if verify_value == False:
print("DEBUG: check_auth(): returning incorrect api key response")
return return_incorrect_api_key()
# Implement other methods here
def return_no_api_key_found():
response = response_generator(STATUS_KO, HTTP_400,
SERVICE_CODE + CHECK_AUTH_API + ERROR_5001,
ERROR_5001_VALUE, {'error': ERROR_5001_VALUE})
return response
def return_incorrect_api_key():
response = response_generator(STATUS_KO, HTTP_401,
SERVICE_CODE + CHECK_AUTH_API + ERROR_5002, ERROR_5002_VALUE,
{'error': ERROR_5002_VALUE})
return response
@BP.route("/")
@cross_origin()
def home():
    """
    Health-check endpoint: confirms the FastDHCP management server is up.
    Always returns HTTP 200; success/failure is encoded in the payload code.
    """
    LOGGER.info("Executing")
    LOGGER.info("Request url -> /dhcp4/")
    try:
        return response_generator(
            STATUS_OK,
            HTTP_200,
            SERVICE_CODE + HOME_API_CODE + SUCCESS_1000,
            SUCCESS_1000_VALUE,
            {"data": "DHCP Management server is up and running"})
    except Exception as exc:
        # Top-level boundary: log the failure location, answer with the
        # generic 4000 error code.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
        file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
        LOGGER.error(error_msg)
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + HOME_API_CODE + ERROR_4000,
            ERROR_4000_VALUE,
            {"error": repr(exc)}
        )
        return response
@BP.route("/config", methods=['GET'])
@cross_origin()
def get_kea_config():
    """
    A HTTP GET function to fetch the KEA configuration
    from the DHCP server.
    All responses use HTTP 200; outcome is carried by the payload code
    (1000 success, 4002 fetch failure, 4001 missing key, 4000 generic).
    :return : Response config object
    """
    LOGGER.info("Request url -> /dhcp4/config")
    LOGGER.info("Request Method -> GET")
    try:
        kea_config = get_config()
        LOGGER.info("Get config result -> %s", kea_config)
        # get_config() signals failure by returning a dict with 'message'.
        if "message" in kea_config:
            LOGGER.error('Error message --> %s', str(kea_config["message"]))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_CONFIG_API_CODE + ERROR_4002,
                ERROR_4002_VALUE,
                {"error": kea_config["message"]}
            )
        else:
            LOGGER.info("Successfully fetched configuration")
            response = response_generator(
                STATUS_OK,
                HTTP_200,
                SERVICE_CODE + GET_CONFIG_API_CODE + SUCCESS_1000,
                SUCCESS_1000_VALUE,
                {'keaConfig': kea_config}
            )
    except KeyError as exc:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
        file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
        LOGGER.error(error_msg)
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + GET_CONFIG_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {"error": repr(exc)}
        )
    except Exception as exc:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
        file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
        LOGGER.error(error_msg)
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + GET_CONFIG_API_CODE + ERROR_4000,
            ERROR_4000_VALUE,
            {"error": repr(exc)}
        )
    return response
@BP.route("/getipfrommac", methods=['POST'])
@cross_origin()
def get_ip_from_mac():
    """
    A HTTP POST function to fetch the IP address for the given MAC and
    subnet supplied in the JSON request body.
    Parameters ---
    mac_address : <str> : hardware (MAC) address of the host
    subnet_id : <int> : Subnet ID
    """
    LOGGER.info("Request url -> /dhcp4/getipfrommac")
    LOGGER.info("Request Method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        LOGGER.info("Error - No JSON data")
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
        LOGGER.info("mac_address = %s", json_req_data['mac_address'])
        try:
            leases = leases_hook(mac=json_req_data['mac_address'],
                                 subnet_id=json_req_data['subnet_id'], get_one=True)
            # leases_hook returns either an error dict carrying 'message'
            # or a list of lease responses.
            if 'message' in leases:
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4004,
                    ERROR_4004_VALUE,
                    {"error": leases['message']})
            # Guard the empty-result case before touching leases[0]; the
            # previous code indexed first and an empty list ended up in the
            # generic 4000 handler via IndexError.
            elif leases and 'arguments' in leases[0]:
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {"ip_address": leases[0].get("arguments")})
            else:
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4003,
                    ERROR_4003_VALUE,
                    {"error": leases})
        except KeyError as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/reservations", methods=['POST'])
@cross_origin()
def get_all_reservations():
    """
    A HTTP POST function to fetch the reservations
    within a Subnet from the KEA server.
    Parameters ---
    subnet_id : <str> : Subnet ID
    """
    LOGGER.info("Request url -> /dhcp4/reservations")
    LOGGER.info("Request Method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        LOGGER.info("Error - No JSON data")
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        LOGGER.info("subnet_id = %s", str(json_req_data['subnet_id']))
        try:
            dhcp4_config = get_config()
            LOGGER.info("configuration -> %s", str(dhcp4_config))
            LOGGER.info("subnet_id -> %s", json_req_data['subnet_id'])
            # get_config()/get_reservation_info() signal failure through a
            # dict containing 'message'.
            if "message" in dhcp4_config:
                LOGGER.error('Error message --> %s', str(dhcp4_config["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4002,
                    ERROR_4002_VALUE,
                    {'error': dhcp4_config.get("message")})
            else:
                reservations = get_reservation_info(dhcp4_config,
                                                    json_req_data['subnet_id'])
                LOGGER.info("reservations -> %s", reservations)
                if "message" in reservations:
                    LOGGER.error('Error message --> %s', str(reservations["message"]))
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4005,
                        ERROR_4005_VALUE,
                        {'error': reservations.get("message")})
                else:
                    LOGGER.info("Successfully fetched reservations")
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        {'reservations': reservations})
        except KeyError as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/reservations/add", methods=['POST'])
@cross_origin()
def add_reservations():
    """
    A HTTP POST function to add new Reservations to a Subnet.
    Parameters ---
    subnet_id : <int> : Subnet ID
    reservations : <list> : reservation entries to add (passed through to
        utilities.add_reservation)
    """
    LOGGER.info("Request url -> /dhcp4/reservations/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
        try:
            reservations = json_req_data["reservations"]
            LOGGER.info("Reservations from request -> %s",
                        str(reservations))
            LOGGER.info("Going to add reservation for subnet id : %s",
                        json_req_data['subnet_id'])
            result = add_reservation(reservations,
                                     int(json_req_data['subnet_id']))
            LOGGER.info("Result -> %s", str(result))
            # result is True on success, a dict with 'message' on a KEA
            # error, or anything else on other failures.
            if isinstance(result, bool) and result:
                LOGGER.info("Successfully added reservation")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
            # Guard with isinstance: '"message" in result' raised TypeError
            # when result was a bool (e.g. False).
            elif isinstance(result, dict) and "message" in result:
                LOGGER.info("Failed to add reservation")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4006,
                    ERROR_4006_VALUE,
                    {'error': result.get("message")}
                )
            else:
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4007,
                    ERROR_4007_VALUE,
                    {'error': result}
                )
        except KeyError as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/reservations/delete", methods=['POST'])
@cross_origin()
def remove_reservation():
    """
    A HTTP POST function to delete reservations from a Subnet.
    Parameters ---
    subnet_id : <int> : Subnet ID
    hw_addresses : <list> : hardware (MAC) addresses whose reservations
        are removed (passed through to utilities.del_reservation)
    """
    LOGGER.info("Request url -> /dhcp4/reservations/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE}
        )
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
            LOGGER.info("hw_addresses = %s",
                        str(json_req_data['hw_addresses']))
            # Fetch user input
            reservations = json_req_data["hw_addresses"]
            LOGGER.info("hw_addresses -> %s", str(reservations))
            # Delete the reservation; returns True on success, a dict with
            # 'message' on a KEA error, or anything else otherwise.
            LOGGER.info("Going to delete reservation for subnetId : %s",
                        json_req_data['subnet_id'])
            result = del_reservation(reservations, json_req_data['subnet_id'])
            if isinstance(result, bool) and result:
                LOGGER.info("Successfully removed reservation")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
                LOGGER.info("Response -> %s", str(response))
            # Guard with isinstance: '"message" in result' raised TypeError
            # when result was a bool (e.g. False).
            elif isinstance(result, dict) and "message" in result:
                LOGGER.info("Failed to remove reservation. message in result")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4008,
                    ERROR_4008_VALUE,
                    {'error': result.get("message")}
                )
                LOGGER.debug("Response -> %s", str(response))
            else:
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4009,
                    ERROR_4009_VALUE,
                    {'error': result}
                )
                LOGGER.debug("Response -> %s", str(response))
        except KeyError as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/subnets/add", methods=['POST'])
@cross_origin()
def add_subnet():
    """
    A HTTP POST function to add one or more Subnets.
    Parameters ---
    subnet_list : <list> : subnet definitions to add (passed through to
        utilities.add_subnets)
    """
    LOGGER.info("Request url -> /dhcp4/subnets/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            subnet_list = json_req_data["subnet_list"]
            LOGGER.info("subnet -> %s", str(subnet_list))
            LOGGER.info("Going to add a subnet")
            result = add_subnets(subnet_list)
            # result is True on success, a dict with 'message' on a KEA
            # error, or anything else on other failures.
            if result is True:
                LOGGER.info("Successfully added subnet")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET4_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
            # Guard with isinstance: '"message" in result' raised TypeError
            # when result was a non-True bool.
            elif isinstance(result, dict) and "message" in result:
                LOGGER.info("Failed to add subnet. MESSAGE in result")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4010,
                    ERROR_4010_VALUE,
                    {'error': result['message']})
            else:
                LOGGER.info("Failed to add subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4011,
                    ERROR_4011_VALUE,
                    {'error': result}
                )
            LOGGER.debug("Response -> %s", str(response))
        except KeyError as exc:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/delete", methods=['POST'])
@cross_origin()
def remove_subnet():
"""
A HTTP POST function to delete a Subnet
Parameters ---
subnet_ids : list : Subnet IDs
"""
LOGGER.info("Request url -> /dhcp4/subnets/delete")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
subnet_ids = json_req_data["subnet_ids"]
LOGGER.info("subnet_ids -> %s", str(subnet_ids))
LOGGER.info("Going to remove a subnet")
result = delete_subnets(subnet_ids)
if result is True:
LOGGER.info("Successfully removed subnet")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to remove subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4012,
ERROR_4012_VALUE,
{'error': result['message']})
else:
LOGGER.info("Failed to remove subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4013,
ERROR_4013_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/subnets/modify", methods=['POST'])
@cross_origin()
def modify_subnet():
"""
A HTTP POST function to delete a Subnet
Parameters ---
subnet_ids : list : Subnet IDs
"""
LOGGER.info("Request url -> /dhcp4/subnets/modify")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
subnet_list = json_req_data["subnet_list"]
LOGGER.info("subnet_list -> %s", str(subnet_list))
LOGGER.info("Going to modify subnet")
result = modify_subnets(subnet_list)
if result is True:
LOGGER.info("Successfully modified subnet")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to modify subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4033,
ERROR_4033_VALUE,
{'error': result['message']})
else:
LOGGER.info("Failed to modify subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4034,
ERROR_4034_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/subnets/options/add", methods=['POST'])
@cross_origin()
def add_option_subnet():
"""
A HTTP POST function to add option data in Subnet
Parameters ---
subnet_id : <int> : Subnet ID
options_list:
"""
LOGGER.info("Request url -> /dhcp4/subnets/options/add")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
dhcp4_options = json_req_data["options_list"]
LOGGER.info("Option Data -> %s", str(dhcp4_options))
subnet_id = json_req_data["subnet_id"]
LOGGER.info("Going to add option data to a subnet")
result = add_subnet_options(dhcp4_options, subnet_id)
if result is True:
LOGGER.info("Successfully added option to subnet")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to add option to subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4014,
ERROR_4014_VALUE,
{'error': result.get("message")})
else:
LOGGER.info("Failed to add option to subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4015,
ERROR_4015_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/subnets/options/delete", methods=['POST'])
@cross_origin()
def delete_option_subnet():
"""
A HTTP POST function to delete option data from Subnet
Parameters ---
subnet_id : <int> : Subnet ID
codes : <obj> : List of codes to be deleted
"""
LOGGER.info("Request url -> /dhcp4/subnets/options/delete")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
subnet_id = int(json_req_data['subnet_id'])
codes = json_req_data["codes"]
LOGGER.info("codes -> %s", str(codes))
LOGGER.info("Going to delete option data from a subnet")
result = delete_subnet_options(codes, subnet_id)
if result is True:
LOGGER.info("Successfully deleted option from subnet")
__success_value = "Successfully deleted option from subnet"
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to delete option from subnet")
LOGGER.info(result.get("message"))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4016,
ERROR_4016_VALUE,
{'error': result.get("message")})
else:
LOGGER.info("Failed to delete option from subnet")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4017,
ERROR_4017_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/subnets/reservation/options/add", methods=['POST'])
@cross_origin()
def add_options_reservation():
"""
A HTTP POST function to add options to reservations in Subnet
Parameters ---
subnet_id : <int> : Subnet ID
mac : <str> : Hardware address
options_list : <obj> : List of options to be added into the subnet
"""
LOGGER.info("Request url -> /dhcp4/subnets/reservation/options/add")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
LOGGER.info("Going to add options to reservations in Subnet")
result = add_reservation_options(json_req_data["options_list"],
json_req_data["mac"],
int(json_req_data['subnet_id']))
if result is True:
LOGGER.info("Successfully added option to subnet reservation")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to add option to subnet reservation")
LOGGER.info(result.get("message"))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4018,
ERROR_4018_VALUE,
{'error': result.get("message")})
else:
LOGGER.info("Failed to add option to subnet reservation")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4019,
ERROR_4019_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/subnets/reservation/options/delete", methods=['POST'])
@cross_origin()
def delete_options_reservation():
"""
A HTTP POST function to delete option data in reservations for a Subnet
Parameters ---
subnet_id : <int> : Subnet ID
mac : <str> : Hardware address
codes : <obj> : List of codes to be deleted
"""
LOGGER.info("Request url -> /dhcp4/subnets/reservation/options/delete")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.info("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
LOGGER.info("Going to delete option data in reservations for a Subnet")
result = delete_reservation_options(json_req_data['codes'],
json_req_data['mac'],
int(json_req_data['subnet_id']))
if result is True:
LOGGER.info("Successfully deleted option on reservation")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{"data": result}
)
elif "message" in result:
LOGGER.info("Failed to delete option on reservation")
LOGGER.info(result.get("message"))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4020,
ERROR_4020_VALUE,
{'error': result.get("message")}
)
else:
LOGGER.info("Failed to delete option on reservation")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4021,
ERROR_4021_VALUE,
{'error': result}
)
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/leases", methods=['GET'])
@cross_origin()
def get_all_leases():
"""
A HTTP GET function to fetch the all active leases
"""
LOGGER.info("Request url -> /dhcp4/leases")
LOGGER.info("Request method -> GET")
try:
leases = leases_hook()
LOGGER.info("type(response) = %s", str(type(leases)))
LOGGER.info("response[0] = %s", leases)
if 'message' not in leases:
if leases is not None and 'arguments' in leases[0].keys():
arguments = leases[0].get("arguments")
result = leases[0].get("result")
# 0 => SUCCESS , 1 => FAILED, 3 => Empty object
LOGGER.info("Response -> %s", json.dumps(leases))
if result == RESULT_SUCCESS:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
arguments)
elif result == RESULT_EMPTY:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + SUCCESS_1001,
SUCCESS_1001_VALUE,
arguments)
else:
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4022,
ERROR_4022_VALUE,
{"error": leases})
else:
LOGGER.info("Response -> %s", json.dumps(leases))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4022,
ERROR_4022_VALUE,
{"error": leases})
else:
LOGGER.error('Error message --> %s', str(leases["message"]))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4004,
ERROR_4004_VALUE,
{"error": leases['message']})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": str(excp)}
)
return response
@BP.route("/lease/add", methods=['POST'])
@cross_origin()
def add_lease():
"""
A HTTP GET function to add a new lease
"""
LOGGER.info("Request url -> /dhcp4/lease/add")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.info("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
leases = leases_hook(add_lease=True, arguments=json_req_data['arguments'])
LOGGER.info("type(response) = %s", str(type(leases)))
LOGGER.info("response[0] = %s", leases)
if 'message' not in leases:
if leases is not None and 'result' in leases[0].keys():
result = leases[0].get("result")
text = leases[0].get("text")
LOGGER.info("Response -> %s", json.dumps(leases))
# 0 => SUCCESS , 1 => FAILED
if result == RESULT_SUCCESS:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
leases)
else:
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
ERROR_4023_VALUE,
leases)
else:
LOGGER.info("Response -> %s", json.dumps(leases))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
ERROR_4023_VALUE,
{"error": leases})
else:
LOGGER.error('Error message --> %s', str(leases["message"]))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
ERROR_4023_VALUE,
{"error": leases['message']})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": str(excp)}
)
return response
@BP.route("/lease/update", methods=['POST'])
@cross_origin()
def update_lease():
"""
A HTTP GET function to update active lease
"""
LOGGER.info("Request url -> /dhcp4/lease/update")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.info("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
leases = leases_hook(update_lease=True, arguments=json_req_data['arguments'])
LOGGER.info("type(response) = %s", str(type(leases)))
LOGGER.info("response[0] = %s", leases)
if 'message' not in leases:
if leases is not None and 'result' in leases[0].keys():
result = leases[0].get("result")
LOGGER.info("Response -> %s", json.dumps(leases))
# 0 => SUCCESS , 1 => FAILED
if result == RESULT_SUCCESS:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
leases)
else:
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
ERROR_4024_VALUE,
leases)
else:
LOGGER.info("Response -> %s", json.dumps(leases))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
ERROR_4024_VALUE,
{"error": leases})
else:
LOGGER.error('Error message --> %s', str(leases["message"]))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
ERROR_4024_VALUE,
{"error": leases['message']})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": str(excp)}
)
return response
@BP.route("/lease/delete", methods=['POST'])
@cross_origin()
def delete_lease():
"""
A HTTP GET function to delete active leases
"""
LOGGER.info("Request url -> /dhcp4/lease/delete")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.info("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
leases = leases_hook(delete_lease=True, ip=json_req_data['ip'])
LOGGER.info("type(response) = %s", str(type(leases)))
LOGGER.info("response[0] = %s", leases)
if 'message' not in leases:
if leases is not None and 'result' in leases[0].keys():
result = leases[0].get("result")
LOGGER.info("Response -> %s", json.dumps(leases))
# 0 => SUCCESS , 1 => FAILED, 3 => Empty object
if result == RESULT_SUCCESS:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
leases)
elif result == RESULT_EMPTY:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + SUCCESS_1002,
SUCCESS_1002_VALUE,
leases)
else:
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
ERROR_4025_VALUE,
leases)
else:
LOGGER.info("Response -> %s", json.dumps(leases))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
ERROR_4025_VALUE,
{"error": leases})
else:
LOGGER.error('Error message --> %s', str(leases["message"]))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
ERROR_4025_VALUE,
{"error": leases['message']})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": str(excp)}
)
return response
@BP.route("/lease/wipe", methods=['POST'])
@cross_origin()
def wipe_lease():
"""
A HTTP GET function to remove all active leases of a subnet
"""
LOGGER.info("Request url -> /dhcp4/lease/wipe")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.info("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
leases = leases_hook(wipe_subnet_lease=True, subnet_id=int(json_req_data['subnet_id']))
LOGGER.info("type(response) = %s", str(type(leases)))
LOGGER.info("response[0] = %s", leases)
if 'message' not in leases:
if leases is not None and 'result' in leases[0].keys():
result = leases[0].get("result")
LOGGER.info("Response -> %s", json.dumps(leases))
# 0 => SUCCESS , 1 => FAILED, 3 => Empty object
if result == RESULT_SUCCESS:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
leases)
elif result == RESULT_EMPTY:
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + SUCCESS_1003,
SUCCESS_1003_VALUE,
leases)
else:
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
ERROR_4026_VALUE,
leases)
else:
LOGGER.info("Response -> %s", json.dumps(leases))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
ERROR_4026_VALUE,
{"error": leases})
else:
LOGGER.error('Error message --> %s', str(leases["message"]))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
ERROR_4026_VALUE,
{"error": leases['message']})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": str(excp)}
)
return response
@BP.route("/classes/add", methods=['POST'])
@cross_origin()
def add_class():
"""
A HTTP POST function to add a class
Parameters ---
subnet_list : <int> : Subnet ID
"""
LOGGER.info("Request url -> /dhcp4/classes/add")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
client_class_list = json_req_data["client_class_list"]
LOGGER.info("client_classes -> %s", str(client_class_list))
LOGGER.info("Going to add a client_classes")
result = add_client_classes(client_class_list)
if result is True:
LOGGER.info("Successfully added client class")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result}
)
elif "message" in result:
LOGGER.info("Failed to add client class. MESSAGE in result")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4027,
ERROR_4027_VALUE,
{'error': result['message']})
else:
LOGGER.info("Failed to add client class")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4028,
ERROR_4028_VALUE,
{'error': result}
)
LOGGER.debug("Response -> %s", str(response))
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/classes/delete", methods=['POST'])
@cross_origin()
def remove_class():
"""
A HTTP POST function to delete a class
Parameters ---
subnet_ids : list : Subnet IDs
"""
LOGGER.info("Request url -> /dhcp4/classes/delete")
LOGGER.info("Request method -> POST")
json_req_data = request.get_json()
if not json_req_data:
response = response_generator(
STATUS_KO,
HTTP_401,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{'error': ERROR_4001_VALUE})
LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
else:
try:
LOGGER.info("Printing received JSON data")
LOGGER.info("json_req_data = %s", str(json_req_data))
class_names = json_req_data["class_names"]
LOGGER.info("class_names -> %s", str(class_names))
LOGGER.info("Going to remove a class")
result = delete_client_classes(class_names)
if result is True:
LOGGER.info("Successfully removed class")
response = response_generator(
STATUS_OK,
HTTP_200,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
SUCCESS_1000_VALUE,
{'data': result})
elif "message" in result:
LOGGER.info("Failed to remove class")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4029,
ERROR_4029_VALUE,
{'error': result['message']})
else:
LOGGER.info("Failed to remove class")
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4030,
ERROR_4030_VALUE,
{'error': result})
except KeyError as exc:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
LOGGER.error(error_msg)
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4001,
ERROR_4001_VALUE,
{"error": repr(exc)}
)
except Exception as excp:
exc_type, exc_obj, exc_tb = sys.exc_info()
LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
LOGGER.error('%s, %s, %s', str(exc_type),
str(file_name), str(exc_tb.tb_lineno))
response = response_generator(
STATUS_KO,
HTTP_200,
SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4000,
ERROR_4000_VALUE,
{"error": repr(excp)})
return response
@BP.route("/classes/modify", methods=['POST'])
@cross_origin()
def modify_class():
    """
    A HTTP POST function to modify a CLIENT CLASS.

    Expected JSON body:
        client_class_list : list : client class definitions to apply

    Returns the standard response dict from response_generator:
    SUCCESS_1000 on success, ERROR_4031 when the KEA server reports a
    failure message, ERROR_4032 for any other failure result,
    ERROR_4001 for missing/malformed JSON, ERROR_4000 for unexpected
    exceptions.
    """
    LOGGER.info("Request url -> /dhcp4/classes/modify")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            classes = json_req_data["client_class_list"]
            LOGGER.info("classes -> %s", str(classes))
            LOGGER.info("Going to modify class")
            result = modify_client_classes(classes)
            if result is True:
                LOGGER.info("Successfully modified class")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            # BUG FIX: guard with isinstance(result, dict) — the bare
            # membership test '"message" in result' raised TypeError when
            # modify_client_classes() returned a plain bool False, so such
            # failures surfaced as the generic ERROR_4000 instead of the
            # intended ERROR_4032 branch below.
            elif isinstance(result, dict) and "message" in result:
                LOGGER.info("Failed to modify class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4031,
                    ERROR_4031_VALUE,
                    {'error': result['message']})
            else:
                LOGGER.info("Failed to modify class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4032,
                    ERROR_4032_VALUE,
                    {'error': result})
        except KeyError as exc:
            # Expected JSON key (e.g. 'client_class_list') is missing.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
| """
REST API endpoints written in Flask
Only POST Calls for requests requiring data
"""
import sys
import json
import os
from flask import request, Blueprint
from flask_cors import cross_origin
from restservice import LOGGER
from restservice.config import SERVICE_CODE, REQUIRE_API_AUTHENTICATION
from restservice.utilities import response_generator, get_config, leases_hook, get_reservation_info, add_reservation, \
add_subnets, add_reservation_options, add_subnet_options, del_reservation, delete_subnet_options, modify_subnets, \
delete_reservation_options, delete_subnets, add_client_classes, delete_client_classes, modify_client_classes
from restservice.apikeyhandler import verify_api_key
# Flask blueprint: every route in this module is mounted under /dhcp4.
BP = Blueprint('dhcp4', __name__, url_prefix='/dhcp4')
# Response status markers: first field of every response_generator() call.
STATUS_OK = "OK"
STATUS_KO = "KO"
# HTTP status codes used by the endpoints below.
HTTP_200 = 200  # Success
HTTP_400 = 404  # NOTE(review): named 400/"Bad request" but the value is 404 (Not Found) — confirm which is intended
HTTP_401 = 401  # None or bad credentials sent
HTTP_500 = 500  # General internal server error
# API CODE FOR SERVICE 100
# Three-digit per-endpoint codes combined as SERVICE_CODE + api_code + result_code.
HOME_API_CODE = "001"
GET_CONFIG_API_CODE = "002"
GET_IP_FROM_MAC_ADDRESS_API_CODE = "003"
GET_ALL_RESERVATIONS_API_CODE = "004"
ADD_RESERVATION_API_CODE = "005"
DELETE_RESERVATION_API_CODE = "006"
ADD_SUBNET4_API_CODE = "007"
DELETE_SUBNET4_API_CODE = "008"
MODIFY_SUBNET4_API_CODE = "009"
ADD_SUBNET_OPTION_API_CODE = "010"
DELETE_SUBNET_OPTION_API_CODE = "011"
ADD_SUBNET_RESERVATION_OPTION_API_CODE = "012"
DELETE_SUBNET_RESERVATION_OPTION_API_CODE = "013"
GET_ALL_LEASES_API_CODE = "014"
ADD_LEASES_API_CODE = "015"
UPDATE_LEASES_API_CODE = "016"
DELETE_LEASES_API_CODE = "017"
WIPE_LEASES_API_CODE = "018"
ADD_CLIENT_CLASSES_API_CODE = "019"
DELETE_CLIENT_CLASSES_API_CODE = "020"
MODIFY_CLIENT_CLASSES_API_CODE = "021"
# Numeric result markers. NOTE(review): not referenced in this chunk —
# presumably used by other parts of the file or by the utilities module.
RESULT_SUCCESS = 0
RESULT_FAILURE = 1
RESULT_EMPTY = 3
# RESPONSE CODES FOR SERVICE 900 - FastDHCP
# Each code has a paired *_VALUE human-readable message.
SUCCESS_1000 = "1000"
SUCCESS_1000_VALUE = "Command execution successful"
SUCCESS_1001 = "1001"
SUCCESS_1001_VALUE = "Leases are empty"
SUCCESS_1002 = "1002"
SUCCESS_1002_VALUE = "No lease found to delete"
SUCCESS_1003 = "1003"
SUCCESS_1003_VALUE = "No lease found to wipe"
# Generic errors shared by all endpoints.
ERROR_4000 = "4000"
ERROR_4000_VALUE = "Exception occured in the server. Command unsuccessful"
ERROR_4001 = "4001"
ERROR_4001_VALUE = "Could not find JSON key"
ERROR_4002 = "4002"
ERROR_4002_VALUE = "Unable to fetch server Configuration"
ERROR_4003 = "4003"
ERROR_4003_VALUE = "IP Address not found"
ERROR_4004 = '4004'
ERROR_4004_VALUE = 'Could not fetch leases'
ERROR_4005 = '4005'
ERROR_4005_VALUE = 'Unable to fetch reservations from server.'
# Reservation add/remove errors: the "-KEA server response" variant is used
# when the failure came back as a {'message': ...} dict from the server.
ERROR_4006 = "4006"
ERROR_4006_VALUE = "Failed to add reservation due to an issue with KEA server response"
ERROR_4007 = "4007"
ERROR_4007_VALUE = "Failed to add reservation"
ERROR_4008 = "4008"
ERROR_4008_VALUE = "Failed to remove reservation due to an issue with KEA server response"
ERROR_4009 = "4009"
ERROR_4009_VALUE = "Failed to remove reservation"
# Subnet CRUD errors.
ERROR_4010 = "4010"
ERROR_4010_VALUE = "Failed to add subnet due to an issue with KEA server response"
ERROR_4011 = "4011"
ERROR_4011_VALUE = "Failed to add subnet"
ERROR_4012 = "4012"
ERROR_4012_VALUE = "Failed to remove subnet due to an issue with KEA server response"
ERROR_4013 = "4013"
ERROR_4013_VALUE = "Failed to remove subnet"
# Subnet option errors.
ERROR_4014 = "4014"
ERROR_4014_VALUE = "Failed to add subnet option due to an issue with KEA server response"
ERROR_4015 = "4015"
ERROR_4015_VALUE = "Failed to add subnet option"
ERROR_4016 = "4016"
ERROR_4016_VALUE = "Failed to remove subnet option due to an issue with KEA server response"
ERROR_4017 = "4017"
ERROR_4017_VALUE = "Failed to remove subnet option"
# Subnet reservation option errors.
ERROR_4018 = "4018"
ERROR_4018_VALUE = "Failed to add subnet reservation option due to an issue with KEA server response"
ERROR_4019 = "4019"
ERROR_4019_VALUE = "Failed to add subnet reservation option"
ERROR_4020 = "4020"
ERROR_4020_VALUE = "Failed to remove subnet reservation option due to an issue with KEA server response"
ERROR_4021 = "4021"
ERROR_4021_VALUE = "Failed to remove subnet reservation option"
# Lease errors.
ERROR_4022 = "4022"
ERROR_4022_VALUE = "No Leases found"
ERROR_4023 = "4023"
ERROR_4023_VALUE = "Could not add lease successfully"
ERROR_4024 = "4024"
ERROR_4024_VALUE = "Could not update lease successfully"
ERROR_4025 = "4025"
ERROR_4025_VALUE = "Could not delete lease successfully"
ERROR_4026 = "4026"
ERROR_4026_VALUE = "Could not wipe subnet leases successfully"
# Client class errors.
ERROR_4027 = "4027"
ERROR_4027_VALUE = "Failed to add client class due to an issue with KEA server response"
ERROR_4028 = "4028"
ERROR_4028_VALUE = "Failed to add client class"
ERROR_4029 = "4029"
ERROR_4029_VALUE = "Failed to remove client class due to an issue with KEA server response"
ERROR_4030 = "4030"
ERROR_4030_VALUE = "Failed to remove client class"
ERROR_4031 = "4031"
ERROR_4031_VALUE = "Failed to modify client class due to an issue with KEA server response"
ERROR_4032 = "4032"
ERROR_4032_VALUE = "Failed to modify client class"
# Subnet modify errors.
ERROR_4033 = "4033"
ERROR_4033_VALUE = "Failed to modify subnet due to an issue with KEA server response"
ERROR_4034 = "4034"
ERROR_4034_VALUE = "Failed to modify subnet"
# Check for API keys if REQUIRE_API_AUTHENTICATION is enabled in the
# service configuration. When disabled, no before_request hook is
# registered and none of the 5xxx auth codes/helpers below are defined.
if REQUIRE_API_AUTHENTICATION:
    CHECK_AUTH_API = '022'
    ERROR_5000 = "5000"
    ERROR_5000_VALUE = "Invalid request"
    ERROR_5001 = "5001"
    ERROR_5001_VALUE = "Could not find API key in request"
    ERROR_5002 = "5002"
    ERROR_5002_VALUE = "Incorrect API key"
    # This function will be called every time a request is received. In case
    # the request is a GET, then apikey is extracted from the GET request and
    # checked for validity. Other requests are handled similarly.
    @BP.before_request
    def check_auth():
        """
        Blueprint-wide hook validating the 'apikey' credential on every
        request except the unauthenticated home endpoint.

        POST: expects 'apikey' inside the JSON body.
        GET : expects 'apikey' as a query-string argument.

        Returning a response dict short-circuits the request; returning
        None (implicitly) lets Flask dispatch to the route handler.
        """
        print("DEBUG: request.endpoint = " + str(request.endpoint))
        # The health-check endpoint stays open so monitoring can reach it.
        if request.endpoint == "dhcp4.home":
            return
        # If request method is POST
        if request.method == 'POST':
            print("DEBUG: request is POST")
            # Extract POST data, look for API key and handle verification
            json_req_data = request.get_json()
            # If no JSON POST request data is found, then return error
            if not json_req_data:
                LOGGER.info("Error - No JSON data")
                print("Error - No JSON data")
                response = response_generator(
                    STATUS_KO,
                    HTTP_400,
                    SERVICE_CODE + CHECK_AUTH_API + ERROR_5000,
                    ERROR_5000_VALUE,
                    {'error': ERROR_5000_VALUE})
                LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
                return response
            # If JSON POST request data is found, then ...
            else:
                # If API key is found in JSON data then ...
                if "apikey" in json_req_data:
                    apikey = json_req_data['apikey']
                    print("DEBUG: apikey = " + str(apikey))
                    verify_value = verify_api_key(apikey)
                    print("DEBUG: verify_value = " + str(verify_value))
                    # If API key is incorrect, send an error back.
                    # FIX: identity comparison instead of "== False"
                    # (PEP 8 E712); behavior is unchanged for every
                    # possible return value of verify_api_key().
                    if verify_value is False:
                        LOGGER.error("JSON ERROR - > %s", ERROR_5000_VALUE)
                        return return_incorrect_api_key()
                else:
                    print("DEBUG: Could not find API key in request")
                    LOGGER.error("JSON ERROR - > %s", ERROR_5001_VALUE)
                    return return_no_api_key_found()
        if request.method == 'GET':
            print("DEBUG: request is GET")
            # Extract GET arguments, look for API key and handle verification
            api_key = request.args.get('apikey')
            print("DEBUG: api_key = " + str(api_key))
            # If no apikey is found then return error
            if api_key is None:
                return return_no_api_key_found()
            else:
                print("DEBUG: api_key = " + str(api_key))
                verify_value = verify_api_key(api_key)
                print("DEBUG: check_auth(): verify_value = " + str(verify_value))
                # FIX: "is False" instead of "== False" (PEP 8 E712).
                if verify_value is False:
                    print("DEBUG: check_auth(): returning incorrect api key response")
                    return return_incorrect_api_key()
    # Implement other methods here
    def return_no_api_key_found():
        """Build the standard KO response for a request missing an API key."""
        response = response_generator(STATUS_KO, HTTP_400,
                                      SERVICE_CODE + CHECK_AUTH_API + ERROR_5001,
                                      ERROR_5001_VALUE, {'error': ERROR_5001_VALUE})
        return response
    def return_incorrect_api_key():
        """Build the standard KO response for an invalid API key."""
        response = response_generator(STATUS_KO, HTTP_401,
                                      SERVICE_CODE + CHECK_AUTH_API + ERROR_5002, ERROR_5002_VALUE,
                                      {'error': ERROR_5002_VALUE})
        return response
@BP.route("/")
@cross_origin()
def home():
    """
    Health-check endpoint confirming the FASTDHCP server is reachable.

    Always answers HTTP 200; an unexpected failure while building the
    success payload is logged and converted into an ERROR_4000 response.
    """
    LOGGER.info("Executing")
    LOGGER.info("Request url -> /dhcp4/")
    try:
        return response_generator(
            STATUS_OK,
            HTTP_200,
            SERVICE_CODE + HOME_API_CODE + SUCCESS_1000,
            SUCCESS_1000_VALUE,
            {"data": "DHCP Management server is up and running"})
    except Exception as exc:
        err_type, err_obj, err_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', err_type, err_obj, err_tb)
        script_name = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error("{}, {}, {}".format(err_type, script_name, err_tb.tb_lineno))
        return response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + HOME_API_CODE + ERROR_4000,
            ERROR_4000_VALUE,
            {"error": repr(exc)})
@BP.route("/config", methods=['GET'])
@cross_origin()
def get_kea_config():
    """
    A HTTP GET function to fetch the KEA configuration
    from DHCP server.
    :return : Response config object
    """
    LOGGER.info("Request url -> /dhcp4/config")
    LOGGER.info("Request Method -> GET")

    def _failure(code, value, detail):
        # Every KO response of this endpoint shares the same shape.
        return response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + GET_CONFIG_API_CODE + code,
            value,
            {"error": detail})

    try:
        kea_config = get_config()
        LOGGER.info("Get config result -> %s", kea_config)
        if "message" in kea_config:
            # The helper reported an error instead of a configuration.
            LOGGER.error('Error message --> %s', str(kea_config["message"]))
            response = _failure(ERROR_4002, ERROR_4002_VALUE,
                                kea_config["message"])
        else:
            LOGGER.info("Successfully fetched configuration")
            response = response_generator(
                STATUS_OK,
                HTTP_200,
                SERVICE_CODE + GET_CONFIG_API_CODE + SUCCESS_1000,
                SUCCESS_1000_VALUE,
                {'keaConfig': kea_config})
    except KeyError as exc:
        err_type, err_obj, err_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', err_type, err_obj, err_tb)
        script_name = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error("{}, {}, {}".format(err_type, script_name, err_tb.tb_lineno))
        response = _failure(ERROR_4001, ERROR_4001_VALUE, repr(exc))
    except Exception as exc:
        err_type, err_obj, err_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', err_type, err_obj, err_tb)
        script_name = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error("{}, {}, {}".format(err_type, script_name, err_tb.tb_lineno))
        response = _failure(ERROR_4000, ERROR_4000_VALUE, repr(exc))
    return response
@BP.route("/getipfrommac", methods=['POST'])
@cross_origin()
def get_ip_from_mac():
    """
    A HTTP POST function to fetch the IP address for the MAC and subnet
    supplied in the JSON request body.

    Parameters ---
    mac_address : <str> : hardware address of the lease to look up
    subnet_id   : <int> : Subnet ID the lease belongs to
    """
    LOGGER.info("Request url -> /dhcp4/getipfrommac")
    LOGGER.info("Request Method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        LOGGER.info("Error - No JSON data")
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
        LOGGER.info("mac_address = %s", json_req_data['mac_address'])
        try:
            leases = leases_hook(mac=json_req_data['mac_address'],
                                 subnet_id=json_req_data['subnet_id'], get_one=True)
            LOGGER.info("type(response) = %s", str(type(leases)))
            # BUG FIX: previously leases[0] was logged and inspected before
            # checking the result's shape, so an empty list raised IndexError
            # and a {'message': ...} error dict raised KeyError — both were
            # misrouted to the generic ERROR_4000/ERROR_4001 responses.
            if 'message' in leases:
                # leases_hook reported a failure.
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4004,
                    ERROR_4004_VALUE,
                    {"error": leases['message']})
            elif leases and 'arguments' in leases[0]:
                LOGGER.info("response[0] = %s", str(leases[0]))
                LOGGER.info("type(response[0]) = %s", str(type(leases[0])))
                arguments = leases[0].get("arguments")
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {"ip_address": arguments})  # .get("ip-address")})
            else:
                # No lease found (empty result or no 'arguments' payload).
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4003,
                    ERROR_4003_VALUE,
                    {"error": leases})  # .get("text")})
        except KeyError as exc:
            # Missing expected JSON key in the request body.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_IP_FROM_MAC_ADDRESS_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/reservations", methods=['POST'])
@cross_origin()
def get_all_reservations():
    """
    A HTTP GET function to fetch the reservations
    within a Subnet from KEA server.
    Parameters ---
    subnet_id : <str> : Subnet ID whose reservations are listed
    """
    LOGGER.info("Request url -> /dhcp4/reservations")
    LOGGER.info("Request Method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        LOGGER.info("Error - No JSON data")
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        # NOTE(review): 'subnet_id' is read here, outside the try below —
        # a missing key raises KeyError straight to Flask (HTTP 500)
        # instead of the ERROR_4001 response; confirm intended.
        LOGGER.info("subnet_id = %s", str(json_req_data['subnet_id']))
        try:
            # Reservations live inside the full server configuration,
            # so that is fetched first.
            dhcp4_config = get_config()
            LOGGER.info("configuration -> %s", str(dhcp4_config))
            LOGGER.info("subnet_id -> %s", json_req_data['subnet_id'])
            if "message" in dhcp4_config:
                # get_config() reported a failure dict.
                LOGGER.error('Error message --> %s', str(dhcp4_config["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4002,
                    ERROR_4002_VALUE,
                    {'error': dhcp4_config.get("message")})
            else:
                reservations = get_reservation_info(dhcp4_config,
                                                    json_req_data['subnet_id'])
                LOGGER.info("reservations -> %s", reservations)
                if "message" in reservations:
                    # Extraction helper reported a failure dict.
                    LOGGER.error('Error message --> %s', str(reservations["message"]))
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4005,
                        ERROR_4005_VALUE,
                        {'error': reservations.get("message")})
                else:
                    LOGGER.info("Successfully fetched reservations")
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        {'reservations': reservations})
        except KeyError as exc:
            # Missing expected JSON key.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_RESERVATIONS_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/reservations/add", methods=['POST'])
@cross_origin()
def add_reservations():
    """
    A HTTP POST function to add new Reservations to Subnet.
    Parameters ---
    subnet_id : <int> : Subnet ID
    reservations : list : reservation entries to add to the subnet
    """
    LOGGER.info("Request url -> /dhcp4/reservations/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        # NOTE(review): 'subnet_id' is read here, outside the try below —
        # a missing key raises KeyError to Flask rather than ERROR_4001.
        LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
        try:
            reservations = json_req_data["reservations"]
            LOGGER.info("Reservations from request -> %s",
                        str(reservations))
            LOGGER.info("Going to add reservation for subnet id : %s",
                        json_req_data['subnet_id'])
            # subnet_id is coerced to int; a non-numeric value raises
            # ValueError and lands in the generic handler below.
            result = add_reservation(reservations,
                                     int(json_req_data['subnet_id']))
            LOGGER.info("Result -> %s", str(result))
            # check if resp is boolean and proceed with the response
            if isinstance(result, bool) and result:
                LOGGER.info("Successfully added reservation")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
            # NOTE(review): if result is the bool False this membership
            # test raises TypeError (caught below as ERROR_4000) — confirm
            # add_reservation() never returns plain False.
            elif "message" in result:
                LOGGER.info("Failed to add reservation")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4006,
                    ERROR_4006_VALUE,
                    {'error': result.get("message")}
                )
            else:
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4007,
                    ERROR_4007_VALUE,
                    {'error': result}
                )
        except KeyError as exc:
            # Missing expected JSON key (e.g. 'reservations').
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_RESERVATION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/reservations/delete", methods=['POST'])
@cross_origin()
def remove_reservation():
    """
    A HTTP POST function to delete reservations from Subnet
    Parameters ---
    subnet_id : <int> : Subnet ID
    hw_addresses : list : hardware addresses of the reservations to delete
    """
    LOGGER.info("Request url -> /dhcp4/reservations/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE}
        )
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            LOGGER.info("subnet_id = %s", json_req_data['subnet_id'])
            LOGGER.info("hw_addresses = %s",
                        str(json_req_data['hw_addresses']))
            # Fetch user input
            reservations = json_req_data["hw_addresses"]
            LOGGER.info("hw_addresses -> %s", str(reservations))
            # Delete the reservation and returns a boolean value if successful
            LOGGER.info("Going to delete reservation for subnetId : %s",
                        json_req_data['subnet_id'])
            result = del_reservation(reservations, json_req_data['subnet_id'])
            # check if resp is boolean and proceed with the response
            if isinstance(result, bool) and result:
                LOGGER.info("Successfully removed reservation")
                # NOTE(review): __success_value is assigned but never used.
                __success_value = "Successfully removed reservation"
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
                LOGGER.info("Response -> %s", str(response))
            # NOTE(review): if result is the bool False this membership test
            # raises TypeError (caught below as ERROR_4000) — confirm
            # del_reservation() never returns plain False.
            elif "message" in result:
                LOGGER.info("Failed to remove reservation. message in result")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4008,
                    ERROR_4008_VALUE,
                    {'error': result.get("message")}
                )
                LOGGER.debug("Response -> %s", str(response))
            else:
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4009,
                    ERROR_4009_VALUE,
                    {'error': result}
                )
                LOGGER.debug("Response -> %s", str(response))
        except KeyError as exc:
            # Missing expected JSON key (e.g. 'hw_addresses').
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as exc:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(
                exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_RESERVATION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(exc)}
            )
    return response
@BP.route("/subnets/add", methods=['POST'])
@cross_origin()
def add_subnet():
    """
    Add one or more IPv4 subnets to the KEA configuration.

    JSON body:
        subnet_list : list : subnet definitions to append

    Failures reported by the server map to ERROR_4010, any other failure
    result to ERROR_4011, malformed/missing JSON to ERROR_4001, and
    unexpected exceptions to ERROR_4000.
    """
    LOGGER.info("Request url -> /dhcp4/subnets/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # Reject requests that carry no JSON payload.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
        return response
    try:
        LOGGER.info("Printing received JSON data")
        LOGGER.info("json_req_data = %s", str(json_req_data))
        requested_subnets = json_req_data["subnet_list"]
        LOGGER.info("subnet -> %s", str(requested_subnets))
        LOGGER.info("Going to add a subnet")
        outcome = add_subnets(requested_subnets)
        if outcome is True:
            LOGGER.info("Successfully added subnet")
            response = response_generator(
                STATUS_OK,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET4_API_CODE + SUCCESS_1000,
                SUCCESS_1000_VALUE,
                {'data': outcome}
            )
        elif "message" in outcome:
            # The server returned an explanatory failure message.
            LOGGER.info("Failed to add subnet. MESSAGE in result")
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4010,
                ERROR_4010_VALUE,
                {'error': outcome['message']})
        else:
            LOGGER.info("Failed to add subnet")
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4011,
                ERROR_4011_VALUE,
                {'error': outcome}
            )
        LOGGER.debug("Response -> %s", str(response))
    except KeyError as key_err:
        # 'subnet_list' missing from the request body.
        err_type, err_obj, err_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', err_type, err_obj, err_tb)
        script_name = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error("{}, {}, {}".format(err_type, script_name, err_tb.tb_lineno))
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {"error": repr(key_err)}
        )
    except Exception as unexpected:
        # Catch-all boundary: log and return the generic error response.
        err_type, err_obj, err_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', err_type, err_obj, err_tb)
        script_name = os.path.split(err_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error('%s, %s, %s', str(err_type),
                     str(script_name), str(err_tb.tb_lineno))
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + ADD_SUBNET4_API_CODE + ERROR_4000,
            ERROR_4000_VALUE,
            {"error": repr(unexpected)})
    return response
@BP.route("/subnets/delete", methods=['POST'])
@cross_origin()
def remove_subnet():
    """
    A HTTP POST function to delete a Subnet
    Parameters ---
    subnet_ids : list : IDs of the subnets to delete
    """
    LOGGER.info("Request url -> /dhcp4/subnets/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            subnet_ids = json_req_data["subnet_ids"]
            LOGGER.info("subnet_ids -> %s", str(subnet_ids))
            LOGGER.info("Going to remove a subnet")
            result = delete_subnets(subnet_ids)
            if result is True:
                LOGGER.info("Successfully removed subnet")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET4_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            # NOTE(review): if result is the bool False this membership test
            # raises TypeError (caught below as ERROR_4000) — confirm
            # delete_subnets() never returns plain False.
            elif "message" in result:
                LOGGER.info("Failed to remove subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4012,
                    ERROR_4012_VALUE,
                    {'error': result['message']})
            else:
                LOGGER.info("Failed to remove subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4013,
                    ERROR_4013_VALUE,
                    {'error': result})
        except KeyError as exc:
            # 'subnet_ids' missing from the request body.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET4_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/modify", methods=['POST'])
@cross_origin()
def modify_subnet():
    """
    A HTTP POST function to modify a Subnet.

    Parameters ---
    subnet_list : list : subnet definitions to apply as modifications

    (Docstring fixed: it previously described the delete endpoint and a
    'subnet_ids' parameter, while the code reads 'subnet_list' and
    modifies subnets.)
    """
    LOGGER.info("Request url -> /dhcp4/subnets/modify")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            subnet_list = json_req_data["subnet_list"]
            LOGGER.info("subnet_list -> %s", str(subnet_list))
            LOGGER.info("Going to modify subnet")
            result = modify_subnets(subnet_list)
            if result is True:
                LOGGER.info("Successfully modified subnet")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_SUBNET4_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            # BUG FIX: guard with isinstance(result, dict) — the bare
            # membership test raised TypeError when modify_subnets()
            # returned a plain bool False, hiding the ERROR_4034 branch
            # behind the generic ERROR_4000 handler.
            elif isinstance(result, dict) and "message" in result:
                LOGGER.info("Failed to modify subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4033,
                    ERROR_4033_VALUE,
                    {'error': result['message']})
            else:
                LOGGER.info("Failed to modify subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4034,
                    ERROR_4034_VALUE,
                    {'error': result})
        except KeyError as exc:
            # 'subnet_list' missing from the request body.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_SUBNET4_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/options/add", methods=['POST'])
@cross_origin()
def add_option_subnet():
    """
    A HTTP POST function to add option data in Subnet
    Parameters ---
    subnet_id : <int> : Subnet ID
    options_list : list : DHCP option definitions to add to the subnet
    """
    LOGGER.info("Request url -> /dhcp4/subnets/options/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON body at all: reject with the key-not-found error code.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            dhcp4_options = json_req_data["options_list"]
            LOGGER.info("Option Data -> %s", str(dhcp4_options))
            subnet_id = json_req_data["subnet_id"]
            LOGGER.info("Going to add option data to a subnet")
            result = add_subnet_options(dhcp4_options, subnet_id)
            if result is True:
                LOGGER.info("Successfully added option to subnet")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            # NOTE(review): if result is the bool False this membership test
            # raises TypeError (caught below as ERROR_4000) — confirm
            # add_subnet_options() never returns plain False.
            elif "message" in result:
                LOGGER.info("Failed to add option to subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4014,
                    ERROR_4014_VALUE,
                    {'error': result.get("message")})
            else:
                LOGGER.info("Failed to add option to subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4015,
                    ERROR_4015_VALUE,
                    {'error': result})
        except KeyError as exc:
            # 'options_list' or 'subnet_id' missing from the request body.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all boundary: log details and return the generic error.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET_OPTION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/options/delete", methods=['POST'])
@cross_origin()
def delete_option_subnet():
    """
    A HTTP POST function to delete option data from a Subnet.

    JSON body parameters ---
    subnet_id : <int> : Subnet ID
    codes : <obj> : List of option codes to be deleted

    Returns a JSON response object in every case (missing payload,
    success, helper failure, or unexpected exception).
    """
    LOGGER.info("Request url -> /dhcp4/subnets/options/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            subnet_id = int(json_req_data['subnet_id'])
            codes = json_req_data["codes"]
            LOGGER.info("codes -> %s", str(codes))
            LOGGER.info("Going to delete option data from a subnet")
            result = delete_subnet_options(codes, subnet_id)
            if result is True:
                LOGGER.info("Successfully deleted option from subnet")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to delete option from subnet")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4016,
                    ERROR_4016_VALUE,
                    {'error': result.get("message")})
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to delete option from subnet")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4017,
                    ERROR_4017_VALUE,
                    {'error': result})
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET_OPTION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/reservation/options/add", methods=['POST'])
@cross_origin()
def add_options_reservation():
    """
    A HTTP POST function to add options to reservations in a Subnet.

    JSON body parameters ---
    subnet_id : <int> : Subnet ID
    mac : <str> : Hardware address
    options_list : <obj> : List of options to be added into the subnet

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/subnets/reservation/options/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            LOGGER.info("Going to add options to reservations in Subnet")
            result = add_reservation_options(json_req_data["options_list"],
                                             json_req_data["mac"],
                                             int(json_req_data['subnet_id']))
            if result is True:
                LOGGER.info("Successfully added option to subnet reservation")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to add option to subnet reservation")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4018,
                    ERROR_4018_VALUE,
                    {'error': result.get("message")})
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to add option to subnet reservation")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4019,
                    ERROR_4019_VALUE,
                    {'error': result})
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                         str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/subnets/reservation/options/delete", methods=['POST'])
@cross_origin()
def delete_options_reservation():
    """
    A HTTP POST function to delete option data in reservations for a Subnet.

    JSON body parameters ---
    subnet_id : <int> : Subnet ID
    mac : <str> : Hardware address
    codes : <obj> : List of option codes to be deleted

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/subnets/reservation/options/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        # Log at ERROR level, consistent with the other endpoints.
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            LOGGER.info("Going to delete option data in reservations for a Subnet")
            result = delete_reservation_options(json_req_data['codes'],
                                                json_req_data['mac'],
                                                int(json_req_data['subnet_id']))
            if result is True:
                LOGGER.info("Successfully deleted option on reservation")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {"data": result}
                )
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to delete option on reservation")
                LOGGER.error(result.get("message"))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4020,
                    ERROR_4020_VALUE,
                    {'error': result.get("message")}
                )
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to delete option on reservation")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4021,
                    ERROR_4021_VALUE,
                    {'error': result}
                )
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_SUBNET_RESERVATION_OPTION_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/leases", methods=['GET'])
@cross_origin()
def get_all_leases():
    """
    A HTTP GET function to fetch all active leases.

    Returns a JSON response object in every case (success, empty lease
    set, hook failure, or unexpected exception).
    """
    LOGGER.info("Request url -> /dhcp4/leases")
    LOGGER.info("Request method -> GET")
    try:
        leases = leases_hook()
        LOGGER.info("type(response) = %s", str(type(leases)))
        LOGGER.info("response[0] = %s", leases)
        # Check the error shape first: the previous membership test ran
        # before the None check and raised TypeError for a None result.
        if leases is not None and 'message' in leases:
            # The hook reported an error with an explanation.
            LOGGER.error('Error message --> %s', str(leases["message"]))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4004,
                ERROR_4004_VALUE,
                {"error": leases['message']})
        elif leases and 'arguments' in leases[0]:
            arguments = leases[0].get("arguments")
            result = leases[0].get("result")
            # 0 => SUCCESS , 1 => FAILED, 3 => Empty object
            LOGGER.info("Response -> %s", json.dumps(leases))
            if result == RESULT_SUCCESS:
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + GET_ALL_LEASES_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    arguments)
            elif result == RESULT_EMPTY:
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + GET_ALL_LEASES_API_CODE + SUCCESS_1001,
                    SUCCESS_1001_VALUE,
                    arguments)
            else:
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4022,
                    ERROR_4022_VALUE,
                    {"error": leases})
        else:
            # None, empty, or unexpected result shape from the hook.
            LOGGER.info("Response -> %s", json.dumps(leases))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4022,
                ERROR_4022_VALUE,
                {"error": leases})
    except KeyError as exc:
        # Unexpected structure in the hook result.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
        file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
        LOGGER.error(error_msg)
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {"error": repr(exc)}
        )
    except Exception as excp:
        # Catch-all so the endpoint always returns a JSON response.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
        file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                     str(exc_tb.tb_lineno))
        response = response_generator(
            STATUS_KO,
            HTTP_200,
            SERVICE_CODE + GET_ALL_LEASES_API_CODE + ERROR_4000,
            ERROR_4000_VALUE,
            {"error": str(excp)}
        )
    return response
@BP.route("/lease/add", methods=['POST'])
@cross_origin()
def add_lease():
    """
    A HTTP POST function to add a new lease.

    JSON body parameters ---
    arguments : <obj> : Lease definition forwarded to the leases hook

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/lease/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        # Log at ERROR level, consistent with the other endpoints.
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            leases = leases_hook(add_lease=True, arguments=json_req_data['arguments'])
            LOGGER.info("type(response) = %s", str(type(leases)))
            LOGGER.info("response[0] = %s", leases)
            # Check the error shape first: the previous membership test
            # ran before any None check and raised TypeError for None.
            if leases is not None and 'message' in leases:
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
                    ERROR_4023_VALUE,
                    {"error": leases['message']})
            elif leases and 'result' in leases[0]:
                result = leases[0].get("result")
                LOGGER.info("Response -> %s", json.dumps(leases))
                # 0 => SUCCESS , 1 => FAILED
                if result == RESULT_SUCCESS:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + ADD_LEASES_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        leases)
                else:
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
                        ERROR_4023_VALUE,
                        leases)
            else:
                # None, empty, or unexpected result shape from the hook.
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4023,
                    ERROR_4023_VALUE,
                    {"error": leases})
        except KeyError as exc:
            # A required key was missing from the payload or hook result.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                         str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_LEASES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/lease/update", methods=['POST'])
@cross_origin()
def update_lease():
    """
    A HTTP POST function to update an active lease.

    JSON body parameters ---
    arguments : <obj> : Lease definition forwarded to the leases hook

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/lease/update")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        # Log at ERROR level, consistent with the other endpoints.
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            leases = leases_hook(update_lease=True, arguments=json_req_data['arguments'])
            LOGGER.info("type(response) = %s", str(type(leases)))
            LOGGER.info("response[0] = %s", leases)
            # Check the error shape first: the previous membership test
            # ran before any None check and raised TypeError for None.
            if leases is not None and 'message' in leases:
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
                    ERROR_4024_VALUE,
                    {"error": leases['message']})
            elif leases and 'result' in leases[0]:
                result = leases[0].get("result")
                LOGGER.info("Response -> %s", json.dumps(leases))
                # 0 => SUCCESS , 1 => FAILED
                if result == RESULT_SUCCESS:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + UPDATE_LEASES_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        leases)
                else:
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
                        ERROR_4024_VALUE,
                        leases)
            else:
                # None, empty, or unexpected result shape from the hook.
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4024,
                    ERROR_4024_VALUE,
                    {"error": leases})
        except KeyError as exc:
            # A required key was missing from the payload or hook result.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                         str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + UPDATE_LEASES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/lease/delete", methods=['POST'])
@cross_origin()
def delete_lease():
    """
    A HTTP POST function to delete an active lease.

    JSON body parameters ---
    ip : <str> : IP address of the lease to delete

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/lease/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        # Log at ERROR level, consistent with the other endpoints.
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            leases = leases_hook(delete_lease=True, ip=json_req_data['ip'])
            LOGGER.info("type(response) = %s", str(type(leases)))
            LOGGER.info("response[0] = %s", leases)
            # Check the error shape first: the previous membership test
            # ran before any None check and raised TypeError for None.
            if leases is not None and 'message' in leases:
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
                    ERROR_4025_VALUE,
                    {"error": leases['message']})
            elif leases and 'result' in leases[0]:
                result = leases[0].get("result")
                LOGGER.info("Response -> %s", json.dumps(leases))
                # 0 => SUCCESS , 1 => FAILED, 3 => Empty object
                if result == RESULT_SUCCESS:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + DELETE_LEASES_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        leases)
                elif result == RESULT_EMPTY:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + DELETE_LEASES_API_CODE + SUCCESS_1002,
                        SUCCESS_1002_VALUE,
                        leases)
                else:
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
                        ERROR_4025_VALUE,
                        leases)
            else:
                # None, empty, or unexpected result shape from the hook.
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4025,
                    ERROR_4025_VALUE,
                    {"error": leases})
        except KeyError as exc:
            # A required key was missing from the payload or hook result.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                         str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_LEASES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/lease/wipe", methods=['POST'])
@cross_origin()
def wipe_lease():
    """
    A HTTP POST function to remove all active leases of a subnet.

    JSON body parameters ---
    subnet_id : <int> : Subnet ID whose leases will be wiped

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/lease/wipe")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        # Log at ERROR level, consistent with the other endpoints.
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            leases = leases_hook(wipe_subnet_lease=True, subnet_id=int(json_req_data['subnet_id']))
            LOGGER.info("type(response) = %s", str(type(leases)))
            LOGGER.info("response[0] = %s", leases)
            # Check the error shape first: the previous membership test
            # ran before any None check and raised TypeError for None.
            if leases is not None and 'message' in leases:
                LOGGER.error('Error message --> %s', str(leases["message"]))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
                    ERROR_4026_VALUE,
                    {"error": leases['message']})
            elif leases and 'result' in leases[0]:
                result = leases[0].get("result")
                LOGGER.info("Response -> %s", json.dumps(leases))
                # 0 => SUCCESS , 1 => FAILED, 3 => Empty object
                if result == RESULT_SUCCESS:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + WIPE_LEASES_API_CODE + SUCCESS_1000,
                        SUCCESS_1000_VALUE,
                        leases)
                elif result == RESULT_EMPTY:
                    response = response_generator(
                        STATUS_OK,
                        HTTP_200,
                        SERVICE_CODE + WIPE_LEASES_API_CODE + SUCCESS_1003,
                        SUCCESS_1003_VALUE,
                        leases)
                else:
                    response = response_generator(
                        STATUS_KO,
                        HTTP_200,
                        SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
                        ERROR_4026_VALUE,
                        leases)
            else:
                # None, empty, or unexpected result shape from the hook.
                LOGGER.info("Response -> %s", json.dumps(leases))
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4026,
                    ERROR_4026_VALUE,
                    {"error": leases})
        except KeyError as exc:
            # A required key was missing from the payload or hook result.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type), str(file_name),
                         str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + WIPE_LEASES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": str(excp)}
            )
    return response
@BP.route("/classes/add", methods=['POST'])
@cross_origin()
def add_class():
    """
    A HTTP POST function to add client classes.

    JSON body parameters ---
    client_class_list : <obj> : List of client class definitions to add

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/classes/add")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            client_class_list = json_req_data["client_class_list"]
            LOGGER.info("client_classes -> %s", str(client_class_list))
            LOGGER.info("Going to add a client_classes")
            result = add_client_classes(client_class_list)
            if result is True:
                LOGGER.info("Successfully added client class")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result}
                )
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to add client class. MESSAGE in result")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4027,
                    ERROR_4027_VALUE,
                    {'error': result['message']})
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to add client class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4028,
                    ERROR_4028_VALUE,
                    {'error': result}
                )
            LOGGER.debug("Response -> %s", str(response))
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + ADD_CLIENT_CLASSES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/classes/delete", methods=['POST'])
@cross_origin()
def remove_class():
    """
    A HTTP POST function to delete client classes.

    JSON body parameters ---
    class_names : <obj> : List of client class names to delete

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/classes/delete")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            class_names = json_req_data["class_names"]
            LOGGER.info("class_names -> %s", str(class_names))
            LOGGER.info("Going to remove a class")
            result = delete_client_classes(class_names)
            if result is True:
                LOGGER.info("Successfully removed class")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to remove class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4029,
                    ERROR_4029_VALUE,
                    {'error': result['message']})
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to remove class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4030,
                    ERROR_4030_VALUE,
                    {'error': result})
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + DELETE_CLIENT_CLASSES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
@BP.route("/classes/modify", methods=['POST'])
@cross_origin()
def modify_class():
    """
    A HTTP POST function to modify a client class.

    JSON body parameters ---
    client_class_list : <obj> : List of client class definitions to modify

    Returns a JSON response object in every case.
    """
    LOGGER.info("Request url -> /dhcp4/classes/modify")
    LOGGER.info("Request method -> POST")
    json_req_data = request.get_json()
    if not json_req_data:
        # No JSON payload was supplied -> reject the request up front.
        response = response_generator(
            STATUS_KO,
            HTTP_401,
            SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4001,
            ERROR_4001_VALUE,
            {'error': ERROR_4001_VALUE})
        LOGGER.error("JSON ERROR - > %s", ERROR_4001_VALUE)
    else:
        try:
            LOGGER.info("Printing received JSON data")
            LOGGER.info("json_req_data = %s", str(json_req_data))
            classes = json_req_data["client_class_list"]
            LOGGER.info("classes -> %s", str(classes))
            LOGGER.info("Going to modify class")
            result = modify_client_classes(classes)
            if result is True:
                LOGGER.info("Successfully modified class")
                response = response_generator(
                    STATUS_OK,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + SUCCESS_1000,
                    SUCCESS_1000_VALUE,
                    {'data': result})
            elif isinstance(result, dict) and "message" in result:
                # Helper returned an error object with an explanation.
                # isinstance guard avoids TypeError when the helper
                # returns a non-container value such as False.
                LOGGER.error("Failed to modify class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4031,
                    ERROR_4031_VALUE,
                    {'error': result['message']})
            else:
                # Any other result shape is treated as a plain failure.
                LOGGER.error("Failed to modify class")
                response = response_generator(
                    STATUS_KO,
                    HTTP_200,
                    SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4032,
                    ERROR_4032_VALUE,
                    {'error': result})
        except KeyError as exc:
            # A required key was missing from the JSON payload.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            error_msg = "{}, {}, {}".format(exc_type, file_name, exc_tb.tb_lineno)
            LOGGER.error(error_msg)
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4001,
                ERROR_4001_VALUE,
                {"error": repr(exc)}
            )
        except Exception as excp:
            # Catch-all so the endpoint always returns a JSON response.
            exc_type, exc_obj, exc_tb = sys.exc_info()
            LOGGER.debug('%s, %s, %s', exc_type, exc_obj, exc_tb)
            file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            LOGGER.error('%s, %s, %s', str(exc_type),
                         str(file_name), str(exc_tb.tb_lineno))
            response = response_generator(
                STATUS_KO,
                HTTP_200,
                SERVICE_CODE + MODIFY_CLIENT_CLASSES_API_CODE + ERROR_4000,
                ERROR_4000_VALUE,
                {"error": repr(excp)})
    return response
Parameters --- subnet_id : <int> : Subnet ID reservations: # check if resp is boolean and proceed with the response A HTTP POST function to delete reservations from Subnet Parameters --- subnet_id : <int> : Subnet ID hw_addresses: # Fetch user input # Delete the reservation and returns a boolean value if successful # check if resp is boolean and proceed with the response A HTTP POST function to add a Subnet Parameters --- subnet_list : <int> : Subnet ID A HTTP POST function to delete a Subnet Parameters --- subnet_ids : list : Subnet IDs A HTTP POST function to delete a Subnet Parameters --- subnet_ids : list : Subnet IDs A HTTP POST function to add option data in Subnet Parameters --- subnet_id : <int> : Subnet ID options_list: A HTTP POST function to delete option data from Subnet Parameters --- subnet_id : <int> : Subnet ID codes : <obj> : List of codes to be deleted A HTTP POST function to add options to reservations in Subnet Parameters --- subnet_id : <int> : Subnet ID mac : <str> : Hardware address options_list : <obj> : List of options to be added into the subnet A HTTP POST function to delete option data in reservations for a Subnet Parameters --- subnet_id : <int> : Subnet ID mac : <str> : Hardware address codes : <obj> : List of codes to be deleted A HTTP GET function to fetch the all active leases # 0 => SUCCESS , 1 => FAILED, 3 => Empty object A HTTP GET function to add a new lease # 0 => SUCCESS , 1 => FAILED A HTTP GET function to update active lease # 0 => SUCCESS , 1 => FAILED A HTTP GET function to delete active leases # 0 => SUCCESS , 1 => FAILED, 3 => Empty object A HTTP GET function to remove all active leases of a subnet # 0 => SUCCESS , 1 => FAILED, 3 => Empty object A HTTP POST function to add a class Parameters --- subnet_list : <int> : Subnet ID A HTTP POST function to delete a class Parameters --- subnet_ids : list : Subnet IDs A HTTP POST function to modify a CLIENT CLASS | 2.73213 | 3 |
sctr/management/commands.py | candango/sctr | 0 | 6621960 | <filename>sctr/management/commands.py
#!/usr/bin/env python
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import tasks
from cartola import ftext
from firenado.management import ManagementCommand
from tornado import template
import os
SCTR_ROOT = os.path.realpath(
os.path.join(os.path.dirname(__file__), ".."))
loader = template.Loader(os.path.join(SCTR_ROOT, "templates", "management"))
class SctrManagementCommand(ManagementCommand):
    """Management command whose help text is rendered from sctr templates.

    Extends firenado's ManagementCommand so every command and subcommand
    gets a templated help message loaded from templates/management.
    """

    def __init__(self, name, description, cmd_help, **kwargs):
        # Let the firenado base class store name/description/sub_commands,
        # then replace the plain help text with the rendered template.
        super(SctrManagementCommand, self).__init__(
            name, description, cmd_help, **kwargs)
        self.help = loader.load("sctr_command_help.txt").generate(
            command=self)

    def get_help_description(self):
        """Return one aligned help line: padded name plus wrapped description."""
        # 27 spaces line continuation rows up under the description column
        # (20-char name field plus separators).
        return "%s %s" % (
            ftext.pad(self.name, size=20),
            ftext.columnize(self.description, columns=30,
                            newline="\n%s" % (" " * 27)))

    def get_subcommands_help(self):
        """Render the subcommand listing template for this command.

        Falls back to an empty list when the command has no subcommands
        (``self.sub_commands`` is None).
        """
        subcommands = []
        if self.sub_commands is not None:
            subcommands = self.sub_commands
        return loader.load("subcommands.txt").generate(
            subcommands=subcommands)
# Subcommands of "sctr proc": inspect and control supervised processes.
sctrProcessSubcommands = [
    SctrManagementCommand("list", "List Process", "",
                          tasks=tasks.ListProcessesTask),
    SctrManagementCommand("restart", "Restart Process", "",
                          tasks=tasks.RestartProcessTask),
]
# Subcommands of "sctr user": manage controller users.
sctrUserSubcommands = [
    SctrManagementCommand("add", " Add User", "", tasks=tasks.UserAddTask),
    SctrManagementCommand("list", "List Users", "", tasks=tasks.UserListTask),
    SctrManagementCommand("test", "Test User", "", tasks=tasks.UserTestTask),
]
# Second-level command groups hanging under the top-level "sctr" command.
sctrSubcommands = [
    SctrManagementCommand(
        "proc",
        "Process related tasks",
        "",
        sub_commands=sctrProcessSubcommands),
    SctrManagementCommand(
        "user",
        "User related tasks",
        "",
        sub_commands=sctrUserSubcommands),
]
# Root "sctr" command.  NOTE(review): the instance is not kept, so
# presumably ManagementCommand registers itself on construction -- confirm
# against firenado.management before changing.
SctrManagementCommand(
    "sctr",
    "Supervisord Controller related commands",
    "",
    category="Supervisord Controller",
    sub_commands=sctrSubcommands)
| <filename>sctr/management/commands.py
#!/usr/bin/env python
#
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import tasks
from cartola import ftext
from firenado.management import ManagementCommand
from tornado import template
import os
SCTR_ROOT = os.path.realpath(
os.path.join(os.path.dirname(__file__), ".."))
loader = template.Loader(os.path.join(SCTR_ROOT, "templates", "management"))
class SctrManagementCommand(ManagementCommand):
def __init__(self, name, description, cmd_help, **kwargs):
super(SctrManagementCommand, self).__init__(
name, description, cmd_help, **kwargs)
self.help = loader.load("sctr_command_help.txt").generate(
command=self)
def get_help_description(self):
return "%s %s" % (
ftext.pad(self.name, size=20),
ftext.columnize(self.description, columns=30,
newline="\n%s" % (" " * 27)))
def get_subcommands_help(self):
subcommands = []
if self.sub_commands is not None:
subcommands = self.sub_commands
return loader.load("subcommands.txt").generate(
subcommands=subcommands)
sctrProcessSubcommands = [
SctrManagementCommand("list", "List Process", "",
tasks=tasks.ListProcessesTask),
SctrManagementCommand("restart", "Restart Process", "",
tasks=tasks.RestartProcessTask),
]
sctrUserSubcommands = [
SctrManagementCommand("add", " Add User", "", tasks=tasks.UserAddTask),
SctrManagementCommand("list", "List Users", "", tasks=tasks.UserListTask),
SctrManagementCommand("test", "Test User", "", tasks=tasks.UserTestTask),
]
sctrSubcommands = [
SctrManagementCommand(
"proc",
"Process related tasks",
"",
sub_commands=sctrProcessSubcommands),
SctrManagementCommand(
"user",
"User related tasks",
"",
sub_commands=sctrUserSubcommands),
]
SctrManagementCommand(
"sctr",
"Supervisord Controller related commands",
"",
category="Supervisord Controller",
sub_commands=sctrSubcommands)
| en | 0.831548 | #!/usr/bin/env python # # Copyright 2021 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 2.093004 | 2 |
util/file_util.py | zhangyi66ch/shuoGG1239p | 12 | 6621961 | <filename>util/file_util.py
import sys
import os
import codecs
import natsort
def get_file_content(file_path):
    """Read a text file encoded as UTF-8 (with or without BOM) or GBK.

    The file is first decoded with ``utf-8-sig``, which is plain UTF-8
    plus automatic removal of a leading BOM -- this replaces the previous
    manual encode/strip/decode dance.  If UTF-8 decoding fails, the file
    is re-read as GBK.

    :param file_path: path of the file to read
    :return: file content as str
    :raises OSError: if the file cannot be opened
    :raises UnicodeDecodeError: if the content is neither UTF-8 nor GBK
    """
    try:
        with open(file_path, encoding='utf-8-sig') as f1:
            return f1.read()
    except UnicodeDecodeError:
        # Narrowed from the previous broad ``except Exception`` so that
        # I/O errors are no longer masked by a pointless GBK retry.
        with open(file_path, encoding='GBK') as f2:
            return f2.read()
def write_file_content(file_path, text, encoding='utf8'):
    """Write *text* to *file_path*, encoded with *encoding*.

    The file is opened in binary mode so the encoded bytes are written
    verbatim.  Failures are printed instead of raised (best effort).

    :param file_path: destination path
    :param text: text to write
    :param encoding: codec used to encode *text* (default ``utf8``)
    :return: None
    """
    try:
        with open(file_path, mode='wb') as out:
            out.write(text.encode(encoding=encoding))
    except Exception as err:
        print(err)
def write_file_content_append(file_path, text, encoding='utf8'):
    """Append *text* (encoded with *encoding*) to *file_path*.

    The file is opened in binary append mode and created if missing.
    Failures are printed instead of raised (best effort).

    :param file_path: destination path
    :param text: text to append
    :param encoding: codec used to encode *text* (default ``utf8``)
    :return: None
    """
    try:
        with open(file_path, mode='ab') as out:
            out.write(text.encode(encoding=encoding))
    except Exception as err:
        print(err)
def quick_mkdir(name):
    """Create a sub-directory of the current working directory.

    Fixes the previous hard-coded ``'\\\\'`` separators, which produced
    broken paths on POSIX systems: the path is now built with
    ``os.path.join`` and keeps a trailing ``os.sep`` for backward
    compatibility with callers that concatenate file names onto the
    returned value.

    :param name: directory name to create under ``os.getcwd()``
    :return: full path of the directory, with a trailing separator
    """
    target = os.path.join(os.getcwd(), name)
    new_directory = target + os.sep
    if not os.path.exists(new_directory):
        try:
            os.mkdir(target)
        except Exception as e:
            # Best-effort, matching the original: report and still return
            # the path so callers can decide what to do.
            print(e)
    return new_directory
def get_files_fullpath(dir_path, suffix=''):
    """Return the full paths of every file directly inside *dir_path*.

    :param dir_path: directory to scan (not recursive)
    :param suffix: keep only names ending with this suffix, e.g. ``".sql"``;
        an empty string disables the filter
    :return: list of str, in ``os.listdir`` order
    """
    names = [n for n in os.listdir(dir_path)
             if os.path.isfile(os.path.join(dir_path, n))]
    if suffix != '':
        # Keep only entries with the requested suffix.
        names = [n for n in names if n.endswith(suffix)]
    return [os.path.join(dir_path, n) for n in names]
def get_files_fullpath_curdir(suffix=''):
    """Return the full paths of every file in the current working directory.

    Thin wrapper around :func:`get_files_fullpath` with
    ``os.getcwd()`` as the directory.

    :param suffix: keep only names ending with this suffix, e.g. ``".sql"``;
        an empty string disables the filter
    :return: list of str
    """
    return get_files_fullpath(os.getcwd(), suffix)
def get_dirs_fullpath(dir_path):
    """Return the full paths of every sub-directory of *dir_path*.

    :param dir_path: directory to scan (not recursive)
    :return: list of str, in ``os.listdir`` order
    """
    return [os.path.join(dir_path, n)
            for n in os.listdir(dir_path)
            if os.path.isdir(os.path.join(dir_path, n))]
if __name__ == '__main__':
    # Library module; nothing to run when executed directly.
    pass
import sys
import os
import codecs
import natsort
def get_file_content(file_path):
"""
读取文件, 暂时只支持utf8和gbk编码的文件, 自动去除BOM
:param file_path:
:return: str
"""
try:
with open(file_path, encoding='utf-8') as f1:
raw = f1.read()
# 去掉BOM
bom_head = raw.encode(encoding='utf-8')[:3]
if bom_head == codecs.BOM_UTF8:
raw = raw.encode(encoding='utf-8')[3:].decode(encoding='utf-8')
return raw
except Exception as e:
with open(file_path, encoding='GBK') as f2:
return f2.read()
def write_file_content(file_path, text, encoding='utf8'):
"""
写文件
:param file_path: str
:param text: str
:param encoding: str
:return: None
"""
try:
with open(file_path, mode='wb') as f1:
f1.write(text.encode(encoding=encoding))
except Exception as e:
print(e)
def write_file_content_append(file_path, text, encoding='utf8'):
"""
写文件-append模式
:param file_path: str
:param text: str
:param encoding: str
:return: None
"""
try:
with open(file_path, mode='ab') as f1:
f1.write(text.encode(encoding=encoding))
except Exception as e:
print(e)
def quick_mkdir(name):
"""
当前目录下建一个文件夹
:param name: 文件夹名称
:return: 新建的文件夹的完整路径
"""
new_directory = os.getcwd() + '\\' + name + "\\"
if not os.path.exists(new_directory):
try:
os.mkdir(os.getcwd() + '\\' + name)
except Exception as e:
print(e)
return new_directory
def get_files_fullpath(dir_path, suffix=''):
"""
获取dir_path目录下所有.xxx文件的路径
:param suffix: 后缀如".sql" ".java" ; 若不填则不进行文件过滤
:return: list of str
"""
files = list(filter(lambda x: os.path.isfile(os.path.join(dir_path, x)), os.listdir(dir_path)))
if suffix != '':
# 留下后缀为suffix的文件
files = list(filter(lambda x: x.endswith(suffix), files))
all_fullpath = list(map(lambda x: os.path.join(dir_path, x), files))
return all_fullpath
def get_files_fullpath_curdir(suffix=''):
"""
获取当前目录下所有.xxx文件的路径
:param suffix: 后缀如".sql" ".java" ; 若不填则不进行文件过滤
:return: list of str
"""
return get_files_fullpath(os.getcwd(), suffix)
def get_dirs_fullpath(dir_path):
"""
获取dir_path目录下所有文件夹的路径
"""
dirs = list(filter(lambda x: os.path.isdir(os.path.join(dir_path, x)), os.listdir(dir_path)))
all_fullpath = list(map(lambda x: os.path.join(dir_path, x), dirs))
return all_fullpath
if __name__ == '__main__':
pass | zh | 0.643314 | 读取文件, 暂时只支持utf8和gbk编码的文件, 自动去除BOM
:param file_path:
:return: str # 去掉BOM 写文件
:param file_path: str
:param text: str
:param encoding: str
:return: None 写文件-append模式
:param file_path: str
:param text: str
:param encoding: str
:return: None 当前目录下建一个文件夹
:param name: 文件夹名称
:return: 新建的文件夹的完整路径 获取dir_path目录下所有.xxx文件的路径
:param suffix: 后缀如".sql" ".java" ; 若不填则不进行文件过滤
:return: list of str # 留下后缀为suffix的文件 获取当前目录下所有.xxx文件的路径
:param suffix: 后缀如".sql" ".java" ; 若不填则不进行文件过滤
:return: list of str 获取dir_path目录下所有文件夹的路径 | 2.972203 | 3 |
app/Http/Controllers/verifyParser.py | Alexamith23/conversor | 0 | 6621962 | #!/usr/bin/env python
import json
import sys
# sanitize the argument
def main(argv=None):
    """Join the command-line arguments back into a single string.

    :param argv: iterable of argument strings; defaults to the current
        ``sys.argv[1:]``, read at call time
    :return: the arguments joined with single spaces
    """
    # The old default ``argv=sys.argv[1:]`` was evaluated once at import
    # time, so later changes to sys.argv were ignored; resolve it lazily.
    if argv is None:
        argv = sys.argv[1:]
    # str.join replaces the quadratic manual counter/concatenation loop.
    return " ".join(argv)
arguments = main()
#args = json.dumps(arguments) # doubtful operation
# default sys.exit output inverted!
# NOTE(review): the exit status doubles as the data channel -- with 8 or
# more arguments the joined string is passed to sys.exit (printed to
# stderr, exit status 1); otherwise the script exits with status 0.
# Presumably the calling controller parses this; confirm before changing.
if(len(sys.argv[1:]) >= 8):
    sys.exit(arguments)
else:
    sys.exit(0)
import json
import sys
# sanitize the argument
def main(argv = sys.argv[1:]):
var = ""
it = 1
for i in argv:
var += i
if(it != len(argv)):
var += " "
it += 1
pass
return var
arguments = main()
#args = json.dumps(arguments) # doubtful operation
# default sys.exit output inverted!
if(len(sys.argv[1:]) >= 8):
sys.exit(arguments)
else:
sys.exit(0) | en | 0.274854 | #!/usr/bin/env python # sanitize the argument #args = json.dumps(arguments) # doubtful operation # default sys.exit output inverted! | 3.813977 | 4 |
13_error/eg_13_01_error_motion_calibration.py | byrobot-python/e_drone_examples | 0 | 6621963 | <reponame>byrobot-python/e_drone_examples
# After requesting a drone sensor calibration, display the calibration progress reported through the error data on screen
from time import sleep
from e_drone.drone import *
from e_drone.protocol import *
def check_error(error, flag):
    """Return True when *flag*'s bit(s) are set in the *error* bitfield."""
    return (error & flag.value) != 0
def event_error(error):
    # Error-data callback: dump the raw flag words, then decode the one
    # bit this example cares about via check_error.
    print("* eventError() / SystemTime({0:10}) / ErrorFlagsForSensor({1:032b}) / ErrorFlagsForState({2:032b})".format(error.system_time, error.error_flags_for_sensor, error.error_flags_for_state))
    if check_error(error.error_flags_for_sensor, ErrorFlagsForSensor.MOTION_CALIBRATING):
        print(" - The Motion Sensor is being calibrated.")
if __name__ == '__main__':
    drone = Drone()
    drone.open()
    # Register the event handler for incoming error data.
    drone.set_event_handler(DataType.ERROR, event_error)
    drone.send_ping(DeviceType.CONTROLLER)
    sleep(0.1)
    # NOTE(review): clearing the bias appears to trigger the motion-sensor
    # recalibration that is polled for below -- confirm against the
    # e_drone protocol documentation.
    drone.send_command(DeviceType.DRONE, CommandType.CLEAR_BIAS)
    sleep(0.1)
    # Poll for up to 30 seconds until the calibrating flag clears.
    for i in range(30, 0, -1):
        print(i)
        sleep(1)
        error = drone.get_data(DataType.ERROR)
        if error and not check_error(error.error_flags_for_sensor, ErrorFlagsForSensor.MOTION_CALIBRATING):
            print("* The Motion Sensor Calibration is completed.")
            break
    drone.close()
from time import sleep
from e_drone.drone import *
from e_drone.protocol import *
def check_error(error, flag):
if error & flag.value != 0:
return True
else:
return False
def event_error(error):
print("* eventError() / SystemTime({0:10}) / ErrorFlagsForSensor({1:032b}) / ErrorFlagsForState({2:032b})".format(error.system_time, error.error_flags_for_sensor, error.error_flags_for_state))
if check_error(error.error_flags_for_sensor, ErrorFlagsForSensor.MOTION_CALIBRATING):
print(" - The Motion Sensor is being calibrated.")
if __name__ == '__main__':
drone = Drone()
drone.open()
# 이벤트 핸들링 함수 등록
drone.set_event_handler(DataType.ERROR, event_error)
drone.send_ping(DeviceType.CONTROLLER)
sleep(0.1)
drone.send_command(DeviceType.DRONE, CommandType.CLEAR_BIAS)
sleep(0.1)
for i in range(30, 0, -1):
print(i)
sleep(1)
error = drone.get_data(DataType.ERROR)
if error and not check_error(error.error_flags_for_sensor, ErrorFlagsForSensor.MOTION_CALIBRATING):
print("* The Motion Sensor Calibration is completed.")
break
drone.close() | ko | 1.00007 | # 드론 센서 캘리브레이션 요청 후 에러로 나타난 캘리브레이션 진행 상태를 화면에 표시 # 이벤트 핸들링 함수 등록 | 3.12324 | 3 |
graph_datasets/mazes/pmaze4.py | secnot/python-graph-datasets | 0 | 6621964 | <filename>graph_datasets/mazes/pmaze4.py
# Perfect maze (tree) in a 55x55 grid
# Nodes: 511
# Edges: 510
adjList = [
[18, 1],
[21, 2, 0],
[32, 1],
[33, 4],
[22, 3],
[23, 6],
[24, 5],
[25],
[9],
[38, 8],
[39, 11],
[45, 12, 10],
[40, 11],
[14],
[29, 15, 13],
[31, 14],
[35, 17],
[36, 16],
[78, 0, 19],
[18],
[42, 21],
[1, 20],
[4, 23],
[5, 22],
[44, 6, 25],
[7, 24],
[46, 27],
[64, 26],
[47, 29],
[14, 28],
[31],
[50, 15, 30],
[2, 33],
[3, 32],
[35],
[16, 34],
[17, 37],
[56, 36],
[9, 39],
[10, 38],
[12],
[60, 42],
[20, 43, 41],
[42],
[24],
[11, 46],
[26, 45],
[28, 48],
[75, 47],
[76, 50],
[31, 49],
[83, 52],
[66, 51],
[88, 54],
[68, 55, 53],
[54],
[37, 57],
[70, 56],
[71, 59],
[72, 58],
[41, 61],
[80, 60],
[81, 63],
[82, 62],
[27, 65],
[85, 64],
[52, 67],
[87, 66],
[54, 69],
[101, 68],
[57, 71],
[58, 70],
[59, 73],
[90, 72],
[92, 75],
[48, 74],
[49, 77],
[76],
[18, 79],
[95, 78],
[61, 81],
[62, 80],
[63, 83],
[51, 82],
[104, 85],
[65, 84],
[107],
[67, 88],
[53, 89, 87],
[88],
[109, 73, 91],
[90],
[141, 74, 93],
[111, 92],
[112, 95],
[79, 94],
[97],
[122, 98, 96],
[123, 97],
[116, 100],
[127, 99],
[128, 69, 102],
[117, 103, 101],
[129, 102],
[84, 105],
[119, 104],
[107],
[86, 108, 106],
[125, 107],
[90, 110],
[139, 109],
[93, 112],
[94, 111],
[144, 114],
[131, 113],
[135, 116],
[126, 99, 115],
[102],
[140, 119],
[194, 105, 118],
[121],
[146, 122, 120],
[97, 121],
[98, 124],
[155, 125, 123],
[108, 124],
[116],
[100, 128],
[101, 127],
[103, 130],
[129],
[114, 132],
[181, 131],
[148, 134],
[152, 133],
[115, 136],
[157, 135],
[166, 138],
[159, 137],
[110, 140],
[118, 139],
[92, 142],
[167, 141],
[144],
[151, 113, 145, 143],
[180, 144],
[121, 147],
[164, 146],
[133],
[186, 150],
[165, 149],
[144],
[184, 134, 153],
[169, 152],
[170, 155],
[124, 154],
[172],
[136, 158],
[185, 157],
[138, 160],
[174, 159],
[175, 162],
[176, 161],
[182, 164],
[147, 163],
[150, 166],
[137, 165],
[142, 168],
[196, 167],
[153, 170],
[154, 169],
[172],
[192, 156, 173, 171],
[172],
[160, 175],
[161, 174],
[162, 177],
[193, 176],
[216],
[197, 180],
[145, 179],
[132, 182],
[163, 181],
[200, 184],
[152, 183],
[158, 186],
[149, 185],
[207, 188],
[226, 189, 187],
[208, 188],
[203],
[225, 192],
[234, 172, 191],
[177, 194],
[119, 193],
[217, 196],
[168, 195],
[179, 198],
[219, 197],
[221, 200],
[183, 199],
[212],
[233, 203],
[224, 190, 202],
[235, 205],
[236, 204],
[237, 207],
[187, 206],
[189, 209],
[228, 208],
[220],
[313, 212],
[232, 201, 211],
[229],
[246, 215],
[239, 214],
[178, 217],
[230, 195, 216],
[249, 219],
[198, 218],
[240, 210, 221],
[199, 220],
[241, 223],
[283, 222],
[203, 225],
[191, 224],
[188, 227],
[252, 226],
[209, 229],
[213, 228],
[217, 231],
[248, 230],
[212, 233],
[202, 232],
[192, 235],
[204, 234],
[205, 237],
[206, 236],
[254, 239],
[215, 238],
[220, 241],
[222, 240],
[243],
[258, 242],
[260],
[268, 246],
[214, 245],
[272, 248],
[231, 247],
[218, 250],
[262, 249],
[264],
[227, 253],
[267, 252],
[238, 255],
[271, 254],
[275, 257],
[256],
[243, 259],
[318, 260, 258],
[277, 244, 259],
[281, 262],
[250, 261],
[297, 264],
[251, 265, 263],
[284, 264],
[278],
[253, 268],
[245, 269, 267],
[268],
[293, 271],
[299, 255, 270],
[247, 273],
[307, 272],
[280],
[256, 276],
[296, 275],
[260, 278],
[266, 279, 277],
[305, 278],
[308, 274, 281],
[261, 280],
[323, 283],
[223, 282],
[265, 285],
[315, 284],
[316, 287],
[301, 286],
[302, 289],
[317, 288],
[291],
[341, 290],
[319, 293],
[270, 292],
[310, 295],
[311, 296, 294],
[276, 295],
[263, 298],
[314, 297],
[271],
[321],
[287, 302],
[288, 301],
[337, 304],
[338, 303],
[279, 306],
[340, 305],
[273, 308],
[280, 307],
[344, 310],
[294, 309],
[295, 312],
[327, 311],
[211, 314],
[298, 313],
[285, 316],
[286, 315],
[335, 289, 318],
[259, 317],
[292, 320],
[342, 319],
[375, 300, 322],
[325, 321],
[282, 324],
[347, 323],
[359, 322, 326],
[325],
[312, 328],
[346, 327],
[348, 330],
[349, 329],
[350, 332],
[376, 333, 331],
[332],
[351, 335],
[317, 334],
[353, 337],
[303, 336],
[304, 339],
[355, 338],
[306, 341],
[291, 340],
[320, 343],
[400, 342],
[309, 345],
[362, 344],
[328, 347],
[324, 346],
[329],
[330, 350],
[331, 349],
[364, 334, 352],
[351],
[336, 354],
[370, 353],
[339, 356],
[371, 355],
[372, 358],
[374, 357],
[325, 360],
[381, 359],
[403, 362],
[345, 361],
[377, 364],
[351, 363],
[406, 366],
[410, 365],
[385, 368],
[390, 367],
[386, 370],
[354, 369],
[356, 372],
[357, 371],
[374],
[399, 358, 373],
[321],
[391, 332, 377],
[363, 376],
[393, 379],
[395, 378],
[402, 381],
[388, 360, 380],
[404, 383],
[405, 382],
[411, 385],
[367, 384],
[369, 387],
[409, 386],
[381],
[413, 390],
[368, 389],
[376, 392],
[407, 391],
[378, 394],
[417, 393],
[379, 396],
[419, 395],
[427, 398],
[428, 397],
[374, 400],
[343, 399],
[420, 402],
[380, 401],
[361, 404],
[382, 403],
[383, 406],
[365, 405],
[392],
[425, 409],
[387, 408],
[366, 411],
[384, 410],
[432, 413],
[389, 412],
[433, 415],
[434, 414],
[436, 417],
[394, 416],
[438, 419],
[396, 418],
[401, 421],
[466, 422, 420],
[440, 421],
[441, 424],
[443, 423],
[408, 426],
[444, 427, 425],
[397, 426],
[398, 429],
[447, 428],
[457, 431],
[450, 430],
[412, 433],
[414, 432],
[415, 435],
[460, 434],
[416, 437],
[452, 436],
[418, 439],
[454, 438],
[422, 441],
[423, 440],
[469, 443],
[424, 442],
[426, 445],
[463, 444],
[464, 447],
[499, 429, 446],
[473, 449],
[468, 448],
[431, 451],
[458, 450],
[437, 453],
[490, 452],
[439, 455],
[481, 454],
[486, 457],
[430, 456],
[475, 451, 459],
[458],
[435, 461],
[478, 460],
[479],
[445],
[446, 465],
[498, 464],
[421, 467],
[501, 466],
[449, 469],
[442, 468],
[493, 471],
[506, 470],
[502, 473],
[448, 472],
[488],
[458, 476],
[492, 475],
[507, 478],
[461, 477],
[510, 462, 480],
[494, 479],
[455, 482],
[495, 481],
[496, 484],
[497, 483],
[503, 486],
[456, 485],
[488],
[504, 474, 487],
[508, 490],
[453, 489],
[500],
[476, 493],
[470, 492],
[480, 495],
[482, 496, 494],
[483, 495],
[484, 498],
[465, 497],
[447],
[491, 501],
[467, 500],
[472, 503],
[485, 504, 502],
[488, 505, 503],
[504],
[471, 507],
[477, 508, 506],
[489, 507],
[510],
[479, 509]]
# x coord, y coord
nodeData = [
(11, 1),
(18, 1),
(20, 1),
(23, 1),
(26, 1),
(28, 1),
(30, 1),
(32, 1),
(41, 1),
(43, 1),
(45, 1),
(48, 1),
(55, 1),
(2, 2),
(4, 2),
(9, 2),
(36, 2),
(38, 2),
(11, 3),
(12, 3),
(16, 3),
(18, 3),
(26, 3),
(28, 3),
(30, 3),
(32, 3),
(50, 3),
(52, 3),
(1, 4),
(4, 4),
(7, 4),
(9, 4),
(20, 4),
(23, 4),
(35, 4),
(36, 4),
(38, 4),
(40, 4),
(43, 4),
(45, 4),
(55, 4),
(14, 5),
(16, 5),
(17, 5),
(30, 5),
(48, 5),
(50, 5),
(1, 6),
(5, 6),
(7, 6),
(9, 6),
(23, 6),
(27, 6),
(32, 6),
(35, 6),
(36, 6),
(40, 6),
(43, 6),
(45, 6),
(47, 6),
(14, 7),
(17, 7),
(19, 7),
(21, 7),
(52, 7),
(55, 7),
(27, 8),
(30, 8),
(35, 8),
(41, 8),
(43, 8),
(45, 8),
(47, 8),
(49, 8),
(1, 9),
(5, 9),
(7, 9),
(8, 9),
(11, 9),
(13, 9),
(17, 9),
(19, 9),
(21, 9),
(23, 9),
(53, 9),
(55, 9),
(26, 10),
(30, 10),
(32, 10),
(34, 10),
(49, 10),
(50, 10),
(1, 11),
(3, 11),
(5, 11),
(13, 11),
(16, 11),
(18, 11),
(22, 11),
(37, 11),
(39, 11),
(41, 11),
(45, 11),
(47, 11),
(53, 11),
(55, 11),
(24, 12),
(26, 12),
(30, 12),
(49, 12),
(51, 12),
(3, 13),
(5, 13),
(7, 13),
(10, 13),
(32, 13),
(37, 13),
(45, 13),
(53, 13),
(55, 13),
(14, 14),
(15, 14),
(18, 14),
(22, 14),
(28, 14),
(30, 14),
(37, 14),
(39, 14),
(41, 14),
(47, 14),
(48, 14),
(10, 15),
(12, 15),
(19, 15),
(21, 15),
(32, 15),
(34, 15),
(42, 15),
(45, 15),
(51, 15),
(53, 15),
(1, 16),
(3, 16),
(6, 16),
(7, 16),
(9, 16),
(15, 16),
(17, 16),
(19, 16),
(38, 16),
(40, 16),
(7, 17),
(21, 17),
(23, 17),
(25, 17),
(28, 17),
(31, 17),
(34, 17),
(36, 17),
(45, 17),
(47, 17),
(49, 17),
(51, 17),
(14, 18),
(17, 18),
(40, 18),
(42, 18),
(3, 19),
(5, 19),
(23, 19),
(25, 19),
(30, 19),
(31, 19),
(32, 19),
(47, 19),
(49, 19),
(51, 19),
(53, 19),
(1, 20),
(7, 20),
(9, 20),
(12, 20),
(14, 20),
(18, 20),
(21, 20),
(36, 20),
(38, 20),
(40, 20),
(42, 20),
(44, 20),
(27, 21),
(29, 21),
(31, 21),
(53, 21),
(55, 21),
(3, 22),
(5, 22),
(7, 22),
(9, 22),
(14, 22),
(18, 22),
(23, 22),
(25, 22),
(27, 22),
(33, 22),
(35, 22),
(38, 22),
(40, 22),
(44, 22),
(46, 22),
(12, 23),
(21, 23),
(23, 23),
(49, 23),
(51, 23),
(55, 23),
(1, 24),
(3, 24),
(7, 24),
(9, 24),
(12, 24),
(14, 24),
(17, 24),
(19, 24),
(27, 24),
(29, 24),
(42, 24),
(44, 24),
(46, 24),
(49, 24),
(3, 25),
(5, 25),
(23, 25),
(25, 25),
(31, 25),
(33, 25),
(35, 25),
(38, 25),
(53, 25),
(55, 25),
(12, 26),
(17, 26),
(27, 26),
(29, 26),
(40, 26),
(49, 26),
(51, 26),
(2, 27),
(5, 27),
(7, 27),
(11, 27),
(25, 27),
(44, 27),
(46, 27),
(53, 27),
(55, 27),
(13, 28),
(16, 28),
(29, 28),
(38, 28),
(40, 28),
(8, 29),
(11, 29),
(23, 29),
(25, 29),
(26, 29),
(43, 29),
(46, 29),
(49, 29),
(50, 29),
(53, 29),
(55, 29),
(2, 30),
(4, 30),
(6, 30),
(13, 30),
(15, 30),
(40, 30),
(43, 30),
(44, 30),
(6, 31),
(8, 31),
(17, 31),
(19, 31),
(26, 31),
(28, 31),
(30, 31),
(32, 31),
(34, 31),
(36, 31),
(48, 31),
(49, 31),
(51, 31),
(53, 31),
(10, 32),
(12, 32),
(15, 32),
(23, 32),
(25, 32),
(55, 32),
(1, 33),
(32, 33),
(34, 33),
(40, 33),
(42, 33),
(44, 33),
(47, 33),
(4, 34),
(6, 34),
(8, 34),
(10, 34),
(12, 34),
(14, 34),
(21, 34),
(25, 34),
(28, 34),
(30, 34),
(36, 34),
(38, 34),
(51, 34),
(53, 34),
(1, 35),
(3, 35),
(17, 35),
(19, 35),
(3, 36),
(5, 36),
(14, 36),
(16, 36),
(21, 36),
(23, 36),
(25, 36),
(28, 36),
(30, 36),
(33, 36),
(36, 36),
(38, 36),
(40, 36),
(42, 36),
(44, 36),
(47, 36),
(49, 36),
(53, 36),
(55, 36),
(8, 37),
(13, 37),
(16, 38),
(19, 38),
(21, 38),
(23, 38),
(25, 38),
(33, 38),
(34, 38),
(38, 38),
(42, 38),
(44, 38),
(46, 38),
(48, 38),
(52, 38),
(3, 39),
(6, 39),
(9, 39),
(13, 39),
(30, 39),
(33, 39),
(15, 40),
(17, 40),
(22, 40),
(25, 40),
(40, 40),
(42, 40),
(46, 40),
(48, 40),
(51, 40),
(52, 40),
(1, 41),
(28, 41),
(30, 41),
(32, 41),
(37, 41),
(4, 42),
(6, 42),
(11, 42),
(13, 42),
(19, 42),
(22, 42),
(40, 42),
(46, 42),
(6, 43),
(23, 43),
(25, 43),
(28, 43),
(30, 43),
(32, 43),
(34, 43),
(37, 43),
(39, 43),
(48, 43),
(50, 43),
(52, 43),
(55, 43),
(1, 44),
(4, 44),
(9, 44),
(11, 44),
(13, 44),
(15, 44),
(30, 44),
(42, 44),
(46, 44),
(17, 45),
(19, 45),
(21, 45),
(23, 45),
(25, 45),
(27, 45),
(32, 45),
(34, 45),
(36, 45),
(39, 45),
(1, 46),
(2, 46),
(4, 46),
(7, 46),
(14, 46),
(42, 46),
(45, 46),
(48, 46),
(50, 46),
(54, 46),
(16, 47),
(19, 47),
(21, 47),
(25, 47),
(27, 47),
(29, 47),
(32, 47),
(34, 47),
(36, 47),
(41, 47),
(4, 48),
(7, 48),
(12, 48),
(14, 48),
(45, 48),
(47, 48),
(49, 48),
(54, 48),
(8, 49),
(10, 49),
(19, 49),
(21, 49),
(34, 49),
(36, 49),
(41, 49),
(43, 49),
(14, 50),
(16, 50),
(21, 50),
(24, 50),
(29, 50),
(32, 50),
(38, 50),
(47, 50),
(49, 50),
(52, 50),
(2, 51),
(4, 51),
(10, 51),
(12, 51),
(26, 51),
(28, 51),
(6, 52),
(8, 52),
(18, 52),
(21, 52),
(23, 52),
(30, 52),
(32, 52),
(38, 52),
(40, 52),
(43, 52),
(45, 52),
(47, 52),
(50, 52),
(9, 53),
(14, 53),
(17, 53),
(18, 53),
(33, 53),
(36, 53),
(1, 54),
(23, 54),
(26, 54),
(40, 54),
(45, 54),
(47, 54),
(50, 54),
(52, 54),
(54, 54),
(1, 55),
(4, 55),
(6, 55),
(9, 55),
(18, 55),
(20, 55),
(28, 55),
(30, 55),
(33, 55),
(37, 55),
(38, 55)]
| <filename>graph_datasets/mazes/pmaze4.py
# Perfect maze (tree) in a 55x55 grid
# Nodes: 511
# Edges: 510
adjList = [
[18, 1],
[21, 2, 0],
[32, 1],
[33, 4],
[22, 3],
[23, 6],
[24, 5],
[25],
[9],
[38, 8],
[39, 11],
[45, 12, 10],
[40, 11],
[14],
[29, 15, 13],
[31, 14],
[35, 17],
[36, 16],
[78, 0, 19],
[18],
[42, 21],
[1, 20],
[4, 23],
[5, 22],
[44, 6, 25],
[7, 24],
[46, 27],
[64, 26],
[47, 29],
[14, 28],
[31],
[50, 15, 30],
[2, 33],
[3, 32],
[35],
[16, 34],
[17, 37],
[56, 36],
[9, 39],
[10, 38],
[12],
[60, 42],
[20, 43, 41],
[42],
[24],
[11, 46],
[26, 45],
[28, 48],
[75, 47],
[76, 50],
[31, 49],
[83, 52],
[66, 51],
[88, 54],
[68, 55, 53],
[54],
[37, 57],
[70, 56],
[71, 59],
[72, 58],
[41, 61],
[80, 60],
[81, 63],
[82, 62],
[27, 65],
[85, 64],
[52, 67],
[87, 66],
[54, 69],
[101, 68],
[57, 71],
[58, 70],
[59, 73],
[90, 72],
[92, 75],
[48, 74],
[49, 77],
[76],
[18, 79],
[95, 78],
[61, 81],
[62, 80],
[63, 83],
[51, 82],
[104, 85],
[65, 84],
[107],
[67, 88],
[53, 89, 87],
[88],
[109, 73, 91],
[90],
[141, 74, 93],
[111, 92],
[112, 95],
[79, 94],
[97],
[122, 98, 96],
[123, 97],
[116, 100],
[127, 99],
[128, 69, 102],
[117, 103, 101],
[129, 102],
[84, 105],
[119, 104],
[107],
[86, 108, 106],
[125, 107],
[90, 110],
[139, 109],
[93, 112],
[94, 111],
[144, 114],
[131, 113],
[135, 116],
[126, 99, 115],
[102],
[140, 119],
[194, 105, 118],
[121],
[146, 122, 120],
[97, 121],
[98, 124],
[155, 125, 123],
[108, 124],
[116],
[100, 128],
[101, 127],
[103, 130],
[129],
[114, 132],
[181, 131],
[148, 134],
[152, 133],
[115, 136],
[157, 135],
[166, 138],
[159, 137],
[110, 140],
[118, 139],
[92, 142],
[167, 141],
[144],
[151, 113, 145, 143],
[180, 144],
[121, 147],
[164, 146],
[133],
[186, 150],
[165, 149],
[144],
[184, 134, 153],
[169, 152],
[170, 155],
[124, 154],
[172],
[136, 158],
[185, 157],
[138, 160],
[174, 159],
[175, 162],
[176, 161],
[182, 164],
[147, 163],
[150, 166],
[137, 165],
[142, 168],
[196, 167],
[153, 170],
[154, 169],
[172],
[192, 156, 173, 171],
[172],
[160, 175],
[161, 174],
[162, 177],
[193, 176],
[216],
[197, 180],
[145, 179],
[132, 182],
[163, 181],
[200, 184],
[152, 183],
[158, 186],
[149, 185],
[207, 188],
[226, 189, 187],
[208, 188],
[203],
[225, 192],
[234, 172, 191],
[177, 194],
[119, 193],
[217, 196],
[168, 195],
[179, 198],
[219, 197],
[221, 200],
[183, 199],
[212],
[233, 203],
[224, 190, 202],
[235, 205],
[236, 204],
[237, 207],
[187, 206],
[189, 209],
[228, 208],
[220],
[313, 212],
[232, 201, 211],
[229],
[246, 215],
[239, 214],
[178, 217],
[230, 195, 216],
[249, 219],
[198, 218],
[240, 210, 221],
[199, 220],
[241, 223],
[283, 222],
[203, 225],
[191, 224],
[188, 227],
[252, 226],
[209, 229],
[213, 228],
[217, 231],
[248, 230],
[212, 233],
[202, 232],
[192, 235],
[204, 234],
[205, 237],
[206, 236],
[254, 239],
[215, 238],
[220, 241],
[222, 240],
[243],
[258, 242],
[260],
[268, 246],
[214, 245],
[272, 248],
[231, 247],
[218, 250],
[262, 249],
[264],
[227, 253],
[267, 252],
[238, 255],
[271, 254],
[275, 257],
[256],
[243, 259],
[318, 260, 258],
[277, 244, 259],
[281, 262],
[250, 261],
[297, 264],
[251, 265, 263],
[284, 264],
[278],
[253, 268],
[245, 269, 267],
[268],
[293, 271],
[299, 255, 270],
[247, 273],
[307, 272],
[280],
[256, 276],
[296, 275],
[260, 278],
[266, 279, 277],
[305, 278],
[308, 274, 281],
[261, 280],
[323, 283],
[223, 282],
[265, 285],
[315, 284],
[316, 287],
[301, 286],
[302, 289],
[317, 288],
[291],
[341, 290],
[319, 293],
[270, 292],
[310, 295],
[311, 296, 294],
[276, 295],
[263, 298],
[314, 297],
[271],
[321],
[287, 302],
[288, 301],
[337, 304],
[338, 303],
[279, 306],
[340, 305],
[273, 308],
[280, 307],
[344, 310],
[294, 309],
[295, 312],
[327, 311],
[211, 314],
[298, 313],
[285, 316],
[286, 315],
[335, 289, 318],
[259, 317],
[292, 320],
[342, 319],
[375, 300, 322],
[325, 321],
[282, 324],
[347, 323],
[359, 322, 326],
[325],
[312, 328],
[346, 327],
[348, 330],
[349, 329],
[350, 332],
[376, 333, 331],
[332],
[351, 335],
[317, 334],
[353, 337],
[303, 336],
[304, 339],
[355, 338],
[306, 341],
[291, 340],
[320, 343],
[400, 342],
[309, 345],
[362, 344],
[328, 347],
[324, 346],
[329],
[330, 350],
[331, 349],
[364, 334, 352],
[351],
[336, 354],
[370, 353],
[339, 356],
[371, 355],
[372, 358],
[374, 357],
[325, 360],
[381, 359],
[403, 362],
[345, 361],
[377, 364],
[351, 363],
[406, 366],
[410, 365],
[385, 368],
[390, 367],
[386, 370],
[354, 369],
[356, 372],
[357, 371],
[374],
[399, 358, 373],
[321],
[391, 332, 377],
[363, 376],
[393, 379],
[395, 378],
[402, 381],
[388, 360, 380],
[404, 383],
[405, 382],
[411, 385],
[367, 384],
[369, 387],
[409, 386],
[381],
[413, 390],
[368, 389],
[376, 392],
[407, 391],
[378, 394],
[417, 393],
[379, 396],
[419, 395],
[427, 398],
[428, 397],
[374, 400],
[343, 399],
[420, 402],
[380, 401],
[361, 404],
[382, 403],
[383, 406],
[365, 405],
[392],
[425, 409],
[387, 408],
[366, 411],
[384, 410],
[432, 413],
[389, 412],
[433, 415],
[434, 414],
[436, 417],
[394, 416],
[438, 419],
[396, 418],
[401, 421],
[466, 422, 420],
[440, 421],
[441, 424],
[443, 423],
[408, 426],
[444, 427, 425],
[397, 426],
[398, 429],
[447, 428],
[457, 431],
[450, 430],
[412, 433],
[414, 432],
[415, 435],
[460, 434],
[416, 437],
[452, 436],
[418, 439],
[454, 438],
[422, 441],
[423, 440],
[469, 443],
[424, 442],
[426, 445],
[463, 444],
[464, 447],
[499, 429, 446],
[473, 449],
[468, 448],
[431, 451],
[458, 450],
[437, 453],
[490, 452],
[439, 455],
[481, 454],
[486, 457],
[430, 456],
[475, 451, 459],
[458],
[435, 461],
[478, 460],
[479],
[445],
[446, 465],
[498, 464],
[421, 467],
[501, 466],
[449, 469],
[442, 468],
[493, 471],
[506, 470],
[502, 473],
[448, 472],
[488],
[458, 476],
[492, 475],
[507, 478],
[461, 477],
[510, 462, 480],
[494, 479],
[455, 482],
[495, 481],
[496, 484],
[497, 483],
[503, 486],
[456, 485],
[488],
[504, 474, 487],
[508, 490],
[453, 489],
[500],
[476, 493],
[470, 492],
[480, 495],
[482, 496, 494],
[483, 495],
[484, 498],
[465, 497],
[447],
[491, 501],
[467, 500],
[472, 503],
[485, 504, 502],
[488, 505, 503],
[504],
[471, 507],
[477, 508, 506],
[489, 507],
[510],
[479, 509]]
# x coord, y coord
nodeData = [
(11, 1),
(18, 1),
(20, 1),
(23, 1),
(26, 1),
(28, 1),
(30, 1),
(32, 1),
(41, 1),
(43, 1),
(45, 1),
(48, 1),
(55, 1),
(2, 2),
(4, 2),
(9, 2),
(36, 2),
(38, 2),
(11, 3),
(12, 3),
(16, 3),
(18, 3),
(26, 3),
(28, 3),
(30, 3),
(32, 3),
(50, 3),
(52, 3),
(1, 4),
(4, 4),
(7, 4),
(9, 4),
(20, 4),
(23, 4),
(35, 4),
(36, 4),
(38, 4),
(40, 4),
(43, 4),
(45, 4),
(55, 4),
(14, 5),
(16, 5),
(17, 5),
(30, 5),
(48, 5),
(50, 5),
(1, 6),
(5, 6),
(7, 6),
(9, 6),
(23, 6),
(27, 6),
(32, 6),
(35, 6),
(36, 6),
(40, 6),
(43, 6),
(45, 6),
(47, 6),
(14, 7),
(17, 7),
(19, 7),
(21, 7),
(52, 7),
(55, 7),
(27, 8),
(30, 8),
(35, 8),
(41, 8),
(43, 8),
(45, 8),
(47, 8),
(49, 8),
(1, 9),
(5, 9),
(7, 9),
(8, 9),
(11, 9),
(13, 9),
(17, 9),
(19, 9),
(21, 9),
(23, 9),
(53, 9),
(55, 9),
(26, 10),
(30, 10),
(32, 10),
(34, 10),
(49, 10),
(50, 10),
(1, 11),
(3, 11),
(5, 11),
(13, 11),
(16, 11),
(18, 11),
(22, 11),
(37, 11),
(39, 11),
(41, 11),
(45, 11),
(47, 11),
(53, 11),
(55, 11),
(24, 12),
(26, 12),
(30, 12),
(49, 12),
(51, 12),
(3, 13),
(5, 13),
(7, 13),
(10, 13),
(32, 13),
(37, 13),
(45, 13),
(53, 13),
(55, 13),
(14, 14),
(15, 14),
(18, 14),
(22, 14),
(28, 14),
(30, 14),
(37, 14),
(39, 14),
(41, 14),
(47, 14),
(48, 14),
(10, 15),
(12, 15),
(19, 15),
(21, 15),
(32, 15),
(34, 15),
(42, 15),
(45, 15),
(51, 15),
(53, 15),
(1, 16),
(3, 16),
(6, 16),
(7, 16),
(9, 16),
(15, 16),
(17, 16),
(19, 16),
(38, 16),
(40, 16),
(7, 17),
(21, 17),
(23, 17),
(25, 17),
(28, 17),
(31, 17),
(34, 17),
(36, 17),
(45, 17),
(47, 17),
(49, 17),
(51, 17),
(14, 18),
(17, 18),
(40, 18),
(42, 18),
(3, 19),
(5, 19),
(23, 19),
(25, 19),
(30, 19),
(31, 19),
(32, 19),
(47, 19),
(49, 19),
(51, 19),
(53, 19),
(1, 20),
(7, 20),
(9, 20),
(12, 20),
(14, 20),
(18, 20),
(21, 20),
(36, 20),
(38, 20),
(40, 20),
(42, 20),
(44, 20),
(27, 21),
(29, 21),
(31, 21),
(53, 21),
(55, 21),
(3, 22),
(5, 22),
(7, 22),
(9, 22),
(14, 22),
(18, 22),
(23, 22),
(25, 22),
(27, 22),
(33, 22),
(35, 22),
(38, 22),
(40, 22),
(44, 22),
(46, 22),
(12, 23),
(21, 23),
(23, 23),
(49, 23),
(51, 23),
(55, 23),
(1, 24),
(3, 24),
(7, 24),
(9, 24),
(12, 24),
(14, 24),
(17, 24),
(19, 24),
(27, 24),
(29, 24),
(42, 24),
(44, 24),
(46, 24),
(49, 24),
(3, 25),
(5, 25),
(23, 25),
(25, 25),
(31, 25),
(33, 25),
(35, 25),
(38, 25),
(53, 25),
(55, 25),
(12, 26),
(17, 26),
(27, 26),
(29, 26),
(40, 26),
(49, 26),
(51, 26),
(2, 27),
(5, 27),
(7, 27),
(11, 27),
(25, 27),
(44, 27),
(46, 27),
(53, 27),
(55, 27),
(13, 28),
(16, 28),
(29, 28),
(38, 28),
(40, 28),
(8, 29),
(11, 29),
(23, 29),
(25, 29),
(26, 29),
(43, 29),
(46, 29),
(49, 29),
(50, 29),
(53, 29),
(55, 29),
(2, 30),
(4, 30),
(6, 30),
(13, 30),
(15, 30),
(40, 30),
(43, 30),
(44, 30),
(6, 31),
(8, 31),
(17, 31),
(19, 31),
(26, 31),
(28, 31),
(30, 31),
(32, 31),
(34, 31),
(36, 31),
(48, 31),
(49, 31),
(51, 31),
(53, 31),
(10, 32),
(12, 32),
(15, 32),
(23, 32),
(25, 32),
(55, 32),
(1, 33),
(32, 33),
(34, 33),
(40, 33),
(42, 33),
(44, 33),
(47, 33),
(4, 34),
(6, 34),
(8, 34),
(10, 34),
(12, 34),
(14, 34),
(21, 34),
(25, 34),
(28, 34),
(30, 34),
(36, 34),
(38, 34),
(51, 34),
(53, 34),
(1, 35),
(3, 35),
(17, 35),
(19, 35),
(3, 36),
(5, 36),
(14, 36),
(16, 36),
(21, 36),
(23, 36),
(25, 36),
(28, 36),
(30, 36),
(33, 36),
(36, 36),
(38, 36),
(40, 36),
(42, 36),
(44, 36),
(47, 36),
(49, 36),
(53, 36),
(55, 36),
(8, 37),
(13, 37),
(16, 38),
(19, 38),
(21, 38),
(23, 38),
(25, 38),
(33, 38),
(34, 38),
(38, 38),
(42, 38),
(44, 38),
(46, 38),
(48, 38),
(52, 38),
(3, 39),
(6, 39),
(9, 39),
(13, 39),
(30, 39),
(33, 39),
(15, 40),
(17, 40),
(22, 40),
(25, 40),
(40, 40),
(42, 40),
(46, 40),
(48, 40),
(51, 40),
(52, 40),
(1, 41),
(28, 41),
(30, 41),
(32, 41),
(37, 41),
(4, 42),
(6, 42),
(11, 42),
(13, 42),
(19, 42),
(22, 42),
(40, 42),
(46, 42),
(6, 43),
(23, 43),
(25, 43),
(28, 43),
(30, 43),
(32, 43),
(34, 43),
(37, 43),
(39, 43),
(48, 43),
(50, 43),
(52, 43),
(55, 43),
(1, 44),
(4, 44),
(9, 44),
(11, 44),
(13, 44),
(15, 44),
(30, 44),
(42, 44),
(46, 44),
(17, 45),
(19, 45),
(21, 45),
(23, 45),
(25, 45),
(27, 45),
(32, 45),
(34, 45),
(36, 45),
(39, 45),
(1, 46),
(2, 46),
(4, 46),
(7, 46),
(14, 46),
(42, 46),
(45, 46),
(48, 46),
(50, 46),
(54, 46),
(16, 47),
(19, 47),
(21, 47),
(25, 47),
(27, 47),
(29, 47),
(32, 47),
(34, 47),
(36, 47),
(41, 47),
(4, 48),
(7, 48),
(12, 48),
(14, 48),
(45, 48),
(47, 48),
(49, 48),
(54, 48),
(8, 49),
(10, 49),
(19, 49),
(21, 49),
(34, 49),
(36, 49),
(41, 49),
(43, 49),
(14, 50),
(16, 50),
(21, 50),
(24, 50),
(29, 50),
(32, 50),
(38, 50),
(47, 50),
(49, 50),
(52, 50),
(2, 51),
(4, 51),
(10, 51),
(12, 51),
(26, 51),
(28, 51),
(6, 52),
(8, 52),
(18, 52),
(21, 52),
(23, 52),
(30, 52),
(32, 52),
(38, 52),
(40, 52),
(43, 52),
(45, 52),
(47, 52),
(50, 52),
(9, 53),
(14, 53),
(17, 53),
(18, 53),
(33, 53),
(36, 53),
(1, 54),
(23, 54),
(26, 54),
(40, 54),
(45, 54),
(47, 54),
(50, 54),
(52, 54),
(54, 54),
(1, 55),
(4, 55),
(6, 55),
(9, 55),
(18, 55),
(20, 55),
(28, 55),
(30, 55),
(33, 55),
(37, 55),
(38, 55)]
| en | 0.407232 | # Perfect maze (tree) in a 55x55 grid # Nodes: 511 # Edges: 510 # x coord, y coord | 2.344962 | 2 |
apps/cars/migrations/0004_auto_20210623_1520.py | Marpop/demo-car-app | 0 | 6621965 | <gh_stars>0
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cars", "0003_auto_20210623_1457"),
]
operations = [
migrations.RemoveConstraint(
model_name="car",
name="unique name",
),
migrations.RenameField(
model_name="car",
old_name="maker",
new_name="make",
),
migrations.AddConstraint(
model_name="car",
constraint=models.UniqueConstraint(
fields=("make", "model"), name="unique name"
),
),
]
| from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("cars", "0003_auto_20210623_1457"),
]
operations = [
migrations.RemoveConstraint(
model_name="car",
name="unique name",
),
migrations.RenameField(
model_name="car",
old_name="maker",
new_name="make",
),
migrations.AddConstraint(
model_name="car",
constraint=models.UniqueConstraint(
fields=("make", "model"), name="unique name"
),
),
] | none | 1 | 1.874508 | 2 | |
xebec/tests/test_template.py | gibsramen/xebec | 1 | 6621966 | import os
def test_bake_project(cookies, data_paths):
result = cookies.bake(extra_context={
"project_name": "example-benchmark",
"feature_table_file": data_paths.table_file,
"sample_metadata_file": data_paths.metadata_file,
"phylogenetic_tree_file": data_paths.tree_file
})
assert result.exit_code == 0
assert result.exception is None
assert result.project_path.name == "example-benchmark"
assert result.project_path.is_dir()
files = os.listdir(result.project_path)
assert set(files) == {"workflow", "config"}
config_dir = os.path.join(result.project_path, "config")
config_files = os.listdir(config_dir)
assert set(config_files) == {
"alpha_div_metrics.tsv",
"beta_div_metrics.tsv",
"config.yaml"
}
workflow_dir = os.path.join(result.project_path, "workflow")
workflow_files = os.listdir(workflow_dir)
assert set(workflow_files) == {"rules", "Snakefile", "scripts", "report"}
rules_dir = os.path.join(workflow_dir, "rules")
rules_files = os.listdir(rules_dir)
exp_rules = {
"alpha_diversity.smk",
"beta_diversity.smk",
"evident.smk",
"visualization.smk",
"preprocess_data.smk",
}
assert set(rules_files) == exp_rules
scripts_dir = os.path.join(workflow_dir, "scripts")
scripts_files = os.listdir(scripts_dir)
assert set(scripts_files) == {
"interactive_effect_sizes.py",
"interactive_pw_effect_sizes.py",
"alpha_diversity.py",
"beta_diversity.py",
"concatenate.py",
"filter_metadata.py",
"rarefy.py",
"run_evident.py"
}
report_dir = os.path.join(workflow_dir, "report")
report_files = os.listdir(report_dir)
assert set(report_files) == {
"effect_size_plot.rst",
"pw_effect_size_plot.rst",
"workflow.rst",
}
| import os
def test_bake_project(cookies, data_paths):
result = cookies.bake(extra_context={
"project_name": "example-benchmark",
"feature_table_file": data_paths.table_file,
"sample_metadata_file": data_paths.metadata_file,
"phylogenetic_tree_file": data_paths.tree_file
})
assert result.exit_code == 0
assert result.exception is None
assert result.project_path.name == "example-benchmark"
assert result.project_path.is_dir()
files = os.listdir(result.project_path)
assert set(files) == {"workflow", "config"}
config_dir = os.path.join(result.project_path, "config")
config_files = os.listdir(config_dir)
assert set(config_files) == {
"alpha_div_metrics.tsv",
"beta_div_metrics.tsv",
"config.yaml"
}
workflow_dir = os.path.join(result.project_path, "workflow")
workflow_files = os.listdir(workflow_dir)
assert set(workflow_files) == {"rules", "Snakefile", "scripts", "report"}
rules_dir = os.path.join(workflow_dir, "rules")
rules_files = os.listdir(rules_dir)
exp_rules = {
"alpha_diversity.smk",
"beta_diversity.smk",
"evident.smk",
"visualization.smk",
"preprocess_data.smk",
}
assert set(rules_files) == exp_rules
scripts_dir = os.path.join(workflow_dir, "scripts")
scripts_files = os.listdir(scripts_dir)
assert set(scripts_files) == {
"interactive_effect_sizes.py",
"interactive_pw_effect_sizes.py",
"alpha_diversity.py",
"beta_diversity.py",
"concatenate.py",
"filter_metadata.py",
"rarefy.py",
"run_evident.py"
}
report_dir = os.path.join(workflow_dir, "report")
report_files = os.listdir(report_dir)
assert set(report_files) == {
"effect_size_plot.rst",
"pw_effect_size_plot.rst",
"workflow.rst",
}
| none | 1 | 2.146236 | 2 | |
hackerrank/python/classes/multiple_inheritance.py | aditya2000/coding-practice | 4 | 6621967 | # Here's a fun example of what's going on with OrderedCounter
class Tractor():
def plow(self):
print("Plowing regular fields on earth!")
class Farm(Tractor):
def clean_barn(self):
print("Mucking the stalls.")
def do_farm_things(self):
self.plow()
self.clean_barn()
# Sweet, we have our Farm, and it has a basic tractor
# But I'm moving to the Moon to start a MoonFarm,
# which is the same as running a regular Farm,
# but we need to use a MoonTractor that's modified for plowing moondirt.
class MoonTractor(Tractor):
def plow(self):
print("Plowing fields on the moon!")
# Now we just need our stub MoonFarm to extend Farm and MoonTractor
'''
Inherit basic functionalities from Farm
Then override a specific function with MoonTractor
'''
class MoonFarm(Farm, MoonTractor):
pass
# moon farm
'''
We didnt even add any functionality to MoonFarm, its still awesome
'''
farm = MoonFarm()
farm.do_farm_things()
# regular farm
farm = Farm()
farm.do_farm_things()
| # Here's a fun example of what's going on with OrderedCounter
class Tractor():
def plow(self):
print("Plowing regular fields on earth!")
class Farm(Tractor):
def clean_barn(self):
print("Mucking the stalls.")
def do_farm_things(self):
self.plow()
self.clean_barn()
# Sweet, we have our Farm, and it has a basic tractor
# But I'm moving to the Moon to start a MoonFarm,
# which is the same as running a regular Farm,
# but we need to use a MoonTractor that's modified for plowing moondirt.
class MoonTractor(Tractor):
def plow(self):
print("Plowing fields on the moon!")
# Now we just need our stub MoonFarm to extend Farm and MoonTractor
'''
Inherit basic functionalities from Farm
Then override a specific function with MoonTractor
'''
class MoonFarm(Farm, MoonTractor):
pass
# moon farm
'''
We didnt even add any functionality to MoonFarm, its still awesome
'''
farm = MoonFarm()
farm.do_farm_things()
# regular farm
farm = Farm()
farm.do_farm_things()
| en | 0.953791 | # Here's a fun example of what's going on with OrderedCounter # Sweet, we have our Farm, and it has a basic tractor # But I'm moving to the Moon to start a MoonFarm, # which is the same as running a regular Farm, # but we need to use a MoonTractor that's modified for plowing moondirt. # Now we just need our stub MoonFarm to extend Farm and MoonTractor Inherit basic functionalities from Farm Then override a specific function with MoonTractor # moon farm We didnt even add any functionality to MoonFarm, its still awesome # regular farm | 3.699408 | 4 |
importfrom/__init__.py | libeclipse/import-from | 23 | 6621968 | #!/usr/bin/env python
from __future__ import print_function, unicode_literals
from bs4 import BeautifulSoup
import requests
import json
def request(url):
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://" + url
response = requests.get(url)
if response.status_code != 200:
raise Exception('Could not reach that URL. [%d]' % response.status_code)
return response.text
def magic(code):
namespace = {}
code = compile(code, '<string>', 'exec')
exec(code, namespace)
return namespace
def pastebin(url):
if not 'raw' in url:
url = 'http://pastebin.com/raw/%s' % url.split('/')[-1].strip()
return magic(request(url))
def twitter(url):
page = BeautifulSoup(request(url), "html.parser")
text = page.body.find('div', attrs={'class':'js-tweet-text-container'}).text.strip()
return magic(text)
def gist(url):
if not '/raw' in url:
url = url + '/raw'
return magic(request(url))
def dns(domain):
response = json.loads(request('https://dns.google.com/resolve?name=%s&type=TXT' % domain))
return magic(response['Answer'][0]['data'][1:-1])
# Run tests.
if __name__ == "__main__":
# Twitter
functions = twitter('https://twitter.com/libeclipse/status/732279611002912769')
hello = functions['hello']
bye = functions['bye']
print('Twitter:\n %s\n %s\n' % (hello('world'), bye('world')))
# Pastebin
functions = pastebin('http://pastebin.com/qAjHYyrs')
hello = functions['hello']
bye = functions['bye']
print('Pastebin:\n %s\n %s\n' % (hello('world'), bye('world')))
# Gist
functions = gist('https://gist.github.com/libeclipse/b240d9b0fff2a65233a30457aad99f12')
hello = functions['hello']
bye = functions['bye']
print('Gist:\n %s\n %s\n' % (hello('world'), bye('world')))
# DNS
hello = dns('importfrom-hello.libeclipse.me')['hello']
bye = dns('importfrom-bye.libeclipse.me')['bye']
print('DNS:\n %s\n %s\n' % (hello('world'), bye('world')))
# Self-implementation
string = """def hello(name):
return 'Hello, %s!' % name
def bye(name):
return 'Bye, %s!' % name"""
functions = magic(string)
hello = functions['hello']
bye = functions['bye']
print('Self Implementation:\n %s\n %s\n' % (hello('world'), bye('world')))
| #!/usr/bin/env python
from __future__ import print_function, unicode_literals
from bs4 import BeautifulSoup
import requests
import json
def request(url):
if not url.startswith("http://") and not url.startswith("https://"):
url = "http://" + url
response = requests.get(url)
if response.status_code != 200:
raise Exception('Could not reach that URL. [%d]' % response.status_code)
return response.text
def magic(code):
namespace = {}
code = compile(code, '<string>', 'exec')
exec(code, namespace)
return namespace
def pastebin(url):
if not 'raw' in url:
url = 'http://pastebin.com/raw/%s' % url.split('/')[-1].strip()
return magic(request(url))
def twitter(url):
page = BeautifulSoup(request(url), "html.parser")
text = page.body.find('div', attrs={'class':'js-tweet-text-container'}).text.strip()
return magic(text)
def gist(url):
if not '/raw' in url:
url = url + '/raw'
return magic(request(url))
def dns(domain):
response = json.loads(request('https://dns.google.com/resolve?name=%s&type=TXT' % domain))
return magic(response['Answer'][0]['data'][1:-1])
# Run tests.
if __name__ == "__main__":
# Twitter
functions = twitter('https://twitter.com/libeclipse/status/732279611002912769')
hello = functions['hello']
bye = functions['bye']
print('Twitter:\n %s\n %s\n' % (hello('world'), bye('world')))
# Pastebin
functions = pastebin('http://pastebin.com/qAjHYyrs')
hello = functions['hello']
bye = functions['bye']
print('Pastebin:\n %s\n %s\n' % (hello('world'), bye('world')))
# Gist
functions = gist('https://gist.github.com/libeclipse/b240d9b0fff2a65233a30457aad99f12')
hello = functions['hello']
bye = functions['bye']
print('Gist:\n %s\n %s\n' % (hello('world'), bye('world')))
# DNS
hello = dns('importfrom-hello.libeclipse.me')['hello']
bye = dns('importfrom-bye.libeclipse.me')['bye']
print('DNS:\n %s\n %s\n' % (hello('world'), bye('world')))
# Self-implementation
string = """def hello(name):
return 'Hello, %s!' % name
def bye(name):
return 'Bye, %s!' % name"""
functions = magic(string)
hello = functions['hello']
bye = functions['bye']
print('Self Implementation:\n %s\n %s\n' % (hello('world'), bye('world')))
| en | 0.52593 | #!/usr/bin/env python # Run tests. # Twitter # Pastebin # Gist # DNS # Self-implementation def hello(name): return 'Hello, %s!' % name def bye(name): return 'Bye, %s!' % name | 3.076821 | 3 |
tests/loader/test_EVENTKG2MLoader.py | jinzhuoran/CogKGE | 18 | 6621969 | <reponame>jinzhuoran/CogKGE
from cogkge import *
import time
loader = EVENTKG2MLoader(path='/home/hongbang/CogKTR/dataset/kr/EVENTKG2M/raw_data',
download=True,
download_path="CogKTR/dataset/")
print("Without Preprocessing:")
start_time = time.time()
train_data,valid_data,test_data = loader.load_all_data()
node_lut,relation_lut,time_lut = loader.load_all_lut()
processor = EVENTKG2MProcessor(node_lut,relation_lut,time_lut)
train_dataset = processor.process(train_data)
valid_dataset = processor.process(valid_data)
test_dataset = processor.process(test_data)
for i in range(2):
print(train_dataset[i])
print(valid_dataset[i])
print(test_dataset[i])
print("Train:{} Valid:{} Test:{}".format(len(train_dataset),
len(valid_dataset),
len(test_dataset)))
print("--- %s seconds ---" % (time.time() - start_time))
| from cogkge import *
import time
loader = EVENTKG2MLoader(path='/home/hongbang/CogKTR/dataset/kr/EVENTKG2M/raw_data',
download=True,
download_path="CogKTR/dataset/")
print("Without Preprocessing:")
start_time = time.time()
train_data,valid_data,test_data = loader.load_all_data()
node_lut,relation_lut,time_lut = loader.load_all_lut()
processor = EVENTKG2MProcessor(node_lut,relation_lut,time_lut)
train_dataset = processor.process(train_data)
valid_dataset = processor.process(valid_data)
test_dataset = processor.process(test_data)
for i in range(2):
print(train_dataset[i])
print(valid_dataset[i])
print(test_dataset[i])
print("Train:{} Valid:{} Test:{}".format(len(train_dataset),
len(valid_dataset),
len(test_dataset)))
print("--- %s seconds ---" % (time.time() - start_time)) | none | 1 | 2.568166 | 3 | |
backend/drugs/models.py | hippocampus13/IndianMedicineDB | 0 | 6621970 | from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract model which provides each model with a created and modified field
"""
created_on=models.DateTimeField(auto_now_add=True)
modified_on=models.DateTimeField(auto_now=True)
class Meta:
abstract=True
class Manufacturer(TimeStampedModel):
name = models.CharField(verbose_name='Manufacturer Name', max_length=100, unique=True)
def __str__(self):
return self.name
class DrugComposition(TimeStampedModel):
short_composition = models.CharField(max_length=600, unique=True)
def __str__(self):
return self.short_composition
class PackSizeLabel(TimeStampedModel):
label = models.CharField(max_length=100, default='', unique=True)
def __str__(self):
return self.label
class DataSource(TimeStampedModel):
name = models.CharField(max_length=50, unique=True)
source_url = models.URLField(null=True, blank=True) #how to show that the datasource was entered by a user?
def __str__(self):
return self.name
class DrugType(TimeStampedModel):
type_of_drug = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.type_of_drug
class Drug(TimeStampedModel):
sku_id = models.IntegerField(verbose_name='SKU ID', primary_key=True, db_index=True)
name = models.CharField(verbose_name='Drug Name', max_length=200, db_index=True)
manufacturer_name = models.ForeignKey(to=Manufacturer, on_delete=models.PROTECT, related_name='manufacturer')
drug_type = models.ForeignKey(to=DrugType, on_delete=models.PROTECT, related_name='drug_type')
pack_size_label = models.ForeignKey(to=PackSizeLabel, on_delete=models.PROTECT, related_name='packsize')
price = models.DecimalField(max_digits=7, decimal_places=2, null=True)
rx_required = models.BooleanField(verbose_name='Prescription Required', null=True)
short_composition = models.ForeignKey(to=DrugComposition, on_delete=models.PROTECT, related_name='composition')
is_discontinued = models.BooleanField()
data_source = models.ForeignKey(to = DataSource, on_delete=models.PROTECT, related_name='source')
def __str__(self):
return f'{self.data_source}-{self.sku_id}-{self.name}-{self.manufacturer_name}'
| from django.db import models
class TimeStampedModel(models.Model):
"""
An abstract model which provides each model with a created and modified field
"""
created_on=models.DateTimeField(auto_now_add=True)
modified_on=models.DateTimeField(auto_now=True)
class Meta:
abstract=True
class Manufacturer(TimeStampedModel):
name = models.CharField(verbose_name='Manufacturer Name', max_length=100, unique=True)
def __str__(self):
return self.name
class DrugComposition(TimeStampedModel):
short_composition = models.CharField(max_length=600, unique=True)
def __str__(self):
return self.short_composition
class PackSizeLabel(TimeStampedModel):
label = models.CharField(max_length=100, default='', unique=True)
def __str__(self):
return self.label
class DataSource(TimeStampedModel):
name = models.CharField(max_length=50, unique=True)
source_url = models.URLField(null=True, blank=True) #how to show that the datasource was entered by a user?
def __str__(self):
return self.name
class DrugType(TimeStampedModel):
type_of_drug = models.CharField(max_length=30, unique=True)
def __str__(self):
return self.type_of_drug
class Drug(TimeStampedModel):
sku_id = models.IntegerField(verbose_name='SKU ID', primary_key=True, db_index=True)
name = models.CharField(verbose_name='Drug Name', max_length=200, db_index=True)
manufacturer_name = models.ForeignKey(to=Manufacturer, on_delete=models.PROTECT, related_name='manufacturer')
drug_type = models.ForeignKey(to=DrugType, on_delete=models.PROTECT, related_name='drug_type')
pack_size_label = models.ForeignKey(to=PackSizeLabel, on_delete=models.PROTECT, related_name='packsize')
price = models.DecimalField(max_digits=7, decimal_places=2, null=True)
rx_required = models.BooleanField(verbose_name='Prescription Required', null=True)
short_composition = models.ForeignKey(to=DrugComposition, on_delete=models.PROTECT, related_name='composition')
is_discontinued = models.BooleanField()
data_source = models.ForeignKey(to = DataSource, on_delete=models.PROTECT, related_name='source')
def __str__(self):
return f'{self.data_source}-{self.sku_id}-{self.name}-{self.manufacturer_name}'
| en | 0.984336 | An abstract model which provides each model with a created and modified field #how to show that the datasource was entered by a user? | 2.704995 | 3 |
src/pipeline/pipeline.py | lyonva/Nue | 0 | 6621971 | from sklearn.pipeline import FeatureUnion, _transform_one, _fit_transform_one
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
class FeatureJoin(FeatureUnion):
def transform(self, X):
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return pd.concat(Xs, axis=1)
def fit_transform(self, X, y=None, **fit_params):
results = self._parallel_func(X, y, fit_params, _fit_transform_one)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return pd.concat(Xs, axis=1)
| from sklearn.pipeline import FeatureUnion, _transform_one, _fit_transform_one
from joblib import Parallel, delayed
import numpy as np
import pandas as pd
class FeatureJoin(FeatureUnion):
def transform(self, X):
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, X, None, weight)
for name, trans, weight in self._iter())
if not Xs:
# All transformers are None
return np.zeros((X.shape[0], 0))
return pd.concat(Xs, axis=1)
def fit_transform(self, X, y=None, **fit_params):
results = self._parallel_func(X, y, fit_params, _fit_transform_one)
if not results:
# All transformers are None
return np.zeros((X.shape[0], 0))
Xs, transformers = zip(*results)
self._update_transformer_list(transformers)
return pd.concat(Xs, axis=1)
| en | 0.917324 | # All transformers are None # All transformers are None | 2.913344 | 3 |
demo/demo_calls.py | anjuchamantha/cellyzer---CDR-data-analyzer | 11 | 6621972 | <gh_stars>10-100
"""
This is for manual testing the library
"""
import cellyzer as cz
call_file_path = "demo_datasets/test_data/calls.csv"
# callDataSet = cz.read_call(call_file_path)
cz.read_msg("demo_datasets/test_data/messages.csv")
cz.read_cell("demo_datasets/test_data/antennas.csv")
callDataSet = cz.read_call(file_path="demo_datasets/test_data/excel data/calls.xlsx", file_type="xlsx")
cz.read_msg(file_path="demo_datasets/test_data/excel data/messages.xlsx", file_type="xlsx")
cz.read_cell(file_path="demo_datasets/test_data/excel data/cell.xlsx", file_type="xlsx")
cz.read_call(file_path="demo_datasets/test_data/json data/call.json", file_type="json")
cz.read_msg(file_path="demo_datasets/test_data/json data/message.json", file_type="json")
cz.read_cell(file_path="demo_datasets/test_data/json data/cell.json", file_type="json")
# print(type(callDataSet).__name__)
# cz.utils.print_dataset(callDataSet, name="Call Dataset")
#
# all_users_of_calls = callDataSet.get_all_users()
# print(">> All Users in call dataSet : %s \n" % all_users_of_calls)
#
search_user_call_1 = "3e97992791"
search_user_call_2 = "265034e175"
search_user_call_3 = '329233d117'
#
# connected_users_calls = callDataSet.get_connected_users(search_user_call_1)
# print(">> Users connected to %s : %s \n" % (search_user_call_1, connected_users_calls))
#
# user_call_record_list = callDataSet.get_records(search_user_call_1, search_user_call_2)
# print(">> call records between %s and %s" % (search_user_call_1, search_user_call_2))
# cz.utils.print_record_lists(user_call_record_list)
#
# user_call_dataset = cz.MessageDataSet(user_call_record_list)
# cz.utils.print_dataset(user_call_dataset, name="User Records of %s" % search_user_call_1 + " " + search_user_call_2)
#
callDataSet.print_connection_matrix()
#
search_user_list = ["265034e175", "e98994c239", "f0860ea982"]
callDataSet.visualize_connection_network(users=search_user_list, gui=True)
#
# close_contacts = callDataSet.get_close_contacts(search_user_call_3, top_contact=4)
# print(">> close contacts of %s :" % search_user_call_1)
# cz.utils.print_close_contacts(close_contacts)
#
active_time = callDataSet.get_most_active_time(search_user_call_1)
print(">> most active times during day of %s - %s" % (search_user_call_1, active_time))
cz.visualization.active_time_bar_chart(active_time, user=search_user_call_1)
#
# ignored_call_details = callDataSet.get_ignored_call_details(search_user_call_3)
# print(">> Ignored calls details : ")
# cz.utils.tabulate_list_of_dictionaries(ignored_call_details)
#
# call_records_around_cell = callDataSet.get_call_records_by_antenna_id(cell_id='1')
# print(">> Number of call records around cell_id - %s = %s" % (1, len(call_records_around_cell)))
#
# call_connections = callDataSet.get_connections(users=search_user_list)
# print(">> All the users connected to %s :" % search_user_list)
# print(call_connections)
| """
This is for manual testing the library
"""
import cellyzer as cz
call_file_path = "demo_datasets/test_data/calls.csv"
# callDataSet = cz.read_call(call_file_path)
cz.read_msg("demo_datasets/test_data/messages.csv")
cz.read_cell("demo_datasets/test_data/antennas.csv")
callDataSet = cz.read_call(file_path="demo_datasets/test_data/excel data/calls.xlsx", file_type="xlsx")
cz.read_msg(file_path="demo_datasets/test_data/excel data/messages.xlsx", file_type="xlsx")
cz.read_cell(file_path="demo_datasets/test_data/excel data/cell.xlsx", file_type="xlsx")
cz.read_call(file_path="demo_datasets/test_data/json data/call.json", file_type="json")
cz.read_msg(file_path="demo_datasets/test_data/json data/message.json", file_type="json")
cz.read_cell(file_path="demo_datasets/test_data/json data/cell.json", file_type="json")
# print(type(callDataSet).__name__)
# cz.utils.print_dataset(callDataSet, name="Call Dataset")
#
# all_users_of_calls = callDataSet.get_all_users()
# print(">> All Users in call dataSet : %s \n" % all_users_of_calls)
#
search_user_call_1 = "3e97992791"
search_user_call_2 = "265034e175"
search_user_call_3 = '329233d117'
#
# connected_users_calls = callDataSet.get_connected_users(search_user_call_1)
# print(">> Users connected to %s : %s \n" % (search_user_call_1, connected_users_calls))
#
# user_call_record_list = callDataSet.get_records(search_user_call_1, search_user_call_2)
# print(">> call records between %s and %s" % (search_user_call_1, search_user_call_2))
# cz.utils.print_record_lists(user_call_record_list)
#
# user_call_dataset = cz.MessageDataSet(user_call_record_list)
# cz.utils.print_dataset(user_call_dataset, name="User Records of %s" % search_user_call_1 + " " + search_user_call_2)
#
callDataSet.print_connection_matrix()
#
search_user_list = ["265034e175", "e98994c239", "f0860ea982"]
callDataSet.visualize_connection_network(users=search_user_list, gui=True)
#
# close_contacts = callDataSet.get_close_contacts(search_user_call_3, top_contact=4)
# print(">> close contacts of %s :" % search_user_call_1)
# cz.utils.print_close_contacts(close_contacts)
#
active_time = callDataSet.get_most_active_time(search_user_call_1)
print(">> most active times during day of %s - %s" % (search_user_call_1, active_time))
cz.visualization.active_time_bar_chart(active_time, user=search_user_call_1)
#
# ignored_call_details = callDataSet.get_ignored_call_details(search_user_call_3)
# print(">> Ignored calls details : ")
# cz.utils.tabulate_list_of_dictionaries(ignored_call_details)
#
# call_records_around_cell = callDataSet.get_call_records_by_antenna_id(cell_id='1')
# print(">> Number of call records around cell_id - %s = %s" % (1, len(call_records_around_cell)))
#
# call_connections = callDataSet.get_connections(users=search_user_list)
# print(">> All the users connected to %s :" % search_user_list)
# print(call_connections) | en | 0.470946 | This is for manual testing the library # callDataSet = cz.read_call(call_file_path) # print(type(callDataSet).__name__) # cz.utils.print_dataset(callDataSet, name="Call Dataset") # # all_users_of_calls = callDataSet.get_all_users() # print(">> All Users in call dataSet : %s \n" % all_users_of_calls) # # # connected_users_calls = callDataSet.get_connected_users(search_user_call_1) # print(">> Users connected to %s : %s \n" % (search_user_call_1, connected_users_calls)) # # user_call_record_list = callDataSet.get_records(search_user_call_1, search_user_call_2) # print(">> call records between %s and %s" % (search_user_call_1, search_user_call_2)) # cz.utils.print_record_lists(user_call_record_list) # # user_call_dataset = cz.MessageDataSet(user_call_record_list) # cz.utils.print_dataset(user_call_dataset, name="User Records of %s" % search_user_call_1 + " " + search_user_call_2) # # # # close_contacts = callDataSet.get_close_contacts(search_user_call_3, top_contact=4) # print(">> close contacts of %s :" % search_user_call_1) # cz.utils.print_close_contacts(close_contacts) # # # ignored_call_details = callDataSet.get_ignored_call_details(search_user_call_3) # print(">> Ignored calls details : ") # cz.utils.tabulate_list_of_dictionaries(ignored_call_details) # # call_records_around_cell = callDataSet.get_call_records_by_antenna_id(cell_id='1') # print(">> Number of call records around cell_id - %s = %s" % (1, len(call_records_around_cell))) # # call_connections = callDataSet.get_connections(users=search_user_list) # print(">> All the users connected to %s :" % search_user_list) # print(call_connections) | 2.434094 | 2 |
__init__.py | ananya-007/smartapi-python-custom | 1 | 6621973 | from __future__ import unicode_literals,absolute_import
from smartapi.smartConnect import SmartConnect
# from smartapi.webSocket import WebSocket
from smartapi.smartApiWebsocket import SmartWebSocket
__all__ = ["SmartConnect","SmartWebSocket"]
| from __future__ import unicode_literals,absolute_import
from smartapi.smartConnect import SmartConnect
# from smartapi.webSocket import WebSocket
from smartapi.smartApiWebsocket import SmartWebSocket
__all__ = ["SmartConnect","SmartWebSocket"]
| ru | 0.190302 | # from smartapi.webSocket import WebSocket | 1.350858 | 1 |
alembic/versions/26319c44a8d5_state_machine_states_extended.py | albertwo1978/atst | 1 | 6621974 | <gh_stars>1-10
"""state machine states extended
Revision ID: 26319c44a8d5
Revises: 59973fa17ded
Create Date: 2020-01-22 15:54:03.186751
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "26319c44a8d5" # pragma: allowlist secret
down_revision = "59973fa17ded" # pragma: allowlist secret
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"portfolio_state_machines",
"state",
existing_type=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATED",
"BILLING_PROFILE_IN_PROGRESS",
"BILLING_PROFILE_FAILED",
"ADMIN_SUBSCRIPTION_CREATED",
"ADMIN_SUBSCRIPTION_IN_PROGRESS",
"ADMIN_SUBSCRIPTION_FAILED",
name="fsmstates",
native_enum=False,
),
type_=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATION_CREATED",
"BILLING_PROFILE_CREATION_IN_PROGRESS",
"BILLING_PROFILE_CREATION_FAILED",
"BILLING_PROFILE_VERIFICATION_CREATED",
"BILLING_PROFILE_VERIFICATION_IN_PROGRESS",
"BILLING_PROFILE_VERIFICATION_FAILED",
"BILLING_PROFILE_TENANT_ACCESS_CREATED",
"BILLING_PROFILE_TENANT_ACCESS_IN_PROGRESS",
"BILLING_PROFILE_TENANT_ACCESS_FAILED",
"TASK_ORDER_BILLING_CREATION_CREATED",
"TASK_ORDER_BILLING_CREATION_IN_PROGRESS",
"TASK_ORDER_BILLING_CREATION_FAILED",
"TASK_ORDER_BILLING_VERIFICATION_CREATED",
"TASK_ORDER_BILLING_VERIFICATION_IN_PROGRESS",
"TASK_ORDER_BILLING_VERIFICATION_FAILED",
"BILLING_INSTRUCTION_CREATED",
"BILLING_INSTRUCTION_IN_PROGRESS",
"BILLING_INSTRUCTION_FAILED",
name="fsmstates",
native_enum=False,
create_constraint=False,
),
existing_nullable=False,
)
# ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # States present in both the old and the new version of the enum.
    common_states = (
        "UNSTARTED",
        "STARTING",
        "STARTED",
        "COMPLETED",
        "FAILED",
        "TENANT_CREATED",
        "TENANT_IN_PROGRESS",
        "TENANT_FAILED",
    )
    lifecycle = ("CREATED", "IN_PROGRESS", "FAILED")
    # Expanded enum introduced by this revision's upgrade().
    expanded_states = common_states + tuple(
        "{}_{}".format(stage, step)
        for stage in (
            "BILLING_PROFILE_CREATION",
            "BILLING_PROFILE_VERIFICATION",
            "BILLING_PROFILE_TENANT_ACCESS",
            "TASK_ORDER_BILLING_CREATION",
            "TASK_ORDER_BILLING_VERIFICATION",
            "BILLING_INSTRUCTION",
        )
        for step in lifecycle
    )
    # Coarse enum restored when rolling back.
    coarse_states = common_states + tuple(
        "{}_{}".format(stage, step)
        for stage in ("BILLING_PROFILE", "ADMIN_SUBSCRIPTION")
        for step in lifecycle
    )
    op.alter_column(
        "portfolio_state_machines",
        "state",
        existing_type=sa.Enum(*expanded_states, name="fsmstates", native_enum=False),
        type_=sa.Enum(*coarse_states, name="fsmstates", native_enum=False),
        existing_nullable=False,
    )
    # ### end Alembic commands ###
| """state machine states extended
Revision ID: 26319c44a8d5
Revises: 59973fa17ded
Create Date: 2020-01-22 15:54:03.186751
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "26319c44a8d5" # pragma: allowlist secret
down_revision = "59973fa17ded" # pragma: allowlist secret
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"portfolio_state_machines",
"state",
existing_type=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATED",
"BILLING_PROFILE_IN_PROGRESS",
"BILLING_PROFILE_FAILED",
"ADMIN_SUBSCRIPTION_CREATED",
"ADMIN_SUBSCRIPTION_IN_PROGRESS",
"ADMIN_SUBSCRIPTION_FAILED",
name="fsmstates",
native_enum=False,
),
type_=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATION_CREATED",
"BILLING_PROFILE_CREATION_IN_PROGRESS",
"BILLING_PROFILE_CREATION_FAILED",
"BILLING_PROFILE_VERIFICATION_CREATED",
"BILLING_PROFILE_VERIFICATION_IN_PROGRESS",
"BILLING_PROFILE_VERIFICATION_FAILED",
"BILLING_PROFILE_TENANT_ACCESS_CREATED",
"BILLING_PROFILE_TENANT_ACCESS_IN_PROGRESS",
"BILLING_PROFILE_TENANT_ACCESS_FAILED",
"TASK_ORDER_BILLING_CREATION_CREATED",
"TASK_ORDER_BILLING_CREATION_IN_PROGRESS",
"TASK_ORDER_BILLING_CREATION_FAILED",
"TASK_ORDER_BILLING_VERIFICATION_CREATED",
"TASK_ORDER_BILLING_VERIFICATION_IN_PROGRESS",
"TASK_ORDER_BILLING_VERIFICATION_FAILED",
"BILLING_INSTRUCTION_CREATED",
"BILLING_INSTRUCTION_IN_PROGRESS",
"BILLING_INSTRUCTION_FAILED",
name="fsmstates",
native_enum=False,
create_constraint=False,
),
existing_nullable=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"portfolio_state_machines",
"state",
existing_type=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATION_CREATED",
"BILLING_PROFILE_CREATION_IN_PROGRESS",
"BILLING_PROFILE_CREATION_FAILED",
"BILLING_PROFILE_VERIFICATION_CREATED",
"BILLING_PROFILE_VERIFICATION_IN_PROGRESS",
"BILLING_PROFILE_VERIFICATION_FAILED",
"BILLING_PROFILE_TENANT_ACCESS_CREATED",
"BILLING_PROFILE_TENANT_ACCESS_IN_PROGRESS",
"BILLING_PROFILE_TENANT_ACCESS_FAILED",
"TASK_ORDER_BILLING_CREATION_CREATED",
"TASK_ORDER_BILLING_CREATION_IN_PROGRESS",
"TASK_ORDER_BILLING_CREATION_FAILED",
"TASK_ORDER_BILLING_VERIFICATION_CREATED",
"TASK_ORDER_BILLING_VERIFICATION_IN_PROGRESS",
"TASK_ORDER_BILLING_VERIFICATION_FAILED",
"BILLING_INSTRUCTION_CREATED",
"BILLING_INSTRUCTION_IN_PROGRESS",
"BILLING_INSTRUCTION_FAILED",
name="fsmstates",
native_enum=False,
),
type_=sa.Enum(
"UNSTARTED",
"STARTING",
"STARTED",
"COMPLETED",
"FAILED",
"TENANT_CREATED",
"TENANT_IN_PROGRESS",
"TENANT_FAILED",
"BILLING_PROFILE_CREATED",
"BILLING_PROFILE_IN_PROGRESS",
"BILLING_PROFILE_FAILED",
"ADMIN_SUBSCRIPTION_CREATED",
"ADMIN_SUBSCRIPTION_IN_PROGRESS",
"ADMIN_SUBSCRIPTION_FAILED",
name="fsmstates",
native_enum=False,
),
existing_nullable=False,
)
# ### end Alembic commands ### | en | 0.502199 | state machine states extended Revision ID: 26319c44a8d5 Revises: 59973fa17ded Create Date: 2020-01-22 15:54:03.186751 # revision identifiers, used by Alembic. # pragma: allowlist secret # pragma: allowlist secret # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### # ### commands auto generated by Alembic - please adjust! ### # ### end Alembic commands ### | 1.358182 | 1 |
tensormap-server/shared/request/response.py | SahanDisa/tensormap | 34 | 6621975 |
def generic_response(status_code, success, message, data=None):
    """Build the standard (payload, status) pair returned by API handlers.

    :param status_code: HTTP status code to return alongside the payload.
    :param success: whether the request was handled successfully.
    :param message: human-readable description of the outcome.
    :param data: optional response payload; defaults to None.
    :rtype: object
    """
    return (
        {"success": success, "message": message, "data": data},
        status_code,
    )
|
def generic_response(status_code, success, message, data=None):
"""
This function handles all the requests going through the backend
:rtype: object
"""
response = {
"success": success,
"message": message,
"data": data
}
return response, status_code
| en | 0.751512 | This function handles all the requests going through the backend :rtype: object | 2.38062 | 2 |
src/rosetta/utils/__init__.py | Zi-SH/rosetta | 0 | 6621976 | """Module with various utilities for the bot."""
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from discord import Message
from discord.ext.commands import Context
from typing import Callable, Union
logger = logging.getLogger(__name__)
async def send_message(context, message):  # pragma: no cover
    """Forward *message* through *context*, doing nothing when no context was supplied."""
    if not context:
        return
    await context.send(message)
async def load_module(client, module, context=None):  # pragma: no cover
    """Load a certain module. Returns whether or not the loading succeeded."""
    qualified = f"rosetta.{module}"
    try:
        client.load_extension(qualified)
        logger.info("Module %s was successfully loaded.", qualified)
        await send_message(context, f"`{qualified}` was successfully loaded.")
    except (AttributeError, ImportError) as err:
        logger.error("Failed to load module %s: %s", qualified, err)
        await send_message(
            context,
            f"`{qualified}` could not be loaded due to an error. Please check the logs.",
        )
        return False
    return True
async def unload_module(client, module, context=None):  # pragma: no cover
    """Unload a certain module. Returns whether or not the unloading succeeded."""
    qualified = f"rosetta.{module}"
    try:
        client.unload_extension(qualified)
        logger.info("Module %s was successfully unloaded.", qualified)
        await send_message(context, f"`{qualified}` was successfully unloaded.")
    except (AttributeError, ImportError) as err:
        logger.error("Failed to unload module %s: %s", qualified, err)
        await send_message(
            context,
            f"`{qualified}` could not be unloaded due to an error. Please check the logs.",
        )
        return False
    return True
async def ask(
    context: 'Context',
    prompt: str,
    condition: 'Callable[[Message],bool]',
    timeout: int = 15
) -> 'Union[Message,None]':
    """Utility to make dialogues easier.

    :param context: The discord context.
    :param prompt: The prompt to send.
    :param condition: The condition to accept new messages.
    :param timeout: (Optional) the timeout to wait for. Defaults to 15s.
    :return: The first Message matched, or None if the timeout elapses."""
    import asyncio  # local import: only needed for the timeout fallback

    def check(m: 'Message') -> bool:
        # Accept only messages that satisfy the caller's predicate AND
        # originate from the channel the dialogue was started in.
        cond1 = condition(m)
        return cond1 and m.channel == context.channel

    prompt_msg = await context.send(prompt)
    try:
        # discord.py's wait_for raises asyncio.TimeoutError when no matching
        # message arrives within `timeout` seconds.
        message = await context.bot.wait_for('message', check=check, timeout=timeout)
    except asyncio.TimeoutError:
        # Previously the timeout propagated to the caller and the prompt
        # message was never deleted; honour the documented "or None" return.
        message = None
    finally:
        await prompt_msg.delete()
    return message
| """Module with various utilities for the bot."""
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from discord import Message
from discord.ext.commands import Context
from typing import Callable, Union
logger = logging.getLogger(__name__)
async def send_message(context, message): # pragma: no cover
"""Send a message if the the provided context is not None."""
if context:
await context.send(message)
async def load_module(client, module, context=None): # pragma: no cover
"""Load a certain module. Returns whether or not the loading succeeded."""
try:
module = f"rosetta.{module}"
client.load_extension(module)
logger.info("Module %s was successfully loaded.", module)
await send_message(context, f"`{module}` was successfully loaded.")
return True
except (AttributeError, ImportError) as e:
logger.error("Failed to load module %s: %s", module, e)
await send_message(
context,
f"`{module}` could not be loaded due to an error. Please check the logs.",
)
return False
async def unload_module(client, module, context=None): # pragma: no cover
"""Unload a certain module. Returns whether or not the unloading succeeded."""
try:
module = f"rosetta.{module}"
client.unload_extension(module)
logger.info("Module %s was successfully unloaded.", module)
await send_message(context, f"`{module}` was successfully unloaded.")
return True
except (AttributeError, ImportError) as e:
logger.error("Failed to unload module %s: %s", module, e)
await send_message(
context,
f"`{module}` could not be unloaded due to an error. Please check the logs.",
)
return False
async def ask(
context: 'Context',
prompt: str,
condition: 'Callable[[Message],bool]',
timeout: int = 15
) -> 'Union[Message,None]':
"""Utility to make dialogues easier.
:param context: The discord context.
:param prompt: The prompt to send.
:param condition: The condition to accept new messages.
:param timeout: (Optional) the timeout to wait for. Defaults to 15s.
:return: The first Message matched or None."""
def check(m: 'Message') -> bool:
cond1 = condition(m)
return cond1 and m.channel == context.channel
prompt_msg = await context.send(prompt)
message = await context.bot.wait_for('message', check=check, timeout=timeout)
await prompt_msg.delete()
return message
| en | 0.612544 | Module with various utilities for the bot. # pragma: no cover Send a message if the the provided context is not None. # pragma: no cover Load a certain module. Returns whether or not the loading succeeded. # pragma: no cover Unload a certain module. Returns whether or not the unloading succeeded. Utility to make dialogues easier. :param context: The discord context. :param prompt: The prompt to send. :param condition: The condition to accept new messages. :param timeout: (Optional) the timeout to wait for. Defaults to 15s. :return: The first Message matched or None. | 2.793317 | 3 |
setup.py | ExecutableFile/dweepy | 56 | 6621977 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='dweepy',
version='0.3.0',
description='Dweepy is a Python client for dweet.io',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/paddycarey/dweepy',
packages=[
'dweepy',
],
package_dir={'dweepy':
'dweepy'},
include_package_data=True,
install_requires=['requests >= 2, < 3'],
license="MIT",
zip_safe=False,
keywords='dweepy dweet dweet.io',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
test_suite='tests_dweepy',
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='dweepy',
version='0.3.0',
description='Dweepy is a Python client for dweet.io',
long_description=open('README.rst').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/paddycarey/dweepy',
packages=[
'dweepy',
],
package_dir={'dweepy':
'dweepy'},
include_package_data=True,
install_requires=['requests >= 2, < 3'],
license="MIT",
zip_safe=False,
keywords='dweepy dweet dweet.io',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
test_suite='tests_dweepy',
)
| en | 0.352855 | #!/usr/bin/env python # -*- coding: utf-8 -*- | 1.564403 | 2 |
tests/unit/test_client_token.py | futureironman/braintree_python | 182 | 6621978 | <gh_stars>100-1000
from tests.test_helper import *
class TestClientToken(unittest.TestCase):
    """Unit tests for ClientToken.generate (unittest, ClientToken and
    InvalidSignatureError come from the ``tests.test_helper`` star import)."""

    def test_credit_card_options_require_customer_id(self):
        # Each credit-card option below is only meaningful for an existing
        # customer, so generation without a customer_id must raise an
        # InvalidSignatureError whose message names the offending option.
        # NOTE(review): assertRaisesRegexp is the deprecated alias of
        # assertRaisesRegex (removed in Python 3.12) — confirm before upgrading.
        for option in ["verify_card", "make_default", "fail_on_duplicate_payment_method"]:
            with self.assertRaisesRegexp(InvalidSignatureError, option):
                ClientToken.generate({
                    "options": {option: True}
                })

    def test_generate_delegates_client_token_generation_to_gateway(self):
        # A stub gateway shows that generate() defers token creation entirely
        # to the gateway passed as the second argument.
        class MockGateway:
            def generate(self, _):
                return "mock_client_token"
        mock_gateway = MockGateway()
        client_token = ClientToken.generate({}, mock_gateway)
        self.assertEqual("mock_client_token", client_token)
| from tests.test_helper import *
class TestClientToken(unittest.TestCase):
def test_credit_card_options_require_customer_id(self):
for option in ["verify_card", "make_default", "fail_on_duplicate_payment_method"]:
with self.assertRaisesRegexp(InvalidSignatureError, option):
ClientToken.generate({
"options": {option: True}
})
def test_generate_delegates_client_token_generation_to_gateway(self):
class MockGateway:
def generate(self, _):
return "mock_client_token"
mock_gateway = MockGateway()
client_token = ClientToken.generate({}, mock_gateway)
self.assertEqual("mock_client_token", client_token) | none | 1 | 2.625547 | 3 | |
case_study.py | linshaoxin-maker/taas | 4 | 6621979 | <reponame>linshaoxin-maker/taas
"""
Example 1:
Example 2:
"""
input_ids = [0, 1640, 3480, 4839, 286, 5, 78, 86, 11, 799,
107, 2156, 10, 1012, 7875, 1835, 7, 608, 99, 37,
473, 275, 479, 646, 3388, 510, 1215, 288, 742, 17657,
47385, 3277, 174, 7, 22, 283, 15, 159, 27785, 22,
15, 5, 587, 112, 5403, 9, 22, 20, 3655, 1534,
5143, 22, 13590, 45, 1482, 8238, 13179, 53, 277, 2950,
652, 11, 1427, 9, 5, 7069, 479, 646, 3388, 510,
1215, 134, 742, 2978, 2156, 89, 21, 3045, 21998, 2156,
54, 4457, 5, 1012, 177, 311, 13, 1718, 107, 137,
8296, 159, 11, 3010, 479, 646, 3388, 510, 1215, 176,
742, 7817, 579, 47385, 642, 47385, 1506, 23, 8301, 2156,
21998, 7521, 5, 78, 425, 111, 24224, 177, 9, 5,
311, 2156, 5, 4187, 22, 20088, 7732, 2156, 22, 137,
3408, 5162, 5941, 81, 7, 13179, 2156, 54, 1550, 62,
479, 646, 3388, 510, 1215, 246, 742, 2285, 145, 409,
31, 5, 311, 13, 144, 9, 5, 375, 799, 107,
2156, 21998, 399, 128, 326, 2045, 7, 2649, 10, 1451,
479, 2]
alpha = [0.0031795501708984375, 0.00832366943359375, 0.0014619827270507812, 0.004566192626953125, 0.015869140625,
0.004268646240234375, 0.00799560546875, 0.004039764404296875, 0.0020198822021484375, 0.0016908645629882812,
0.0025081634521484375, 0.0009126663208007812, 0.0019893646240234375, 0.00550079345703125, 0.004436492919921875,
0.00293731689453125, 0.002162933349609375, 0.003231048583984375, 0.0013799667358398438, 0.0008697509765625,
0.0035037994384765625, 0.005931854248046875, 0.0027332305908203125, 0.0008187294006347656, 0.0125274658203125,
0.00232696533203125, 0.002346038818359375, 0.004322052001953125, 0.0035800933837890625, 0.0037822723388671875,
0.01067352294921875, 0.0037670135498046875, 0.01082611083984375, 0.01666259765625, 0.001865386962890625,
0.00434112548828125, 0.006671905517578125, 0.0074462890625, 0.0003857612609863281, 0.0004153251647949219,
0.0052032470703125, 0.007678985595703125, 0.005222320556640625, 0.005954742431640625, 0.0026607513427734375,
0.000732421875, 0.000286102294921875, 0.0017538070678710938, 0.0008673667907714844, 0.0004661083221435547,
0.00023949146270751953, 0.0005674362182617188, 0.0138397216796875, 0.01413726806640625, 0.005115509033203125,
0.01204681396484375, 0.0025272369384765625, 0.006237030029296875, 0.00263214111328125, 0.0011262893676757812,
0.0034122467041015625, 0.005062103271484375, 0.016510009765625, 0.01116180419921875, 0.004924774169921875,
0.0003972053527832031, 0.0029659271240234375, 0.0009450912475585938, 0.0157623291015625, 0.0024852752685546875,
0.0018320083618164062, 0.00316619873046875, 0.0007638931274414062, 0.00458526611328125, 0.0009603500366210938,
0.002391815185546875, 0.0024738311767578125, 0.00186920166015625, 0.0006661415100097656, 0.00106048583984375,
0.001750946044921875, 0.0029506683349609375, 0.0007586479187011719, 0.0034656524658203125,
0.0007500648498535156, 0.0008573532104492188, 0.0015974044799804688, 0.00078582763671875, 0.000705718994140625,
0.0025653839111328125, 0.0038738250732421875, 0.0048980712890625, 0.0028514862060546875, 0.0010385513305664062,
0.0027179718017578125, 0.0011072158813476562, 0.00891876220703125, 0.0016069412231445312,
0.0015592575073242188, 0.005584716796875, 0.0020198822021484375, 0.0069122314453125, 0.0224151611328125,
0.00726318359375, 0.00586700439453125, 0.006877899169921875, 0.00543975830078125, 0.0028533935546875,
0.0030994415283203125, 0.004184722900390625, 0.004001617431640625, 0.01363372802734375, 0.00405120849609375,
0.005588531494140625, 0.00792694091796875, 0.01294708251953125, 0.0299224853515625, 0.0011472702026367188,
0.00162506103515625, 0.0029621124267578125, 0.0019168853759765625, 0.0015850067138671875,
0.0020427703857421875, 0.003032684326171875, 0.0008373260498046875, 0.0013885498046875, 0.0034236907958984375,
0.0006351470947265625, 0.00048279762268066406, 0.01136016845703125, 0.025146484375, 0.01059722900390625,
0.01200103759765625, 0.01326751708984375, 0.00888824462890625, 0.00200653076171875, 0.0022716522216796875,
0.005954742431640625, 0.0014600753784179688, 0.002254486083984375, 0.0037326812744140625,
0.0009479522705078125, 0.01496124267578125, 0.0029964447021484375, 0.0017271041870117188,
0.0021800994873046875, 0.0009751319885253906, 0.00846099853515625, 0.00766754150390625, 0.00397491455078125,
0.0030345916748046875, 0.00447845458984375, 0.0021266937255859375, 0.0123748779296875, 0.0037097930908203125,
0.00533294677734375, 0.002681732177734375, 0.0017261505126953125, 0.0017986297607421875, 0.0033588409423828125,
0.0019550323486328125, 0.0026378631591796875, 0.029754638671875, 0.04473876953125, 0.062744140625,
0.031494140625, 0.01126861572265625, 0.01105499267578125, 0.0189971923828125, 0.01666259765625,
0.00498199462890625, 0.006877899169921875]
# --- Rank tokens by their (min-max normalised) attention weights ---
index = [i for i in range(len(alpha))]
alpha_min, alpha_max = min(alpha), max(alpha)  # hoisted: avoid O(n) min/max per element
normalized_alpha = [(num - alpha_min) / (alpha_max - alpha_min) for num in alpha]

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
token = [tokenizer.decode(s) for s in input_ids]

import pandas as pd

df = pd.DataFrame({'topic': normalized_alpha, 'index': index, 'token': token})
sorted_df = df.sort_values('topic', ascending=False)
# Print the 20 tokens with the highest normalised attention.
print(list(sorted_df['token'][:20]))

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

# NOTE(review): hard-coded, user-specific path — parameterise before reuse.
# `with` guarantees the handle is closed (the previous bare open() leaked it).
with open("/Users/rachelzheng/Downloads/attention.txt", "r") as f:
    lines = [line.replace('\n', '') for line in f]

# Each line holds space-separated attention weights; sum them per token,
# then min-max normalise the per-token sums.
rows = [[float(num) for num in item.split(" ")] for item in lines]
row_sums = [np.sum(row) for row in rows]
sum_min, sum_max = min(row_sums), max(row_sums)
normalized_c = [(num - sum_min) / (sum_max - sum_min) for num in row_sums]

# Lay the 172-token sequence out as a 43x4 grid for the heatmap.
# NOTE(review): 43 * 4 == 172 must equal len(token) — confirm for other inputs.
results = np.asarray([normalized_c]).reshape(43, 4)
strings = np.asarray([token]).reshape(43, 4)
labels = (np.asarray(["{0}".format(string)
                      for string in zip(strings.flatten())])).reshape(43, 4)
fig, ax = plt.subplots()
sns.heatmap(results, annot=labels, fmt="", cmap='RdYlGn', ax=ax)
plt.show() | """
Example 1:
Example 2:
"""
input_ids = [0, 1640, 3480, 4839, 286, 5, 78, 86, 11, 799,
107, 2156, 10, 1012, 7875, 1835, 7, 608, 99, 37,
473, 275, 479, 646, 3388, 510, 1215, 288, 742, 17657,
47385, 3277, 174, 7, 22, 283, 15, 159, 27785, 22,
15, 5, 587, 112, 5403, 9, 22, 20, 3655, 1534,
5143, 22, 13590, 45, 1482, 8238, 13179, 53, 277, 2950,
652, 11, 1427, 9, 5, 7069, 479, 646, 3388, 510,
1215, 134, 742, 2978, 2156, 89, 21, 3045, 21998, 2156,
54, 4457, 5, 1012, 177, 311, 13, 1718, 107, 137,
8296, 159, 11, 3010, 479, 646, 3388, 510, 1215, 176,
742, 7817, 579, 47385, 642, 47385, 1506, 23, 8301, 2156,
21998, 7521, 5, 78, 425, 111, 24224, 177, 9, 5,
311, 2156, 5, 4187, 22, 20088, 7732, 2156, 22, 137,
3408, 5162, 5941, 81, 7, 13179, 2156, 54, 1550, 62,
479, 646, 3388, 510, 1215, 246, 742, 2285, 145, 409,
31, 5, 311, 13, 144, 9, 5, 375, 799, 107,
2156, 21998, 399, 128, 326, 2045, 7, 2649, 10, 1451,
479, 2]
alpha = [0.0031795501708984375, 0.00832366943359375, 0.0014619827270507812, 0.004566192626953125, 0.015869140625,
0.004268646240234375, 0.00799560546875, 0.004039764404296875, 0.0020198822021484375, 0.0016908645629882812,
0.0025081634521484375, 0.0009126663208007812, 0.0019893646240234375, 0.00550079345703125, 0.004436492919921875,
0.00293731689453125, 0.002162933349609375, 0.003231048583984375, 0.0013799667358398438, 0.0008697509765625,
0.0035037994384765625, 0.005931854248046875, 0.0027332305908203125, 0.0008187294006347656, 0.0125274658203125,
0.00232696533203125, 0.002346038818359375, 0.004322052001953125, 0.0035800933837890625, 0.0037822723388671875,
0.01067352294921875, 0.0037670135498046875, 0.01082611083984375, 0.01666259765625, 0.001865386962890625,
0.00434112548828125, 0.006671905517578125, 0.0074462890625, 0.0003857612609863281, 0.0004153251647949219,
0.0052032470703125, 0.007678985595703125, 0.005222320556640625, 0.005954742431640625, 0.0026607513427734375,
0.000732421875, 0.000286102294921875, 0.0017538070678710938, 0.0008673667907714844, 0.0004661083221435547,
0.00023949146270751953, 0.0005674362182617188, 0.0138397216796875, 0.01413726806640625, 0.005115509033203125,
0.01204681396484375, 0.0025272369384765625, 0.006237030029296875, 0.00263214111328125, 0.0011262893676757812,
0.0034122467041015625, 0.005062103271484375, 0.016510009765625, 0.01116180419921875, 0.004924774169921875,
0.0003972053527832031, 0.0029659271240234375, 0.0009450912475585938, 0.0157623291015625, 0.0024852752685546875,
0.0018320083618164062, 0.00316619873046875, 0.0007638931274414062, 0.00458526611328125, 0.0009603500366210938,
0.002391815185546875, 0.0024738311767578125, 0.00186920166015625, 0.0006661415100097656, 0.00106048583984375,
0.001750946044921875, 0.0029506683349609375, 0.0007586479187011719, 0.0034656524658203125,
0.0007500648498535156, 0.0008573532104492188, 0.0015974044799804688, 0.00078582763671875, 0.000705718994140625,
0.0025653839111328125, 0.0038738250732421875, 0.0048980712890625, 0.0028514862060546875, 0.0010385513305664062,
0.0027179718017578125, 0.0011072158813476562, 0.00891876220703125, 0.0016069412231445312,
0.0015592575073242188, 0.005584716796875, 0.0020198822021484375, 0.0069122314453125, 0.0224151611328125,
0.00726318359375, 0.00586700439453125, 0.006877899169921875, 0.00543975830078125, 0.0028533935546875,
0.0030994415283203125, 0.004184722900390625, 0.004001617431640625, 0.01363372802734375, 0.00405120849609375,
0.005588531494140625, 0.00792694091796875, 0.01294708251953125, 0.0299224853515625, 0.0011472702026367188,
0.00162506103515625, 0.0029621124267578125, 0.0019168853759765625, 0.0015850067138671875,
0.0020427703857421875, 0.003032684326171875, 0.0008373260498046875, 0.0013885498046875, 0.0034236907958984375,
0.0006351470947265625, 0.00048279762268066406, 0.01136016845703125, 0.025146484375, 0.01059722900390625,
0.01200103759765625, 0.01326751708984375, 0.00888824462890625, 0.00200653076171875, 0.0022716522216796875,
0.005954742431640625, 0.0014600753784179688, 0.002254486083984375, 0.0037326812744140625,
0.0009479522705078125, 0.01496124267578125, 0.0029964447021484375, 0.0017271041870117188,
0.0021800994873046875, 0.0009751319885253906, 0.00846099853515625, 0.00766754150390625, 0.00397491455078125,
0.0030345916748046875, 0.00447845458984375, 0.0021266937255859375, 0.0123748779296875, 0.0037097930908203125,
0.00533294677734375, 0.002681732177734375, 0.0017261505126953125, 0.0017986297607421875, 0.0033588409423828125,
0.0019550323486328125, 0.0026378631591796875, 0.029754638671875, 0.04473876953125, 0.062744140625,
0.031494140625, 0.01126861572265625, 0.01105499267578125, 0.0189971923828125, 0.01666259765625,
0.00498199462890625, 0.006877899169921875]
index = [i for i in range(len(alpha))]
normalized_alpha = [(num - min(alpha)) / (max(alpha) - min(alpha)) for num in alpha]
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
token = [tokenizer.decode(s) for s in input_ids]
import pandas as pd
df = pd.DataFrame({'topic': normalized_alpha, 'index': index, 'token': token})
sorted_df = df.sort_values('topic', ascending=False)
print(list(sorted_df['token'][:20]))
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# results = np.random.rand(4, 3)
# strings = strings = np.asarray([['a', 'b', 'c'],
# ['d', 'e', 'f'],
# ['g', 'h', 'i'],
# ['j', 'k', 'l']])
# results = np.asarray([normalized_alpha]).reshape(43,4)
# strings = np.asarray([token]).reshape(43,4)
#
# labels = (np.asarray(["{0}".format(string)
# for string in zip(strings.flatten())])).reshape(43,4)
#
#
# fig, ax = plt.subplots()
# sns.heatmap(results, annot=labels, fmt="", cmap='RdYlGn', ax=ax)
# plt.show()
f = open("/Users/rachelzheng/Downloads/attention.txt", "r")
l = []
for line in f:
l.append(line.replace('\n', ''))
a = [item.split(" ") for item in l]
b = []
for i in range(len(a)):
b.append([float(num) for num in a[i]])
import numpy as np
c = []
for line in b:
c.append(np.sum(line))
normalized_c = [(num-min(c))/(max(c)-min(c)) for num in c]
results = np.asarray([normalized_c]).reshape(43,4)
strings = np.asarray([token]).reshape(43,4)
labels = (np.asarray(["{0}".format(string)
for string in zip(strings.flatten())])).reshape(43,4)
fig, ax = plt.subplots()
sns.heatmap(results, annot=labels, fmt="", cmap='RdYlGn', ax=ax)
plt.show() | en | 0.233998 | Example 1: Example 2: # results = np.random.rand(4, 3) # strings = strings = np.asarray([['a', 'b', 'c'], # ['d', 'e', 'f'], # ['g', 'h', 'i'], # ['j', 'k', 'l']]) # results = np.asarray([normalized_alpha]).reshape(43,4) # strings = np.asarray([token]).reshape(43,4) # # labels = (np.asarray(["{0}".format(string) # for string in zip(strings.flatten())])).reshape(43,4) # # # fig, ax = plt.subplots() # sns.heatmap(results, annot=labels, fmt="", cmap='RdYlGn', ax=ax) # plt.show() | 2.02369 | 2 |
docs/source/examples/test_examples.py | Nurdok/dotprod | 0 | 6621980 | <reponame>Nurdok/dotprod
import json
import pytest
import pathlib
import jsonschema
this_dir = pathlib.Path(__file__).parent
positive_examples = this_dir / 'positive'
negative_examples = this_dir / 'negative'
schema_path = this_dir / '..' / '..' / '..' / 'prod.json'
@pytest.mark.parametrize("example_path", positive_examples.rglob('*.prod'))
def test_positive_examples(example_path):
    """Every positive example must validate against the prod schema."""
    schema = json.loads(schema_path.read_text())
    example = json.loads(example_path.read_text())
    jsonschema.validate(example, schema)
@pytest.mark.parametrize("example_path", negative_examples.rglob('*.prod'))
def test_negative_examples(example_path):
    """Every negative example must be rejected by the prod schema."""
    schema = json.loads(schema_path.read_text())
    example = json.loads(example_path.read_text())
    with pytest.raises(jsonschema.ValidationError):
        jsonschema.validate(example, schema)
| import json
import pytest
import pathlib
import jsonschema
this_dir = pathlib.Path(__file__).parent
positive_examples = this_dir / 'positive'
negative_examples = this_dir / 'negative'
schema_path = this_dir / '..' / '..' / '..' / 'prod.json'
@pytest.mark.parametrize("example_path", positive_examples.rglob('*.prod'))
def test_positive_examples(example_path):
with schema_path.open('rt') as schema_file:
schema = json.load(schema_file)
with example_path.open('rt') as example_file:
example = json.load(example_file)
jsonschema.validate(example, schema)
@pytest.mark.parametrize("example_path", negative_examples.rglob('*.prod'))
def test_negative_examples(example_path):
with schema_path.open('rt') as schema_file:
schema = json.load(schema_file)
with example_path.open('rt') as example_file:
example = json.load(example_file)
with pytest.raises(jsonschema.ValidationError):
jsonschema.validate(example, schema) | none | 1 | 2.413965 | 2 | |
psychometrics/equating.py | deepdatadive/psychometric | 2 | 6621981 | <reponame>deepdatadive/psychometric<gh_stars>1-10
from psychometrics.simulation import simulate_items, simulate_people, item_vectors
from psychometrics.CTT import examinee_score
from psychometrics.test_info import test_descriptives
import pandas as pd
import numpy as np
# Equating
# todo: CTT - Linear Equating
# todo: CTT - Equipercentile Equating
#
# todo: IRT - Fixed Ancor
# todo: IRT - Conversion Equating
# todo concurrent IRT
def mean_equating(initial_data, subsequent_data, sd_equating=False):
    """Equate scores on a subsequent test form to the initial form's scale.

    Mean equating shifts subsequent-form scores by the difference between the
    two forms' average scores; when ``sd_equating`` is True a linear
    (mean/SD) transformation is also applied.

    :param initial_data: item-response data for the reference (initial) form.
    :param subsequent_data: item-response data for the form being equated.
    :param sd_equating: if True, additionally compute linearly equated scores.
    :return: dict with keys 'initial_scores', 'mean_equated_scores' and
        'mean_sd_equated_scores'.
    """
    test1_info = test_descriptives(initial_data)
    test2_info = test_descriptives(subsequent_data)
    # Mean-equating constant: how much higher the initial form scored on average.
    score_difference = test1_info['average_score'] - test2_info['average_score']
    # NOTE(review): 'initial_scores' is computed from subsequent_data, not
    # initial_data — the SD formula below relies on these being the
    # subsequent-form raw scores, so renaming/fixing must be done together.
    initial_scores = examinee_score(subsequent_data)
    subsequent_scores = examinee_score(subsequent_data) + score_difference
    print(test1_info, test2_info)
    equating_dict = {
        'initial_scores': initial_scores,
        'mean_equated_scores': subsequent_scores,
        'mean_sd_equated_scores': None
    }
    equating_df = pd.DataFrame(equating_dict)
    if sd_equating == True:
        # Linear equating: y = (sd1/sd2) * x + (mean1 - (sd1/sd2) * mean2),
        # applied to the subsequent-form raw scores stored above.
        sd_division = test1_info['score_sd']/test2_info['score_sd']
        equating_df['mean_sd_equated_scores'] = (sd_division)*equating_df['initial_scores'] + (test1_info['average_score'] - sd_division*test2_info['average_score'])
        # NOTE(review): only this branch converts the DataFrame back with
        # to_dict(), so the return shape differs between sd_equating=True
        # (nested index->value dicts) and False (raw arrays) — confirm intent.
        equating_dict = equating_df.to_dict()
    return equating_dict
# --- Demo: simulate two forms taken by the same examinees, then equate ---
items1 = simulate_items()
# Second form is harder on average (difficulty mean shifted by +1).
items2= simulate_items(difficulty={'mean':1, 'sd':1})
people = simulate_people(100, {'mean': 0, 'sd': 1})
# item_vectors returns (response probabilities, simulated dichotomous responses).
prob_vector1, response_vector1 = item_vectors(items1, people)
prob_vector2, response_vector2 = item_vectors(items2, people)
examinee_scores1 = examinee_score(response_vector1)
examinee_scores2 = examinee_score(response_vector2)
print(mean_equating(response_vector1, response_vector2, sd_equating=True)) | from psychometrics.simulation import simulate_items, simulate_people, item_vectors
from psychometrics.CTT import examinee_score
from psychometrics.test_info import test_descriptives
import pandas as pd
import numpy as np
# Equating
# todo: CTT - Linear Equating
# Ctodo: TT - Equipercentile Equating
#
# todo: IRT - Fixed Ancor
# todo: IRT - Conversion Equating
# todo concurrent IRT
def mean_equating(initial_data, subsequent_data, sd_equating=False):
test1_info = test_descriptives(initial_data)
test2_info = test_descriptives(subsequent_data)
score_difference = test1_info['average_score'] - test2_info['average_score']
initial_scores = examinee_score(subsequent_data)
subsequent_scores = examinee_score(subsequent_data) + score_difference
print(test1_info, test2_info)
equating_dict = {
'initial_scores': initial_scores,
'mean_equated_scores': subsequent_scores,
'mean_sd_equated_scores': None
}
equating_df = pd.DataFrame(equating_dict)
if sd_equating == True:
sd_division = test1_info['score_sd']/test2_info['score_sd']
equating_df['mean_sd_equated_scores'] = (sd_division)*equating_df['initial_scores'] + (test1_info['average_score'] - sd_division*test2_info['average_score'])
equating_dict = equating_df.to_dict()
return equating_dict
items1 = simulate_items()
items2= simulate_items(difficulty={'mean':1, 'sd':1})
people = simulate_people(100, {'mean': 0, 'sd': 1})
prob_vector1, response_vector1 = item_vectors(items1, people)
prob_vector2, response_vector2 = item_vectors(items2, people)
examinee_scores1 = examinee_score(response_vector1)
examinee_scores2 = examinee_score(response_vector2)
print(mean_equating(response_vector1, response_vector2, sd_equating=True)) | en | 0.260441 | # Equating # todo: CTT - Linear Equating # Ctodo: TT - Equipercentile Equating # # todo: IRT - Fixed Ancor # todo: IRT - Conversion Equating # todo concurrent IRT | 2.729123 | 3 |
mozillians/graphql_profiles/urls.py | divyamoncy/mozillians | 202 | 6621982 | from django.conf import settings
from django.conf.urls import url
from mozillians.graphql_profiles import views
app_name = 'graphql_profiles'
urlpatterns = [
# App level graphQL url
url(r'^$', views.MozilliansGraphQLView.as_view(graphiql=settings.DEV), name='graphql_view'),
]
| from django.conf import settings
from django.conf.urls import url
from mozillians.graphql_profiles import views
app_name = 'graphql_profiles'
urlpatterns = [
# App level graphQL url
url(r'^$', views.MozilliansGraphQLView.as_view(graphiql=settings.DEV), name='graphql_view'),
]
| en | 0.736585 | # App level graphQL url | 1.46738 | 1 |
sparkprs/__init__.py | ymzong/spark-pr-dashboard | 37 | 6621983 | <reponame>ymzong/spark-pr-dashboard
import os
from flask import Flask
from flask.ext.cache import Cache
from werkzeug.contrib.cache import SimpleCache
is_production = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
is_test = os.getenv('CI', '') == 'true'
VERSION = os.environ.get('CURRENT_VERSION_ID', 'UNKNOWN_VERSION')
app = Flask('sparkprs', static_folder="../static", template_folder="../templates")
if is_test:
app.config.from_pyfile('../settings.cfg.template')
elif is_production:
app.config.from_pyfile('../settings.cfg')
else:
app.config.from_pyfile('../settings.cfg.local')
if is_test:
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
else:
cache = Cache(app, config={'CACHE_TYPE': 'memcached', 'CACHE_KEY_PREFIX': VERSION})
| import os
from flask import Flask
from flask.ext.cache import Cache
from werkzeug.contrib.cache import SimpleCache
is_production = os.getenv('SERVER_SOFTWARE', '').startswith('Google App Engine/')
is_test = os.getenv('CI', '') == 'true'
VERSION = os.environ.get('CURRENT_VERSION_ID', 'UNKNOWN_VERSION')
app = Flask('sparkprs', static_folder="../static", template_folder="../templates")
if is_test:
app.config.from_pyfile('../settings.cfg.template')
elif is_production:
app.config.from_pyfile('../settings.cfg')
else:
app.config.from_pyfile('../settings.cfg.local')
if is_test:
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
else:
cache = Cache(app, config={'CACHE_TYPE': 'memcached', 'CACHE_KEY_PREFIX': VERSION}) | none | 1 | 2.173497 | 2 | |
TypeRX_server/chat/migrations/0002_auto_20180730_0227.py | kamaljohnson/TypRX-GAME | 1 | 6621984 | <reponame>kamaljohnson/TypRX-GAME
# Generated by Django 2.0.7 on 2018-07-29 20:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chat', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='chatmessages',
name='thread',
),
migrations.DeleteModel(
name='Thread',
),
]
| # Generated by Django 2.0.7 on 2018-07-29 20:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('chat', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='chatmessages',
name='thread',
),
migrations.DeleteModel(
name='Thread',
),
] | en | 0.658215 | # Generated by Django 2.0.7 on 2018-07-29 20:57 | 1.494304 | 1 |
dominio_ag.py | ITCRStevenLPZ/Proyecto2-Analisis-de-Algoritmos | 0 | 6621985 | from abc import abstractclassmethod
from dominio import Dominio
class DominioAG(Dominio):
"""
Representa el objeto de dominio que conoce los detalles de implementación y modelamiento
de algún problema específico para ser resuelto con algoritmos genéticos.
Métodos:
generar(n)
Construye aleatoriamente una lista de estructuras de datos que representa n
posibles soluciones al problema.
cruzar(sol_a, sol_b)
Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
mutar(sol)
Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
"""
@abstractclassmethod
def generar_n(self, n):
"""Construye aleatoriamente una lista de estructuras de datos que representa n
posibles soluciones al problema.
Entradas:
n (int)
Número de soluciones aleatorias a generar.
Salidas:
(list) Lista que contiene n estructuras de datos, cada una representando
una posible solución al problema modelado por el objeto de dominio.
"""
pass
@abstractclassmethod
def cruzar(self, sol_a, sol_b):
"""Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
Entradas:
sol_a (estructura de datos)
Estructura de datos que modela la solución antecesora A que será cruzada con la B
sol_b (estructura de datos)
Estructura de datos que modela la solución antecesora B que será cruzada con la A
Salidas:
(estructura de datos) Una nueva solución producto del cruzamiento entre las soluciones A y B
"""
pass
@abstractclassmethod
def mutar(self, sol):
"""Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
Entradas:
sol (estructura de datos)
La solución a mutar.
Salidas:
(estructura de datos) Una nueva solución que refleja un ligero cambio con respecto
a la solución dada por parámetro
"""
pass | from abc import abstractclassmethod
from dominio import Dominio
class DominioAG(Dominio):
"""
Representa el objeto de dominio que conoce los detalles de implementación y modelamiento
de algún problema específico para ser resuelto con algoritmos genéticos.
Métodos:
generar(n)
Construye aleatoriamente una lista de estructuras de datos que representa n
posibles soluciones al problema.
cruzar(sol_a, sol_b)
Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
mutar(sol)
Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
"""
@abstractclassmethod
def generar_n(self, n):
"""Construye aleatoriamente una lista de estructuras de datos que representa n
posibles soluciones al problema.
Entradas:
n (int)
Número de soluciones aleatorias a generar.
Salidas:
(list) Lista que contiene n estructuras de datos, cada una representando
una posible solución al problema modelado por el objeto de dominio.
"""
pass
@abstractclassmethod
def cruzar(self, sol_a, sol_b):
"""Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro.
Entradas:
sol_a (estructura de datos)
Estructura de datos que modela la solución antecesora A que será cruzada con la B
sol_b (estructura de datos)
Estructura de datos que modela la solución antecesora B que será cruzada con la A
Salidas:
(estructura de datos) Una nueva solución producto del cruzamiento entre las soluciones A y B
"""
pass
@abstractclassmethod
def mutar(self, sol):
"""Produce una nueva solución aplicando un ligero cambio a la solución dada por
parámetro.
Entradas:
sol (estructura de datos)
La solución a mutar.
Salidas:
(estructura de datos) Una nueva solución que refleja un ligero cambio con respecto
a la solución dada por parámetro
"""
pass | es | 0.938525 | Representa el objeto de dominio que conoce los detalles de implementación y modelamiento de algún problema específico para ser resuelto con algoritmos genéticos. Métodos: generar(n) Construye aleatoriamente una lista de estructuras de datos que representa n posibles soluciones al problema. cruzar(sol_a, sol_b) Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro. mutar(sol) Produce una nueva solución aplicando un ligero cambio a la solución dada por parámetro. Construye aleatoriamente una lista de estructuras de datos que representa n posibles soluciones al problema. Entradas: n (int) Número de soluciones aleatorias a generar. Salidas: (list) Lista que contiene n estructuras de datos, cada una representando una posible solución al problema modelado por el objeto de dominio. Produce una nueva posible solución cruzando las dos soluciones dadas por parámetro. Entradas: sol_a (estructura de datos) Estructura de datos que modela la solución antecesora A que será cruzada con la B sol_b (estructura de datos) Estructura de datos que modela la solución antecesora B que será cruzada con la A Salidas: (estructura de datos) Una nueva solución producto del cruzamiento entre las soluciones A y B Produce una nueva solución aplicando un ligero cambio a la solución dada por parámetro. Entradas: sol (estructura de datos) La solución a mutar. Salidas: (estructura de datos) Una nueva solución que refleja un ligero cambio con respecto a la solución dada por parámetro | 3.857832 | 4 |
scripts/image_saver.py | NehilDanis/shape_registration | 0 | 6621986 | <reponame>NehilDanis/shape_registration<filename>scripts/image_saver.py
#!/usr/bin/env python
# license removed for brevity
import rospy
import sys
from sensor_msgs.msg import Image
import cv2
import os
from cv_bridge import CvBridge, CvBridgeError
# train path
path = "/home/nehil/images_arm"
class ImageSaver():
def __init__(self):
self.set_id = 1
self.num_img = 1
self.count = 273
self.bridge = CvBridge()
self.sub = rospy.Subscriber("/k4a/rgb/image_rect_color", Image, self.callback)
self.rate = rospy.Rate(1) # 1 Hz
# Do stuff, maybe in a while loop
def callback(self, img_msg):
self.rate.sleep() # Sleeps for 1/rate sec
'''if self.num_img <= 32:
try:
cv_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
except CvBridgeError as e:
print(e)
#file_name = "set_" + str(self.set_id) + "_image_" + str(self.num_img) + ".jpg"
file_name = "img" + str(self.count) + ".jpg"
#crop_img = cv_image[:, 420: 1520]
scale_percent = 50
#calculate the 50 percent of original dimensions
width = int(cv_image.shape[1] * 1/3)
height = int(cv_image.shape[0] * 1/3)
# dsize
dsize = (width, height)
# resize image
#cv_image = cv2.resize(cv_image, dsize)
cv2.imwrite(os.path.join(path, file_name), cv_image)
self.num_img += 1
self.count += 1
elif self.num_img == 33:
key = raw_input("press q to quit or change the position of the camera and press c to continue...")
if key == "c":
print("heyy")
self.num_img = 1
self.set_id+= 1'''
try:
cv_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
except CvBridgeError as e:
print(e)
file_name = "img" + str(self.count) + ".jpg"
cv2.imshow("img", cv_image)
cv2.waitKey(1)
cv2.imwrite(os.path.join(path, file_name), cv_image)
self.count += 1
def main(args):
rospy.init_node('ImageSaverNode', anonymous=True)
pp = ImageSaver()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
main(sys.argv)
| #!/usr/bin/env python
# license removed for brevity
import rospy
import sys
from sensor_msgs.msg import Image
import cv2
import os
from cv_bridge import CvBridge, CvBridgeError
# train path
path = "/home/nehil/images_arm"
class ImageSaver():
def __init__(self):
self.set_id = 1
self.num_img = 1
self.count = 273
self.bridge = CvBridge()
self.sub = rospy.Subscriber("/k4a/rgb/image_rect_color", Image, self.callback)
self.rate = rospy.Rate(1) # 1 Hz
# Do stuff, maybe in a while loop
def callback(self, img_msg):
self.rate.sleep() # Sleeps for 1/rate sec
'''if self.num_img <= 32:
try:
cv_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
except CvBridgeError as e:
print(e)
#file_name = "set_" + str(self.set_id) + "_image_" + str(self.num_img) + ".jpg"
file_name = "img" + str(self.count) + ".jpg"
#crop_img = cv_image[:, 420: 1520]
scale_percent = 50
#calculate the 50 percent of original dimensions
width = int(cv_image.shape[1] * 1/3)
height = int(cv_image.shape[0] * 1/3)
# dsize
dsize = (width, height)
# resize image
#cv_image = cv2.resize(cv_image, dsize)
cv2.imwrite(os.path.join(path, file_name), cv_image)
self.num_img += 1
self.count += 1
elif self.num_img == 33:
key = raw_input("press q to quit or change the position of the camera and press c to continue...")
if key == "c":
print("heyy")
self.num_img = 1
self.set_id+= 1'''
try:
cv_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
except CvBridgeError as e:
print(e)
file_name = "img" + str(self.count) + ".jpg"
cv2.imshow("img", cv_image)
cv2.waitKey(1)
cv2.imwrite(os.path.join(path, file_name), cv_image)
self.count += 1
def main(args):
rospy.init_node('ImageSaverNode', anonymous=True)
pp = ImageSaver()
try:
rospy.spin()
except KeyboardInterrupt:
print("Shutting down")
if __name__ == '__main__':
main(sys.argv) | en | 0.638856 | #!/usr/bin/env python # license removed for brevity # train path # 1 Hz # Do stuff, maybe in a while loop # Sleeps for 1/rate sec if self.num_img <= 32: try: cv_image = self.bridge.imgmsg_to_cv2(img_msg, "bgr8") except CvBridgeError as e: print(e) #file_name = "set_" + str(self.set_id) + "_image_" + str(self.num_img) + ".jpg" file_name = "img" + str(self.count) + ".jpg" #crop_img = cv_image[:, 420: 1520] scale_percent = 50 #calculate the 50 percent of original dimensions width = int(cv_image.shape[1] * 1/3) height = int(cv_image.shape[0] * 1/3) # dsize dsize = (width, height) # resize image #cv_image = cv2.resize(cv_image, dsize) cv2.imwrite(os.path.join(path, file_name), cv_image) self.num_img += 1 self.count += 1 elif self.num_img == 33: key = raw_input("press q to quit or change the position of the camera and press c to continue...") if key == "c": print("heyy") self.num_img = 1 self.set_id+= 1 | 2.780719 | 3 |
gr-azure-software-radio/python/default_credentials/default_credentials.py | pomeroy3/azure-software-radio | 0 | 6621987 | <filename>gr-azure-software-radio/python/default_credentials/default_credentials.py
# pylint: disable=invalid-name
# Copyright (c) Microsoft Corporation.
# Licensed under the GNU General Public License v3.0 or later.
# See License.txt in the project root for license information.
#
# pylint: disable=too-many-arguments
from azure.identity import DefaultAzureCredential
def get_DefaultAzureCredential(enable_cli_credential, enable_environment, enable_managed_identity,
enable_powershell, enable_visual_studio_code, enable_shared_token_cache,
enable_interactive_browser, cred_authority='login.microsoftonline.com'):
"""
Returns DefaultAzureCredential
Args:
enable_cli_credential: enable CLI authentication
enable_environment: enable environment variable authentication
enable_managed_identity: enable managed identity authentication
enable_powershell: enable powersehll authentication
enable_visual_studio_code: enable visual studio code authentication
enable_interactive_browser: enable interactive broswer authentication
cred_authority : Authority to use, defaults to 'login.microsoftonline.com'
Returns:
A DefaultAzureCredential.
"""
return DefaultAzureCredential(exclude_cli_credential=not enable_cli_credential,
exclude_environment_credential=not enable_environment,
exclude_managed_identity_credential=not enable_managed_identity,
exclude_powershell_credential=not enable_powershell,
exclude_visual_studio_code_credential=not enable_visual_studio_code,
exclude_shared_token_cache_credential=not enable_shared_token_cache,
exclude_interactive_browser_credential=not enable_interactive_browser,
authority=cred_authority)
| <filename>gr-azure-software-radio/python/default_credentials/default_credentials.py
# pylint: disable=invalid-name
# Copyright (c) Microsoft Corporation.
# Licensed under the GNU General Public License v3.0 or later.
# See License.txt in the project root for license information.
#
# pylint: disable=too-many-arguments
from azure.identity import DefaultAzureCredential
def get_DefaultAzureCredential(enable_cli_credential, enable_environment, enable_managed_identity,
enable_powershell, enable_visual_studio_code, enable_shared_token_cache,
enable_interactive_browser, cred_authority='login.microsoftonline.com'):
"""
Returns DefaultAzureCredential
Args:
enable_cli_credential: enable CLI authentication
enable_environment: enable environment variable authentication
enable_managed_identity: enable managed identity authentication
enable_powershell: enable powersehll authentication
enable_visual_studio_code: enable visual studio code authentication
enable_interactive_browser: enable interactive broswer authentication
cred_authority : Authority to use, defaults to 'login.microsoftonline.com'
Returns:
A DefaultAzureCredential.
"""
return DefaultAzureCredential(exclude_cli_credential=not enable_cli_credential,
exclude_environment_credential=not enable_environment,
exclude_managed_identity_credential=not enable_managed_identity,
exclude_powershell_credential=not enable_powershell,
exclude_visual_studio_code_credential=not enable_visual_studio_code,
exclude_shared_token_cache_credential=not enable_shared_token_cache,
exclude_interactive_browser_credential=not enable_interactive_browser,
authority=cred_authority)
| en | 0.416281 | # pylint: disable=invalid-name # Copyright (c) Microsoft Corporation. # Licensed under the GNU General Public License v3.0 or later. # See License.txt in the project root for license information. # # pylint: disable=too-many-arguments Returns DefaultAzureCredential Args: enable_cli_credential: enable CLI authentication enable_environment: enable environment variable authentication enable_managed_identity: enable managed identity authentication enable_powershell: enable powersehll authentication enable_visual_studio_code: enable visual studio code authentication enable_interactive_browser: enable interactive broswer authentication cred_authority : Authority to use, defaults to 'login.microsoftonline.com' Returns: A DefaultAzureCredential. | 1.888302 | 2 |
gdsfactory/watch.py | simbilod/gdsfactory | 0 | 6621988 | <reponame>simbilod/gdsfactory
import logging
import pathlib
import sys
import time
import traceback
from functools import partial
from typing import Optional
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from gdsfactory.config import cwd
from gdsfactory.pdk import get_active_pdk
from gdsfactory.read.from_yaml import from_yaml
class YamlEventHandler(FileSystemEventHandler):
"""Captures pic.yml events."""
def __init__(self, logger=None, path: Optional[str] = None):
super().__init__()
self.logger = logger or logging.root
pdk = get_active_pdk()
pdk.register_cells_yaml(dirpath=path)
def update_cell(self, src_path, update: bool = False) -> None:
"""Register new YAML file into active pdk.
pdk.cells[filename] = partial(from_yaml, filepath)
"""
pdk = get_active_pdk()
filepath = pathlib.Path(src_path)
cell_name = filepath.stem.split(".")[0]
function = partial(from_yaml, filepath)
try:
pdk.register_cells_yaml(**{cell_name: function}, update=update)
except ValueError as e:
print(e)
def on_moved(self, event):
super().on_moved(event)
what = "directory" if event.is_directory else "file"
self.logger.info(
"Moved %s: from %s to %s", what, event.src_path, event.dest_path
)
if what == "file" and event.dest_path.endswith(".pic.yml"):
self.logger.info("Created %s: %s", what, event.src_path)
self.update_cell(event.dest_path)
self.get_component(event.src_path)
def on_created(self, event):
super().on_created(event)
what = "directory" if event.is_directory else "file"
if what == "file" and event.src_path.endswith(".pic.yml"):
self.logger.info("Created %s: %s", what, event.src_path)
self.update_cell(event.src_path)
self.get_component(event.src_path)
def on_deleted(self, event):
super().on_deleted(event)
what = "directory" if event.is_directory else "file"
self.logger.info("Deleted %s: %s", what, event.src_path)
if what == "file" and event.src_path.endswith(".pic.yml"):
pdk = get_active_pdk()
filepath = pathlib.Path(event.src_path)
cell_name = filepath.stem.split(".")[0]
pdk.remove_cell(cell_name)
def on_modified(self, event):
super().on_modified(event)
what = "directory" if event.is_directory else "file"
if what == "file" and event.src_path.endswith(".pic.yml"):
self.logger.info("Modified %s: %s", what, event.src_path)
self.get_component(event.src_path)
def get_component(self, filepath):
try:
filepath = pathlib.Path(filepath)
if filepath.exists():
c = from_yaml(filepath)
self.update_cell(filepath, update=True)
c.show()
# on_yaml_cell_modified.fire(c)
return c
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
def watch(path=str(cwd)) -> None:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
event_handler = YamlEventHandler(path=path)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
logging.info(f"Observing {path!r}")
try:
while True:
time.sleep(1)
finally:
observer.stop()
observer.join()
if __name__ == "__main__":
path = sys.argv[1] if len(sys.argv) > 1 else "."
watch(path)
| import logging
import pathlib
import sys
import time
import traceback
from functools import partial
from typing import Optional
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from gdsfactory.config import cwd
from gdsfactory.pdk import get_active_pdk
from gdsfactory.read.from_yaml import from_yaml
class YamlEventHandler(FileSystemEventHandler):
"""Captures pic.yml events."""
def __init__(self, logger=None, path: Optional[str] = None):
super().__init__()
self.logger = logger or logging.root
pdk = get_active_pdk()
pdk.register_cells_yaml(dirpath=path)
def update_cell(self, src_path, update: bool = False) -> None:
"""Register new YAML file into active pdk.
pdk.cells[filename] = partial(from_yaml, filepath)
"""
pdk = get_active_pdk()
filepath = pathlib.Path(src_path)
cell_name = filepath.stem.split(".")[0]
function = partial(from_yaml, filepath)
try:
pdk.register_cells_yaml(**{cell_name: function}, update=update)
except ValueError as e:
print(e)
def on_moved(self, event):
super().on_moved(event)
what = "directory" if event.is_directory else "file"
self.logger.info(
"Moved %s: from %s to %s", what, event.src_path, event.dest_path
)
if what == "file" and event.dest_path.endswith(".pic.yml"):
self.logger.info("Created %s: %s", what, event.src_path)
self.update_cell(event.dest_path)
self.get_component(event.src_path)
def on_created(self, event):
super().on_created(event)
what = "directory" if event.is_directory else "file"
if what == "file" and event.src_path.endswith(".pic.yml"):
self.logger.info("Created %s: %s", what, event.src_path)
self.update_cell(event.src_path)
self.get_component(event.src_path)
def on_deleted(self, event):
super().on_deleted(event)
what = "directory" if event.is_directory else "file"
self.logger.info("Deleted %s: %s", what, event.src_path)
if what == "file" and event.src_path.endswith(".pic.yml"):
pdk = get_active_pdk()
filepath = pathlib.Path(event.src_path)
cell_name = filepath.stem.split(".")[0]
pdk.remove_cell(cell_name)
def on_modified(self, event):
super().on_modified(event)
what = "directory" if event.is_directory else "file"
if what == "file" and event.src_path.endswith(".pic.yml"):
self.logger.info("Modified %s: %s", what, event.src_path)
self.get_component(event.src_path)
def get_component(self, filepath):
try:
filepath = pathlib.Path(filepath)
if filepath.exists():
c = from_yaml(filepath)
self.update_cell(filepath, update=True)
c.show()
# on_yaml_cell_modified.fire(c)
return c
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
def watch(path=str(cwd)) -> None:
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
event_handler = YamlEventHandler(path=path)
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
logging.info(f"Observing {path!r}")
try:
while True:
time.sleep(1)
finally:
observer.stop()
observer.join()
if __name__ == "__main__":
path = sys.argv[1] if len(sys.argv) > 1 else "."
watch(path) | en | 0.682618 | Captures pic.yml events. Register new YAML file into active pdk. pdk.cells[filename] = partial(from_yaml, filepath) # on_yaml_cell_modified.fire(c) | 2.191919 | 2 |
app.py | chinmay29hub/typeroid | 0 | 6621989 | from click import option
import typer
from removebg import RemoveBg
import config
import time
import yfinance as yf
import pyfiglet
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import random
from pick import pick
from scihub import SciHub
# Typer application instance; every function decorated with @app.command()
# below registers itself as a CLI sub-command on this object.
app = typer.Typer()
# Shared Sci-Hub client used by the download helper of the `scihub` command.
sh = SciHub()
def removing_link(link):
    """Strip the background from the image at *link* via the remove.bg API.

    The processed image is written to the current working directory by the
    removebg client; nothing is returned.  Errors are logged to "error.log".
    """
    client = RemoveBg(config.api_key, "error.log")
    client.remove_background_from_img_url(link)
@app.command()
def remove_bg_link():
    """Prompt for an image URL and remove its background via remove.bg.

    The processed image is saved into the current working directory.
    """
    typer.echo(pyfiglet.figlet_format("remove.link"))
    link = typer.prompt(
        "Enter the link of the image you want to remove the background from")
    typer.echo("\n")
    # Do the real work up front.  The original passed `removing_link(link)`
    # (which returns None) as the progress bar's iterable, silently relying on
    # click's None+length fallback to range(length); it also counted
    # iterations into an unused `total` variable.
    removing_link(link)
    # Purely cosmetic progress animation -- the work above is already done.
    with typer.progressbar(length=100, label="Removing Background") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! Image downloaded in current directory", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
def removing_file(file):
    """Strip the background from the local image *file* via the remove.bg API.

    The processed image is written next to the input by the removebg client;
    nothing is returned.  Errors are logged to "error.log".
    """
    client = RemoveBg(config.api_key, "error.log")
    client.remove_background_from_img_file(file)
@app.command()
def remove_bg_file():
    """Prompt for a local image path and remove its background via remove.bg.

    The processed image is saved next to the input file.
    """
    typer.echo(pyfiglet.figlet_format("remove.file"))
    file = typer.prompt(
        "Enter the path of the file you want to remove the background from")
    typer.echo("\n")
    # Do the real work up front.  The original passed `removing_file(file)`
    # (which returns None) as the progress bar's iterable, silently relying on
    # click's None+length fallback to range(length); it also counted
    # iterations into an unused `total` variable.
    removing_file(file)
    # Purely cosmetic progress animation -- the work above is already done.
    with typer.progressbar(length=100, label="Removing Background") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! Image downloaded in image path", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
@app.command()
def removebg():
    """Interactive sub-menu for the background-removal feature."""
    banner = pyfiglet.figlet_format("remove.bg")
    choices = ['Remove from a image file?',
               'Remove from a link?', 'Back', 'Exit']
    selection, _ = pick(choices, banner, indicator='=>', default_index=0)
    if selection == choices[0]:
        remove_bg_file()
    elif selection == choices[1]:
        remove_bg_link()
    elif selection == choices[2]:
        menu()
    else:
        exit()
@app.command()
def repository():
    """Open the project's GitHub repository in the default web browser."""
    typer.echo(pyfiglet.figlet_format("Repo"))
    notice = f"\nOpening Github Repository in your Default browser\n"
    typer.secho(notice, fg=typer.colors.BLUE, bg=typer.colors.YELLOW, bold=True)
    typer.launch("https://github.com/chinmay29hub/typer")
def stock_loading(symbol, start, end):
    """Download daily price data for *symbol* and write it to a CSV file.

    Args:
        symbol: Ticker symbol understood by Yahoo Finance (e.g. "AAPL").
        start: Start date string, e.g. "2020-01-01".
        end: End date string, e.g. "2021-01-01".

    Returns:
        The name of the CSV file written, e.g. "AAPL-2020-01-01-to-2021-01-01.csv".
        (The original returned None; callers that ignore the return value are
        unaffected.)
    """
    data = yf.download(symbol, start, end, progress=False)
    file_name = f"{symbol}-{start}-to-{end}.csv"
    data.to_csv(file_name)
    return file_name
@app.command()
def stock():
    """Prompt for a ticker and date range, then download the data as a CSV."""
    typer.echo(pyfiglet.figlet_format("stock"))
    symbol = typer.prompt(
        "\nEnter Company Symbol | eg. AAPL, IBM, GOOGL, etc.")
    typer.echo("\n")
    start = typer.prompt("Enter start date | eg. 2020-01-01")
    typer.echo("\n")
    end = typer.prompt("Enter end date | eg. 2021-01-01")
    typer.echo("\n")
    # Download first.  The original passed `stock_loading(...)` (which returned
    # None) as the progress bar's iterable, relying on click's None+length
    # fallback, and counted iterations into an unused `total` variable.
    stock_loading(symbol, start, end)
    # Purely cosmetic progress animation -- the download already finished.
    with typer.progressbar(length=100, label="Downloading dataset") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! Dataset downloaded in current directory", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
def word_cloud(csv_path="dataset/android-games.csv"):
    """Display a word cloud built from the ``category`` column of a CSV file.

    Args:
        csv_path: Path to the CSV file.  It must contain a ``category`` column
            whose values hold at least two whitespace-separated words; the
            second word of each value is used (e.g. "GAME ACTION" -> "ACTION").
            Defaults to the bundled android-games dataset, so existing callers
            are unaffected.
    """
    df = pd.read_csv(csv_path)
    # NOTE: the original also called df.head() and df.isna().sum() and threw
    # the results away; those dead statements were removed.
    text = " ".join(cat.split()[1] for cat in df.category)
    cloud = WordCloud(width=1600, height=800, max_font_size=200,
                      collocations=False, background_color='black').generate(text)
    plt.imshow(cloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
@app.command()
def wordcloud():
    """Show a progress animation, then display the category word cloud."""
    typer.echo(pyfiglet.figlet_format("wordcloud"))
    # Cosmetic progress animation; the word cloud is rendered afterwards.
    with typer.progressbar(length=100, label="Creating wordcloud") as progress:
        for _ in progress:
            time.sleep(0.02)
    typer.echo("\n")
    typer.secho(f"Done! Here it is", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    word_cloud()
    typer.echo("\n")
def pass_generate(c):
    """Generate a random password of length *c* and print it.

    Uses the ``secrets`` CSPRNG instead of the non-cryptographic ``random``
    module, since passwords are security-sensitive.  Characters are drawn
    with replacement, so any length is supported (the original
    ``random.sample`` could never repeat a character and raised ValueError
    for lengths greater than the ~75-character pool).

    Args:
        c: Desired password length.

    Returns:
        The generated password string (the original returned None; callers
        that ignore the return value are unaffected).
    """
    import secrets
    import string
    # Same character pool as before: lowercase, uppercase, digits, symbols.
    pool = string.ascii_lowercase + string.ascii_uppercase + string.digits + "!@#$%^&*()__+"
    password = "".join(secrets.choice(pool) for _ in range(c))
    typer.secho(
        f"\nYour generated password is : {password}", fg=typer.colors.GREEN, bold=True)
    return password
@app.command()
def password():
    """Prompt for a length and print a randomly generated password."""
    typer.echo(pyfiglet.figlet_format("password"))
    raw_length = typer.prompt("\nEnter length of password | eg. 16 ")
    typer.echo("\n")
    # Validate the input: the original called int() unguarded, so a
    # non-numeric answer crashed the CLI with a traceback.
    try:
        length = int(raw_length)
    except ValueError:
        typer.secho("Invalid length: please enter a whole number, e.g. 16",
                    fg=typer.colors.RED, bold=True)
        raise typer.Exit(code=1)
    # Cosmetic progress animation before showing the password.
    with typer.progressbar(length=100, label="Generating password") as progress:
        for _ in progress:
            time.sleep(0.015)
    typer.echo("\n")
    pass_generate(length)
    typer.echo("\n")
    typer.secho("Done!", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
def download_file(link, file_name):
    """Download the paper at *link* via Sci-Hub into ``<file_name>.pdf``.

    Args:
        link: URL or identifier (e.g. DOI) accepted by ``SciHub.download``.
        file_name: Output file name without the ``.pdf`` extension.

    Returns:
        Whatever ``SciHub.download`` returns (the original assigned it to an
        unused local and discarded it).
    """
    return sh.download(link, path=f'{file_name}.pdf')
@app.command()
def scihub():
    """Prompt for an article link and download it as a PDF via Sci-Hub."""
    typer.echo(pyfiglet.figlet_format("sci-hub"))
    link = typer.prompt("Enter the link of the article you want to download ")
    typer.echo("\n")
    file_name = typer.prompt("Enter the filename to give to the pdf ")
    typer.echo("\n")
    # Download first.  The original passed `download_file(...)`'s return value
    # as the progress bar's iterable (relying on click's None+length fallback)
    # and counted iterations into an unused `total` variable.
    download_file(link, file_name)
    # Purely cosmetic progress animation -- the download already finished.
    with typer.progressbar(length=100, label="Downloading Paper") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! PDF downloaded in current directory", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
@app.command()
def menu():
    """Top-level interactive menu dispatching to the individual commands."""
    banner = pyfiglet.figlet_format("Typeroid")
    choices = ['Generate Wordcloud', 'Generate Password', 'Fetch Stock Data',
               'Remove Background from image', 'Sci-Hub - Download any research paper', 'Open this Repository', 'Exit']
    # Map each menu entry to its handler; anything else (i.e. 'Exit') quits.
    handlers = {
        'Generate Wordcloud': wordcloud,
        'Generate Password': password,
        'Fetch Stock Data': stock,
        'Remove Background from image': removebg,
        'Sci-Hub - Download any research paper': scihub,
        'Open this Repository': repository,
    }
    selection, _ = pick(choices, banner, indicator='=>', default_index=0)
    handlers.get(selection, exit)()
@app.callback()
def main(ctx: typer.Context):
    """
    please execute any one command.
    """
    # NOTE: the docstring above doubles as the CLI help text shown by typer.
    notice = f"\nAbout to execute command: {ctx.invoked_subcommand}\n"
    typer.secho(notice, fg=typer.colors.GREEN)
# Script entry point: hand control to the Typer CLI dispatcher.
if __name__ == "__main__":
    app()
| from click import option
import typer
from removebg import RemoveBg
import config
import time
import yfinance as yf
import pyfiglet
import pandas as pd
import matplotlib.pyplot as plt
from wordcloud import WordCloud
import random
from pick import pick
from scihub import SciHub
# Typer application instance; every function decorated with @app.command()
# below registers itself as a CLI sub-command on this object.
app = typer.Typer()
# Shared Sci-Hub client used by the download helper of the `scihub` command.
sh = SciHub()
def removing_link(link):
    """Strip the background from the image at *link* via the remove.bg API.

    The processed image is written to the current working directory by the
    removebg client; nothing is returned.  Errors are logged to "error.log".
    """
    client = RemoveBg(config.api_key, "error.log")
    client.remove_background_from_img_url(link)
@app.command()
def remove_bg_link():
    """Prompt for an image URL and remove its background via remove.bg.

    The processed image is saved into the current working directory.
    """
    typer.echo(pyfiglet.figlet_format("remove.link"))
    link = typer.prompt(
        "Enter the link of the image you want to remove the background from")
    typer.echo("\n")
    # Do the real work up front.  The original passed `removing_link(link)`
    # (which returns None) as the progress bar's iterable, silently relying on
    # click's None+length fallback to range(length); it also counted
    # iterations into an unused `total` variable.
    removing_link(link)
    # Purely cosmetic progress animation -- the work above is already done.
    with typer.progressbar(length=100, label="Removing Background") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! Image downloaded in current directory", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
def removing_file(file):
    """Strip the background from the local image *file* via the remove.bg API.

    The processed image is written next to the input by the removebg client;
    nothing is returned.  Errors are logged to "error.log".
    """
    client = RemoveBg(config.api_key, "error.log")
    client.remove_background_from_img_file(file)
@app.command()
def remove_bg_file():
    """Prompt for a local image path and remove its background via remove.bg.

    The processed image is saved next to the input file.
    """
    typer.echo(pyfiglet.figlet_format("remove.file"))
    file = typer.prompt(
        "Enter the path of the file you want to remove the background from")
    typer.echo("\n")
    # Do the real work up front.  The original passed `removing_file(file)`
    # (which returns None) as the progress bar's iterable, silently relying on
    # click's None+length fallback to range(length); it also counted
    # iterations into an unused `total` variable.
    removing_file(file)
    # Purely cosmetic progress animation -- the work above is already done.
    with typer.progressbar(length=100, label="Removing Background") as progress:
        for _ in progress:
            time.sleep(0.01)
    typer.echo("\n")
    typer.secho("Done! Image downloaded in image path", fg=typer.colors.BLUE,
                bg=typer.colors.YELLOW, bold=True)
    typer.echo("\n")
@app.command()
def removebg():
title = pyfiglet.figlet_format("remove.bg")
options = ['Remove from a image file?',
'Remove from a link?', 'Back', 'Exit']
option, index = pick(options, title, indicator='=>', default_index=0)
if option == 'Remove from a image file?':
remove_bg_file()
elif option == 'Remove from a link?':
remove_bg_link()
elif option == 'Back':
menu()
else:
exit()
@app.command()
def repository():
typer.echo(pyfiglet.figlet_format("Repo"))
typer.secho(f"\nOpening Github Repository in your Default browser\n",
fg=typer.colors.BLUE, bg=typer.colors.YELLOW, bold=True)
typer.launch("https://github.com/chinmay29hub/typer")
def stock_loading(symbol, start, end):
data = yf.download(symbol, start, end, progress=False)
file_name = f"{symbol}-{start}-to-{end}.csv"
data.to_csv(file_name)
@app.command()
def stock():
typer.echo(pyfiglet.figlet_format("stock"))
symbol = typer.prompt(
"\nEnter Company Symbol | eg. AAPL, IBM, GOOGL, etc.")
typer.echo("\n")
start = typer.prompt("Enter start date | eg. 2020-01-01")
typer.echo("\n")
end = typer.prompt("Enter end date | eg. 2021-01-01")
typer.echo("\n")
total = 0
with typer.progressbar(stock_loading(symbol, start, end), length=100, label="Downloading dataset") as progress:
for loading in progress:
time.sleep(0.01)
total += 1
typer.echo("\n")
typer.secho(f"Done! Dataset downloaded in current directory", fg=typer.colors.BLUE,
bg=typer.colors.YELLOW, bold=True)
typer.echo("\n")
def word_cloud():
df = pd.read_csv("dataset/android-games.csv")
df.head()
df.isna().sum()
text = " ".join(cat.split()[1] for cat in df.category)
word_cloud = WordCloud(width=1600, height=800, max_font_size=200,
collocations=False, background_color='black').generate(text)
plt.imshow(word_cloud, interpolation='bilinear')
plt.axis("off")
plt.show()
@app.command()
def wordcloud():
typer.echo(pyfiglet.figlet_format("wordcloud"))
total = 0
with typer.progressbar(length=100, label="Creating wordcloud") as progress:
for loading in progress:
time.sleep(0.02)
total += 1
typer.echo("\n")
typer.secho(f"Done! Here it is", fg=typer.colors.BLUE,
bg=typer.colors.YELLOW, bold=True)
word_cloud()
typer.echo("\n")
def pass_generate(c):
lower = "abcdefghijklmnopqrstuvwxyz"
upper = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
numbers = "0123456789"
symbols = "!@#$%^&*()__+"
all = lower + upper + numbers + symbols
password = "".join(random.sample(all, c))
typer.secho(
f"\nYour generated password is : {password}", fg=typer.colors.GREEN, bold=True)
@app.command()
def password():
typer.echo(pyfiglet.figlet_format("password"))
lengthh = typer.prompt("\nEnter length of password | eg. 16 ")
typer.echo("\n")
c = int(lengthh)
total = 0
with typer.progressbar(length=100, label="Generating password") as progress:
for loading in progress:
time.sleep(0.015)
total += 1
typer.echo("\n")
pass_generate(c)
typer.echo("\n")
typer.secho(f"Done!", fg=typer.colors.BLUE,
bg=typer.colors.YELLOW, bold=True)
def download_file(link, file_name):
result = sh.download(link, path=f'{file_name}.pdf')
@app.command()
def scihub():
typer.echo(pyfiglet.figlet_format("sci-hub"))
link = typer.prompt("Enter the link of the article you want to download ")
typer.echo("\n")
file_name = typer.prompt("Enter the filename to give to the pdf ")
typer.echo("\n")
total = 0
with typer.progressbar(download_file(link, file_name), length=100, label="Downloading Paper") as progress:
for loading in progress:
time.sleep(0.01)
total += 1
typer.echo("\n")
typer.secho(f"Done! PDF downloaded in current directory", fg=typer.colors.BLUE,
bg=typer.colors.YELLOW, bold=True)
typer.echo("\n")
@app.command()
def menu():
title = pyfiglet.figlet_format("Typeroid")
options = ['Generate Wordcloud', 'Generate Password', 'Fetch Stock Data',
'Remove Background from image', 'Sci-Hub - Download any research paper', 'Open this Repository', 'Exit']
option, index = pick(options, title, indicator='=>', default_index=0)
if option == 'Generate Wordcloud':
wordcloud()
elif option == 'Generate Password':
password()
elif option == 'Fetch Stock Data':
stock()
elif option == 'Remove Background from image':
removebg()
elif option == 'Sci-Hub - Download any research paper':
scihub()
elif option == 'Open this Repository':
repository()
else:
exit()
@app.callback()
def main(ctx: typer.Context):
"""
please execute any one command.
"""
typer.secho(
f"\nAbout to execute command: {ctx.invoked_subcommand}\n", fg=typer.colors.GREEN)
if __name__ == "__main__":
app()
| en | 0.590404 | #$%^&*()__+" please execute any one command. | 2.710384 | 3 |
slack/migrations/0001_initial.py | oditorium/django-slack | 2 | 6621990 | <filename>slack/migrations/0001_initial.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-05 03:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import slack.models.keyvalue
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='KeyValuePair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(db_index=True, max_length=240)),
('value', models.CharField(blank=True, db_index=True, max_length=240, null=True)),
],
),
migrations.CreateModel(
name='KeyValueStore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_namespace', models.CharField(blank=True, default='', max_length=255, unique=True)),
],
bases=(slack.models.keyvalue.KeyValueStoreBase, models.Model),
),
migrations.AddField(
model_name='keyvaluepair',
name='segment',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='slack.KeyValueStore'),
),
]
| <filename>slack/migrations/0001_initial.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-04-05 03:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import slack.models.keyvalue
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='KeyValuePair',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(db_index=True, max_length=240)),
('value', models.CharField(blank=True, db_index=True, max_length=240, null=True)),
],
),
migrations.CreateModel(
name='KeyValueStore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('_namespace', models.CharField(blank=True, default='', max_length=255, unique=True)),
],
bases=(slack.models.keyvalue.KeyValueStoreBase, models.Model),
),
migrations.AddField(
model_name='keyvaluepair',
name='segment',
field=models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='slack.KeyValueStore'),
),
]
| en | 0.812191 | # -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-04-05 03:39 | 1.568487 | 2 |
python/dynamic_graph/sot/torque_control/identification/pos_ctrl/compress_hrpsys_data.py | nim65s/sot-torque-control | 6 | 6621991 | <reponame>nim65s/sot-torque-control
# -*- coding: utf-8 -*-
# flake8: noqa
"""
Created on Wed Apr 22 11:36:11 2015
Compress old data in the log format of hrpsys in a format that is compatible with the new data
collected using the RealTimeTracer.
@author: adelpret
"""
import matplotlib.pyplot as plt
import numpy as np
from compute_estimates_from_sensors import compute_estimates_from_sensors
from load_hrpsys_log import load_hrpsys_log_astate, load_hrpsys_log_rstate
out_data_folder = '../results/20140807-legTorqueId1/'
OUT_DATA_FILE_NAME = 'data.npz'
A_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-astate.log'
R_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-rstate.log'
ESTIMATION_DELAY = 0.2
COMPUTE_TORQUES_WITHOUT_GYRO = False
sensors = load_hrpsys_log_astate(A_STATE_FILE, 'rad')
ref = load_hrpsys_log_rstate(R_STATE_FILE, 'rad')
#sensors = sensors[:5000];
#ref = ref[:5000];
(torques, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY)
if (COMPUTE_TORQUES_WITHOUT_GYRO):
sensors['gyro'] = np.zeros(sensors['gyro'].shape)
(torques_no_gyro, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY)
for i in range(12): #torques.shape[1]):
print "Plot data joint %d out of %d" % (i, torques.shape[1])
f, ax = plt.subplots(1, 1, sharex=True)
ax.plot(sensors['time'], sensors['torque'][:, i], 'b')
delta_q = ref['enc'][:, i] - sensors['enc'][:, i]
scale = np.mean(sensors['torque'][:, i]) / np.mean(delta_q)
ax.plot(sensors['time'], scale * delta_q, 'r--')
ax.plot(sensors['time'], torques[:, i], 'g--')
ax.legend(['hrpsys', 'delta_q', 'torque'])
ax.set_title('torque hrpsys')
f, ax = plt.subplots(3, 1, sharex=True)
ax[0].plot(sensors['time'], torques[:, i])
if (COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].plot(sensors['time'], torques_no_gyro[:, i])
ax[0].set_title('Torque joint ' + str(i))
ax[1].plot(sensors['time'], sensors['enc'][:, i])
ax[1].plot(sensors['time'], sensors['enc'][:, i] - ref['enc'][:, i])
ax[1].set_title('Angle joint ' + str(i))
ax[2].plot(sensors['time'], dq[:, i])
ax[2].set_title('Velocity joint ' + str(i))
ax[1].legend(['Angle', 'Delta_q'])
if (COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].legend(['Torque w/ gyro', 'Torque w/o gyro'])
plt.show()
DT = float(sensors['time'][1] - sensors['time'][0])
# sampling period
LOST_SAMPLES = int(ESTIMATION_DELAY / DT)
print "Gonna shift data of %d samples to compensate for estimation delay" % LOST_SAMPLES
if (COMPUTE_TORQUES_WITHOUT_GYRO):
np.savez(out_data_folder + OUT_DATA_FILE_NAME,
dq=dq[:-LOST_SAMPLES, :],
tau=torques_no_gyro[:-LOST_SAMPLES, :],
qDes=ref['enc'][LOST_SAMPLES:, :30],
enc=sensors['enc'][LOST_SAMPLES:, :30])
else:
np.savez(out_data_folder + OUT_DATA_FILE_NAME,
dq=dq[:-LOST_SAMPLES, :],
tau=torques[:-LOST_SAMPLES, :],
qDes=ref['enc'][LOST_SAMPLES:, :30],
enc=sensors['enc'][LOST_SAMPLES:, :30])
| # -*- coding: utf-8 -*-
# flake8: noqa
"""
Created on Wed Apr 22 11:36:11 2015
Compress old data in the log format of hrpsys in a format that is compatible with the new data
collected using the RealTimeTracer.
@author: adelpret
"""
import matplotlib.pyplot as plt
import numpy as np
from compute_estimates_from_sensors import compute_estimates_from_sensors
from load_hrpsys_log import load_hrpsys_log_astate, load_hrpsys_log_rstate
out_data_folder = '../results/20140807-legTorqueId1/'
OUT_DATA_FILE_NAME = 'data.npz'
A_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-astate.log'
R_STATE_FILE = '/home/adelpret/devel/yarp_gazebo/src/motorFrictionIdentification/data/20140807-legTorqueId/legTorqueId_pos1-rstate.log'
ESTIMATION_DELAY = 0.2
COMPUTE_TORQUES_WITHOUT_GYRO = False
sensors = load_hrpsys_log_astate(A_STATE_FILE, 'rad')
ref = load_hrpsys_log_rstate(R_STATE_FILE, 'rad')
#sensors = sensors[:5000];
#ref = ref[:5000];
(torques, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY)
if (COMPUTE_TORQUES_WITHOUT_GYRO):
sensors['gyro'] = np.zeros(sensors['gyro'].shape)
(torques_no_gyro, dq, ddq) = compute_estimates_from_sensors(sensors, ESTIMATION_DELAY)
for i in range(12): #torques.shape[1]):
print "Plot data joint %d out of %d" % (i, torques.shape[1])
f, ax = plt.subplots(1, 1, sharex=True)
ax.plot(sensors['time'], sensors['torque'][:, i], 'b')
delta_q = ref['enc'][:, i] - sensors['enc'][:, i]
scale = np.mean(sensors['torque'][:, i]) / np.mean(delta_q)
ax.plot(sensors['time'], scale * delta_q, 'r--')
ax.plot(sensors['time'], torques[:, i], 'g--')
ax.legend(['hrpsys', 'delta_q', 'torque'])
ax.set_title('torque hrpsys')
f, ax = plt.subplots(3, 1, sharex=True)
ax[0].plot(sensors['time'], torques[:, i])
if (COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].plot(sensors['time'], torques_no_gyro[:, i])
ax[0].set_title('Torque joint ' + str(i))
ax[1].plot(sensors['time'], sensors['enc'][:, i])
ax[1].plot(sensors['time'], sensors['enc'][:, i] - ref['enc'][:, i])
ax[1].set_title('Angle joint ' + str(i))
ax[2].plot(sensors['time'], dq[:, i])
ax[2].set_title('Velocity joint ' + str(i))
ax[1].legend(['Angle', 'Delta_q'])
if (COMPUTE_TORQUES_WITHOUT_GYRO):
ax[0].legend(['Torque w/ gyro', 'Torque w/o gyro'])
plt.show()
DT = float(sensors['time'][1] - sensors['time'][0])
# sampling period
LOST_SAMPLES = int(ESTIMATION_DELAY / DT)
print "Gonna shift data of %d samples to compensate for estimation delay" % LOST_SAMPLES
if (COMPUTE_TORQUES_WITHOUT_GYRO):
np.savez(out_data_folder + OUT_DATA_FILE_NAME,
dq=dq[:-LOST_SAMPLES, :],
tau=torques_no_gyro[:-LOST_SAMPLES, :],
qDes=ref['enc'][LOST_SAMPLES:, :30],
enc=sensors['enc'][LOST_SAMPLES:, :30])
else:
np.savez(out_data_folder + OUT_DATA_FILE_NAME,
dq=dq[:-LOST_SAMPLES, :],
tau=torques[:-LOST_SAMPLES, :],
qDes=ref['enc'][LOST_SAMPLES:, :30],
enc=sensors['enc'][LOST_SAMPLES:, :30]) | en | 0.696094 | # -*- coding: utf-8 -*- # flake8: noqa Created on Wed Apr 22 11:36:11 2015 Compress old data in the log format of hrpsys in a format that is compatible with the new data collected using the RealTimeTracer. @author: adelpret #sensors = sensors[:5000]; #ref = ref[:5000]; #torques.shape[1]): # sampling period | 2.001734 | 2 |
atcoder/abc/abc147_a.py | knuu/competitive-programming | 1 | 6621992 | <reponame>knuu/competitive-programming<filename>atcoder/abc/abc147_a.py
A = [int(x) for x in input().split()]
if sum(A) >= 22:
print("bust")
else:
print("win")
| A = [int(x) for x in input().split()]
if sum(A) >= 22:
print("bust")
else:
print("win") | none | 1 | 3.285166 | 3 | |
task4/conftest.py | rokimaru/rest_api_autotests | 0 | 6621993 | <reponame>rokimaru/rest_api_autotests
import pytest
def pytest_addoption(parser):
parser.addoption('--url', type=str, default='https://ya.ru')
parser.addoption('--status_code', type=int, default=200)
@pytest.fixture
def get_options(request):
return (request.config.getoption('--url'),
request.config.getoption('--status_code')) | import pytest
def pytest_addoption(parser):
parser.addoption('--url', type=str, default='https://ya.ru')
parser.addoption('--status_code', type=int, default=200)
@pytest.fixture
def get_options(request):
return (request.config.getoption('--url'),
request.config.getoption('--status_code')) | none | 1 | 2.170766 | 2 | |
gemma_conf_search.py | jensengroup/substituent_insulater_screening | 0 | 6621994 | import sys
import numpy as np
import pandas as pd
from multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem
sys.path.append("/home/koerstz/projects/gemma_part2/QMC_6.2")
from qmmol import QMMol
from qmconf import QMConf
from calculator.xtb import xTB
from conformers.create_conformers import RotatableBonds
def gs_conformer_search(name, rdkit_conf, chrg, mult, cpus):
""" ground state conformer search """
charged = False # hard coded for mogens
# create conformers
qmmol = QMMol()
qmmol.add_conformer(rdkit_conf, fmt='rdkit', label=name,
charged_fragments=charged, set_initial=True)
#print(qmmol.conformers)
#print(qmmol.conformers[0].write_xyz())
#quit()
num_confs = 5
qmmol.create_random_conformers(threads=cpus, num_confs=num_confs)
print(len(qmmol.conformers))
for conf in qmmol.conformers:
print(conf.label)
conf.write_xyz()
quit()
xtb_params = {'method': 'gfn2',
'opt': 'opt',
'cpus': 1}
qmmol.calc = xTB(parameters=xtb_params)
qmmol.optimize(num_procs=cpus, keep_files=True)
#for conf in qmmol.conformers:
# print(conf.label, conf.results['energy'])
# conf.write_xyz()
# Get most stable conformer. If most stable conformer
# not identical to initial conf try second lowest.
initial_smi = Chem.MolToSmiles(Chem.RemoveHs(qmmol.initial_conformer.get_rdkit_mol()))
low_energy_conf = qmmol.nlowest(1)[0]
try:
conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol()))
except:
conf_smi = 'fail'
i = 1
while initial_smi != conf_smi:
low_energy_conf = qmmol.nlowest(i+1)[-1]
try:
conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol()))
except:
conf_smi = 'fail'
i += 1
if len(qmmol.conformers) < i:
sys.exit('no conformers match the initial input')
return low_energy_conf
def gs_gemma(tup): #name, smi, chrg, mult, cps):
"""GS conformers search given a smiles string """
cps = 1
name, smi, chrg, mult = tup.comp_name, tup.smiles, tup.charge, tup.multiplicity
mol = Chem.AddHs(Chem.MolFromSmiles(smi))
AllChem.EmbedMolecule(mol)
mol = Chem.AddHs(mol)
Chem.MolToMolFile(mol, name + '.sdf')
rdkit_conf = mol.GetConformer()
qmconf = gs_conformer_search(name, rdkit_conf, chrg, mult, cps)
#print(qmconf.results, qmconf.label)
return qmconf
if __name__ == '__main__':
cpus = 2
data = pd.read_csv(sys.argv[1])
# find storage energy
compound_list = list()
for compound in data.itertuples():
mol = gs_gemma(compound)
compound_list.append({'comp_name': compound.comp_name,
'mol': mol})
data_out = pd.DataFrame(compound_list)
data_out.to_pickle(sys.argv[1].split('.')[0] + '.pkl')
| import sys
import numpy as np
import pandas as pd
from multiprocessing import Pool
from rdkit import Chem
from rdkit.Chem import AllChem
sys.path.append("/home/koerstz/projects/gemma_part2/QMC_6.2")
from qmmol import QMMol
from qmconf import QMConf
from calculator.xtb import xTB
from conformers.create_conformers import RotatableBonds
def gs_conformer_search(name, rdkit_conf, chrg, mult, cpus):
""" ground state conformer search """
charged = False # hard coded for mogens
# create conformers
qmmol = QMMol()
qmmol.add_conformer(rdkit_conf, fmt='rdkit', label=name,
charged_fragments=charged, set_initial=True)
#print(qmmol.conformers)
#print(qmmol.conformers[0].write_xyz())
#quit()
num_confs = 5
qmmol.create_random_conformers(threads=cpus, num_confs=num_confs)
print(len(qmmol.conformers))
for conf in qmmol.conformers:
print(conf.label)
conf.write_xyz()
quit()
xtb_params = {'method': 'gfn2',
'opt': 'opt',
'cpus': 1}
qmmol.calc = xTB(parameters=xtb_params)
qmmol.optimize(num_procs=cpus, keep_files=True)
#for conf in qmmol.conformers:
# print(conf.label, conf.results['energy'])
# conf.write_xyz()
# Get most stable conformer. If most stable conformer
# not identical to initial conf try second lowest.
initial_smi = Chem.MolToSmiles(Chem.RemoveHs(qmmol.initial_conformer.get_rdkit_mol()))
low_energy_conf = qmmol.nlowest(1)[0]
try:
conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol()))
except:
conf_smi = 'fail'
i = 1
while initial_smi != conf_smi:
low_energy_conf = qmmol.nlowest(i+1)[-1]
try:
conf_smi = Chem.MolToSmiles(Chem.RemoveHs(low_energy_conf.get_rdkit_mol()))
except:
conf_smi = 'fail'
i += 1
if len(qmmol.conformers) < i:
sys.exit('no conformers match the initial input')
return low_energy_conf
def gs_gemma(tup): #name, smi, chrg, mult, cps):
"""GS conformers search given a smiles string """
cps = 1
name, smi, chrg, mult = tup.comp_name, tup.smiles, tup.charge, tup.multiplicity
mol = Chem.AddHs(Chem.MolFromSmiles(smi))
AllChem.EmbedMolecule(mol)
mol = Chem.AddHs(mol)
Chem.MolToMolFile(mol, name + '.sdf')
rdkit_conf = mol.GetConformer()
qmconf = gs_conformer_search(name, rdkit_conf, chrg, mult, cps)
#print(qmconf.results, qmconf.label)
return qmconf
if __name__ == '__main__':
cpus = 2
data = pd.read_csv(sys.argv[1])
# find storage energy
compound_list = list()
for compound in data.itertuples():
mol = gs_gemma(compound)
compound_list.append({'comp_name': compound.comp_name,
'mol': mol})
data_out = pd.DataFrame(compound_list)
data_out.to_pickle(sys.argv[1].split('.')[0] + '.pkl')
| en | 0.565155 | ground state conformer search # hard coded for mogens # create conformers #print(qmmol.conformers) #print(qmmol.conformers[0].write_xyz()) #quit() #for conf in qmmol.conformers: # print(conf.label, conf.results['energy']) # conf.write_xyz() # Get most stable conformer. If most stable conformer # not identical to initial conf try second lowest. #name, smi, chrg, mult, cps): GS conformers search given a smiles string #print(qmconf.results, qmconf.label) # find storage energy | 2.065502 | 2 |
task_list_dev/__init__.py | HenriqueLR/task-list-dev | 0 | 6621995 | <reponame>HenriqueLR/task-list-dev
# coding: utf-8
from task_list_dev import tools
VERSION = (1, 0, 0, 'final', 0)
def get_version(*args, **kwargs):
from task_list_dev.utils.version import get_version
return get_version(*args, **kwargs)
__version__ = get_version(VERSION)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__url__ = "https://github.com/HenriqueLR/task-list-dev"
| # coding: utf-8
from task_list_dev import tools
VERSION = (1, 0, 0, 'final', 0)
def get_version(*args, **kwargs):
from task_list_dev.utils.version import get_version
return get_version(*args, **kwargs)
__version__ = get_version(VERSION)
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__url__ = "https://github.com/HenriqueLR/task-list-dev" | en | 0.833554 | # coding: utf-8 | 1.949049 | 2 |
code/my_layers.py | pmhalvor/imn | 103 | 6621996 | import keras.backend as K
from keras.engine.topology import Layer
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.layers.convolutional import Conv1D
import numpy as np
class Attention(Layer):
def __init__(self,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Content Attention mechanism.
Supports Masking.
"""
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
self.W = self.add_weight((input_shape[-1], ),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((1,),
initializer='zeros',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input_tensor, mask=None):
return None
def call(self, input_tensor, mask=None):
x = input_tensor
query = self.W
query = K.expand_dims(query, axis=-2)
eij = K.sum(x*query, axis=-1)
if self.bias:
eij += self.b
a = K.exp(eij)
a_sigmoid = K.sigmoid(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a_sigmoid *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
return [a, a_sigmoid]
def compute_output_shape(self, input_shape):
return [(input_shape[0], input_shape[1]),
(input_shape[0], input_shape[1])]
class Self_attention(Layer):
def __init__(self, use_opinion,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Content Attention mechanism.
Supports Masking.
"""
self.use_opinion = use_opinion
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(Self_attention, self).__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[0][-1]
self.steps = input_shape[0][-2]
self.W = self.add_weight((input_dim, input_dim),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_dim,),
initializer='zeros',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, x, mask):
return mask
def call(self, input_tensor, mask):
x = input_tensor[0]
gold_opinion = input_tensor[1]
predict_opinion = input_tensor[2]
gold_prob = input_tensor[3]
mask = mask[0]
assert mask is not None
x_tran = K.dot(x, self.W)
if self.bias:
x_tran += self.b
x_transpose = K.permute_dimensions(x, (0,2,1))
weights = K.batch_dot(x_tran, x_transpose)
location = np.abs(np.tile(np.array(range(self.steps)), (self.steps,1)) - np.array(range(self.steps)).reshape(self.steps,1))
loc_weights = 1.0 / (location+K.epsilon())
loc_weights *= K.cast((location!=0), K.floatx())
weights *= loc_weights
if self.use_opinion:
gold_opinion_ = gold_opinion[:,:,1]+gold_opinion[:,:,2]
predict_opinion_ = predict_opinion[:,:,3]+predict_opinion[:,:,4]
# gold_prob is either 0 or 1
opinion_weights = gold_prob*gold_opinion_ + (1.-gold_prob)*predict_opinion_
opinion_weights = K.expand_dims(opinion_weights, axis=-2)
weights *= opinion_weights
weights = K.tanh(weights)
weights = K.exp(weights)
weights *= (np.eye(self.steps)==0)
if mask is not None:
mask = K.expand_dims(mask, axis=-2)
mask = K.repeat_elements(mask, self.steps, axis=1)
weights *= K.cast(mask, K.floatx())
weights /= K.cast(K.sum(weights, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
output = K.batch_dot(weights, x)
return output
class WeightedSum(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(WeightedSum, self).__init__(**kwargs)
def call(self, input_tensor, mask=None):
assert type(input_tensor) == list
assert type(mask) == list
x = input_tensor[0]
a = input_tensor[1]
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][-1])
def compute_mask(self, x, mask=None):
return None
class Conv1DWithMasking(Conv1D):
def __init__(self, **kwargs):
self.supports_masking = True
super(Conv1DWithMasking, self).__init__(**kwargs)
def compute_mask(self, x, mask):
return mask
class Remove_domain_emb(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(Remove_domain_emb, self).__init__(**kwargs)
def call(self, x, mask=None):
mask_ = np.ones((400,))
mask_[300:]=0
embs = x*mask_
return embs
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, x, mask):
return mask
| import keras.backend as K
from keras.engine.topology import Layer
from keras import initializers
from keras import regularizers
from keras import constraints
from keras.layers.convolutional import Conv1D
import numpy as np
class Attention(Layer):
def __init__(self,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Content Attention mechanism.
Supports Masking.
"""
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(Attention, self).__init__(**kwargs)
def build(self, input_shape):
self.W = self.add_weight((input_shape[-1], ),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((1,),
initializer='zeros',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, input_tensor, mask=None):
return None
def call(self, input_tensor, mask=None):
x = input_tensor
query = self.W
query = K.expand_dims(query, axis=-2)
eij = K.sum(x*query, axis=-1)
if self.bias:
eij += self.b
a = K.exp(eij)
a_sigmoid = K.sigmoid(eij)
if mask is not None:
a *= K.cast(mask, K.floatx())
a_sigmoid *= K.cast(mask, K.floatx())
a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
return [a, a_sigmoid]
def compute_output_shape(self, input_shape):
return [(input_shape[0], input_shape[1]),
(input_shape[0], input_shape[1])]
class Self_attention(Layer):
def __init__(self, use_opinion,
W_regularizer=None, b_regularizer=None,
W_constraint=None, b_constraint=None,
bias=True, **kwargs):
"""
Keras Layer that implements an Content Attention mechanism.
Supports Masking.
"""
self.use_opinion = use_opinion
self.supports_masking = True
self.init = initializers.get('glorot_uniform')
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.W_constraint = constraints.get(W_constraint)
self.b_constraint = constraints.get(b_constraint)
self.bias = bias
super(Self_attention, self).__init__(**kwargs)
def build(self, input_shape):
input_dim = input_shape[0][-1]
self.steps = input_shape[0][-2]
self.W = self.add_weight((input_dim, input_dim),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((input_dim,),
initializer='zeros',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
self.built = True
def compute_mask(self, x, mask):
return mask
def call(self, input_tensor, mask):
x = input_tensor[0]
gold_opinion = input_tensor[1]
predict_opinion = input_tensor[2]
gold_prob = input_tensor[3]
mask = mask[0]
assert mask is not None
x_tran = K.dot(x, self.W)
if self.bias:
x_tran += self.b
x_transpose = K.permute_dimensions(x, (0,2,1))
weights = K.batch_dot(x_tran, x_transpose)
location = np.abs(np.tile(np.array(range(self.steps)), (self.steps,1)) - np.array(range(self.steps)).reshape(self.steps,1))
loc_weights = 1.0 / (location+K.epsilon())
loc_weights *= K.cast((location!=0), K.floatx())
weights *= loc_weights
if self.use_opinion:
gold_opinion_ = gold_opinion[:,:,1]+gold_opinion[:,:,2]
predict_opinion_ = predict_opinion[:,:,3]+predict_opinion[:,:,4]
# gold_prob is either 0 or 1
opinion_weights = gold_prob*gold_opinion_ + (1.-gold_prob)*predict_opinion_
opinion_weights = K.expand_dims(opinion_weights, axis=-2)
weights *= opinion_weights
weights = K.tanh(weights)
weights = K.exp(weights)
weights *= (np.eye(self.steps)==0)
if mask is not None:
mask = K.expand_dims(mask, axis=-2)
mask = K.repeat_elements(mask, self.steps, axis=1)
weights *= K.cast(mask, K.floatx())
weights /= K.cast(K.sum(weights, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
output = K.batch_dot(weights, x)
return output
class WeightedSum(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(WeightedSum, self).__init__(**kwargs)
def call(self, input_tensor, mask=None):
assert type(input_tensor) == list
assert type(mask) == list
x = input_tensor[0]
a = input_tensor[1]
a = K.expand_dims(a)
weighted_input = x * a
return K.sum(weighted_input, axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0][0], input_shape[0][-1])
def compute_mask(self, x, mask=None):
return None
class Conv1DWithMasking(Conv1D):
def __init__(self, **kwargs):
self.supports_masking = True
super(Conv1DWithMasking, self).__init__(**kwargs)
def compute_mask(self, x, mask):
return mask
class Remove_domain_emb(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(Remove_domain_emb, self).__init__(**kwargs)
def call(self, x, mask=None):
mask_ = np.ones((400,))
mask_[300:]=0
embs = x*mask_
return embs
def compute_output_shape(self, input_shape):
return input_shape
def compute_mask(self, x, mask):
return mask
| en | 0.861602 | Keras Layer that implements an Content Attention mechanism. Supports Masking. Keras Layer that implements an Content Attention mechanism. Supports Masking. # gold_prob is either 0 or 1 | 2.554902 | 3 |
src/local_simple_database/class_local_simple_database.py | stas-prokopiev/local_simple_database | 3 | 6621997 | """Module with class to handle all simple local databases"""
from __future__ import unicode_literals
# Standard library imports
import logging
import datetime
# Third party imports
import dateutil.parser as parser
# Local imports
from local_simple_database.virtual_class_all_local_databases import \
VirtualAnyLocalDatabase
LOGGER = logging.getLogger("local_simple_database")
LIST_ALL_SUPPORTED_TYPES = ["int", "float", "str", "datetime", "date"]
class LocalSimpleDatabase(VirtualAnyLocalDatabase):
    """
    This class was built to handle all one value DataBase-s

    ...

    Parent Attributes
    -----------------
    self.str_path_main_database_dir : str
        Path to main folder with DataBase-s
    self.str_datetime_template_for_rolling : str
        Datetime template for folder name if to use rolling
    self.list_supported_types : list
        DataBase Types with which this local database can work
    self.dict_file_lock_by_fila_path : dict
        {file_path_1: FileLock object, ...}
    self.float_max_seconds_per_file_operation : float
        Seconds per file operation, need it for multiprocessing safety

    Attributes
    ----------
    self.dict_str_db_type_by_str_db_name : dict
        {database_1_name: str_value_type, ...}
    self.dict_list_db_allowed_types_by_str_db_name : dict
        {database_1_name: list_allowed_types_for_set_value, ...}
    self.dict_func_db_getter_by_str_db_name : dict
        {database_1_name: func_to_convert_str_to_value, ...}
    self.dict_func_db_setter_by_str_db_name : dict
        {database_1_name: func_to_convert_value_to_str, ...}
    """

    def __init__(
            self,
            str_path_database_dir=".",
            float_max_seconds_per_file_operation=0.01,
            str_datetime_template_for_rolling="",
    ):
        """Init DB-s object

        Parameters
        ----------
        str_path_database_dir : str, optional
            Path to main folder with DataBase-s (default is ".")
        float_max_seconds_per_file_operation : float
            Seconds per file operation, need it for multiprocessing safety
        str_datetime_template_for_rolling : str
            Datetime template for folder name if to use rolling
        """
        super(LocalSimpleDatabase, self).__init__(
            str_path_database_dir=str_path_database_dir,
            float_max_seconds_per_file_operation=(
                float_max_seconds_per_file_operation),
            str_datetime_template_for_rolling=(
                str_datetime_template_for_rolling),
        )
        self.list_supported_types = LIST_ALL_SUPPORTED_TYPES
        # Per-DB registries; filled lazily on first use of every DB name
        self.dict_func_db_getter_by_str_db_name = {}
        self.dict_func_db_setter_by_str_db_name = {}
        self.dict_str_db_type_by_str_db_name = {}
        self.dict_list_db_allowed_types_by_str_db_name = {}

    def init_new_class_obj(self, **kwargs):
        """Create a new instance of the same class object"""
        return LocalSimpleDatabase(**kwargs)

    def __getitem__(self, str_db_name):
        """self[database_name] method for getting DB current value

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use
        """
        if str_db_name not in self.dict_func_db_getter_by_str_db_name:
            self.init_new_simple_database(str_db_name)
        str_db_content = self.read_file_content(str_db_name)
        func_getter = self.dict_func_db_getter_by_str_db_name[str_db_name]
        return func_getter(str_db_content)

    def __setitem__(self, str_db_name, value_to_set):
        """self[database_name] = x method for setting DB value

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use
        value_to_set : object
            Value to set for DB
        """
        if str_db_name not in self.dict_func_db_setter_by_str_db_name:
            self.init_new_simple_database(str_db_name)
        # Check that value to set has a suitable type for this DB
        list_allowed_type = \
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name]
        assert isinstance(value_to_set, tuple(list_allowed_type)), (
            "ERROR: Unable to set for DB with type: " +
            str(self.dict_str_db_type_by_str_db_name[str_db_name]) +
            " Value with type: " + str(type(value_to_set))
        )
        # Get setter converter and save value
        func_setter = self.dict_func_db_setter_by_str_db_name[str_db_name]
        str_value_to_save = func_setter(value_to_set)
        self.save_file_content(str_value_to_save, str_db_name)
        LOGGER.debug(
            "For DataBase %s set value: %s", str_db_name, str_value_to_save)

    def init_new_simple_database(self, str_db_name):
        """Register getter/setter converters for a not-yet-seen DB name

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use; its suffix defines the value type
        """
        assert str_db_name, "ERROR: Database name should not be empty"
        # If DB was already initialized something went wrong upstream
        assert str_db_name not in self.dict_str_db_type_by_str_db_name, \
            "ERROR: DB {} is not defined, but shouldn't be so.".format(
                str_db_name)
        LOGGER.debug("Try to init new DB: %s", str_db_name)
        str_db_type = self.define_type_of_db_by_name(str_db_name)
        LOGGER.debug("DB type: %s", str_db_type)
        if str_db_type not in self.list_supported_types:
            raise KeyError(
                "Unable to init database with name: " + str_db_name +
                " As database type: " + str_db_type +
                " NOT in the list of allowed types: " +
                str(self.list_supported_types))
        self.dict_str_db_type_by_str_db_name[str_db_name] = str_db_type
        LOGGER.debug(
            "Initialize new database with name %s With type of values: %s",
            str_db_name, str(str_db_type).upper())
        #####
        # Build (getter, setter) converters for the detected type
        if str_db_type == "int":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [int]

            def getter(str_f_content):
                # Empty file content means the DB was never set -> 0
                return int(str_f_content) if str_f_content else int()

            def setter(value_to_set):
                return "%d" % value_to_set
        elif str_db_type == "float":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [int, float]

            def getter(str_f_content):
                return float(str_f_content) if str_f_content else float()

            def setter(value_to_set):
                # BUGFIX: the previous '"%d" % value' formatting silently
                # truncated every float to its integer part on save.
                return repr(float(value_to_set))
        elif str_db_type == "str":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [str]
            getter = str
            setter = str
        elif str_db_type == "datetime":
            # NOTE: datetime.date is accepted too since datetime.datetime
            # is its subclass; kept for backward compatibility.
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [datetime.date]

            def getter(str_f_content):
                if not str_f_content:
                    # Unset DB -> UNIX epoch in UTC
                    dt_obj = datetime.datetime(1970, 1, 1)
                    return dt_obj.replace(tzinfo=datetime.timezone.utc)
                try:
                    return datetime.datetime.fromisoformat(str_f_content)
                except (ValueError, AttributeError):
                    # AttributeError: fromisoformat is missing before py3.7
                    return parser.parse(str_f_content)

            def setter(value_to_set):
                return value_to_set.isoformat()
        else:  # str_db_type == "date" (type already validated above)
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [datetime.date]

            def getter(str_f_content):
                if not str_f_content:
                    # Unset DB -> UNIX epoch date
                    dt_obj = datetime.datetime(1970, 1, 1)
                    dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
                    return dt_obj.date()
                try:
                    return \
                        datetime.datetime.fromisoformat(str_f_content).date()
                except (ValueError, AttributeError):
                    # BUGFIX: the fallback used to return a full datetime;
                    # keep the "date" contract and return a date here too.
                    return parser.parse(str_f_content).date()

            def setter(value_to_set):
                # BUGFIX: datetime.date has no .date() method, so saving a
                # plain date used to raise AttributeError; accept both.
                if isinstance(value_to_set, datetime.datetime):
                    value_to_set = value_to_set.date()
                return value_to_set.isoformat()
        self.dict_func_db_getter_by_str_db_name[str_db_name] = getter
        self.dict_func_db_setter_by_str_db_name[str_db_name] = setter
| """Module with class to handle all simple local databases"""
from __future__ import unicode_literals
# Standard library imports
import logging
import datetime
# Third party imports
import dateutil.parser as parser
# Local imports
from local_simple_database.virtual_class_all_local_databases import \
VirtualAnyLocalDatabase
LOGGER = logging.getLogger("local_simple_database")
LIST_ALL_SUPPORTED_TYPES = ["int", "float", "str", "datetime", "date"]
class LocalSimpleDatabase(VirtualAnyLocalDatabase):
    """
    This class was built to handle all one value DataBase-s

    ...

    Parent Attributes
    -----------------
    self.str_path_main_database_dir : str
        Path to main folder with DataBase-s
    self.str_datetime_template_for_rolling : str
        Datetime template for folder name if to use rolling
    self.list_supported_types : list
        DataBase Types with which this local database can work
    self.dict_file_lock_by_fila_path : dict
        {file_path_1: FileLock object, ...}
    self.float_max_seconds_per_file_operation : float
        Seconds per file operation, need it for multiprocessing safety

    Attributes
    ----------
    self.dict_str_db_type_by_str_db_name : dict
        {database_1_name: str_value_type, ...}
    self.dict_list_db_allowed_types_by_str_db_name : dict
        {database_1_name: list_allowed_types_for_set_value, ...}
    self.dict_func_db_getter_by_str_db_name : dict
        {database_1_name: func_to_convert_str_to_value, ...}
    self.dict_func_db_setter_by_str_db_name : dict
        {database_1_name: func_to_convert_value_to_str, ...}
    """

    def __init__(
            self,
            str_path_database_dir=".",
            float_max_seconds_per_file_operation=0.01,
            str_datetime_template_for_rolling="",
    ):
        """Init DB-s object

        Parameters
        ----------
        str_path_database_dir : str, optional
            Path to main folder with DataBase-s (default is ".")
        float_max_seconds_per_file_operation : float
            Seconds per file operation, need it for multiprocessing safety
        str_datetime_template_for_rolling : str
            Datetime template for folder name if to use rolling
        """
        super(LocalSimpleDatabase, self).__init__(
            str_path_database_dir=str_path_database_dir,
            float_max_seconds_per_file_operation=(
                float_max_seconds_per_file_operation),
            str_datetime_template_for_rolling=(
                str_datetime_template_for_rolling),
        )
        self.list_supported_types = LIST_ALL_SUPPORTED_TYPES
        # Per-DB registries; filled lazily on first use of every DB name
        self.dict_func_db_getter_by_str_db_name = {}
        self.dict_func_db_setter_by_str_db_name = {}
        self.dict_str_db_type_by_str_db_name = {}
        self.dict_list_db_allowed_types_by_str_db_name = {}

    def init_new_class_obj(self, **kwargs):
        """Create a new instance of the same class object"""
        return LocalSimpleDatabase(**kwargs)

    def __getitem__(self, str_db_name):
        """self[database_name] method for getting DB current value

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use
        """
        if str_db_name not in self.dict_func_db_getter_by_str_db_name:
            self.init_new_simple_database(str_db_name)
        str_db_content = self.read_file_content(str_db_name)
        func_getter = self.dict_func_db_getter_by_str_db_name[str_db_name]
        return func_getter(str_db_content)

    def __setitem__(self, str_db_name, value_to_set):
        """self[database_name] = x method for setting DB value

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use
        value_to_set : object
            Value to set for DB
        """
        if str_db_name not in self.dict_func_db_setter_by_str_db_name:
            self.init_new_simple_database(str_db_name)
        # Check that value to set has a suitable type for this DB
        list_allowed_type = \
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name]
        assert isinstance(value_to_set, tuple(list_allowed_type)), (
            "ERROR: Unable to set for DB with type: " +
            str(self.dict_str_db_type_by_str_db_name[str_db_name]) +
            " Value with type: " + str(type(value_to_set))
        )
        # Get setter converter and save value
        func_setter = self.dict_func_db_setter_by_str_db_name[str_db_name]
        str_value_to_save = func_setter(value_to_set)
        self.save_file_content(str_value_to_save, str_db_name)
        LOGGER.debug(
            "For DataBase %s set value: %s", str_db_name, str_value_to_save)

    def init_new_simple_database(self, str_db_name):
        """Register getter/setter converters for a not-yet-seen DB name

        Parameters
        ----------
        str_db_name : str
            Name of DataBase which to use; its suffix defines the value type
        """
        assert str_db_name, "ERROR: Database name should not be empty"
        # If DB was already initialized something went wrong upstream
        assert str_db_name not in self.dict_str_db_type_by_str_db_name, \
            "ERROR: DB {} is not defined, but shouldn't be so.".format(
                str_db_name)
        LOGGER.debug("Try to init new DB: %s", str_db_name)
        str_db_type = self.define_type_of_db_by_name(str_db_name)
        LOGGER.debug("DB type: %s", str_db_type)
        if str_db_type not in self.list_supported_types:
            raise KeyError(
                "Unable to init database with name: " + str_db_name +
                " As database type: " + str_db_type +
                " NOT in the list of allowed types: " +
                str(self.list_supported_types))
        self.dict_str_db_type_by_str_db_name[str_db_name] = str_db_type
        LOGGER.debug(
            "Initialize new database with name %s With type of values: %s",
            str_db_name, str(str_db_type).upper())
        #####
        # Build (getter, setter) converters for the detected type
        if str_db_type == "int":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [int]

            def getter(str_f_content):
                # Empty file content means the DB was never set -> 0
                return int(str_f_content) if str_f_content else int()

            def setter(value_to_set):
                return "%d" % value_to_set
        elif str_db_type == "float":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [int, float]

            def getter(str_f_content):
                return float(str_f_content) if str_f_content else float()

            def setter(value_to_set):
                # BUGFIX: the previous '"%d" % value' formatting silently
                # truncated every float to its integer part on save.
                return repr(float(value_to_set))
        elif str_db_type == "str":
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [str]
            getter = str
            setter = str
        elif str_db_type == "datetime":
            # NOTE: datetime.date is accepted too since datetime.datetime
            # is its subclass; kept for backward compatibility.
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [datetime.date]

            def getter(str_f_content):
                if not str_f_content:
                    # Unset DB -> UNIX epoch in UTC
                    dt_obj = datetime.datetime(1970, 1, 1)
                    return dt_obj.replace(tzinfo=datetime.timezone.utc)
                try:
                    return datetime.datetime.fromisoformat(str_f_content)
                except (ValueError, AttributeError):
                    # AttributeError: fromisoformat is missing before py3.7
                    return parser.parse(str_f_content)

            def setter(value_to_set):
                return value_to_set.isoformat()
        else:  # str_db_type == "date" (type already validated above)
            self.dict_list_db_allowed_types_by_str_db_name[str_db_name] = \
                [datetime.date]

            def getter(str_f_content):
                if not str_f_content:
                    # Unset DB -> UNIX epoch date
                    dt_obj = datetime.datetime(1970, 1, 1)
                    dt_obj = dt_obj.replace(tzinfo=datetime.timezone.utc)
                    return dt_obj.date()
                try:
                    return \
                        datetime.datetime.fromisoformat(str_f_content).date()
                except (ValueError, AttributeError):
                    # BUGFIX: the fallback used to return a full datetime;
                    # keep the "date" contract and return a date here too.
                    return parser.parse(str_f_content).date()

            def setter(value_to_set):
                # BUGFIX: datetime.date has no .date() method, so saving a
                # plain date used to raise AttributeError; accept both.
                if isinstance(value_to_set, datetime.datetime):
                    value_to_set = value_to_set.date()
                return value_to_set.isoformat()
        self.dict_func_db_getter_by_str_db_name[str_db_name] = getter
        self.dict_func_db_setter_by_str_db_name[str_db_name] = setter
| en | 0.483177 | Module with class to handle all simple local databases # Standard library imports # Third party imports # Local imports This class was built to handle all one value DataBase-s ... Parent Attributes ----------------- self.str_path_main_database_dir : str Path to main folder with DataBase-s self.str_datetime_template_for_rolling : str Datetime template for folder name if to use rolling self.list_supported_types : list DataBase Types with which this local database can work self.dict_file_lock_by_fila_path : dict {file_path_1: FileLock object, ...} self.float_max_seconds_per_file_operation : float Seconds per file operation, need it for multiprocessing safety Attributes ---------- self.dict_str_db_type_by_str_db_name : dict {database_1_name: str_value_type, ...} self.dict_list_db_allowed_types_by_str_db_name : dict {database_1_name: list_allowed_types_for_set_value, ...} self.dict_func_db_getter_by_str_db_name : dict {database_1_name: func_to_convert_str_to_value, ...} self.dict_func_db_setter_by_str_db_name : dict {database_1_name: func_to_convert_value_to_str, ...} Init DB-s object Parameters ---------- str_path_database_dir : str, optional Path to main folder with DataBase-s (default is ".") float_max_seconds_per_file_operation : float Seconds per file operation, need it for multiprocessing safety str_datetime_template_for_rolling : str Datetime template for folder name if to use rolling # Init class of all local DataBases Create a new instance of the same class object Parameters ---------- self[database_name] method for getting DB current value Parameters ---------- str_db_name : str Name of DataBase which to use self[database_name] = x method for setting DB value Parameters ---------- str_db_name : str Name of DataBase which to use value_to_set : object Value to set for DB ##### # Check that value to set has suitable type # Get setter converter and save value Method for first preparings for new database Parameters ---------- str_db_name : str 
Name of DataBase which to use # assert isinstance(str_db_name, str), ( # "ERROR: DataBase name should have type str, now it is: " + # str(type(str_db_name)) # ) ##### # If DB already initialized then finish execution ##### # Check that name of DataBase is correct ##### # Init new DataBase ##### # int ##### # float ##### # str # self.dict_func_db_getter_by_str_db_name[str_db_name] = \ # lambda str_f_content: str(str_f_content) # self.dict_func_db_setter_by_str_db_name[str_db_name] = \ # lambda value_to_set: str(value_to_set) ##### # datetime ##### # date | 2.558847 | 3 |
src/core/service/api/routing/Router.py | xxdunedainxx/Z-Py | 0 | 6621998 | from .IRouter import IRouter
from ....conf.API.routes.RouteConfig import RouteConfig
from ....util.app.ErrorFactory.api.RoutingErrors import InvalidAPIMethod,InvalidMethodForRoute,InternalAPIError
# For Route check decorator
from functools import wraps
from flask_restplus import Namespace,Resource
class Router(IRouter):
    """Router that validates HTTP methods for an API resource.

    Configuration comes from a RouteConfig; `route_check` produces a
    decorator that rejects requests whose method is not supported.
    """

    def __init__(self, routerConfig: RouteConfig):
        # Defaults first, then parent init, then copy the config fields in.
        self.core_route = ""
        self.api_specific_resource = ""
        self.supported_methods = []
        self.api_specific_routes = {}
        super().__init__(routerConfig=routerConfig)
        self.core_route = routerConfig.core_route
        self.api_specific_resource = routerConfig.api_specific_resource
        self.supported_methods = routerConfig.supported_methods
        self.api_specific_routes = routerConfig.api_specific_routes

    def route_check(self, method: str):
        # Decorator factory: wraps an API handler so it only runs when
        # `method` passes validation; otherwise the error payload is
        # returned to the caller.
        def route_check_decorate(api):
            @wraps(api)
            def validate(*args, **kwargs):
                outcome = self._validate_route(method=method, api=api)
                if outcome[0]["ok"] is True:
                    return api(*args, **kwargs)
                return outcome
            return validate
        return route_check_decorate

    def _validate_route(self,
                        method: str,
                        api: Resource):
        # Returns a (payload, status) tuple in every branch.
        if method in self.supported_methods:
            return {"ok": True}, 200
        if method not in self.supported_methods:
            return InvalidAPIMethod(method=method).raise_error()
        # Defensive fallback (unreachable given the two checks above).
        genericError = InternalAPIError(
            message="Something went wrong validating the route.",
            returnCode=500)
        return genericError.raise_error()
| from .IRouter import IRouter
from ....conf.API.routes.RouteConfig import RouteConfig
from ....util.app.ErrorFactory.api.RoutingErrors import InvalidAPIMethod,InvalidMethodForRoute,InternalAPIError
# For Route check decorator
from functools import wraps
from flask_restplus import Namespace,Resource
class Router(IRouter):
    """Router that validates HTTP methods for an API resource.

    Configuration comes from a RouteConfig; `route_check` produces a
    decorator that rejects requests whose method is not supported.
    """

    def __init__(self, routerConfig: RouteConfig):
        # Defaults first, then parent init, then copy the config fields in.
        self.core_route = ""
        self.api_specific_resource = ""
        self.supported_methods = []
        self.api_specific_routes = {}
        super().__init__(routerConfig=routerConfig)
        self.core_route = routerConfig.core_route
        self.api_specific_resource = routerConfig.api_specific_resource
        self.supported_methods = routerConfig.supported_methods
        self.api_specific_routes = routerConfig.api_specific_routes

    def route_check(self, method: str):
        # Decorator factory: wraps an API handler so it only runs when
        # `method` passes validation; otherwise the error payload is
        # returned to the caller.
        def route_check_decorate(api):
            @wraps(api)
            def validate(*args, **kwargs):
                outcome = self._validate_route(method=method, api=api)
                if outcome[0]["ok"] is True:
                    return api(*args, **kwargs)
                return outcome
            return validate
        return route_check_decorate

    def _validate_route(self,
                        method: str,
                        api: Resource):
        # Returns a (payload, status) tuple in every branch.
        if method in self.supported_methods:
            return {"ok": True}, 200
        if method not in self.supported_methods:
            return InvalidAPIMethod(method=method).raise_error()
        # Defensive fallback (unreachable given the two checks above).
        genericError = InternalAPIError(
            message="Something went wrong validating the route.",
            returnCode=500)
        return genericError.raise_error()
| en | 0.224677 | # For Route check decorator #def get_route(self,method: str)->str: # return self._validate_route( # method=method) # Custom Route Check Decorator #elif self.api_specific_routes[method] != route: # invalidRouteMethod=InvalidMethodForRoute( # method=method, # route=route) # return invalidRouteMethod.raise_error() | 2.286432 | 2 |
Python/CeV/Exercicios/ex86.py | WerickL/Learning | 0 | 6621999 | mat = [[], [], []]
for L in range(0, 3):
for C in range(0, 3):
val = int(input(f'Digite o valor [{L + 1}][{C + 1}] da matriz: '))
mat[L].insert(C, val)
for L in range(0, 3):
for C in range(0, 3):
print(f'[{mat[L][C]:^5}]', end='')
print()
# cont = 0
# for L in range(0, 3):
# for C in range(0, 3):
# print(f'[ {mat[cont][0]} ]', end='')
# cont += 1
# print('\n', end='')
# Read a 3x3 matrix from the user, then print it with centered cells.
mat = []
for row_idx in range(3):
    row = []
    for col_idx in range(3):
        prompt = f'Digite o valor [{row_idx + 1}][{col_idx + 1}] da matriz: '
        row.append(int(input(prompt)))
    mat.append(row)
for row in mat:
    print(''.join(f'[{cell:^5}]' for cell in row))
| en | 0.310957 | # cont = 0 # for L in range(0, 3): # for C in range(0, 3): # print(f'[ {mat[cont][0]} ]', end='') # cont += 1 # print('\n', end='') | 3.936171 | 4 |
src/my_model.py | gargrohin/Melody_extraction | 3 | 6622000 |
'''
Code for CRNN model and it's training.
Also has the data_generator used. The necessary reshaping of the matrices, normalization, etc. is done here.
Take note of the paths to the data_generator.
Previous knowledge of generators and keras/tensorflow required to understand the code.
'''
import numpy as np
import librosa
import os
#import logging
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau,Callback
import keras as k
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU,Dropout
from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="1"
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = False # to log device placement (on which device the operation ran)
# (nothing gets printed in Jupyter, only if you run it standalone)
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
import h5py
import json
import os
#import csv
import sys
#import pandas as pd
#import mir_eval
import math
from sklearn.preprocessing import LabelBinarizer,normalize
def train_model(model):
    '''
    Compile the given model and train it with the module-level data
    generators built from ``train_names`` / ``val_names``.

    :param model: Keras.Model - constructed, uncompiled model
    :return: model: the same model after fitting
    '''
    # Adam with gradient clipping; an unused SGD optimizer and an unused
    # local batch_size were removed from the original body.
    opt_ADAM = Adam(clipnorm=1., clipvalue=0.5)
    model.compile(loss='categorical_crossentropy', optimizer=opt_ADAM,
                  metrics=['accuracy'])
    cb = set_callbacks()
    model.fit_generator(generator=generator(train_names),
                        steps_per_epoch=85,
                        epochs=100,
                        validation_data=generator(val_names),
                        validation_steps=20,
                        callbacks=cb,
                        verbose=1)
    return model
def sq(x):
    """Squeeze away the trailing singleton channel axis (axis=4)."""
    # Local import: presumably kept inside the function so the Lambda layer
    # that wraps `sq` is self-contained when the model is serialized -- confirm.
    from keras import backend as K
    return K.squeeze(x, axis=4)
def construct_model():
    '''
    Constructs the CRNN model.

    Input per sample: (20 patches, 50 frames, 301 features, 1 channel).
    Output per sample: (1000 frames, 61 classes), softmax over classes.

    :return: model: Constructed (uncompiled) Keras Model object
    '''
    number_of_patches = 20
    patch_size = 50
    feature_size = 301
    number_of_classes = 61
    step_notes = 5
    RNN = 'LSTM'
    kernel_coeff = 0.00001
    number_of_channels = 1

    input_shape = (number_of_patches, patch_size, feature_size,
                   number_of_channels)
    inputs = Input(shape=input_shape)
    # NOTE: an unconnected ZeroPadding3D((0, 0, 2)) applied to `inputs`
    # was removed; it never reached the model output.

    #### CNN LAYERS ####
    cnn1 = TimeDistributed(Conv2D(64, (1, 5),
                                  padding='valid',
                                  activation='relu',
                                  # BUGFIX: np.int is deprecated/removed in
                                  # modern NumPy; the builtin is identical.
                                  strides=(1, int(step_notes)),
                                  kernel_regularizer=k.regularizers.l2(kernel_coeff),
                                  data_format='channels_last',
                                  name='cnn1'))(inputs)
    cnn1a = BatchNormalization()(cnn1)

    zp = ZeroPadding3D(padding=(0, 1, 2))(cnn1a)
    cnn2 = TimeDistributed(
        Conv2D(64, (3, 5), padding='valid', activation='relu',
               data_format='channels_last', name='cnn2'))(zp)
    cnn2a = BatchNormalization()(cnn2)

    zp = ZeroPadding3D(padding=(0, 1, 1))(cnn2a)
    cnn3 = TimeDistributed(
        Conv2D(64, (3, 3), padding='valid', activation='relu',
               data_format='channels_last', name='cnn3'))(zp)
    cnn3a = BatchNormalization()(cnn3)

    zp = ZeroPadding3D(padding=(0, 1, 7))(cnn3a)
    cnn4 = TimeDistributed(
        Conv2D(16, (3, 15), padding='valid', activation='relu',
               data_format='channels_last', name='cnn4'))(zp)
    cnn4a = BatchNormalization()(cnn4)

    cnn5 = TimeDistributed(
        Conv2D(1, (1, 1), padding='same', activation='relu',
               data_format='channels_last', name='cnn5'))(cnn4a)

    #### RESHAPING LAYERS ####
    # Drop the trailing channel axis, then merge the patch and frame axes
    # so the classifier sees one continuous sequence of frames.
    cnn5a = Lambda(sq)(cnn5)
    cnn5b = Reshape((number_of_patches * patch_size, -1),
                    name='cnn5-reshape')(cnn5a)

    #### CLASSIFICATION (DENSE) LAYER ####
    classifier = TimeDistributed(Dense(number_of_classes,
                                       activation='softmax',
                                       kernel_regularizer=k.regularizers.l2(0.00001),
                                       bias_regularizer=k.regularizers.l2()),
                                 name='output')(cnn5b)

    model = Model(inputs=inputs, outputs=classifier)
    # BUGFIX: the old guard "if verbose == True or 1" was always true, so
    # the summary is now printed unconditionally without the confusing test.
    model.summary()
    print('{0} as RNN!'.format(RNN))
    return model
def normalize(v):
    """Return v scaled to unit L2 norm; a zero vector is returned unchanged."""
    magnitude = np.linalg.norm(v)
    return v if magnitude == 0 else v / magnitude
def song(sp, gp):
    """Load one song's HF0 matrix and ground truth, cut into 50-frame patches.

    :param sp: path to the saved HF0 feature matrix (features x frames)
    :param gp: path to the saved per-frame ground-truth labels
    :return: (x, y) with x shaped (n_patches, 50, 301) and
             y shaped (n_patches, 50, 61) one-hot labels
    """
    hf0 = normalize(np.load(sp))
    labels = np.load(gp)
    n_frames = hf0.shape[1]
    # One-hot (61 classes) ground truth; label 0 marks "no active class"
    # and leaves the whole column zero.
    onehot = np.zeros([61, n_frames])
    for frame in range(n_frames):
        label = labels[frame]
        if label != 0:
            onehot[int(label - 1), frame] = 1
    patches_size = 50
    number_of_patches = math.floor(n_frames / patches_size)
    x = np.zeros([number_of_patches, patches_size, 301])
    y = np.zeros([number_of_patches, patches_size, 61])
    for patch in range(number_of_patches):
        start = patch * patches_size
        stop = start + patches_size
        x[patch] = np.swapaxes(hf0[:, start:stop], 0, 1)
        y[patch] = np.swapaxes(onehot[:, start:stop], 0, 1)
    return x, y
# List of preprocessed song names; 'names.npy' must exist in the CWD.
names = np.load('names.npy')
l = names.shape[0]
# 80/20 split of song names into training and validation partitions.
train_names = names[:int(0.8*l)]
val_names = names[int(0.8*l):]
# def on_epoch_end(indices):
# 'Updates indexes after each epoch'
# np.random.shuffle(indices)
# return indices
def generator(names):
    """Infinite batch generator over preprocessed songs.

    :param names: array of song file names; ``name[:-1] + 'y'`` is the
        on-disk file name (presumably swaps the last extension char -- TODO
        confirm against the preprocessing step).
    Yields (x, y) with x shaped (16, 20, 50, 301, 1) and
    y shaped (16, 1000, 61).
    """
    sp = 'outs_hf/out_'
    gp = 'ground/'
    batch_size = 16
    patch_size = 50
    number_of_patches = 20
    indices = np.arange(names.shape[0])
    while True:
        np.random.shuffle(indices)
        x_batch = np.zeros(
            [batch_size, number_of_patches, patch_size, 301, 1])
        y_batch = np.zeros([batch_size, patch_size * number_of_patches, 61])
        batch_count = 0
        for idx in indices:
            name = names[idx]
            x, y = song(sp + name[:-1] + 'y', gp + name[:-1] + 'y')
            i = 0
            # Group consecutive 50-frame patches into samples of 20 patches.
            while (i + 1) * number_of_patches <= x.shape[0]:
                x_sample = np.reshape(
                    x[i * number_of_patches:(i + 1) * number_of_patches, :, :],
                    [number_of_patches, patch_size, 301, 1])
                y_sample = np.zeros([number_of_patches * patch_size, 61])
                for j in range(number_of_patches):
                    # BUGFIX: labels previously came from y[i + j], which is
                    # misaligned with x[i*20:(i+1)*20] for every group after
                    # the first; index by i * number_of_patches + j instead.
                    y_sample[j * patch_size:(j + 1) * patch_size, :] = \
                        y[i * number_of_patches + j]
                i += 1
                # BUGFIX: the sample that arrived when the batch was already
                # full used to be silently dropped; now every sample is kept.
                x_batch[batch_count] = x_sample
                y_batch[batch_count] = y_sample
                batch_count += 1
                if batch_count == batch_size:
                    yield x_batch, y_batch
                    # Fresh buffers: the consumer may still hold the yielded
                    # arrays, so do not mutate them in place.
                    x_batch = np.zeros(
                        [batch_size, number_of_patches, patch_size, 301, 1])
                    y_batch = np.zeros(
                        [batch_size, patch_size * number_of_patches, 61])
                    batch_count = 0
def set_callbacks():
    '''
    Builds the callback list used for network training.

    :return: cb: List of callbacks (early stopping, checkpointing to
        'model3.h5', and learning-rate reduction on plateau)
    '''
    early_stop = EarlyStopping(monitor='val_loss',
                               patience=20,
                               verbose=True)
    checkpoint = ModelCheckpoint('model3.h5',
                                 monitor='val_loss',
                                 save_best_only=True,
                                 verbose=False)
    lr_schedule = ReduceLROnPlateau(monitor='val_loss',
                                    patience=10,
                                    verbose=True)
    return [early_stop, checkpoint, lr_schedule]
# Build the CRNN, train it, then persist architecture and weights.
model = construct_model()
model_trained = train_model(model)
# Architecture -> JSON file
model_json = model_trained.to_json()
with open("model3.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model_trained.save_weights("model3.h5")
|
'''
Code for CRNN model and it's training.
Also has the data_generator used. The neccessary reshaping of the matrices and normalization etc. done here.
Take note of the paths to the data_generator.
Previous knowledge of generators and keras/tensorflow required to understand the code.
'''
import numpy as np
import librosa
import os
#import logging
from keras.optimizers import SGD, Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau,Callback
import keras as k
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, Reshape, BatchNormalization, Bidirectional, GRU,Dropout
from keras.layers import Conv2D, LSTM, Input, TimeDistributed, Lambda, ZeroPadding3D
# Pin training to GPU 1 and let TensorFlow grow its memory allocation on
# demand (TF 1.x / standalone-Keras session API).
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="1"
#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
config.log_device_placement = False # to log device placement (on which device the operation ran)
# (nothing gets printed in Jupyter, only if you run it standalone)
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
import h5py
import json
import os
#import csv
import sys
#import pandas as pd
#import mir_eval
import math
# NOTE(review): this sklearn `normalize` import is shadowed by the local
# `normalize` function defined later in the file -- confirm which is intended.
from sklearn.preprocessing import LabelBinarizer,normalize
def train_model(model):
    '''
    Compile and train the given Keras model with fixed hyper-parameters.

    Training and validation data come from the module-level ``generator``
    fed with ``train_names`` / ``val_names``; callbacks come from
    ``set_callbacks`` (early stopping, checkpointing, LR reduction).

    :param model: keras.Model - constructed, uncompiled model
    :return: the trained model (weights are updated in place)
    '''
    # Adam with gradient clipping keeps the recurrent-style training stable;
    # opt_SGD is kept as a ready-made alternative optimizer.
    opt_ADAM = Adam(clipnorm=1., clipvalue=0.5)
    opt_SGD = SGD(lr=0.0005, decay=1e-4, momentum=0.9, nesterov=True)
    # Compile the model
    model.compile(loss='categorical_crossentropy', optimizer=opt_ADAM, metrics=['accuracy'])
    cb = set_callbacks()
    # steps_per_epoch / validation_steps are hard-coded to match the
    # dataset size at batch_size 16 -- TODO confirm against the data.
    model.fit_generator(generator = generator(train_names),
                        steps_per_epoch = 85,
                        epochs = 100,
                        validation_data= generator(val_names),
                        validation_steps= 20,
                        callbacks= cb,
                        verbose= 1)
    return model
def sq(x):
    """Drop the trailing singleton channel axis (axis 4) from a 5-D tensor.

    Used inside a Keras ``Lambda`` layer; the local import keeps the
    function self-contained so the Lambda can be serialized.
    """
    from keras import backend as K
    return K.squeeze(x, axis=4)
def construct_model():
    '''
    Construct the CRNN model (currently CNN + dense only; the recurrent
    layers are disabled in this version).

    Input per sample: (20 patches, 50 frames, 301 HF0 bins, 1 channel).
    Output: framewise softmax over 61 classes, shaped (1000, 61).

    :return: model: constructed keras Model object (uncompiled)
    '''
    number_of_patches = 20
    patch_size = 50
    feature_size = 301        # HF0 bins per frame
    number_of_classes = 61    # note classes incl. silence
    step_notes = 5            # bins per semitone -> stride of cnn1
    RNN = 'LSTM'              # only used in the log message below
    kernel_coeff = 0.00001
    number_of_channels = 1
    input_shape = (number_of_patches, patch_size, feature_size, number_of_channels)
    inputs = Input(shape=input_shape)
    #### CNN LAYERS ####
    # cnn1 collapses the per-semitone frequency resolution with a strided
    # 1x5 kernel.  (A ZeroPadding3D((0, 0, 2)) used to be computed here but
    # was never consumed -- cnn1 reads `inputs` directly -- so the dead
    # assignment has been removed; the layer graph is unchanged.)
    cnn1 = TimeDistributed(Conv2D(64, (1, 5),
                                  padding='valid',
                                  activation='relu',
                                  strides=(1, np.int(step_notes)),
                                  kernel_regularizer=k.regularizers.l2(kernel_coeff),
                                  data_format='channels_last', name='cnn1'))(inputs)
    cnn1a = BatchNormalization()(cnn1)
    zp = ZeroPadding3D(padding=(0, 1, 2))(cnn1a)
    cnn2 = TimeDistributed(
        Conv2D(64, (3, 5), padding='valid', activation='relu', data_format='channels_last', name='cnn2'))(zp)
    cnn2a = BatchNormalization()(cnn2)
    zp = ZeroPadding3D(padding=(0, 1, 1))(cnn2a)
    cnn3 = TimeDistributed(
        Conv2D(64, (3, 3), padding='valid', activation='relu', data_format='channels_last', name='cnn3'))(zp)
    cnn3a = BatchNormalization()(cnn3)
    zp = ZeroPadding3D(padding=(0, 1, 7))(cnn3a)
    cnn4 = TimeDistributed(
        Conv2D(16, (3, 15), padding='valid', activation='relu', data_format='channels_last', name='cnn4'))(zp)
    cnn4a = BatchNormalization()(cnn4)
    # 1x1 convolution squeezes the feature maps down to one channel.
    cnn5 = TimeDistributed(
        Conv2D(1, (1, 1), padding='same', activation='relu', data_format='channels_last', name='cnn5'))(cnn4a)
    #### RESHAPING LAYERS ####
    cnn5a = Lambda(sq)(cnn5)  # drop the singleton channel axis
    cnn5b = Reshape((number_of_patches * patch_size, -1), name='cnn5-reshape')(cnn5a)
    #### CLASSIFICATION (DENSE) LAYER ####
    classifier = TimeDistributed(Dense(number_of_classes,
                                       activation='softmax',
                                       kernel_regularizer=k.regularizers.l2(0.00001),
                                       bias_regularizer=k.regularizers.l2()), name='output')(cnn5b)
    model = Model(inputs=inputs, outputs=classifier)
    # The original guard `if verbose == True or 1:` was always true, so the
    # summary is now printed unconditionally (behavior unchanged).
    model.summary()
    print('{0} as RNN!'.format(RNN))
    return model
def normalize(v):
    """Scale *v* to unit Euclidean norm; a zero vector is returned unchanged.

    NOTE(review): this intentionally shadows ``sklearn.preprocessing.normalize``
    imported at the top of the file.
    """
    magnitude = np.linalg.norm(v)
    return v if magnitude == 0 else v / magnitude
def song(sp, gp):
    """Load one song's HF0 features and ground truth, cut into 50-frame patches.

    :param sp: path of the .npy HF0 matrix (301 bins x T frames -- required
               by the fixed 301 width below)
    :param gp: path of the .npy per-frame label vector (0 = silence,
               otherwise a 1-based class index; assumed <= 61 -- TODO confirm)
    :return: (x, y) with shapes (n_patches, 50, 301) and (n_patches, 50, 61)
    """
    hf0 = normalize(np.load(sp))
    ground = np.load(gp)
    n_frames = hf0.shape[1]
    # One-hot encode the labels: class n (1..61) -> row n-1; 0 stays all-zero.
    onehot = np.zeros([61, n_frames])
    for t in range(n_frames):
        if ground[t] != 0:
            onehot[int(ground[t] - 1), t] = 1
    patch_len = 50
    n_patches = math.floor(n_frames / patch_len)
    x = np.zeros([n_patches, patch_len, 301])
    y = np.zeros([n_patches, patch_len, 61])
    # Transpose each 50-frame slice so time becomes the leading axis.
    for p in range(n_patches):
        start = p * patch_len
        x[p] = hf0[:, start:start + patch_len].T
        y[p] = onehot[:, start:start + patch_len].T
    return x, y
# 80/20 split of the pre-computed song-name list into train / validation sets.
names = np.load('names.npy')
l = names.shape[0]
train_names = names[:int(0.8*l)]
val_names = names[int(0.8*l):]
# def on_epoch_end(indices):
# 'Updates indexes after each epoch'
# np.random.shuffle(indices)
# return indices
def generator(names):
    """
    Infinite batch generator for Keras ``fit_generator``.

    For every epoch the song order is shuffled; each song is cut into
    groups of 20 consecutive 50-frame patches and groups are packed into
    batches of 16.  Yields (x, y) with shapes
    (16, 20, 50, 301, 1) and (16, 1000, 61).

    :param names: array of song file names ('outs_hf/out_<name>' features,
                  'ground/<name>' labels)
    """
    sp = 'outs_hf/out_'
    gp = 'ground/'
    indices = np.arange(names.shape[0])
    while True:
        np.random.shuffle(indices)
        count = 0
        batch_size = 16
        patch_size = 50
        number_of_patches = 20
        x_train_batch = np.zeros([batch_size, number_of_patches, patch_size, 301, 1])
        y_train_batch = np.zeros([batch_size, patch_size * number_of_patches, 61])
        batch_count = 0
        for idx in indices:
            name = names[idx]
            x, y = song(sp + name[:-1] + 'y', gp + name[:-1] + 'y')
            g = 0  # patch-group index within this song (20 patches per group)
            while (g + 1) * number_of_patches <= x.shape[0]:
                count += 1
                x_train = np.reshape(
                    x[g * number_of_patches:(g + 1) * number_of_patches, :, :],
                    [number_of_patches, patch_size, 301, 1])
                y_train = np.zeros([number_of_patches * patch_size, 61])
                for j in range(number_of_patches):
                    # BUG FIX: labels previously used y[g + j], which only
                    # lines up with the features for the first group (g == 0);
                    # the correct patch index is g * number_of_patches + j.
                    y_train[j * patch_size:(j + 1) * patch_size, :] = y[g * number_of_patches + j]
                g += 1
                # BUG FIX: the old batch logic yielded only when a 17th
                # sample arrived and silently dropped that sample; now the
                # full batch is yielded first and the current sample starts
                # the next batch.
                if batch_count == batch_size:
                    yield x_train_batch, y_train_batch
                    batch_count = 0
                x_train_batch[batch_count] = x_train
                y_train_batch[batch_count] = y_train
                batch_count += 1
        # Epoch-end diagnostics: number of patch groups produced.
        print()
        print(count)
        print()
def set_callbacks():
    '''
    Build the Keras callbacks used during training.

    - EarlyStopping: stop after 20 epochs without val_loss improvement.
    - ModelCheckpoint: keep the best weights (by val_loss) in 'model3.h5'.
    - ReduceLROnPlateau: lower the learning rate after 10 stale epochs.

    :return: cb: list of configured Callback objects
    '''
    # Callbacks
    cb = [EarlyStopping(monitor='val_loss',
                        patience=20,
                        verbose=True),
          ModelCheckpoint('model3.h5',
                          monitor='val_loss',
                          save_best_only=True,
                          verbose=False),
          ReduceLROnPlateau(monitor='val_loss',
                            patience=10,
                            verbose=True)]
    return cb
# Build, train and persist the CRNN: architecture as JSON, weights as HDF5.
model = construct_model()
model_trained = train_model(model)
model_json = model_trained.to_json()
with open("model3.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
# NOTE(review): this overwrites the best-checkpoint weights that
# ModelCheckpoint saved under the same filename 'model3.h5' -- confirm intended.
model_trained.save_weights("model3.h5")
| en | 0.526075 | Code for CRNN model and it's training. Also has the data_generator used. The neccessary reshaping of the matrices and normalization etc. done here. Take note of the paths to the data_generator. Previous knowledge of generators and keras/tensorflow required to understand the code. #import logging #os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # dynamically grow the memory used on the GPU # to log device placement (on which device the operation ran) # (nothing gets printed in Jupyter, only if you run it standalone) # set this TensorFlow session as the default session for Keras #import csv #import pandas as pd #import mir_eval The function that trains a certain neural network model with the given arguments. :param model: Keras.Model - Constructed model :param args: List - Input arguments :return: # x_train, y_train, x_validation, y_validation = load_dataset_TD(dataset_number=args.dataset_number, args=args) # # dataset_train_size = x_train.shape[0] # First dimension gives the number of samples # dataset_validation_size = x_validation.shape[0] # Set the optimizers # Compile the model # Use either a part of training set per epoch or all the set per epoch # if args.use_part_of_training_set_per_epoch: # number_of_batches_train = np.int(np.floor(args.training_amount_number_of_samples/args.batch_size)) # else: # number_of_batches_train = np.max((np.floor((dataset_train_size) / args.batch_size), 1)) # # number_of_batches_validation = np.max((np.floor(dataset_validation_size / args.batch_size), 1)) # if args.use_part_of_training_set: # filename = 'model{0}_' \ # 'datasetNumber-{1}_' \ # 'augment-{2}_patchSize-{3}_' \ # 'numberOfPatches-{4}_' \ # 'batchSize-{5}_' \ # 'batchInOneEpoch-{6}_' \ # 'trainingAmountPercentage-{7}'.format( # args.model_name, args.dataset_number, args.augment_data, args.patch_size, args.number_of_patches, # args.batch_size, number_of_batches_train, np.int(args.training_amount_percentage)) # else: # filename = 'model{0}_' \ # 
'datasetNumber-{1}_' \ # 'augment-{2}_' \ # 'patchSize-{3}_' \ # 'numberOfPatches-{4}_' \ # 'batchSize-{5}_' \ # 'batchInOneEpoch-{6}'.format( # args.model_name, args.dataset_number, args.augment_data, args.patch_size, args.number_of_patches, # args.batch_size, number_of_batches_train) #model.load_weights('{0}/{1}.h5'.format(get_trained_model_save_path(dataset_name=args.dataset_name), filename)) Construcs the CRNN model :param args: Input arguments :return: model: Constructed Model object #### CNN LAYERS #### #### RESHAPING LAYERS #### #### BIDIRECTIONAL RNN LAYERS #### # if RNN == 'LSTM': # rnn1 = Bidirectional(LSTM(128, # kernel_regularizer=k.regularizers.l1_l2(0.0001), # return_sequences=True), name='rnn1')(cnn5b) # elif RNN == 'GRU': # rnn1 = Bidirectional(GRU(128, # kernel_regularizer=k.regularizers.l1_l2(0.0001), # return_sequences=True), name='rnn1')(cnn5b) #### CLASSIFICATION (DENSE) LAYER #### #HF0 = librosa.power_to_db(HF0, ref=np.max) #HF0 = HF0['arr_0'] #print(gp) # if T != ground.shape[0]: # print(T, ground.shape[0]) # print('ground dimension error') # print(number_of_patches) # if i%patches_size == 0 and i !=0: # j+=1 # if j +1 == number_of_patches: # break #if j == 10: print(i) # print(y.shape) # def on_epoch_end(indices): # 'Updates indexes after each epoch' # np.random.shuffle(indices) # return indices # if train: # names = train_names # else: # names = val_names #print(y_train.shape) Sets the callback functions for the network training :param save_filename: Filename to be used in ModelCheckpoint :param args: Input arguments :return: cb: List of callbacks # Callbacks # serialize weights to HDF5 | 3.13283 | 3 |
MANRECT2.py | akashsuper2000/codechef-archive | 0 | 6622001 | <filename>MANRECT2.py
import sys
from math import ceil
# Interactive judge protocol: locate a hidden axis-aligned rectangle on a
# 10^9 x 10^9 grid.  Each 'Q x y' line queries a point and the judge replies
# with the Manhattan distance from (x, y) to the rectangle; 'A ...' answers.
for i in range(int(input())):
    # Distance from the origin fixes x1 + y1 = n for the near corner.
    print('Q',0,0)
    sys.stdout.flush()
    n = int(input())
    p = ceil(n/2)
    # Probe (p, 0) and (0, p) to split n into the two corner coordinates.
    print('Q',p,0)
    sys.stdout.flush()
    a = int(input())
    print('Q',0,p)
    sys.stdout.flush()
    b = int(input())
    if(b<a):
        x = b
        y = n-b
    else:
        x = n-a
        y = a
    # Symmetric queries from the far edges recover the opposite corner.
    print('Q',10**9,y)
    sys.stdout.flush()
    m = int(input())
    m = 10**9-m
    print('Q',x,10**9)
    sys.stdout.flush()
    n = int(input())
    n = 10**9-n
    print('A',x,y,m,n)
| <filename>MANRECT2.py
import sys
from math import ceil
# Interactive judge protocol: locate a hidden axis-aligned rectangle on a
# 10^9 x 10^9 grid.  Each 'Q x y' line queries a point and the judge replies
# with the Manhattan distance from (x, y) to the rectangle; 'A ...' answers.
for i in range(int(input())):
    # Distance from the origin fixes x1 + y1 = n for the near corner.
    print('Q',0,0)
    sys.stdout.flush()
    n = int(input())
    p = ceil(n/2)
    # Probe (p, 0) and (0, p) to split n into the two corner coordinates.
    print('Q',p,0)
    sys.stdout.flush()
    a = int(input())
    print('Q',0,p)
    sys.stdout.flush()
    b = int(input())
    if(b<a):
        x = b
        y = n-b
    else:
        x = n-a
        y = a
    # Symmetric queries from the far edges recover the opposite corner.
    print('Q',10**9,y)
    sys.stdout.flush()
    m = int(input())
    m = 10**9-m
    print('Q',x,10**9)
    sys.stdout.flush()
    n = int(input())
    n = 10**9-n
    print('A',x,y,m,n)
| none | 1 | 3.214341 | 3 | |
ytdownloader.py | z3oxs/ytdownloader | 11 | 6622002 | #!/usr/bin/env python3
# Coded By f4ll_py
# Re-written by @arcticlimer
"""
Main application module.
The main function runs a while
window loop and parse his events
"""
import pytube
import urllib
from typing import Union
from typing import Tuple
from youtube import YouTube
from youtube import Playlist
from window import Window
def main() -> None:
    """Run the GUI event loop, dispatching search and download requests."""
    youtube = None
    playlist = None
    # Main window loop
    while not window.is_closed:
        # NOTE(review): assumes window.read() always returns a mapping for
        # `values`, even for the close event -- TODO confirm in Window.
        event, values = window.read()
        stream = values['stream']
        path = values['path']
        url = values['url']
        if event == 'search':
            # Cleaning last search objects
            youtube = None
            playlist = None
            youtube, playlist = search(url=url,
                                       path=path)
        if event == 'Download':
            download(youtube=youtube,
                     playlist=playlist,
                     stream=stream,
                     path=path)
    window.close()
def handle_exceptions(func) -> object:
    """
    Decorator reporting known pytube/network failures on the GUI window.

    This is needed since pytube current version is quite unstable and can
    raise some unexpected errors.  On a handled error the wrapper returns
    None, which callers treat as "no result".
    """
    import functools

    # functools.wraps preserves the wrapped function's name and docstring.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError:
            window.s_append('An error with the cipher has ocurred. '
                            'See documentation in GitHub to resolve: '
                            'https://github.com/f4ll-py/ytdownloader.')
        except pytube.exceptions.RegexMatchError:
            window.s_append('Could not find any YouTube videos with that URL.')
        except urllib.error.HTTPError:
            window.s_append('This video is not available. Try again later.')
        except PermissionError:
            window.s_append('Permission denied for the current path.')
    return wrapper
@handle_exceptions
def search(url: str,
           path: str,) -> Tuple[Union[YouTube, None],
                                Union[None, Playlist]]:
    """
    Function to handle search for a video/playlist.

    Exactly one element of the returned pair is populated: ``playlist``
    for playlist URLs, ``youtube`` otherwise.  Results are also reported
    on the global ``window``.

    Parameters
    ----------
    url : `str`
        Searched video/playlist url.
    path: `str`
        Path where the video will be downloaded

    Returns
    -------
    It can returns a YouTube or Playlist object,
    or nothing if any exception was raised
    """
    window.s_append(f'Searching for {url}...')
    youtube = playlist = None
    if 'playlist?' in url:
        playlist = Playlist(url=url,
                            window=window)
        window.s_append('Playlist found! Select a download mode on '
                        'the streams below!')
        window['stream'].update(values=['Video (Max quality)',
                                        'Audio (Max quality)'])
    else:
        youtube = YouTube(path=path,
                          url=url,
                          window=window)
        # Offer every downloadable stream: video resolutions, then audio bitrates.
        v = [f'{v.resolution} ({v.fps} FPS)' for v in youtube['videos']]
        a = [a.abr for a in youtube['audios']]
        window['stream'].update(values=v+a)
        window.s_append(f'{len(youtube)} downloadable streams found!'
                        ' Select one below!')
    return youtube, playlist
@handle_exceptions
def download(youtube : Union[YouTube, None],
             playlist: Union[Playlist, None],
             stream: str,
             path: str) -> None:
    """
    Function to handle download for a video/playlist.

    A playlist downloads every entry (video or audio depending on the
    selected mode); a single video downloads the exact stream chosen in
    the GUI.  Progress is reported on the global ``window``.

    Parameters
    ----------
    stream : `str`
        Selected value of the window stream
    path: `str`
        Path where the video will be downloaded
    """
    if not playlist and not youtube:
        window.s_append('You must search for a video before download!')
        return
    if playlist:
        for video_url in playlist.video_urls:
            # A try here is needed since an exception would otherwise
            # stop the rest of the playlist from downloading.
            try:
                youtube = YouTube(path=path,
                                  url=video_url,
                                  window=window)
                if 'Audio' in stream:
                    youtube.download_audio(title=True)
                else:
                    youtube.download_video()
            except Exception as e:
                window.s_append('An unexpected pytube library error occured,'
                                ' could not download.')
                print(f'Exception {e}')
        window.s_append('All downloads finished!')
        # BUG FIX: execution previously fell through to the single-video
        # branch below with a playlist stream label such as
        # 'Video (Max quality)', where int(stream[-7:-5]) raised an
        # uncaught ValueError.  A playlist download is complete here.
        return
    if youtube:
        if youtube.path != path:
            youtube.path = path
        # The stream label is '<res> (<fps> FPS)' for video or a bitrate
        # such as '128kbps' for audio; slicing recovers the fields.
        quality = stream[:5].strip()
        fps = int(stream[-7:-5])
        if 'kb' in quality:
            youtube.download_audio(bitrate=stream)
        else:
            youtube.download_video(res=quality,
                                   fps=fps)
        window.s_append(f'{youtube.title} download finished!')
if __name__ == '__main__':
    # Single global Window shared by the main/search/download handlers.
    window = Window(theme='SystemDefault1',
                    justify='left',
                    font=('Calibri', 12),
                    button=('Black', 'White'))
    main()
| #!/usr/bin/env python3
# Coded By f4ll_py
# Re-written by @arcticlimer
"""
Main application module.
The main function runs a while
window loop and parse his events
"""
import pytube
import urllib
from typing import Union
from typing import Tuple
from youtube import YouTube
from youtube import Playlist
from window import Window
def main() -> None:
    """Run the GUI event loop, dispatching search and download requests."""
    youtube = None
    playlist = None
    # Main window loop
    while not window.is_closed:
        # NOTE(review): assumes window.read() always returns a mapping for
        # `values`, even for the close event -- TODO confirm in Window.
        event, values = window.read()
        stream = values['stream']
        path = values['path']
        url = values['url']
        if event == 'search':
            # Cleaning last search objects
            youtube = None
            playlist = None
            youtube, playlist = search(url=url,
                                       path=path)
        if event == 'Download':
            download(youtube=youtube,
                     playlist=playlist,
                     stream=stream,
                     path=path)
    window.close()
def handle_exceptions(func) -> object:
    """
    Decorator reporting known pytube/network failures on the GUI window.

    This is needed since pytube current version is quite unstable and can
    raise some unexpected errors.  On a handled error the wrapper returns
    None, which callers treat as "no result".
    """
    import functools

    # functools.wraps preserves the wrapped function's name and docstring.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyError:
            window.s_append('An error with the cipher has ocurred. '
                            'See documentation in GitHub to resolve: '
                            'https://github.com/f4ll-py/ytdownloader.')
        except pytube.exceptions.RegexMatchError:
            window.s_append('Could not find any YouTube videos with that URL.')
        except urllib.error.HTTPError:
            window.s_append('This video is not available. Try again later.')
        except PermissionError:
            window.s_append('Permission denied for the current path.')
    return wrapper
@handle_exceptions
def search(url: str,
           path: str,) -> Tuple[Union[YouTube, None],
                                Union[None, Playlist]]:
    """
    Function to handle search for a video/playlist.

    Exactly one element of the returned pair is populated: ``playlist``
    for playlist URLs, ``youtube`` otherwise.  Results are also reported
    on the global ``window``.

    Parameters
    ----------
    url : `str`
        Searched video/playlist url.
    path: `str`
        Path where the video will be downloaded

    Returns
    -------
    It can returns a YouTube or Playlist object,
    or nothing if any exception was raised
    """
    window.s_append(f'Searching for {url}...')
    youtube = playlist = None
    if 'playlist?' in url:
        playlist = Playlist(url=url,
                            window=window)
        window.s_append('Playlist found! Select a download mode on '
                        'the streams below!')
        window['stream'].update(values=['Video (Max quality)',
                                        'Audio (Max quality)'])
    else:
        youtube = YouTube(path=path,
                          url=url,
                          window=window)
        # Offer every downloadable stream: video resolutions, then audio bitrates.
        v = [f'{v.resolution} ({v.fps} FPS)' for v in youtube['videos']]
        a = [a.abr for a in youtube['audios']]
        window['stream'].update(values=v+a)
        window.s_append(f'{len(youtube)} downloadable streams found!'
                        ' Select one below!')
    return youtube, playlist
@handle_exceptions
def download(youtube : Union[YouTube, None],
             playlist: Union[Playlist, None],
             stream: str,
             path: str) -> None:
    """
    Function to handle download for a video/playlist.

    A playlist downloads every entry (video or audio depending on the
    selected mode); a single video downloads the exact stream chosen in
    the GUI.  Progress is reported on the global ``window``.

    Parameters
    ----------
    stream : `str`
        Selected value of the window stream
    path: `str`
        Path where the video will be downloaded
    """
    if not playlist and not youtube:
        window.s_append('You must search for a video before download!')
        return
    if playlist:
        for video_url in playlist.video_urls:
            # A try here is needed since an exception would otherwise
            # stop the rest of the playlist from downloading.
            try:
                youtube = YouTube(path=path,
                                  url=video_url,
                                  window=window)
                if 'Audio' in stream:
                    youtube.download_audio(title=True)
                else:
                    youtube.download_video()
            except Exception as e:
                window.s_append('An unexpected pytube library error occured,'
                                ' could not download.')
                print(f'Exception {e}')
        window.s_append('All downloads finished!')
        # BUG FIX: execution previously fell through to the single-video
        # branch below with a playlist stream label such as
        # 'Video (Max quality)', where int(stream[-7:-5]) raised an
        # uncaught ValueError.  A playlist download is complete here.
        return
    if youtube:
        if youtube.path != path:
            youtube.path = path
        # The stream label is '<res> (<fps> FPS)' for video or a bitrate
        # such as '128kbps' for audio; slicing recovers the fields.
        quality = stream[:5].strip()
        fps = int(stream[-7:-5])
        if 'kb' in quality:
            youtube.download_audio(bitrate=stream)
        else:
            youtube.download_video(res=quality,
                                   fps=fps)
        window.s_append(f'{youtube.title} download finished!')
if __name__ == '__main__':
    # Single global Window shared by the main/search/download handlers.
    window = Window(theme='SystemDefault1',
                    justify='left',
                    font=('Calibri', 12),
                    button=('Black', 'White'))
    main()
| en | 0.836566 | #!/usr/bin/env python3 # Coded By f4ll_py # Re-written by @arcticlimer Main application module. The main function runs a while window loop and parse his events # Main window loop # Cleaning last search objects This is needed since pytube current version is quite unstable and can raise some unexpected errors. Function to handle search for a video/playlist. Parameters ---------- url : `str` Searched video/playlist url. path: `str` Path where the video will be downloaded Returns ------- It can returns a YouTube or Playlist object, or nothing if any exception was raised Function to handle download for a video/playlist. Parameters ---------- stream : `str` Selected value of the window stream path: `str` Path where the video will be downloaded # A try here is needed since it would # stop downloading the playlist after # an exception. | 2.902989 | 3 |
pythonProject/venv/Lib/site-packages/flash/tool.py | NaseerAslamKhan/Salesman-Web-Application | 0 | 6622003 | import os
import platform
def mkdir(*args):
    """Create every given directory (including parents) if missing.

    Uses ``exist_ok=True`` instead of a separate ``os.path.exists`` check,
    removing the check-then-create race under concurrent callers.  Unlike
    the old version, a path that exists as a regular file now raises
    FileExistsError instead of being silently skipped (fail fast).
    """
    for folder in args:
        os.makedirs(folder, exist_ok=True)
def make_join(*args):
    """Join the path components and ensure the resulting folder exists."""
    target = os.path.join(*args)
    mkdir(target)
    return target
def list_dir(folder, condition=None, key=None, reverse=False):
    """List folder entries as (name, full_path) pairs.

    :param condition: optional predicate on the bare entry name
    :param key: optional sort key over the (name, path) tuples
    :param reverse: reverse the sort order when a key is given
    """
    names = os.listdir(folder)
    if condition is not None:
        names = [n for n in names if condition(n)]
    entries = [(n, os.path.join(folder, n)) for n in names]
    if key is not None:
        entries = sorted(entries, key=key, reverse=reverse)
    return entries
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    out = []
    for sub in l:
        out.extend(sub)
    return out
def is_win():
    # True when running on a Windows host (platform.system() exact match).
    return platform.system() == "Windows"
def get_postfix(post_fix):
    """Return a predicate testing whether a string ends with *post_fix*."""
    def ends_with(value):
        return value.endswith(post_fix)
    return ends_with
| import os
import platform
def mkdir(*args):
    """Create every given directory (including parents) if missing.

    Uses ``exist_ok=True`` instead of a separate ``os.path.exists`` check,
    removing the check-then-create race under concurrent callers.  Unlike
    the old version, a path that exists as a regular file now raises
    FileExistsError instead of being silently skipped (fail fast).
    """
    for folder in args:
        os.makedirs(folder, exist_ok=True)
def make_join(*args):
    """Join the path components and ensure the resulting folder exists."""
    target = os.path.join(*args)
    mkdir(target)
    return target
def list_dir(folder, condition=None, key=None, reverse=False):
    """List folder entries as (name, full_path) pairs.

    :param condition: optional predicate on the bare entry name
    :param key: optional sort key over the (name, path) tuples
    :param reverse: reverse the sort order when a key is given
    """
    names = os.listdir(folder)
    if condition is not None:
        names = [n for n in names if condition(n)]
    entries = [(n, os.path.join(folder, n)) for n in names]
    if key is not None:
        entries = sorted(entries, key=key, reverse=reverse)
    return entries
def flatten(l):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    out = []
    for sub in l:
        out.extend(sub)
    return out
def is_win():
    # True when running on a Windows host (platform.system() exact match).
    return platform.system() == "Windows"
def get_postfix(post_fix):
    """Return a predicate testing whether a string ends with *post_fix*."""
    def ends_with(value):
        return value.endswith(post_fix)
    return ends_with
| none | 1 | 3.059537 | 3 | |
aoc2021/d03-1.py | jbudynek/advent-of-code | 0 | 6622004 | # coding: utf-8
import numpy as np
def boom(input_val, DBG=True):
    """
    AoC 2021 day 3 part 1: power consumption = gamma * epsilon.

    For every bit column, gamma takes the most common bit and epsilon the
    least common one; both bit strings are read as binary numbers.

    :param input_val: list of equal-length bit strings ('0'/'1')
    :param DBG: unused; kept for interface compatibility
    :return: product of the gamma and epsilon rates as an int
    """
    width = len(input_val[0])
    gamma = ""
    epsilon = ""
    for k in range(width):
        # BUG FIX: the ones counter previously started at 1 instead of 0,
        # biasing tied columns (and off-by-one counts) toward '1'.
        ones = sum(1 for line in input_val if line[k] == '1')
        zeros = sum(1 for line in input_val if line[k] == '0')
        if ones > zeros:
            gamma += '1'
            epsilon += '0'
        else:
            gamma += '0'
            epsilon += '1'
    return int(gamma, 2)*int(epsilon, 2)
#############
# Read the puzzle input (one bit string per line) and print the part-1 answer.
INPUT_FILE = "input-d03.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
ret = boom(puzzle_input, DBG=False)
print(ret)
# PART 1 - 3687446 OK
| # coding: utf-8
import numpy as np
def boom(input_val, DBG=True):
    """
    AoC 2021 day 3 part 1: power consumption = gamma * epsilon.

    For every bit column, gamma takes the most common bit and epsilon the
    least common one; both bit strings are read as binary numbers.

    :param input_val: list of equal-length bit strings ('0'/'1')
    :param DBG: unused; kept for interface compatibility
    :return: product of the gamma and epsilon rates as an int
    """
    width = len(input_val[0])
    gamma = ""
    epsilon = ""
    for k in range(width):
        # BUG FIX: the ones counter previously started at 1 instead of 0,
        # biasing tied columns (and off-by-one counts) toward '1'.
        ones = sum(1 for line in input_val if line[k] == '1')
        zeros = sum(1 for line in input_val if line[k] == '0')
        if ones > zeros:
            gamma += '1'
            epsilon += '0'
        else:
            gamma += '0'
            epsilon += '1'
    return int(gamma, 2)*int(epsilon, 2)
#############
# Read the puzzle input (one bit string per line) and print the part-1 answer.
INPUT_FILE = "input-d03.txt"
f = open(INPUT_FILE, "r")
contents = f.read()
puzzle_input = contents.splitlines()
f.close()
ret = boom(puzzle_input, DBG=False)
print(ret)
# PART 1 - 3687446 OK
| en | 0.28395 | # coding: utf-8 ############# # PART 1 - 3687446 OK | 3.111903 | 3 |
setup.py | dubovyk/scvid | 1 | 6622005 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re
from setuptools import setup
# NOTE(review): pip.req was removed in pip 10 -- this import breaks under any
# modern pip; consider reading requirements.txt directly. TODO confirm.
from pip.req import parse_requirements
install_reqs = parse_requirements('requirements.txt', session=False)
reqs = [str(ir.req) for ir in install_reqs]
# Extract __version__ = "..." from the package source without importing it.
version = re.search(
    '^__version__\s*=\s*"(.*)"',
    open('scvid/scvid.py').read(),
    re.M
    ).group(1)
with open("scvid/README.md", "rb") as f:
    long_descr = f.read().decode("utf-8")
setup(
    name="scvid-lin",
    packages=["scvid"],
    entry_points={
        "console_scripts": ['scvid = scvid.scvid:main']
        },
    version=version,
    install_requires=reqs,
    description="Python command line application for creating video from live sceenshots.",
    long_description=long_descr,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/dubovyk/scvid/",
    )
| # -*- coding: utf-8 -*-
"""setup.py: setuptools control."""
import re
from setuptools import setup
# NOTE(review): pip.req was removed in pip 10 -- this import breaks under any
# modern pip; consider reading requirements.txt directly. TODO confirm.
from pip.req import parse_requirements
install_reqs = parse_requirements('requirements.txt', session=False)
reqs = [str(ir.req) for ir in install_reqs]
# Extract __version__ = "..." from the package source without importing it.
version = re.search(
    '^__version__\s*=\s*"(.*)"',
    open('scvid/scvid.py').read(),
    re.M
    ).group(1)
with open("scvid/README.md", "rb") as f:
    long_descr = f.read().decode("utf-8")
setup(
    name="scvid-lin",
    packages=["scvid"],
    entry_points={
        "console_scripts": ['scvid = scvid.scvid:main']
        },
    version=version,
    install_requires=reqs,
    description="Python command line application for creating video from live sceenshots.",
    long_description=long_descr,
    author="<NAME>",
    author_email="<EMAIL>",
    url="https://github.com/dubovyk/scvid/",
) | en | 0.635993 | # -*- coding: utf-8 -*- setup.py: setuptools control. | 1.726277 | 2 |
main.py | christiangra/python_classroom | 1 | 6622006 | <gh_stars>1-10
#usr/bin/python3.7
| #usr/bin/python3.7 | en | 0.384653 | #usr/bin/python3.7 | 1.084869 | 1 |
python/bv_build/subprocess_utils.py | sapetnioc/brainvisa-maker | 1 | 6622007 | import sys
import subprocess
def silent_check_call(command, cwd=None, env=None, exit=False):
    '''
    Call a command without printing any output but raises a
    subprocess.CalledProcessError containing the full command output (both
    stdout and stderr) if the return code of the command is not zero.
    If ``exit`` is true, print the failure to stderr and terminate the
    process instead of raising.  (Python 2 module.)
    '''
    # stderr is merged into stdout so the whole transcript is captured.
    p = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=cwd, env=env)
    stdout = p.communicate()[0]
    if p.returncode:
        if exit:
            print >> sys.stderr, 'Command failed: %s\nCommand output:\n%s' % (' '.join('"%s"' % i for i in command), stdout)
            sys.exit(1)
        else:
            raise subprocess.CalledProcessError(p.returncode,command,stdout)
def silent_or_exit_call(*args,**kwargs):
    # Like silent_check_call but always exits (after printing the command
    # output to stderr) instead of raising on failure.
    try:
        silent_check_call(*args, **kwargs)
    except subprocess.CalledProcessError, e:
        print >> sys.stderr, 'Command failed: %s\nCommand output:\n%s' % (' '.join('"%s"' % i for i in e.cmd), e.output)
        sys.exit(1)
def verbose_check_call(command, cwd=None, verbose=None):
    # When ``verbose`` is a file-like object, stream the command output into
    # it; otherwise run silently and exit the process on failure.
    if verbose:
        subprocess.check_call(command, cwd=cwd, stdout=verbose, stderr=verbose)
    else:
        silent_check_call(command, cwd=cwd, exit=True)
| import sys
import subprocess
def silent_check_call(command, cwd=None, env=None, exit=False):
    '''
    Call a command without printing any output but raises a
    subprocess.CalledProcessError containing the full command output (both
    stdout and stderr) if the return code of the command is not zero.
    If ``exit`` is true, print the failure to stderr and terminate the
    process instead of raising.  (Python 2 module.)
    '''
    # stderr is merged into stdout so the whole transcript is captured.
    p = subprocess.Popen(command,stdout=subprocess.PIPE,stderr=subprocess.STDOUT,cwd=cwd, env=env)
    stdout = p.communicate()[0]
    if p.returncode:
        if exit:
            print >> sys.stderr, 'Command failed: %s\nCommand output:\n%s' % (' '.join('"%s"' % i for i in command), stdout)
            sys.exit(1)
        else:
            raise subprocess.CalledProcessError(p.returncode,command,stdout)
def silent_or_exit_call(*args,**kwargs):
    # Like silent_check_call but always exits (after printing the command
    # output to stderr) instead of raising on failure.
    try:
        silent_check_call(*args, **kwargs)
    except subprocess.CalledProcessError, e:
        print >> sys.stderr, 'Command failed: %s\nCommand output:\n%s' % (' '.join('"%s"' % i for i in e.cmd), e.output)
        sys.exit(1)
def verbose_check_call(command, cwd=None, verbose=None):
    # When ``verbose`` is a file-like object, stream the command output into
    # it; otherwise run silently and exit the process on failure.
    if verbose:
        subprocess.check_call(command, cwd=cwd, stdout=verbose, stderr=verbose)
    else:
        silent_check_call(command, cwd=cwd, exit=True)
| en | 0.65104 | Call a command without printing any output but raises a subprocess.CalledProcessError containing the full command output (both stdout and stderr) if the return code of the command is not zero. | 2.856111 | 3 |
zombieEscape/scenes.py | ScottJohnson02/Zombie-Escape | 0 | 6622008 | class Scene(object):
    def help(self):
        # brings the user to a help menu
        # NOTE(review): the returned text announces a command list but never
        # includes one -- looks truncated; TODO confirm intended.
        return "Here is all the commands you can enter in the field "
class Apartment(Scene):
    """First scene: a burned-out apartment with a single trapped walker.

    ``choice`` maps the player's input to an engine verdict: 'next'
    advances to the next scene, 'die' ends the run, 'error' re-prompts.
    """
    def __init__(self):
        # message is the text that apperas after beating or losing a room
        self.message = ' '
        self.name = 'Apartment'
    def enter(self):
        # text the user sees when entering a new room
        return """You enter an old abandoned appartment where it looks as if there was a fire. You suddenly encounter a walker but it is melted into the carpet
        you have a choice to make: 1. kill the walker or 2. dont kill the walker """
    def choice(self, word):
        # takes the user input and then returns a value that the game engine takes and then either moves to next scene or kill the player
        if word == 'die':
            return 'die'
        elif word == '1':
            self.message = 'You decide to kill the walker to end his suffering '
            return 'next'
        elif word == '2':
            self.message = 'You decide to let the walker suffer like the monster it is. You hear faint growls crying out as you walk away'
            return 'next'
        else:
            return 'error'
class Thaddeus(Scene):
    """Campus scene: looters demand medicine; every option is a gamble.

    Verdicts: '2' (run) is the only survivable choice; '1' and '3' kill
    the player; anything else returns 'error' to re-prompt.
    """
    def __init__(self):
        # message shown after the player resolves the scene
        self.message = ' '
        self.name = 'Thaddeus'
    def enter(self):
        return """welcome to Thaddeus Stevens. The campus is in complete dissaray and trash is everywhere along with a hoard of walkers. You slowly sneak through the campus trying
        to avoid detection when you come across a bunch of looters looking for any supplies they can find. They spot you and call out to you for help. They are asking for meds because her sister
        is sick with a fever and they need antibiotics. You have a choice to make: 1. give them your meds, 2. run away from them, 3. rob them"""
    def choice(self, word):
        if word == '1':
            self.message = "'Wow that was easy! Come on out boys' the looter shouts. You see two men with sawed offs come out the shadow and before you have time to respond you get shot. If it makes you feel better they later died due to attracting walkers. "
            return 'die'
        elif word == '2':
            self.message = "'DAMMIT HE DIDN'T BELEIEVE ME START FIRING' she shouts and 2 men with rifles start taking pot shots at you but luckily they miss and the noise attracts the walkers to their location. Luckily you made it out unharmed"
            return 'next'
        elif word == '3':
            self.message = " In your most intimidating voice you demand that she hands you all of HER supplies and you pull out your gun. She chuckles at your attempt and then a man comes from behind you and chokes you out and everything goes black."
            return 'die'
        else:
            return 'error'
class CarDealership(Scene):
    """Dealership scene: pick an escape vehicle while a horde closes in.

    Verdicts: '2' (the van) advances, '1' and '3' kill the player, and
    'loot' opens the loot flow.
    """
    def __init__(self):
        self.name = 'CarDealership'
        # message shown after the player resolves the scene
        self.message = ' '
    def enter(self):
        return """welcome to the car dealership! You see a bright red convertible with a large price tag that reads $18,000. Next to that vechicle you see a beat up van that has
        grafiti on it that reads "FREE CANDY". You also see that the door to the dealership is open. You decide to peer into the managers office where you see all the car keys hanging
        up on display. There is a large crowd of walkers heading your way and they seem to have noticed you and are NOW HEADING YOUR WAY! You have to make a choice:
        1. Ride off in the fast convertible
        2. Take the beat up van
        3. Try to hide from the zombies """
    def choice(self, word):
        if word == '1':
            self.message = 'You died from a zombie lurking in the backseat and when you hop in you feel a bite in your neck and everything goes hazy.'
            return 'die'
        elif word == '2':
            self.message = "You hop in the van and it is slow to start but it eventually starts and you run over 3 zombies and you go back on the road to continue your adventure"
            return 'next'
        elif word == 'loot':
            return 'loot'
        elif word == '3':
            self.message = 'You climb to the roof of the dealership but the zombies don\'t give up and they stay around the building. Days go by and they haven\' left yet. You die due to starvation'
            return 'die'
        else:
            # NOTE(review): every other scene returns 'error' here; echoing
            # the raw input back to the engine looks unintended -- TODO confirm.
            return word
class Market(Scene):
    """Trader-camp scene: rob the old vendor or walk away."""

    def __init__(self):
        self.name = 'Market'
        self.message = ' '  # text shown once the encounter resolves

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """'Welcome to the Market!' a vendor says and then you notice that they whole location was turned into a trader camp where fellow survivors can trade their supplies. You see a fragile old lady running a stand by herself.
She is in the corner of the building and someone could rob her easily. You have a choice to make: 1. Rob the lady or 2. let her be and leave the camp"""

    def choice(self, word):
        """Resolve the player's pick (case-insensitive); returns 'die', 'next', 'loot' or 'error'."""
        key = word.upper()
        if key == 'LOOT':
            return 'loot'
        if key == '1':
            self.message = "The old lady sees the look on your face and knows that you want to rob her. She quickly pulls out her pistol and blasts you between the eyes."
            return 'die'
        if key == '2':
            self.message = 'You decide not to rob the sweet innocent old lady adn you give her a smile before you exit through the doors.'
            return 'next'
        return 'error'
class GroceryStore(Scene):
    """Grocery-store ambush scene: execute or spare the last bandit."""

    def __init__(self):
        self.name = 'Grocery Store'
        self.message = ' '  # text shown once the encounter resolves

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """You are walking along the street and you come across a family owned grocery store. You hear a car coming so you run to go hide in the building, but then you hear the car stop right in front of the store
they shout out "COME HERE FELLER'" as they blast their guns in the air. Your heart starts racing as you begin thinking about what to do next. You run behind one of the counters towards the front of the store. You hear them talk about splitting up to track you down.
You know you could take them all in a 1v1 style but you don't know about a 1 v 3. Suddenly you hear footsteps slowly coming closer to you. He walks past you but you decide to sneak up behind him and subdue him with your trusty hunting knife. He goes down silently and
you loot him and take his gun. You see one of the goons look at you and he reaches for his gun as he shreaks "THAT SUN OF A BITCH KILLED MICHA BLAST HIM" as he is in the middle of his sentence you unload a clip into him and he falls to the ground. There is only 1 left and he is begging for
his life. You have a choice to make: 1. Kill the man or 2. Don't kill the man and spare him """

    def choice(self, word):
        """Resolve the player's pick (case-insensitive); returns 'die', 'next', 'loot' or 'error'."""
        key = word.upper()
        if key == 'LOOT':
            return 'loot'
        outcomes = {
            '1': (
                "You don't wanna take any chances and you shoot him between the eyes making it painless but effective and you leave the store with their supplies and their truck",
                'next',
            ),
            '2': (
                "You to spare him his life since you would feel to guilty killing someone who doesn't want a fight. However he pulled a fast one on you and takes his small pistol from his pocket and SHOOTS YOU IN THE STOMACH",
                'die',
            ),
        }
        try:
            self.message, result = outcomes[key]
        except KeyError:
            return 'error'
        return result
class Scene(object):
    """Base class for all game scenes.

    Subclasses provide ``enter()`` (scene text) and ``choice(word)``
    (map the player's input to an engine outcome token).
    """

    def help(self):
        # brings the user to a help menu
        return "Here is all the commands you can enter in the field "
class Apartment(Scene):
    """Opening scene: a burned-out apartment with a crippled walker."""

    def __init__(self):
        # message is the text that appears after beating or losing a room
        self.message = ' '
        self.name = 'Apartment'

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """You enter an old abandoned appartment where it looks as if there was a fire. You suddenly encounter a walker but it is melted into the carpet
you have a choice to make: 1. kill the walker or 2. dont kill the walker """

    def choice(self, word):
        """Resolve the player's pick; returns 'die', 'next' or 'error'."""
        if word == 'die':
            return 'die'
        messages = {
            '1': 'You decide to kill the walker to end his suffering ',
            '2': 'You decide to let the walker suffer like the monster it is. You hear faint growls crying out as you walk away',
        }
        if word in messages:
            self.message = messages[word]
            return 'next'
        return 'error'
class Thaddeus(Scene):
    """Thaddeus Stevens campus scene: a run-in with looters."""

    def __init__(self):
        self.message = ' '  # text shown once the encounter resolves
        self.name = 'Thaddeus'

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """welcome to Thaddeus Stevens. The campus is in complete dissaray and trash is everywhere along with a hoard of walkers. You slowly sneak through the campus trying
to avoid detection when you come across a bunch of looters looking for any supplies they can find. They spot you and call out to you for help. They are asking for meds because her sister
is sick with a fever and they need antibiotics. You have a choice to make: 1. give them your meds, 2. run away from them, 3. rob them"""

    def choice(self, word):
        """Resolve the player's pick; returns 'die', 'next' or 'error'."""
        outcomes = {
            '1': (
                "'Wow that was easy! Come on out boys' the looter shouts. You see two men with sawed offs come out the shadow and before you have time to respond you get shot. If it makes you feel better they later died due to attracting walkers. ",
                'die',
            ),
            '2': (
                "'DAMMIT HE DIDN'T BELEIEVE ME START FIRING' she shouts and 2 men with rifles start taking pot shots at you but luckily they miss and the noise attracts the walkers to their location. Luckily you made it out unharmed",
                'next',
            ),
            '3': (
                " In your most intimidating voice you demand that she hands you all of HER supplies and you pull out your gun. She chuckles at your attempt and then a man comes from behind you and chokes you out and everything goes black.",
                'die',
            ),
        }
        if word not in outcomes:
            return 'error'
        self.message, result = outcomes[word]
        return result
class CarDealership(Scene):
    """Car-dealership scene: pick an escape vehicle or try to hide."""

    def __init__(self):
        self.name = 'CarDealership'
        self.message = ' '  # text shown once the encounter resolves

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """welcome to the car dealership! You see a bright red convertible with a large price tag that reads $18,000. Next to that vechicle you see a beat up van that has
grafiti on it that reads "FREE CANDY". You also see that the door to the dealership is open. You decide to peer into the managers office where you see all the car keys hanging
up on display. There is a large crowd of walkers heading your way and they seem to have noticed you and are NOW HEADING YOUR WAY! You have to make a choice:
1. Ride off in the fast convertible
2. Take the beat up van
3. Try to hide from the zombies """

    def choice(self, word):
        """Resolve the player's pick; returns 'die', 'next', 'loot', or echoes the input."""
        if word == 'loot':
            return 'loot'
        outcomes = {
            '1': (
                'You died from a zombie lurking in the backseat and when you hop in you feel a bite in your neck and everything goes hazy.',
                'die',
            ),
            '2': (
                "You hop in the van and it is slow to start but it eventually starts and you run over 3 zombies and you go back on the road to continue your adventure",
                'next',
            ),
            '3': (
                "You climb to the roof of the dealership but the zombies don't give up and they stay around the building. Days go by and they haven' left yet. You die due to starvation",
                'die',
            ),
        }
        if word in outcomes:
            self.message, result = outcomes[word]
            return result
        # NOTE(review): unlike the other scenes this echoes the raw input back
        # instead of returning 'error' — confirm the game engine expects that.
        return word
class Market(Scene):
    """Trader-camp scene: rob the old vendor or walk away."""

    def __init__(self):
        self.name = 'Market'
        self.message = ' '  # text shown once the encounter resolves

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """'Welcome to the Market!' a vendor says and then you notice that they whole location was turned into a trader camp where fellow survivors can trade their supplies. You see a fragile old lady running a stand by herself.
She is in the corner of the building and someone could rob her easily. You have a choice to make: 1. Rob the lady or 2. let her be and leave the camp"""

    def choice(self, word):
        """Resolve the player's pick (case-insensitive); returns 'die', 'next', 'loot' or 'error'."""
        key = word.upper()
        if key == 'LOOT':
            return 'loot'
        if key == '1':
            self.message = "The old lady sees the look on your face and knows that you want to rob her. She quickly pulls out her pistol and blasts you between the eyes."
            return 'die'
        if key == '2':
            self.message = 'You decide not to rob the sweet innocent old lady adn you give her a smile before you exit through the doors.'
            return 'next'
        return 'error'
class GroceryStore(Scene):
    """Grocery-store ambush scene: execute or spare the last bandit."""

    def __init__(self):
        self.name = 'Grocery Store'
        self.message = ' '  # text shown once the encounter resolves

    def enter(self):
        """Return the scene intro and the numbered choices."""
        return """You are walking along the street and you come across a family owned grocery store. You hear a car coming so you run to go hide in the building, but then you hear the car stop right in front of the store
they shout out "COME HERE FELLER'" as they blast their guns in the air. Your heart starts racing as you begin thinking about what to do next. You run behind one of the counters towards the front of the store. You hear them talk about splitting up to track you down.
You know you could take them all in a 1v1 style but you don't know about a 1 v 3. Suddenly you hear footsteps slowly coming closer to you. He walks past you but you decide to sneak up behind him and subdue him with your trusty hunting knife. He goes down silently and
you loot him and take his gun. You see one of the goons look at you and he reaches for his gun as he shreaks "THAT SUN OF A BITCH KILLED MICHA BLAST HIM" as he is in the middle of his sentence you unload a clip into him and he falls to the ground. There is only 1 left and he is begging for
his life. You have a choice to make: 1. Kill the man or 2. Don't kill the man and spare him """

    def choice(self, word):
        """Resolve the player's pick (case-insensitive); returns 'die', 'next', 'loot' or 'error'."""
        key = word.upper()
        if key == 'LOOT':
            return 'loot'
        outcomes = {
            '1': (
                "You don't wanna take any chances and you shoot him between the eyes making it painless but effective and you leave the store with their supplies and their truck",
                'next',
            ),
            '2': (
                "You to spare him his life since you would feel to guilty killing someone who doesn't want a fight. However he pulled a fast one on you and takes his small pistol from his pocket and SHOOTS YOU IN THE STOMACH",
                'die',
            ),
        }
        try:
            self.message, result = outcomes[key]
        except KeyError:
            return 'error'
        return result
| en | 0.973829 | # brings the user to a help menu # message is the text that apperas after beating or losing a room # text the user sees when entering a new room You enter an old abandoned appartment where it looks as if there was a fire. You suddenly encounter a walker but it is melted into the carpet you have a choice to make: 1. kill the walker or 2. dont kill the walker # takes the user input and then returns a value that the game engine takes and then either moves to next scene or kill the player welcome to Thaddeus Stevens. The campus is in complete dissaray and trash is everywhere along with a hoard of walkers. You slowly sneak through the campus trying to avoid detection when you come across a bunch of looters looking for any supplies they can find. They spot you and call out to you for help. They are asking for meds because her sister is sick with a fever and they need antibiotics. You have a choice to make: 1. give them your meds, 2. run away from them, 3. rob them welcome to the car dealership! You see a bright red convertible with a large price tag that reads $18,000. Next to that vechicle you see a beat up van that has grafiti on it that reads "FREE CANDY". You also see that the door to the dealership is open. You decide to peer into the managers office where you see all the car keys hanging up on display. There is a large crowd of walkers heading your way and they seem to have noticed you and are NOW HEADING YOUR WAY! You have to make a choice: 1. Ride off in the fast convertible 2. Take the beat up van 3. Try to hide from the zombies 'Welcome to the Market!' a vendor says and then you notice that they whole location was turned into a trader camp where fellow survivors can trade their supplies. You see a fragile old lady running a stand by herself. She is in the corner of the building and someone could rob her easily. You have a choice to make: 1. Rob the lady or 2. 
let her be and leave the camp You are walking along the street and you come across a family owned grocery store. You hear a car coming so you run to go hide in the building, but then you hear the car stop right in front of the store they shout out "COME HERE FELLER'" as they blast their guns in the air. Your heart starts racing as you begin thinking about what to do next. You run behind one of the counters towards the front of the store. You hear them talk about splitting up to track you down. You know you could take them all in a 1v1 style but you don't know about a 1 v 3. Suddenly you hear footsteps slowly coming closer to you. He walks past you but you decide to sneak up behind him and subdue him with your trusty hunting knife. He goes down silently and you loot him and take his gun. You see one of the goons look at you and he reaches for his gun as he shreaks "THAT SUN OF A BITCH KILLED MICHA BLAST HIM" as he is in the middle of his sentence you unload a clip into him and he falls to the ground. There is only 1 left and he is begging for his life. You have a choice to make: 1. Kill the man or 2. Don't kill the man and spare him | 3.779073 | 4 |
backend/geotiff.py | KhaledSharif/SimpleWMTS | 2 | 6622009 | <gh_stars>1-10
class GeoTIFF:
    """Lightweight wrapper around a GeoTIFF file on disk.

    Exposes the absolute ``path``, a short ``name`` derived from the file
    name, and the GDAL ``info`` metadata dict.
    """

    def __init__(self, path_to_file: str):
        """Create a GeoTIFF from a path to the GeoTIFF file.

        Parameters
        ----------
        path_to_file : str
            Relative or absolute path to the GeoTIFF file.
        """
        from functions import get_gdal_info
        from os.path import abspath
        self.path = abspath(path_to_file)
        # Basename up to the first dot, e.g. "/data/scene.tif" -> "scene".
        # NOTE(review): splits on "/" — not Windows-safe; confirm deployment target.
        self.name = self.path.split("/")[-1].split(".")[0]
        self.info = get_gdal_info(self.path)

    def to_json(self) -> dict:
        """Return a JSON-serializable dict representation of this GeoTIFF."""
        return {
            "path": self.path,
            "name": self.name,
            "info": self.info,
        }
class GeoTIFF:
    """Lightweight wrapper around a GeoTIFF file on disk.

    Exposes the absolute ``path``, a short ``name`` derived from the file
    name, and the GDAL ``info`` metadata dict.
    """

    def __init__(self, path_to_file: str):
        """Create a GeoTIFF from a path to the GeoTIFF file.

        Parameters
        ----------
        path_to_file : str
            Relative or absolute path to the GeoTIFF file.
        """
        from functions import get_gdal_info
        from os.path import abspath
        self.path = abspath(path_to_file)
        # Basename up to the first dot, e.g. "/data/scene.tif" -> "scene".
        # NOTE(review): splits on "/" — not Windows-safe; confirm deployment target.
        self.name = self.path.split("/")[-1].split(".")[0]
        self.info = get_gdal_info(self.path)

    def to_json(self) -> dict:
        """Return a JSON-serializable dict representation of this GeoTIFF."""
        return {
            "path": self.path,
            "name": self.name,
            "info": self.info,
        }
datasets/hcstvg_eval.py | antoyang/TubeDETR | 4 | 6622010 | from pathlib import Path
from typing import Dict, List
import numpy as np
import util.dist as dist
import json
from functools import reduce
from util.box_ops import np_box_iou
class HCSTVGGiouEvaluator:
    """Computes spatio-temporal grounding metrics (tIoU, vIoU, recalls) on HC-STVG."""

    def __init__(
        self,
        hcstvg_path: str,
        subset: str = "test",
        verbose: bool = True,
        iou_thresholds: list = [0.3, 0.5],
        fps: int = 5,
        video_max_len: int = 200,
        v2=False,
        tmp_loc=True,
    ):
        """
        :param hcstvg_path: path to HC-STVG annotations
        :param subset: train, val or test
        :param verbose: whether to print more information or not
        :param iou_thresholds: IoU thresholds for the vIoU metrics
        :param fps: number of frames per second
        :param video_max_len: maximum number of frames to be extracted from a video
        :param v2: whether to use the second version of the dataset
        :param tmp_loc: whether to evaluate temporal localization
        """
        assert subset in ["train", "val", "test"], f"Wrong HC-STVG subset {subset}"
        self.iou_thresholds = iou_thresholds
        self.tmp_loc = tmp_loc
        hcstvg_path = Path(hcstvg_path)
        # We load the image ids corresponding to the current subset
        if not v2:
            path = hcstvg_path / f"{subset}_proc.json"
        else:
            path = hcstvg_path / f"{subset}v2_proc.json"
        # FIX: close the annotation file instead of leaking the handle.
        with open(path, "r") as f:
            self.anns = json.load(f)
        # map video_id to [frame ids to forward, frame ids falling in the GT tube]
        self.vid2imgids = {}
        self.vid2steds = {}  # map video_id to [start, end] of the GT tube
        self.img2box = {}  # map video_id + frame_id to bbox
        for video in self.anns:
            video_num_images = video["frame_count"]
            video_id = video["video_id"]
            # assumes 20s-long videos (frames / 20 = native fps) — TODO confirm
            video_fps = video_num_images / 20
            sampling_rate = fps / video_fps
            assert sampling_rate <= 1  # downsampling at fps
            start_frame = 0 if self.tmp_loc else video["tube_start_frame"]
            end_frame = (
                video_num_images - 1 if self.tmp_loc else video["tube_end_frame"]
            )
            frame_ids = [start_frame]
            for frame_id in range(start_frame, end_frame):
                if int(frame_ids[-1] * sampling_rate) < int(frame_id * sampling_rate):
                    frame_ids.append(frame_id)
            if (
                len(frame_ids) > video_max_len
            ):  # temporal downsampling if there is still too many images
                frame_ids = [
                    frame_ids[(j * len(frame_ids)) // video_max_len]
                    for j in range(video_max_len)
                ]
            inter_frames = []
            self.vid2steds[video_id] = [
                video["tube_start_frame"],
                video["tube_end_frame"],
            ]
            for frame_id in frame_ids:
                if video["tube_start_frame"] <= frame_id < video["tube_end_frame"]:
                    # annotated boxes are stored as [x, y, w, h]; convert to xyxy
                    x1, y1, w, h = video["trajectory"][
                        frame_id - video["tube_start_frame"]
                    ]
                    x2 = x1 + w
                    y2 = y1 + h
                    self.img2box[f"{video_id}_{frame_id}"] = [[x1, y1, x2, y2]]
                    inter_frames.append(f"{video_id}_{frame_id}")
            self.vid2imgids[video_id] = [frame_ids, inter_frames]
        if verbose:
            print(f"HC-STVG subset contains {len(self.vid2imgids)} videos")
            # BUG FIX: the attribute is img2box — `imgid2box` was never defined,
            # so verbose=True crashed with AttributeError.
            print(f"There are {len(self.img2box)} images to evaluate")

    def evaluate(self, predictions: Dict[str, Dict], video_predictions: Dict[str, Dict]):
        """Compute per-video metrics from per-frame and per-video predictions.

        FIX: annotations corrected from List[Dict] — both arguments are used
        as dicts (``.items()`` / key lookup) keyed by image_id / video_id.

        :param predictions: maps image_id ("{video_id}_{frame_id}") to {"boxes": [...]}
        :param video_predictions: maps video_id to {"sted": [start, end]}
        :return: dict mapping video_id to its metrics (tiou/viou/gt_viou/recalls)
        """
        if len(video_predictions) < len(self.vid2imgids):
            raise RuntimeError(
                f"{len(self.vid2imgids) - len(video_predictions)} video predictions missing"
            )
        if len(predictions) < len(self.img2box):
            raise RuntimeError(
                f"{len(self.img2box) - len(predictions)} box predictions missing"
            )
        vid_metrics = {}
        for video_id, video_pred in video_predictions.items():
            if video_id in vid_metrics:
                print(f"Warning, multiple predictions found for video {video_id}")
                continue
            if self.tmp_loc:
                gt_sted = self.vid2steds[video_id]
                pred_sted = video_pred["sted"]
            frame_ids, inter_frames = self.vid2imgids[video_id]
            # compute temporal iou
            if self.tmp_loc:
                max_start = max(gt_sted[0], pred_sted[0])
                min_end = min(gt_sted[1], pred_sted[1])
                min_start = min(gt_sted[0], pred_sted[0])
                max_end = max(gt_sted[1], pred_sted[1])
                if min_end <= max_start:
                    tiou = 0
                else:
                    intersection = min_end - max_start
                    gt_span = gt_sted[1] - gt_sted[0]
                    pred_span = pred_sted[1] - pred_sted[0]
                    union = gt_span + pred_span - intersection
                    tiou = intersection / union
                # compute viou and gt_viou
                vid_metrics[video_id] = {
                    "gt_sted": gt_sted,
                    "pred_sted": pred_sted,
                    "tiou": tiou,
                    "img_metrics": {},
                }
                union_predgt = [
                    frame_id
                    for frame_id in frame_ids
                    if min_start <= frame_id < max_end
                ]
                inter_predgt = set(
                    [
                        frame_id
                        for frame_id in frame_ids
                        if max_start <= frame_id < min_end
                    ]
                )
                viou = 0
            else:
                vid_metrics[video_id] = {
                    "img_metrics": {},
                }
                union_predgt = frame_ids
                inter_predgt = frame_ids
            gt_viou = 0
            for (
                image_id
            ) in (
                inter_frames
            ):  # iterate on all frames of the annotated moment to update GT metrics
                if image_id not in predictions:
                    raise RuntimeError(f"No prediction for frame {image_id}")
                gt_boxes = self.img2box[image_id]
                pred_boxes = predictions[image_id]["boxes"]
                iou = np_box_iou(np.array(pred_boxes), np.array(gt_boxes))[0][0]
                frame_id = int(image_id.split("_")[1])
                vid_metrics[video_id]["img_metrics"][image_id] = {
                    "iou": iou,
                    "pred_box": pred_boxes[0],
                    "gt_box": gt_boxes[0],
                }
                if (
                    frame_id in inter_predgt and self.tmp_loc
                ):  # update viou if this frame is in the intersection between the annotated moment and the predicted moment
                    viou += iou
                gt_viou += iou
            if self.tmp_loc:  # compute viou@R
                viou = viou / max(len(union_predgt), 1)
                vid_metrics[video_id]["viou"] = viou
                recalls = {thresh: 0 for thresh in self.iou_thresholds}
                for thresh in self.iou_thresholds:
                    if viou > thresh:
                        recalls[thresh] += 1
                vid_metrics[video_id].update(
                    {f"viou@{thresh}": recalls[thresh] for thresh in self.iou_thresholds}
                )
            # compute gt_viou@R
            gt_viou = gt_viou / max(len(inter_frames), 1)
            vid_metrics[video_id]["gt_viou"] = gt_viou
            gt_recalls = {thresh: 0 for thresh in self.iou_thresholds}
            for thresh in self.iou_thresholds:
                if gt_viou > thresh:
                    gt_recalls[thresh] += 1
            vid_metrics[video_id].update(
                {
                    f"gt_viou@{thresh}": gt_recalls[thresh]
                    for thresh in self.iou_thresholds
                }
            )
        return vid_metrics
class HCSTVGEvaluator(object):
    """Distributed-friendly wrapper around :class:`HCSTVGGiouEvaluator`.

    Accumulates per-frame and per-video predictions across processes, then
    averages the grounding metrics on the main process.
    """

    def __init__(
        self,
        hcstvg_path,
        subset,
        iou_thresholds=[0.3, 0.5],
        fps=5,
        video_max_len=200,
        v2=False,
        save_pred=False,
        tmp_loc=True,
    ):
        """
        :param hcstvg_path: path to HC-STVG annotations
        :param subset: train, val or test
        :param iou_thresholds: IoU thresholds for the vIoU metrics
        :param fps: number of frames per second
        :param video_max_len: maximum number of frames to be extracted from a video
        :param v2: whether to use the second version of the dataset
        :param save_pred: whether to save predictions in the output of summarize
        :param tmp_loc: whether to evaluate temporal localization
        """
        self.evaluator = HCSTVGGiouEvaluator(
            hcstvg_path,
            subset=subset,
            verbose=False,
            iou_thresholds=iou_thresholds,
            fps=fps,
            video_max_len=video_max_len,
            v2=v2,
            tmp_loc=tmp_loc,
        )
        self.predictions = {}  # image_id -> {"boxes": [...]}
        self.video_predictions = {}  # video_id -> {"sted": [start, end]}
        self.results = None  # per-video metrics, filled by summarize()
        self.iou_thresholds = iou_thresholds
        self.save_pred = save_pred
        self.tmp_loc = tmp_loc

    def accumulate(self):
        # No-op: metrics are computed in one shot inside summarize().
        pass

    def update(self, predictions):
        """Merge a batch of per-frame box predictions into the local buffer."""
        self.predictions.update(predictions)

    def video_update(self, video_predictions):
        """Merge a batch of per-video (temporal) predictions into the local buffer."""
        self.video_predictions.update(video_predictions)

    def synchronize_between_processes(self):
        """Gather predictions from all distributed workers and merge them locally."""
        all_predictions = dist.all_gather(self.predictions)
        # reduce with dict.update (which returns None) chained via `or` to merge in place
        self.predictions = reduce(lambda a, b: a.update(b) or a, all_predictions, {})
        all_video_predictions = dist.all_gather(self.video_predictions)
        self.video_predictions = reduce(
            lambda a, b: a.update(b) or a, all_video_predictions, {}
        )

    def summarize(self):
        """Run the evaluator on the main process and return averaged metrics.

        NOTE(review): the main process returns a single dict while other
        processes return a ``(None, None)`` tuple — confirm callers handle
        both return shapes.
        """
        if dist.is_main_process():
            self.results = self.evaluator.evaluate(
                self.predictions, self.video_predictions
            )
            metrics = {"gt_viou": 0}
            if self.tmp_loc:
                metrics.update({"tiou": 0, "viou": 0})
            for thresh in self.iou_thresholds:  # init metrics
                if self.tmp_loc:
                    metrics[f"viou@{thresh}"] = 0
                metrics[f"gt_viou@{thresh}"] = 0
            counter = 0
            for x in self.results.values():  # sum results
                if self.tmp_loc:
                    metrics["tiou"] += x["tiou"]
                    metrics["viou"] += x["viou"]
                metrics["gt_viou"] += x["gt_viou"]
                for thresh in self.iou_thresholds:
                    if self.tmp_loc:
                        metrics[f"viou@{thresh}"] += x[f"viou@{thresh}"]
                    metrics[f"gt_viou@{thresh}"] += x[f"gt_viou@{thresh}"]
                counter += 1
            for key in metrics:  # average results
                metrics[key] = metrics[key] / counter
                print(f"{key}: {metrics[key]:.4f}")
            out = {f"{name}": metrics[name] for name in metrics}
            if self.save_pred:
                out["predictions"] = self.predictions
                out["video_predictions"] = self.video_predictions
                out["vid_metrics"] = self.results
            return out
        return None, None
| from pathlib import Path
from typing import Dict, List
import numpy as np
import util.dist as dist
import json
from functools import reduce
from util.box_ops import np_box_iou
class HCSTVGGiouEvaluator:
    """Computes spatio-temporal grounding metrics (tIoU, vIoU, recalls) on HC-STVG."""

    def __init__(
        self,
        hcstvg_path: str,
        subset: str = "test",
        verbose: bool = True,
        iou_thresholds: list = [0.3, 0.5],
        fps: int = 5,
        video_max_len: int = 200,
        v2=False,
        tmp_loc=True,
    ):
        """
        :param hcstvg_path: path to HC-STVG annotations
        :param subset: train, val or test
        :param verbose: whether to print more information or not
        :param iou_thresholds: IoU thresholds for the vIoU metrics
        :param fps: number of frames per second
        :param video_max_len: maximum number of frames to be extracted from a video
        :param v2: whether to use the second version of the dataset
        :param tmp_loc: whether to evaluate temporal localization
        """
        assert subset in ["train", "val", "test"], f"Wrong HC-STVG subset {subset}"
        self.iou_thresholds = iou_thresholds
        self.tmp_loc = tmp_loc
        hcstvg_path = Path(hcstvg_path)
        # We load the image ids corresponding to the current subset
        if not v2:
            path = hcstvg_path / f"{subset}_proc.json"
        else:
            path = hcstvg_path / f"{subset}v2_proc.json"
        # FIX: close the annotation file instead of leaking the handle.
        with open(path, "r") as f:
            self.anns = json.load(f)
        # map video_id to [frame ids to forward, frame ids falling in the GT tube]
        self.vid2imgids = {}
        self.vid2steds = {}  # map video_id to [start, end] of the GT tube
        self.img2box = {}  # map video_id + frame_id to bbox
        for video in self.anns:
            video_num_images = video["frame_count"]
            video_id = video["video_id"]
            # assumes 20s-long videos (frames / 20 = native fps) — TODO confirm
            video_fps = video_num_images / 20
            sampling_rate = fps / video_fps
            assert sampling_rate <= 1  # downsampling at fps
            start_frame = 0 if self.tmp_loc else video["tube_start_frame"]
            end_frame = (
                video_num_images - 1 if self.tmp_loc else video["tube_end_frame"]
            )
            frame_ids = [start_frame]
            for frame_id in range(start_frame, end_frame):
                if int(frame_ids[-1] * sampling_rate) < int(frame_id * sampling_rate):
                    frame_ids.append(frame_id)
            if (
                len(frame_ids) > video_max_len
            ):  # temporal downsampling if there is still too many images
                frame_ids = [
                    frame_ids[(j * len(frame_ids)) // video_max_len]
                    for j in range(video_max_len)
                ]
            inter_frames = []
            self.vid2steds[video_id] = [
                video["tube_start_frame"],
                video["tube_end_frame"],
            ]
            for frame_id in frame_ids:
                if video["tube_start_frame"] <= frame_id < video["tube_end_frame"]:
                    # annotated boxes are stored as [x, y, w, h]; convert to xyxy
                    x1, y1, w, h = video["trajectory"][
                        frame_id - video["tube_start_frame"]
                    ]
                    x2 = x1 + w
                    y2 = y1 + h
                    self.img2box[f"{video_id}_{frame_id}"] = [[x1, y1, x2, y2]]
                    inter_frames.append(f"{video_id}_{frame_id}")
            self.vid2imgids[video_id] = [frame_ids, inter_frames]
        if verbose:
            print(f"HC-STVG subset contains {len(self.vid2imgids)} videos")
            # BUG FIX: the attribute is img2box — `imgid2box` was never defined,
            # so verbose=True crashed with AttributeError.
            print(f"There are {len(self.img2box)} images to evaluate")

    def evaluate(self, predictions: Dict[str, Dict], video_predictions: Dict[str, Dict]):
        """Compute per-video metrics from per-frame and per-video predictions.

        FIX: annotations corrected from List[Dict] — both arguments are used
        as dicts (``.items()`` / key lookup) keyed by image_id / video_id.

        :param predictions: maps image_id ("{video_id}_{frame_id}") to {"boxes": [...]}
        :param video_predictions: maps video_id to {"sted": [start, end]}
        :return: dict mapping video_id to its metrics (tiou/viou/gt_viou/recalls)
        """
        if len(video_predictions) < len(self.vid2imgids):
            raise RuntimeError(
                f"{len(self.vid2imgids) - len(video_predictions)} video predictions missing"
            )
        if len(predictions) < len(self.img2box):
            raise RuntimeError(
                f"{len(self.img2box) - len(predictions)} box predictions missing"
            )
        vid_metrics = {}
        for video_id, video_pred in video_predictions.items():
            if video_id in vid_metrics:
                print(f"Warning, multiple predictions found for video {video_id}")
                continue
            if self.tmp_loc:
                gt_sted = self.vid2steds[video_id]
                pred_sted = video_pred["sted"]
            frame_ids, inter_frames = self.vid2imgids[video_id]
            # compute temporal iou
            if self.tmp_loc:
                max_start = max(gt_sted[0], pred_sted[0])
                min_end = min(gt_sted[1], pred_sted[1])
                min_start = min(gt_sted[0], pred_sted[0])
                max_end = max(gt_sted[1], pred_sted[1])
                if min_end <= max_start:
                    tiou = 0
                else:
                    intersection = min_end - max_start
                    gt_span = gt_sted[1] - gt_sted[0]
                    pred_span = pred_sted[1] - pred_sted[0]
                    union = gt_span + pred_span - intersection
                    tiou = intersection / union
                # compute viou and gt_viou
                vid_metrics[video_id] = {
                    "gt_sted": gt_sted,
                    "pred_sted": pred_sted,
                    "tiou": tiou,
                    "img_metrics": {},
                }
                union_predgt = [
                    frame_id
                    for frame_id in frame_ids
                    if min_start <= frame_id < max_end
                ]
                inter_predgt = set(
                    [
                        frame_id
                        for frame_id in frame_ids
                        if max_start <= frame_id < min_end
                    ]
                )
                viou = 0
            else:
                vid_metrics[video_id] = {
                    "img_metrics": {},
                }
                union_predgt = frame_ids
                inter_predgt = frame_ids
            gt_viou = 0
            for (
                image_id
            ) in (
                inter_frames
            ):  # iterate on all frames of the annotated moment to update GT metrics
                if image_id not in predictions:
                    raise RuntimeError(f"No prediction for frame {image_id}")
                gt_boxes = self.img2box[image_id]
                pred_boxes = predictions[image_id]["boxes"]
                iou = np_box_iou(np.array(pred_boxes), np.array(gt_boxes))[0][0]
                frame_id = int(image_id.split("_")[1])
                vid_metrics[video_id]["img_metrics"][image_id] = {
                    "iou": iou,
                    "pred_box": pred_boxes[0],
                    "gt_box": gt_boxes[0],
                }
                if (
                    frame_id in inter_predgt and self.tmp_loc
                ):  # update viou if this frame is in the intersection between the annotated moment and the predicted moment
                    viou += iou
                gt_viou += iou
            if self.tmp_loc:  # compute viou@R
                viou = viou / max(len(union_predgt), 1)
                vid_metrics[video_id]["viou"] = viou
                recalls = {thresh: 0 for thresh in self.iou_thresholds}
                for thresh in self.iou_thresholds:
                    if viou > thresh:
                        recalls[thresh] += 1
                vid_metrics[video_id].update(
                    {f"viou@{thresh}": recalls[thresh] for thresh in self.iou_thresholds}
                )
            # compute gt_viou@R
            gt_viou = gt_viou / max(len(inter_frames), 1)
            vid_metrics[video_id]["gt_viou"] = gt_viou
            gt_recalls = {thresh: 0 for thresh in self.iou_thresholds}
            for thresh in self.iou_thresholds:
                if gt_viou > thresh:
                    gt_recalls[thresh] += 1
            vid_metrics[video_id].update(
                {
                    f"gt_viou@{thresh}": gt_recalls[thresh]
                    for thresh in self.iou_thresholds
                }
            )
        return vid_metrics
class HCSTVGEvaluator(object):
    """Distributed-friendly wrapper around :class:`HCSTVGGiouEvaluator`.

    Accumulates per-frame and per-video predictions across processes, then
    averages the grounding metrics on the main process.
    """

    def __init__(
        self,
        hcstvg_path,
        subset,
        iou_thresholds=[0.3, 0.5],
        fps=5,
        video_max_len=200,
        v2=False,
        save_pred=False,
        tmp_loc=True,
    ):
        """
        :param hcstvg_path: path to HC-STVG annotations
        :param subset: train, val or test
        :param iou_thresholds: IoU thresholds for the vIoU metrics
        :param fps: number of frames per second
        :param video_max_len: maximum number of frames to be extracted from a video
        :param v2: whether to use the second version of the dataset
        :param save_pred: whether to save predictions in the output of summarize
        :param tmp_loc: whether to evaluate temporal localization
        """
        self.evaluator = HCSTVGGiouEvaluator(
            hcstvg_path,
            subset=subset,
            verbose=False,
            iou_thresholds=iou_thresholds,
            fps=fps,
            video_max_len=video_max_len,
            v2=v2,
            tmp_loc=tmp_loc,
        )
        self.predictions = {}  # image_id -> {"boxes": [...]}
        self.video_predictions = {}  # video_id -> {"sted": [start, end]}
        self.results = None  # per-video metrics, filled by summarize()
        self.iou_thresholds = iou_thresholds
        self.save_pred = save_pred
        self.tmp_loc = tmp_loc

    def accumulate(self):
        # No-op: metrics are computed in one shot inside summarize().
        pass

    def update(self, predictions):
        """Merge a batch of per-frame box predictions into the local buffer."""
        self.predictions.update(predictions)

    def video_update(self, video_predictions):
        """Merge a batch of per-video (temporal) predictions into the local buffer."""
        self.video_predictions.update(video_predictions)

    def synchronize_between_processes(self):
        """Gather predictions from all distributed workers and merge them locally."""
        all_predictions = dist.all_gather(self.predictions)
        # reduce with dict.update (which returns None) chained via `or` to merge in place
        self.predictions = reduce(lambda a, b: a.update(b) or a, all_predictions, {})
        all_video_predictions = dist.all_gather(self.video_predictions)
        self.video_predictions = reduce(
            lambda a, b: a.update(b) or a, all_video_predictions, {}
        )

    def summarize(self):
        """Run the evaluator on the main process and return averaged metrics.

        NOTE(review): the main process returns a single dict while other
        processes return a ``(None, None)`` tuple — confirm callers handle
        both return shapes.
        """
        if dist.is_main_process():
            self.results = self.evaluator.evaluate(
                self.predictions, self.video_predictions
            )
            metrics = {"gt_viou": 0}
            if self.tmp_loc:
                metrics.update({"tiou": 0, "viou": 0})
            for thresh in self.iou_thresholds:  # init metrics
                if self.tmp_loc:
                    metrics[f"viou@{thresh}"] = 0
                metrics[f"gt_viou@{thresh}"] = 0
            counter = 0
            for x in self.results.values():  # sum results
                if self.tmp_loc:
                    metrics["tiou"] += x["tiou"]
                    metrics["viou"] += x["viou"]
                metrics["gt_viou"] += x["gt_viou"]
                for thresh in self.iou_thresholds:
                    if self.tmp_loc:
                        metrics[f"viou@{thresh}"] += x[f"viou@{thresh}"]
                    metrics[f"gt_viou@{thresh}"] += x[f"gt_viou@{thresh}"]
                counter += 1
            for key in metrics:  # average results
                metrics[key] = metrics[key] / counter
                print(f"{key}: {metrics[key]:.4f}")
            out = {f"{name}": metrics[name] for name in metrics}
            if self.save_pred:
                out["predictions"] = self.predictions
                out["video_predictions"] = self.video_predictions
                out["vid_metrics"] = self.results
            return out
        return None, None
| en | 0.693006 | :param hcstvg_path: path to HC-STVG annotations :param subset: train, val or test :param verbose: whether to print more information or not :param iou_thresholds: IoU thresholds for the vIoU metrics :param fps: number of frames per second :param video_max_len: maximum number of frames to be extracted from a video :param v2: whether to use the second version of the dataset :param tmp_loc: whether to evaluate temporal localization # We load the image ids corresponding to the current subset # map video_id to list of corresponding frames to forward, index in all images of the first image to forward, and list of corresponding frames in the GT tube # map video_id to [start, end] of the GT tube # map video_id + frame_id to bbox # downsampling at fps # temporal downsampling if there is still too many images # compute temporal iou # compute viou and gt_viou # iterate on all frames of the annotated moment to update GT metrics # update viou if this frame is in the intersection between the annotated moment and the predicted moment # compute viou@R # compute gt_viou@R :param hcstvg_path: path to HC-STVG annotations :param subset: train, val or test :param verbose: whether to print more information or not :param iou_thresholds: IoU thresholds for the vIoU metrics :param fps: number of frames per second :param video_max_len: maximum number of frames to be extracted from a video :param v2: whether to use the second version of the dataset :param save_pred: whether to save predictions in the output of summarize # init metrics # sum results # average results | 2.47376 | 2 |
ball.py | RDShah/basketball | 0 | 6622011 | <gh_stars>0
from constants import dt
import numpy as np
from utilz import *
class Ball:
    """A 2-D ball with simple Euler-integrated kinematics.

    Positions, velocities and accelerations are length-2 numpy arrays.
    The ball may only be given a non-zero acceleration while a player has
    possession of it; leaving the court resets it to the origin at rest.
    """

    def __init__(self):
        self.position = np.zeros(2)      # current (x, y) location
        self.acceleration = np.zeros(2)  # current acceleration vector
        self.velocity = np.zeros(2)      # current velocity vector
        self.is_in_possession = False    # True while a player holds the ball

    def set_acceleration(self, acc=None):
        """Set the ball's acceleration (``None`` means zero acceleration).

        Fix: the previous signature used a mutable default (``acc=np.zeros(2)``),
        so every no-argument caller shared one array object; an in-place
        mutation of it would have corrupted every ball using the default.
        ``None`` now stands in for the zero vector, which keeps existing
        call sites backward compatible.
        """
        if acc is None:
            acc = np.zeros(2)
        # A zero vector is always legal; anything else requires possession.
        # (NOTE: assert statements are stripped under ``python -O``.)
        assert self.is_in_possession or np.all(acc == 0), \
            'No one has possession of the ball, it cannot accelerate.'
        self.acceleration = acc

    def step(self):
        """Advance position with the current velocity, then velocity with
        the current acceleration, over one time step ``dt``."""
        self.position += dt * self.velocity
        self.velocity += dt * self.acceleration
        if not in_bounds(self.position):
            # Ball left the court: respawn at the origin, at rest.
            self.position = np.zeros(2)
            self.velocity = np.zeros(2)

    def get_summary(self):
        """Return a snapshot dict of position, velocity and possession."""
        return {
            'position': self.position,
            'velocity': self.velocity,
            'possession': self.is_in_possession
        }
import numpy as np
from utilz import *
class Ball:
    """2-D ball with Euler-integrated motion (duplicate copy of the class above)."""
    def __init__(self):
        self.position = np.zeros(2)      # current (x, y) location
        self.acceleration = np.zeros(2)  # current acceleration vector
        self.velocity = np.zeros(2)      # current velocity vector
        self.is_in_possession = False    # True while a player holds the ball
    def set_acceleration(self,acc=np.zeros(2)):
        # NOTE(review): mutable default argument -- every no-arg caller shares
        # one array object. Also, assert is stripped under ``python -O``.
        assert self.is_in_possession or (acc==np.zeros(2)).all(),'No one has possession of the ball, it cannot accelerate.'
        self.acceleration = acc
    def step(self):
        # Advance position with the current velocity, then velocity with the
        # current acceleration, over one time step dt.
        self.position += dt*self.velocity
        self.velocity += dt*self.acceleration
        if not in_bounds(self.position):
            # Out of bounds: reset to the origin, at rest.
            self.position = np.zeros(2)
            self.velocity = np.zeros(2)
    def get_summary(self):
        # Snapshot of the mutable state arrays (not copies).
        return {
            'position':self.position,
            'velocity':self.velocity,
            'possession':self.is_in_possession
        } | none | 1 | 3.57617 | 4 |
utils/macrostrat/utils/__init__.py | UW-Macrostrat/python-tools | 0 | 6622012 | """
This module houses utility functions that are shared between Sparrow's
core and command-line interface.
"""
import os
from contextlib import contextmanager
from pathlib import Path
from .logs import setup_stderr_logs, get_logger
from .shell import cmd, split_args
def relative_path(base, *parts) -> Path:
    """Return *base* joined with *parts*; if *base* is a file path (not a
    directory), join relative to its containing directory instead."""
    root = str(base)
    if not os.path.isdir(root):
        root = os.path.dirname(root)
    return Path(os.path.join(root, *parts))
@contextmanager
def working_directory(path: Path):
    """A context manager which changes the working directory to the given
    path, and then changes it back to its previous value on exit.

    Fix: the restoring ``chdir`` now sits in a ``finally`` clause so the
    previous directory is restored even when the managed body raises.
    """
    prev_cwd = os.getcwd()
    os.chdir(str(path))
    try:
        yield
    finally:
        os.chdir(prev_cwd)
| """
This module houses utility functions that are shared between Sparrow's
core and command-line interface.
"""
import os
from contextlib import contextmanager
from pathlib import Path
from .logs import setup_stderr_logs, get_logger
from .shell import cmd, split_args
def relative_path(base, *parts)->Path:
    # When *base* is a file path (not a directory), resolve relative to its
    # containing directory. (Duplicate copy of the function above.)
    if not os.path.isdir(str(base)):
        base = os.path.dirname(base)
    return Path(os.path.join(base, *parts))
@contextmanager
def working_directory(path: Path):
    """A context manager which changes the working directory to the given
    path, and then changes it back to its previous value on exit.
    """
    prev_cwd = os.getcwd()
    os.chdir(str(path))
    # NOTE(review): if the with-body raises, the restoring chdir below never
    # runs; the yield should be wrapped in try/finally.
    yield
    os.chdir(prev_cwd)
| en | 0.939161 | This module houses utility functions that are shared between Sparrow's core and command-line interface. A context manager which changes the working directory to the given path, and then changes it back to its previous value on exit. | 2.517593 | 3 |
keypoint-network/trainer.py | RahulSajnani/DRACO-Weakly-Supervised-Dense-Reconstruction-And-Canonicalization-of-Objects | 3 | 6622013 |
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.core.lightning import LightningModule
from Data_loaders import data_loader
from models import hourglass
class HeatmapLoss(torch.nn.Module):
    """BCE-with-logits loss over the per-stack heatmap predictions of a
    stacked-hourglass network.

    In weighted mode, keypoint pixels (ground truth > 0.1) receive a
    pos_weight of 82 versus 1 for background pixels.
    """

    def __init__(self, nstack, weighted=False):
        super().__init__()
        self.nstack = nstack      # number of hourglass stacks to average over
        self.weighted = weighted  # up-weight keypoint pixels when True

    def loss_single(self, pred, ground_truth):
        """BCE-with-logits loss between one stack's logits and the target heatmaps."""
        if self.weighted:
            pos_weight = (ground_truth > 0.1) * 81 + 1
            criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        else:
            criterion = torch.nn.BCEWithLogitsLoss()
        return criterion(pred, ground_truth)

    def forward(self, combined_heatmap_preds, heatmaps_gt):
        """Mean loss over all stacks; predictions are indexed [batch, stack, ...]."""
        per_stack = [
            self.loss_single(combined_heatmap_preds[:, stack], heatmaps_gt)
            for stack in range(self.nstack)
        ]
        return torch.mean(torch.stack(per_stack, dim=0))
class HG_trainer(LightningModule):
    """PyTorch-Lightning wrapper around a stacked-hourglass keypoint network.

    Trains with ``HeatmapLoss`` and checkpoints the raw network weights
    whenever the epoch validation loss improves.
    NOTE(review): the ``*_epoch_end`` hooks and dict-style returns follow an
    older pytorch-lightning API -- confirm against the pinned version.
    """
    def __init__(self, batch_size, dataset_path, nstack = 3, nclasses = 22, nblocks = 4, weighted = False, **kwargs):
        super(HG_trainer, self).__init__()
        self.dataset_path = dataset_path
        self.hparams.batch_size = batch_size
        self.hparams.num_workers = 4
        self.nstack = nstack
        self.nblock = nblocks
        self.weighted = weighted
        self.network = hourglass.hg(num_classes = nclasses, num_stacks = nstack, num_blocks = nblocks)
        self.calc_loss = HeatmapLoss(nstack, weighted)
        self.least_loss = 100  # NOTE(review): not referenced elsewhere in this class
        self.least_loss_val = 100  # best epoch validation loss seen so far
    def forward(self, imgs):
        # The hourglass returns one heatmap tensor per stack; stack on a new
        # dim=1 so predictions are indexed [batch, stack, ...].
        all_pred_heatmaps = self.network(imgs)
        return torch.stack(all_pred_heatmaps, dim=1)
    def training_step(self, batch, batch_idx):
        """
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        # forward pass
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        train_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        epoch_trainer_logger = {"train_loss": train_loss}
        return {"loss": train_loss, "train_epoch_logger": epoch_trainer_logger, "log": epoch_trainer_logger}
    def validation_step(self, batch, batch_idx):
        """
        Called every batch
        Lightning calls this inside the validation loop with the data from the validation dataloader
        passed in as `batch`.
        """
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        val_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        log_tb = {"val_loss": val_loss}
        return {"val_loss": val_loss, "log": log_tb, "val_epoch_logger": log_tb}
    '''
    def test_step(self, batch, batch_idx):
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        test_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        tensorboard_logs = {'test_loss': test_loss}
        return {'test_loss': test_loss, 'test_log': tensorboard_logs}
    '''
    def training_epoch_end(self, outputs):
        '''
        Logging all losses at the end of training epoch
        '''
        epoch_train_loss = torch.stack([x['train_epoch_logger']['train_loss'] for x in outputs]).mean()
        print("\nTrain loss:", epoch_train_loss)
        return {"train_avg_loss": epoch_train_loss}
    def validation_epoch_end(self, outputs):
        '''
        Logging all losses at the end of training epoch
        '''
        epoch_val_loss = torch.stack([x['val_epoch_logger']['val_loss'] for x in outputs]).mean()
        print("\nValidation loss:", epoch_val_loss)
        # Checkpoint whenever the epoch validation loss improves.
        if self.least_loss_val > epoch_val_loss:
            self.least_loss_val = epoch_val_loss
            self.save_model()
        pbar = {"val_loss": epoch_val_loss}
        return {"val_avg_loss": epoch_val_loss, "progress_bar": pbar}
    def save_model(self):
        '''
        Custom save model function for SLURM
        '''
        # Only the bare network weights are saved (not the Lightning module),
        # under a name that encodes the stack/block/weighting configuration.
        if self.weighted:
            save_file_name = f"./hg_rms_large{self.nstack}_{self.nblock}_change_{self.weighted*1}.ckpt"
        else:
            save_file_name = f"./hg_rms_no_weights_{self.nstack}_{self.nblock}large_change_{self.weighted*1}.ckpt"
        torch.save(self.network.state_dict(), save_file_name)
        print(f"Saved model in location {save_file_name}")
    '''
    def test_epoch_end(self, outputs):
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        tensorboard_logs = {'test_loss': avg_loss}
        return {'test_loss': avg_loss, 'log': tensorboard_logs}
    '''
    def configure_optimizers(self):
        optimizer = torch.optim.RMSprop(self.network.parameters(), lr=2.5e-4)
        #optimizer = torch.optim.Adam(self.network.parameters(), lr=2.5e-5, weight_decay = 0.004, betas= (0.009, 0.999))
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience = 5, verbose = True)
        return [optimizer], [scheduler]
    def setup(self, stage):
        # train=1 -> training split, train=2 -> validation, train=0 -> test.
        self.train_set = data_loader.Keypoint_dataset(self.dataset_path, train = 1)
        self.val_set = data_loader.Keypoint_dataset(self.dataset_path, train = 2)
        self.test_set = data_loader.Keypoint_dataset(self.dataset_path, train = 0)
    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, shuffle = True)
    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, shuffle = False)
|
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.core.lightning import LightningModule
from Data_loaders import data_loader
from models import hourglass
class HeatmapLoss(torch.nn.Module):
    """
    loss for detection heatmap (duplicate copy of the class above)
    """
    def __init__(self, nstack, weighted = False):
        super().__init__()
        self.nstack = nstack
        self.weighted = weighted
    def loss_single(self, pred, ground_truth):
        # Weight 82 on keypoint pixels (gt > 0.1), 1 on background; only
        # used when self.weighted is set.
        weights = (ground_truth > 0.1) * 81 + 1
        criterion = torch.nn.BCEWithLogitsLoss()
        if self.weighted:
            criterion = torch.nn.BCEWithLogitsLoss(pos_weight=weights)
        #print("check")
        # l = ((pred - ground_truth)**2) * weights
        #else:
        # l = ((pred - ground_truth)**2)
        l = criterion(pred, ground_truth)
        #l = l.mean(dim=3).mean(dim=2).mean(dim=1) #[4, 16, 64, 64] -> [4]
        #print(l)
        return l # size = batch_size
    def forward(self, combined_heatmap_preds, heatmaps_gt):
        # Average the per-stack losses over all hourglass stacks.
        combined_loss = []
        for i in range(self.nstack):
            combined_loss.append(self.loss_single(combined_heatmap_preds[: ,i], heatmaps_gt))
        #print(combined_loss)
        combined_loss = torch.stack(combined_loss, dim=0)
        mean_loss = torch.mean(combined_loss)
        return mean_loss
class HG_trainer(LightningModule):
    """Lightning wrapper around a stacked-hourglass keypoint network
    (duplicate copy of the class above)."""
    def __init__(self, batch_size, dataset_path, nstack = 3, nclasses = 22, nblocks = 4, weighted = False, **kwargs):
        super(HG_trainer, self).__init__()
        self.dataset_path = dataset_path
        self.hparams.batch_size = batch_size
        self.hparams.num_workers = 4
        self.nstack = nstack
        self.nblock = nblocks
        self.weighted = weighted
        self.network = hourglass.hg(num_classes = nclasses, num_stacks = nstack, num_blocks = nblocks)
        self.calc_loss = HeatmapLoss(nstack, weighted)
        self.least_loss = 100  # NOTE(review): not referenced elsewhere in this class
        self.least_loss_val = 100  # best epoch validation loss seen so far
    def forward(self, imgs):
        # One heatmap tensor per stack, stacked on dim=1: [batch, stack, ...].
        all_pred_heatmaps = self.network(imgs)
        return torch.stack(all_pred_heatmaps, dim=1)
    def training_step(self, batch, batch_idx):
        """
        Lightning calls this inside the training loop with the data from the training dataloader
        passed in as `batch`.
        """
        # forward pass
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        train_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        epoch_trainer_logger = {"train_loss": train_loss}
        return {"loss": train_loss, "train_epoch_logger": epoch_trainer_logger, "log": epoch_trainer_logger}
    def validation_step(self, batch, batch_idx):
        """
        Called every batch
        Lightning calls this inside the validation loop with the data from the validation dataloader
        passed in as `batch`.
        """
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        val_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        log_tb = {"val_loss": val_loss}
        return {"val_loss": val_loss, "log": log_tb, "val_epoch_logger": log_tb}
    '''
    def test_step(self, batch, batch_idx):
        batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"]
        combined_heatmap_preds = self(batch_imgs)
        test_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt)
        tensorboard_logs = {'test_loss': test_loss}
        return {'test_loss': test_loss, 'test_log': tensorboard_logs}
    '''
    def training_epoch_end(self, outputs):
        '''
        Logging all losses at the end of training epoch
        '''
        epoch_train_loss = torch.stack([x['train_epoch_logger']['train_loss'] for x in outputs]).mean()
        print("\nTrain loss:", epoch_train_loss)
        return {"train_avg_loss": epoch_train_loss}
    def validation_epoch_end(self, outputs):
        '''
        Logging all losses at the end of training epoch
        '''
        epoch_val_loss = torch.stack([x['val_epoch_logger']['val_loss'] for x in outputs]).mean()
        print("\nValidation loss:", epoch_val_loss)
        # Checkpoint whenever the epoch validation loss improves.
        if self.least_loss_val > epoch_val_loss:
            self.least_loss_val = epoch_val_loss
            self.save_model()
        pbar = {"val_loss": epoch_val_loss}
        return {"val_avg_loss": epoch_val_loss, "progress_bar": pbar}
    def save_model(self):
        '''
        Custom save model function for SLURM
        '''
        if self.weighted:
            save_file_name = f"./hg_rms_large{self.nstack}_{self.nblock}_change_{self.weighted*1}.ckpt"
        else:
            save_file_name = f"./hg_rms_no_weights_{self.nstack}_{self.nblock}large_change_{self.weighted*1}.ckpt"
        torch.save(self.network.state_dict(), save_file_name)
        print(f"Saved model in location {save_file_name}")
    '''
    def test_epoch_end(self, outputs):
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        tensorboard_logs = {'test_loss': avg_loss}
        return {'test_loss': avg_loss, 'log': tensorboard_logs}
    '''
    def configure_optimizers(self):
        optimizer = torch.optim.RMSprop(self.network.parameters(), lr=2.5e-4)
        #optimizer = torch.optim.Adam(self.network.parameters(), lr=2.5e-5, weight_decay = 0.004, betas= (0.009, 0.999))
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience = 5, verbose = True)
        return [optimizer], [scheduler]
    def setup(self, stage):
        # train=1 -> training split, train=2 -> validation, train=0 -> test.
        self.train_set = data_loader.Keypoint_dataset(self.dataset_path, train = 1)
        self.val_set = data_loader.Keypoint_dataset(self.dataset_path, train = 2)
        self.test_set = data_loader.Keypoint_dataset(self.dataset_path, train = 0)
    def train_dataloader(self):
        return DataLoader(self.train_set, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, shuffle = True)
    def val_dataloader(self):
        return DataLoader(self.val_set, batch_size=self.hparams.batch_size, num_workers=self.hparams.num_workers, shuffle = False)
| en | 0.568857 | loss for detection heatmap #print("check") # l = ((pred - ground_truth)**2) * weights #else: # l = ((pred - ground_truth)**2) #l = l.mean(dim=3).mean(dim=2).mean(dim=1) #[4, 16, 64, 64] -> [4] #print(l) # size = batch_size #print(combined_loss) Lightning calls this inside the training loop with the data from the training dataloader passed in as `batch`. # forward pass Called every batch Lightning calls this inside the validation loop with the data from the validation dataloader passed in as `batch`. def test_step(self, batch, batch_idx): batch_imgs, heatmaps_gt = batch["views"], batch["heatmaps"] combined_heatmap_preds = self(batch_imgs) test_loss = self.calc_loss(combined_heatmap_preds, heatmaps_gt) tensorboard_logs = {'test_loss': test_loss} return {'test_loss': test_loss, 'test_log': tensorboard_logs} Logging all losses at the end of training epoch Logging all losses at the end of training epoch Custom save model function for SLURM def test_epoch_end(self, outputs): avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean() tensorboard_logs = {'test_loss': avg_loss} return {'test_loss': avg_loss, 'log': tensorboard_logs} #optimizer = torch.optim.Adam(self.network.parameters(), lr=2.5e-5, weight_decay = 0.004, betas= (0.009, 0.999)) | 2.434804 | 2 |
gowleyes/main.py | CrakeNotSnowman/gowleyes | 0 | 6622014 | <filename>gowleyes/main.py
import argparse, textwrap
import logging
import os
from utils import docsToEReader
from utils import sendToEreader
from utils import processWebPages
def joinAndMakeDir(parent, child):
    """Join *parent* and *child* into a path and ensure that directory exists.

    Fix: the old exists()/makedirs() pair was racy (another process could
    create the directory between the check and the call, raising
    FileExistsError); ``exist_ok=True`` makes creation idempotent.
    Returns the joined path.
    """
    newDir = os.path.join(parent, child)
    if not os.path.exists(newDir):
        logging.info("Making new directory: " + newDir)
    os.makedirs(newDir, exist_ok=True)
    return newDir
def interface():
args = argparse.ArgumentParser(
prog='main.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description='A Document converter to prep and send files to my ereader',
epilog=textwrap.dedent('''\
Program currently does not take input.
'''))
args.add_argument('-q', '--quiet-mode', type=bool, default=False,\
help='[True/False] An input that is not currently used')
args = args.parse_args()
return args
def alskd():
    '''
    Ad-hoc smoke check: run the LaTeX -> HTML (htlatex) conversion on a
    bundled sample document and assert that it reports success.
    '''
    from utils import docsToEReader
    sourceTex_Filepath = "tests/test_format_src/tex/Basic_Tex/Simple_And_Plain/SimpleAndPlain.tex"
    output_Directory = "temp/"
    x = docsToEReader.latexSourceToHTML_htlatex(sourceTex_Filepath, output_Directory)
    #print(x)
    # presumably the converter returns a positive status/count on success --
    # TODO confirm against docsToEReader.latexSourceToHTML_htlatex
    assert x >0
if __name__ == "__main__":
#args = interface()
#alskd()
pass | <filename>gowleyes/main.py
import argparse, textwrap
import logging
import os
from utils import docsToEReader
from utils import sendToEreader
from utils import processWebPages
def joinAndMakeDir(parent, child):
    """Join parent/child and create the directory if it does not yet exist."""
    newDir = os.path.join(parent, child)
    if not os.path.exists(newDir):
        # NOTE(review): exists()+makedirs() is racy; makedirs(..., exist_ok=True)
        # would be safe against concurrent creation.
        os.makedirs(newDir)
        logging.info("Making new directory: " + newDir)
    return newDir
def interface():
    """Build the CLI argument parser and parse sys.argv."""
    args = argparse.ArgumentParser(
        prog='main.py',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='A Document converter to prep and send files to my ereader',
        epilog=textwrap.dedent('''\
            Program currently does not take input.
            '''))
    # NOTE(review): type=bool is a known argparse pitfall -- any non-empty
    # string (including 'False') parses as True.
    args.add_argument('-q', '--quiet-mode', type=bool, default=False,\
        help='[True/False] An input that is not currently used')
    args = args.parse_args()
    return args
def alskd():
    '''
    Ad-hoc smoke check for the LaTeX -> HTML (htlatex) conversion helper.
    '''
    from utils import docsToEReader
    sourceTex_Filepath = "tests/test_format_src/tex/Basic_Tex/Simple_And_Plain/SimpleAndPlain.tex"
    output_Directory = "temp/"
    x = docsToEReader.latexSourceToHTML_htlatex(sourceTex_Filepath, output_Directory)
    #print(x)
    assert x >0
if __name__ == "__main__":
#args = interface()
#alskd()
pass | en | 0.787923 | \ Program currently does not take input. #print(x) #args = interface() #alskd() | 2.254266 | 2 |
esep/plot/plot_tmp.py | lyingTree/ESEP | 0 | 6622015 | <reponame>lyingTree/ESEP
# -*- coding:utf-8 -*-
"""
-------------------------------------------------------------------------------
Project Name : ESEP
File Name : plot_tmp.py
Start Date : 2021-10-06 14:40
Contributor : D.CW
Email : <EMAIL>
-------------------------------------------------------------------------------
Introduction:
$END$
-------------------------------------------------------------------------------
"""
import matplotlib.pyplot as plt
import numpy as np
def init_fig(row, col, figsize=(12, 8), left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):
    """Create a row x col subplot grid and apply the spacing adjustments.

    Fix: the ``hspace`` argument used to be accepted but ignored (the call
    hard-coded 0.05). It is now honoured, with 0.05 kept as the effective
    default so existing callers see no change.
    Returns the (figure, axes) pair from plt.subplots.
    """
    if hspace is None:
        hspace = 0.05
    fig, axs = plt.subplots(row, col, figsize=figsize)
    fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=hspace)
    return fig, axs
def time_series(data_ls, row, col, fig_conf, add_idx_text):
    """Render one panel per entry of *data_ls* on a row x col subplot grid.

    Each entry is a tuple (x, y, z, addition_data, plot_type, plt_opt);
    ``addition_data`` may override ticks, labels, limits and legend, and
    ``plot_type`` selects scatter/contour/contourf (default: line plot).
    Returns (fig, axs) where ``axs`` is a flat list of Axes.
    """
    fig, axs = init_fig(row, col, **fig_conf)
    # Fix: plt.subplots returns a bare Axes for a 1x1 grid and a (possibly
    # 2-D) ndarray otherwise. The old `isinstance(axs, list)` test wrapped
    # the whole ndarray in a one-element list, so axs[idx] was not an Axes
    # for grids with more than one subplot. Flatten instead.
    axs = list(np.ravel(axs))
    handles = []
    for idx, data in enumerate(data_ls):
        x, y, z, addition_data, plot_type, plt_opt = data
        if plot_type == 'scatter':
            tmp = axs[idx].scatter(x, y, **plt_opt)
        elif plot_type == 'contour':
            tmp = axs[idx].contour(x, y, z, **plt_opt)
        elif plot_type == 'contourf':
            tmp = axs[idx].contourf(x, y, z, **plt_opt)
        else:
            tmp = axs[idx].plot(x, y, **plt_opt)
        handles.append(tmp)
        # Fall back to the raw data (or its range) wherever the caller does
        # not override the cosmetic settings in addition_data.
        xticks = x if addition_data.get('xticks') is None else addition_data.get('xticks')
        xlabels = x if addition_data.get('xlabels') is None else addition_data.get('xlabels')
        yticks = y if addition_data.get('yticks') is None else addition_data.get('yticks')
        ylabels = y if addition_data.get('ylabels') is None else addition_data.get('ylabels')
        xlim = [np.nanmin(x), np.nanmax(x)] if addition_data.get('xlim') is None else addition_data.get('xlim')
        ylim = [np.nanmin(y), np.nanmax(y)] if addition_data.get('ylim') is None else addition_data.get('ylim')
        legend = addition_data.get('legend')
        axs[idx].set_xticks(xticks)
        axs[idx].set_xticklabels(xlabels)
        axs[idx].set_yticks(yticks)
        axs[idx].set_yticklabels(ylabels)
        axs[idx].set_xlim(xlim)
        axs[idx].set_ylim(ylim)
        axs[idx].legend(legend)
        if add_idx_text:
            # Tag the panel with "(a)", "(b)", ... in axes coordinates.
            axs[idx].text(0.005, 0.88, '({0})'.format(chr(97 + idx)), fontsize=20, transform=axs[idx].transAxes)
    return fig, axs
| # -*- coding:utf-8 -*-
"""
-------------------------------------------------------------------------------
Project Name : ESEP
File Name : plot_tmp.py
Start Date : 2021-10-06 14:40
Contributor : D.CW
Email : <EMAIL>
-------------------------------------------------------------------------------
Introduction:
$END$
-------------------------------------------------------------------------------
"""
import matplotlib.pyplot as plt
import numpy as np
def init_fig(row, col, figsize=(12, 8), left=None, bottom=None, right=None, top=None, wspace=None, hspace=None):
    # NOTE(review): the hspace parameter is accepted but ignored -- the row
    # spacing below is hard-coded to 0.05.
    fig, axs = plt.subplots(row, col, figsize=figsize)
    fig.subplots_adjust(left=left, bottom=bottom, right=right, top=top, wspace=wspace, hspace=0.05)
    return fig, axs
def time_series(data_ls, row, col, fig_conf, add_idx_text):
    """Render one panel per entry of data_ls; each entry is a tuple
    (x, y, z, addition_data, plot_type, plt_opt)."""
    fig, axs = init_fig(row, col, **fig_conf)
    # NOTE(review): plt.subplots returns an ndarray (not a list) for grids
    # with more than one subplot, so this wraps the whole array in a list
    # and axs[idx] is then not an Axes; flattening via np.ravel would fix it.
    if not isinstance(axs, list):
        axs = [axs]
    handles = []
    for idx, data in enumerate(data_ls):
        x, y, z, addition_data, plot_type, plt_opt = data
        if plot_type == 'scatter':
            tmp = axs[idx].scatter(x, y, **plt_opt)
        elif plot_type == 'contour':
            tmp = axs[idx].contour(x, y, z, **plt_opt)
        elif plot_type == 'contourf':
            tmp = axs[idx].contourf(x, y, z, **plt_opt)
        else:
            tmp = axs[idx].plot(x, y, **plt_opt)
        handles.append(tmp)
        # Fall back to the raw data for ticks/labels/limits when not overridden.
        xticks = x if addition_data.get('xticks') is None else addition_data.get('xticks')
        xlabels = x if addition_data.get('xlabels') is None else addition_data.get('xlabels')
        yticks = y if addition_data.get('yticks') is None else addition_data.get('yticks')
        ylabels = y if addition_data.get('ylabels') is None else addition_data.get('ylabels')
        xlim = [np.nanmin(x), np.nanmax(x)] if addition_data.get('xlim') is None else addition_data.get('xlim')
        ylim = [np.nanmin(y), np.nanmax(y)] if addition_data.get('ylim') is None else addition_data.get('ylim')
        legend = addition_data.get('legend')
        axs[idx].set_xticks(xticks)
        axs[idx].set_xticklabels(xlabels)
        axs[idx].set_yticks(yticks)
        axs[idx].set_yticklabels(ylabels)
        axs[idx].set_xlim(xlim)
        axs[idx].set_ylim(ylim)
        axs[idx].legend(legend)
        if add_idx_text:
            # Tag the panel with "(a)", "(b)", ... in axes coordinates.
            axs[idx].text(0.005, 0.88, '({0})'.format(chr(97 + idx)), fontsize=20, transform=axs[idx].transAxes)
    return fig, axs
return fig, axs | en | 0.199247 | # -*- coding:utf-8 -*- ------------------------------------------------------------------------------- Project Name : ESEP File Name : plot_tmp.py Start Date : 2021-10-06 14:40 Contributor : D.CW Email : <EMAIL> ------------------------------------------------------------------------------- Introduction: $END$ ------------------------------------------------------------------------------- | 2.519871 | 3 |
apps/labs/module01/SystemPerformanceApp.py | shyamsastha/iot-device | 0 | 6622016 | '''
Created on Jan 17, 2019
Simple Python Application
@author: <NAME>
'''
import time

from labs.module01 import SystemPerformanceAdaptor

# initiating the adaptor
sysPerfAdaptor = SystemPerformanceAdaptor.SystemPerformanceAdaptor()
# run as a daemon so the worker thread dies with the main process
sysPerfAdaptor.daemon = True
print("Starting system performance app daemon thread...")
print("Update every 10 seconds...")
# enabling the adaptor
sysPerfAdaptor.EnableAdaptor = True
# starting the thread
sysPerfAdaptor.start()
# Keep the main thread alive so the daemon worker can run. Fix: sleep in the
# idle loop instead of a busy `pass`, which pinned a CPU core at 100%.
while (True):
    time.sleep(1)
if __name__ == '__main__':
    pass
Created on Jan 17, 2019
Simple Python Application
@author: <NAME>
'''
from labs.module01 import SystemPerformanceAdaptor
#initiating the adaptor
sysPerfAdaptor = SystemPerformanceAdaptor.SystemPerformanceAdaptor()
#initiating the daemon (worker thread dies with the main process)
sysPerfAdaptor.daemon = True
print("Starting system performance app daemon thread...")
print("Update every 10 seconds...")
#enabling the adaptor
sysPerfAdaptor.EnableAdaptor = True
#starting the thread
sysPerfAdaptor.start()
#condition for the infinite loop
#NOTE(review): busy-wait pins a CPU core; prefer time.sleep() in the loop.
while (True):
    pass
if __name__ == '__main__':
    pass | # -*- coding: utf-8 -*-
setup.py | musketeer90/smarterp | 2 | 6622017 | # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read install dependencies straight from requirements.txt, one per line.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')
# get version from __version__ variable in smarterp/__init__.py
from smarterp import __version__ as version
setup(
    name='smarterp',
    version=version,
    description='Improvements to ERPNexxt with Machine Learning and Data Analysis',
    author='<EMAIL>',
    author_email='<EMAIL>',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
| # -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Read install dependencies straight from requirements.txt, one per line.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')
# get version from __version__ variable in smarterp/__init__.py
from smarterp import __version__ as version
setup(
    name='smarterp',
    version=version,
    description='Improvements to ERPNexxt with Machine Learning and Data Analysis',
    author='<EMAIL>',
    author_email='<EMAIL>',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    install_requires=install_requires
)
| en | 0.525048 | # -*- coding: utf-8 -*- # get version from __version__ variable in smarterp/__init__.py | 1.586898 | 2 |
pellet_labels/eval_ensemble.py | mpascucci/AST-image-processing | 6 | 6622018 | <gh_stars>1-10
import argparse
import os
import pickle
import numpy as np
import tensorflow as tf
import pandas as pd
from trainer import model
from trainer.pellet_list import PELLET_LIST, REMOVED_CLASSES
from trainer import task
from util import gcs_util as util
WORKING_DIR = os.getcwd()
MODEL_FOLDER = 'uncertainty_pellet_labels_model'
def get_args():
    """Parse the command-line options for the ensemble evaluation job.

    Unknown extra arguments (e.g. ones injected by a launcher) are ignored
    via parse_known_args.
    """
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('--job-dir', type=str, required=True,
        help='GCS location to the ensemble models')
    add('--train-files', type=str, required=True, nargs='*',
        help='Dataset training file local or GCS')
    add('--destination-file', type=str, required=True,
        default='uncertainty_data.pickle',
        help='File name to write uncertainty data to')
    add('--n-ensemble', type=int, default=10,
        help='Number of ensemble models that were trained')
    add('--img-size', type=int, default=64,
        help='square size to resize input images to in pixel, default=64')
    add('--threshold', type=str, default='std_dev',
        choices=['std_dev', 'max_p', 'entropy'],
        help='which type of threshold to use to calculate uncertainty_data')
    known_args, _ = parser.parse_known_args()
    return known_args
class Evaluator():
    """Sweeps an uncertainty threshold over in-distribution ('valid') and
    out-of-distribution ('ukn') ensemble predictions.

    ``evaluate(valid_predictions, ukn_predictions)`` returns a list of
    ``[threshold, in_set_acc, out_set_mis]`` rows, where each fraction is
    the share of that set accepted at the threshold.
    """

    def __init__(self, threshold):
        self.threshold = threshold
        # Resolve the sweep strategy once; `evaluate` is a bound method.
        self.evaluate = {
            'entropy': self._evaluate_entropy,
            'max_p': self._evaluate_max_p,
            'std_dev': self._evaluate_std_dev,
        }[threshold]

    def _evaluate_entropy(self, valid_predictions, ukn_predictions):
        """Accept samples whose ensemble-mean predictive entropy is <= t."""
        valid_scores = compute_entropies(np.mean(valid_predictions, axis=0))
        ukn_scores = compute_entropies(np.mean(ukn_predictions, axis=0))
        thresholds = np.linspace(0., np.log(10), 1000, endpoint=True)
        return self._sweep(thresholds, valid_scores, ukn_scores, keep_low=True)

    def _evaluate_max_p(self, valid_predictions, ukn_predictions):
        """Accept samples whose ensemble-mean top-class probability is >= t."""
        valid_scores = np.amax(np.mean(valid_predictions, axis=0), axis=-1)
        ukn_scores = np.amax(np.mean(ukn_predictions, axis=0), axis=-1)
        thresholds = np.arange(0.5, 1, 0.001)
        return self._sweep(thresholds, valid_scores, ukn_scores, keep_low=False)

    def _evaluate_std_dev(self, valid_predictions, ukn_predictions):
        """Accept samples on which the ensemble members agree (std-dev <= t)."""
        valid_scores = compute_deviations(valid_predictions)
        ukn_scores = compute_deviations(ukn_predictions)
        thresholds = np.arange(0.0001, 0.1, 0.0001)
        return self._sweep(thresholds, valid_scores, ukn_scores, keep_low=True)

    @staticmethod
    def _sweep(thresholds, valid_scores, ukn_scores, keep_low):
        """Fraction of each score set kept at every threshold value."""
        rows = []
        for t in thresholds:
            if keep_low:
                kept_valid = np.count_nonzero(valid_scores <= t)
                kept_ukn = np.count_nonzero(ukn_scores <= t)
            else:
                kept_valid = np.count_nonzero(valid_scores >= t)
                kept_ukn = np.count_nonzero(ukn_scores >= t)
            rows.append([t, kept_valid / len(valid_scores), kept_ukn / len(ukn_scores)])
        return rows
def compute_entropies(predictions):
    """Shannon entropy of each probability distribution along the last axis.

    A small epsilon (1e-10) keeps log() finite for zero-probability classes.
    (For background on distribution entropy see the categorical
    cross-entropy reference linked in the original comment.)
    """
    log_probs = np.log(predictions + 1e-10)
    return -(predictions * log_probs).sum(axis=-1)
def compute_deviations(predictions):
    """Standard deviation, across ensemble members, of each sample's
    top-class probability.

    The leading axis indexes ensemble members (see the per-model prediction
    lists built by the caller); a high deviation means the members disagree.
    """
    top_probs = np.max(predictions, axis=-1)
    return np.std(top_probs, axis=0)
def eval_ensemble(args):
    """Evaluate an ensemble of GCS-trained models on in- vs out-of-distribution data.

    Loads and preprocesses every training file, restores ``args.n_ensemble``
    Keras models from ``args.job_dir``, predicts on the validation
    (in-distribution) and unknown (out-of-distribution) images, sweeps the
    selected uncertainty threshold ('max_p', 'entropy' or 'std_dev') via
    ``Evaluator`` and pickles the resulting pandas DataFrame of accuracy
    metrics to ``args.destination_file`` under WORKING_DIR.
    """
    # Models must live on GCS. (NOTE: assert is stripped under ``python -O``.)
    assert(args.job_dir.startswith('gs://'))
    class_list = [pellet_class for pellet_class in PELLET_LIST
        if pellet_class not in REMOVED_CLASSES]
    train_images = []
    train_labels = []
    valid_images = []
    valid_labels = []
    ukn_images = []
    for path in args.train_files:
        input_data = model.load_and_preprocess_data(
            path,
            WORKING_DIR,
            args.img_size,
            class_list,
            REMOVED_CLASSES)
        train_images.append(input_data.train_data)
        train_labels.append(input_data.train_labels)
        valid_images.append(input_data.valid_data)
        valid_labels.append(input_data.valid_labels)
        ukn_images.append(input_data.ukn_data)
    # NOTE(review): train_images/train_labels are concatenated but never
    # used below -- candidates for removal.
    train_images = np.concatenate(train_images, axis=0)
    train_labels = np.concatenate(train_labels, axis=0)
    valid_images = np.concatenate(valid_images, axis=0)
    valid_labels = np.concatenate(valid_labels, axis=0)
    ukn_images = np.concatenate(ukn_images, axis=0)
    # Load models; the custom_objects mapping lets layers that reference
    # tf.sin deserialize.
    model_paths = util.load_models_from_gcs(
        args.job_dir, MODEL_FOLDER, task.MODEL_NAME, WORKING_DIR, args.n_ensemble)
    models = []
    for path in model_paths:
        models.append(tf.keras.models.load_model(path, {'sin': tf.sin}))
    # Generate predictions; shuffle=False keeps sample order aligned across
    # ensemble members.
    image_gen = model.get_data_generator()
    valid_flow = image_gen.flow(valid_images, valid_labels, shuffle=False)
    ukn_flow = image_gen.flow(ukn_images, shuffle=False)
    valid_predictions = []
    ukn_predictions = []
    for m in models:
        valid_predictions.append(m.predict(valid_flow))
        ukn_predictions.append(m.predict(ukn_flow))
    evaluator = Evaluator(args.threshold)
    uncertainty_data = evaluator.evaluate(valid_predictions, ukn_predictions)
    uncertainty_data = pd.DataFrame(uncertainty_data,
        columns=[args.threshold, 'in_set_acc', 'out_set_mis'])
    uncertainty_path = os.path.join(WORKING_DIR, args.destination_file)
    with open(uncertainty_path, 'wb') as file:
        pickle.dump(uncertainty_data, file)
if __name__ == '__main__':
    # CLI entry point: parse the flags and run the ensemble evaluation.
    args = get_args()
    eval_ensemble(args) | import argparse
import os
import pickle
import numpy as np
import tensorflow as tf
import pandas as pd
from trainer import model
from trainer.pellet_list import PELLET_LIST, REMOVED_CLASSES
from trainer import task
from util import gcs_util as util
WORKING_DIR = os.getcwd()
MODEL_FOLDER = 'uncertainty_pellet_labels_model'
def get_args():
    """Parse command-line options for the ensemble evaluation job; unknown
    extra arguments are ignored via parse_known_args."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--job-dir',
        type=str,
        required=True,
        help='GCS location to the ensemble models')
    parser.add_argument(
        '--train-files',
        type=str,
        required=True,
        nargs='*',
        help='Dataset training file local or GCS')
    parser.add_argument(
        '--destination-file',
        type=str,
        required=True,
        default='uncertainty_data.pickle',
        help='File name to write uncertainty data to')
    parser.add_argument(
        '--n-ensemble',
        type=int,
        default=10,
        help='Number of ensemble models that were trained')
    parser.add_argument(
        '--img-size',
        type=int,
        default=64,
        help='square size to resize input images to in pixel, default=64')
    parser.add_argument(
        '--threshold',
        type=str,
        default='std_dev',
        choices=['std_dev', 'max_p', 'entropy'],
        help='which type of threshold to use to calculate uncertainty_data')
    args, _ = parser.parse_known_args()
    return args
class Evaluator():
    """Sweep an uncertainty threshold and report in/out-of-set statistics.

    The strategy named by ``threshold`` ('entropy', 'max_p' or 'std_dev')
    is bound to ``self.evaluate``; each strategy returns rows of
    ``[threshold_value, in_set_acc, out_set_mis]``.
    """

    def __init__(self, threshold):
        self.threshold = threshold
        strategies = {
            'entropy': self._evaluate_entropy,
            'max_p': self._evaluate_max_p,
            'std_dev': self._evaluate_std_dev
        }
        self.evaluate = strategies[threshold]

    def _evaluate_entropy(self, valid_predictions, ukn_predictions):
        # Average the ensemble members' probabilities, then threshold on the
        # entropy of the averaged distribution.
        mean_valid = np.mean(valid_predictions, axis=0)
        mean_ukn = np.mean(ukn_predictions, axis=0)
        valid_entropy = compute_entropies(mean_valid)
        ukn_entropy = compute_entropies(mean_ukn)
        # NOTE(review): the ln(10) upper bound assumes a 10-class
        # distribution (max possible entropy) -- confirm the class count.
        return [
            [t,
             len(valid_entropy[valid_entropy <= t]) / len(valid_entropy),
             len(ukn_entropy[ukn_entropy <= t]) / len(ukn_entropy)]
            for t in np.linspace(0., np.log(10), 1000, endpoint=True)
        ]

    def _evaluate_max_p(self, valid_predictions, ukn_predictions):
        # Threshold on the maximum class probability of the averaged
        # ensemble prediction.
        mean_valid = np.mean(valid_predictions, axis=0)
        mean_ukn = np.mean(ukn_predictions, axis=0)
        valid_confidence = np.amax(mean_valid, axis=-1)
        ukn_confidence = np.amax(mean_ukn, axis=-1)
        return [
            [t,
             len(valid_confidence[valid_confidence >= t]) / len(valid_confidence),
             len(ukn_confidence[ukn_confidence >= t]) / len(ukn_confidence)]
            for t in np.arange(0.5, 1, 0.001)
        ]

    def _evaluate_std_dev(self, valid_predictions, ukn_predictions):
        # Threshold on the across-ensemble deviation of each sample's top
        # probability (no ensemble averaging here).
        valid_deviations = compute_deviations(valid_predictions)
        ukn_deviations = compute_deviations(ukn_predictions)
        return [
            [t,
             len(valid_deviations[valid_deviations <= t]) / len(valid_deviations),
             len(ukn_deviations[ukn_deviations <= t]) / len(ukn_deviations)]
            for t in np.arange(0.0001, 0.1, 0.0001)
        ]
def compute_entropies(predictions):
    """Shannon entropy of each predicted probability distribution.

    A small constant (1e-10) guards against log(0).  For a reference on
    distribution entropy, see:
    https://peltarion.com/knowledge-center/documentation/modeling-view/build-an-ai-model/loss-functions/categorical-crossentropy
    """
    log_probs = np.log(predictions + 1e-10)
    return -np.sum(predictions * log_probs, axis=-1)
def compute_deviations(predictions):
    """Standard deviation, across ensemble members, of each sample's top probability."""
    top_probabilities = np.amax(predictions, axis=-1)
    return np.std(top_probabilities, axis=0)
def eval_ensemble(args):
    """Evaluate an ensemble's ability to separate in- vs out-of-distribution data.

    Loads the validation ("in distribution") and unknown ("out of
    distribution") images from ``args.train_files``, downloads the
    ``args.n_ensemble`` trained models from GCS, and sweeps the chosen
    uncertainty threshold ('max_p', 'entropy' or 'std_dev').  The resulting
    accuracy metrics are pickled as a pandas DataFrame to
    ``args.destination_file`` in the working directory.
    """
    # Models are expected to live on GCS; fail fast otherwise.  (Fix: a
    # plain assert would be silently stripped under "python -O".)
    if not args.job_dir.startswith('gs://'):
        raise ValueError("--job-dir must be a GCS path (gs://...)")
    class_list = [pellet_class for pellet_class in PELLET_LIST
                  if pellet_class not in REMOVED_CLASSES]
    valid_images = []
    valid_labels = []
    ukn_images = []
    for path in args.train_files:
        input_data = model.load_and_preprocess_data(
            path,
            WORKING_DIR,
            args.img_size,
            class_list,
            REMOVED_CLASSES)
        # Fix: the training split was previously accumulated and
        # concatenated without ever being used; it is not collected anymore.
        valid_images.append(input_data.valid_data)
        valid_labels.append(input_data.valid_labels)
        ukn_images.append(input_data.ukn_data)
    valid_images = np.concatenate(valid_images, axis=0)
    valid_labels = np.concatenate(valid_labels, axis=0)
    ukn_images = np.concatenate(ukn_images, axis=0)
    # Load every ensemble member; 'sin' is a custom activation that must be
    # supplied to load_model as a custom object.
    model_paths = util.load_models_from_gcs(
        args.job_dir, MODEL_FOLDER, task.MODEL_NAME, WORKING_DIR, args.n_ensemble)
    models = [tf.keras.models.load_model(path, {'sin': tf.sin})
              for path in model_paths]
    # Generate per-model predictions (shuffle=False keeps rows aligned
    # across all ensemble members).
    image_gen = model.get_data_generator()
    valid_flow = image_gen.flow(valid_images, valid_labels, shuffle=False)
    ukn_flow = image_gen.flow(ukn_images, shuffle=False)
    valid_predictions = [m.predict(valid_flow) for m in models]
    ukn_predictions = [m.predict(ukn_flow) for m in models]
    evaluator = Evaluator(args.threshold)
    uncertainty_data = evaluator.evaluate(valid_predictions, ukn_predictions)
    uncertainty_data = pd.DataFrame(
        uncertainty_data,
        columns=[args.threshold, 'in_set_acc', 'out_set_mis'])
    uncertainty_path = os.path.join(WORKING_DIR, args.destination_file)
    with open(uncertainty_path, 'wb') as file:
        pickle.dump(uncertainty_data, file)
if __name__ == '__main__':
args = get_args()
eval_ensemble(args) | en | 0.785848 | # For a reference on distribution entropy, see [1] # [1]: https://peltarion.com/knowledge-center/documentation/modeling-view/build-an-ai-model/loss-functions/categorical-crossentropy # Compute the deviation between the max probability of each prediction # Take an ensemble of models trained on gcloud and evaluate their accuracy in # classifying in and out of distribution data. The evaluation can be done # using 3 types of threshold: 'max_p', 'entropy', 'std_dev'. Outputs a pandas # dataframe with accuracy metrics at different threshold value # Load models # Generate predictions | 2.376707 | 2 |
run_2D.py | jcartus/QLearningExperiments | 0 | 6622019 | """This script will run the agent in a 1 D environment and plot its learning
progress
Author: <NAME>, 22.12.2021
"""
import numpy as np
import matplotlib.pyplot as plt
from environment import DiscreteEnvironment
from agent import AgentBase
from analysis import AnalyzerEpisode, AnalyzerRun, animate_episodes
import seaborn as sns
def generate_course_1():
    """Build the first demo course as a (width, height) reward grid.

    Returns:
        (grid, win_reward, death_reward) with win/death rewards embedded at
        their cell positions.
    """
    win_reward = 5
    death_reward = -10
    layout = [
        [0, 0, 0, 1, 0, win_reward, 0, death_reward, 1, 0, 0],
        [death_reward, 0, 0, 4, 0, 0, 0, 0, 5, 0, 0],
        [death_reward, 0, 0, 4, 0, 0, 0, 0, 5, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def generate_square():
    """Build a 3x3 grid with a death cell at the center and a win corner."""
    win_reward = 5
    death_reward = -10
    layout = [
        [0, 0, 0],
        [0, death_reward, 0],
        [0, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def generate_square_w_wall():
    """Build a 5x5 grid with an L-shaped wall of death cells and a win corner."""
    win_reward = 5
    death_reward = -15
    layout = [
        [0, 0, 0, 0, 0],
        [0, death_reward, death_reward, death_reward, 0],
        [0, death_reward, 0, 0, 0],
        [0, death_reward, 0, 0, 0],
        [0, 0, 0, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def main():
    """Train a Q-learning agent on a small 2-D grid course, plot run
    statistics, then record exploitation episodes as animations."""
    # Alternative courses kept for quick switching:
    #game_map, win_values, death_values = generate_course_1()
    #game_map, win_values, death_values = generate_square()
    game_map, win_values, death_values = generate_square_w_wall()
    # Flat-index positions of the win/death cells (the environment is
    # configured with flattened state indices).
    wins = np.arange(game_map.size)[game_map.flatten() == win_values]
    deaths = np.arange(game_map.size)[game_map.flatten() == death_values]
    environment = DiscreteEnvironment(
        game_map=game_map,
        init_mode="zero",
        #init_mode="random",
        reward_mode="cumulative",
        wins=wins,
        deaths=deaths
    )
    agent = AgentBase(
        environment=environment,
        discount_factor=0.8,
        learning_rate=0.2,
        epsilon_greedyness=0.5
    )
    #--- do training ---
    n_episodes_train = 500
    agent.run(n_episodes=n_episodes_train)
    #---
    #--- plot analysis plots ---
    analysis = AnalyzerRun(run_statistics=agent._run_statistics)
    plt.figure(figsize=(6, 10))
    n = 4
    plt.subplot(n, 1, 1)
    analysis.plot_steps()
    plt.subplot(n, 1, 2)
    analysis.plot_reward()
    plt.subplot(n, 1, 3)
    analysis.plot_actions()
    plt.subplot(n, 1, 4)
    analysis.plot_average_final_state()
    plt.tight_layout()
    plt.show()
    #---
    #--- do additional exploitation runs to record gifs ---
    # NOTE(review): setting _epsilon = 1 apparently switches the agent to
    # pure exploitation here -- confirm the greedyness convention in AgentBase.
    agent._epsilon = 1
    n_episodes_exploit = 3
    agent.run(n_episodes=n_episodes_exploit)
    n_episdes_tot = n_episodes_train + n_episodes_exploit
    # Animate the very first episode plus the final exploitation episodes.
    animate_episodes(
        agent,
        episodes=[0, n_episdes_tot-2, n_episdes_tot-1],
        show=True,
        save_path="animations",
        wins=wins,
        deaths=deaths
    )
    #---
if __name__ == '__main__':
main() | """This script will run the agent in a 1 D environment and plot its learning
progress
Author: <NAME>, 22.12.2021
"""
import numpy as np
import matplotlib.pyplot as plt
from environment import DiscreteEnvironment
from agent import AgentBase
from analysis import AnalyzerEpisode, AnalyzerRun, animate_episodes
import seaborn as sns
def generate_course_1():
    """Build the first demo course as a (width, height) reward grid.

    Returns:
        (grid, win_reward, death_reward) with win/death rewards embedded at
        their cell positions.
    """
    win_reward = 5
    death_reward = -10
    layout = [
        [0, 0, 0, 1, 0, win_reward, 0, death_reward, 1, 0, 0],
        [death_reward, 0, 0, 4, 0, 0, 0, 0, 5, 0, 0],
        [death_reward, 0, 0, 4, 0, 0, 0, 0, 5, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def generate_square():
    """Build a 3x3 grid with a death cell at the center and a win corner."""
    win_reward = 5
    death_reward = -10
    layout = [
        [0, 0, 0],
        [0, death_reward, 0],
        [0, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def generate_square_w_wall():
    """Build a 5x5 grid with an L-shaped wall of death cells and a win corner."""
    win_reward = 5
    death_reward = -15
    layout = [
        [0, 0, 0, 0, 0],
        [0, death_reward, death_reward, death_reward, 0],
        [0, death_reward, 0, 0, 0],
        [0, death_reward, 0, 0, 0],
        [0, 0, 0, 0, win_reward],
    ]
    return np.array(layout).T, win_reward, death_reward
def main():
    """Train a Q-learning agent on a small 2-D grid course, plot run
    statistics, then record exploitation episodes as animations."""
    # Alternative courses kept for quick switching:
    #game_map, win_values, death_values = generate_course_1()
    #game_map, win_values, death_values = generate_square()
    game_map, win_values, death_values = generate_square_w_wall()
    # Flat-index positions of the win/death cells (the environment is
    # configured with flattened state indices).
    wins = np.arange(game_map.size)[game_map.flatten() == win_values]
    deaths = np.arange(game_map.size)[game_map.flatten() == death_values]
    environment = DiscreteEnvironment(
        game_map=game_map,
        init_mode="zero",
        #init_mode="random",
        reward_mode="cumulative",
        wins=wins,
        deaths=deaths
    )
    agent = AgentBase(
        environment=environment,
        discount_factor=0.8,
        learning_rate=0.2,
        epsilon_greedyness=0.5
    )
    #--- do training ---
    n_episodes_train = 500
    agent.run(n_episodes=n_episodes_train)
    #---
    #--- plot analysis plots ---
    analysis = AnalyzerRun(run_statistics=agent._run_statistics)
    plt.figure(figsize=(6, 10))
    n = 4
    plt.subplot(n, 1, 1)
    analysis.plot_steps()
    plt.subplot(n, 1, 2)
    analysis.plot_reward()
    plt.subplot(n, 1, 3)
    analysis.plot_actions()
    plt.subplot(n, 1, 4)
    analysis.plot_average_final_state()
    plt.tight_layout()
    plt.show()
    #---
    #--- do additional exploitation runs to record gifs ---
    # NOTE(review): setting _epsilon = 1 apparently switches the agent to
    # pure exploitation here -- confirm the greedyness convention in AgentBase.
    agent._epsilon = 1
    n_episodes_exploit = 3
    agent.run(n_episodes=n_episodes_exploit)
    n_episdes_tot = n_episodes_train + n_episodes_exploit
    # Animate the very first episode plus the final exploitation episodes.
    animate_episodes(
        agent,
        episodes=[0, n_episdes_tot-2, n_episdes_tot-1],
        show=True,
        save_path="animations",
        wins=wins,
        deaths=deaths
    )
    #---
if __name__ == '__main__':
main() | en | 0.523506 | This script will run the agent in a 1 D environment and plot its learning progress Author: <NAME>, 22.12.2021 #game_map, win_values, death_values = generate_course_1() #game_map, win_values, death_values = generate_square() #init_mode="random", #--- do training --- #--- #--- plot analysis plots --- #--- #--- do additional exploitation runs to recods gifs --- #--- | 3.290825 | 3 |
beartype_test/a00_unit/a00_util/test_utilobject.py | posita/beartype | 1,056 | 6622020 | #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype object utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.utilobject` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS ~ tester }....................
def test_is_object_hashable() -> None:
    '''
    Test the :func:`beartype._util.utilobject.is_object_hashable` tester.
    '''

    # Defer heavyweight imports until this test actually runs.
    from beartype._util.utilobject import is_object_hashable
    from beartype_test.a00_unit.data.hint.data_hint import (
        NOT_HINTS_HASHABLE, NOT_HINTS_UNHASHABLE,)

    # This tester reports True for every hashable object...
    assert all(
        is_object_hashable(hashable) is True
        for hashable in NOT_HINTS_HASHABLE
    )
    # ...and False for every unhashable object.
    assert all(
        is_object_hashable(unhashable) is False
        for unhashable in NOT_HINTS_UNHASHABLE
    )
# ....................{ TESTS ~ getter }....................
def test_get_object_basename_scoped() -> None:
    '''
    Test the :func:`beartype._util.utilobject.get_object_basename_scoped` getter.
    '''

    # Defer heavyweight imports until this test actually runs.
    from beartype.roar._roarexc import _BeartypeUtilObjectNameException
    from beartype._util.utilobject import get_object_basename_scoped
    from beartype_test.a00_unit.data.data_type import (
        CALLABLES,
        closure_factory,
    )
    from pytest import raises

    # Non-nested callables: their fully-qualified names pass through
    # unmodified.
    for some_callable in CALLABLES:
        assert get_object_basename_scoped(some_callable) == (
            some_callable.__qualname__)

    # Closures: the meaningless "<locals>." substring is stripped.
    assert get_object_basename_scoped(closure_factory()) == (
        'closure_factory.closure')

    # Objects declaring neither "__qualname__" nor "__name__" dunder
    # attributes: the expected beartype exception is raised.
    with raises(_BeartypeUtilObjectNameException):
        get_object_basename_scoped(
            'From the ice-gulfs that gird his secret throne,')
| #!/usr/bin/env python3
# --------------------( LICENSE )--------------------
# Copyright (c) 2014-2021 Beartype authors.
# See "LICENSE" for further details.
'''
**Beartype object utility unit tests.**
This submodule unit tests the public API of the private
:mod:`beartype._util.utilobject` submodule.
'''
# ....................{ IMPORTS }....................
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# WARNING: To raise human-readable test errors, avoid importing from
# package-specific submodules at module scope.
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# ....................{ TESTS ~ tester }....................
def test_is_object_hashable() -> None:
    '''
    Test the :func:`beartype._util.utilobject.is_object_hashable` tester.
    '''

    # Defer heavyweight imports until this test actually runs.
    from beartype._util.utilobject import is_object_hashable
    from beartype_test.a00_unit.data.hint.data_hint import (
        NOT_HINTS_HASHABLE, NOT_HINTS_UNHASHABLE,)

    # This tester reports True for every hashable object...
    assert all(
        is_object_hashable(hashable) is True
        for hashable in NOT_HINTS_HASHABLE
    )
    # ...and False for every unhashable object.
    assert all(
        is_object_hashable(unhashable) is False
        for unhashable in NOT_HINTS_UNHASHABLE
    )
# ....................{ TESTS ~ getter }....................
def test_get_object_basename_scoped() -> None:
    '''
    Test the :func:`beartype._util.utilobject.get_object_basename_scoped` getter.
    '''

    # Defer heavyweight imports until this test actually runs.
    from beartype.roar._roarexc import _BeartypeUtilObjectNameException
    from beartype._util.utilobject import get_object_basename_scoped
    from beartype_test.a00_unit.data.data_type import (
        CALLABLES,
        closure_factory,
    )
    from pytest import raises

    # Non-nested callables: their fully-qualified names pass through
    # unmodified.
    for some_callable in CALLABLES:
        assert get_object_basename_scoped(some_callable) == (
            some_callable.__qualname__)

    # Closures: the meaningless "<locals>." substring is stripped.
    assert get_object_basename_scoped(closure_factory()) == (
        'closure_factory.closure')

    # Objects declaring neither "__qualname__" nor "__name__" dunder
    # attributes: the expected beartype exception is raised.
    with raises(_BeartypeUtilObjectNameException):
        get_object_basename_scoped(
            'From the ice-gulfs that gird his secret throne,')
| en | 0.464857 | #!/usr/bin/env python3 # --------------------( LICENSE )-------------------- # Copyright (c) 2014-2021 Beartype authors. # See "LICENSE" for further details. **Beartype object utility unit tests.** This submodule unit tests the public API of the private :mod:`beartype._util.utilobject` submodule. # ....................{ IMPORTS }.................... #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # WARNING: To raise human-readable test errors, avoid importing from # package-specific submodules at module scope. #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # ....................{ TESTS ~ tester }.................... Test the :func:`beartype._util.utilobject.is_object_hashable` tester. # Defer heavyweight imports. # Assert this tester accepts unhashable objects. # Assert this tester rejects unhashable objects. # ....................{ TESTS ~ getter }.................... Test the :func:`beartype._util.utilobject.get_object_basename_scoped` getter. # Defer heavyweight imports. # Assert this getter returns the fully-qualified names of non-nested # callables unmodified. # Assert this getter returns the fully-qualified names of closures stripped # of meaningless "<locals>." substrings. # Assert this getter raises "AttributeError" exceptions when passed objects # declaring neither "__qualname__" nor "__name__" dunder attributes. | 2.016655 | 2 |
src/utils/logging.py | JeremyAndress/Flask-Celery | 2 | 6622021 | import os
import uuid
import logging
from logging.handlers import RotatingFileHandler
# Project root: two levels above this file (src/utils/ -> project root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOG_DIR = os.path.join(BASE_DIR, 'logs')
# Fix: RotatingFileHandler raises at import time if the log directory does
# not exist yet, so create it first.  (Path now built with os.path.join
# instead of string concatenation.)
os.makedirs(LOG_DIR, exist_ok=True)
LOG_FILENAME_INFO = os.path.join(LOG_DIR, 'info.log')
logging.basicConfig(
    handlers=[
        logging.StreamHandler(),
        # Rotate at ~20 kB, keeping the 10 most recent backup files.
        RotatingFileHandler(LOG_FILENAME_INFO, maxBytes=20000, backupCount=10)
    ],
    level=logging.INFO,
    format='[%(asctime)s] [%(pathname)s:%(lineno)d] [%(levelname)s] - %(message)s',
    datefmt='%d/%m/%Y %H:%M:%S'
)
logger = logging.getLogger("launchpad")
def gene_extra(ms):
    """Return a logging ``extra`` dict: a fresh UUID4 string plus the given MSISDN."""
    return {'id': str(uuid.uuid4()), 'msisdn': ms}
| import os
import uuid
import logging
from logging.handlers import RotatingFileHandler
# Project root: two levels above this file (src/utils/ -> project root).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
LOG_DIR = os.path.join(BASE_DIR, 'logs')
# Fix: RotatingFileHandler raises at import time if the log directory does
# not exist yet, so create it first.  (Path now built with os.path.join
# instead of string concatenation.)
os.makedirs(LOG_DIR, exist_ok=True)
LOG_FILENAME_INFO = os.path.join(LOG_DIR, 'info.log')
logging.basicConfig(
    handlers=[
        logging.StreamHandler(),
        # Rotate at ~20 kB, keeping the 10 most recent backup files.
        RotatingFileHandler(LOG_FILENAME_INFO, maxBytes=20000, backupCount=10)
    ],
    level=logging.INFO,
    format='[%(asctime)s] [%(pathname)s:%(lineno)d] [%(levelname)s] - %(message)s',
    datefmt='%d/%m/%Y %H:%M:%S'
)
logger = logging.getLogger("launchpad")
def gene_extra(ms):
    """Return a logging ``extra`` dict: a fresh UUID4 string plus the given MSISDN."""
    return {'id': str(uuid.uuid4()), 'msisdn': ms}
| none | 1 | 2.506395 | 3 | |
text/_form/cascade/property/_op/declare.py | jedhsu/text | 0 | 6622022 | <gh_stars>0
"""
*Property-Declare*
Declare a property.
"""
from ._operator import PropertyOperator
class PropertyDeclare(
    PropertyOperator,
):
    """Operator marking the declaration of a property (no extra behavior yet)."""
    pass
"""
*Declaration-Block*
"""
from typing import Sequence
from dataclasses import dataclass
__all__ = ["DeclarationBlock"]
from .declaration import Declaration
@dataclass
class DeclarationBlock:
    """A ``{ ... }`` CSS declaration block rendered from Declaration objects."""

    INDENT_LENGTH = 4

    declarations: Sequence[Declaration]

    @classmethod
    def indent(cls, line: str) -> str:
        """
        Indents a line of text.
        """
        return " " * cls.INDENT_LENGTH + line

    def into_css(self) -> str:
        """Render every declaration, indented and ';'-terminated, inside braces."""
        rendered = [self.indent(decl.into_css()) + ";" for decl in self.declarations]
        return "\n".join(["{"] + rendered + ["}"])
| """
*Property-Declare*
Declare a property.
"""
from ._operator import PropertyOperator
class PropertyDeclare(
    PropertyOperator,
):
    """Operator marking the declaration of a property (no extra behavior yet)."""
    pass
"""
*Declaration-Block*
"""
from typing import Sequence
from dataclasses import dataclass
__all__ = ["DeclarationBlock"]
from .declaration import Declaration
@dataclass
class DeclarationBlock:
INDENT_LENGTH = 4
declarations: Sequence[Declaration]
@classmethod
def indent(cls, line: str) -> str:
"""
Indents a line of text.
"""
return " " * cls.INDENT_LENGTH + line
def into_css(self) -> str:
declarations = [decl.into_css() for decl in self.declarations]
declarations = [self.indent(decl) + ";" for decl in declarations]
declarations = ["{"] + declarations + ["}"]
declarations = "\n".join(declarations)
return declarations | en | 0.533701 | *Property-Declare* Declare a property. *Declaration-Block* Indents a line of text. | 2.950812 | 3 |
FOSSEE_math/email_config.py | Sarathsathyan/django_math.animations | 2 | 6622023 | EMAIL_HOST = 'your smtp host name'
EMAIL_PORT = 'PORT Number'  # SMTP port (placeholder; commonly 587 with TLS)
EMAIL_HOST_USER = 'your username'  # SMTP account user name (placeholder)
EMAIL_HOST_PASSWORD = '<PASSWORD>'  # SMTP account password (placeholder)
EMAIL_USE_TLS = True  # connect with STARTTLS
SENDER_EMAIL = 'your email address'  # "From" address for outgoing mail (placeholder)
SECRET_KEY_SETTINGS = ''  # secret key consumed by settings -- presumably Django's SECRET_KEY; confirm
EMAIL_HOST = 'your smtp host name'  # SMTP server hostname (placeholder)
EMAIL_PORT = 'PORT Number'  # SMTP port (placeholder; commonly 587 with TLS)
EMAIL_HOST_USER = 'your username'  # SMTP account user name (placeholder)
EMAIL_HOST_PASSWORD = '<PASSWORD>'  # SMTP account password (placeholder)
EMAIL_USE_TLS = True  # connect with STARTTLS
SENDER_EMAIL = 'your email address'  # "From" address for outgoing mail (placeholder)
SECRET_KEY_SETTINGS = ''  # secret key consumed by settings -- presumably Django's SECRET_KEY; confirm
| none | 1 | 1.22649 | 1 | |
src/web/schools/migrations/0011_auto_20170222_2057.py | fossabot/SIStema | 5 | 6622024 | <reponame>fossabot/SIStema
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 20:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.5 (2017-02-22).  Creates the
    # SchoolParticipant model linking a user to a school with an optional
    # parallel, unique per (school, user) pair.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('schools', '0010_merge_20170219_1854'),
    ]
    operations = [
        migrations.CreateModel(
            name='SchoolParticipant',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('parallel', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to='schools.Parallel')),
                ('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to='schools.School')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AlterUniqueTogether(
            name='schoolparticipant',
            unique_together=set([('school', 'user')]),
        ),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-22 20:57
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('schools', '0010_merge_20170219_1854'),
]
operations = [
migrations.CreateModel(
name='SchoolParticipant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('parallel', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to='schools.Parallel')),
('school', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to='schools.School')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='school_participants', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AlterUniqueTogether(
name='schoolparticipant',
unique_together=set([('school', 'user')]),
),
] | en | 0.741488 | # -*- coding: utf-8 -*- # Generated by Django 1.10.5 on 2017-02-22 20:57 | 1.671421 | 2 |
tfbp/train/lib.py | klauscc/tfbp | 0 | 6622025 | # -*- coding: utf-8 -*-
#================================================================
# God Bless You.
#
# author: klaus
# email: <EMAIL>
# created date: 2019/12/17
# description:
#
#================================================================
import tensorflow as tf
keras = tf.keras
def build_model(model, optimizer, callbacks=None):
    """Attach an optimizer and callbacks to *model* and return it.

    Args:
        model: keras.Model to configure.
        optimizer: keras optimizer instance, stored on ``model.optimizer``.

    Kwargs:
        callbacks: optional list of keras callbacks; each one is bound to
            the model via ``callback.set_model(model)``.

    Returns: the same ``model`` instance, configured in place.
    """
    model.optimizer = optimizer
    model.callbacks = callbacks
    # Bind every callback to this model so callback hooks can reach it.
    if model.callbacks is not None:
        for callback in model.callbacks:
            callback.set_model(model)
    return model
def get_optimizer(params, custom_scheduler=None):
    """Build a Keras optimizer from a parameter object.

    Args:
        params: configuration object supporting attribute access, ``in``
            membership tests and ``.get`` with a default.
        custom_scheduler: currently unused; kept for interface compatibility.

    Returns:
        A configured ``tf.keras.optimizers`` instance.

    Raises:
        ValueError: for an unknown ``params.optimizer``.
    """
    extra_kwargs = {}
    for clip_key in ("clipnorm", "clipvalue"):
        if clip_key in params:
            extra_kwargs[clip_key] = getattr(params, clip_key)

    if params.lr_decay_policy == "exp":
        learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
            params.lr,
            params.decay_steps,
            params.decay_rate,
            staircase=params.staircase)
    else:
        # NOTE(review): the exponential branch reads ``params.lr`` while this
        # one reads ``params.init_lr`` -- confirm both fields exist on params.
        learning_rate = params.init_lr

    if params.optimizer == "adam":
        optimizer = tf.keras.optimizers.Adam(
            learning_rate,
            beta_1=params.get("beta_1", 0.9),
            beta_2=params.get("beta_2", 0.999),
            **extra_kwargs)
    elif params.optimizer == "sgd":
        optimizer = tf.keras.optimizers.SGD(
            learning_rate,
            momentum=params.get("momentum", 0.9),
            **extra_kwargs)
    else:
        raise ValueError("unsupported optimizer: {}".format(params.optimizer))
    return optimizer
| # -*- coding: utf-8 -*-
#================================================================
# God Bless You.
#
# author: klaus
# email: <EMAIL>
# created date: 2019/12/17
# description:
#
#================================================================
import tensorflow as tf
keras = tf.keras
def build_model(model, optimizer, callbacks=None):
    """Attach an optimizer and callbacks to *model* and return it.

    Args:
        model: keras.Model to configure.
        optimizer: keras optimizer instance, stored on ``model.optimizer``.

    Kwargs:
        callbacks: optional list of keras callbacks; each one is bound to
            the model via ``callback.set_model(model)``.

    Returns: the same ``model`` instance, configured in place.
    """
    model.optimizer = optimizer
    model.callbacks = callbacks
    # Bind every callback to this model so callback hooks can reach it.
    if model.callbacks is not None:
        for callback in model.callbacks:
            callback.set_model(model)
def get_optimizer(params, custom_scheduler=None):
    """Build a Keras optimizer from a parameter object.

    Args:
        params: configuration object supporting attribute access, ``in``
            membership tests and ``.get`` with a default.
        custom_scheduler: currently unused; kept for interface compatibility.

    Returns:
        A configured ``tf.keras.optimizers`` instance.

    Raises:
        ValueError: for an unknown ``params.optimizer``.
    """
    extra_kwargs = {}
    for clip_key in ("clipnorm", "clipvalue"):
        if clip_key in params:
            extra_kwargs[clip_key] = getattr(params, clip_key)

    if params.lr_decay_policy == "exp":
        learning_rate = tf.keras.optimizers.schedules.ExponentialDecay(
            params.lr,
            params.decay_steps,
            params.decay_rate,
            staircase=params.staircase)
    else:
        # NOTE(review): the exponential branch reads ``params.lr`` while this
        # one reads ``params.init_lr`` -- confirm both fields exist on params.
        learning_rate = params.init_lr

    if params.optimizer == "adam":
        optimizer = tf.keras.optimizers.Adam(
            learning_rate,
            beta_1=params.get("beta_1", 0.9),
            beta_2=params.get("beta_2", 0.999),
            **extra_kwargs)
    elif params.optimizer == "sgd":
        optimizer = tf.keras.optimizers.SGD(
            learning_rate,
            momentum=params.get("momentum", 0.9),
            **extra_kwargs)
    else:
        raise ValueError("unsupported optimizer: {}".format(params.optimizer))
    return optimizer
| en | 0.409641 | # -*- coding: utf-8 -*- #================================================================ # God Bless You. # # author: klaus # email: <EMAIL> # created date: 2019/12/17 # description: # #================================================================ TODO: Docstring for build_model. Args: model: keras.Model. input_shape: List. Input shape including batch_size. optimizer: keras.optimizers. Kwargs: callbacks: keras. Returns: TODO get optimizer from params Args: params (TODO): TODO Returns: TODO | 2.150626 | 2 |
secondary-voice-server-AI/assem-vc/datasets/resample.py | doongu/pnu_opensource_hack | 178 | 6622026 | import os
import glob
import tqdm
from itertools import repeat
from multiprocessing import Pool, freeze_support
from argparse import ArgumentParser
def resampling(wavdir, sr):
    """Resample ``wavdir`` to ``sr`` Hz into a sibling '-22k.wav' file.

    Fixes over the os.system version: arguments are passed as a list so
    paths containing spaces or shell metacharacters are safe, and the
    source file is deleted only when the conversion succeeded (it was
    previously removed unconditionally, losing data on ffmpeg failure).
    """
    import subprocess  # local import keeps the module-level import block untouched

    # NOTE: str.replace substitutes every '.wav' occurrence; fine for the
    # expected '<name>.wav' inputs.
    newdir = wavdir.replace('.wav', '-22k.wav')
    result = subprocess.run(
        ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-y',
         '-i', wavdir, '-ar', str(sr), newdir])
    if result.returncode == 0:
        os.remove(wavdir)
if __name__ == '__main__':
    # Resample every .wav found (recursively) under datasets/ to the
    # requested rate, using a pool of worker processes.
    parser = ArgumentParser()
    parser.add_argument('--sampling_rate', type=int, default=22050,
                        help="target sampling rate to resample")
    parser.add_argument('--num_workers', type=int, default=32,
                        help="number of workers")
    args = parser.parse_args()
    freeze_support()
    input_paths = glob.glob(os.path.join('datasets', '**', '*.wav'), recursive=True)
    with Pool(processes=args.num_workers) as p:
        # NOTE(review): Pool.starmap blocks until all results are ready, so
        # this tqdm bar only fills at the end; an imap-style call would show
        # live progress.
        r = list(tqdm.tqdm(p.starmap(resampling, zip(input_paths, repeat(args.sampling_rate))), total=len(input_paths)))
| import os
import glob
import tqdm
from itertools import repeat
from multiprocessing import Pool, freeze_support
from argparse import ArgumentParser
def resampling(wavdir, sr):
    """Resample ``wavdir`` to ``sr`` Hz into a sibling '-22k.wav' file.

    Fixes over the os.system version: arguments are passed as a list so
    paths containing spaces or shell metacharacters are safe, and the
    source file is deleted only when the conversion succeeded (it was
    previously removed unconditionally, losing data on ffmpeg failure).
    """
    import subprocess  # local import keeps the module-level import block untouched

    # NOTE: str.replace substitutes every '.wav' occurrence; fine for the
    # expected '<name>.wav' inputs.
    newdir = wavdir.replace('.wav', '-22k.wav')
    result = subprocess.run(
        ['ffmpeg', '-hide_banner', '-loglevel', 'panic', '-y',
         '-i', wavdir, '-ar', str(sr), newdir])
    if result.returncode == 0:
        os.remove(wavdir)
if __name__ == '__main__':
    # Resample every .wav found (recursively) under datasets/ to the
    # requested rate, using a pool of worker processes.
    parser = ArgumentParser()
    parser.add_argument('--sampling_rate', type=int, default=22050,
                        help="target sampling rate to resample")
    parser.add_argument('--num_workers', type=int, default=32,
                        help="number of workers")
    args = parser.parse_args()
    freeze_support()
    input_paths = glob.glob(os.path.join('datasets', '**', '*.wav'), recursive=True)
    with Pool(processes=args.num_workers) as p:
        # NOTE(review): Pool.starmap blocks until all results are ready, so
        # this tqdm bar only fills at the end; an imap-style call would show
        # live progress.
        r = list(tqdm.tqdm(p.starmap(resampling, zip(input_paths, repeat(args.sampling_rate))), total=len(input_paths)))
| none | 1 | 2.420602 | 2 | |
UCourse/course_homes/migrations/0018_coursehome_register_date.py | Natsu1270/UCourse | 1 | 6622027 | <filename>UCourse/course_homes/migrations/0018_coursehome_register_date.py
# Generated by Django 3.0.3 on 2020-06-02 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.3 (2020-06-02).  Adds the nullable
    # ``register_date`` DateField to the CourseHome model.
    dependencies = [
        ('course_homes', '0017_auto_20200531_2241'),
    ]
    operations = [
        migrations.AddField(
            model_name='coursehome',
            name='register_date',
            field=models.DateField(blank=True, null=True),
        ),
    ]
| <filename>UCourse/course_homes/migrations/0018_coursehome_register_date.py
# Generated by Django 3.0.3 on 2020-06-02 16:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_homes', '0017_auto_20200531_2241'),
]
operations = [
migrations.AddField(
model_name='coursehome',
name='register_date',
field=models.DateField(blank=True, null=True),
),
]
| en | 0.831736 | # Generated by Django 3.0.3 on 2020-06-02 16:53 | 1.575571 | 2 |
coding/learn_gevent/gevent_06_echoserver.py | yatao91/learning_road | 3 | 6622028 | # -*- coding: utf-8 -*-
from __future__ import print_function
from gevent.server import StreamServer
def echo(socket, address):
    """Handle one echo-server connection.

    Sends a welcome banner, then echoes every received line back to the
    client until the client sends "quit" (case-insensitive) or disconnects.

    Args:
        socket: connected socket (any object with ``sendall`` and
            ``makefile``).
        address: (host, port) tuple of the peer, used for logging.
    """
    print("New connection from %s:%s" % address)
    socket.sendall(b"Welcome to the echo server! Type quit to exit.\r\n")
    # Fix: the file wrapper previously leaked when an exception escaped the
    # loop; the with-statement closes it on every exit path.  Also fixed
    # the "disconnectted" typo in the log message.
    with socket.makefile(mode='rb') as rfileobj:
        while True:
            line = rfileobj.readline()
            if not line:
                print("client disconnected")
                break
            if line.strip().lower() == b'quit':
                print("client quit")
                break
            socket.sendall(line)
            print("echoed %r" % line)
if __name__ == '__main__':
    # gevent StreamServer: runs echo() for each incoming connection on
    # localhost:16000 until interrupted.
    server = StreamServer(('127.0.0.1', 16000), echo)
    print("Starting echo server on port 16000")
    server.serve_forever()
| # -*- coding: utf-8 -*-
from __future__ import print_function
from gevent.server import StreamServer
def echo(socket, address):
    """Handle one echo-server connection.

    Sends a welcome banner, then echoes every received line back to the
    client until the client sends "quit" (case-insensitive) or disconnects.

    Args:
        socket: connected socket (any object with ``sendall`` and
            ``makefile``).
        address: (host, port) tuple of the peer, used for logging.
    """
    print("New connection from %s:%s" % address)
    socket.sendall(b"Welcome to the echo server! Type quit to exit.\r\n")
    # Fix: the file wrapper previously leaked when an exception escaped the
    # loop; the with-statement closes it on every exit path.  Also fixed
    # the "disconnectted" typo in the log message.
    with socket.makefile(mode='rb') as rfileobj:
        while True:
            line = rfileobj.readline()
            if not line:
                print("client disconnected")
                break
            if line.strip().lower() == b'quit':
                print("client quit")
                break
            socket.sendall(line)
            print("echoed %r" % line)
if __name__ == '__main__':
    # gevent StreamServer: runs echo() for each incoming connection on
    # localhost:16000 until interrupted.
    server = StreamServer(('127.0.0.1', 16000), echo)
    print("Starting echo server on port 16000")
    server.serve_forever()
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.91319 | 3 |
graph_networks/GIN.py | hengwei-chan/graph_network_demo | 1 | 6622029 | <reponame>hengwei-chan/graph_network_demo
import tensorflow as tf
import numpy as np
import copy
from graph_networks.utilities import CustomDropout as dropout
class GIN(tf.keras.Model):
'''
This class is an implementation of the GIN-E model (Edge extention of Xu et al. 2019).
'''
    def __init__(self,config,return_hv=False):
        """Build the GIN-E layers from *config*.

        Args:
            config: configuration object; fields read here are the
                layernorm/dropout switches, ``dropout_rate``,
                ``input_size_gin``, ``message_iterations_gin`` and
                ``gin_aggregate_bias``.
            return_hv: if True, ``call`` returns the per-node embeddings
                instead of the pooled graph representation.
        """
        super(GIN, self).__init__(name="GIN")
        self.config = config
        self.return_hv = return_hv
        # Optional normalization/dropout applied during message passing.
        if self.config.layernorm_passing_gin:
            self.layernorm_passing_gin = tf.keras.layers.LayerNormalization(epsilon=1e-6,axis=1)
        if self.config.dropout_passing_gin:
            self.dropout_passing_gin = tf.keras.layers.Dropout(self.config.dropout_rate)
        # Optional normalization/dropout applied to the pooled read-out.
        if self.config.layernorm_aggregate_gin:
            self.layernorm_aggregate_gin = tf.keras.layers.LayerNormalization(epsilon=1e-6,axis=1)
        if self.config.dropout_aggregate_gin:
            self.dropout_aggregate_gin = tf.keras.layers.Dropout(self.config.dropout_rate)
        # Learnable epsilon of the GIN update rule, one value per feature.
        self.eps = tf.Variable(tf.zeros((self.config.input_size_gin,)),trainable=True)
        # (sic) "massge": typo kept because call() reads this attribute name.
        self.massge_iteration_gin = config.message_iterations_gin
        # One Dense aggregation layer per message-passing iteration.
        self.gin_aggregate = list()
        for t in range(0,self.massge_iteration_gin):
            self.gin_aggregate.append(tf.keras.layers.Dense(self.config.input_size_gin,
                activation=None,input_shape=(self.config.input_size_gin,),
                name="gin_aggregate"+str(t),use_bias=self.config.gin_aggregate_bias))
def call(self,instance,train=True):
edge_aligned_node_features = tf.convert_to_tensor(instance.edge_aligned_node_features,dtype=tf.dtypes.float32)
dir_edge_features = tf.convert_to_tensor(instance.dir_edge_features,dtype=tf.dtypes.float32)
node_features = tf.convert_to_tensor(instance.node_features,dtype=tf.dtypes.float32)
# initialize
# aggregate info and combine
h_0_v = node_features
h_v = 0
# massage passing gin
for t in range(0,self.massge_iteration_gin):
if t == 0:
h_v = tf.matmul(tf.convert_to_tensor(instance.adj_matrix,
dtype=tf.dtypes.float32),h_0_v)
else:
h_v = tf.matmul(tf.convert_to_tensor(instance.adj_matrix,
dtype=tf.dtypes.float32),h_v)
if self.config.layernorm_passing_gin:
h_v = self.layernorm_passing_gin(h_v)
if self.config.dropout_passing_gin:
h_v = self.dropout_passing_gin(h_v,training=train)
h_v =self.gin_aggregate[t]((1+self.eps)*h_0_v+h_v)
if self.return_hv:
return h_v
h = tf.reduce_sum(h_v,axis=0)
h = tf.reshape(h,[1,self.config.input_size_gin])
if self.config.layernorm_aggregate_gin:
h = self.layernorm_aggregate_gin(h)
if self.config.dropout_aggregate_gin:
h = self.dropout_aggregate_gin(h,training=train)
return h | import tensorflow as tf
import numpy as np
import copy
from graph_networks.utilities import CustomDropout as dropout
class GIN(tf.keras.Model):
'''
This class is an implementation of the GIN-E model (Edge extention of Xu et al. 2019).
'''
def __init__(self,config,return_hv=False):
super(GIN, self).__init__(name="GIN")
self.config = config
self.return_hv = return_hv
if self.config.layernorm_passing_gin:
self.layernorm_passing_gin = tf.keras.layers.LayerNormalization(epsilon=1e-6,axis=1)
if self.config.dropout_passing_gin:
self.dropout_passing_gin = tf.keras.layers.Dropout(self.config.dropout_rate)
if self.config.layernorm_aggregate_gin:
self.layernorm_aggregate_gin = tf.keras.layers.LayerNormalization(epsilon=1e-6,axis=1)
if self.config.dropout_aggregate_gin:
self.dropout_aggregate_gin = tf.keras.layers.Dropout(self.config.dropout_rate)
self.eps = tf.Variable(tf.zeros((self.config.input_size_gin,)),trainable=True)
self.massge_iteration_gin = config.message_iterations_gin
self.gin_aggregate = list()
for t in range(0,self.massge_iteration_gin):
self.gin_aggregate.append(tf.keras.layers.Dense(self.config.input_size_gin,
activation=None,input_shape=(self.config.input_size_gin,),
name="gin_aggregate"+str(t),use_bias=self.config.gin_aggregate_bias))
def call(self,instance,train=True):
edge_aligned_node_features = tf.convert_to_tensor(instance.edge_aligned_node_features,dtype=tf.dtypes.float32)
dir_edge_features = tf.convert_to_tensor(instance.dir_edge_features,dtype=tf.dtypes.float32)
node_features = tf.convert_to_tensor(instance.node_features,dtype=tf.dtypes.float32)
# initialize
# aggregate info and combine
h_0_v = node_features
h_v = 0
# massage passing gin
for t in range(0,self.massge_iteration_gin):
if t == 0:
h_v = tf.matmul(tf.convert_to_tensor(instance.adj_matrix,
dtype=tf.dtypes.float32),h_0_v)
else:
h_v = tf.matmul(tf.convert_to_tensor(instance.adj_matrix,
dtype=tf.dtypes.float32),h_v)
if self.config.layernorm_passing_gin:
h_v = self.layernorm_passing_gin(h_v)
if self.config.dropout_passing_gin:
h_v = self.dropout_passing_gin(h_v,training=train)
h_v =self.gin_aggregate[t]((1+self.eps)*h_0_v+h_v)
if self.return_hv:
return h_v
h = tf.reduce_sum(h_v,axis=0)
h = tf.reshape(h,[1,self.config.input_size_gin])
if self.config.layernorm_aggregate_gin:
h = self.layernorm_aggregate_gin(h)
if self.config.dropout_aggregate_gin:
h = self.dropout_aggregate_gin(h,training=train)
return h | en | 0.75616 | This class is an implementation of the GIN-E model (Edge extention of Xu et al. 2019). # initialize # aggregate info and combine # massage passing gin | 2.638109 | 3 |
solutions/problem3.py | wy/ProjectEuler | 0 | 6622030 | # coding: utf8
# Author: <NAME> (~wy)
# Date: 2017
def prime(n):
if n > 1:
for i in range(2,n):
if n % i == 0:
return False
return True
else:
return False
def smallestprimedivisor(p):
for i in range(2,p):
if p % i == 0 and prime(i):
return i
return p
def problem3(p):
largestprime = 0
curr = p
while curr > 1:
print(curr)
# find the first prime divisor of p
largestprime = smallestprimedivisor(curr)
while curr % largestprime == 0:
curr = curr // largestprime
return largestprime
| # coding: utf8
# Author: <NAME> (~wy)
# Date: 2017
def prime(n):
if n > 1:
for i in range(2,n):
if n % i == 0:
return False
return True
else:
return False
def smallestprimedivisor(p):
for i in range(2,p):
if p % i == 0 and prime(i):
return i
return p
def problem3(p):
largestprime = 0
curr = p
while curr > 1:
print(curr)
# find the first prime divisor of p
largestprime = smallestprimedivisor(curr)
while curr % largestprime == 0:
curr = curr // largestprime
return largestprime
| en | 0.742858 | # coding: utf8 # Author: <NAME> (~wy) # Date: 2017 # find the first prime divisor of p | 4.096403 | 4 |
Day 4 - Beginner - Randomisation and Python Lists/Day4_Project.py | AkashKumarSingh11032001/100-Days-Python-Bootcamp-By-AngelaYu | 0 | 6622031 | <filename>Day 4 - Beginner - Randomisation and Python Lists/Day4_Project.py
# DAY 4 FINAL PROJECTS
import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissor = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
user_choice = eval(
input("Enter you choice 0- Rock || 1- Paper || 2- Scisore: "))
comp_choice = random.randint(0, 2)
img = [rock, paper, scissor]
print("User Picked : " + img[user_choice])
print(f"Computer Picked {comp_choice}: " + img[comp_choice])
print("R E S U L T => ")
if(user_choice == comp_choice):
print("Its Draw !!!")
elif(user_choice == 0):
if(comp_choice == 2):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
elif(user_choice == 1):
if(comp_choice == 0):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
elif(user_choice == 2):
if(comp_choice == 1):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
| <filename>Day 4 - Beginner - Randomisation and Python Lists/Day4_Project.py
# DAY 4 FINAL PROJECTS
import random
rock = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
paper = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
scissor = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
user_choice = eval(
input("Enter you choice 0- Rock || 1- Paper || 2- Scisore: "))
comp_choice = random.randint(0, 2)
img = [rock, paper, scissor]
print("User Picked : " + img[user_choice])
print(f"Computer Picked {comp_choice}: " + img[comp_choice])
print("R E S U L T => ")
if(user_choice == comp_choice):
print("Its Draw !!!")
elif(user_choice == 0):
if(comp_choice == 2):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
elif(user_choice == 1):
if(comp_choice == 0):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
elif(user_choice == 2):
if(comp_choice == 1):
print("YOU WIN !!!")
else:
print("Computer Win !!!")
| es | 0.134412 | # DAY 4 FINAL PROJECTS _______ ---' ____) (_____) (_____) (____) ---.__(___) _______ ---' ____)____ ______) _______) _______) ---.__________) _______ ---' ____)____ ______) __________) (____) ---.__(___) | 4.120795 | 4 |
botty_mcbotface/plugins/help.py | ColumbiaSC-Tech/botty_mcbotface | 11 | 6622032 | # -*- coding: utf-8 -*-
from slackbot.bot import respond_to, re
help_msg = '\nI\'m not a very smart baht...\n' \
'You can command me with _dot_ commands.\n' \
'Here\'s what I know so far\n' \
'`.(8|8ball|eightball) <question>` - Ask the magic 8ball a question\n' \
'`.calendar next <number>` - Fetch upcoming events on the CSC-Tech google calendar\n' \
'`.fix table(s)` - Flip tables right side up\n' \
'`.flip table(s)` - Flip tables upside down\n' \
'`.flip <word>` - Flip words upside down\n' \
'`.seen <@user>` - Checks the last time a user was on Slack\n' \
'`.today` - Returns all the holidays for today\n' \
'`.g <search term>` - Search google and return the first result\n' \
'`.y <search term>` - Search youtube and return the first result'
@respond_to(r'^halp|help', re.IGNORECASE)
def help(message):
"""
List botty's current commands
:param message: Message to bot requesting help
:return: Message to user
"""
return message.reply(help_msg)
| # -*- coding: utf-8 -*-
from slackbot.bot import respond_to, re
help_msg = '\nI\'m not a very smart baht...\n' \
'You can command me with _dot_ commands.\n' \
'Here\'s what I know so far\n' \
'`.(8|8ball|eightball) <question>` - Ask the magic 8ball a question\n' \
'`.calendar next <number>` - Fetch upcoming events on the CSC-Tech google calendar\n' \
'`.fix table(s)` - Flip tables right side up\n' \
'`.flip table(s)` - Flip tables upside down\n' \
'`.flip <word>` - Flip words upside down\n' \
'`.seen <@user>` - Checks the last time a user was on Slack\n' \
'`.today` - Returns all the holidays for today\n' \
'`.g <search term>` - Search google and return the first result\n' \
'`.y <search term>` - Search youtube and return the first result'
@respond_to(r'^halp|help', re.IGNORECASE)
def help(message):
"""
List botty's current commands
:param message: Message to bot requesting help
:return: Message to user
"""
return message.reply(help_msg)
| en | 0.790764 | # -*- coding: utf-8 -*- List botty's current commands :param message: Message to bot requesting help :return: Message to user | 3.089408 | 3 |
core/test/test_base.py | uktrade/fadmin2 | 3 | 6622033 | <filename>core/test/test_base.py
from django.contrib.auth import get_user_model
from django.test import (
modify_settings,
override_settings,
TestCase,
)
TEST_EMAIL = "<EMAIL>" # /PS-IGNORE
@modify_settings(
MIDDLEWARE={
'remove': 'authbroker_client.middleware.ProtectAllViewsMiddleware',
},
AUTHENTICATION_BACKENDS={
'remove': 'authbroker_client.backends.AuthbrokerBackend',
},
)
@override_settings(AXES_ENABLED=False)
class BaseTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.test_user_email = TEST_EMAIL
cls.test_password = "<PASSWORD>"
cls.test_user, _ = get_user_model().objects.get_or_create(
username="test_user",
email=cls.test_user_email,
)
cls.test_user.set_password(<PASSWORD>)
cls.test_user.save()
| <filename>core/test/test_base.py
from django.contrib.auth import get_user_model
from django.test import (
modify_settings,
override_settings,
TestCase,
)
TEST_EMAIL = "<EMAIL>" # /PS-IGNORE
@modify_settings(
MIDDLEWARE={
'remove': 'authbroker_client.middleware.ProtectAllViewsMiddleware',
},
AUTHENTICATION_BACKENDS={
'remove': 'authbroker_client.backends.AuthbrokerBackend',
},
)
@override_settings(AXES_ENABLED=False)
class BaseTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.test_user_email = TEST_EMAIL
cls.test_password = "<PASSWORD>"
cls.test_user, _ = get_user_model().objects.get_or_create(
username="test_user",
email=cls.test_user_email,
)
cls.test_user.set_password(<PASSWORD>)
cls.test_user.save()
| it | 0.261628 | # /PS-IGNORE | 2.174466 | 2 |
pipeline/utils/visualize.py | nmiles2718/hst_cosmic_rays | 1 | 6622034 | <filename>pipeline/utils/visualize.py
#!/usr/bin/env python
"""
A module to facilitate the visualization of data generated by the pipeline.
"""
from collections import Iterable
import logging
from itertools import chain
from astropy.io import fits
from astropy.time import Time
from astropy.stats import sigma_clipped_stats, LombScargle
import astropy.units as u
from astropy.visualization import ImageNormalize, SqrtStretch, LinearStretch, \
ZScaleInterval, LogStretch, ManualInterval
# import costools
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import dask.array as da
import matplotlib as mpl
#mpl.use('qt5agg')
# from matplotlib import rc
# rc('text', usetex=True)
import matplotlib.pyplot as plt
import matplotlib.colors as colors
plt.style.use('ggplot')
from mpl_toolkits.basemap import Basemap
import matplotlib.colors as colors
from matplotlib.dates import DateFormatter
from matplotlib.legend import Legend
from matplotlib import ticker
import matplotlib as mpl
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import numpy as np
import pandas as pd
import pmagpy.ipmag as ipmag
from scipy.stats import gaussian_kde
import sunpy
import sunpy.timeseries
import sunpy.data.sample
# Module-wide logger: every message is tagged with its severity and the
# module/function/line that emitted it, so pipeline output is traceable.
logging.basicConfig(format='%(levelname)-4s '
                           '[%(module)s.%(funcName)s:%(lineno)d]'
                           ' %(message)s',
                    )
LOG = logging.getLogger('visualize')
LOG.setLevel(logging.INFO)
class Visualizer(object):
"""
A class for visualizing data generated by the pipeline
"""
def __init__(self):
pass
self.image_norms = {
'log': LogStretch(),
'linear': LinearStretch(),
'sqrt' : SqrtStretch(),
}
self.map = None
def mk_fig(self, nrows=1, ncols=1, figsize=(6,6),
sharex=False,
sharey=False,
showgrid=True):
""" Convenience method for creating a matplotlib figure
Parameters
----------
nrows : int
Number of row-subplots to make
ncols : int
Number of column-subplots to make
figsize : tupple of ints
Size of the figure
Returns
-------
fig : :py:class:`matplotlib.Figure`
axes : tuple of :py:class:`matplotlib.axes.Axes`
"""
fig, axes = plt.subplots(nrows=nrows,
ncols=ncols,
figsize=figsize,
sharex=sharex,
sharey=sharey,
gridspec_kw={'wspace': 0.,
'hspace': 0.1})
if isinstance(axes, Iterable) and not showgrid:
axes = axes.flatten()
for ax in axes:
ax.grid(False)
elif not showgrid:
axes.grid(False)
return fig, axes
def _perform_SAA_cut(self, df, key):
saa = [list(t) for t in zip(*costools.saamodel.saaModel(5))]
saa[0].append(saa[0][0])
saa[1].append(saa[1][0])
saa = np.asarray(saa)
saa_eastern = (39.0, -30.0) # lon/lat
saa_western = (267.0, -20.0)
saa_northern = (312.0, 1.0)
mask = (df['longitude_{}'.format(key)] > saa_eastern[0]) &\
(df['longitude_{}'.format(key)] < saa_western[0]) &\
(df['latitude_{}'.format(key)] > saa_northern[1])
cut = df[mask]
return cut
def plot_hist(self, data, bins, label, ax=None, lw=1.75,ls='-',
logy=True, logx=False, c='k', range=None, normalize=True):
"""Generate a histogram for a given dataset
Parameters
----------
data : :py:class:`dask.array`
THe dask array to use to generate a histogram
bins: int
The number of bins to use
ax : :py:class:`matplotlib.axes.Axes`
If passed, the histogram will be added to the plot contained by
this `Axes` instance. Otherwise, one will be created.
logy : bool
If True, the y-axis will be plotted on log-scale
logx : bool
If True, the logarithm of the `data` input will be taken prior to
creating the histogram
Returns
-------
fig : :py:class:`matplotlib.figure.Figure`
ax : :py:class:`matplotlib.axes.Axes`
hist :
"""
# if logx:
# data = da.log10(data)
if range is not None:
h, edges = da.histogram(data, bins=bins,
range=range, density=normalize)
else:
h, edges = da.histogram(data, bins=bins)
hist = h.compute()
#if normalize:
# hist = hist/hist.max()
# Create an axis if it doesnt exists
lw = 1.75
if ax is None:
fig, ax = self.mk_fig(nrows=1, ncols=1)
else:
fig = ax.get_figure()
if logx and logy:
ax.loglog(edges[:-1], hist, basex=10, basey=10,
drawstyle='steps-mid',color=c, lw=lw, label=label, ls=ls)
elif logy:
# self.ax.step(edges[:-1], h.compute(), color='r')
ax.semilogy(edges[:-1], hist,
label=label,ls=ls,
drawstyle='steps-mid', color=c, lw=lw)
else:
ax.step(edges[:-1], hist,
label=label,ls=ls,
where='mid', color=c, lw=lw)
ax.tick_params(axis='both', which='major',
labelsize=10, width=2)
# ax.legend(loc='best')
return fig, ax, hist, edges,
def kde2D_plot(self, parameter1, parameter2, normtype='log',
interval=None, xlim=None, ylim=None, gridsize=100):
"""Generate a 2D KDE for the given parameters.
Parameters
----------
parameter1 : `numpy.array`
X-axis variable
parameter2 : `numpy.array`
Y-axis variable
normtype : {'log', 'linear', 'sqrt'}
Normalization type to apply to the data
interval : tuple
Limits of the interval to use when computing the image scaling
xlim : tuple
X-limits to use for the plot and the KDE grid
ylim : tuple
Y-limits to use for the plot and the KDE grid
gridsize : int
Step-size for the grid
Returns
-------
fig : :py:class:`matplotlib.figure.Figure`
ax : :py:class:`matplotlib.axes.Axes`
surface : numpy.array
The KDE surface plot
"""
data = np.vstack([parameter1, parameter2])
if xlim is None:
xlim = (np.min(parameter1), np.max(parameter1))
if ylim is None:
ylim = (np.min(parameter2), np.max(parameter2))
# Generate a grid to compute the KDE over
xgrid = np.linspace(xlim[0], xlim[1], gridsize)
ygrid = np.linspace(ylim[0], ylim[1], gridsize)
kde = gaussian_kde(data)
Xgrid, Ygrid = np.meshgrid(xgrid, ygrid)
surface = kde.evaluate(np.vstack([Xgrid.ravel(), Ygrid.ravel()]))
if isinstance(interval, tuple):
Interval = ManualInterval(vmin=interval[0], vmax=interval[1])
else:
Interval = ZScaleInterval()
norm = ImageNormalize(surface,
stretch=self.image_norms[normtype],
interval=Interval)
fig, ax = self.mk_fig(nrows=1, ncols=1)
ax.imshow(surface.reshape(Xgrid.shape),
norm=norm,
cmap='gray',
origin='lower',
aspect='auto',
extent=[xgrid.min(), xgrid.max(), ygrid.min(),ygrid.max()])
return fig, ax, surface
def plot_periodogram(self, df, legend_label, exptime_cut=100, ax=None,
window='20D', min_periods=10):
""" Generate a periodogram of the incident CR rate
Parameters
----------
df : :py:class:`pandas.DataFrame`
Dataframe containing all of the statistics for the incident CR rate
legend_label : str
Label name for the data to display in the plot's legend
ax : :py:class:`matplotlib.axes.Axes`
If passed, the histogram will be added to the plot contained by
this `Axes` instance. Otherwise, one will be created.
window : str
String alias for the time period representing the size of the
moving window. Some common ones are listed below:
- 'W', one week window
- '15D', 15 day window
- 'M', one month window
A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
min_periods : int
Minimum number of datapoints that must be in a given window
Returns
-------
frequency : :py:class:`numpy.array` or :py:class:`astropy.Quantity`
An array of all the frequencies consider in the periodogram
power : :py:class: `numpy.array`
An array of the spectral power density associated with frequency
ax : `matplotlib.axes.Axes`
The `Axes` for the corresponding plot
"""
flags = df.integration_time.gt(100)
df1 = df[flags][['incident_cr_rate','mjd']]
df1 = df1.rolling(window=window, min_periods=min_periods).mean()
df1.dropna(inplace=True)
days = df1['mjd'].values * u.day
smoothed_rate = df1['incident_cr_rate'].values
frequency, power = LombScargle(days, smoothed_rate).autopower()
if ax is None:
fig, ax = self.mk_fig()
else:
ax = ax
ax.plot(frequency, power, label=legend_label)
return frequency, power, ax
def plot_cr_rate_vs_time(self, df, legend_label, ax= None, i=0,min_exptime=200,yoffset=0,
smooth_type='rolling',ms=2, window='20D', normalize=True,min_periods=20):
"""Plot the observed cosmic ray rate as a function of time.
Parameters
----------
df : `pandas.DataFrame`
DataFrame containing the incident cosmic ray rate information
legend_label : str
Label to use for the dataset in the plot legend
ax : `matplotlib.axes.Axes`
An instance of a plot to add the current dataset too
i : int
Integer used to determine the color of the points used in the
scatter plot
smooth_type : {'rolling', 'resample'}
Type of smoothing to apply to the cosmic ray rate dataset.
- :py:meth:`pandas.DataFrame.rolling()`
- :py:meth:`pandas.DataFrame.resample()`
window : str
String alias for the time period representing the size of the
moving window. Some common ones are listed below:
- 'W', one week window
- '15D', 15 day window
- 'M', one month window
A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
min_periods : int
Minimum number of datapoints that must be in a given window
Returns
-------
fig : `matplotlib.figure.Figure`
ax : `matplotlib.axes.Axes`
"""
# Get the long exposures with reliable statistics
flags = df.integration_time.gt(min_exptime)
LOG.info('Total number of observations with exptime > {}: {}'.format(min_exptime,
flags.sum()))
exptime_cut = df[flags]
#df = self._perform_SAA_cut(df, key='start')
mean, med, std = sigma_clipped_stats(exptime_cut['incident_cr_rate'], sigma=5)
mean, median, std = sigma_clipped_stats(exptime_cut['incident_cr_rate'],
sigma_lower=5,
sigma_upper=5)
LOG.info('{} mean: {} median: {} std: {}'.format(legend_label, mean, median, std))
sigma_mask = (exptime_cut['incident_cr_rate'] > mean - 3*std) & (exptime_cut['incident_cr_rate'] < mean + 5*std)
sigma_cut = exptime_cut[sigma_mask]
df1 = exptime_cut.loc[:, ['incident_cr_rate','mjd']]
# Smooth the cosmic ray rate
if smooth_type == 'rolling':
LOG.info('Smoothing the data using a '
'rolling mean over a {} window'.format(window))
df1 = df1.rolling(window=window, min_periods=min_periods).median()
elif smooth_type == 'resample':
LOG.info('Resampling the data using a rolling mean over'
'a {} window'.format(window))
df1 = df1.resample(rule=window).median()
if normalize:
LOG.info('Normalizing the date by the median value')
df1.loc[:,'incident_cr_rate'] = df1['incident_cr_rate']/df['incident_cr_rate'].median()
avg_no_nan = df1.dropna()
if ax is None:
fig, ax = self.mk_fig(nrows=1, ncols=1, figsize=(7,4))
else:
fig = ax.get_figure()
# Color cycle to use for repeated use of ax argument
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']
# Make the scatter plot
ax.scatter([Time(val, format='mjd').to_datetime()
for val in avg_no_nan[avg_no_nan.incident_cr_rate.gt(0)]['mjd']],
avg_no_nan[avg_no_nan.incident_cr_rate.gt(0)]['incident_cr_rate']+yoffset,
label=legend_label,
marker='o',
s=ms,
color=CB_color_cycle[i])
ax.tick_params(labelbottom=False)
# ax.set_xlabel('Date')
ax.set_ylabel('Cosmic Ray Rate [$CR/s/cm^2$]', fontsize=14)
# ax.set_title('Smoothed Cosmic Ray Rate')
return fig, ax
def _draw_map(self, map=None, scale=0.9):
if map is None:
pass
else:
self.map=map
# Set the background map up
#self.map.drawcoastlines()
#self.map.fillcontinents()
self.map.shadedrelief(scale=scale)
# Draw the meridians
# lats and longs are returned as a dictionary
lats = self.map.drawparallels(np.linspace(-90, 90, 13),
labels=[True, False, False, False],
fontsize=10)
lons = self.map.drawmeridians(np.linspace(-180, 180, 13),
labels=[False, False, False, True],
fontsize=10)
# keys contain the plt.Line2D instances
lat_lines = chain(*(tup[1][0] for tup in lats.items()))
lon_lines = chain(*(tup[1][0] for tup in lons.items()))
all_lines = chain(lat_lines, lon_lines)
# cycle through these lines and set the desired style
for line in all_lines:
line.set(linestyle='-', alpha=0.3, color='w')
    def plot_hst_loc(self, i = 5, df = None, title='',thresh=5,
                     fout='',min_exptime=800, key='start', save=False,
                     orbital_path1=None, orbital_path2=None):
        """Plot HST observation locations on a cylindrical Basemap.

        Each observation is drawn at its ``latitude_<key>``/``longitude_<key>``
        position, colored by its incident cosmic-ray flux, together with a
        model contour of the South Atlantic Anomaly (SAA) and, optionally,
        the HST orbital path.

        Parameters
        ----------
        i : int
            SAA model number passed to ``costools.saamodel.saaModel``.
        df : pandas.DataFrame
            Observation table with 'integration_time', 'incident_cr_rate',
            and the lat/lon columns selected by ``key``.
        title : str
            Axes title.
        thresh : int
            Number of sigma either side of the mean used to build the
            colorbar boundaries.
        fout : str
            Output filename used when ``save`` is True; defaults to
            'lat_lon_<key>.png' when empty.
        min_exptime : int
            Minimum integration time [s]; shorter exposures are dropped.
        key : str
            Coordinate suffix (e.g. 'start' -> 'latitude_start').
        save : bool
            If True, save the figure to ``fout``.
        orbital_path1, orbital_path2 : optional
            Objects whose ``metadata`` dict provides 'longitude' and
            'latitude' arrays tracing the HST orbit.

        Returns
        -------
        self.fig : matplotlib.figure.Figure
        """
        self.fig = plt.figure(figsize=(8, 6))
        # Get the model for the SAA
        self.map = Basemap(projection='cyl')
        self._draw_map()
        # Keep only exposures long enough for reliable statistics.
        df = df[df.integration_time.gt(min_exptime)]
        df.sort_values(by='incident_cr_rate', inplace=True)
        cbar_bounds = [0,20,40,60,80,100,120,140,160]
        sci_cmap = plt.cm.gray
        custom_norm = colors.BoundaryNorm(boundaries=cbar_bounds,
                                          ncolors=sci_cmap.N)
        # Generate an SAA contour
        # NOTE(review): the ``import costools`` at the top of this module is
        # commented out, so the next line raises NameError unless costools
        # is brought into scope some other way -- confirm.
        saa = [list(t) for t in zip(*costools.saamodel.saaModel(i))]
        # Ensure the polygon representing the SAA is a closed curve by adding
        # the starting points to the end of the list of lat/lon coords
        saa[0].append(saa[0][0])
        saa[1].append(saa[1][0])
        self.map.plot(saa[1], saa[0],
                      c='k',
                      latlon=True,
                      label='SAA contour {}'.format(i))
        # df = self.perform_SAA_cut(df=df, key=key)
        # NOTE(review): the ``df is None`` branch below is unreachable --
        # ``df`` was already indexed above, so a None input would have
        # raised before reaching this point.
        if df is None:
            lat, lon, rate = self.data_df['latitude_{}'.format(key)], \
                             self.data_df['longitude_{}'.format(key)], \
                             self.data_df['incident_cr_rate']
        else:
            #df = df[df['integration_time'] > 800]
            lat, lon, rate = df['latitude_{}'.format(key)], \
                             df['longitude_{}'.format(key)], \
                             df['incident_cr_rate']
        LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate)))
        # lat1, lon1, rate1 = lat[rate >0], lon[rate >0], rate[rate>0]
        # LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate)))
        # median = np.median(rate)
        # std = np.std(rate)
        # Sigma-clipped statistics of the CR flux used for color scaling.
        mean, median, std = sigma_clipped_stats(rate, sigma_lower=3,
                                                sigma_upper=3)
        LOG.info('{} +\- {}'.format(median, std))
        norm = ImageNormalize(rate,
                              stretch=LinearStretch(),
                              vmin=mean - thresh*std, vmax=mean + thresh*std)
        # Colorbar boundaries at 1-sigma steps around the mean; note the
        # comprehension variable rebinds the SAA model parameter ``i``.
        cbar_below_mean = [mean - (i+1)*std for i in range(thresh)]
        cbar_above_mean = [mean + (i+1)*std for i in range(thresh)]
        cbar_bounds = cbar_below_mean + [mean] + cbar_above_mean
        print(cbar_bounds)
        cbar_bounds.sort()
        sci_cmap = plt.cm.viridis
        custom_norm = colors.BoundaryNorm(boundaries=cbar_bounds,
                                          ncolors=sci_cmap.N)
        scat = self.map.scatter(lon.values, lat.values,
                                marker='o',
                                s=5,
                                latlon=True,
                                c=rate, alpha=0.15,
                                norm = custom_norm,
                                cmap='viridis')
        #im = self.map.contourf(lon_grid, lat_grid, rate, norm=norm, cmap='viridis')
        ax = plt.gca()
        ax.set_title(title)
        # Plot the path of HST
        #self.map.plot(
        #    orbital_path1.metadata['longitude'],
        #    orbital_path1.metadata['latitude'],lw=1.25,
        #    label=f'Int. Time: {1000:.1f}s', color='k', ls='-'
        #)
        # NOTE(review): both branches below read from orbital_path2; the
        # orbital_path1 branch presumably intended orbital_path1 -- confirm.
        if orbital_path2 is not None:
            self.map.scatter(
                orbital_path2.metadata['longitude'][::4][1:],
                orbital_path2.metadata['latitude'][::4][1:],c='k',s=20,label='285 seccond interval'
            )
        if orbital_path1 is not None:
            self.map.plot(
                orbital_path2.metadata['longitude'],
                orbital_path2.metadata['latitude'],
                label=f'Orbital Path Over {2000:.0f} seconds',color='k', ls='--', lw=1.25
            )
        ax1_legend = ax.legend(loc='upper right',
                               ncol=1,
                               labelspacing=0.2,
                               columnspacing=0.5,
                               edgecolor='k')
        # for i in range(len(ax1_legend.legendHandles)):
        #     ax1_legend.legendHandles[i]._sizes = [30]
        #cbar_tick_labels = [f'<x>-{thresh}$\sigma$', '<x>', f'<x>+{thresh}$\sigma$']
        #cbar_ticks = [mean - thresh*std,mean, mean + thresh*std]
        # Horizontal colorbar across the bottom of the figure.
        cbar_ticks = cbar_bounds
        cax = self.fig.add_axes([0.1, 0.1, 0.8, 0.05])
        cbar = self.fig.colorbar(scat, cax=cax,
                                 ticks=cbar_ticks,orientation='horizontal')
        cbar.set_alpha(1)
        cbar.draw_all()
        cbar_tick_labels = [f'<x>-{i}$\sigma$' for i in [5,4,3,2,1]] +['<x>']+ [f'<x>+{i}$\sigma$' for i in [1,2,3,4,5]]
        cbar.ax.set_xticklabels(cbar_tick_labels, horizontalalignment='right', rotation=30)
        cbar.set_label('CR Flux [CR/s/$cm^2$]', fontsize=10)
        # cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(),
        #                         fontweight='medium',fontsize=8)
        if save:
            if not fout:
                fout = 'lat_lon_{}.png'.format(key)
            self.fig.savefig(fout,
                             format='png',bbox_inches='tight',
                             dpi=350, transparent=False)
        plt.show()
        return self.fig
    def plot_hst_loc_cartopy(self, i = 5, df = None, title='',thresh=5,
                     fout='',min_exptime=800, key='start', save=False,
                     orbital_path1=None, orbital_path2=None, projection=ccrs.PlateCarree()):
        """Plot HST observation locations on a cartopy map.

        Cartopy counterpart of :meth:`plot_hst_loc`: observations are drawn
        at their lat/lon coordinates, colored by incident cosmic-ray flux,
        over a shaded-relief background, with contours of the total
        geomagnetic field intensity (IGRF model via pmagpy) and, optionally,
        the HST orbital path.

        Parameters
        ----------
        i : int
            Not otherwise used in this method (only rebound by the colorbar
            comprehensions); kept for signature parity with plot_hst_loc.
        df : pandas.DataFrame
            Observation table with 'integration_time', 'incident_cr_rate',
            and the lat/lon columns selected by ``key``.
        title : str
            Unused in this method -- TODO confirm whether a title was
            intended here.
        thresh : int
            Number of sigma either side of the mean used to build the
            colorbar boundaries.
        fout, save : str, bool
            Unused in this method (the figure is never saved) -- TODO
            confirm.
        min_exptime : int
            Minimum integration time [s]; shorter exposures are dropped.
        key : str
            Coordinate suffix (e.g. 'start' -> 'latitude_start').
        orbital_path1, orbital_path2 : optional
            Objects whose ``metadata`` dict provides 'longitude' and
            'latitude' arrays tracing the HST orbit.
        projection : cartopy.crs.Projection
            Map projection. NOTE(review): this default is evaluated once at
            class-definition time and shared across all calls.

        Returns
        -------
        fig : matplotlib.figure.Figure
        """
        fig, ax = plt.subplots(
            nrows=1,
            ncols=1,
            figsize=(8,7),
            tight_layout=True,
            subplot_kw={'projection': projection}
        )
        crs = projection
        transform = crs._as_mpl_transform(ax)
        # Keep only exposures long enough for reliable statistics.
        df = df[df.integration_time.gt(min_exptime)]
        df.sort_values(by='incident_cr_rate', inplace=True)
        # Plot configuration
        ax.coastlines()
        gl = ax.gridlines(crs=crs, draw_labels=True,
                          linewidth=1, color='k', alpha=0.4, linestyle='--')
        # NOTE(review): hard-coded absolute path to the background image.
        fname ='/ifs/missions/projects/plcosmic/hst_cosmic_rays/APJ_plots/HYP_50M_SR_W.tif'
        ax.imshow(
            plt.imread(fname),
            origin='upper',
            transform=crs,
            extent=[-180, 180, -90, 90]
        )
        gl.xlabels_top = False
        gl.ylabels_left = True
        gl.ylabels_right = False
        gl.xlines = True
        # gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])
        gl.xformatter = LONGITUDE_FORMATTER
        gl.yformatter = LATITUDE_FORMATTER
        gl.xlocator = MultipleLocator(60)
        gl.ylocator = MultipleLocator(15)
        # NOTE(review): xlabel_style is assigned twice (the second wins);
        # the second assignment was presumably meant to be ylabel_style.
        gl.xlabel_style = {'size': 10, 'color': 'black'}
        gl.xlabel_style = {'color': 'black'}
        # Epoch [yr] and altitude [km] at which the IGRF field is evaluated.
        date = 2005
        altitude = 565
        # Calculate the B field grid
        # Evenly space grid with 1 degree resolution in both Latitude and Longitude
        lat = np.linspace(-90, 90, 1 * 180 + 1)
        lon = np.linspace(0, 360, 1 * 360 + 1)
        lat_grid, lon_grid = np.meshgrid(lat, lon)
        coordinates = list(zip(lat_grid.ravel(), lon_grid.ravel()))
        B_strength = []
        for coords in coordinates:
            # The last element of the returned field vector is used as the
            # total intensity plotted below.
            b_field = ipmag.igrf([date, altitude, coords[0], coords[1]])
            B_strength.append(b_field[-1])
        B_strength_grid = np.array(B_strength).reshape(lat_grid.shape)
        # Get the CR rate information
        lat, lon, rate = df['latitude_{}'.format(key)], \
                         df['longitude_{}'.format(key)], \
                         df['incident_cr_rate']
        LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate)))
        # Get average statistics to generate contour
        mean, median, std = sigma_clipped_stats(rate, sigma_lower=3,
                                                sigma_upper=3)
        LOG.info('{} +\- {}'.format(mean, std))
        norm = ImageNormalize(rate,
                              stretch=LinearStretch(),
                              vmin=mean - thresh*std, vmax=mean + thresh*std)
        # Colorbar boundaries at 1-sigma steps around the mean.
        cbar_below_mean = [mean - (i+1)*std for i in range(thresh)]
        cbar_above_mean = [mean + (i+1)*std for i in range(thresh)]
        cbar_bounds = cbar_below_mean + [mean] + cbar_above_mean
        print(cbar_bounds)
        cbar_bounds.sort()
        sci_cmap = plt.cm.viridis
        custom_norm = colors.BoundaryNorm(boundaries=cbar_bounds,
                                          ncolors=sci_cmap.N)
        scat = ax.scatter(
            lon.values,
            lat.values,
            marker='o',
            s=3.5,
            c=rate, alpha=0.2,
            norm = custom_norm,
            cmap='viridis',
            transform=ccrs.PlateCarree()
        )
        # Horizontal colorbar across the bottom of the figure.
        cbar_ticks = cbar_bounds
        cax = fig.add_axes([0.1, 0.2, 0.8, 0.05])
        cbar = fig.colorbar(scat, cax=cax,
                            ticks=cbar_ticks,orientation='horizontal')
        cbar.set_alpha(1)
        cbar.draw_all()
        cbar_tick_labels = [f'<x>-{i}$\sigma$' for i in [5,4,3,2,1]] +['<x>']+ [f'<x>+{i}$\sigma$' for i in [1,2,3,4,5]]
        cbar.ax.set_xticklabels(cbar_tick_labels, horizontalalignment='right', rotation=30)
        cbar.set_label('CR Flux [CR/s/$cm^2$]', fontsize=10)
        # Overlay contours of total magnetic field intensity.
        cntr = ax.contour(
            lon_grid,
            lat_grid,
            B_strength_grid,
            cmap='plasma',
            levels=10,
            alpha=1,
            lw=2,
            transform=ccrs.PlateCarree()
        )
        h1, l1 = cntr.legend_elements("B_strength_grid")
        # Reformat the auto-generated contour labels as '<value> nT'.
        l1_custom = [f"{val.split('=')[-1].strip('$').strip()} nT" for val in l1]
        leg1 = Legend(
            ax, h1, l1_custom, loc='upper left', edgecolor='k',
            fontsize=8,framealpha=0.45,facecolor='tab:gray',
            bbox_to_anchor=(1.05, 1.03), title='Total Magnetic Intensity'
        )
        ax.add_artist(leg1)
        if orbital_path1 is not None:
            ax.scatter(
                orbital_path1.metadata['longitude'][::4][1:],
                orbital_path1.metadata['latitude'][::4][1:],c='k',s=20,label='285 seccond interval'
            )
        if orbital_path2 is not None:
            ax.plot(
                orbital_path2.metadata['longitude'],
                orbital_path2.metadata['latitude'],
                label=f'Orbital Path Over {2000:.0f} seconds',color='k', ls='--', lw=1.25
            )
        plt.show()
        return fig
def plot_solar_cycle(self, variable=None, ax = None, smoothed=False):
""" Retrieve solar cycle information
Parameters
----------
variable
ax
smoothed
Returns
-------
"""
noaa = sunpy.timeseries.TimeSeries(sunpy.data.sample.NOAAINDICES_TIMESERIES,
source='NOAAIndices')
if variable is None and ax is not None:
noaa.peek(type='sunspot RI', ax=ax)
elif ax is not None:
noaa.peek(type=variable, ax=ax)
return noaa
# if __name__ == '__main__':
# main()
| <filename>pipeline/utils/visualize.py
#!/usr/bin/env python
"""
A module to facilitate the visualization of data generated by the pipeline.
"""
# BUG FIX: collections.Iterable was removed in Python 3.10; the ABC lives in
# collections.abc (available since Python 3.3).
from collections.abc import Iterable
import logging
from itertools import chain

from astropy.io import fits
from astropy.time import Time
from astropy.stats import sigma_clipped_stats, LombScargle
import astropy.units as u
from astropy.visualization import ImageNormalize, SqrtStretch, LinearStretch, \
    ZScaleInterval, LogStretch, ManualInterval
# import costools
# NOTE(review): costools is referenced by _perform_SAA_cut and plot_hst_loc;
# with the import commented out those code paths raise NameError -- confirm
# whether the import should be restored.
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import dask.array as da
import matplotlib as mpl
#mpl.use('qt5agg')
# from matplotlib import rc
# rc('text', usetex=True)
import matplotlib.pyplot as plt
import matplotlib.colors as colors
# (exact-duplicate imports of matplotlib and matplotlib.colors were removed)
plt.style.use('ggplot')
from mpl_toolkits.basemap import Basemap
from matplotlib.dates import DateFormatter
from matplotlib.legend import Legend
from matplotlib import ticker
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
                               AutoMinorLocator)
import numpy as np
import pandas as pd
import pmagpy.ipmag as ipmag
from scipy.stats import gaussian_kde
import sunpy
import sunpy.timeseries
import sunpy.data.sample

# Module-level logger; level INFO so the plotting helpers report statistics.
logging.basicConfig(format='%(levelname)-4s '
                           '[%(module)s.%(funcName)s:%(lineno)d]'
                           ' %(message)s',
                    )
LOG = logging.getLogger('visualize')
LOG.setLevel(logging.INFO)
class Visualizer(object):
    """
    A class for visualizing data generated by the pipeline
    """
    def __init__(self):
        """Initialize the stretch lookup table and the lazy Basemap slot."""
        # Named astropy stretches used when building ImageNormalize objects
        # (e.g. in kde2D_plot). A stray no-op `pass` statement that preceded
        # these assignments was removed.
        self.image_norms = {
            'log': LogStretch(),
            'linear': LinearStretch(),
            'sqrt' : SqrtStretch(),
        }
        # Basemap instance; created on demand by the plotting methods.
        self.map = None
def mk_fig(self, nrows=1, ncols=1, figsize=(6,6),
           sharex=False,
           sharey=False,
           showgrid=True):
    """Create a matplotlib figure with the pipeline's standard layout.

    Parameters
    ----------
    nrows, ncols : int
        Subplot grid dimensions.
    figsize : tuple of int
        Figure size in inches.
    sharex, sharey : bool
        Forwarded to :func:`matplotlib.pyplot.subplots`.
    showgrid : bool
        When False, the background grid is switched off on every axis.

    Returns
    -------
    fig : :py:class:`matplotlib.Figure`
    axes : :py:class:`matplotlib.axes.Axes` or array of Axes
    """
    fig, axes = plt.subplots(nrows=nrows,
                             ncols=ncols,
                             figsize=figsize,
                             sharex=sharex,
                             sharey=sharey,
                             gridspec_kw={'wspace': 0.,
                                          'hspace': 0.1})
    if not showgrid:
        # With multiple subplots, plt.subplots returns an ndarray of Axes;
        # flatten it so each panel can be handled uniformly.
        if isinstance(axes, Iterable):
            axes = axes.flatten()
            for panel in axes:
                panel.grid(False)
        else:
            axes.grid(False)
    return fig, axes
def _perform_SAA_cut(self, df, key):
    """Remove observations whose coordinates fall inside a rough SAA box.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'longitude_<key>' and 'latitude_<key>' columns.
    key : str
        Coordinate-set suffix (e.g. 'start' or 'stop').

    Returns
    -------
    cut : pandas.DataFrame
        Rows selected by the longitude/latitude mask below.
    """
    # NOTE(review): costools is only present as a commented-out import at
    # module level, so this line raises NameError as written -- confirm the
    # import should be restored. The result is also never used afterwards.
    saa = [list(t) for t in zip(*costools.saamodel.saaModel(5))]
    # Close the SAA polygon by repeating the first vertex.
    saa[0].append(saa[0][0])
    saa[1].append(saa[1][0])
    saa = np.asarray(saa)
    # Approximate extreme points of the SAA (lon/lat).
    saa_eastern = (39.0, -30.0)  # lon/lat
    saa_western = (267.0, -20.0)
    saa_northern = (312.0, 1.0)
    # Keep rows between the eastern and western longitudes and north of the
    # SAA's northern latitude extent.
    mask = (df['longitude_{}'.format(key)] > saa_eastern[0]) &\
           (df['longitude_{}'.format(key)] < saa_western[0]) &\
           (df['latitude_{}'.format(key)] > saa_northern[1])
    cut = df[mask]
    return cut
def plot_hist(self, data, bins, label, ax=None, lw=1.75,ls='-',
              logy=True, logx=False, c='k', range=None, normalize=True):
    """Generate a histogram for a given dataset

    Parameters
    ----------
    data : :py:class:`dask.array`
        The dask array to use to generate a histogram
    bins : int
        The number of bins to use
    label : str
        Legend label for the histogram trace
    ax : :py:class:`matplotlib.axes.Axes`
        If passed, the histogram will be added to the plot contained by
        this `Axes` instance. Otherwise, one will be created.
    lw : float
        Line width of the histogram trace
    ls : str
        Line style of the histogram trace
    logy : bool
        If True, the y-axis will be plotted on log-scale
    logx : bool
        If True, both axes are plotted on log-scale (loglog)
    c : str
        Line color
    range : tuple, optional
        (min, max) binning range; note this intentionally shadows the
        builtin `range` within this method
    normalize : bool
        When True and `range` is given, compute a density histogram

    Returns
    -------
    fig : :py:class:`matplotlib.figure.Figure`
    ax : :py:class:`matplotlib.axes.Axes`
    hist : numpy.ndarray
        The computed bin counts (or densities)
    edges : numpy.ndarray
        The bin edges
    """
    if range is not None:
        h, edges = da.histogram(data, bins=bins,
                                range=range, density=normalize)
    else:
        h, edges = da.histogram(data, bins=bins)
    hist = h.compute()
    # BUG FIX: `lw` was unconditionally reset to 1.75 here, silently
    # discarding the caller-supplied line width; the override is removed.
    # Create an axis if it doesn't exist yet.
    if ax is None:
        fig, ax = self.mk_fig(nrows=1, ncols=1)
    else:
        fig = ax.get_figure()
    if logx and logy:
        ax.loglog(edges[:-1], hist, basex=10, basey=10,
                  drawstyle='steps-mid', color=c, lw=lw, label=label, ls=ls)
    elif logy:
        ax.semilogy(edges[:-1], hist,
                    label=label, ls=ls,
                    drawstyle='steps-mid', color=c, lw=lw)
    else:
        ax.step(edges[:-1], hist,
                label=label, ls=ls,
                where='mid', color=c, lw=lw)
    ax.tick_params(axis='both', which='major',
                   labelsize=10, width=2)
    return fig, ax, hist, edges
def kde2D_plot(self, parameter1, parameter2, normtype='log',
               interval=None, xlim=None, ylim=None, gridsize=100):
    """Generate a 2D KDE for the given parameters.

    Parameters
    ----------
    parameter1 : `numpy.array`
        X-axis variable
    parameter2 : `numpy.array`
        Y-axis variable
    normtype : {'log', 'linear', 'sqrt'}
        Normalization type to apply to the data (keys of self.image_norms)
    interval : tuple
        Limits of the interval to use when computing the image scaling
    xlim : tuple
        X-limits to use for the plot and the KDE grid
    ylim : tuple
        Y-limits to use for the plot and the KDE grid
    gridsize : int
        Number of grid nodes per axis

    Returns
    -------
    fig : :py:class:`matplotlib.figure.Figure`
    ax : :py:class:`matplotlib.axes.Axes`
    surface : numpy.array
        The KDE values evaluated on the flattened grid
    """
    # Stack into the (2, N) sample array expected by gaussian_kde.
    data = np.vstack([parameter1, parameter2])
    # Default the grid/plot limits to the data extent.
    if xlim is None:
        xlim = (np.min(parameter1), np.max(parameter1))
    if ylim is None:
        ylim = (np.min(parameter2), np.max(parameter2))
    # Generate a grid to compute the KDE over
    xgrid = np.linspace(xlim[0], xlim[1], gridsize)
    ygrid = np.linspace(ylim[0], ylim[1], gridsize)
    kde = gaussian_kde(data)
    Xgrid, Ygrid = np.meshgrid(xgrid, ygrid)
    # Evaluate the KDE at every grid node (flattened; reshaped for imshow).
    surface = kde.evaluate(np.vstack([Xgrid.ravel(), Ygrid.ravel()]))
    # Explicit limits get a ManualInterval; otherwise fall back to ZScale.
    if isinstance(interval, tuple):
        Interval = ManualInterval(vmin=interval[0], vmax=interval[1])
    else:
        Interval = ZScaleInterval()
    norm = ImageNormalize(surface,
                          stretch=self.image_norms[normtype],
                          interval=Interval)
    fig, ax = self.mk_fig(nrows=1, ncols=1)
    ax.imshow(surface.reshape(Xgrid.shape),
              norm=norm,
              cmap='gray',
              origin='lower',
              aspect='auto',
              extent=[xgrid.min(), xgrid.max(), ygrid.min(),ygrid.max()])
    return fig, ax, surface
def plot_periodogram(self, df, legend_label, exptime_cut=100, ax=None,
                     window='20D', min_periods=10):
    """ Generate a periodogram of the incident CR rate

    Parameters
    ----------
    df : :py:class:`pandas.DataFrame`
        Dataframe containing all of the statistics for the incident CR rate
    legend_label : str
        Label name for the data to display in the plot's legend
    exptime_cut : int
        Minimum integration time an observation must exceed to be included
    ax : :py:class:`matplotlib.axes.Axes`
        If passed, the periodogram will be added to the plot contained by
        this `Axes` instance. Otherwise, one will be created.
    window : str
        String alias for the time period representing the size of the
        moving window. Some common ones are listed below:
        - 'W', one week window
        - '15D', 15 day window
        - 'M', one month window
        A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
    min_periods : int
        Minimum number of datapoints that must be in a given window

    Returns
    -------
    frequency : :py:class:`numpy.array` or :py:class:`astropy.Quantity`
        An array of all the frequencies considered in the periodogram
    power : :py:class:`numpy.array`
        An array of the spectral power density associated with frequency
    ax : `matplotlib.axes.Axes`
        The `Axes` for the corresponding plot
    """
    # BUG FIX: the exposure-time threshold was hard-coded to 100, silently
    # ignoring the `exptime_cut` argument (the default is unchanged).
    flags = df.integration_time.gt(exptime_cut)
    df1 = df[flags][['incident_cr_rate','mjd']]
    # Smooth the rate with a rolling mean before the Lomb-Scargle analysis.
    df1 = df1.rolling(window=window, min_periods=min_periods).mean()
    df1.dropna(inplace=True)
    days = df1['mjd'].values * u.day
    smoothed_rate = df1['incident_cr_rate'].values
    frequency, power = LombScargle(days, smoothed_rate).autopower()
    if ax is None:
        fig, ax = self.mk_fig()
    ax.plot(frequency, power, label=legend_label)
    return frequency, power, ax
def plot_cr_rate_vs_time(self, df, legend_label, ax= None, i=0,min_exptime=200,yoffset=0,
                         smooth_type='rolling',ms=2, window='20D', normalize=True,min_periods=20):
    """Plot the observed cosmic ray rate as a function of time.

    Parameters
    ----------
    df : `pandas.DataFrame`
        DataFrame containing the incident cosmic ray rate information
    legend_label : str
        Label to use for the dataset in the plot legend
    ax : `matplotlib.axes.Axes`
        An instance of a plot to add the current dataset too
    i : int
        Integer used to determine the color of the points used in the
        scatter plot
    min_exptime : int
        Minimum integration time for an observation to be kept
    yoffset : float
        Constant vertical offset added to the plotted rates
    smooth_type : {'rolling', 'resample'}
        Type of smoothing to apply to the cosmic ray rate dataset.
        - :py:meth:`pandas.DataFrame.rolling()`
        - :py:meth:`pandas.DataFrame.resample()`
    ms : float
        Marker size for the scatter points
    window : str
        String alias for the time period representing the size of the
        moving window. Some common ones are listed below:
        - 'W', one week window
        - '15D', 15 day window
        - 'M', one month window
        A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_
    normalize : bool
        When True, divide the smoothed rate by the median rate
    min_periods : int
        Minimum number of datapoints that must be in a given window

    Returns
    -------
    fig : `matplotlib.figure.Figure`
    ax : `matplotlib.axes.Axes`
    """
    # Get the long exposures with reliable statistics
    flags = df.integration_time.gt(min_exptime)
    LOG.info('Total number of observations with exptime > {}: {}'.format(min_exptime,
                                                                         flags.sum()))
    exptime_cut = df[flags]
    #df = self._perform_SAA_cut(df, key='start')
    # NOTE(review): the result of this first sigma_clipped_stats call is
    # immediately overwritten by the next call -- likely leftover code.
    mean, med, std = sigma_clipped_stats(exptime_cut['incident_cr_rate'], sigma=5)
    mean, median, std = sigma_clipped_stats(exptime_cut['incident_cr_rate'],
                                            sigma_lower=5,
                                            sigma_upper=5)
    LOG.info('{} mean: {} median: {} std: {}'.format(legend_label, mean, median, std))
    # NOTE(review): sigma_cut is computed but never used afterwards.
    sigma_mask = (exptime_cut['incident_cr_rate'] > mean - 3*std) & (exptime_cut['incident_cr_rate'] < mean + 5*std)
    sigma_cut = exptime_cut[sigma_mask]
    df1 = exptime_cut.loc[:, ['incident_cr_rate','mjd']]
    # Smooth the cosmic ray rate
    if smooth_type == 'rolling':
        LOG.info('Smoothing the data using a '
                 'rolling mean over a {} window'.format(window))
        df1 = df1.rolling(window=window, min_periods=min_periods).median()
    elif smooth_type == 'resample':
        LOG.info('Resampling the data using a rolling mean over'
                 'a {} window'.format(window))
        df1 = df1.resample(rule=window).median()
    if normalize:
        LOG.info('Normalizing the date by the median value')
        # NOTE(review): divides by the median of the *unsmoothed* `df`
        # rather than of `df1` -- confirm this is intentional.
        df1.loc[:,'incident_cr_rate'] = df1['incident_cr_rate']/df['incident_cr_rate'].median()
    avg_no_nan = df1.dropna()
    if ax is None:
        fig, ax = self.mk_fig(nrows=1, ncols=1, figsize=(7,4))
    else:
        fig = ax.get_figure()
    # Color cycle to use for repeated use of ax argument
    CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
                      '#f781bf', '#a65628', '#984ea3',
                      '#999999', '#e41a1c', '#dede00']
    # Make the scatter plot; MJD values are converted to datetimes for the
    # x axis and zero/negative rates are dropped.
    ax.scatter([Time(val, format='mjd').to_datetime()
                for val in avg_no_nan[avg_no_nan.incident_cr_rate.gt(0)]['mjd']],
               avg_no_nan[avg_no_nan.incident_cr_rate.gt(0)]['incident_cr_rate']+yoffset,
               label=legend_label,
               marker='o',
               s=ms,
               color=CB_color_cycle[i])
    # Hide x tick labels (dates); a shared axis is expected to show them.
    ax.tick_params(labelbottom=False)
    # ax.set_xlabel('Date')
    ax.set_ylabel('Cosmic Ray Rate [$CR/s/cm^2$]', fontsize=14)
    # ax.set_title('Smoothed Cosmic Ray Rate')
    return fig, ax
def _draw_map(self, map=None, scale=0.9):
    """Render the shaded-relief background and lat/lon grid on self.map.

    Parameters
    ----------
    map : mpl_toolkits.basemap.Basemap, optional
        If given, replaces the stored Basemap instance; otherwise the
        existing self.map is drawn.
    scale : float
        Resolution scale passed to Basemap.shadedrelief().
    """
    if map is None:
        pass
    else:
        self.map=map
    # Set the background map up
    #self.map.drawcoastlines()
    #self.map.fillcontinents()
    self.map.shadedrelief(scale=scale)
    # Draw the meridians
    # lats and lons are returned as dictionaries keyed by coordinate value
    lats = self.map.drawparallels(np.linspace(-90, 90, 13),
                                  labels=[True, False, False, False],
                                  fontsize=10)
    lons = self.map.drawmeridians(np.linspace(-180, 180, 13),
                                  labels=[False, False, False, True],
                                  fontsize=10)
    # keys contain the plt.Line2D instances
    lat_lines = chain(*(tup[1][0] for tup in lats.items()))
    lon_lines = chain(*(tup[1][0] for tup in lons.items()))
    all_lines = chain(lat_lines, lon_lines)
    # cycle through these lines and set the desired style
    for line in all_lines:
        line.set(linestyle='-', alpha=0.3, color='w')
def plot_hst_loc(self, i = 5, df = None, title='',thresh=5,
                 fout='',min_exptime=800, key='start', save=False,
                 orbital_path1=None, orbital_path2=None):
    """Plot the incident CR rate on a Basemap world map with the SAA contour.

    Parameters
    ----------
    i : int
        SAA model number passed to costools.saamodel.saaModel.
    df : pandas.DataFrame
        Observation statistics; must contain latitude_<key>,
        longitude_<key>, incident_cr_rate and integration_time columns.
    title : str
        Plot title.
    thresh : int
        Number of sigma-clipped standard deviations spanned by the colorbar.
    fout : str
        Output filename used when `save` is True.
    min_exptime : int
        Minimum integration time for an observation to be plotted.
    key : str
        Coordinate-set suffix, e.g. 'start' or 'stop'.
    save : bool
        Save the figure to `fout`.
    orbital_path1, orbital_path2
        Optional objects whose .metadata carries 'longitude'/'latitude'
        arrays describing HST's orbital track.

    Returns
    -------
    matplotlib.figure.Figure
    """
    # NOTE(review): `costools` is only a commented-out import at module
    # level, so the saaModel call below raises NameError unless it is
    # restored -- confirm.
    self.fig = plt.figure(figsize=(8, 6))
    self.map = Basemap(projection='cyl')
    self._draw_map()
    df = df[df.integration_time.gt(min_exptime)]
    df.sort_values(by='incident_cr_rate', inplace=True)
    # Generate an SAA contour
    saa = [list(t) for t in zip(*costools.saamodel.saaModel(i))]
    # Ensure the polygon representing the SAA is a closed curve by adding
    # the starting points to the end of the list of lat/lon coords
    saa[0].append(saa[0][0])
    saa[1].append(saa[1][0])
    self.map.plot(saa[1], saa[0],
                  c='k',
                  latlon=True,
                  label='SAA contour {}'.format(i))
    if df is None:
        # Effectively dead: `df` was filtered above, so a None df would
        # already have raised; kept for interface compatibility.
        lat, lon, rate = self.data_df['latitude_{}'.format(key)], \
                         self.data_df['longitude_{}'.format(key)], \
                         self.data_df['incident_cr_rate']
    else:
        lat, lon, rate = df['latitude_{}'.format(key)], \
                         df['longitude_{}'.format(key)], \
                         df['incident_cr_rate']
    LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate)))
    # Sigma-clipped statistics define the colorbar scale.
    mean, median, std = sigma_clipped_stats(rate, sigma_lower=3,
                                            sigma_upper=3)
    LOG.info('{} +\- {}'.format(median, std))
    # Colorbar boundaries at integer multiples of std around the mean.
    cbar_below_mean = [mean - (j + 1)*std for j in range(thresh)]
    cbar_above_mean = [mean + (j + 1)*std for j in range(thresh)]
    cbar_bounds = cbar_below_mean + [mean] + cbar_above_mean
    LOG.info('colorbar bounds: {}'.format(cbar_bounds))
    cbar_bounds.sort()
    sci_cmap = plt.cm.viridis
    custom_norm = colors.BoundaryNorm(boundaries=cbar_bounds,
                                      ncolors=sci_cmap.N)
    scat = self.map.scatter(lon.values, lat.values,
                            marker='o',
                            s=5,
                            latlon=True,
                            c=rate, alpha=0.15,
                            norm = custom_norm,
                            cmap='viridis')
    ax = plt.gca()
    ax.set_title(title)
    # Plot the path of HST.
    # BUG FIX: the line plot below drew orbital_path2 but was guarded by an
    # `orbital_path1 is not None` check, so it crashed whenever only
    # orbital_path1 was supplied; the guard now matches the data it uses.
    if orbital_path2 is not None:
        self.map.scatter(
            orbital_path2.metadata['longitude'][::4][1:],
            orbital_path2.metadata['latitude'][::4][1:],c='k',s=20,label='285 seccond interval'
        )
        self.map.plot(
            orbital_path2.metadata['longitude'],
            orbital_path2.metadata['latitude'],
            label=f'Orbital Path Over {2000:.0f} seconds',color='k', ls='--', lw=1.25
        )
    ax1_legend = ax.legend(loc='upper right',
                           ncol=1,
                           labelspacing=0.2,
                           columnspacing=0.5,
                           edgecolor='k')
    cbar_ticks = cbar_bounds
    # Horizontal colorbar in its own axes below the map.
    cax = self.fig.add_axes([0.1, 0.1, 0.8, 0.05])
    cbar = self.fig.colorbar(scat, cax=cax,
                             ticks=cbar_ticks,orientation='horizontal')
    cbar.set_alpha(1)
    cbar.draw_all()
    # Labels generalized to `thresh` (previously hard-coded for thresh=5).
    cbar_tick_labels = ([f'<x>-{j}$\sigma$' for j in range(thresh, 0, -1)]
                        + ['<x>']
                        + [f'<x>+{j}$\sigma$' for j in range(1, thresh + 1)])
    cbar.ax.set_xticklabels(cbar_tick_labels, horizontalalignment='right', rotation=30)
    cbar.set_label('CR Flux [CR/s/$cm^2$]', fontsize=10)
    if save:
        if not fout:
            fout = 'lat_lon_{}.png'.format(key)
        self.fig.savefig(fout,
                         format='png',bbox_inches='tight',
                         dpi=350, transparent=False)
    plt.show()
    return self.fig
def plot_hst_loc_cartopy(self, i = 5, df = None, title='',thresh=5,
                         fout='',min_exptime=800, key='start', save=False,
                         orbital_path1=None, orbital_path2=None, projection=ccrs.PlateCarree()):
    """Plot the CR rate on a cartopy map with IGRF magnetic-field contours.

    Parameters
    ----------
    i : int
        Unused in this method (kept for interface parity with plot_hst_loc).
    df : pandas.DataFrame
        Must contain latitude_<key>, longitude_<key>, incident_cr_rate and
        integration_time columns.
    title : str
        Accepted but not used here.
    thresh : int
        Number of clipped standard deviations spanned by the colorbar.
    fout, save
        Accepted but not used here (no file is written).
    min_exptime : int
        Minimum integration time for an observation to be plotted.
    key : str
        Coordinate-set suffix, e.g. 'start' or 'stop'.
    orbital_path1, orbital_path2
        Optional orbital-track objects with .metadata longitude/latitude.
    projection : cartopy.crs.Projection
        Map projection (default PlateCarree).

    Returns
    -------
    matplotlib.figure.Figure
    """
    fig, ax = plt.subplots(
        nrows=1,
        ncols=1,
        figsize=(8,7),
        tight_layout=True,
        subplot_kw={'projection': projection}
    )
    crs = projection
    transform = crs._as_mpl_transform(ax)
    df = df[df.integration_time.gt(min_exptime)]
    df.sort_values(by='incident_cr_rate', inplace=True)
    # Plot configuration
    ax.coastlines()
    gl = ax.gridlines(crs=crs, draw_labels=True,
                      linewidth=1, color='k', alpha=0.4, linestyle='--')
    # NOTE(review): hard-coded absolute path to the shaded-relief raster;
    # only valid on the original author's filesystem.
    fname ='/ifs/missions/projects/plcosmic/hst_cosmic_rays/APJ_plots/HYP_50M_SR_W.tif'
    ax.imshow(
        plt.imread(fname),
        origin='upper',
        transform=crs,
        extent=[-180, 180, -90, 90]
    )
    gl.xlabels_top = False
    gl.ylabels_left = True
    gl.ylabels_right = False
    gl.xlines = True
    # gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180])
    gl.xformatter = LONGITUDE_FORMATTER
    gl.yformatter = LATITUDE_FORMATTER
    gl.xlocator = MultipleLocator(60)
    gl.ylocator = MultipleLocator(15)
    # NOTE(review): the second assignment overwrites the first, dropping
    # the 'size' entry -- confirm which style is intended.
    gl.xlabel_style = {'size': 10, 'color': 'black'}
    gl.xlabel_style = {'color': 'black'}
    # Epoch (decimal year) and altitude (km) for the IGRF field model.
    date = 2005
    altitude = 565
    # Calculate the B field grid
    # Evenly space grid with 1 degree resolution in both Latitude and Longitude
    lat = np.linspace(-90, 90, 1 * 180 + 1)
    lon = np.linspace(0, 360, 1 * 360 + 1)
    lat_grid, lon_grid = np.meshgrid(lat, lon)
    coordinates = list(zip(lat_grid.ravel(), lon_grid.ravel()))
    B_strength = []
    # One igrf call per grid node (~65k calls); this loop is slow.
    for coords in coordinates:
        b_field = ipmag.igrf([date, altitude, coords[0], coords[1]])
        B_strength.append(b_field[-1])
    B_strength_grid = np.array(B_strength).reshape(lat_grid.shape)
    # Get the CR rate information
    lat, lon, rate = df['latitude_{}'.format(key)], \
                     df['longitude_{}'.format(key)], \
                     df['incident_cr_rate']
    LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate)))
    # Get average statistics to generate contour
    mean, median, std = sigma_clipped_stats(rate, sigma_lower=3,
                                            sigma_upper=3)
    LOG.info('{} +\- {}'.format(mean, std))
    # NOTE(review): `norm` is unused below (custom_norm is used instead).
    norm = ImageNormalize(rate,
                          stretch=LinearStretch(),
                          vmin=mean - thresh*std, vmax=mean + thresh*std)
    # Colorbar boundaries at integer multiples of std around the mean.
    cbar_below_mean = [mean - (i+1)*std for i in range(thresh)]
    cbar_above_mean = [mean + (i+1)*std for i in range(thresh)]
    cbar_bounds = cbar_below_mean + [mean] + cbar_above_mean
    print(cbar_bounds)
    cbar_bounds.sort()
    sci_cmap = plt.cm.viridis
    custom_norm = colors.BoundaryNorm(boundaries=cbar_bounds,
                                      ncolors=sci_cmap.N)
    scat = ax.scatter(
        lon.values,
        lat.values,
        marker='o',
        s=3.5,
        c=rate, alpha=0.2,
        norm = custom_norm,
        cmap='viridis',
        transform=ccrs.PlateCarree()
    )
    cbar_ticks = cbar_bounds
    # Horizontal colorbar in its own axes below the map.
    cax = fig.add_axes([0.1, 0.2, 0.8, 0.05])
    cbar = fig.colorbar(scat, cax=cax,
                        ticks=cbar_ticks,orientation='horizontal')
    cbar.set_alpha(1)
    cbar.draw_all()
    # NOTE(review): tick labels assume thresh == 5 (11 bounds).
    cbar_tick_labels = [f'<x>-{i}$\sigma$' for i in [5,4,3,2,1]] +['<x>']+ [f'<x>+{i}$\sigma$' for i in [1,2,3,4,5]]
    cbar.ax.set_xticklabels(cbar_tick_labels, horizontalalignment='right', rotation=30)
    cbar.set_label('CR Flux [CR/s/$cm^2$]', fontsize=10)
    # Magnetic-field strength contours from the IGRF grid.
    cntr = ax.contour(
        lon_grid,
        lat_grid,
        B_strength_grid,
        cmap='plasma',
        levels=10,
        alpha=1,
        lw=2,
        transform=ccrs.PlateCarree()
    )
    h1, l1 = cntr.legend_elements("B_strength_grid")
    l1_custom = [f"{val.split('=')[-1].strip('$').strip()} nT" for val in l1]
    leg1 = Legend(
        ax, h1, l1_custom, loc='upper left', edgecolor='k',
        fontsize=8,framealpha=0.45,facecolor='tab:gray',
        bbox_to_anchor=(1.05, 1.03), title='Total Magnetic Intensity'
    )
    ax.add_artist(leg1)
    if orbital_path1 is not None:
        ax.scatter(
            orbital_path1.metadata['longitude'][::4][1:],
            orbital_path1.metadata['latitude'][::4][1:],c='k',s=20,label='285 seccond interval'
        )
    if orbital_path2 is not None:
        ax.plot(
            orbital_path2.metadata['longitude'],
            orbital_path2.metadata['latitude'],
            label=f'Orbital Path Over {2000:.0f} seconds',color='k', ls='--', lw=1.25
        )
    plt.show()
    return fig
def plot_solar_cycle(self, variable=None, ax = None, smoothed=False):
    """Fetch the NOAA solar-indices sample timeseries, optionally plotting it.

    Parameters
    ----------
    variable : str or None
        NOAA index to plot; None selects 'sunspot RI'.
    ax : matplotlib.axes.Axes or None
        Axes to draw on; when None nothing is plotted.
    smoothed : bool
        Present for interface compatibility; not used.

    Returns
    -------
    noaa : sunpy.timeseries.TimeSeries
        The NOAA indices timeseries.
    """
    noaa = sunpy.timeseries.TimeSeries(
        sunpy.data.sample.NOAAINDICES_TIMESERIES, source='NOAAIndices')
    if ax is not None:
        # Fall back to the sunspot index when no variable was requested.
        plot_type = 'sunspot RI' if variable is None else variable
        noaa.peek(type=plot_type, ax=ax)
    return noaa
# if __name__ == '__main__':
# main()
| en | 0.508808 | #!/usr/bin/env python A module to facilitate the visualization of data generated by the pipeline. # import costools #mpl.use('qt5agg') # from matplotlib import rc # rc('text', usetex=True) A class for visualizing data generated by the pipeline Convenience method for creating a matplotlib figure Parameters ---------- nrows : int Number of row-subplots to make ncols : int Number of column-subplots to make figsize : tupple of ints Size of the figure Returns ------- fig : :py:class:`matplotlib.Figure` axes : tuple of :py:class:`matplotlib.axes.Axes` # lon/lat Generate a histogram for a given dataset Parameters ---------- data : :py:class:`dask.array` THe dask array to use to generate a histogram bins: int The number of bins to use ax : :py:class:`matplotlib.axes.Axes` If passed, the histogram will be added to the plot contained by this `Axes` instance. Otherwise, one will be created. logy : bool If True, the y-axis will be plotted on log-scale logx : bool If True, the logarithm of the `data` input will be taken prior to creating the histogram Returns ------- fig : :py:class:`matplotlib.figure.Figure` ax : :py:class:`matplotlib.axes.Axes` hist : # if logx: # data = da.log10(data) #if normalize: # hist = hist/hist.max() # Create an axis if it doesnt exists # self.ax.step(edges[:-1], h.compute(), color='r') # ax.legend(loc='best') Generate a 2D KDE for the given parameters. 
Parameters ---------- parameter1 : `numpy.array` X-axis variable parameter2 : `numpy.array` Y-axis variable normtype : {'log', 'linear', 'sqrt'} Normalization type to apply to the data interval : tuple Limits of the interval to use when computing the image scaling xlim : tuple X-limits to use for the plot and the KDE grid ylim : tuple Y-limits to use for the plot and the KDE grid gridsize : int Step-size for the grid Returns ------- fig : :py:class:`matplotlib.figure.Figure` ax : :py:class:`matplotlib.axes.Axes` surface : numpy.array The KDE surface plot # Generate a grid to compute the KDE over Generate a periodogram of the incident CR rate Parameters ---------- df : :py:class:`pandas.DataFrame` Dataframe containing all of the statistics for the incident CR rate legend_label : str Label name for the data to display in the plot's legend ax : :py:class:`matplotlib.axes.Axes` If passed, the histogram will be added to the plot contained by this `Axes` instance. Otherwise, one will be created. window : str String alias for the time period representing the size of the moving window. Some common ones are listed below: - 'W', one week window - '15D', 15 day window - 'M', one month window A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_ min_periods : int Minimum number of datapoints that must be in a given window Returns ------- frequency : :py:class:`numpy.array` or :py:class:`astropy.Quantity` An array of all the frequencies consider in the periodogram power : :py:class: `numpy.array` An array of the spectral power density associated with frequency ax : `matplotlib.axes.Axes` The `Axes` for the corresponding plot Plot the observed cosmic ray rate as a function of time. 
Parameters ---------- df : `pandas.DataFrame` DataFrame containing the incident cosmic ray rate information legend_label : str Label to use for the dataset in the plot legend ax : `matplotlib.axes.Axes` An instance of a plot to add the current dataset too i : int Integer used to determine the color of the points used in the scatter plot smooth_type : {'rolling', 'resample'} Type of smoothing to apply to the cosmic ray rate dataset. - :py:meth:`pandas.DataFrame.rolling()` - :py:meth:`pandas.DataFrame.resample()` window : str String alias for the time period representing the size of the moving window. Some common ones are listed below: - 'W', one week window - '15D', 15 day window - 'M', one month window A complete list may be found `here <http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_ min_periods : int Minimum number of datapoints that must be in a given window Returns ------- fig : `matplotlib.figure.Figure` ax : `matplotlib.axes.Axes` # Get the long exposures with reliable statistics #df = self._perform_SAA_cut(df, key='start') # Smooth the cosmic ray rate # Color cycle to use for repeated use of ax argument # Make the scatter plot # ax.set_xlabel('Date') # ax.set_title('Smoothed Cosmic Ray Rate') # Set the background map up #self.map.drawcoastlines() #self.map.fillcontinents() # Draw the meridians # lats and longs are returned as a dictionary # keys contain the plt.Line2D instances # cycle through these lines and set the desired style # Get the model for the SAA # Generate an SAA contour # Ensure the polygon representing the SAA is a closed curve by adding # the starting points to the end of the list of lat/lon coords # df = self.perform_SAA_cut(df=df, key=key) #df = df[df['integration_time'] > 800] # lat1, lon1, rate1 = lat[rate >0], lon[rate >0], rate[rate>0] # LOG.info('{} {} {}'.format(len(lat), len(lon), len(rate))) # median = np.median(rate) # std = np.std(rate) #im = self.map.contourf(lon_grid, lat_grid, rate, 
norm=norm, cmap='viridis') # Plot the path of HST #self.map.plot( # orbital_path1.metadata['longitude'], # orbital_path1.metadata['latitude'],lw=1.25, # label=f'Int. Time: {1000:.1f}s', color='k', ls='-' #) # for i in range(len(ax1_legend.legendHandles)): # ax1_legend.legendHandles[i]._sizes = [30] #cbar_tick_labels = [f'<x>-{thresh}$\sigma$', '<x>', f'<x>+{thresh}$\sigma$'] #cbar_ticks = [mean - thresh*std,mean, mean + thresh*std] # cbar.ax.set_xticklabels(cbar.ax.get_xticklabels(), # fontweight='medium',fontsize=8) # Plot configuration # gl.xlocator = mticker.FixedLocator([-180, -45, 0, 45, 180]) # Calculate the B field grid # Evenly space grid with 1 degree resolution in both Latitude and Longitude # Get the CR rate information # Get average statistics to generate contour Retrieve solar cycle information Parameters ---------- variable ax smoothed Returns ------- # if __name__ == '__main__': # main() | 2.335313 | 2 |
Lane_process.py | bobd988/laneline-advanced | 0 | 6622035 | <filename>Lane_process.py
import os
import numpy as np
import cv2
import pickle
import glob
from moviepy.editor import VideoFileClip
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
CAMERA_PARAMETERS_FILE = "parameter_camera.pkl"
WARP_PARAMETERS_FILE = "parameter_warp.pkl"
ret, mtx, dist, rvecs, tvecs = (None, None, None, None, None)
# undistort image
def undistort_image(img):
    """Return an undistorted copy of ``img`` using cached camera intrinsics.

    The calibration matrices are loaded lazily from CAMERA_PARAMETERS_FILE;
    if the file cannot be read, the camera is calibrated from the chessboard
    images in 'camera_cal' and the freshly written parameters are loaded.

    Parameters
    ----------
    img : np.ndarray
        Distorted input image (as read by cv2).

    Returns
    -------
    np.ndarray
        The undistorted image.
    """
    global ret, mtx, dist, rvecs, tvecs
    if mtx is None or dist is None:
        try:
            with open(CAMERA_PARAMETERS_FILE, "rb") as fh:
                (ret, mtx, dist, rvecs, tvecs) = pickle.load(fh)
        except (OSError, pickle.UnpicklingError, EOFError):
            calibrate_camera('camera_cal')
            # BUG FIX: previously the globals stayed None after calibrating
            # (calibrate_camera only wrote the pickle file), so cv2.undistort
            # was called with None matrices. Reload the written parameters.
            with open(CAMERA_PARAMETERS_FILE, "rb") as fh:
                (ret, mtx, dist, rvecs, tvecs) = pickle.load(fh)
    return cv2.undistort(img, mtx, dist, None, mtx)
# Calibrate camera using the OpenCv chessboad method
def calibrate_camera(folder, nx=6, ny=9, show_corners=False):
    """Calibrate the camera from chessboard images and persist the result.

    Detects chessboard corners in every image in ``folder``, runs
    cv2.calibrateCamera, pickles (ret, mtx, dist, rvecs, tvecs) to
    CAMERA_PARAMETERS_FILE and updates this module's cached globals so
    callers such as undistort_image can use the parameters immediately.

    Parameters
    ----------
    folder : str
        Directory containing the calibration images.
    nx, ny : int
        Number of inner corners per chessboard row/column.
    show_corners : bool
        If True, display each detected corner set with matplotlib.
    """
    global ret, mtx, dist, rvecs, tvecs
    # Reference object points on the z=0 plane, one per inner corner.
    # BUG FIX: the grid was hard-coded to 9x6, ignoring the nx/ny arguments.
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2) if (nx, ny) == (9, 6) \
        else np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    objpoints = []  # 3-D points in real-world space
    imgpoints = []  # 2-D corner locations in image coordinates
    for fname in os.listdir(folder):
        print(fname)
        img = cv2.imread(folder + '/' + fname)
        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        # If found, record the object/image point correspondence
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
            if show_corners:
                # Draw and display the corners
                cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
                plt.imshow(img)
                plt.savefig('output_images/corners.png', dpi=100)
                plt.show()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    # Persist so later runs can skip the (slow) calibration step.
    with open(CAMERA_PARAMETERS_FILE, 'wb') as output:
        pickle.dump((ret, mtx, dist, rvecs, tvecs), output)
# Read Warp parameter based on src_points and dst_points
def warpImageParameters(src_points, dst_points):
    """Compute forward and inverse perspective-transform matrices.

    Parameters
    ----------
    src_points, dst_points : np.ndarray
        Corresponding quadrilateral corner points in the source and
        destination (bird's-eye) images.

    Returns
    -------
    tuple of np.ndarray
        (forward, inverse) 3x3 perspective-transform matrices.
    """
    forward = cv2.getPerspectiveTransform(src_points, dst_points)
    inverse = cv2.getPerspectiveTransform(dst_points, src_points)
    return forward, inverse
# Convert to HLS color space
def hls_color_thresh(img, threshLow, threshHigh):
    """Binary mask of pixels whose three channels all lie within bounds.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    threshLow, threshHigh : sequence of 3 numbers
        Inclusive per-channel lower/upper bounds.

    Returns
    -------
    np.ndarray
        Float array of 0/1 with the same height/width as img.
    """
    # NOTE(review): despite the HLS naming, this converts to HSV
    # (cv2.COLOR_RGB2HSV), so caller-supplied thresholds are interpreted in
    # HSV space -- confirm which color space is intended.
    imgHLS = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    # Return a binary image of threshold
    binary_output = np.zeros((img.shape[0], img.shape[1]))
    binary_output[
        (imgHLS[:, :, 0] >= threshLow[0]) & (imgHLS[:, :, 0] <= threshHigh[0]) & (imgHLS[:, :, 1] >= threshLow[1]) & (
                imgHLS[:, :, 1] <= threshHigh[1]) & (imgHLS[:, :, 2] >= threshLow[2]) & (
                imgHLS[:, :, 2] <= threshHigh[2])] = 1
    return binary_output
def sobel_x(img, sobel_kernel=3, min_thres=20, max_thres=100):
    """Threshold the x-direction Sobel gradient of the L and S channels.

    A pixel is set in the output when the scaled x-gradient of either the
    lightness or the saturation channel falls within [min_thres, max_thres].

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sobel_kernel : int
        Sobel aperture size.
    min_thres, max_thres : int
        Inclusive bounds on the scaled (0-255) gradient.

    Returns
    -------
    np.ndarray
        uint8 mask of 0/1 values.
    """
    # Convert to HLS; gradients are taken on L (index 1) and S (index 2).
    imghsl = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Channels L and S from HLS
    sobelx1 = cv2.Sobel(imghsl[:, :, 1], cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobelx2 = cv2.Sobel(imghsl[:, :, 2], cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    # Scale to 8-bit (0 - 255) and convert to type = np.uint8
    # NOTE(review): no absolute value is taken before the uint8 cast, so
    # negative gradients wrap around; sibling functions use np.absolute --
    # confirm whether that is intended here.
    scaled_sobelx1 = np.uint8(255 * sobelx1 / np.max(sobelx1))
    scaled_sobelx2 = np.uint8(255 * sobelx2 / np.max(sobelx2))
    # Create a binary mask where mag thresholds are met
    binary_outputx1 = np.zeros_like(scaled_sobelx1)
    binary_outputx1[(scaled_sobelx1 >= min_thres) & (scaled_sobelx1 <= max_thres)] = 1
    binary_outputx2 = np.zeros_like(scaled_sobelx2)
    binary_outputx2[(scaled_sobelx2 >= min_thres) & (scaled_sobelx2 <= max_thres)] = 1
    # Union of the two channel masks.
    binary_output = np.zeros_like(scaled_sobelx1)
    binary_output[(binary_outputx1 == 1) | (binary_outputx2 == 1)] = 1
    # Return this mask as your binary_output image
    return binary_output
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose gradient magnitude lies within bounds.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sobel_kernel : int
        Sobel aperture size.
    mag_thresh : tuple of int
        Inclusive (low, high) bounds on the scaled (0-255) magnitude.

    Returns
    -------
    np.ndarray
        uint8 mask of 0/1 values.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Gradients along each axis.
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Overall gradient magnitude, rescaled onto 0-255 as uint8.
    magnitude = np.sqrt(grad_x ** 2 + grad_y ** 2)
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    lo, hi = mag_thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
# Direction threshold
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
    """Binary mask of pixels whose gradient direction lies within bounds.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sobel_kernel : int
        Sobel aperture size.
    thresh : tuple of float
        Inclusive (low, high) bounds on the gradient direction in radians.

    Returns
    -------
    np.ndarray
        Float mask of 0/1 values.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    grad_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    grad_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Direction from the absolute x/y gradient components (0..pi/2).
    direction = np.arctan2(np.absolute(grad_y), np.absolute(grad_x))
    lo, hi = thresh
    mask = np.zeros_like(direction)
    mask[(direction >= lo) & (direction <= hi)] = 1
    return mask
# Both Magnitude and direction threshold
def mag_dir_thresh(img, sobel_kernel=3, mag_thresh=(0, 255), dir_thresh=(0, np.pi / 2)):
    """Binary mask combining gradient-magnitude and gradient-direction cuts.

    Parameters
    ----------
    img : np.ndarray
        RGB image.
    sobel_kernel : int
        Sobel aperture size.
    mag_thresh : tuple of int
        Inclusive bounds on the scaled (0-255) gradient magnitude.
    dir_thresh : tuple of float
        Inclusive bounds on the gradient direction in radians.

    Returns
    -------
    np.ndarray
        uint8 mask of 0/1 values.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # BUG FIX: the Sobel operators were applied to the color `img` even
    # though `gray` was computed (and left unused), producing a 3-channel
    # mask; use the grayscale image as mag_thresh/dir_threshold do.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Calculate the magnitude
    gradmag = np.sqrt(sobelx ** 2 + sobely ** 2)
    # Gradient direction from absolute components
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # Scale to 8-bit (0 - 255) and convert to type = np.uint8
    scaled_sobel = np.uint8(255 * gradmag / np.max(gradmag))
    # Create a binary mask where both thresholds are met
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1]) & (absgraddir >= dir_thresh[0]) & (
            absgraddir <= dir_thresh[1])] = 1
    return binary_output
def fitlines(binary_warped):
    """Sliding-window lane search on a warped binary image.

    Returns (left_fit, right_fit, out_img): second-order polyfit
    coefficients x = f(y) for each lane ([] when no pixels were found) and
    a debug image with the search windows and lane pixels drawn in.
    """
    # Histogram of the bottom half locates the lane-base x positions.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Peaks left/right of centre seed the two window columns.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9
    window_height = int(binary_warped.shape[0] / nwindows)
    # Coordinates of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100  # half-width of each search window
    minpix = 50   # pixel count needed before re-centring a window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
            nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
            nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-centre the next window on the mean x of the pixels found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second-order polynomial to each side; [] marks "no fit".
    if len(leftx) == 0:
        left_fit = []
    else:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) == 0:
        right_fit = []
    else:
        right_fit = np.polyfit(righty, rightx, 2)
    # Colour the pixels used for each fit (left red, right blue).
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    if SHOW_IMAGE:
        show_image("Sliding window", out_img)
    return left_fit, right_fit, out_img
def fit_continuous(left_fit, right_fit, binary_warped):
    """Refit both lane polynomials by searching near the previous fits.

    Nonzero pixels within ``margin`` horizontal pixels of each previous
    second-order curve x = f(y) are collected and refit; an empty
    selection yields ``[]`` for that side.
    """
    margin = 100
    ys, xs = binary_warped.nonzero()
    ys = np.array(ys)
    xs = np.array(xs)

    def _refit(fit):
        # Horizontal position of the previous curve at every pixel's y.
        curve_x = fit[0] * ys ** 2 + fit[1] * ys + fit[2]
        selected = (xs > curve_x - margin) & (xs < curve_x + margin)
        px, py = xs[selected], ys[selected]
        # Keep the legacy empty-list sentinel when nothing was selected.
        return np.polyfit(py, px, 2) if len(px) else []

    return _refit(left_fit), _refit(right_fit)
# find Curvature
def curvature(left_fit, right_fit, binary_warped):
    """Return (left_radius, right_radius, center_offset) for the two fits.

    NOTE(review): the radius formula mixes pixel-space polynomial
    coefficients with a metres-per-pixel factor applied to y only, so the
    values are scaled approximations rather than true metric radii —
    confirm against a refit in world coordinates if accuracy matters.
    """
    # Evaluate curvature at the bottom row of the image.
    y_eval = float(binary_warped.shape[0] - 1)
    ym_per_pix = 25 / 720   # meters per pixel, y direction
    xm_per_pix = 3.7 / 700  # meters per pixel, x direction

    def _radius(fit):
        # R = (1 + f'(y)^2)^(3/2) / |f''(y)| with f(y) = a*y^2 + b*y + c.
        slope = 2 * fit[0] * y_eval * ym_per_pix + fit[1]
        return (1 + slope ** 2) ** 1.5 / np.absolute(2 * fit[0])

    def _x_at_bottom(fit):
        return fit[0] * 720 ** 2 + fit[1] * 720 + fit[2]

    # Offset of the lane midpoint at y=720 from the image centre (x=640).
    center = ((_x_at_bottom(left_fit) + _x_at_bottom(right_fit)) / 2 - 640) * xm_per_pix
    return _radius(left_fit), _radius(right_fit), center
# Draw line and return image
def drawLine(undist, warped, left_fit, right_fit):
    """Paint the detected lane onto ``undist``; return (overlay, birds-eye).

    Uses the module-level ``Minv_persp`` matrix to unwarp the drawing back
    into camera perspective before blending it over the frame.
    """
    h = warped.shape[0]
    ploty = np.linspace(0, h - 1, h)
    # Evaluate both second-order fits over the full image height.
    left_x = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_x = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    # Blank 3-channel canvas matching the warped image.
    blank = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((blank, blank, blank))
    # Left boundary top-to-bottom, right boundary bottom-to-top, so the
    # concatenation traces a closed polygon.
    pts_left = np.array([np.transpose(np.vstack([left_x, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_x, ploty])))])
    lane_poly = np.hstack((pts_left, pts_right))
    # Fill the lane area and stroke each boundary.
    cv2.fillPoly(color_warp, np.int_([lane_poly]), (0, 255, 0))
    cv2.polylines(color_warp, np.int32([pts_left]), color=(255, 0, 0), thickness=50, isClosed=False)
    cv2.polylines(color_warp, np.int32([pts_right]), color=(0, 0, 255), thickness=50, isClosed=False)
    # Unwarp the painted lane and blend it over the undistorted frame.
    unwarped = cv2.warpPerspective(color_warp, Minv_persp, (color_warp.shape[1], color_warp.shape[0]))
    blended = cv2.addWeighted(undist, 1, unwarped, 0.3, 0)
    return (blended, color_warp)
def sanity_check(left_fit, right_fit, minSlope, maxSlope):
    """Check that the two lane fits are roughly parallel.

    Compares the tangent slopes of the left and right second-order fits at
    y = 460 and y = 720; both absolute differences must lie in
    [minSlope, maxSlope].  A missing fit (empty list) fails the check.

    Returns:
        (status, d0, d1): status is a plain bool (the caller tests it with
        ``is True``); d0/d1 are the two slope differences, 0 when a fit is
        missing.
    """
    if len(left_fit) == 0 or len(right_fit) == 0:
        return (False, 0, 0)
    # Slope of a*y^2 + b*y + c is 2*a*y + b; compare at two heights.
    # (An unused metres-per-pixel constant was removed from the original.)
    d0 = np.abs((2 * left_fit[0] * 460 + left_fit[1]) - (2 * right_fit[0] * 460 + right_fit[1]))
    d1 = np.abs((2 * left_fit[0] * 720 + left_fit[1]) - (2 * right_fit[0] * 720 + right_fit[1]))
    # bool() keeps the return a Python bool, not numpy.bool_.
    status = bool(minSlope <= d0 <= maxSlope and minSlope <= d1 <= maxSlope)
    return (status, d0, d1)
def process_image(image):
    """Full per-frame lane-detection pipeline.

    Returns a 1280x576 uint8 composite: annotated frame on the left,
    threshold view top-right, birds-eye view bottom-right.

    Relies on module-level state: ``counter`` (0 forces a fresh
    sliding-window search), ``ref_left``/``ref_right`` (last sane fits),
    ``left_fit``/``right_fit``, and the perspective matrices
    ``M_persp``/``Minv_persp`` (recomputed every call).
    """
    # 1. Undistort with the calibration cached by undistort_image().
    img_undist = undistort_image(image)
    if SHOW_IMAGE:
        show_image("Undistort Image", img_undist)
    global counter
    global ref_left
    global ref_right
    global left_fit
    global right_fit
    global M_persp
    global Minv_persp
    # Perspective source/destination quads (hard-coded for 1280x720 frames).
    src = np.float32([[585, 450], [203, 720], [1127, 720], [695, 450]])
    dst = np.float32([[320, 0], [320, 720], [960, 720], [960, 0]])
    M_persp, Minv_persp = warpImageParameters(src, dst)
    # 2. Colour + gradient thresholding.
    # NOTE(review): these yellow/white ranges feed hls_color_thresh, which
    # actually converts RGB->HSV despite its name — the values appear tuned
    # for that space; confirm before changing either side.
    yellow_low = np.array([0, 100, 100])
    yellow_high = np.array([50, 255, 255])
    white_low = np.array([18, 0, 180])
    white_high = np.array([255, 80, 255])
    imgThres_yellow = hls_color_thresh(img_undist, yellow_low, yellow_high)
    imgThres_white = hls_color_thresh(img_undist, white_low, white_high)
    imgThr_sobelx = sobel_x(img_undist, 9, 80, 220)  # x-gradient mask
    img_mag_thr = np.zeros_like(imgThres_yellow)
    # Union of the three binary masks.
    img_mag_thr[(imgThres_yellow == 1) | (imgThres_white == 1) | (imgThr_sobelx == 1)] = 1
    if SHOW_IMAGE:
        show_image("Combine Color and sobel Image",img_mag_thr)
    # 3. Warp the combined mask to the birds-eye view.
    img_size = (img_mag_thr.shape[1], img_mag_thr.shape[0])
    binary_warped = cv2.warpPerspective(img_mag_thr, M_persp, img_size, flags=cv2.INTER_LINEAR)
    # 4. Lane fit: full sliding-window search on the first frame, then the
    #    cheaper search around the previous fits.
    if counter == 0:
        left_fit, right_fit, out_imgfit = fitlines(binary_warped)
    else:
        left_fit, right_fit = fit_continuous(left_fit, right_fit, binary_warped)
    if SHOW_IMAGE:
        ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
        right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.imshow(binary_warped)
        plt.show()
    # 5. Sanity check: fits must be roughly parallel.
    status_sanity, d0, d1 = sanity_check(left_fit, right_fit, 0, .55)
    if status_sanity is True:
        # Save as the last reliable fit.
        ref_left, ref_right = left_fit, right_fit
        counter += 1
    else:  # Fall back to the last reliable fit.
        left_fit, right_fit = ref_left, ref_right
    left_curv, right_curv, center_off = curvature(left_fit, right_fit, binary_warped)
    if SHOW_IMAGE:
        show_image("Warped Image", binary_warped)
    # 6. Draw the lane back onto the undistorted frame.
    img_merge, img_birds = drawLine(img_undist, binary_warped, left_fit, right_fit)
    # 7. Compose the output mosaic.
    img_out = np.zeros((576, 1280, 3), dtype=np.uint8)
    img_out[0:576, 0:1024, :] = cv2.resize(img_merge, (1024, 576))
    # b) Threshold view, replicated into all three channels.
    img_out[0:288, 1024:1280, 0] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 1] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 2] = cv2.resize(img_mag_thr * 255, (256, 288))
    # c) Birds-eye view.
    img_out[310:576, 1024:1280, :] = cv2.resize(img_birds, (256, 266))
    # 8. Annotate curvature radii and centre offset.
    TextL = "Left r: " + str(int(left_curv)) + " m"
    TextR = "Right r: " + str(int(right_curv)) + " m"
    TextC = "Center offset: " + str(round(center_off, 2)) + "m"
    fontScale = 1
    thickness = 2
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img_out, TextL, (30, 40), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, TextR, (30, 70), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, TextC, (30, 100), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, "Threshold view", (1070, 30), fontFace, .8, (200, 200, 0), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, "Birds view", (1080, 305), fontFace, .8, (200, 200, 0), thickness, lineType=cv2.LINE_AA)
    if SHOW_IMAGE:
        show_image("final Image", img_out)
    return img_out
def show_image(title, img):
    """Display ``img`` on a single large matplotlib axis titled ``title``.

    2-D arrays are rendered with the gray colormap; colour images as-is.
    """
    fig, ax = plt.subplots(1, 1, figsize=(30, 30))
    ax.set_title(title, fontsize=20)
    if len(img.shape) > 2:
        ax.imshow(img)
    else:
        # Grayscale image.
        ax.imshow(img, cmap='gray')
    plt.show()
if __name__ == "__main__":
    # NOTE(review): ``global`` at module level is a no-op; counter and
    # SHOW_IMAGE become module globals simply by being assigned here.
    global counter, SHOW_IMAGE
    counter = 0
    SHOW_IMAGE = False
    # Smoke-test the calibration on one chessboard image.
    img = cv2.imread("camera_cal/calibration1.jpg")
    img_undist = undistort_image(img)
    if SHOW_IMAGE:
        show_image("Undistort image example", img_undist)
    # Run the pipeline once on a single test frame (OpenCV loads BGR).
    img = cv2.imread("test_images/test6.jpg")
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img2 = process_image(imgRGB)
    # Process the full project video (skipped in interactive display mode).
    if SHOW_IMAGE is False:
        counter=0
        output = 'project_video_output.mp4'
        # Previously verified on the tricky off-track sub-clip:
        #clip1 = VideoFileClip("project_video.mp4").subclip(34, 43)
        # Complete run over the whole video.
        clip1 = VideoFileClip("project_video.mp4")
        out_clip = clip1.fl_image(process_image)
        out_clip.write_videofile(output, audio=False)
| <filename>Lane_process.py
import os
import numpy as np
import cv2
import pickle
import glob
from moviepy.editor import VideoFileClip
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
CAMERA_PARAMETERS_FILE = "parameter_camera.pkl"
WARP_PARAMETERS_FILE = "parameter_warp.pkl"
ret, mtx, dist, rvecs, tvecs = (None, None, None, None, None)
# undistort image
def undistort_image(img):
    """Undistort ``img`` using cached camera intrinsics.

    Loads (ret, mtx, dist, rvecs, tvecs) from CAMERA_PARAMETERS_FILE on
    first use; if the pickle is missing or unreadable it calibrates from
    the 'camera_cal' chessboard images and then loads the freshly written
    parameter file.
    """
    global ret, mtx, dist, rvecs, tvecs
    if mtx is None or dist is None:
        try:
            # ``with`` closes the handle (the original leaked it), and the
            # narrow except replaces a bare ``except:`` that hid real errors.
            with open(CAMERA_PARAMETERS_FILE, "rb") as fh:
                (ret, mtx, dist, rvecs, tvecs) = pickle.load(fh)
        except (OSError, pickle.UnpicklingError, EOFError, ValueError):
            # No usable cache: calibrate, then load the parameters that
            # calibrate_camera() just pickled.  (Previously the globals were
            # left as None after the fallback, so cv2.undistort received
            # mtx=None and failed on the first frame.)
            calibrate_camera('camera_cal')
            with open(CAMERA_PARAMETERS_FILE, "rb") as fh:
                (ret, mtx, dist, rvecs, tvecs) = pickle.load(fh)
    return cv2.undistort(img, mtx, dist, None, mtx)
# Calibrate camera using the OpenCV chessboard method
def calibrate_camera(folder, nx=9, ny=6, show_corners=False):
    """Calibrate the camera from nx-by-ny chessboard images in ``folder``.

    The results (ret, mtx, dist, rvecs, tvecs) are pickled to
    CAMERA_PARAMETERS_FILE for undistort_image() to reload.
    """
    # Object points for one nx x ny chessboard on the z=0 plane.
    # BUG FIX: previously hard-coded to 9x6, silently ignoring nx/ny.
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    objpoints = []  # 3-D points in world space, one copy per detected board
    imgpoints = []  # 2-D corner points in image space
    gray = None
    for fname in os.listdir(folder):
        print(fname)
        img = cv2.imread(folder + '/' + fname)
        # Convert to grayscale for corner detection.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Find the chessboard corners.
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret == True:
            objpoints.append(objp)
            imgpoints.append(corners)
            if show_corners:
                # Draw and display the detected corners.
                cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
                plt.imshow(img)
                plt.savefig('output_images/corners.png', dpi=100)
                plt.show()
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
    # Persist so undistort_image() can reload without recalibrating.
    # ``with`` guarantees the file is closed even if pickling raises.
    with open(CAMERA_PARAMETERS_FILE, 'wb') as output:
        pickle.dump((ret, mtx, dist, rvecs, tvecs), output)
# Read Warp parameter based on src_points and dst_points
def warpImageParameters(src_points, dst_points):
    """Return (forward, inverse) perspective-transform matrices."""
    forward = cv2.getPerspectiveTransform(src_points, dst_points)
    inverse = cv2.getPerspectiveTransform(dst_points, src_points)
    return forward, inverse
# Colour-space threshold (see NOTE below about the actual space used)
def hls_color_thresh(img, threshLow, threshHigh):
    """Per-channel colour threshold: 1 where all three channels are in range.

    NOTE(review): despite the function name and the original ``imgHLS``
    identifier, the conversion used is RGB->HSV; the yellow/white ranges in
    process_image() appear tuned for that space — confirm before "fixing"
    the conversion code.
    """
    converted = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
    mask = np.zeros((img.shape[0], img.shape[1]))
    # A pixel passes only if every channel is inside its closed range.
    in_range = np.ones(mask.shape, dtype=bool)
    for ch in range(3):
        in_range &= (converted[:, :, ch] >= threshLow[ch]) & (converted[:, :, ch] <= threshHigh[ch])
    mask[in_range] = 1
    return mask
def sobel_x(img, sobel_kernel=3, min_thres=20, max_thres=100):
    """Binary mask of x-gradient responses on the L and S channels of HLS.

    A pixel is 1 when the scaled absolute gradient of either channel falls
    in [min_thres, max_thres].
    """
    imghsl = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)

    def _channel_mask(channel):
        # One-sided Sobel in x on a single channel.
        sob = cv2.Sobel(channel, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
        # BUG FIX: take |gradient| before scaling.  Previously the signed
        # gradient was divided by its signed max and cast to uint8, so
        # negative responses wrapped around to large values.
        abs_sob = np.absolute(sob)
        scaled = np.uint8(255 * abs_sob / np.max(abs_sob))
        out = np.zeros_like(scaled)
        out[(scaled >= min_thres) & (scaled <= max_thres)] = 1
        return out

    mask_l = _channel_mask(imghsl[:, :, 1])  # L channel
    mask_s = _channel_mask(imghsl[:, :, 2])  # S channel
    # Union of the two channel masks.
    combined = np.zeros_like(mask_l)
    combined[(mask_l == 1) | (mask_s == 1)] = 1
    return combined
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Binary mask of pixels whose scaled gradient magnitude is in range."""
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    gy = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    magnitude = np.sqrt(gx ** 2 + gy ** 2)
    # Normalise to 0..255 and quantise to uint8 before thresholding.
    scaled = np.uint8(255 * magnitude / np.max(magnitude))
    lo, hi = mag_thresh
    mask = np.zeros_like(scaled)
    mask[(scaled >= lo) & (scaled <= hi)] = 1
    return mask
# Direction threshold
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
    """Binary (float) mask of pixels whose gradient direction
    arctan(|dy| / |dx|) — range [0, pi/2] — lies in the closed ``thresh``."""
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Take the gradient in x and y separately
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Take the absolute value of the x and y gradients (sign is irrelevant
    # for direction, and this keeps the angle in the first quadrant)
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    absgraddir = np.arctan2(abs_sobely, abs_sobelx)
    # Create a binary mask where direction thresholds are met
    binary_output = np.zeros_like(absgraddir)
    binary_output[(absgraddir >= thresh[0]) & (absgraddir <= thresh[1])] = 1
    return binary_output
# Both Magnitude and direction threshold
def mag_dir_thresh(img, sobel_kernel=3, mag_thresh=(0, 255), dir_thresh=(0, np.pi / 2)):
    """Binary mask combining gradient-magnitude and gradient-direction tests.

    A pixel is 1 only when its scaled gradient magnitude lies in
    ``mag_thresh`` AND its gradient direction lies in ``dir_thresh``.
    """
    # Convert to grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # BUG FIX: the Sobel operators previously ran on the 3-channel ``img``
    # even though ``gray`` was already computed; use the grayscale image.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # Gradient magnitude and direction (direction uses |dx|, |dy|).
    gradmag = np.sqrt(sobelx ** 2 + sobely ** 2)
    absgraddir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    # Scale magnitude to 8-bit (0 - 255).
    scaled_sobel = np.uint8(255 * gradmag / np.max(gradmag))
    # Mask where both thresholds are met.
    binary_output = np.zeros_like(scaled_sobel)
    binary_output[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])
                  & (absgraddir >= dir_thresh[0]) & (absgraddir <= dir_thresh[1])] = 1
    return binary_output
def fitlines(binary_warped):
    """Sliding-window lane search on a warped binary image.

    Returns (left_fit, right_fit, out_img): second-order polyfit
    coefficients x = f(y) for each lane ([] when no pixels were found) and
    a debug image with the search windows and lane pixels drawn in.
    """
    # Histogram of the bottom half locates the lane-base x positions.
    histogram = np.sum(binary_warped[binary_warped.shape[0] // 2:, :], axis=0)
    out_img = np.dstack((binary_warped, binary_warped, binary_warped)) * 255
    # Peaks left/right of centre seed the two window columns.
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # use the builtin int instead.
    midpoint = int(histogram.shape[0] / 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    nwindows = 9
    window_height = int(binary_warped.shape[0] / nwindows)
    # Coordinates of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    leftx_current = leftx_base
    rightx_current = rightx_base
    margin = 100  # half-width of each search window
    minpix = 50   # pixel count needed before re-centring a window
    left_lane_inds = []
    right_lane_inds = []
    for window in range(nwindows):
        win_y_low = binary_warped.shape[0] - (window + 1) * window_height
        win_y_high = binary_warped.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        cv2.rectangle(out_img, (win_xleft_low, win_y_low), (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low), (win_xright_high, win_y_high), (0, 255, 0), 2)
        # Identify the nonzero pixels inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) & (
            nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) & (
            nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Re-centre the next window on the mean x of the pixels found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second-order polynomial to each side; [] marks "no fit".
    if len(leftx) == 0:
        left_fit = []
    else:
        left_fit = np.polyfit(lefty, leftx, 2)
    if len(rightx) == 0:
        right_fit = []
    else:
        right_fit = np.polyfit(righty, rightx, 2)
    # Colour the pixels used for each fit (left red, right blue).
    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    if SHOW_IMAGE:
        show_image("Sliding window", out_img)
    return left_fit, right_fit, out_img
def fit_continuous(left_fit, right_fit, binary_warped):
    """Refit both lane polynomials using only pixels near the previous fits.

    Nonzero pixels within ``margin`` horizontal pixels of each previous
    second-order curve x = f(y) are refit; an empty selection yields ``[]``
    for that side (the caller's sanity check treats that as a failed fit).
    """
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100  # half-width of the search corridor around each curve
    left_lane_inds = ((nonzerox > (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] - margin)) & (
            nonzerox < (left_fit[0] * (nonzeroy ** 2) + left_fit[1] * nonzeroy + left_fit[2] + margin)))
    right_lane_inds = (
            (nonzerox > (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] - margin)) & (
            nonzerox < (right_fit[0] * (nonzeroy ** 2) + right_fit[1] * nonzeroy + right_fit[2] + margin)))
    # extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    # Fit a second order polynomial ([] when a side found no pixels)
    if len(leftx) == 0:
        left_fit_updated = []
    else:
        left_fit_updated = np.polyfit(lefty, leftx, 2)
    if len(rightx) == 0:
        right_fit_updated = []
    else:
        right_fit_updated = np.polyfit(righty, rightx, 2)
    return left_fit_updated, right_fit_updated
# find Curvature
def curvature(left_fit, right_fit, binary_warped):
    """Return (left_radius, right_radius, center_offset_m) for the two fits.

    NOTE(review): the radius mixes pixel-space coefficients with a y-only
    metres-per-pixel factor instead of refitting in world coordinates, so
    the radii are scaled approximations — confirm before relying on them.
    """
    ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
    y_eval = np.max(ploty)  # evaluate curvature at the bottom of the image
    ym_per_pix = 25 / 720  # meters per pixel, y direction
    xm_per_pix = 3.7 / 700  # meters per pixel, x direction
    # Radius of curvature R = (1 + f'(y)^2)^(3/2) / |f''(y)|
    left_curverad = ((1 + (2 * left_fit[0] * y_eval * ym_per_pix + left_fit[1]) ** 2) ** 1.5) / np.absolute(
        2 * left_fit[0])
    right_curverad = ((1 + (2 * right_fit[0] * y_eval * ym_per_pix + right_fit[1]) ** 2) ** 1.5) / np.absolute(
        2 * right_fit[0])
    # Offset of the lane midpoint (at y = 720) from the image centre (x = 640).
    center = (((left_fit[0] * 720 ** 2 + left_fit[1] * 720 + left_fit[2]) + (
            right_fit[0] * 720 ** 2 + right_fit[1] * 720 + right_fit[2])) / 2 - 640) * xm_per_pix
    return left_curverad, right_curverad, center
# Draw line and return image
def drawLine(undist, warped, left_fit, right_fit):
    """Paint the detected lane onto ``undist``; return (overlay, birds-eye).

    Uses the module-level ``Minv_persp`` matrix to unwarp the painted lane
    back into camera perspective before blending it over the frame.
    """
    # Blank 3-channel canvas matching the warped image.
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
    # Evaluate both second-order fits over the full image height.
    left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
    right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
    # Left boundary top-to-bottom, right boundary bottom-to-top, so the
    # concatenated points trace a closed polygon.
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    # Fill the lane area (green) and stroke each boundary (red/blue).
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    cv2.polylines(color_warp, np.int32([pts_left]), color=(255, 0, 0), thickness=50, isClosed=False)
    cv2.polylines(color_warp, np.int32([pts_right]), color=(0, 0, 255), thickness=50, isClosed=False)
    # Warp the painted lane back to the original camera perspective.
    newwarp = cv2.warpPerspective(color_warp, Minv_persp, (color_warp.shape[1], color_warp.shape[0]))
    # Blend the lane overlay over the undistorted frame.
    result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
    return (result, color_warp)
def sanity_check(left_fit, right_fit, minSlope, maxSlope):
    """Check that the two lane fits are roughly parallel.

    Compares the tangent slopes of the left and right second-order fits at
    y = 460 and y = 720; both absolute differences must lie in
    [minSlope, maxSlope].  A missing fit (empty list) fails the check.

    Returns:
        (status, d0, d1): status is a plain bool (the caller tests it with
        ``is True``); d0/d1 are the two slope differences, 0 when a fit is
        missing.
    """
    if len(left_fit) == 0 or len(right_fit) == 0:
        return (False, 0, 0)
    # Slope of a*y^2 + b*y + c is 2*a*y + b; compare at two heights.
    # (An unused metres-per-pixel constant was removed from the original.)
    d0 = np.abs((2 * left_fit[0] * 460 + left_fit[1]) - (2 * right_fit[0] * 460 + right_fit[1]))
    d1 = np.abs((2 * left_fit[0] * 720 + left_fit[1]) - (2 * right_fit[0] * 720 + right_fit[1]))
    # bool() keeps the return a Python bool, not numpy.bool_.
    status = bool(minSlope <= d0 <= maxSlope and minSlope <= d1 <= maxSlope)
    return (status, d0, d1)
def process_image(image):
    """Full per-frame lane-detection pipeline.

    Returns a 1280x576 uint8 composite: annotated frame on the left,
    threshold view top-right, birds-eye view bottom-right.

    Relies on module-level state: ``counter`` (0 forces a fresh
    sliding-window search), ``ref_left``/``ref_right`` (last sane fits),
    ``left_fit``/``right_fit``, and the perspective matrices
    ``M_persp``/``Minv_persp`` (recomputed every call).
    """
    # 1. Undistort with the calibration cached by undistort_image().
    img_undist = undistort_image(image)
    if SHOW_IMAGE:
        show_image("Undistort Image", img_undist)
    global counter
    global ref_left
    global ref_right
    global left_fit
    global right_fit
    global M_persp
    global Minv_persp
    # Perspective source/destination quads (hard-coded for 1280x720 frames).
    src = np.float32([[585, 450], [203, 720], [1127, 720], [695, 450]])
    dst = np.float32([[320, 0], [320, 720], [960, 720], [960, 0]])
    M_persp, Minv_persp = warpImageParameters(src, dst)
    # 2. Colour + gradient thresholding.
    # NOTE(review): these yellow/white ranges feed hls_color_thresh, which
    # actually converts RGB->HSV despite its name — the values appear tuned
    # for that space; confirm before changing either side.
    yellow_low = np.array([0, 100, 100])
    yellow_high = np.array([50, 255, 255])
    white_low = np.array([18, 0, 180])
    white_high = np.array([255, 80, 255])
    imgThres_yellow = hls_color_thresh(img_undist, yellow_low, yellow_high)
    imgThres_white = hls_color_thresh(img_undist, white_low, white_high)
    imgThr_sobelx = sobel_x(img_undist, 9, 80, 220)  # x-gradient mask
    img_mag_thr = np.zeros_like(imgThres_yellow)
    # Union of the three binary masks.
    img_mag_thr[(imgThres_yellow == 1) | (imgThres_white == 1) | (imgThr_sobelx == 1)] = 1
    if SHOW_IMAGE:
        show_image("Combine Color and sobel Image",img_mag_thr)
    # 3. Warp the combined mask to the birds-eye view.
    img_size = (img_mag_thr.shape[1], img_mag_thr.shape[0])
    binary_warped = cv2.warpPerspective(img_mag_thr, M_persp, img_size, flags=cv2.INTER_LINEAR)
    # 4. Lane fit: full sliding-window search on the first frame, then the
    #    cheaper search around the previous fits.
    if counter == 0:
        left_fit, right_fit, out_imgfit = fitlines(binary_warped)
    else:
        left_fit, right_fit = fit_continuous(left_fit, right_fit, binary_warped)
    if SHOW_IMAGE:
        ploty = np.linspace(0, binary_warped.shape[0] - 1, binary_warped.shape[0])
        left_fitx = left_fit[0] * ploty ** 2 + left_fit[1] * ploty + left_fit[2]
        right_fitx = right_fit[0] * ploty ** 2 + right_fit[1] * ploty + right_fit[2]
        plt.plot(left_fitx, ploty, color='yellow')
        plt.plot(right_fitx, ploty, color='yellow')
        plt.imshow(binary_warped)
        plt.show()
    # 5. Sanity check: fits must be roughly parallel.
    status_sanity, d0, d1 = sanity_check(left_fit, right_fit, 0, .55)
    if status_sanity is True:
        # Save as the last reliable fit.
        ref_left, ref_right = left_fit, right_fit
        counter += 1
    else:  # Fall back to the last reliable fit.
        left_fit, right_fit = ref_left, ref_right
    left_curv, right_curv, center_off = curvature(left_fit, right_fit, binary_warped)
    if SHOW_IMAGE:
        show_image("Warped Image", binary_warped)
    # 6. Draw the lane back onto the undistorted frame.
    img_merge, img_birds = drawLine(img_undist, binary_warped, left_fit, right_fit)
    # 7. Compose the output mosaic.
    img_out = np.zeros((576, 1280, 3), dtype=np.uint8)
    img_out[0:576, 0:1024, :] = cv2.resize(img_merge, (1024, 576))
    # b) Threshold view, replicated into all three channels.
    img_out[0:288, 1024:1280, 0] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 1] = cv2.resize(img_mag_thr * 255, (256, 288))
    img_out[0:288, 1024:1280, 2] = cv2.resize(img_mag_thr * 255, (256, 288))
    # c) Birds-eye view.
    img_out[310:576, 1024:1280, :] = cv2.resize(img_birds, (256, 266))
    # 8. Annotate curvature radii and centre offset.
    TextL = "Left r: " + str(int(left_curv)) + " m"
    TextR = "Right r: " + str(int(right_curv)) + " m"
    TextC = "Center offset: " + str(round(center_off, 2)) + "m"
    fontScale = 1
    thickness = 2
    fontFace = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(img_out, TextL, (30, 40), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, TextR, (30, 70), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, TextC, (30, 100), fontFace, fontScale, (255, 255, 255), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, "Threshold view", (1070, 30), fontFace, .8, (200, 200, 0), thickness, lineType=cv2.LINE_AA)
    cv2.putText(img_out, "Birds view", (1080, 305), fontFace, .8, (200, 200, 0), thickness, lineType=cv2.LINE_AA)
    if SHOW_IMAGE:
        show_image("final Image", img_out)
    return img_out
def show_image(title, img):
    """Render ``img`` on a single large matplotlib axis titled ``title``;
    2-D arrays are shown with the gray colormap."""
    f, axes = plt.subplots(1, 1, figsize=(30, 30))
    axes.set_title(title, fontsize=20)
    if len(img.shape) > 2:
        axes.imshow(img)
    else:
        # Grayscale image
        axes.imshow(img, cmap='gray')
    plt.show()
if __name__ == "__main__":
    # NOTE(review): ``global`` at module level is a no-op; counter and
    # SHOW_IMAGE become module globals simply by being assigned here.
    global counter, SHOW_IMAGE
    counter = 0
    SHOW_IMAGE = False
    # Smoke-test the calibration on one chessboard image.
    img = cv2.imread("camera_cal/calibration1.jpg")
    img_undist = undistort_image(img)
    if SHOW_IMAGE:
        show_image("Undistort image example", img_undist)
    # Run the pipeline once on a single test frame (OpenCV loads BGR).
    img = cv2.imread("test_images/test6.jpg")
    imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img2 = process_image(imgRGB)
    # Process the full project video (skipped in interactive display mode).
    if SHOW_IMAGE is False:
        counter=0
        output = 'project_video_output.mp4'
        # Previously verified on the tricky off-track sub-clip:
        #clip1 = VideoFileClip("project_video.mp4").subclip(34, 43)
        # Complete run over the whole video.
        clip1 = VideoFileClip("project_video.mp4")
        out_clip = clip1.fl_image(process_image)
        out_clip.write_videofile(output, audio=False)
| en | 0.762283 | # undistort image # try to load from file # Calibrate camera using the OpenCv chessboad method # prepare object points # Convert to grayscale # Find the chessboard corners # If found, draw corners #Append corners and object # Draw and display the corners # Read Warp parameter based on src_points and dst_points # Convert to HLS color space # Return a binary image of threshold # Apply the following steps to img # Convert to grayscale # Channels L and S from HLS # Scale to 8-bit (0 - 255) and convert to type = np.uint8 # Create a binary mask where mag thresholds are met # Return this mask as your binary_output image # Convert to grayscale # Take the gradient in x and y separately # Calculate the magnitude # Scale to 8-bit (0 - 255) and convert to type = np.uint8 # Create a binary mask where mag thresholds are met # Return this mask as your binary_output image # Direction threshold # Convert to grayscale # Take the gradient in x and y separately # Take the absolute value of the x and y gradients # Create a binary mask where direction thresholds are met # Both Magnitude and direction threshold # Convert to grayscale # Take the gradient in x and y separately # Calculate the magnitude # Calc angle # Scale to 8-bit (0 - 255) and convert to type = np.uint8 # Create a binary mask where mag thresholds are met # histogram of the bottom half of the image # Peak of the left and right histogram # nonzero pixels in the image # Identify the nonzero pixels # Extract left and right line pixel positions # Fit a second order polynomial to each # extract left and right line pixel positions # Fit a second order polynomial # find Curvature # meters per pixel # meters per pixel # Calculate the new radii of curvature # Draw line and return image # Create an image to draw the lines on # Fit new polynomials # Recast the x and y points # Draw the lane onto the warped blank image # Warp the blank back to original image # Combine the result # check if left and right fits exists 
# Calculates the tangent between left and right in two points, and check if it is in a reasonable threshold # meters per pixel in x dimension # Difference of slope # Calibration arrays pre-calculated #test sample src and dst for transformation # 2.Magnitude Threshold # Threshold color # Sobel x # imgThresColor[(imgThres_yellow==1) | (imgThres_white==1)] =1 # 3. Birds-eye # Perspective array pre-calculated # 4. Detect lanes and return fit curves # Calc curvature and center # Save as last reliable fit # Use the last realible fit # Warp back to original and merge with image # Composition of images to final display # b) Threshold # c)Birds eye view # Write curvature and center in image #Gray image # init flags #test calibration # test image file #test video file #verified ok for previous off track part #clip1 = VideoFileClip("project_video.mp4").subclip(34, 43) # complete test | 2.73009 | 3 |
lib/array_to_delimited_values.py | erictheise/trctr-pllr | 0 | 6622036 | import io, csv
from flask import make_response
def array_to_delimited_values(array, delimiter):
    """Serialise rows of ``array`` as delimiter-separated text and wrap the
    result in a Flask response; non-numeric fields are quoted."""
    buffer = io.StringIO()
    csv.writer(buffer, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC).writerows(array)
    response = make_response(buffer.getvalue())
    # Headers for a CSV file download were left disabled in the original:
    # output.headers["Content-Disposition"] = "attachment; filename=trctr-pllr.csv"
    # output.headers["Content-type"] = "text/csv"
    return response
| import io, csv
from flask import make_response
def array_to_delimited_values(array, delimiter):
    """Serialise rows of ``array`` as delimiter-separated text and wrap the
    result in a Flask response; non-numeric fields are quoted."""
    si = io.StringIO()
    writer = csv.writer(si, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC)
    writer.writerows(array)
    output = make_response(si.getvalue())
    # Headers for a CSV file download were left disabled:
    # output.headers["Content-Disposition"] = "attachment; filename=trctr-pllr.csv"
    # output.headers["Content-type"] = "text/csv"
    return output
| en | 0.415418 | # output.headers["Content-Disposition"] = "attachment; filename=trctr-pllr.csv" # output.headers["Content-type"] = "text/csv" | 2.949726 | 3 |
cparser/Node.py | rusphantom/cparser | 3 | 6622037 | from cparser import analyzer
class NodeChildIterator():
    """Iterator over the direct children of a Node.

    Yields node.get(0), node.get(1), ... up to node.size() - 1.
    """

    def __init__(self, node):
        self.node = node
        self.ptr = 0  # index of the next child to yield

    def __iter__(self):
        # Added so the object satisfies the full iterator protocol (an
        # iterator must also be iterable, e.g. for list()/zip()/unpacking).
        return self

    def __next__(self):
        if self.ptr >= self.node.size():
            raise StopIteration
        self.ptr += 1
        return self.node.get(self.ptr - 1)
def create(name, node=None):
    """Return *node* when one is supplied, otherwise build a fresh
    Node called *name*."""
    return Node(name) if node is None else node
class Node():
    """A named n-ary tree node used by the parser to build syntax trees.

    Children are kept in insertion order; iterating a Node yields its
    children via NodeChildIterator.
    """

    def __init__(self, name):
        self.name = name        # grammar-rule / token name of this node
        self.children = []      # ordered child nodes
        self.parent = None      # set when this node is appended to another

    def __iter__(self):
        # Iterate directly over this node's children.
        return NodeChildIterator(self)

    def count(self):
        """Number of direct children."""
        return len(self.children)

    def size(self):
        # Alias for count(); this is what NodeChildIterator calls.
        return self.count()

    def get(self, idx):
        """Return the idx-th child (list indexing semantics)."""
        return self.children[idx]

    def append(self, el, concate=False):
        """Attach a parse result to this node.

        ``el`` may be a plain node, a tuple ``(value, ...)``, or a nested
        tuple ``((value, comments), ...)`` carrying comment nodes collected
        alongside the value; an optional trailing bool acts as a success
        flag (False means: append nothing).  With ``concate`` True,
        ``value`` is treated as an iterable of nodes and each is appended
        individually.  Comment nodes always follow the value(s).

        :returns: the success flag.
        """
        flag = True
        comments = ()
        value = None
        if type(el) is tuple:
            if type(el[0]) is tuple:
                # ((value, comments), ...) form
                value = el[0][0]
                comments = el[0][1]
            else:
                value = el[0]
                if type(el[1]) is list:
                    # (value, comment_list, ...) form
                    comments = el[1]
            if type(el[-1]) is bool:
                # trailing success flag
                flag = el[-1]
        else:
            value = el
        if flag:
            if value:
                if concate:
                    for e in value:
                        e.parent = self
                        self.children.append(e)
                else:
                    value.parent = self
                    self.children.append(value)
            for e in comments:
                e.parent = self
                self.children.append(e)
        return flag

    def concat(self, el):
        """append() with concate=True: splice a sequence of nodes in."""
        return self.append(el, True)

    def skip(self, el):
        """Reduce a ``(value, flag)`` pair to its flag; pass anything else through."""
        if type(el) is tuple and len(el) == 2 and type(el[1]) is bool:
            return el[1]
        return el

    def normalize(self):
        """Collapse a single-child node to that child; otherwise return self."""
        if len(self.children) == 1:
            return self.children[0]
        return self

    def dropExceptionIfEmpty(self):
        """Raise analyzer.AnalyzerException when this node is childless."""
        if self.count() == 0:
            raise analyzer.AnalyzerException(self.name, 'empty node')
        return self

    def __str__(self):
        # name(child1, child2, ...) -- recursive textual dump of the subtree.
        s = self.name + '(' + ', '.join(str(child) for child in self.children) + ')'
        return s
| from cparser import analyzer
class NodeChildIterator():
def __init__(self, node):
self.node = node
self.ptr = 0
def __next__(self):
if self.ptr >= self.node.size():
raise StopIteration
else:
self.ptr += 1
return self.node.get(self.ptr-1)
def create(name, node=None):
if node is not None:
return node
return Node(name)
class Node():
def __init__(self, name):
self.name = name
self.children = []
self.parent = None
def __iter__(self):
return NodeChildIterator(self)
def count(self):
return len(self.children)
def size(self):
return self.count()
def get(self, idx):
return self.children[idx]
def append(self, el, concate=False):
flag = True
comments = ()
value = None
if type(el) is tuple:
if type(el[0]) is tuple:
value = el[0][0]
comments = el[0][1]
else:
value = el[0]
if type(el[1]) is list:
comments = el[1]
if type(el[-1]) is bool:
flag = el[-1]
else:
value = el
if flag:
if value:
if concate:
for e in value:
e.parent = self
self.children.append(e)
else:
value.parent = self
self.children.append(value)
for e in comments:
e.parent = self
self.children.append(e)
return flag
def concat(self, el):
return self.append(el, True)
def skip(self, el):
if type(el) is tuple and len(el) == 2 and type(el[1]) is bool:
return el[1]
return el
def normalize(self):
if len(self.children) == 1:
return self.children[0]
return self
def dropExceptionIfEmpty(self):
if self.count() == 0:
raise analyzer.AnalyzerException(self.name, 'empty node')
return self
def __str__(self):
s = self.name + '(' + ', '.join(str(child) for child in self.children) + ')'
return s
| none | 1 | 2.970099 | 3 | |
aoc2018/aoc2018.py | TonyFlury/aoc2018 | 0 | 6622038 | <filename>aoc2018/aoc2018.py<gh_stars>0
#!/usr/bin/env python
# coding=utf-8
"""
# aoc2018 : Advent of Code 2018
Summary :
Solve the various AOC challenges from 2018
Use Case :
Solve AOC2018
Testable Statements :
...
"""
from . version import *
| <filename>aoc2018/aoc2018.py<gh_stars>0
#!/usr/bin/env python
# coding=utf-8
"""
# aoc2018 : Advent of Code 2018
Summary :
Solve the various AOC challenges from 2018
Use Case :
Solve AOC2018
Testable Statements :
...
"""
from . version import *
| en | 0.516442 | #!/usr/bin/env python # coding=utf-8 # aoc2018 : Advent of Code 2018 Summary : Solve the various AOC challenges from 2018 Use Case : Solve AOC2018 Testable Statements : ... | 1.119033 | 1 |
scripts/helpful_scripts.py | AndreyGordeev1234/solidity-fund-me-app | 0 | 6622039 | <reponame>AndreyGordeev1234/solidity-fund-me-app
from brownie import network, accounts, config, MockV3Aggregator
FORKED_LOCAL_ENVIRONMENTS = ["mainnet-fork", "mainnet-fork-dev"]
LOCAL_BLOCKCHAIN_ENVIRONMENTS = ["development", "ganache-local"]
DECIMALS = 8
STARTING_PRICE = 200000000000
def get_account():
    """Return the account used for contract deployments.

    On a local or forked development chain the first unlocked test
    account is used; on a live network the private key configured under
    ``wallets.from_key`` in brownie-config is loaded instead.
    """
    active_network = network.show_active()  # query brownie once, reuse below
    if active_network in LOCAL_BLOCKCHAIN_ENVIRONMENTS or active_network in FORKED_LOCAL_ENVIRONMENTS:
        return accounts[0]
    return accounts.add(config["wallets"]["from_key"])
def deploy_mocks():
    """Deploy a MockV3Aggregator price feed unless one already exists.

    Only needed on local chains where no real Chainlink feed is
    available; the mock is initialised with DECIMALS / STARTING_PRICE.
    """
    print(f"The active network is {network.show_active()}")
    print("Deploying mocks...")
    # Deploy at most one mock; the brownie container tracks prior
    # deployments.  (len() can never be negative, so test for zero.)
    if len(MockV3Aggregator) == 0:
        MockV3Aggregator.deploy(
            DECIMALS, STARTING_PRICE, {"from": get_account()})
    print("Mocks deployed")
| from brownie import network, accounts, config, MockV3Aggregator
FORKED_LOCAL_ENVIRONMENTS = ["mainnet-fork", "mainnet-fork-dev"]
LOCAL_BLOCKCHAIN_ENVIRONMENTS = ["development", "ganache-local"]
DECIMALS = 8
STARTING_PRICE = 200000000000
def get_account():
if network.show_active() in LOCAL_BLOCKCHAIN_ENVIRONMENTS or network.show_active() in FORKED_LOCAL_ENVIRONMENTS:
return accounts[0]
return accounts.add(config["wallets"]["from_key"])
def deploy_mocks():
print(f"The active network is {network.show_active()}")
print("Deploying mocks...")
if len(MockV3Aggregator) <= 0:
MockV3Aggregator.deploy(
DECIMALS, STARTING_PRICE, {"from": get_account()})
print("Mocks deployed") | none | 1 | 2.012769 | 2 | |
nomadgen/api/docker_task.py | smintz/nomadgen | 19 | 6622040 | <reponame>smintz/nomadgen
from nomadgen.jobspec.ttypes import DriverConfig, DockerDriverAuth
from nomadgen.api.task import NGTask
class DockerTask(NGTask):
    """NGTask specialization that runs its workload via Nomad's docker driver."""

    def setTaskDriver(self):
        # Select the docker driver and describe the container to run.
        # ``self.image`` / ``self.force_pull_image`` are expected to be set
        # by NGTask -- TODO confirm against nomadgen.api.task.
        self.Driver = "docker"
        self.Config = DriverConfig(
            image=self.image, force_pull=self.force_pull_image, port_map=[]
        )

    def setDockerAuth(self, username, password):
        # Registry credentials used when pulling a private image.
        self.Config.auth = [
            DockerDriverAuth(username=username, password=password)
        ]
| from nomadgen.jobspec.ttypes import DriverConfig, DockerDriverAuth
from nomadgen.api.task import NGTask
class DockerTask(NGTask):
def setTaskDriver(self):
self.Driver = "docker"
self.Config = DriverConfig(
image=self.image, force_pull=self.force_pull_image, port_map=[]
)
def setDockerAuth(self, username, password):
self.Config.auth = [
DockerDriverAuth(username=username, password=password)
] | none | 1 | 2.074787 | 2 | |
check_json.py | alexgorin/dc_bots | 0 | 6622041 | #!/usr/bin/env python
# Validate that a file parses as JSON (Python 2 script).
# Usage: check_json.py <filename>
import json
import sys

filename = sys.argv[1]  # path given on the command line
with open(filename) as f:
    data = f.read()
# json.loads raises ValueError on malformed input, aborting before the
# success message below; the parsed value itself is discarded.
json.loads(data)
print "%s is a correct json file" % filename
| #!/usr/bin/env python
import json
import sys
filename = sys.argv[1]
with open(filename) as f:
data = f.read()
json.loads(data)
print "%s is a correct json file" % filename
| ru | 0.26433 | #!/usr/bin/env python | 2.729214 | 3 |
bolshoi/fixLightconeRAs.py | sniemi/SamPy | 5 | 6622042 | import glob
import os
def fixLigconeRAs(files):
'''
Fixes the problem that some lightcones
had negative RAs. The fix is extemely crude
one only adds 360 to the RA value.
:param files: a list of files to be fixed
:type files: list
'''
for filename in files:
removeFile = True
fh = open(filename, 'r')
out = open(filename + '_mod', 'w')
line = fh.readline()
while line:
if line.startswith('#'):
out.write(line)
else:
tmp = line.split()
ra = float(tmp[2])
if ra < 0.0:
removeFile = False
newra = ra + 360.0
newstr = '{0:>s} {1:>s} {2:f} {3:>s} {4:>s}\n'.format(tmp[0], tmp[1], newra, tmp[3], tmp[4])
out.write(newstr)
line = fh.readline()
fh.close()
out.close()
if removeFile:
os.remove(filename + '_mod')
else:
print 'File {0:>s} was modified...'.format(filename)
# Script entry point: fix every */lightcone.dat found under the current
# working directory.
if __name__ == '__main__':
    files = glob.glob('*/lightcone.dat')
    fixLigconeRAs(files)
| import glob
import os
def fixLigconeRAs(files):
'''
Fixes the problem that some lightcones
had negative RAs. The fix is extemely crude
one only adds 360 to the RA value.
:param files: a list of files to be fixed
:type files: list
'''
for filename in files:
removeFile = True
fh = open(filename, 'r')
out = open(filename + '_mod', 'w')
line = fh.readline()
while line:
if line.startswith('#'):
out.write(line)
else:
tmp = line.split()
ra = float(tmp[2])
if ra < 0.0:
removeFile = False
newra = ra + 360.0
newstr = '{0:>s} {1:>s} {2:f} {3:>s} {4:>s}\n'.format(tmp[0], tmp[1], newra, tmp[3], tmp[4])
out.write(newstr)
line = fh.readline()
fh.close()
out.close()
if removeFile:
os.remove(filename + '_mod')
else:
print 'File {0:>s} was modified...'.format(filename)
if __name__ == '__main__':
files = glob.glob('*/lightcone.dat')
fixLigconeRAs(files)
| en | 0.854679 | Fixes the problem that some lightcones had negative RAs. The fix is extemely crude one only adds 360 to the RA value. :param files: a list of files to be fixed :type files: list | 2.480851 | 2 |
main/views.py | Shu-Naing/online_shopping | 0 | 6622043 | <reponame>Shu-Naing/online_shopping<gh_stars>0
from django.shortcuts import render, redirect, HttpResponseRedirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import hashers
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.contrib import messages
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.postgres.search import SearchVector
from .forms import RegistrationForm, LoginForm, DeliveryAddressForm, PaymentVerificationForm
from .models import Customer, Brand, Product, Category, OrderDetail, Address, Cart, Image
from datetime import datetime
import json
@csrf_protect
def registration(request):
    """Render the sign-up form (GET) and create a new Customer (POST).

    The submitted password is hashed before storage; duplicate e-mail
    addresses are rejected with a flash message and a redirect back to
    the registration page.
    """
    form = RegistrationForm()  # default: bound for GET and any other method
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            if not Customer.objects.filter(customer_email=form.cleaned_data['customer_email']).exists():
                # Never store the raw password; hash it first.
                hashed_password = hashers.make_password(form.cleaned_data['customer_password'])
                customer = Customer(
                    customer_username=form.cleaned_data['customer_username'],
                    customer_firstname=form.cleaned_data['customer_firstname'],
                    customer_lastname=form.cleaned_data['customer_lastname'],
                    customer_email=form.cleaned_data['customer_email'],
                    # Fix: the computed hash was replaced by an
                    # anonymization placeholder in the dump; restore it.
                    password=hashed_password,
                    last_login=datetime.today()
                )
                customer.save()
                return redirect("main:login")
            else:
                messages.warning(request, "Email already taken!!")
                return redirect("main:registration")
    return render(request, 'registration.html', {'form': form})
@csrf_protect
def login(request):
    """Render the login form (GET) and authenticate a customer (POST).

    On a successful password match the customer id is stored in the
    session (30-minute expiry), last_login is refreshed, any anonymous
    session-cart entries are claimed for this customer, and the user is
    redirected to the ``next`` URL (or the home page).
    """
    form = LoginForm()
    if request.method == "POST":
        form = LoginForm(request.POST)
        if form.is_valid():
            email = form.cleaned_data.get('customer_email')
            password = form.cleaned_data.get('customer_password')
            email_exists = Customer.objects.filter(customer_email=form.cleaned_data['customer_email']).exists()
            if not email_exists:
                # Unknown e-mail: silently re-show the login page.
                return redirect('main:login')
            else:
                query = Customer.objects.get(customer_email=email)
                cust_pass = getattr(query, 'password')
                # Compare the submitted password against the stored hash.
                matchcheck = hashers.check_password(password, cust_pass)
                if matchcheck:
                    cust_id = getattr(query, 'id')
                    customer = get_object_or_404(Customer, password = cust_pass, pk = cust_id)
                    customer.save(update_fields = ['last_login'])
                    Customer.objects.filter(pk = customer.pk).update(last_login = datetime.today())
                    # Session lives for 30 minutes.
                    request.session.set_expiry(1800)
                    request.session['customer'] = cust_id
                    if not 'cart' in request.session:
                        request.session['cart'] = []
                    else:
                        # Claim anonymous cart lines (values still None)
                        # for the customer that just logged in.
                        for cart in request.session['cart']:
                            cart.update((key, request.session['customer']) for key, value in cart.items() if value == None)
                    if request.POST.get('next') == "":
                        return redirect('/')
                    return redirect("/"+request.POST.get('next'))
    return render(request, 'login.html', {'form': form})
@csrf_protect
def manageAccount(request):
    """Account-management page; POST updates the customer's profile fields.

    The date of birth is expected in ISO ``YYYY-MM-DD`` form.
    """
    if request.method == "POST":
        customer_id = request.POST.get('id')
        customer = get_object_or_404(Customer, pk = customer_id)
        # NOTE(review): save(update_fields=...) rewrites the current values
        # and the queryset update() below then writes the new ones -- the
        # save() call looks redundant; confirm before removing.
        customer.save(update_fields = ['customer_firstname', 'customer_lastname', 'customer_dob', 'customer_gender'])
        Customer.objects.filter(pk = customer.pk).update(customer_firstname = request.POST.get('firstname'),
                                                         customer_lastname = request.POST.get('lastname'),
                                                         customer_dob = datetime.strptime(request.POST.get('dob'), "%Y-%m-%d").date(),
                                                         customer_gender = request.POST.get('gender'))
    return render(request, 'manage_account.html')
def logout(request):
    """Drop the whole session (login state and cart) and go home."""
    request.session.flush()
    return redirect("main:home")
def home(request):
    """Landing page: list every product in a random order."""
    if request.method == "GET":
        products = Product.objects.filter().values('product_name', 'product_price', 'product_featureImage').order_by('?')
        home_list = [
            {
                "h_name": item['product_name'],
                "h_price": item['product_price'],
                "h_image": item['product_featureImage'],
            }
            for item in products
        ]
        return render(request, 'index.html', {'homeProduct': home_list})
def singleProduct(request, product):
    """Product detail page, looked up by product name.

    Collects the product's field values, its gallery images, and up to
    four products from the same category as 'related' suggestions.
    """
    if request.method == "GET":
        single_list = []
        subImages_list = []
        cat_list = []   # NOTE(review): never populated -- candidate for removal
        related_list = []
        oneProduct = Product.objects.filter(product_name = product).values_list('id','product_name', 'product_price', 'product_featureImage', 'product_description', 'category_id')
        # Flatten the first matching row into a plain list:
        # [id, name, price, image, description, category_id]
        for product in oneProduct[0]:
            single_list.append(product)
        # Extra gallery images for this product (single_list[0] is the pk).
        imagesView = Image.objects.filter(product_id = single_list[0]).values('image_path')
        for allimage in imagesView:
            subImages_list.append({"s_image": allimage['image_path']})
        # single_list[-1] is category_id -- drives the related-products strip.
        oneCat = Category.objects.get(pk = single_list[-1])
        relateProduct = Product.objects.filter(category_id = oneCat.id).values('category_id','product_name', 'product_price', 'product_featureImage').order_by()
        for repro in relateProduct:
            related_list.append({"h_name": repro['product_name'], "h_price": repro['product_price'],
                                 "h_image": repro['product_featureImage']})
        return render(request, 'single-product.html', {'single_product': single_list, 'result_images': subImages_list, 'categoryName': oneCat.sub_category, 'relatedProduct': related_list[:4]})
def shop(request, category, sub__category):
    """Category listing page: products plus the distinct brands in it.

    ``category``/``sub__category`` come from the URL and select one
    Category row; its products and their (deduplicated, sorted) brand
    names are rendered.
    """
    if request.method == "GET":
        product_list = []
        brand_names = []
        category = Category.objects.filter(category_name = category, sub_category = sub__category).values_list('id', flat = True)
        product = Product.objects.filter(category_id = category[0]).values('product_name', 'product_price', 'product_featureImage')
        brand_list = Product.objects.filter(category_id = category[0]).values('brand_id')
        # NOTE(review): one Brand query per product row (N+1); a
        # values_list(...).distinct() would do this in a single query.
        for brand_id in brand_list:
            brand = Brand.objects.filter(pk = brand_id['brand_id']).values_list('brand_name', flat = True)
            brand_names.append({'brand_name': brand[0]})
        # Sort, then deduplicate by brand name (dict keys are unique).
        brand_names = sorted(brand_names, key = lambda i: i['brand_name'])
        brand_names = list({v['brand_name']:v for v in brand_names}.values())
        for product in product:
            product_list.append({"p_name": product['product_name'], "p_price": product['product_price'],
                                 "p_image": product['product_featureImage']})
        return render(request, 'shop.html', {'product': product_list, 'brand': brand_names})
def confirm_email(request):
    """Start the e-mail-change flow by sending a verification link.

    POST: if the submitted address matches the logged-in customer's
    current e-mail, an activation link (base64 customer id + one-time
    token) is mailed to it; otherwise the form is re-shown with an error.
    """
    if request.method == "POST":
        customer = Customer.objects.get(pk = request.session['customer'])
        email = Customer.objects.filter(pk = request.session['customer']).values_list('customer_email', flat = True)
        if email[0] == request.POST.get('email'):
            current_site = get_current_site(request)  # NOTE(review): unused; request.get_host() is used below instead
            email_subject = "Verify your email"
            message = render_to_string("account_activate.html", {
                'domain': request.get_host(),
                'uid': urlsafe_base64_encode(force_bytes(request.session['customer'])),
                'token': PasswordResetTokenGenerator().make_token(customer),
            })
            to_email = email[0]
            send_email = EmailMessage(email_subject, message, to = [to_email])
            send_email.send()
            return render(request, 'confirmation.html')
        else:
            return render(request, 'confirm_email.html', {"error": True})
    return render(request, 'confirm_email.html', {})
def activate(request, uidb64, token):
    """Landing view for the e-mail verification link.

    Decodes the base64 customer id, re-validates the one-time token and,
    if both check out, shows the change-e-mail form; otherwise reports an
    invalid link.
    """
    try:
        uid = urlsafe_base64_decode(uidb64).decode()
        customer = Customer.objects.get(pk = uid)
    except(TypeError, ValueError, OverflowError, Customer.DoesNotExist):
        # Malformed uid or no such customer: treat as an invalid link.
        customer = None
    if customer is not None and PasswordResetTokenGenerator().check_token(customer, token):
        return render(request, 'change_email.html', {"uid": uidb64, "token":token})
    else:
        return HttpResponse('Activation link is invalid!')
def manage_by_email(request):
    """Apply the e-mail change confirmed through the activation link.

    POST updates ``customer_email`` of the logged-in customer, then
    returns to the account-management page.
    """
    if request.method == "POST":
        customer_id = request.session['customer']
        customer = get_object_or_404(Customer, pk = customer_id)
        # NOTE(review): the save() rewrites the old value and the queryset
        # update() then writes the new one -- save() looks redundant.
        customer.save(update_fields = ['customer_email'])
        Customer.objects.filter(pk = customer.pk).update(customer_email = request.POST.get('email'))
    return redirect("main:manage_account")
def get_from_cart(cart):
    """Resolve each session-cart entry against the Product table.

    Returns a pair ``(order_products, total_cost)``: a list of dicts
    (name, price, feature image, quantity, line total) plus the sum of
    all line totals.
    """
    order_products = []
    total_cost = 0
    for entry in cart:
        name, price, image = Product.objects.filter(
            product_name=entry['product_name']
        ).values_list('product_name', 'product_price', 'product_featureImage')[0]
        line_total = entry['total_price']
        total_cost += line_total
        order_products.append({
            "product_name": name,
            "product_price": price,
            "product_featureImage": image,
            "qty": entry['quantity'],
            "total_price": line_total,
        })
    return order_products, total_cost
def addtocart(request):
    """POST: add a product to the session cart; GET: show the cart page.

    The cart is a session-stored list of dicts
    ``{customer, product_name, quantity, total_price}``.  Adding a product
    that is already in the cart increases its quantity instead of
    creating a second line.
    """
    if request.method == "POST":
        if not 'cart' in request.session:
            request.session['cart'] = []
            request.session.set_expiry(1800)  # cart lives 30 minutes
        customer = request.POST.get('customer')
        if not customer:
            customer = None  # anonymous cart; claimed at login
        product = Product.objects.filter(pk = request.POST.get('product_id')).values_list('product_name', 'product_price')
        quantity = request.POST.get('quantity')
        if any(d['product_name'] == product[0][0] for d in request.session['cart']):
            # Product already in the cart: bump the quantity and recompute
            # the line total on the existing entry.
            cart_index = [i for i, d in enumerate(request.session['cart']) if product[0][0] in d.values()]
            qty = request.session['cart'][cart_index[0]]['quantity']
            qty += int(quantity)
            # Fix: the bumped quantity was never written back, so checkout
            # showed a stale count while total_price reflected the new one.
            request.session['cart'][cart_index[0]]['quantity'] = qty
            request.session['cart'][cart_index[0]]['total_price'] = float(product[0][1]) * int(qty)
            request.session.modified = True
            return HttpResponse("Successfully added to cart")
        cart = {
            "customer": customer,
            "product_name": product[0][0],
            "quantity": int(quantity),
            "total_price": float(product[0][1]) * int(quantity)
        }
        request.session['cart'].append(cart)
        request.session.modified = True
        return HttpResponse("Successfully added to cart")
    else:
        if not 'cart' in request.session:
            return render(request, '404-page.html', {'error': "Session Time Out", 'status': '404'})
        order_products, total_cost = get_from_cart(request.session['cart'])
        return render(request, "checkout.html", {"order_products": order_products, "total_cost": total_cost,})
def remove_from_cart(request):
    """POST: drop every cart line whose product_name matches 'productName'."""
    if request.method == "POST":
        target = request.POST.get('productName')
        kept = [line for line in request.session['cart'] if line['product_name'] != target]
        request.session['cart'] = kept
        request.session.modified = True
        return HttpResponse("Successfully removed from cart")
@require_POST
def checkout(request):
    """Overwrite the session cart with the edited quantities from the
    checkout page.

    The client posts one JSON string per cart line in ``list[]``; each is
    merged into the corresponding session-cart dict.  Lines are matched
    purely by position, so the update only runs when the counts agree.
    """
    if request.method == "POST":
        cart = request.session['cart']
        updated_cart = request.POST.getlist('list[]')
        if len(cart) == len(updated_cart):
            for item in range(len(cart)):
                load_data = json.loads(updated_cart[item])
                updated_product_name = load_data['product_name']
                updated_qty = load_data['quantity']
                updated_totalprice = load_data['total_price']
                cart[item].update({'product_name': updated_product_name})
                cart[item].update({'quantity': int(updated_qty)})
                cart[item].update({'total_price': float(updated_totalprice)})
            # Session dicts were mutated in place; force a session save.
            request.session.modified = True
        return HttpResponse("Checkout Completed!")
def confirm_checkout(request, payment):
    """Final order step for a given payment method.

    GET renders the delivery-address (and, for cards, payment) form;
    POST persists the OrderDetail, one Cart row per product, and the
    delivery Address, then clears the session cart.  Only
    'cash on delivery' is live; 'credit cards' shows a coming-soon page
    and anything else is a 404.
    """
    if payment != "cash on delivery" and payment != "credit cards":
        return render(request, '404-page.html', {'error': "Page Not Found", 'status': '404'})
    if not 'cart' in request.session:
        # No cart in the session: it expired (or checkout was never started).
        return render(request, '404-page.html', {'error': "Session Time Out", 'status': '404'})
    if payment == "credit cards":
        return render(request, '404-page.html', {'error': "The function will be available soon!", 'status': 'Coming Soon!'})
    order_products, total_cost = get_from_cart(request.session['cart'])
    customer = get_object_or_404(Customer, pk = request.session['customer'])
    if request.method == "POST":
        # Persist the order header first so the cart rows can reference it.
        orderDetail = OrderDetail(
            customer_id = customer,
            payment_method = payment,
            number_of_products = len(order_products),
            sub_total = total_cost,
            order_date = datetime.now().date(),
            order_time = datetime.now().time(),
        )
        orderDetail.save()
        # One Cart row per ordered product.
        for item in order_products:
            product = Product.objects.get(product_name = item['product_name'])
            quantity = item['qty']
            total_price = item['total_price']
            cart = Cart(
                orderDetail_id = orderDetail,
                product_id = product,
                quantity = quantity,
                total_price = total_price
            )
            cart.save()
        # Delivery address entered on the confirmation form.
        address = Address(
            street_1 = request.POST.get('street_1'),
            street_2 = request.POST.get('street_2'),
            township = request.POST.get('township'),
            customer_id = customer
        )
        address.save()
        # The order is placed; empty the session cart.
        del request.session['cart']
        return redirect("main:order_success")
    else:
        delivery_form = DeliveryAddressForm(initial={'firstname': customer.customer_firstname, 'lastname': customer.customer_lastname})
        payment_form = None
        if payment == "credit cards":
            payment_form = PaymentVerificationForm()
        return render(request, "confirm_checkout.html", {"delivery": delivery_form, "payment": payment_form ,"cart": order_products, "total_cost": total_cost, "method": payment})
def order_success(request):
    """Static 'thank you' page shown after a completed order."""
    return render(request, "order_successful.html")
#search Product
def search(request):
    """Full-text product search (POST) over name, category and brand.

    Uses PostgreSQL full-text search via SearchVector; renders the result
    list, or the empty results page when no query was given or nothing
    matched.
    """
    if request.method == 'POST':
        search_query = request.POST.get('search_result')
        if search_query:
            search_result = Product.objects.annotate(
                search = SearchVector('product_name', 'category_id__category_name', 'category_id__sub_category' ,'brand_id__brand_name')
            ).filter(search=search_query).values('product_name', 'product_price', 'product_featureImage')
            if search_result:
                search_result_list = []
                # NOTE(review): the loop variable shadows the queryset name
                # 'search_result'; harmless here but worth renaming.
                for search_result in search_result:
                    search_result_list.append({"name": search_result['product_name'], "price": search_result['product_price'],
                                               "image": search_result['product_featureImage']})
                return render(request, 'search-results.html', {'search': search_result_list})
    return render(request, 'search-results.html')
def error404(request, exception):
    """Custom handler404: render the project's own 404 page."""
    return render(request, '404-page.html', {'error': "Page Not Found", 'status': '404'})
def about_us(request):
    """Static about-us page."""
    return render(request, "about-us.html")
def faqs(request):
return render(request, "faqs.html") | from django.shortcuts import render, redirect, HttpResponseRedirect, get_object_or_404
from django.views.decorators.http import require_POST
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_protect
from django.contrib.auth import hashers
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.contrib import messages
from django.contrib.auth.tokens import PasswordResetTokenGenerator
from django.contrib.postgres.search import SearchVector
from .forms import RegistrationForm, LoginForm, DeliveryAddressForm, PaymentVerificationForm
from .models import Customer, Brand, Product, Category, OrderDetail, Address, Cart, Image
from datetime import datetime
import json
@csrf_protect
def registration(request):
if request.method == "GET":
form = RegistrationForm()
elif request.method == "POST":
form = RegistrationForm(request.POST)
if form.is_valid():
if not Customer.objects.filter(customer_email=form.cleaned_data['customer_email']).exists():
hashed_password = hashers.make_password(form.cleaned_data['customer_password'])
customer = Customer(
customer_username=form.cleaned_data['customer_username'],
customer_firstname=form.cleaned_data['customer_firstname'],
customer_lastname=form.cleaned_data['customer_lastname'],
customer_email=form.cleaned_data['customer_email'],
password=<PASSWORD>,
last_login=datetime.today()
)
customer.save()
return redirect("main:login")
else:
messages.warning(request, "Email already taken!!")
return redirect("main:registration")
return render(request, 'registration.html', {'form': form})
@csrf_protect
def login(request):
form = LoginForm()
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
email = form.cleaned_data.get('customer_email')
password = form.cleaned_data.get('customer_password')
email_exists = Customer.objects.filter(customer_email=form.cleaned_data['customer_email']).exists()
if not email_exists:
return redirect('main:login')
else:
query = Customer.objects.get(customer_email=email)
cust_pass = getattr(query, 'password')
matchcheck = hashers.check_password(password, cust_pass)
if matchcheck:
cust_id = getattr(query, 'id')
customer = get_object_or_404(Customer, password = cust_pass, pk = cust_id)
customer.save(update_fields = ['last_login'])
Customer.objects.filter(pk = customer.pk).update(last_login = datetime.today())
request.session.set_expiry(1800)
request.session['customer'] = cust_id
if not 'cart' in request.session:
request.session['cart'] = []
else:
for cart in request.session['cart']:
cart.update((key, request.session['customer']) for key, value in cart.items() if value == None)
if request.POST.get('next') == "":
return redirect('/')
return redirect("/"+request.POST.get('next'))
return render(request, 'login.html', {'form': form})
@csrf_protect
def manageAccount(request):
if request.method == "POST":
customer_id = request.POST.get('id')
customer = get_object_or_404(Customer, pk = customer_id)
customer.save(update_fields = ['customer_firstname', 'customer_lastname', 'customer_dob', 'customer_gender'])
Customer.objects.filter(pk = customer.pk).update(customer_firstname = request.POST.get('firstname'),
customer_lastname = request.POST.get('lastname'),
customer_dob = datetime.strptime(request.POST.get('dob'), "%Y-%m-%d").date(),
customer_gender = request.POST.get('gender'))
return render(request, 'manage_account.html')
def logout(request):
request.session.flush()
return redirect("main:home")
def home(request):
if request.method == "GET":
home_list = []
product = Product.objects.filter().values('product_name', 'product_price', 'product_featureImage').order_by('?')
for product in product:
home_list.append({"h_name": product['product_name'], "h_price": product['product_price'],
"h_image": product['product_featureImage']})
return render(request, 'index.html', {'homeProduct': home_list})
def singleProduct(request, product):
if request.method == "GET":
single_list = []
subImages_list = []
cat_list = []
related_list = []
oneProduct = Product.objects.filter(product_name = product).values_list('id','product_name', 'product_price', 'product_featureImage', 'product_description', 'category_id')
for product in oneProduct[0]:
single_list.append(product)
imagesView = Image.objects.filter(product_id = single_list[0]).values('image_path')
for allimage in imagesView:
subImages_list.append({"s_image": allimage['image_path']})
oneCat = Category.objects.get(pk = single_list[-1])
relateProduct = Product.objects.filter(category_id = oneCat.id).values('category_id','product_name', 'product_price', 'product_featureImage').order_by()
for repro in relateProduct:
related_list.append({"h_name": repro['product_name'], "h_price": repro['product_price'],
"h_image": repro['product_featureImage']})
return render(request, 'single-product.html', {'single_product': single_list, 'result_images': subImages_list, 'categoryName': oneCat.sub_category, 'relatedProduct': related_list[:4]})
def shop(request, category, sub__category):
if request.method == "GET":
product_list = []
brand_names = []
category = Category.objects.filter(category_name = category, sub_category = sub__category).values_list('id', flat = True)
product = Product.objects.filter(category_id = category[0]).values('product_name', 'product_price', 'product_featureImage')
brand_list = Product.objects.filter(category_id = category[0]).values('brand_id')
for brand_id in brand_list:
brand = Brand.objects.filter(pk = brand_id['brand_id']).values_list('brand_name', flat = True)
brand_names.append({'brand_name': brand[0]})
brand_names = sorted(brand_names, key = lambda i: i['brand_name'])
brand_names = list({v['brand_name']:v for v in brand_names}.values())
for product in product:
product_list.append({"p_name": product['product_name'], "p_price": product['product_price'],
"p_image": product['product_featureImage']})
return render(request, 'shop.html', {'product': product_list, 'brand': brand_names})
def confirm_email(request):
if request.method == "POST":
customer = Customer.objects.get(pk = request.session['customer'])
email = Customer.objects.filter(pk = request.session['customer']).values_list('customer_email', flat = True)
if email[0] == request.POST.get('email'):
current_site = get_current_site(request)
email_subject = "Verify your email"
message = render_to_string("account_activate.html", {
'domain': request.get_host(),
'uid': urlsafe_base64_encode(force_bytes(request.session['customer'])),
'token': PasswordResetTokenGenerator().make_token(customer),
})
to_email = email[0]
send_email = EmailMessage(email_subject, message, to = [to_email])
send_email.send()
return render(request, 'confirmation.html')
else:
return render(request, 'confirm_email.html', {"error": True})
return render(request, 'confirm_email.html', {})
def activate(request, uidb64, token):
try:
uid = urlsafe_base64_decode(uidb64).decode()
customer = Customer.objects.get(pk = uid)
except(TypeError, ValueError, OverflowError, Customer.DoesNotExist):
customer = None
if customer is not None and PasswordResetTokenGenerator().check_token(customer, token):
return render(request, 'change_email.html', {"uid": uidb64, "token":token})
else:
return HttpResponse('Activation link is invalid!')
def manage_by_email(request):
if request.method == "POST":
customer_id = request.session['customer']
customer = get_object_or_404(Customer, pk = customer_id)
customer.save(update_fields = ['customer_email'])
Customer.objects.filter(pk = customer.pk).update(customer_email = request.POST.get('email'))
return redirect("main:manage_account")
def get_from_cart(cart):
order_products = []
total_cost = 0
for cart_item in cart:
product = Product.objects.filter(product_name = cart_item['product_name']).values_list('product_name', 'product_price', 'product_featureImage')
quantity = cart_item['quantity']
total_price = cart_item['total_price']
total_cost += total_price
order_detail = {
"product_name": product[0][0],
"product_price": product[0][1],
"product_featureImage": product[0][2],
"qty": quantity,
"total_price": total_price,
}
order_products.append(order_detail)
return order_products, total_cost
def addtocart(request):
if request.method == "POST":
if not 'cart' in request.session:
request.session['cart'] = []
request.session.set_expiry(1800)
customer = request.POST.get('customer')
if not customer:
customer = None
product = Product.objects.filter(pk = request.POST.get('product_id')).values_list('product_name', 'product_price')
quantity = request.POST.get('quantity')
if any(d['product_name'] == product[0][0] for d in request.session['cart']):
cart_index = [i for i, d in enumerate(request.session['cart']) if product[0][0] in d.values()]
qty = request.session['cart'][cart_index[0]]['quantity']
qty += int(quantity)
request.session['cart'][cart_index[0]]['total_price'] = float(product[0][1]) * int(qty)
request.session.modified = True
return HttpResponse("Successfully added to cart")
cart = {
"customer": customer,
"product_name": product[0][0],
"quantity": int(quantity),
"total_price": float(product[0][1]) * int(quantity)
}
request.session['cart'].append(cart)
request.session.modified = True
return HttpResponse("Successfully added to cart")
else:
if not 'cart' in request.session:
return render(request, '404-page.html', {'error': "Session Time Out", 'status': '404'})
order_products, total_cost = get_from_cart(request.session['cart'])
return render(request, "checkout.html", {"order_products": order_products, "total_cost": total_cost,})
def remove_from_cart(request):
if request.method == "POST":
request.session['cart'] = [i for i in request.session['cart'] if not (i['product_name'] == request.POST.get('productName'))]
request.session.modified = True
return HttpResponse("Successfully removed from cart")
@require_POST
def checkout(request):
if request.method == "POST":
cart = request.session['cart']
updated_cart = request.POST.getlist('list[]')
if len(cart) == len(updated_cart):
for item in range(len(cart)):
load_data = json.loads(updated_cart[item])
updated_product_name = load_data['product_name']
updated_qty = load_data['quantity']
updated_totalprice = load_data['total_price']
cart[item].update({'product_name': updated_product_name})
cart[item].update({'quantity': int(updated_qty)})
cart[item].update({'total_price': float(updated_totalprice)})
request.session.modified = True
return HttpResponse("Checkout Completed!")
def confirm_checkout(request, payment):
if payment != "cash on delivery" and payment != "credit cards":
return render(request, '404-page.html', {'error': "Page Not Found", 'status': '404'})
if not 'cart' in request.session:
return render(request, '404-page.html', {'error': "Session Time Out", 'status': '404'})
if payment == "credit cards":
return render(request, '404-page.html', {'error': "The function will be available soon!", 'status': 'Coming Soon!'})
order_products, total_cost = get_from_cart(request.session['cart'])
customer = get_object_or_404(Customer, pk = request.session['customer'])
if request.method == "POST":
orderDetail = OrderDetail(
customer_id = customer,
payment_method = payment,
number_of_products = len(order_products),
sub_total = total_cost,
order_date = datetime.now().date(),
order_time = datetime.now().time(),
)
orderDetail.save()
for item in order_products:
product = Product.objects.get(product_name = item['product_name'])
quantity = item['qty']
total_price = item['total_price']
cart = Cart(
orderDetail_id = orderDetail,
product_id = product,
quantity = quantity,
total_price = total_price
)
cart.save()
address = Address(
street_1 = request.POST.get('street_1'),
street_2 = request.POST.get('street_2'),
township = request.POST.get('township'),
customer_id = customer
)
address.save()
del request.session['cart']
return redirect("main:order_success")
else:
delivery_form = DeliveryAddressForm(initial={'firstname': customer.customer_firstname, 'lastname': customer.customer_lastname})
payment_form = None
if payment == "credit cards":
payment_form = PaymentVerificationForm()
return render(request, "confirm_checkout.html", {"delivery": delivery_form, "payment": payment_form ,"cart": order_products, "total_cost": total_cost, "method": payment})
def order_success(request):
return render(request, "order_successful.html")
#search Product
def search(request):
if request.method == 'POST':
search_query = request.POST.get('search_result')
if search_query:
search_result = Product.objects.annotate(
search = SearchVector('product_name', 'category_id__category_name', 'category_id__sub_category' ,'brand_id__brand_name')
).filter(search=search_query).values('product_name', 'product_price', 'product_featureImage')
if search_result:
search_result_list = []
for search_result in search_result:
search_result_list.append({"name": search_result['product_name'], "price": search_result['product_price'],
"image": search_result['product_featureImage']})
return render(request, 'search-results.html', {'search': search_result_list})
return render(request, 'search-results.html')
def error404(request, exception):
return render(request, '404-page.html', {'error': "Page Not Found", 'status': '404'})
def about_us(request):
return render(request, "about-us.html")
def faqs(request):
return render(request, "faqs.html") | en | 0.619675 | #search Product | 2.019088 | 2 |
lib/pyasm/__init__.py | clayne/syringe-1 | 25 | 6622044 | <gh_stars>10-100
import marshal,opcode
from . import utils
from .assemble import assemble,oplength,assemble_insn
from .disassemble import disassemble,fetch_op,fetch_insn
# for lazy typists...
asm = assemble
dis = disassemble
def pretty_dis(obj, address=0):
''' returns a disassembly w/ labels '''
insns = []
labels = {}
# collect labels and other fun stuff
for ofs,mnem,arg,op in disassemble(obj):
opnum,oparg = op
if opnum in opcode.hasjrel:
labels[arg] = ofs
elif opnum in opcode.hasjabs:
labels[oparg] = ofs
insns.append( (ofs, mnem, arg, (opnum,oparg)) )
insns = iter(insns)
# format results (might want to align this into some columns
## yes, i know the function name is really ironic. ;)
res = []
for i in insns:
ofs, mnem, arg, op = i
opnum,oparg = op
mnem = mnem.lower()
if ofs in labels.keys() and ofs > 0:
res.append('\nlabel_%x:'% (ofs+address))
elif ofs in labels.keys():
res.append('label_%x:'% (ofs+address))
if oparg == None:
res.append(' %s'% mnem.ljust(16) )
continue
comment = repr(arg)
if opnum in opcode.hasjrel and arg in labels.keys():
comment = ''
arg = 'label_%x'% arg
elif opnum in opcode.hasjabs and oparg in labels.keys():
comment = ''
arg = 'label_%x'% oparg
else:
arg = oparg
if comment:
comment = '# -> %s'% repr(comment)
# FIXME: hardcoded length is 32. (why would you need such huge names for a label anyways)
res.append(' %s %s %s'% (mnem.ljust(16), str(arg).ljust(32), comment))
if ofs not in labels.keys():
if opnum in opcode.hasjrel or mnem.startswith('store') or mnem.startswith('call'):
res.append('')
return '\n'.join(res)
if __name__ == '__main__':
pass
| import marshal,opcode
from . import utils
from .assemble import assemble,oplength,assemble_insn
from .disassemble import disassemble,fetch_op,fetch_insn
# for lazy typists...
asm = assemble
dis = disassemble
def pretty_dis(obj, address=0):
''' returns a disassembly w/ labels '''
insns = []
labels = {}
# collect labels and other fun stuff
for ofs,mnem,arg,op in disassemble(obj):
opnum,oparg = op
if opnum in opcode.hasjrel:
labels[arg] = ofs
elif opnum in opcode.hasjabs:
labels[oparg] = ofs
insns.append( (ofs, mnem, arg, (opnum,oparg)) )
insns = iter(insns)
# format results (might want to align this into some columns
## yes, i know the function name is really ironic. ;)
res = []
for i in insns:
ofs, mnem, arg, op = i
opnum,oparg = op
mnem = mnem.lower()
if ofs in labels.keys() and ofs > 0:
res.append('\nlabel_%x:'% (ofs+address))
elif ofs in labels.keys():
res.append('label_%x:'% (ofs+address))
if oparg == None:
res.append(' %s'% mnem.ljust(16) )
continue
comment = repr(arg)
if opnum in opcode.hasjrel and arg in labels.keys():
comment = ''
arg = 'label_%x'% arg
elif opnum in opcode.hasjabs and oparg in labels.keys():
comment = ''
arg = 'label_%x'% oparg
else:
arg = oparg
if comment:
comment = '# -> %s'% repr(comment)
# FIXME: hardcoded length is 32. (why would you need such huge names for a label anyways)
res.append(' %s %s %s'% (mnem.ljust(16), str(arg).ljust(32), comment))
if ofs not in labels.keys():
if opnum in opcode.hasjrel or mnem.startswith('store') or mnem.startswith('call'):
res.append('')
return '\n'.join(res)
if __name__ == '__main__':
pass | en | 0.891774 | # for lazy typists... returns a disassembly w/ labels # collect labels and other fun stuff # format results (might want to align this into some columns ## yes, i know the function name is really ironic. ;) # FIXME: hardcoded length is 32. (why would you need such huge names for a label anyways) | 2.558032 | 3 |
RecoTracker/FinalTrackSelectors/python/classifierTest_cff.py | ckamtsikis/cmssw | 852 | 6622045 | <gh_stars>100-1000
from RecoTracker.FinalTrackSelectors.TrackCutClassifier_cff import *
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierDetached_cfi import *
testTrackClassifier1 = TrackMVAClassifierPrompt.clone(
src = 'initialStepTracks',
mva = dict(GBRForestLabel = 'MVASelectorIter0_13TeV'),
qualityCuts = [-0.9,-0.8,-0.7]
)
testTrackClassifier2 = TrackCutClassifier.clone(
src = 'initialStepTracks',
mva = dict(minPixelHits = [0,1,1])
)
from RecoTracker.FinalTrackSelectors.ClassifierMerger_cfi import *
testMergedClassifier = ClassifierMerger.clone(
inputClassifiers=['testTrackClassifier1','testTrackClassifier2']
)
from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import *
testTrackMerger = TrackCollectionMerger.clone(
trackProducers = ['initialStepTracks'],
inputClassifiers =['testMergedClassifier'],
minQuality = 'tight'
)
testTrackClassifier3 = TrackMVAClassifierDetached.clone(
src = 'detachedTripletStepTracks',
mva = dict(GBRForestLabel = 'MVASelectorIter3_13TeV'),
qualityCuts = [-0.5,0.0,0.5]
)
from RecoTracker.FinalTrackSelectors.trackAlgoPriorityOrder_cfi import trackAlgoPriorityOrder
from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import *
testTrackMerger2 = TrackCollectionMerger.clone(
trackProducers = ['initialStepTracks','detachedTripletStepTracks'],
inputClassifiers =['testMergedClassifier','testTrackClassifier3'],
minQuality = 'tight'
)
testTrackCloning = cms.Sequence(testTrackClassifier1*testTrackClassifier2*testTrackClassifier3*
testMergedClassifier*testTrackMerger*testTrackMerger2)
| from RecoTracker.FinalTrackSelectors.TrackCutClassifier_cff import *
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierPrompt_cfi import *
from RecoTracker.FinalTrackSelectors.TrackMVAClassifierDetached_cfi import *
testTrackClassifier1 = TrackMVAClassifierPrompt.clone(
src = 'initialStepTracks',
mva = dict(GBRForestLabel = 'MVASelectorIter0_13TeV'),
qualityCuts = [-0.9,-0.8,-0.7]
)
testTrackClassifier2 = TrackCutClassifier.clone(
src = 'initialStepTracks',
mva = dict(minPixelHits = [0,1,1])
)
from RecoTracker.FinalTrackSelectors.ClassifierMerger_cfi import *
testMergedClassifier = ClassifierMerger.clone(
inputClassifiers=['testTrackClassifier1','testTrackClassifier2']
)
from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import *
testTrackMerger = TrackCollectionMerger.clone(
trackProducers = ['initialStepTracks'],
inputClassifiers =['testMergedClassifier'],
minQuality = 'tight'
)
testTrackClassifier3 = TrackMVAClassifierDetached.clone(
src = 'detachedTripletStepTracks',
mva = dict(GBRForestLabel = 'MVASelectorIter3_13TeV'),
qualityCuts = [-0.5,0.0,0.5]
)
from RecoTracker.FinalTrackSelectors.trackAlgoPriorityOrder_cfi import trackAlgoPriorityOrder
from RecoTracker.FinalTrackSelectors.TrackCollectionMerger_cfi import *
testTrackMerger2 = TrackCollectionMerger.clone(
trackProducers = ['initialStepTracks','detachedTripletStepTracks'],
inputClassifiers =['testMergedClassifier','testTrackClassifier3'],
minQuality = 'tight'
)
testTrackCloning = cms.Sequence(testTrackClassifier1*testTrackClassifier2*testTrackClassifier3*
testMergedClassifier*testTrackMerger*testTrackMerger2) | none | 1 | 1.465198 | 1 | |
COVID19_BOT.py | pronayguha13/COVID19_BOT | 2 | 6622046 | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 10:46:47 2020
@author: Pronay
ProjectName:COVID19_BOT
"""
import datetime
import json
import requests
# import argparse
import logging
from bs4 import BeautifulSoup
from tabulate import tabulate
from slack_client import slacker
FORMAT = '[%(asctime)-15s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG, filename='bot.log', filemode='a')
URL = 'https://www.mohfw.gov.in'
SHORT_HEADERS = ('State', 'Inf', 'Cure', 'Dt')
FILE_NAME = 'corona_india_data.json'
extract_contents = lambda row: [x.text.replace('\n', '') for x in row]
def save(x):
with open(FILE_NAME, 'w') as f:
json.dump(x, f)
def load():
res = {}
with open(FILE_NAME, 'r') as f:
res = json.load(f)
return res
def tableCreator(dataSet):
return tabulate(dataSet, headers = SHORT_HEADERS,tablefmt = "presto", missingval = "X")
if __name__ == '__main__':
current_time = datetime.datetime.now().strftime('%d/%m/%Y %H:%M')
info = []
try:
response = requests.get(URL).content
soup = BeautifulSoup(response, 'html.parser')
header = extract_contents(soup.tr.find_all('th'))
stats = []
all_rows = soup.find_all('tr')
for row in all_rows:
stat = extract_contents(row.find_all('td'))
if stat:
if len(stat) == 5 or len(stat) == 4:
# last row
stat = ['', *stat]
stats.append(stat)
p = 1
temp = []
print(stats)
print("\n\n\n\n\n\n\n")
for x in stats:
x = x[2:6]
temp.append(x)
y = ["Total", temp[len(temp) - 1]]
temp = temp[0:len(temp) - 1]
y1 = []
print(temp)
print("\n\n\n\n\n\n\n")
y1.append("Total")
y1.append(y[1][0])
y1.append(y[1][1])
y1.append(y[1][2])
print(y1)
# save(temp)
past_data = load()
t = current_time
cur_data = temp
# for x in stats:
# cur_data.append(x[2:6])
flag = 0
change = []
change1 = [["State", "Infected", "Cured", "Death"], ]
for (x, y) in zip(past_data, cur_data):
if (int(x[1]) != int(y[1])):
inf = abs(int(x[1]) - int(y[1]))
c = abs(int(x[2]) - int(y[2]))
dt = abs(int(x[3]) - int(y[3]))
change.append(y[0])
change.append(inf)
change.append(c)
change.append(dt)
flag = 1
change1.append(change)
events_info = ''
# print(cur_data)
if flag:
save(cur_data)
events_info = "Aleart !!!COVID-19 increases " + t
slack_text = f'CoronaVirus Summary for India below:\n{events_info}'
# slacker()(slack_text)
events_info = "Leatest Summary :: "
table = tableCreator(change1)
slack_text = f'CoronaVirus leatest Summary for India:\n{table}'
# slacker()(slack_text)
cur_data.append(y1)
table = tableCreator(cur_data)
else:
events_info = "No COVID-19 updation in India on " + t
slack_text = f'CoronaVirus Summary for India below:\n{events_info}'
slacker()(slack_text)
# events_info="Previous Summary :: "
past_data.append(y1)
table = tableCreator(past_data)
# print(table)
slack_text = f'CoronaVirus Previous Summary for India:\n{table}'
print(table)
slacker()(slack_text)
except Exception as e:
logging.exception('oops, corona script failed.')
slacker()(f'Exception occured: [{e}]')
print("\nUpdation Done")
| # -*- coding: utf-8 -*-
"""
Created on Fri Mar 20 10:46:47 2020
@author: Pronay
ProjectName:COVID19_BOT
"""
import datetime
import json
import requests
# import argparse
import logging
from bs4 import BeautifulSoup
from tabulate import tabulate
from slack_client import slacker
FORMAT = '[%(asctime)-15s] %(message)s'
logging.basicConfig(format=FORMAT, level=logging.DEBUG, filename='bot.log', filemode='a')
URL = 'https://www.mohfw.gov.in'
SHORT_HEADERS = ('State', 'Inf', 'Cure', 'Dt')
FILE_NAME = 'corona_india_data.json'
extract_contents = lambda row: [x.text.replace('\n', '') for x in row]
def save(x):
with open(FILE_NAME, 'w') as f:
json.dump(x, f)
def load():
res = {}
with open(FILE_NAME, 'r') as f:
res = json.load(f)
return res
def tableCreator(dataSet):
return tabulate(dataSet, headers = SHORT_HEADERS,tablefmt = "presto", missingval = "X")
if __name__ == '__main__':
current_time = datetime.datetime.now().strftime('%d/%m/%Y %H:%M')
info = []
try:
response = requests.get(URL).content
soup = BeautifulSoup(response, 'html.parser')
header = extract_contents(soup.tr.find_all('th'))
stats = []
all_rows = soup.find_all('tr')
for row in all_rows:
stat = extract_contents(row.find_all('td'))
if stat:
if len(stat) == 5 or len(stat) == 4:
# last row
stat = ['', *stat]
stats.append(stat)
p = 1
temp = []
print(stats)
print("\n\n\n\n\n\n\n")
for x in stats:
x = x[2:6]
temp.append(x)
y = ["Total", temp[len(temp) - 1]]
temp = temp[0:len(temp) - 1]
y1 = []
print(temp)
print("\n\n\n\n\n\n\n")
y1.append("Total")
y1.append(y[1][0])
y1.append(y[1][1])
y1.append(y[1][2])
print(y1)
# save(temp)
past_data = load()
t = current_time
cur_data = temp
# for x in stats:
# cur_data.append(x[2:6])
flag = 0
change = []
change1 = [["State", "Infected", "Cured", "Death"], ]
for (x, y) in zip(past_data, cur_data):
if (int(x[1]) != int(y[1])):
inf = abs(int(x[1]) - int(y[1]))
c = abs(int(x[2]) - int(y[2]))
dt = abs(int(x[3]) - int(y[3]))
change.append(y[0])
change.append(inf)
change.append(c)
change.append(dt)
flag = 1
change1.append(change)
events_info = ''
# print(cur_data)
if flag:
save(cur_data)
events_info = "Aleart !!!COVID-19 increases " + t
slack_text = f'CoronaVirus Summary for India below:\n{events_info}'
# slacker()(slack_text)
events_info = "Leatest Summary :: "
table = tableCreator(change1)
slack_text = f'CoronaVirus leatest Summary for India:\n{table}'
# slacker()(slack_text)
cur_data.append(y1)
table = tableCreator(cur_data)
else:
events_info = "No COVID-19 updation in India on " + t
slack_text = f'CoronaVirus Summary for India below:\n{events_info}'
slacker()(slack_text)
# events_info="Previous Summary :: "
past_data.append(y1)
table = tableCreator(past_data)
# print(table)
slack_text = f'CoronaVirus Previous Summary for India:\n{table}'
print(table)
slacker()(slack_text)
except Exception as e:
logging.exception('oops, corona script failed.')
slacker()(f'Exception occured: [{e}]')
print("\nUpdation Done")
| en | 0.461358 | # -*- coding: utf-8 -*- Created on Fri Mar 20 10:46:47 2020
@author: Pronay
ProjectName:COVID19_BOT # import argparse # last row # save(temp) # for x in stats: # cur_data.append(x[2:6]) # print(cur_data) # slacker()(slack_text) # slacker()(slack_text) # events_info="Previous Summary :: " # print(table) | 2.508064 | 3 |
utils/fdfs/storage.py | Bean-jun/shop | 0 | 6622047 | import os
from django.core.files.storage import Storage
from fdfs_client.client import Fdfs_client, get_tracker_conf
from django.conf import settings
class FDFSStorage(Storage):
"""
自定义文件存储类
"""
def _open(self, name, mode='rb'):
# 打开文件时使用
pass
def _save(self, name, content):
# 保存文件时使用
# name:选择上传文件的名称
# content: 包含上传文件内容的File类的对象
# 获取配置文件
path = get_tracker_conf(settings.FAST_DFS_CONF_PATH)
# 创建一个Fdfs_client对象
client = Fdfs_client(path)
# 上传文件到fastdfs系统中
res = client.upload_by_buffer(content.read())
# 判断内容是否判断成功
if res.get('Status') != "Upload successed.":
# 上传失败
raise Exception("上传文件到fast dfs失败")
else:
# 获取返回文件ID
filename = res.get('Remote file_id')
# 返回保存文件,这里返回的是什么,数据表里面就会保存什么内容,注意返回类型为bytes类型
return filename.decode()
def exists(self, name):
# Django判断文件名是否可用
return False
def url(self, name):
# 返回访问文件的URL路径
# name: 表中文件的路径ID
return settings.FAST_DFS_ADDRESS + str(name) | import os
from django.core.files.storage import Storage
from fdfs_client.client import Fdfs_client, get_tracker_conf
from django.conf import settings
class FDFSStorage(Storage):
"""
自定义文件存储类
"""
def _open(self, name, mode='rb'):
# 打开文件时使用
pass
def _save(self, name, content):
# 保存文件时使用
# name:选择上传文件的名称
# content: 包含上传文件内容的File类的对象
# 获取配置文件
path = get_tracker_conf(settings.FAST_DFS_CONF_PATH)
# 创建一个Fdfs_client对象
client = Fdfs_client(path)
# 上传文件到fastdfs系统中
res = client.upload_by_buffer(content.read())
# 判断内容是否判断成功
if res.get('Status') != "Upload successed.":
# 上传失败
raise Exception("上传文件到fast dfs失败")
else:
# 获取返回文件ID
filename = res.get('Remote file_id')
# 返回保存文件,这里返回的是什么,数据表里面就会保存什么内容,注意返回类型为bytes类型
return filename.decode()
def exists(self, name):
# Django判断文件名是否可用
return False
def url(self, name):
# 返回访问文件的URL路径
# name: 表中文件的路径ID
return settings.FAST_DFS_ADDRESS + str(name) | zh | 0.986093 | 自定义文件存储类 # 打开文件时使用 # 保存文件时使用 # name:选择上传文件的名称 # content: 包含上传文件内容的File类的对象 # 获取配置文件 # 创建一个Fdfs_client对象 # 上传文件到fastdfs系统中 # 判断内容是否判断成功 # 上传失败 # 获取返回文件ID # 返回保存文件,这里返回的是什么,数据表里面就会保存什么内容,注意返回类型为bytes类型 # Django判断文件名是否可用 # 返回访问文件的URL路径 # name: 表中文件的路径ID | 2.257743 | 2 |
seaborn_analyzer/custom_reg_plot.py | c60evaporator/seaborn-analyzer | 38 | 6622048 | <filename>seaborn_analyzer/custom_reg_plot.py
from typing import List, Dict
import seaborn as sns
import matplotlib.pyplot as plt
import numbers
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error, mean_absolute_percentage_error
from sklearn.model_selection import KFold, LeaveOneOut, GroupKFold, LeaveOneGroupOut
import decimal
from ._cv_eval_set import init_eval_set, _make_transformer, _eval_set_selection, cross_val_score_eval_set
class regplot():
    # Scatter-plot color palette used by the regression_heat_plot method (regression-model heatmap display)
_HEAT_SCATTER_HUECOLORS = ['red', 'mediumblue', 'darkorange', 'darkmagenta', 'cyan', 'pink', 'brown', 'gold', 'grey']
def _round_digits(src: float, rounddigit: int = None, method='decimal'):
"""
指定桁数で小数を丸める
Parameters
----------
src : float
丸め対象の数値
rounddigit : int
フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定)
method : int
桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
if method == 'decimal':
return round(src, rounddigit)
elif method == 'sig':
with decimal.localcontext() as ctx:
ctx.prec = rounddigit
return ctx.create_decimal(src)
elif method == 'format':
return '{:.{width}g}'.format(src, width=rounddigit)
@classmethod
def _round_dict_digits(cls, srcdict: Dict[str, float], rounddigit: int = None, method='decimal'):
"""
指定桁数でdictの値を丸める
Parameters
----------
srcdict : dict[str, float]
丸め対象のdict
rounddigit : int
フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定)
method : int
桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
dstdict = {}
for k, v in srcdict.items():
if rounddigit is not None and isinstance(v, float):
dstdict[k] = cls._round_digits(v, rounddigit=rounddigit, method=method)
else:
dstdict[k] = v
return dstdict
def _make_score_dict(y_true, y_pred, scores):
"""
回帰評価指標を算出してdict化
"""
score_dict = {}
for scoring in scores:
if scoring == 'r2':
score_dict['r2'] = r2_score(y_true, y_pred)
elif scoring == 'mae':
score_dict['mae'] = mean_absolute_error(y_true, y_pred)
elif scoring == 'mse':
score_dict['mse'] = mean_squared_error(y_true, y_pred, squared=True)
elif scoring == 'rmse':
score_dict['rmse'] = mean_squared_error(y_true, y_pred, squared=False)
elif scoring == 'rmsle':
score_dict['rmsle'] = mean_squared_log_error(y_true, y_pred)
elif scoring == 'mape':
score_dict['mape'] = mean_absolute_percentage_error(y_true, y_pred)
elif scoring == 'max_error':
score_dict['max_error'] = max([abs(p - r) for r, p in zip(y_true, y_pred)])
return score_dict
    def _reshape_input_data(x, y, data, x_colnames, cv_group):
        """
        Unify the format of the input data (pd.DataFrame or np.ndarray).

        Parameters
        ----------
        x : list[str] or np.ndarray
            Explanatory variables: column names if ``data`` is a
            pd.DataFrame, otherwise a 1-D or 2-D array of values.
        y : str or np.ndarray
            Objective variable: a column name if ``data`` is a
            pd.DataFrame, otherwise an array of values.
        data : pd.DataFrame or None
            Input data structure. If None, ``x``, ``y`` and ``cv_group``
            must be np.ndarray and a DataFrame is built from them.
        x_colnames : list[str] or None
            Names of explanatory variables. Must be None when ``data``
            is a pd.DataFrame.
        cv_group : str or np.ndarray or None
            Grouping variable for cross validation: a column name if
            ``data`` is a pd.DataFrame, otherwise an array of values.

        Returns
        -------
        tuple
            (X, y_true, data, x_colnames, y_colname, cv_group_colname)
        """
        # When `data` is a pd.DataFrame: x, y and cv_group are column names
        if isinstance(data, pd.DataFrame):
            if not isinstance(x, list):
                raise Exception('`x` argument should be list[str] if `data` is pd.DataFrame')
            if not isinstance(y, str):
                raise Exception('`y` argument should be str if `data` is pd.DataFrame')
            if x_colnames is not None:
                raise Exception('`x_colnames` argument should be None if `data` is pd.DataFrame')
            X = data[x].values
            y_true = data[y].values
            x_colnames = x
            y_colname = y
            cv_group_colname = cv_group
        # When `data` is None: x, y and cv_group are np.ndarray
        elif data is None:
            if not isinstance(x, np.ndarray):
                raise Exception('`x` argument should be np.ndarray if `data` is None')
            if not isinstance(y, np.ndarray):
                raise Exception('`y` argument should be np.ndarray if `data` is None')
            # Reshape 1-D explanatory data to a single-column 2-D array
            X = x if len(x.shape) == 2 else x.reshape([x.shape[0], 1])
            y_true = y.ravel()
            # Check consistency between x_colnames and the width of X
            if x_colnames is None:
                x_colnames = list(range(X.shape[1]))
            elif X.shape[1] != len(x_colnames):
                raise Exception('width of X must be equal to length of x_colnames')
            else:
                x_colnames = x_colnames
            y_colname = 'objective_variable'
            if cv_group is not None:  # When a cross-validation group is specified
                cv_group_colname = 'group'
                data = pd.DataFrame(np.column_stack((X, y_true, cv_group)),
                                    columns=x_colnames + [y_colname] + [cv_group_colname])
            else:
                cv_group_colname = None
                data = pd.DataFrame(np.column_stack((X, y)),
                                    columns=x_colnames + [y_colname])
        else:
            raise Exception('`data` argument should be pd.DataFrame or None')
        return X, y_true, data, x_colnames, y_colname, cv_group_colname
    @classmethod
    def _rank_display(cls, y_true, y_pred, rank_number, rank_col, rank_col_data, x=None, ax=None, rounddigit=None):
        """
        Annotate the data points with the largest prediction errors.

        Parameters
        ----------
        y_true : np.ndarray
            True values of the objective variable.
        y_pred : np.ndarray
            Predicted values of the objective variable.
        rank_number : int
            Number of worst-error points to annotate.
        rank_col : str
            Field name displayed with each annotated point
            (if None, the index is used).
        rank_col_data : np.ndarray
            Values of the field displayed with each annotated point.
        x : np.ndarray
            Explanatory-variable values. If None, annotations are placed
            at (y_true, y_pred); otherwise at (x, y_true).
        ax : matplotlib.axes.Axes
            Target axes (if None, matplotlib.pyplot.gca() is used).
        rounddigit : int
            Number of decimal digits to which the displayed error is rounded.
        """
        # Use matplotlib.pyplot.gca() when no axes are given
        if ax is None:
            ax=plt.gca()
        if rank_col is None:
            rank_col = 'index'
        y_error = y_pred - y_true
        y_error_abs = np.abs(y_error)
        # Indices of the rank_number largest absolute errors, worst first
        rank_index = np.argsort(-y_error_abs)[:rank_number]
        for rank, i in enumerate(rank_index):
            error = cls._round_digits(y_error[i], rounddigit=rounddigit, method='decimal')
            rank_text = f' no{rank+1}\n-<-error={error}\n {rank_col}={rank_col_data[i]}'
            if x is None: # y_true on x-axis, y_pred on y-axis (for regression_pred_true)
                ax.text(y_true[i], y_pred[i], rank_text, verticalalignment='center', horizontalalignment='left')
            else: # x on x-axis, y_true on y-axis (for regression_plot_1d)
                ax.text(x[i], y_true[i], rank_text, verticalalignment='center', horizontalalignment='left')
@classmethod
def _scatterplot_ndarray(cls, x, x_name, y, y_name, hue_data, hue_name, ax, scatter_kws, legend_kws):
"""
np.ndarrayを入力として散布図表示(scatterplot)
"""
# X値とY値を合体してDataFrame化
data = np.stack([x, y], axis=1)
data = pd.DataFrame(data, columns=[x_name, y_name])
# 色分け指定しているとき、色分け用のフィールドを追加
if hue_data is not None:
if hue_name is None:
hue_name = 'hue'
data[hue_name] = pd.Series(hue_data)
# 散布図プロット
sns.scatterplot(x=x_name, y=y_name, data=data, ax=ax, hue=hue_name, **scatter_kws)
# 凡例追加
if 'title' not in legend_kws.keys():
legend_kws['title'] = hue_name
ax.legend(**legend_kws)
@classmethod
def _plot_pred_true(cls, y_true, y_pred, hue_data=None, hue_name=None, ax=None,
linecolor='red', linesplit=200, rounddigit=None,
score_dict=None, scatter_kws=None, legend_kws=None):
"""
予測値と実測値を、回帰評価指標とともにプロット
Parameters
----------
y_true : ndarray
目的変数実測値
y_pred : ndarray
目的変数予測値
hue_data : ndarray
色分け用ラベルデータ
hue_name : str
色分け用の列名
ax : matplotlib.axes.Axes
表示対象のax (Noneならmatplotlib.pyplot.plotで1枚ごとにプロット)
linecolor : str
予測値=実測値の線の色
linesplit : int
フィッティング線の分割数 (カクカクしたら増やす)
rounddigit: int
表示指標の小数丸め桁数
score_dict : dict[str, float]
算出した評価指標一覧
scatter_kws : dict
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
# score_dictがNoneのとき、空のDictを加瀬宇
if score_dict is None:
score_dict = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# 散布図プロット
cls._scatterplot_ndarray(y_true, 'y_true', y_pred, 'y_pred', hue_data, hue_name, ax, scatter_kws, legend_kws)
# 予測値=実測値の線を作成
true_min = np.amin(y_true)
true_max = np.amax(y_true)
true_line = np.linspace(true_min, true_max, linesplit)
# 評価指標文字列作成
score_list = [f'{k}={v}' for k, v in cls._round_dict_digits(score_dict, rounddigit, 'sig').items()]
score_text = "\n".join(score_list)
# 線と文字をプロット
ax.plot(true_line, true_line, color=linecolor)
ax.text(true_max, np.amin(y_pred), score_text, verticalalignment='bottom', horizontalalignment='right')
@classmethod
def regression_pred_true(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
x_colnames: List[str] = None, hue=None, linecolor='red', rounddigit=3,
rank_number=None, rank_col=None, scores='mae',
cv_stats='mean', cv=None, cv_seed=42, cv_group=None, ax=None,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot prediction vs. true scatter plots of any scikit-learn regression estimator
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : str or list[str]
Explanatory variables.
y : str
Objective variable.
data : pd.DataFrame
Input data structure.
x_colnames: list[str], optional
Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
hue : str, optional
Grouping variable that will produce points with different colors.
linecolor : str, optional
Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html
rounddigit: int, optional
Round a number of score to a given precision in decimal digits.
rank_number : int, optional
Number of emphasized data that are in the top posiotions for regression error.
rank_col : list[str], optional
Variables that are displayed with emphasized data that are in the top posiotions for regression error.
scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list, optional
Regression score that are displayed at the lower right of the graph.
cv_stats : {'mean', 'median', 'max', 'min'}, optional
Statistical method of cross validation score that are displayed at the lower right of the graph.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
ax : {matplotlib.axes.Axes, list[matplotlib.axes.Axes]}, optional
Pre-existing axes for the plot or list of it. Otherwise, call matplotlib.pyplot.subplot() internally.
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
subplot_kws : dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. figsize. Available only if ``axes`` is None. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
Returns
----------
score_dict : dict
Validation scores, e.g. r2, mae and rmse
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
y, data,
x_colnames,
cv_group)
# scoresの型をListに統一
if scores is None:
scores = []
elif isinstance(scores, str):
scores = [scores]
elif not isinstance(scores, list):
raise Exception('the "scores" argument must be str or list[str]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
y_pred = estimator.predict(X)
# 評価指標算出
score_dict = cls._make_score_dict(y_true, y_pred, scores)
# 色分け用データ取得
hue_data = None if hue is None else data[hue]
hue_name = None if hue is None else hue
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_data = data.index.values
else: # 表示フィールド指定あるとき
rank_col_data = data[rank_col].values
# 予測値と実測値プロット
cls._plot_pred_true(y_true, y_pred, hue_data=hue_data, hue_name=hue_name, ax=ax,
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
# 誤差上位を文字表示
if rank_number is not None:
cls._rank_display(y_true, y_pred, rank_number, rank_col, rank_col_data, rounddigit=rounddigit)
return score_dict
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
#LeaveOneOutかどうかを判定
isLeaveOneOut = isinstance(cv, LeaveOneOut)
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
elif isLeaveOneOut:
cv_num = 1
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# スコア種類ごとにクロスバリデーションスコアの算出
score_all_dict = {}
for scoring in scores:
# cross_val_scoreでクロスバリデーション
if scoring == 'r2':
score_all_dict['r2'] = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='r2',
fit_params=fit_params, n_jobs=-1, **split_kws)
elif scoring == 'mae':
neg_mae = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_absolute_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mae'] = -neg_mae # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'mse':
neg_mse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_squared_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mse'] = -neg_mse # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'rmse':
neg_rmse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_root_mean_squared_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['rmse'] = -neg_rmse # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'rmsle':
neg_msle = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_squared_log_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['rmsle'] = np.sqrt(-neg_msle) # 正負を逆にしてルートをとる
elif scoring == 'mape':
neg_mape = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_absolute_percentage_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mape'] = -neg_mape # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'max_error':
neg_max_error = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='max_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['max_error'] = - neg_max_error # scikit-learnの仕様に合わせ正負を逆に
# 表示用のax作成
if ax is None:
# LeaveOneOutのとき、クロスバリデーションごとの図は作成せず
if isLeaveOneOut:
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (6, 6)
fig, ax = plt.subplots(1, 1, **subplot_kws)
# LeaveOneOut以外のとき、クロスバリデーションごとに図作成
else:
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (6, (cv_num + 1) * 6)
fig, ax = plt.subplots(cv_num + 1, 1, **subplot_kws)
# クロスバリデーション
y_true_all = []
y_pred_all = []
hue_all = []
rank_col_all = []
score_train_dict = {}
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
X_test = X[test]
y_test = y_true[test]
# 色分け用データ取得(していないときは、クロスバリデーション番号を使用、LeaveOuneOutのときは番号分けない)
if hue is None:
hue_test = np.full(1 ,'leave_one_out') if isLeaveOneOut else np.full(len(test) ,f'cv_{i}')
hue_name = 'cv_number' # 色分け名を'cv_number'に指定
else:
hue_test = data[hue].values[test]
hue_name = hue
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_test = data.index.values[test]
else: # 表示フィールド指定あるとき
rank_col_test = data[rank_col].values[test]
else:
rank_col_test = np.array([])
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
y_pred = estimator.predict(X_test)
# 学習データスコア算出
y_pred_train = estimator.predict(X_train)
score_dict = cls._make_score_dict(y_train, y_pred_train, scores)
for score in scores:
if f'{score}_train' not in score_train_dict:
score_train_dict[f'{score}_train'] = []
score_train_dict[f'{score}_train'].append(score_dict[score])
# CV内結果をプロット(LeaveOneOutのときはプロットしない)
if not isLeaveOneOut:
score_cv_dict = {k: v[i] for k, v in score_all_dict.items()}
score_cv_dict.update({f'{k}_train': v for k, v in score_dict.items()})
cls._plot_pred_true(y_test, y_pred, hue_data=hue_test, hue_name=hue_name, ax=ax[i],
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_cv_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
ax[i].set_title(f'Cross Validation Fold{i}')
# 全体プロット用データに追加
y_true_all.append(y_test)
y_pred_all.append(y_pred)
hue_all.append(hue_test)
rank_col_all.append(rank_col_test)
# 全体プロット用データを合体
y_true_all = np.hstack(y_true_all)
y_pred_all = np.hstack(y_pred_all)
hue_all = np.hstack(hue_all)
rank_col_all = np.hstack(rank_col_all)
# スコアの統計値を計算
if cv_stats == 'mean':
score_stats_dict = {f'{k}_mean': np.mean(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.mean(v) for k, v in score_train_dict.items()}
elif cv_stats == 'median':
score_stats_dict = {f'{k}_median': np.median(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.median(v) for k, v in score_train_dict.items()}
elif cv_stats == 'min':
score_stats_dict = {f'{k}_min': np.amin(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.amin(v) for k, v in score_train_dict.items()}
elif cv_stats == 'max':
score_stats_dict = {f'{k}_max': np.amax(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.amax(v) for k, v in score_train_dict.items()}
# 学習データスコアをdictに追加
score_stats_dict.update(train_stats_dict)
# 全体プロット
ax_all = ax if isLeaveOneOut else ax[cv_num]
cls._plot_pred_true(y_true_all, y_pred_all, hue_data=hue_all, hue_name=hue_name, ax=ax_all,
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_stats_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
ax_all.set_title('All Cross Validations')
# 誤差上位を文字表示
if rank_number is not None:
cls._rank_display(y_true_all, y_pred_all, rank_number, rank_col, rank_col_all,
ax=ax_all, rounddigit=rounddigit)
return score_stats_dict
def _average_plot(estimator, data, x_colnames, y_colname, hue,
aggregate, subplot_kws, plot_kws, scatter_kws, legend_kws,
cv_index, x_range=200):
# figsize (全ての図全体のサイズ)指定
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (6, len(x_colnames) * 5)
if 'color' not in plot_kws:
plot_kws['color'] = 'red'
# プロット用のaxes作成
fig, axes = plt.subplots(len(x_colnames), 1, **subplot_kws)
if cv_index is not None:
fig.suptitle(f'CV No.{cv_index}')
# 全列を走査
for i, colname in enumerate(x_colnames):
# 該当列(グラフのX軸)の値を作成
x_max = data[colname].max()
x_min = data[colname].min()
x_array = np.linspace(x_min, x_max, x_range)
# 該当列以外を抽出して平均値算出
if aggregate == 'mean':
other_x_agg = data[[col for col in x_colnames if col != colname]].mean()
elif aggregate == 'median':
other_x_agg = data[[col for col in x_colnames if col != colname]].median()
else:
raise ValueError('the `aggregate` argument should be "mean" or "median"')
X_mean = np.tile(other_x_agg, (x_range, 1))
# 該当列を挿入して説明変数とし、モデルで推論
X_mean = np.insert(X_mean, i, x_array, axis=1)
y_pred = estimator.predict(X_mean)
# 実測値を散布図プロット
ax = axes if len(x_colnames) == 1 else axes[i]
sns.scatterplot(x=colname, y=y_colname, hue=hue, data=data, ax=ax, **scatter_kws)
# 推測値曲線をプロット
ax.plot(x_array, y_pred, **plot_kws)
# 色分け時は凡例表示
if hue is not None:
ax.legend(**legend_kws)
fig.tight_layout(rect=[0, 0, 1, 0.98])
@classmethod
def average_plot(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
x_colnames: List[str] = None, hue=None,
aggregate='mean',
cv=None, cv_seed=42, cv_group=None, display_cv_indices = 0,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, plot_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot relationship between one explanatory variable and predicted value by line graph.
Other explanatory variables are fixed to aggregated values such as mean values or median values.
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : list[str] or np.ndarray
Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
y : str or np.ndarray
Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
data: pd.DataFrame
Input data structure.
x_colnames: list[str], optional
Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
hue : str, optional
Grouping variable that will produce points with different colors.
aggregate : {'mean', 'median'}, optional
Statistic method of aggregating explanatory variables except x_axis variable.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
display_cv_indices : int or list, optional
Cross validation index or indices to display.
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
subplot_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
plot_kws: dict, optional
Additional parameters passed to matplotlib.axes.Axes.plot(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.plot.html
scatter_kws: dict, optional
Additional parameters passed to seaborn.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to matplotlib.axes.Axes.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data(x, y, data,
x_colnames,
cv_group)
# display_cv_indicesをList化
if isinstance(display_cv_indices, int):
display_cv_indices = [display_cv_indices]
elif not isinstance(x_colnames, list):
raise Exception('the "cv_display_indices" argument should be int or List[int]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# plot_kwsがNoneなら空のdictを入力
if plot_kws is None:
plot_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
# 平均値
cls._average_plot(estimator, data, x_colnames, y_colname, hue,
aggregate=aggregate,
subplot_kws=subplot_kws, plot_kws=plot_kws,
scatter_kws=scatter_kws, legend_kws=legend_kws,
cv_index=None)
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
# LeaveOneOutのときエラーを出す
if isinstance(cv, LeaveOneOut):
raise Exception('"regression_heat_plot" method does not support "LeaveOneOut" cross validation')
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# クロスバリデーション
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示対象以外のCVなら飛ばす
if i not in display_cv_indices:
continue
print(f'cv_number={i}/{cv_num}')
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
data_test = data.iloc[test]
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
# ヒートマップをプロット
cls._average_plot(estimator, data_test, x_colnames, y_colname, hue,
aggregate=aggregate,
subplot_kws=subplot_kws, plot_kws=plot_kws,
scatter_kws=scatter_kws, legend_kws=legend_kws,
cv_index=i)
@classmethod
def linear_plot(cls, x: str, y: str, data: pd.DataFrame = None,
x_colname: str = None,
ax=None, hue=None, linecolor='red',
rounddigit=5, plot_scores=True, scatter_kws=None, legend_kws=None):
"""
Plot linear regression line and calculate Pearson correlation coefficient.
Parameters
----------
x : str
Variable that specify positions on the x.
y : str
Variable that specify positions on the y.
data : pd.DataFrame
Input data structure.
x_colname: str, optional
Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, call matplotlib.pyplot.gca() internally.
hue : str, optional
Grouping variable that will produce points with different colors.
linecolor : str, optional
Color of regression line. See https://matplotlib.org/stable/gallery/color/named_colors.html
rounddigit: int, optional
Round a number of score to a given precision in decimal digits.
plot_scores: bool, optional
If True, display Pearson correlation coefficient and the p-value.
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
Returns
----------
ax : matplotlib.axes.Axes
Returns the Axes object with the plot drawn onto it.
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
y, data,
[x_colname] if x_colname is not None else x_colname,
cv_group=None)
if x_colname is None:
x_colname = x_colnames[0]
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# まずは散布図プロット
ax = sns.scatterplot(x=x_colname, y=y_colname, data=data, ax=ax, hue=hue, **scatter_kws)
# 凡例追加
if 'title' not in legend_kws.keys():
legend_kws['title'] = hue
ax.legend(**legend_kws)
# 線形回帰モデル作成
lr = LinearRegression()
lr.fit(X, y_true)
xmin = np.amin(X)
xmax = np.amax(X)
linesplit=200
Xline = np.linspace(xmin, xmax, linesplit)
Xline = Xline.reshape(len(Xline), 1)
# 回帰線を描画
ax.plot(Xline, lr.predict(Xline), color=linecolor)
# 回帰式、ピアソンの相関係数およびp値を表示
if plot_scores == True:
# 回帰式
coef = cls._round_digits(lr.coef_[0], rounddigit=rounddigit, method="decimal")
intercept = cls._round_digits(lr.intercept_, rounddigit=rounddigit, method="decimal")
equation = f'y={coef}x+{intercept}' if intercept >= 0 else f'y={coef}x-{-intercept}'
# ピアソン相関係数
pearsonr = stats.pearsonr(data[x_colname], data[y_colname])
r = cls._round_digits(pearsonr[0], rounddigit=rounddigit, method="decimal")
pvalue = cls._round_digits(pearsonr[1], rounddigit=rounddigit, method="decimal")
# プロット
rtext = f'{equation}\nr={r}\np={pvalue}'
ax.text(xmax, np.amin(y_true), rtext, verticalalignment='bottom', horizontalalignment='right')
return ax
@classmethod
def _estimator_plot_1d(cls, trained_estimator, X, y_true, hue_data=None, hue_name=None, ax=None, linecolor='red', linesplit=1000, rounddigit=None,
score_dict=None, scatter_kws=None, legend_kws=None):
"""
1次説明変数回帰曲線を、回帰評価指標とともにプロット
Parameters
----------
trained_estimator :
学習済の回帰モデル(scikit-learn API)
X : ndarray
説明変数
y_true : ndarray
目的変数実測値
hue_data : ndarray
色分け用ラベルデータ
hue_name : str
色分け用の列名
ax : matplotlib.axes.Axes
表示対象のax (Noneならplt.plotで1枚ごとにプロット)
linecolor : str
予測値=実測値の線の色
linesplit : int
フィッティング線の分割数 (カクカクしたら増やす)
rounddigit: int
表示指標の小数丸め桁数
score_dict : dict[str, float]
算出した評価指標一覧
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
# score_dictがNoneのとき、空のDictを入力
if score_dict is None:
score_dict = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# 散布図プロット
cls._scatterplot_ndarray(np.ravel(X), 'X', y_true, 'Y', hue_data, hue_name, ax, scatter_kws, legend_kws)
# 回帰モデルの線を作成
xmin = np.amin(X)
xmax = np.amax(X)
Xline = np.linspace(xmin, xmax, linesplit)
Xline = Xline.reshape(len(Xline), 1)
# 回帰線を描画
ax.plot(Xline, trained_estimator.predict(Xline), color=linecolor)
# 評価指標文字列作成
score_list = [f'{k}={v}' for k, v in cls._round_dict_digits(score_dict, rounddigit, 'sig').items()]
score_text = "\n".join(score_list)
ax.text(xmax, np.amin(y_true), score_text, verticalalignment='bottom', horizontalalignment='right')
    @classmethod
    def regression_plot_1d(cls, estimator, x: str, y: str, data: pd.DataFrame = None, x_colname: str = None,
                           hue=None, linecolor='red', rounddigit=3,
                           rank_number=None, rank_col=None, scores='mae',
                           cv_stats='mean', cv=None, cv_seed=42, cv_group=None,
                           estimator_params=None, fit_params=None, eval_set_selection=None,
                           subplot_kws=None, scatter_kws=None, legend_kws=None):
        """
        Plot regression lines of any scikit-learn regressor with 1D explanatory variable.

        Parameters
        ----------
        estimator : estimator object implementing ``fit``
            Regression estimator. This is assumed to implement the scikit-learn estimator interface.
        x : str, or np.ndarray
            Explanatory variables. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
        y : str or np.ndarray
            Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
        data: pd.DataFrame
            Input data structure.
        x_colname: str, optional
            Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame
        hue : str, optional
            Grouping variable that will produce points with different colors.
        linecolor : str, optional
            Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html
        rounddigit: int, optional
            Round a number of score to a given precision in decimal digits.
        rank_number : int, optional
            Number of emphasized data that are in the top positions for regression error.
        rank_col : list[str], optional
            Variables that are displayed with emphasized data that are in the top posiotions for regression error.
        scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list, optional
            Regression score that are displayed at the lower right of the graph.
        cv_stats : {'mean', 'median', 'max', 'min'}, optional
            Statistical method of cross validation score that are displayed at the lower right of the graph.
        cv : int, cross-validation generator, or an iterable, optional
            Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
        cv_seed : int, optional
            Seed for random number generator of cross validation.
        cv_group: str, optional
            Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
        estimator_params : dict, optional
            Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
        fit_params : dict, optional
            Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
        subplot_kws : dict, optional
            Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
        eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
            Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.

            If "all", use all data in `X` and `y`.

            If "train", select train data from `X` and `y` using cv.split().

            If "test", select test data from `X` and `y` using cv.split().

            If "original", use raw `eval_set`.

            If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
        scatter_kws: dict, optional
            Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
        legend_kws : dict
            Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html

        Returns
        ----------
        score_dict : dict
            Validation scores, e.g. r2, mae and rmse
        """
        # Normalize the input data format
        X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
                                                                                           y, data,
                                                                                           [x_colname] if x_colname is not None else x_colname,
                                                                                           cv_group)
        # Normalize scores to a list
        if scores is None:
            scores = []
        elif isinstance(scores, str):
            scores = [scores]
        elif not isinstance(scores, list):
            raise Exception('the "scores" argument must be str or list[str]')
        # Apply estimator parameters if given
        if estimator_params is not None:
            estimator.set_params(**estimator_params)
        # Use an empty dict when fit_params is None
        if fit_params is None:
            fit_params = {}
        # Use an empty dict when subplot_kws is None
        if subplot_kws is None:
            subplot_kws = {}
        # Use an empty dict when scatter_kws is None
        if scatter_kws is None:
            scatter_kws = {}
        # Use an empty dict when legend_kws is None
        if legend_kws is None:
            legend_kws = {}
        # Branch by whether cross validation is used
        # Without cross validation (plot and scores from training data)
        if cv is None:
            # Fit and predict
            estimator.fit(X, y_true, **fit_params)
            y_pred = estimator.predict(X)
            # Calculate scores
            score_dict = cls._make_score_dict(y_true, y_pred, scores)
            # Get data for color grouping
            hue_data = None if hue is None else data[hue]
            hue_name = None if hue is None else hue
            # Get data for displaying the largest errors
            if rank_number is not None:
                if rank_col is None: # Use the index when no display field is specified
                    rank_col_data = data.index.values
                else: # When a display field is specified
                    rank_col_data = data[rank_col].values
            # Plot the regression curve
            cls._estimator_plot_1d(estimator, X, y_true, hue_data=hue_data, hue_name=hue_name,
                                   linecolor=linecolor, rounddigit=rounddigit, score_dict=score_dict,
                                   scatter_kws=scatter_kws, legend_kws=legend_kws)
            # Annotate the largest errors
            if rank_number is not None:
                cls._rank_display(y_true, y_pred, rank_number, rank_col, rank_col_data, x=X, rounddigit=rounddigit)
            return score_dict
        # With cross validation (plot and scores per fold)
        if cv is not None:
            # If only a fold count is given, split randomly with KFold using the seed
            if isinstance(cv, numbers.Integral):
                cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
            # Raise an error for LeaveOneOut (not supported by this method)
            if isinstance(cv, LeaveOneOut):
                raise Exception('"regression_plot_1d" method does not support "LeaveOneOut" cross validation')
            # Pass cv_group as the grouping variable (GroupKFold, LeaveOneGroupOut, etc.)
            split_kws={}
            if cv_group_colname is not None:
                split_kws['groups'] = data[cv_group_colname].values
            elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
                raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
            # For LeaveOneGroupOut, the number of folds equals the number of groups
            if isinstance(cv, LeaveOneGroupOut):
                cv_num = len(set(data[cv_group_colname].values))
            else:
                cv_num = cv.n_splits
            # If fit_params contains eval_metric but no eval_set, use test data as eval_set
            if eval_set_selection is None:
                eval_set_selection = 'test'
            fit_params, eval_set_selection = init_eval_set(
                eval_set_selection, fit_params, X, y)
            # Build the preprocessing transformer (pipeline steps before the final estimator)
            transformer = _make_transformer(eval_set_selection, estimator)
            # Calculate cross-validation scores for each score type
            score_all_dict = {}
            for scoring in scores:
                # Cross validation via cross_val_score
                if scoring == 'r2':
                    score_all_dict['r2'] = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                                    cv=cv, scoring='r2',
                                                                    fit_params=fit_params, n_jobs=-1, **split_kws)
                elif scoring == 'mae':
                    neg_mae = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                       cv=cv, scoring='neg_mean_absolute_error',
                                                       fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mae'] = -neg_mae  # Negate to follow scikit-learn's sign convention
                elif scoring == 'mse':
                    neg_mse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                       cv=cv, scoring='neg_mean_squared_error',
                                                       fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mse'] = -neg_mse  # Negate to follow scikit-learn's sign convention
                elif scoring == 'rmse':
                    neg_rmse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_root_mean_squared_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['rmse'] = -neg_rmse  # Negate to follow scikit-learn's sign convention
                elif scoring == 'rmsle':
                    neg_msle = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_mean_squared_log_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['rmsle'] = np.sqrt(-neg_msle)  # Negate, then take the square root
                elif scoring == 'mape':
                    neg_mape = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_mean_absolute_percentage_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mape'] = -neg_mape  # Negate to follow scikit-learn's sign convention
                elif scoring == 'max_error':
                    neg_max_error = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                             cv=cv, scoring='max_error',
                                                             fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['max_error'] = - neg_max_error  # Negate to follow scikit-learn's sign convention
            # Create axes for display
            # One figure row per cross-validation fold plus one for the overall plot
            if 'figsize' not in subplot_kws.keys():
                subplot_kws['figsize'] = (6, (cv_num + 1) * 6)
            fig, axes = plt.subplots(cv_num + 1, 1, **subplot_kws)
            # Cross validation loop
            score_train_dict = {}
            for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
                # Split into train and test data for display
                X_train = X[train]
                y_train = y_true[train]
                X_test = X[test]
                y_test = y_true[test]
                # Get color-grouping data (the fold number is used when hue is not specified)
                if hue is None:
                    hue_test = np.full(len(test) ,f'cv_{i}')
                    hue_name = 'cv_number' # Use 'cv_number' as the grouping name
                else:
                    hue_test = data[hue].values[test]
                    hue_name = hue
                # Get data for displaying the largest errors
                if rank_number is not None:
                    if rank_col is None: # Use the index when no display field is specified
                        rank_col_test = data.index.values[test]
                    else: # When a display field is specified
                        rank_col_test = data[rank_col].values[test]
                # Extract only train or test data from eval_set
                fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
                                                          fit_params, train, test)
                # Fit on this fold's training data
                estimator.fit(X_train, y_train, **fit_params_modified)
                # Calculate training-data scores
                y_pred_train = estimator.predict(X_train)
                score_dict = cls._make_score_dict(y_train, y_pred_train, scores)
                for score in scores:
                    if f'{score}_train' not in score_train_dict:
                        score_train_dict[f'{score}_train'] = []
                    score_train_dict[f'{score}_train'].append(score_dict[score])
                # Plot the results of this fold
                score_cv_dict = {k: v[i] for k, v in score_all_dict.items()}
                score_cv_dict.update({f'{k}_train': v for k, v in score_dict.items()})
                cls._estimator_plot_1d(estimator, X_test, y_test, hue_data=hue_test, hue_name=hue_name, ax=axes[i],
                                       linecolor=linecolor, rounddigit=rounddigit, score_dict=score_cv_dict,
                                       scatter_kws=scatter_kws, legend_kws=legend_kws)
                # Annotate the largest errors
                if rank_number is not None:
                    cls._rank_display(y_test, estimator.predict(X_test), rank_number, rank_col, rank_col_test, x=X_test, ax=axes[i], rounddigit=rounddigit)
                axes[i].set_title(f'Cross Validation Fold{i}')
            # Calculate statistics of the cross-validation scores
            if cv_stats == 'mean':
                score_stats_dict = {f'{k}_mean': np.mean(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.mean(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'median':
                score_stats_dict = {f'{k}_median': np.median(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.median(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'min':
                score_stats_dict = {f'{k}_min': np.amin(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.amin(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'max':
                score_stats_dict = {f'{k}_max': np.amax(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.amax(v) for k, v in score_train_dict.items()}
            # Add training-data scores to the dict
            score_stats_dict.update(train_stats_dict)
            # Get color-grouping data for the overall plot
            hue_data = None if hue is None else data[hue]
            hue_name = None if hue is None else hue
            # Overall plot across all folds
            ax_all = axes[cv_num]
            cls._estimator_plot_1d(estimator, X, y_true, hue_data=hue_data, hue_name=hue_name, ax=ax_all,
                                   linecolor=linecolor, rounddigit=rounddigit, score_dict=score_stats_dict,
                                   scatter_kws=scatter_kws, legend_kws=legend_kws)
            ax_all.set_title('All Cross Validations')
            return score_stats_dict
    @classmethod
    def _reg_heat_plot_2d(cls, trained_estimator, x_heat, y_true_col, y_pred_col, rank_col, data, x_heat_indices, hue_name,
                          x1_start, x1_end, x2_start, x2_end, heat_division, other_x,
                          vmin, vmax, ax, plot_scatter, maxerror, rank_dict, scatter_hue_dict,
                          rounddigit_rank, rounddigit_x1, rounddigit_x2,
                          heat_kws=None, scatter_kws=None, legend_kws=None):
        """
        Draw one heatmap of regression predictions plus the overlaid scatter plots
        (the drawing part of the ``regression_heat_plot`` method).

        Parameters
        ----------
        trained_estimator : estimator object implementing ``predict``
            Already-fitted regression estimator used to predict on the grid.
        x_heat : list[str]
            Names of the two variables shown on the heatmap axes.
        y_true_col, y_pred_col : str
            Column names of observed and predicted values in ``data``.
        rank_col : str
            Column displayed with the worst-error annotations ('index' when
            the caller did not specify a field).
        data : pd.DataFrame
            Data to scatter/annotate on top of the heatmap.
        x_heat_indices : list[int]
            Positions of the two heatmap variables within the full X matrix.
        hue_name : str
            Column used for hue-based scatter coloring (only used when
            ``plot_scatter == 'hue'``).
        x1_start, x1_end, x2_start, x2_end : float
            Axis ranges of the heatmap grid.
        heat_division : int
            Grid resolution per axis.
        other_x : list[float]
            Fixed values for the 0-2 explanatory variables not on the heatmap.
        vmin, vmax : float
            Color scale limits of the heatmap.
        ax : matplotlib.axes.Axes
            Target axes (current axes via matplotlib.pyplot.gca() if None).
        plot_scatter : {'error', 'true', 'hue', None}
            Coloring rule of the overlaid scatter plot (no scatter if None).
        maxerror : float
            Color scale limit used when coloring the scatter by error.
        rank_dict : dict[int, int]
            Maps data index -> error rank for the annotated worst points.
        scatter_hue_dict : dict
            Maps hue value -> color (only used when ``plot_scatter == 'hue'``).
        rounddigit_rank, rounddigit_x1, rounddigit_x2 : int
            Decimal digits for the annotation error and the two axis labels.
        heat_kws, scatter_kws, legend_kws : dict, optional
            Extra keyword arguments for sns.heatmap(), the scatter call and
            ax.legend() respectively.  NOTE(review): ``heat_kws``/``scatter_kws``
            are mutated in place when defaults are filled in.
        """
        # Use matplotlib.pyplot.gca() when no axes is given
        if ax is None:
            ax=plt.gca()
        # Build the grid data for the heatmap
        xx = np.linspace(x1_start, x1_end, heat_division)
        yy = np.linspace(x2_start, x2_end, heat_division)
        X1, X2 = np.meshgrid(xx, yy)
        X_grid = np.c_[X1.ravel(), X2.ravel()]
        df_heat = pd.DataFrame(X_grid, columns=x_heat)
        # Build an ndarray holding all explanatory variables for prediction
        # (variables not shown on the heatmap are appended as fixed values from other_x)
        n_rows = X_grid.shape[0]
        X_all = []
        other_add_flg = False
        for i in range(2 + len(other_x)):
            if i == x_heat_indices[0]:  # add the first heatmap variable
                X_all.append(X_grid[:, 0].reshape(n_rows, 1))
            elif i == x_heat_indices[1]:  # add the second heatmap variable
                X_all.append(X_grid[:, 1].reshape(n_rows, 1))
            elif len(other_x) >= 1 and not other_add_flg:  # add the first non-heatmap variable as a fixed value
                X_all.append(np.full((n_rows, 1), other_x[0]))
                other_add_flg = True
            elif len(other_x) == 2:  # add the second non-heatmap variable as a fixed value
                X_all.append(np.full((n_rows, 1), other_x[1]))
        X_all = np.hstack(X_all)
        # Predict on the grid data with the already-fitted estimator
        y_pred_grid = trained_estimator.predict(X_all)
        df_heat['y_pred'] = pd.Series(y_pred_grid)
        # Round the axis values of the grid data for display
        df_heat[x_heat[0]] = df_heat[x_heat[0]].map(lambda x: cls._round_digits(x, rounddigit=rounddigit_x1))
        df_heat[x_heat[1]] = df_heat[x_heat[1]].map(lambda x: cls._round_digits(x, rounddigit=rounddigit_x2))
        # Pivot the grid data into the heatmap matrix
        df_heat_pivot = pd.pivot_table(data=df_heat, values='y_pred',
                                       columns=x_heat[0], index=x_heat[1], aggfunc=np.mean)
        # Fewer columns than heat_division means the rounding collapsed distinct
        # x1 values: ask the caller for a larger rounddigit_x1
        if len(df_heat_pivot.columns) < heat_division:
            raise Exception(f'the "rounddigit_x1" argument must be bigger than {rounddigit_x1} because of the shortage of the "{x_heat[0]}" resolution')
        # Same check for the x2 (row) resolution
        if len(df_heat_pivot) < heat_division:
            raise Exception(f'the "rounddigit_x2" argument must be bigger than {rounddigit_x2} because of the shortage of the "{x_heat[1]}" resolution')
        # Default the heatmap colormap to YlGn
        if 'cmap' not in heat_kws.keys():
            heat_kws['cmap'] = 'YlGn'
        # Plot the heatmap
        sns.heatmap(df_heat_pivot, ax=ax, vmax=vmax, vmin=vmin, center=(vmax+vmin)/2, **heat_kws)
        # Overlay the scatter plot of the actual data
        if plot_scatter is not None:
            # Heatmap axes run 0..heat_division, so rescale the data coordinates
            x1_scatter = 0.5 + (data[x_heat[0]].values - x1_start) * (heat_division - 1) / (x1_end - x1_start)
            x2_scatter = 0.5 + (data[x_heat[1]].values - x2_start) * (heat_division - 1) / (x2_end - x2_start)
            # Choose the color coding
            if plot_scatter == 'error':  # color by prediction error
                scatter_c = data[y_pred_col].values - data[y_true_col].values
                scatter_vmin = -maxerror
                scatter_vmax = maxerror
                if 'cmap' not in scatter_kws.keys():  # default scatter colormap: seismic
                    scatter_kws['cmap'] = 'seismic'
            elif plot_scatter == 'true':  # color by observed value
                scatter_c = data[y_true_col].values
                scatter_vmin = vmin
                scatter_vmax = vmax
                if 'cmap' not in scatter_kws.keys():  # default: reuse the heatmap colormap
                    scatter_kws['cmap'] = heat_kws['cmap']
                if 'edgecolors' not in scatter_kws.keys():  # default marker edge color: brown
                    scatter_kws['edgecolors'] = 'brown'
            # Scatter colored by error or observed value
            if plot_scatter == 'error' or plot_scatter == 'true':
                ax.scatter(x1_scatter, x2_scatter, vmin=scatter_vmin, vmax=scatter_vmax, c=scatter_c, **scatter_kws)
            # Scatter colored by the hue column (one scatter call per group)
            if plot_scatter == 'hue':
                scatter_data = pd.DataFrame(np.stack([x1_scatter, x2_scatter, data[hue_name]], 1), columns=['x1', 'x2', hue_name])
                for name, group in scatter_data.groupby(hue_name):
                    ax.scatter(group['x1'].values, group['x2'].values, label=name, c=scatter_hue_dict[name], **scatter_kws)
                ax.legend(**legend_kws)
        # Annotate the points with the largest errors
        df_rank = data[data.index.isin(rank_dict.keys())]
        for index, row in df_rank.iterrows():
            # When rank_col is 'index' the value was coerced to float, so cast back to int
            rank_col_value = int(row[rank_col]) if rank_col == 'index' else row[rank_col]
            # Compute and format the error annotation
            error = cls._round_digits(row['y_pred'] - row['y_true'], rounddigit=rounddigit_rank)
            rank_text = f' no{rank_dict[index]+1}\n-<-error={error}\n {rank_col}={rank_col_value}'
            # Heatmap axes run 0..heat_division, so rescale before placing the text
            x1_text = 0.5 + (row[x_heat[0]] - x1_start) * (heat_division - 1) / (x1_end - x1_start)
            x2_text = 0.5 + (row[x_heat[1]] - x2_start) * (heat_division - 1) / (x2_end - x2_start)
            ax.text(x1_text, x2_text, rank_text, verticalalignment='center', horizontalalignment='left')
    @classmethod
    def _reg_heat_plot(cls, trained_estimator, X, y_pred, y_true, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
                       pair_sigmarange=1.0, pair_sigmainterval=0.5, heat_extendsigma=0.5, heat_division=30,
                       vmin=None, vmax=None, plot_scatter='true', maxerror=None,
                       rank_number=None, rank_col=None, rank_col_data=None, scatter_hue_dict=None,
                       rounddigit_rank=None, rounddigit_x1=None, rounddigit_x2=None, rounddigit_x3=None,
                       cv_index=None, subplot_kws=None, heat_kws=None, scatter_kws=None, legend_kws=None):
        """
        Dispatch the regression-heatmap drawing according to the number of
        explanatory variables (the part of ``regression_heat_plot`` that splits
        the data into subplots).

        With 2 explanatory variables a single heatmap is drawn; with 3, a
        column of ``pair_n`` heatmaps sliced along the standardized third
        variable; with 4, a ``pair_n`` x ``pair_n`` grid sliced along the
        standardized third and fourth variables.  Each cell is rendered by
        ``_reg_heat_plot_2d``.
        """
        # Number of explanatory variables (2 to 4, validated by the caller)
        x_num = X.shape[1]
        # DataFrame of the two heatmap variables
        df_heat = pd.DataFrame(X[:, x_heat_indices], columns=x_heat)
        # DataFrame of the variables NOT shown on the heatmap
        X_not_heat = X[:, [i for i in range(X.shape[1]) if i not in x_heat_indices]]
        df_not_heat = pd.DataFrame(X_not_heat, columns=x_not_heat)
        # Join everything and append observed/predicted objective values
        df_all = df_heat.join(df_not_heat)
        df_all = df_all.join(pd.DataFrame(y_true, columns=['y_true']))
        df_all = df_all.join(pd.DataFrame(y_pred, columns=['y_pred']))
        # Standardize the non-heatmap variables and append them (used for slicing)
        if x_num >= 3:
            X_not_heat_norm = stats.zscore(X_not_heat)
            df_all = df_all.join(pd.DataFrame(X_not_heat_norm, columns=[f'normalize_{c}' for c in x_not_heat]))
        # Append the ID field used for worst-error annotations
        rank_col = 'index' if rank_col is None else rank_col
        df_all = df_all.join(pd.DataFrame(rank_col_data, columns=[rank_col]))
        # Append the hue column for scatter coloring (only when hue_name is given)
        if hue_name is not None:
            df_all = df_all.join(pd.DataFrame(hue_data, columns=[hue_name]))
        # Rank the absolute errors (index -> rank mapping for the worst points)
        if rank_number is not None:
            y_error_abs = np.abs(y_pred - y_true)
            rank_index = np.argsort(-y_error_abs)[:rank_number]
            rank_dict = dict(zip(rank_index.tolist(), range(rank_number)))
        else:
            rank_dict = {}
        # Display ranges of the heatmap X1 and X2 axes (min/max extended by heat_extendsigma std devs)
        x1_min = np.min(X[:, x_heat_indices[0]])
        x1_max = np.max(X[:, x_heat_indices[0]])
        x1_std = np.std(X[:, x_heat_indices[0]])
        x1_start = x1_min - x1_std * heat_extendsigma
        x1_end = x1_max + x1_std * heat_extendsigma
        x2_min = np.min(X[:, x_heat_indices[1]])
        x2_max = np.max(X[:, x_heat_indices[1]])
        x2_std = np.std(X[:, x_heat_indices[1]])
        x2_start = x2_min - x2_std * heat_extendsigma
        x2_end = x2_max + x2_std * heat_extendsigma
        # Number of subplot slices (2 outside pair_sigmarange + int(pair_sigmarange / pair_sigmainterval) * 2 inside)
        pair_n = int(pair_sigmarange / pair_sigmainterval) * 2 + 2
        # Lower bound (standardized) of the sliced range of the non-heatmap variables
        pair_min = -(pair_n - 2) / 2 * pair_sigmainterval
        # 2 explanatory variables: a single plot
        if x_num == 2:
            pair_w = 1
            pair_h = 1
        # 3 explanatory variables: pair_n x 1 plots
        elif x_num == 3:
            pair_w = 1
            pair_h = pair_n
        # 4 explanatory variables: pair_n x pair_n plots
        elif x_num == 4:
            pair_w = pair_n
            pair_h = pair_n
        # Default figsize (size of the whole figure)
        if 'figsize' not in subplot_kws.keys():
            subplot_kws['figsize'] = (pair_w * 6, pair_h * 5)
        # Create the axes grid
        fig, axes = plt.subplots(pair_h, pair_w, **subplot_kws)
        if cv_index is not None:
            fig.suptitle(f'CV No.{cv_index}')
        # Draw each subplot
        for i in range(pair_h):
            for j in range(pair_w):
                # Lower bound (standardized) of the vertical slice variable
                if i == 0:
                    h_min = -float('inf')
                    h_mean = pair_min - pair_sigmainterval / 2  # representative value used to fix the non-heatmap variable
                else:
                    h_min = pair_min + (i - 1) * pair_sigmainterval
                    h_mean = pair_min + (i - 0.5) * pair_sigmainterval  # representative value used to fix the non-heatmap variable
                # Upper bound (standardized) of the vertical slice variable
                if i == pair_h - 1:
                    h_max = float('inf')
                else:
                    h_max = pair_min + i * pair_sigmainterval
                # Lower bound (standardized) of the horizontal slice variable
                if j == 0:
                    w_min = -float('inf')
                    w_mean = pair_min - pair_sigmainterval / 2  # representative value used to fix the non-heatmap variable
                else:
                    w_min = pair_min + (j - 1) * pair_sigmainterval
                    w_mean = pair_min + (j - 0.5) * pair_sigmainterval  # representative value used to fix the non-heatmap variable
                # Upper bound (standardized) of the horizontal slice variable
                if j == pair_w - 1:
                    w_max = float('inf')
                else:
                    w_max = pair_min + j * pair_sigmainterval
                # 2 explanatory variables: single plot, no slicing
                if x_num == 2:
                    ax = axes
                    df_pair = df_all.copy()
                    other_x = []
                # 3 explanatory variables: slice along the standardized third variable
                elif x_num == 3:
                    ax = axes[i]
                    # Keep only the rows within the vertical slice range
                    df_pair = df_all[(df_all[f'normalize_{x_not_heat[0]}'] >= h_min) & (df_all[f'normalize_{x_not_heat[0]}'] < h_max)].copy()
                    # Undo the standardization for the fixed prediction value
                    x3_mean = np.mean(X_not_heat[:, 0])
                    x3_std = np.std(X_not_heat[:, 0])
                    other_x = [h_mean * x3_std + x3_mean]
                # 4 explanatory variables: slice along the third and fourth variables
                elif x_num == 4:
                    # NOTE(review): axes are indexed [j, i] while the loops use i for
                    # rows and j for columns — looks transposed; confirm intended
                    ax = axes[j, i]
                    # Keep only the rows within the vertical slice range
                    df_pair = df_all[(df_all[f'normalize_{x_not_heat[0]}'] >= h_min) & (df_all[f'normalize_{x_not_heat[0]}'] < h_max)].copy()
                    # Keep only the rows within the horizontal slice range
                    df_pair = df_pair[(df_pair[f'normalize_{x_not_heat[1]}'] >= w_min) & (df_pair[f'normalize_{x_not_heat[1]}'] < w_max)]
                    # Undo the standardization for the fixed prediction values
                    x3_mean = np.mean(X_not_heat[:, 0])
                    x3_std = np.std(X_not_heat[:, 0])
                    x4_mean = np.mean(X_not_heat[:, 1])
                    x4_std = np.std(X_not_heat[:, 1])
                    other_x = [h_mean * x3_std + x3_mean, w_mean * x4_std + x4_mean]
                cls._reg_heat_plot_2d(trained_estimator, x_heat, 'y_true', 'y_pred', rank_col, df_pair, x_heat_indices, hue_name,
                                      x1_start, x1_end, x2_start, x2_end, heat_division, other_x,
                                      vmin, vmax, ax, plot_scatter, maxerror, rank_dict, scatter_hue_dict,
                                      rounddigit_rank, rounddigit_x1, rounddigit_x2,
                                      heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
                # Subplot title: the slice range of the non-heatmap variable(s)
                if x_num == 3:
                    if i == 0:
                        ax.set_title(f'{x_not_heat[0]}=- {cls._round_digits(h_max * x3_std + x3_mean, rounddigit=rounddigit_x3)} (- {h_max}σ)')
                    elif i == pair_h - 1:
                        ax.set_title(f'{x_not_heat[0]}={cls._round_digits(h_min * x3_std + x3_mean, rounddigit=rounddigit_x3)} - ({h_min}σ -)')
                    else:
                        ax.set_title(f'{x_not_heat[0]}={cls._round_digits(h_min * x3_std + x3_mean, rounddigit=rounddigit_x3)} - {cls._round_digits(h_max * x3_std + x3_mean, rounddigit=rounddigit_x3)} ({h_min}σ - {h_max}σ)')
                if x_num == 4:
                    ax.set_title(f'{x_not_heat[0]}= {h_min}σ - {h_max}σ {x_not_heat[1]}= {w_min}σ - {w_max}σ')
        # Use tight_layout so the labels do not overlap
        plt.tight_layout(rect=[0, 0, 1, 0.98])
@classmethod
def regression_heat_plot(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
x_colnames: List[str] = None, x_heat: List[str] = None, scatter_hue=None,
pair_sigmarange = 1.0, pair_sigmainterval = 0.5, heat_extendsigma = 0.5,
heat_division = 30, color_extendsigma = 0.5,
plot_scatter = 'true', rounddigit_rank=3, rounddigit_x1=2, rounddigit_x2=2, rounddigit_x3=2,
rank_number=None, rank_col=None,
cv=None, cv_seed=42, cv_group=None, display_cv_indices = 0,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, heat_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot regression heatmaps of any scikit-learn regressor with 2 to 4D explanatory variables.
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : list[str] or np.ndarray
Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
y : str or np.ndarray
Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
data: pd.DataFrame
Input data structure.
x_colnames: list[str], optional
Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
x_heat: list[str], optional
X-axis and y-axis variables of heatmap. If None, use two variables in ``x`` from the front.
scatter_hue : str, optional
Grouping variable that will produce points with different colors. Available only if plot_scatter is set to ``hue``.
pair_sigmarange: float, optional
Set the range of subplots. The lower limit is mean({x3, x4}) - ``pair_sigmarange`` * std({x3, x4}). The higher limit is mean({x3, x4}) + ``pair_sigmarange`` * std({x3, x4}). Available only if len(x) is bigger than 2.
pair_sigmainterval: float, optional
Set the interval of subplots. For example, if ``pair_sigmainterval`` is set to 0.5 and ``pair_sigmarange`` is set to 1.0, The ranges of subplots are lower than μ-1σ, μ-1σ to μ-0.5σ, μ-0.5σ to μ, μ to μ+0.5σ, μ+0.5σ to μ+1σ, and higher than μ+1σ. Available only if len(x) is bigger than 2.
heat_extendsigma: float, optional
Set the axis view limits of the heatmap. The lower limit is min({x1, x2}) - std({x1, x2}) * ``heat_extendsigma``. The higher limit is max({x1, x2}) + std({x1, x2}) * ``heat_extendsigma``
heat_division: int, optional
Resolution of the heatmap.
color_extendsigma: float, optional
Set the colormap limits of the heatmap. The lower limit is min(y_ture) - std(y_ture) * ``color_extendsigma``. The higher limit is max(y_ture) - std(y_ture) * ``color_extendsigma``.
plot_scatter: {'error', 'true', 'hue'}, optional
Color decision of scatter plot. If 'error', to be mapped to colors using error value. If 'true', to be mapped to colors using y_ture value. If 'hue', to be mapped to colors using scatter_hue variable. If None, no scatter.
rounddigit_rank: int, optional
Round a number of error that are in the top posiotions for regression error to a given precision in decimal digits.
rounddigit_x1: int, optional
Round a number of x-axis valiable of the heatmap to a given precision in decimal digits.
rounddigit_x2: int, optional
Round a number of y-axis valiable of the heatmap to a given precision in decimal digits.
rounddigit_x3: int, optional
Round a number of y-axis valiable of subplots to a given precision in decimal digits.
rank_number: int, optional
Number of emphasized data that are in the top posiotions for regression error.
rank_col: str, optional
Variables that are displayed with emphasized data that are in the top posiotions for regression error.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
display_cv_indices : int or list, optional
Cross validation index or indices to display.
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
subplot_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
heat_kws: dict, optional
Additional parameters passed to sns.heatmap(), e.g. ``cmap``. See https://seaborn.pydata.org/generated/seaborn.heatmap.html
scatter_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.scatter(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data(x, y, data,
x_colnames,
cv_group)
# 説明変数xの次元が2~4以外ならエラーを出す
if len(x_colnames) < 2 or len(x_colnames) > 4:
raise Exception('Dimension of x must be 2 to 4')
# display_cv_indicesをList化
if isinstance(display_cv_indices, int):
display_cv_indices = [display_cv_indices]
elif not isinstance(x_colnames, list):
raise Exception('the "cv_display_indices" argument must be int or List[int]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# heat_kwsがNoneなら空のdictを入力
if heat_kws is None:
heat_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# ヒートマップ表示用の列を抽出
if x_heat is None: # 列名指定していないとき、前から2列を抽出
x_heat = x_colnames[:2]
x_heat_indices = [0, 1]
else: # 列名指定しているとき、該当列のXにおけるインデックス(0~3)を保持
if len(x_heat) != 2:
raise Exception('length of x_heat must be 2')
x_heat_indices = []
for colname in x_heat:
x_heat_indices.append(x_colnames.index(colname))
# ヒートマップ表示以外の列
x_not_heat = [colname for colname in x_colnames if colname not in x_heat]
# ヒートマップの色分け最大最小値(y_trueの最大最小値 ± y_trueの標準偏差 × color_extendsigma)
y_true_std = np.std(y_true)
vmin = np.min(y_true) - y_true_std * color_extendsigma
vmax = np.max(y_true) + y_true_std * color_extendsigma
# 引数plot_scatter='hue'とscatter_hueが同時指定されていないとき、エラーを出す
if scatter_hue is not None:
if plot_scatter != 'hue' and not isinstance(cv, GroupKFold) and not isinstance(cv, LeaveOneGroupOut):
raise Exception('the "plot_scatter" argument must be "hue" when the argument "scatter_hue" is not None')
elif plot_scatter == 'hue':
raise Exception('the "scatter_hue" argument is required when the argument "plot_scatter" is "hue"')
# 引数plot_scatter='hue'のとき、色分け対象列とカラーマップを紐づけ(色分けを全ての図で統一用)
if plot_scatter == 'hue':
hue_list = data[scatter_hue].values.tolist()
hue_list = sorted(set(hue_list), key=hue_list.index)
scatter_hue_dict = dict(zip(hue_list, cls._HEAT_SCATTER_HUECOLORS[0:len(hue_list)]))
else:
scatter_hue_dict = None
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
y_pred = estimator.predict(X)
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_data = data.index.values
else: # 表示フィールド指定あるとき
rank_col_data = data[rank_col].values
else:
rank_col_data = None
# 誤差最大値
maxerror = np.max(np.abs(y_pred - y_true))
# 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効)
hue_data = data[scatter_hue] if scatter_hue is not None and plot_scatter=='hue' else None
hue_name = scatter_hue if scatter_hue is not None and plot_scatter=='hue' else None
# ヒートマップをプロット
cls._reg_heat_plot(estimator, X, y_pred, y_true, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
pair_sigmarange = pair_sigmarange, pair_sigmainterval=pair_sigmainterval, heat_extendsigma=heat_extendsigma, heat_division=heat_division,
vmin=vmin, vmax=vmax, plot_scatter=plot_scatter, maxerror=maxerror,
rank_number=rank_number, rank_col=rank_col, rank_col_data=rank_col_data, scatter_hue_dict=scatter_hue_dict,
rounddigit_rank=rounddigit_rank, rounddigit_x1=rounddigit_x1, rounddigit_x2=rounddigit_x2, rounddigit_x3=rounddigit_x3,
cv_index=None, subplot_kws=subplot_kws, heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
# LeaveOneOutのときエラーを出す
if isinstance(cv, LeaveOneOut):
raise Exception('"regression_heat_plot" method does not support "LeaveOneOut" cross validation')
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# クロスバリデーション
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示対象以外のCVなら飛ばす
if i not in display_cv_indices:
continue
print(f'cv_number={i}/{cv_num}')
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
X_test = X[test]
y_test = y_true[test]
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
y_pred = estimator.predict(X_test)
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_test = data.index.values[test]
else: # 表示フィールド指定あるとき
rank_col_test = data[rank_col].values[test]
else:
rank_col_test = None
# 誤差最大値
maxerror = np.max(np.abs(y_pred - y_test))
# 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効))
hue_data = data[scatter_hue].values[test] if scatter_hue is not None and plot_scatter=='hue' else None
hue_name = scatter_hue if scatter_hue is not None and plot_scatter=='hue' else None
# ヒートマップをプロット
cls._reg_heat_plot(estimator, X_test, y_pred, y_test, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
pair_sigmarange = pair_sigmarange, pair_sigmainterval = pair_sigmainterval, heat_extendsigma=heat_extendsigma, heat_division=heat_division,
vmin=vmin, vmax=vmax, plot_scatter = plot_scatter, maxerror=maxerror,
rank_number=rank_number, rank_col=rank_col, rank_col_data=rank_col_test, scatter_hue_dict=scatter_hue_dict,
rounddigit_rank=rounddigit_rank, rounddigit_x1=rounddigit_x1, rounddigit_x2=rounddigit_x2, rounddigit_x3=rounddigit_x3,
cv_index=i, subplot_kws=subplot_kws, heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
# seaborn_analyzer/custom_reg_plot.py
from typing import List, Dict
import seaborn as sns
import matplotlib.pyplot as plt
import numbers
import numpy as np
import pandas as pd
from scipy import stats
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error, mean_absolute_percentage_error
from sklearn.model_selection import KFold, LeaveOneOut, GroupKFold, LeaveOneGroupOut
import decimal
from ._cv_eval_set import init_eval_set, _make_transformer, _eval_set_selection, cross_val_score_eval_set
class regplot():
    """
    Visualization helpers for scikit-learn regression estimators:
    predicted-vs-true scatter plots (``regression_pred_true``) and
    prediction heatmaps (``regression_heat_plot``), with optional
    cross-validation support.
    """
    # Scatter-plot color cycle used by regression_heat_plot to distinguish
    # the groups of the ``scatter_hue`` variable consistently across subplots
    _HEAT_SCATTER_HUECOLORS = ['red', 'mediumblue', 'darkorange', 'darkmagenta', 'cyan', 'pink', 'brown', 'gold', 'grey']
def _round_digits(src: float, rounddigit: int = None, method='decimal'):
"""
指定桁数で小数を丸める
Parameters
----------
src : float
丸め対象の数値
rounddigit : int
フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定)
method : int
桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
if method == 'decimal':
return round(src, rounddigit)
elif method == 'sig':
with decimal.localcontext() as ctx:
ctx.prec = rounddigit
return ctx.create_decimal(src)
elif method == 'format':
return '{:.{width}g}'.format(src, width=rounddigit)
@classmethod
def _round_dict_digits(cls, srcdict: Dict[str, float], rounddigit: int = None, method='decimal'):
"""
指定桁数でdictの値を丸める
Parameters
----------
srcdict : dict[str, float]
丸め対象のdict
rounddigit : int
フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定)
method : int
桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定)
"""
dstdict = {}
for k, v in srcdict.items():
if rounddigit is not None and isinstance(v, float):
dstdict[k] = cls._round_digits(v, rounddigit=rounddigit, method=method)
else:
dstdict[k] = v
return dstdict
def _make_score_dict(y_true, y_pred, scores):
"""
回帰評価指標を算出してdict化
"""
score_dict = {}
for scoring in scores:
if scoring == 'r2':
score_dict['r2'] = r2_score(y_true, y_pred)
elif scoring == 'mae':
score_dict['mae'] = mean_absolute_error(y_true, y_pred)
elif scoring == 'mse':
score_dict['mse'] = mean_squared_error(y_true, y_pred, squared=True)
elif scoring == 'rmse':
score_dict['rmse'] = mean_squared_error(y_true, y_pred, squared=False)
elif scoring == 'rmsle':
score_dict['rmsle'] = mean_squared_log_error(y_true, y_pred)
elif scoring == 'mape':
score_dict['mape'] = mean_absolute_percentage_error(y_true, y_pred)
elif scoring == 'max_error':
score_dict['max_error'] = max([abs(p - r) for r, p in zip(y_true, y_pred)])
return score_dict
def _reshape_input_data(x, y, data, x_colnames, cv_group):
"""
入力データの形式統一(pd.DataFrame or np.ndarray)
"""
# dataがpd.DataFrameのとき
if isinstance(data, pd.DataFrame):
if not isinstance(x, list):
raise Exception('`x` argument should be list[str] if `data` is pd.DataFrame')
if not isinstance(y, str):
raise Exception('`y` argument should be str if `data` is pd.DataFrame')
if x_colnames is not None:
raise Exception('`x_colnames` argument should be None if `data` is pd.DataFrame')
X = data[x].values
y_true = data[y].values
x_colnames = x
y_colname = y
cv_group_colname = cv_group
# dataがNoneのとき(x, y, cv_groupがnp.ndarray)
elif data is None:
if not isinstance(x, np.ndarray):
raise Exception('`x` argument should be np.ndarray if `data` is None')
if not isinstance(y, np.ndarray):
raise Exception('`y` argument should be np.ndarray if `data` is None')
X = x if len(x.shape) == 2 else x.reshape([x.shape[0], 1])
y_true = y.ravel()
# x_colnameとXの整合性確認
if x_colnames is None:
x_colnames = list(range(X.shape[1]))
elif X.shape[1] != len(x_colnames):
raise Exception('width of X must be equal to length of x_colnames')
else:
x_colnames = x_colnames
y_colname = 'objective_variable'
if cv_group is not None: # cv_group指定時
cv_group_colname = 'group'
data = pd.DataFrame(np.column_stack((X, y_true, cv_group)),
columns=x_colnames + [y_colname] + [cv_group_colname])
else:
cv_group_colname = None
data = pd.DataFrame(np.column_stack((X, y)),
columns=x_colnames + [y_colname])
else:
raise Exception('`data` argument should be pd.DataFrame or None')
return X, y_true, data, x_colnames, y_colname, cv_group_colname
@classmethod
def _rank_display(cls, y_true, y_pred, rank_number, rank_col, rank_col_data, x=None, ax=None, rounddigit=None):
"""
誤差上位を文字プロット
Parameters
----------
y_true : np.ndarray
目的変数実測値
y_pred : np.ndarray
目的変数予測値
rank_number : int
誤差上位何番目までを文字表示するか
rank_col : List[str]
誤差上位と一緒に表示するフィールド名 (NoneならIndexを使用)
x : np.ndarray
説明変数の値 (Noneなら横軸y_true縦軸y_pred、Noneでなければ横軸x縦軸y_true)
ax : matplotlib.axes.Axes
表示対象のax(Noneならmatplotlib.pyplot.plotで1枚ごとにプロット)
rounddigit: int
表示指標の小数丸め桁数
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
if rank_col is None:
rank_col = 'index'
y_error = y_pred - y_true
y_error_abs = np.abs(y_error)
rank_index = np.argsort(-y_error_abs)[:rank_number]
for rank, i in enumerate(rank_index):
error = cls._round_digits(y_error[i], rounddigit=rounddigit, method='decimal')
rank_text = f' no{rank+1}\n-<-error={error}\n {rank_col}={rank_col_data[i]}'
if x is None: # 横軸y_true縦軸y_pred (regression_pred_trueメソッド用)
ax.text(y_true[i], y_pred[i], rank_text, verticalalignment='center', horizontalalignment='left')
else: # 横軸x縦軸y_true (regression_plot_1dメソッド用)
ax.text(x[i], y_true[i], rank_text, verticalalignment='center', horizontalalignment='left')
@classmethod
def _scatterplot_ndarray(cls, x, x_name, y, y_name, hue_data, hue_name, ax, scatter_kws, legend_kws):
"""
np.ndarrayを入力として散布図表示(scatterplot)
"""
# X値とY値を合体してDataFrame化
data = np.stack([x, y], axis=1)
data = pd.DataFrame(data, columns=[x_name, y_name])
# 色分け指定しているとき、色分け用のフィールドを追加
if hue_data is not None:
if hue_name is None:
hue_name = 'hue'
data[hue_name] = pd.Series(hue_data)
# 散布図プロット
sns.scatterplot(x=x_name, y=y_name, data=data, ax=ax, hue=hue_name, **scatter_kws)
# 凡例追加
if 'title' not in legend_kws.keys():
legend_kws['title'] = hue_name
ax.legend(**legend_kws)
@classmethod
def _plot_pred_true(cls, y_true, y_pred, hue_data=None, hue_name=None, ax=None,
linecolor='red', linesplit=200, rounddigit=None,
score_dict=None, scatter_kws=None, legend_kws=None):
"""
予測値と実測値を、回帰評価指標とともにプロット
Parameters
----------
y_true : ndarray
目的変数実測値
y_pred : ndarray
目的変数予測値
hue_data : ndarray
色分け用ラベルデータ
hue_name : str
色分け用の列名
ax : matplotlib.axes.Axes
表示対象のax (Noneならmatplotlib.pyplot.plotで1枚ごとにプロット)
linecolor : str
予測値=実測値の線の色
linesplit : int
フィッティング線の分割数 (カクカクしたら増やす)
rounddigit: int
表示指標の小数丸め桁数
score_dict : dict[str, float]
算出した評価指標一覧
scatter_kws : dict
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
# score_dictがNoneのとき、空のDictを加瀬宇
if score_dict is None:
score_dict = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# 散布図プロット
cls._scatterplot_ndarray(y_true, 'y_true', y_pred, 'y_pred', hue_data, hue_name, ax, scatter_kws, legend_kws)
# 予測値=実測値の線を作成
true_min = np.amin(y_true)
true_max = np.amax(y_true)
true_line = np.linspace(true_min, true_max, linesplit)
# 評価指標文字列作成
score_list = [f'{k}={v}' for k, v in cls._round_dict_digits(score_dict, rounddigit, 'sig').items()]
score_text = "\n".join(score_list)
# 線と文字をプロット
ax.plot(true_line, true_line, color=linecolor)
ax.text(true_max, np.amin(y_pred), score_text, verticalalignment='bottom', horizontalalignment='right')
    @classmethod
    def regression_pred_true(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
                             x_colnames: List[str] = None, hue=None, linecolor='red', rounddigit=3,
                             rank_number=None, rank_col=None, scores='mae',
                             cv_stats='mean', cv=None, cv_seed=42, cv_group=None, ax=None,
                             estimator_params=None, fit_params=None, eval_set_selection=None,
                             subplot_kws=None, scatter_kws=None, legend_kws=None):
        """
        Plot prediction vs. true scatter plots of any scikit-learn regression estimator

        Parameters
        ----------
        estimator : estimator object implementing ``fit``
            Regression estimator. This is assumed to implement the scikit-learn estimator interface.
        x : str or list[str]
            Explanatory variables.
        y : str
            Objective variable.
        data : pd.DataFrame
            Input data structure.
        x_colnames: list[str], optional
            Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
        hue : str, optional
            Grouping variable that will produce points with different colors.
        linecolor : str, optional
            Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html
        rounddigit: int, optional
            Round a number of score to a given precision in decimal digits.
        rank_number : int, optional
            Number of emphasized data that are in the top positions for regression error.
        rank_col : list[str], optional
            Variables that are displayed with emphasized data that are in the top positions for regression error.
        scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list, optional
            Regression score that are displayed at the lower right of the graph.
        cv_stats : {'mean', 'median', 'max', 'min'}, optional
            Statistical method of cross validation score that are displayed at the lower right of the graph.
        cv : int, cross-validation generator, or an iterable, optional
            Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
        cv_seed : int, optional
            Seed for random number generator of cross validation.
        cv_group: str, optional
            Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
        ax : {matplotlib.axes.Axes, list[matplotlib.axes.Axes]}, optional
            Pre-existing axes for the plot or list of it. Otherwise, call matplotlib.pyplot.subplot() internally.
        estimator_params : dict, optional
            Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
        fit_params : dict, optional
            Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
        eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
            Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None.
            If "all", use all data in `X` and `y`.
            If "train", select train data from `X` and `y` using cv.split().
            If "test", select test data from `X` and `y` using cv.split().
            If "original", use raw `eval_set`.
            If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
        subplot_kws : dict, optional
            Additional parameters passed to matplotlib.pyplot.subplots(), e.g. figsize. Available only if ``axes`` is None. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
        scatter_kws: dict, optional
            Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
        legend_kws : dict
            Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html

        Returns
        ----------
        score_dict : dict
            Validation scores, e.g. r2, mae and rmse
        """
        # Unify the format of the input data
        X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
                                                                                           y, data,
                                                                                           x_colnames,
                                                                                           cv_group)
        # Unify the "scores" argument into a list
        if scores is None:
            scores = []
        elif isinstance(scores, str):
            scores = [scores]
        elif not isinstance(scores, list):
            raise Exception('the "scores" argument must be str or list[str]')
        # Apply estimator parameters if specified
        if estimator_params is not None:
            estimator.set_params(**estimator_params)
        # Use an empty dict when fit_params is None
        if fit_params is None:
            fit_params = {}
        # Use an empty dict when subplot_kws is None
        if subplot_kws is None:
            subplot_kws = {}
        # Use an empty dict when scatter_kws is None
        if scatter_kws is None:
            scatter_kws = {}
        # Use an empty dict when legend_kws is None
        if legend_kws is None:
            legend_kws = {}
        # Branch on whether cross validation is used
        # Without cross validation (plot and score on the training data)
        if cv is None:
            # Fit and predict
            estimator.fit(X, y_true, **fit_params)
            y_pred = estimator.predict(X)
            # Calculate evaluation scores
            score_dict = cls._make_score_dict(y_true, y_pred, scores)
            # Data used for color grouping
            hue_data = None if hue is None else data[hue]
            hue_name = None if hue is None else hue
            # Data displayed alongside the largest prediction errors
            if rank_number is not None:
                if rank_col is None:  # use the index when no display field is specified
                    rank_col_data = data.index.values
                else:  # use the specified display field
                    rank_col_data = data[rank_col].values
            # Plot predicted vs. true values
            cls._plot_pred_true(y_true, y_pred, hue_data=hue_data, hue_name=hue_name, ax=ax,
                                linecolor=linecolor, rounddigit=rounddigit, score_dict=score_dict,
                                scatter_kws=scatter_kws, legend_kws=legend_kws)
            # Annotate the largest prediction errors
            if rank_number is not None:
                cls._rank_display(y_true, y_pred, rank_number, rank_col, rank_col_data, rounddigit=rounddigit)
            return score_dict
        # With cross validation (plot and score each fold separately)
        if cv is not None:
            # When only a fold count is given, split randomly with KFold using the seed
            if isinstance(cv, numbers.Integral):
                cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
            # Whether the splitter is LeaveOneOut
            isLeaveOneOut = isinstance(cv, LeaveOneOut)
            # Pass cv_group as the grouping target (GroupKFold, LeaveOneGroupOut, etc.)
            split_kws={}
            if cv_group_colname is not None:
                split_kws['groups'] = data[cv_group_colname].values
            elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
                raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
            # For LeaveOneGroupOut, the number of folds equals the number of groups
            if isinstance(cv, LeaveOneGroupOut):
                cv_num = len(set(data[cv_group_colname].values))
            elif isLeaveOneOut:
                cv_num = 1
            else:
                cv_num = cv.n_splits
            # When eval_metric is in fit_params but eval_set is not, use the test data as eval_set
            if eval_set_selection is None:
                eval_set_selection = 'test'
            # NOTE(review): the raw ``y`` argument (which may be a column name str) is
            # passed here instead of ``y_true`` — confirm init_eval_set expects this
            fit_params, eval_set_selection = init_eval_set(
                eval_set_selection, fit_params, X, y)
            # Build the transformer made of all pipeline steps except the final estimator
            transformer = _make_transformer(eval_set_selection, estimator)
            # Calculate cross-validation scores for every requested metric
            score_all_dict = {}
            for scoring in scores:
                # Cross validation via cross_val_score
                if scoring == 'r2':
                    score_all_dict['r2'] = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                                    cv=cv, scoring='r2',
                                                                    fit_params=fit_params, n_jobs=-1, **split_kws)
                elif scoring == 'mae':
                    neg_mae = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                       cv=cv, scoring='neg_mean_absolute_error',
                                                       fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mae'] = -neg_mae  # flip the sign back from scikit-learn's negated score
                elif scoring == 'mse':
                    neg_mse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                       cv=cv, scoring='neg_mean_squared_error',
                                                       fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mse'] = -neg_mse  # flip the sign back from scikit-learn's negated score
                elif scoring == 'rmse':
                    neg_rmse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_root_mean_squared_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['rmse'] = -neg_rmse  # flip the sign back from scikit-learn's negated score
                elif scoring == 'rmsle':
                    neg_msle = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_mean_squared_log_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['rmsle'] = np.sqrt(-neg_msle)  # flip the sign and take the square root
                elif scoring == 'mape':
                    neg_mape = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                        cv=cv, scoring='neg_mean_absolute_percentage_error',
                                                        fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['mape'] = -neg_mape  # flip the sign back from scikit-learn's negated score
                elif scoring == 'max_error':
                    neg_max_error = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
                                                             cv=cv, scoring='max_error',
                                                             fit_params=fit_params, n_jobs=-1, **split_kws)
                    score_all_dict['max_error'] = - neg_max_error  # flip the sign back from scikit-learn's negated score
            # Create axes for display
            if ax is None:
                # For LeaveOneOut, no per-fold figures are created
                if isLeaveOneOut:
                    if 'figsize' not in subplot_kws.keys():
                        subplot_kws['figsize'] = (6, 6)
                    fig, ax = plt.subplots(1, 1, **subplot_kws)
                # Otherwise, one figure per fold plus one for all folds combined
                else:
                    if 'figsize' not in subplot_kws.keys():
                        subplot_kws['figsize'] = (6, (cv_num + 1) * 6)
                    fig, ax = plt.subplots(cv_num + 1, 1, **subplot_kws)
            # Cross validation loop
            y_true_all = []
            y_pred_all = []
            hue_all = []
            rank_col_all = []
            score_train_dict = {}
            for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
                # Split into train and test data for display
                X_train = X[train]
                y_train = y_true[train]
                X_test = X[test]
                y_test = y_true[test]
                # Color grouping data (when unspecified, use the fold number; no per-fold grouping for LeaveOneOut)
                if hue is None:
                    hue_test = np.full(1 ,'leave_one_out') if isLeaveOneOut else np.full(len(test) ,f'cv_{i}')
                    hue_name = 'cv_number'  # use 'cv_number' as the grouping name
                else:
                    hue_test = data[hue].values[test]
                    hue_name = hue
                # Data displayed alongside the largest prediction errors
                if rank_number is not None:
                    if rank_col is None:  # use the index when no display field is specified
                        rank_col_test = data.index.values[test]
                    else:  # use the specified display field
                        rank_col_test = data[rank_col].values[test]
                else:
                    rank_col_test = np.array([])
                # Select only the train or test data from eval_set
                fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
                                                          fit_params, train, test)
                # Fit and predict
                estimator.fit(X_train, y_train, **fit_params_modified)
                y_pred = estimator.predict(X_test)
                # Scores on the training data
                y_pred_train = estimator.predict(X_train)
                score_dict = cls._make_score_dict(y_train, y_pred_train, scores)
                for score in scores:
                    if f'{score}_train' not in score_train_dict:
                        score_train_dict[f'{score}_train'] = []
                    score_train_dict[f'{score}_train'].append(score_dict[score])
                # Plot this fold's result (skipped for LeaveOneOut)
                if not isLeaveOneOut:
                    score_cv_dict = {k: v[i] for k, v in score_all_dict.items()}
                    score_cv_dict.update({f'{k}_train': v for k, v in score_dict.items()})
                    cls._plot_pred_true(y_test, y_pred, hue_data=hue_test, hue_name=hue_name, ax=ax[i],
                                        linecolor=linecolor, rounddigit=rounddigit, score_dict=score_cv_dict,
                                        scatter_kws=scatter_kws, legend_kws=legend_kws)
                    ax[i].set_title(f'Cross Validation Fold{i}')
                # Accumulate data for the overall plot
                y_true_all.append(y_test)
                y_pred_all.append(y_pred)
                hue_all.append(hue_test)
                rank_col_all.append(rank_col_test)
            # Concatenate the per-fold data for the overall plot
            y_true_all = np.hstack(y_true_all)
            y_pred_all = np.hstack(y_pred_all)
            hue_all = np.hstack(hue_all)
            rank_col_all = np.hstack(rank_col_all)
            # Aggregate the cross-validation scores
            # NOTE(review): an unrecognized cv_stats value leaves score_stats_dict
            # undefined and raises NameError below — confirm whether this is intended
            if cv_stats == 'mean':
                score_stats_dict = {f'{k}_mean': np.mean(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.mean(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'median':
                score_stats_dict = {f'{k}_median': np.median(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.median(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'min':
                score_stats_dict = {f'{k}_min': np.amin(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.amin(v) for k, v in score_train_dict.items()}
            elif cv_stats == 'max':
                score_stats_dict = {f'{k}_max': np.amax(v) for k, v in score_all_dict.items()}
                train_stats_dict = {k: np.amax(v) for k, v in score_train_dict.items()}
            # Add the training-data scores to the dict
            score_stats_dict.update(train_stats_dict)
            # Overall plot of all folds
            ax_all = ax if isLeaveOneOut else ax[cv_num]
            cls._plot_pred_true(y_true_all, y_pred_all, hue_data=hue_all, hue_name=hue_name, ax=ax_all,
                                linecolor=linecolor, rounddigit=rounddigit, score_dict=score_stats_dict,
                                scatter_kws=scatter_kws, legend_kws=legend_kws)
            ax_all.set_title('All Cross Validations')
            # Annotate the largest prediction errors
            if rank_number is not None:
                cls._rank_display(y_true_all, y_pred_all, rank_number, rank_col, rank_col_all,
                                  ax=ax_all, rounddigit=rounddigit)
            return score_stats_dict
def _average_plot(estimator, data, x_colnames, y_colname, hue,
aggregate, subplot_kws, plot_kws, scatter_kws, legend_kws,
cv_index, x_range=200):
# figsize (全ての図全体のサイズ)指定
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (6, len(x_colnames) * 5)
if 'color' not in plot_kws:
plot_kws['color'] = 'red'
# プロット用のaxes作成
fig, axes = plt.subplots(len(x_colnames), 1, **subplot_kws)
if cv_index is not None:
fig.suptitle(f'CV No.{cv_index}')
# 全列を走査
for i, colname in enumerate(x_colnames):
# 該当列(グラフのX軸)の値を作成
x_max = data[colname].max()
x_min = data[colname].min()
x_array = np.linspace(x_min, x_max, x_range)
# 該当列以外を抽出して平均値算出
if aggregate == 'mean':
other_x_agg = data[[col for col in x_colnames if col != colname]].mean()
elif aggregate == 'median':
other_x_agg = data[[col for col in x_colnames if col != colname]].median()
else:
raise ValueError('the `aggregate` argument should be "mean" or "median"')
X_mean = np.tile(other_x_agg, (x_range, 1))
# 該当列を挿入して説明変数とし、モデルで推論
X_mean = np.insert(X_mean, i, x_array, axis=1)
y_pred = estimator.predict(X_mean)
# 実測値を散布図プロット
ax = axes if len(x_colnames) == 1 else axes[i]
sns.scatterplot(x=colname, y=y_colname, hue=hue, data=data, ax=ax, **scatter_kws)
# 推測値曲線をプロット
ax.plot(x_array, y_pred, **plot_kws)
# 色分け時は凡例表示
if hue is not None:
ax.legend(**legend_kws)
fig.tight_layout(rect=[0, 0, 1, 0.98])
@classmethod
def average_plot(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
x_colnames: List[str] = None, hue=None,
aggregate='mean',
cv=None, cv_seed=42, cv_group=None, display_cv_indices = 0,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, plot_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot relationship between one explanatory variable and predicted value by line graph.
Other explanatory variables are fixed to aggregated values such as mean values or median values.
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : list[str] or np.ndarray
Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
y : str or np.ndarray
Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
data: pd.DataFrame
Input data structure.
x_colnames: list[str], optional
Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
hue : str, optional
Grouping variable that will produce points with different colors.
aggregate : {'mean', 'median'}, optional
Statistic method of aggregating explanatory variables except x_axis variable.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
display_cv_indices : int or list, optional
Cross validation index or indices to display.
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
subplot_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
plot_kws: dict, optional
Additional parameters passed to matplotlib.axes.Axes.plot(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.plot.html
scatter_kws: dict, optional
Additional parameters passed to seaborn.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to matplotlib.axes.Axes.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data(x, y, data,
x_colnames,
cv_group)
# display_cv_indicesをList化
if isinstance(display_cv_indices, int):
display_cv_indices = [display_cv_indices]
elif not isinstance(x_colnames, list):
raise Exception('the "cv_display_indices" argument should be int or List[int]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# plot_kwsがNoneなら空のdictを入力
if plot_kws is None:
plot_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
# 平均値
cls._average_plot(estimator, data, x_colnames, y_colname, hue,
aggregate=aggregate,
subplot_kws=subplot_kws, plot_kws=plot_kws,
scatter_kws=scatter_kws, legend_kws=legend_kws,
cv_index=None)
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
# LeaveOneOutのときエラーを出す
if isinstance(cv, LeaveOneOut):
raise Exception('"regression_heat_plot" method does not support "LeaveOneOut" cross validation')
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# クロスバリデーション
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示対象以外のCVなら飛ばす
if i not in display_cv_indices:
continue
print(f'cv_number={i}/{cv_num}')
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
data_test = data.iloc[test]
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
# ヒートマップをプロット
cls._average_plot(estimator, data_test, x_colnames, y_colname, hue,
aggregate=aggregate,
subplot_kws=subplot_kws, plot_kws=plot_kws,
scatter_kws=scatter_kws, legend_kws=legend_kws,
cv_index=i)
@classmethod
def linear_plot(cls, x: str, y: str, data: pd.DataFrame = None,
x_colname: str = None,
ax=None, hue=None, linecolor='red',
rounddigit=5, plot_scores=True, scatter_kws=None, legend_kws=None):
"""
Plot linear regression line and calculate Pearson correlation coefficient.
Parameters
----------
x : str
Variable that specify positions on the x.
y : str
Variable that specify positions on the y.
data : pd.DataFrame
Input data structure.
x_colname: str, optional
Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame
ax : matplotlib.axes.Axes, optional
Pre-existing axes for the plot. Otherwise, call matplotlib.pyplot.gca() internally.
hue : str, optional
Grouping variable that will produce points with different colors.
linecolor : str, optional
Color of regression line. See https://matplotlib.org/stable/gallery/color/named_colors.html
rounddigit: int, optional
Round a number of score to a given precision in decimal digits.
plot_scores: bool, optional
If True, display Pearson correlation coefficient and the p-value.
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
Returns
----------
ax : matplotlib.axes.Axes
Returns the Axes object with the plot drawn onto it.
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
y, data,
[x_colname] if x_colname is not None else x_colname,
cv_group=None)
if x_colname is None:
x_colname = x_colnames[0]
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# まずは散布図プロット
ax = sns.scatterplot(x=x_colname, y=y_colname, data=data, ax=ax, hue=hue, **scatter_kws)
# 凡例追加
if 'title' not in legend_kws.keys():
legend_kws['title'] = hue
ax.legend(**legend_kws)
# 線形回帰モデル作成
lr = LinearRegression()
lr.fit(X, y_true)
xmin = np.amin(X)
xmax = np.amax(X)
linesplit=200
Xline = np.linspace(xmin, xmax, linesplit)
Xline = Xline.reshape(len(Xline), 1)
# 回帰線を描画
ax.plot(Xline, lr.predict(Xline), color=linecolor)
# 回帰式、ピアソンの相関係数およびp値を表示
if plot_scores == True:
# 回帰式
coef = cls._round_digits(lr.coef_[0], rounddigit=rounddigit, method="decimal")
intercept = cls._round_digits(lr.intercept_, rounddigit=rounddigit, method="decimal")
equation = f'y={coef}x+{intercept}' if intercept >= 0 else f'y={coef}x-{-intercept}'
# ピアソン相関係数
pearsonr = stats.pearsonr(data[x_colname], data[y_colname])
r = cls._round_digits(pearsonr[0], rounddigit=rounddigit, method="decimal")
pvalue = cls._round_digits(pearsonr[1], rounddigit=rounddigit, method="decimal")
# プロット
rtext = f'{equation}\nr={r}\np={pvalue}'
ax.text(xmax, np.amin(y_true), rtext, verticalalignment='bottom', horizontalalignment='right')
return ax
@classmethod
def _estimator_plot_1d(cls, trained_estimator, X, y_true, hue_data=None, hue_name=None, ax=None, linecolor='red', linesplit=1000, rounddigit=None,
score_dict=None, scatter_kws=None, legend_kws=None):
"""
1次説明変数回帰曲線を、回帰評価指標とともにプロット
Parameters
----------
trained_estimator :
学習済の回帰モデル(scikit-learn API)
X : ndarray
説明変数
y_true : ndarray
目的変数実測値
hue_data : ndarray
色分け用ラベルデータ
hue_name : str
色分け用の列名
ax : matplotlib.axes.Axes
表示対象のax (Noneならplt.plotで1枚ごとにプロット)
linecolor : str
予測値=実測値の線の色
linesplit : int
フィッティング線の分割数 (カクカクしたら増やす)
rounddigit: int
表示指標の小数丸め桁数
score_dict : dict[str, float]
算出した評価指標一覧
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用
if ax is None:
ax=plt.gca()
# score_dictがNoneのとき、空のDictを入力
if score_dict is None:
score_dict = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# 散布図プロット
cls._scatterplot_ndarray(np.ravel(X), 'X', y_true, 'Y', hue_data, hue_name, ax, scatter_kws, legend_kws)
# 回帰モデルの線を作成
xmin = np.amin(X)
xmax = np.amax(X)
Xline = np.linspace(xmin, xmax, linesplit)
Xline = Xline.reshape(len(Xline), 1)
# 回帰線を描画
ax.plot(Xline, trained_estimator.predict(Xline), color=linecolor)
# 評価指標文字列作成
score_list = [f'{k}={v}' for k, v in cls._round_dict_digits(score_dict, rounddigit, 'sig').items()]
score_text = "\n".join(score_list)
ax.text(xmax, np.amin(y_true), score_text, verticalalignment='bottom', horizontalalignment='right')
@classmethod
def regression_plot_1d(cls, estimator, x: str, y: str, data: pd.DataFrame = None, x_colname: str = None,
hue=None, linecolor='red', rounddigit=3,
rank_number=None, rank_col=None, scores='mae',
cv_stats='mean', cv=None, cv_seed=42, cv_group=None,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot regression lines of any scikit-learn regressor with 1D explanatory variable.
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : str, or np.ndarray
Explanatory variables. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
y : str or np.ndarray
Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
data: pd.DataFrame
Input data structure.
x_colname: str, optional
Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame
hue : str, optional
Grouping variable that will produce points with different colors.
linecolor : str, optional
Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html
rounddigit: int, optional
Round a number of score to a given precision in decimal digits.
rank_number : int, optional
Number of emphasized data that are in the top positions for regression error.
rank_col : list[str], optional
Variables that are displayed with emphasized data that are in the top posiotions for regression error.
scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list,, optional
Regression score that are displayed at the lower right of the graph.
cv_stats : {'mean', 'median', 'max', 'min'}, optional
Statistical method of cross validation score that are displayed at the lower right of the graph.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
subplot_kws : dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
scatter_kws: dict, optional
Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
Returns
----------
score_dict : dict
Validation scores, e.g. r2, mae and rmse
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data([x] if isinstance(x, str) else x,
y, data,
[x_colname] if x_colname is not None else x_colname,
cv_group)
# scoresの型をListに統一
if scores is None:
scores = []
elif isinstance(scores, str):
scores = [scores]
elif not isinstance(scores, list):
raise Exception('the "scores" argument must be str or list[str]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
y_pred = estimator.predict(X)
# 評価指標算出
score_dict = cls._make_score_dict(y_true, y_pred, scores)
# 色分け用データ取得
hue_data = None if hue is None else data[hue]
hue_name = None if hue is None else hue
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_data = data.index.values
else: # 表示フィールド指定あるとき
rank_col_data = data[rank_col].values
# 回帰線プロット
cls._estimator_plot_1d(estimator, X, y_true, hue_data=hue_data, hue_name=hue_name,
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
# 誤差上位を文字表示
if rank_number is not None:
cls._rank_display(y_true, y_pred, rank_number, rank_col, rank_col_data, x=X, rounddigit=rounddigit)
return score_dict
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
#LeaveOneOutのときエラーを出す
if isinstance(cv, LeaveOneOut):
raise Exception('"regression_plot_1d" method does not support "LeaveOneOut" cross validation')
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# スコア種類ごとにクロスバリデーションスコアの算出
score_all_dict = {}
for scoring in scores:
# cross_val_scoreでクロスバリデーション
if scoring == 'r2':
score_all_dict['r2'] = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='r2',
fit_params=fit_params, n_jobs=-1, **split_kws)
elif scoring == 'mae':
neg_mae = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_absolute_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mae'] = -neg_mae # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'mse':
neg_mse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_squared_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mse'] = -neg_mse # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'rmse':
neg_rmse = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_root_mean_squared_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['rmse'] = -neg_rmse # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'rmsle':
neg_msle = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_squared_log_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['rmsle'] = np.sqrt(-neg_msle) # 正負を逆にしてルートをとる
elif scoring == 'mape':
neg_mape = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='neg_mean_absolute_percentage_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['mape'] = -neg_mape # scikit-learnの仕様に合わせ正負を逆に
elif scoring == 'max_error':
neg_max_error = cross_val_score_eval_set(eval_set_selection, estimator, X, y_true,
cv=cv, scoring='max_error',
fit_params=fit_params, n_jobs=-1, **split_kws)
score_all_dict['max_error'] = - neg_max_error # scikit-learnの仕様に合わせ正負を逆に
# 表示用のaxes作成
# クロスバリデーションごとに図作成
if 'figsize' not in subplot_kws.keys():
subplot_kws['figsize'] = (6, (cv_num + 1) * 6)
fig, axes = plt.subplots(cv_num + 1, 1, **subplot_kws)
# クロスバリデーション
score_train_dict = {}
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
X_test = X[test]
y_test = y_true[test]
# 色分け用データ取得(していないときは、クロスバリデーション番号を使用、LeaveOuneOutのときは番号分けない)
if hue is None:
hue_test = np.full(len(test) ,f'cv_{i}')
hue_name = 'cv_number' # 色分け名を'cv_number'に指定
else:
hue_test = data[hue].values[test]
hue_name = hue
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_test = data.index.values[test]
else: # 表示フィールド指定あるとき
rank_col_test = data[rank_col].values[test]
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
# 学習データスコア算出
y_pred_train = estimator.predict(X_train)
score_dict = cls._make_score_dict(y_train, y_pred_train, scores)
for score in scores:
if f'{score}_train' not in score_train_dict:
score_train_dict[f'{score}_train'] = []
score_train_dict[f'{score}_train'].append(score_dict[score])
# CV内結果をプロット
score_cv_dict = {k: v[i] for k, v in score_all_dict.items()}
score_cv_dict.update({f'{k}_train': v for k, v in score_dict.items()})
cls._estimator_plot_1d(estimator, X_test, y_test, hue_data=hue_test, hue_name=hue_name, ax=axes[i],
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_cv_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
# 誤差上位を文字表示
if rank_number is not None:
cls._rank_display(y_test, estimator.predict(X_test), rank_number, rank_col, rank_col_test, x=X_test, ax=axes[i], rounddigit=rounddigit)
axes[i].set_title(f'Cross Validation Fold{i}')
# スコアの統計値を計算
if cv_stats == 'mean':
score_stats_dict = {f'{k}_mean': np.mean(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.mean(v) for k, v in score_train_dict.items()}
elif cv_stats == 'median':
score_stats_dict = {f'{k}_median': np.median(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.median(v) for k, v in score_train_dict.items()}
elif cv_stats == 'min':
score_stats_dict = {f'{k}_min': np.amin(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.amin(v) for k, v in score_train_dict.items()}
elif cv_stats == 'max':
score_stats_dict = {f'{k}_max': np.amax(v) for k, v in score_all_dict.items()}
train_stats_dict = {k: np.amax(v) for k, v in score_train_dict.items()}
# 学習データスコアをdictに追加
score_stats_dict.update(train_stats_dict)
# 全体色分け用データ取得
hue_data = None if hue is None else data[hue]
hue_name = None if hue is None else hue
# 全体プロット
ax_all = axes[cv_num]
cls._estimator_plot_1d(estimator, X, y_true, hue_data=hue_data, hue_name=hue_name, ax=ax_all,
linecolor=linecolor, rounddigit=rounddigit, score_dict=score_stats_dict,
scatter_kws=scatter_kws, legend_kws=legend_kws)
ax_all.set_title('All Cross Validations')
return score_stats_dict
    @classmethod
    def _reg_heat_plot_2d(cls, trained_estimator, x_heat, y_true_col, y_pred_col, rank_col, data, x_heat_indices, hue_name,
                          x1_start, x1_end, x2_start, x2_end, heat_division, other_x,
                          vmin, vmax, ax, plot_scatter, maxerror, rank_dict, scatter_hue_dict,
                          rounddigit_rank, rounddigit_x1, rounddigit_x2,
                          heat_kws=None, scatter_kws=None, legend_kws=None):
        """
        Draw one regression-prediction heatmap with its overlaid scatter plots
        (the drawing part of the ``regression_heat_plot`` method).

        A ``heat_division`` x ``heat_division`` grid is built over the two
        heatmap variables ``x_heat`` while the remaining explanatory variables
        are fixed at the values in ``other_x``. The already-fitted
        ``trained_estimator`` predicts on that grid, the predictions are drawn
        with ``sns.heatmap``, the observed samples in ``data`` are optionally
        overlaid as a scatter plot (colored by error, true value, or the
        ``hue_name`` column), and the worst-error samples listed in
        ``rank_dict`` are annotated as text.

        NOTE(review): ``heat_kws``/``scatter_kws``/``legend_kws`` default to
        None but ``.keys()`` is called on them below, so callers are assumed
        to always pass dicts (``regression_heat_plot`` does) — confirm before
        calling this helper directly.
        """
        # If no Axes is given, draw on matplotlib.pyplot.gca()
        if ax is None:
            ax=plt.gca()
        # Create grid data for the heatmap
        xx = np.linspace(x1_start, x1_end, heat_division)
        yy = np.linspace(x2_start, x2_end, heat_division)
        X1, X2 = np.meshgrid(xx, yy)
        X_grid = np.c_[X1.ravel(), X2.ravel()]
        df_heat = pd.DataFrame(X_grid, columns=x_heat)
        # Build an ndarray holding ALL explanatory variables for prediction
        # (non-heatmap variables are appended as the fixed values in other_x)
        n_rows = X_grid.shape[0]
        X_all = []
        other_add_flg = False
        for i in range(2 + len(other_x)):
            if i == x_heat_indices[0]:  # append the first heatmap variable
                X_all.append(X_grid[:, 0].reshape(n_rows, 1))
            elif i == x_heat_indices[1]:  # append the second heatmap variable
                X_all.append(X_grid[:, 1].reshape(n_rows, 1))
            elif len(other_x) >= 1 and not other_add_flg:  # append the first non-heatmap variable as a fixed value
                X_all.append(np.full((n_rows, 1), other_x[0]))
                other_add_flg = True
            elif len(other_x) == 2:  # append the second non-heatmap variable as a fixed value
                X_all.append(np.full((n_rows, 1), other_x[1]))
        X_all = np.hstack(X_all)
        # Predict on the grid data with the already-trained estimator
        y_pred_grid = trained_estimator.predict(X_all)
        df_heat['y_pred'] = pd.Series(y_pred_grid)
        # Round the grid-axis values to the requested display precision
        df_heat[x_heat[0]] = df_heat[x_heat[0]].map(lambda x: cls._round_digits(x, rounddigit=rounddigit_x1))
        df_heat[x_heat[1]] = df_heat[x_heat[1]].map(lambda x: cls._round_digits(x, rounddigit=rounddigit_x2))
        # Pivot the grid data into the matrix form sns.heatmap expects
        df_heat_pivot = pd.pivot_table(data=df_heat, values='y_pred',
                                       columns=x_heat[0], index=x_heat[1], aggfunc=np.mean)
        # If rounding merged x-axis columns below heat_division, the resolution is
        # insufficient: tell the caller to increase rounddigit_x1
        if len(df_heat_pivot.columns) < heat_division:
            raise Exception(f'the "rounddigit_x1" argument must be bigger than {rounddigit_x1} because of the shortage of the "{x_heat[0]}" resolution')
        # Same resolution check for the y axis / rounddigit_x2
        if len(df_heat_pivot) < heat_division:
            raise Exception(f'the "rounddigit_x2" argument must be bigger than {rounddigit_x2} because of the shortage of the "{x_heat[1]}" resolution')
        # Default heatmap colormap is YlGn when none is specified
        if 'cmap' not in heat_kws.keys():
            heat_kws['cmap'] = 'YlGn'
        # Plot the heatmap
        sns.heatmap(df_heat_pivot, ax=ax, vmax=vmax, vmin=vmin, center=(vmax+vmin)/2, **heat_kws)
        # Overlay the scatter plot of observed samples
        if plot_scatter is not None:
            # The heatmap axes run 0..heat_division, so rescale the data coordinates
            x1_scatter = 0.5 + (data[x_heat[0]].values - x1_start) * (heat_division - 1) / (x1_end - x1_start)
            x2_scatter = 0.5 + (data[x_heat[1]].values - x2_start) * (heat_division - 1) / (x2_end - x2_start)
            # Color selection
            if plot_scatter == 'error':  # color by prediction error
                scatter_c = data[y_pred_col].values - data[y_true_col].values
                scatter_vmin = -maxerror
                scatter_vmax = maxerror
                if 'cmap' not in scatter_kws.keys():  # default scatter colormap: seismic
                    scatter_kws['cmap'] = 'seismic'
            elif plot_scatter == 'true':  # color by true value
                scatter_c = data[y_true_col].values
                scatter_vmin = vmin
                scatter_vmax = vmax
                if 'cmap' not in scatter_kws.keys():  # default: same cmap as the heatmap
                    scatter_kws['cmap'] = heat_kws['cmap']
                if 'edgecolors' not in scatter_kws.keys():  # default marker edge color: brown
                    scatter_kws['edgecolors'] = 'brown'
            # Scatter plot colored by error or true value
            if plot_scatter == 'error' or plot_scatter == 'true':
                ax.scatter(x1_scatter, x2_scatter, vmin=scatter_vmin, vmax=scatter_vmax, c=scatter_c, **scatter_kws)
            # Scatter plot colored by the hue column
            if plot_scatter == 'hue':
                scatter_data = pd.DataFrame(np.stack([x1_scatter, x2_scatter, data[hue_name]], 1), columns=['x1', 'x2', hue_name])
                for name, group in scatter_data.groupby(hue_name):
                    ax.scatter(group['x1'].values, group['x2'].values, label=name, c=scatter_hue_dict[name], **scatter_kws)
                ax.legend(**legend_kws)
        # Annotate the worst-error samples as text
        df_rank = data[data.index.isin(rank_dict.keys())]
        for index, row in df_rank.iterrows():
            # When rank_col was not specified the index is used; it gets cast to
            # float on the way, so restore int here
            rank_col_value = int(row[rank_col]) if rank_col == 'index' else row[rank_col]
            # Compute the rounded error and format the annotation text
            error = cls._round_digits(row['y_pred'] - row['y_true'], rounddigit=rounddigit_rank)
            rank_text = f' no{rank_dict[index]+1}\n-<-error={error}\n {rank_col}={rank_col_value}'
            # Rescale to the 0..heat_division axes before plotting
            x1_text = 0.5 + (row[x_heat[0]] - x1_start) * (heat_division - 1) / (x1_end - x1_start)
            x2_text = 0.5 + (row[x_heat[1]] - x2_start) * (heat_division - 1) / (x2_end - x2_start)
            ax.text(x1_text, x2_text, rank_text, verticalalignment='center', horizontalalignment='left')
    @classmethod
    def _reg_heat_plot(cls, trained_estimator, X, y_pred, y_true, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
                       pair_sigmarange=1.0, pair_sigmainterval=0.5, heat_extendsigma=0.5, heat_division=30,
                       vmin=None, vmax=None, plot_scatter='true', maxerror=None,
                       rank_number=None, rank_col=None, rank_col_data=None, scatter_hue_dict=None,
                       rounddigit_rank=None, rounddigit_x1=None, rounddigit_x2=None, rounddigit_x3=None,
                       cv_index=None, subplot_kws=None, heat_kws=None, scatter_kws=None, legend_kws=None):
        """
        Branch the regression-heatmap drawing by the number of explanatory
        variables (the data-splitting part of the ``regression_heat_plot``
        method).

        With 2 explanatory variables a single heatmap is drawn. With 3, the
        third variable is standardized and binned into sigma ranges of width
        ``pair_sigmainterval``, one subplot per bin. With 4, both extra
        variables are binned, giving a grid of subplots. Each subplot is
        rendered by ``_reg_heat_plot_2d``.
        """
        # Number of explanatory variables
        x_num = X.shape[1]
        # DataFrame of the heatmap variables
        df_heat = pd.DataFrame(X[:, x_heat_indices], columns=x_heat)
        # DataFrame of the non-heatmap variables
        X_not_heat = X[:, [i for i in range(X.shape[1]) if i not in x_heat_indices]]
        df_not_heat = pd.DataFrame(X_not_heat, columns=x_not_heat)
        # Join them and append true and predicted objective values
        df_all = df_heat.join(df_not_heat)
        df_all = df_all.join(pd.DataFrame(y_true, columns=['y_true']))
        df_all = df_all.join(pd.DataFrame(y_pred, columns=['y_pred']))
        # Standardize the non-heatmap variables and append them (used for binning)
        if x_num >= 3:
            X_not_heat_norm = stats.zscore(X_not_heat)
            df_all = df_all.join(pd.DataFrame(X_not_heat_norm, columns=[f'normalize_{c}' for c in x_not_heat]))
        # Append the ID column used for annotating worst-error samples
        rank_col = 'index' if rank_col is None else rank_col
        df_all = df_all.join(pd.DataFrame(rank_col_data, columns=[rank_col]))
        # Append the hue column for scatter coloring (only when hue_name is given)
        if hue_name is not None:
            df_all = df_all.join(pd.DataFrame(hue_data, columns=[hue_name]))
        # Rank absolute errors: the top rank_number samples get annotated
        if rank_number is not None:
            y_error_abs = np.abs(y_pred - y_true)
            rank_index = np.argsort(-y_error_abs)[:rank_number]
            rank_dict = dict(zip(rank_index.tolist(), range(rank_number)))
        else:
            rank_dict = {}
        # Display range of the heatmap X1 and X2 axes (min/max ± std * heat_extendsigma)
        x1_min = np.min(X[:, x_heat_indices[0]])
        x1_max = np.max(X[:, x_heat_indices[0]])
        x1_std = np.std(X[:, x_heat_indices[0]])
        x1_start = x1_min - x1_std * heat_extendsigma
        x1_end = x1_max + x1_std * heat_extendsigma
        x2_min = np.min(X[:, x_heat_indices[1]])
        x2_max = np.max(X[:, x_heat_indices[1]])
        x2_std = np.std(X[:, x_heat_indices[1]])
        x2_start = x2_min - x2_std * heat_extendsigma
        x2_end = x2_max + x2_std * heat_extendsigma
        # Number of subplots per binned variable
        # (2 outside sigmarange + int(pair_sigmarange / pair_sigmainterval) * 2 inside)
        pair_n = int(pair_sigmarange / pair_sigmainterval) * 2 + 2
        # Lower bound (in standardized units) of the binned plotting range
        pair_min = -(pair_n - 2) / 2 * pair_sigmainterval
        # 2 explanatory variables: a single figure
        if x_num == 2:
            pair_w = 1
            pair_h = 1
        # 3 explanatory variables: pair_n x 1 figures
        elif x_num == 3:
            pair_w = 1
            pair_h = pair_n
        # 4 explanatory variables: pair_n x pair_n figures
        elif x_num == 4:
            pair_w = pair_n
            pair_h = pair_n
        # figsize for the whole figure (all subplots together)
        if 'figsize' not in subplot_kws.keys():
            subplot_kws['figsize'] = (pair_w * 6, pair_h * 5)
        # Create the Axes grid
        fig, axes = plt.subplots(pair_h, pair_w, **subplot_kws)
        if cv_index is not None:
            fig.suptitle(f'CV No.{cv_index}')
        # Draw each subplot
        for i in range(pair_h):
            for j in range(pair_w):
                # Minimum of the vertical pair variable (standardized)
                if i == 0:
                    h_min = -float('inf')
                    h_mean = pair_min - pair_sigmainterval / 2  # mean used to fix the non-heatmap variable
                else:
                    h_min = pair_min + (i - 1) * pair_sigmainterval
                    h_mean = pair_min + (i - 0.5) * pair_sigmainterval  # mean used to fix the non-heatmap variable
                # Maximum of the vertical pair variable (standardized)
                if i == pair_h - 1:
                    h_max = float('inf')
                else:
                    h_max = pair_min + i * pair_sigmainterval
                # Minimum of the horizontal pair variable (standardized)
                if j == 0:
                    w_min = -float('inf')
                    w_mean = pair_min - pair_sigmainterval / 2  # mean used to fix the non-heatmap variable
                else:
                    w_min = pair_min + (j - 1) * pair_sigmainterval
                    w_mean = pair_min + (j - 0.5) * pair_sigmainterval  # mean used to fix the non-heatmap variable
                # Maximum of the horizontal pair variable (standardized)
                if j == pair_w - 1:
                    w_max = float('inf')
                else:
                    w_max = pair_min + j * pair_sigmainterval
                # 2 explanatory variables: single figure, all data
                if x_num == 2:
                    ax = axes
                    df_pair = df_all.copy()
                    other_x = []
                # 3 explanatory variables: pair_n x 1 figures
                elif x_num == 3:
                    ax = axes[i]
                    # Keep only the rows within the vertical variable's bin
                    df_pair = df_all[(df_all[f'normalize_{x_not_heat[0]}'] >= h_min) & (df_all[f'normalize_{x_not_heat[0]}'] < h_max)].copy()
                    # Invert the standardization of the non-heatmap variable
                    x3_mean = np.mean(X_not_heat[:, 0])
                    x3_std = np.std(X_not_heat[:, 0])
                    other_x = [h_mean * x3_std + x3_mean]
                # 4 explanatory variables: pair_n x pair_n figures
                elif x_num == 4:
                    # NOTE(review): indexed as axes[j, i] (column loop variable first),
                    # which transposes the subplot layout; it only works because the
                    # grid is square (pair_n x pair_n) — confirm this is intentional
                    ax = axes[j, i]
                    # Keep only the rows within the vertical variable's bin
                    df_pair = df_all[(df_all[f'normalize_{x_not_heat[0]}'] >= h_min) & (df_all[f'normalize_{x_not_heat[0]}'] < h_max)].copy()
                    # Keep only the rows within the horizontal variable's bin
                    df_pair = df_pair[(df_pair[f'normalize_{x_not_heat[1]}'] >= w_min) & (df_pair[f'normalize_{x_not_heat[1]}'] < w_max)]
                    # Invert the standardization of both non-heatmap variables
                    x3_mean = np.mean(X_not_heat[:, 0])
                    x3_std = np.std(X_not_heat[:, 0])
                    x4_mean = np.mean(X_not_heat[:, 1])
                    x4_std = np.std(X_not_heat[:, 1])
                    other_x = [h_mean * x3_std + x3_mean, w_mean * x4_std + x4_mean]
                cls._reg_heat_plot_2d(trained_estimator, x_heat, 'y_true', 'y_pred', rank_col, df_pair, x_heat_indices, hue_name,
                                      x1_start, x1_end, x2_start, x2_end, heat_division, other_x,
                                      vmin, vmax, ax, plot_scatter, maxerror, rank_dict, scatter_hue_dict,
                                      rounddigit_rank, rounddigit_x1, rounddigit_x2,
                                      heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
                # Subplot title: the sigma range of the non-heatmap variable(s)
                # (only when there are 3 or more explanatory variables)
                if x_num == 3:
                    if i == 0:
                        ax.set_title(f'{x_not_heat[0]}=- {cls._round_digits(h_max * x3_std + x3_mean, rounddigit=rounddigit_x3)} (- {h_max}σ)')
                    elif i == pair_h - 1:
                        ax.set_title(f'{x_not_heat[0]}={cls._round_digits(h_min * x3_std + x3_mean, rounddigit=rounddigit_x3)} - ({h_min}σ -)')
                    else:
                        ax.set_title(f'{x_not_heat[0]}={cls._round_digits(h_min * x3_std + x3_mean, rounddigit=rounddigit_x3)} - {cls._round_digits(h_max * x3_std + x3_mean, rounddigit=rounddigit_x3)} ({h_min}σ - {h_max}σ)')
                if x_num == 4:
                    ax.set_title(f'{x_not_heat[0]}= {h_min}σ - {h_max}σ  {x_not_heat[1]}= {w_min}σ - {w_max}σ')
        # Labels overlap otherwise, so use tight_layout
        plt.tight_layout(rect=[0, 0, 1, 0.98])
@classmethod
def regression_heat_plot(cls, estimator, x: List[str], y: str, data: pd.DataFrame = None,
x_colnames: List[str] = None, x_heat: List[str] = None, scatter_hue=None,
pair_sigmarange = 1.0, pair_sigmainterval = 0.5, heat_extendsigma = 0.5,
heat_division = 30, color_extendsigma = 0.5,
plot_scatter = 'true', rounddigit_rank=3, rounddigit_x1=2, rounddigit_x2=2, rounddigit_x3=2,
rank_number=None, rank_col=None,
cv=None, cv_seed=42, cv_group=None, display_cv_indices = 0,
estimator_params=None, fit_params=None, eval_set_selection=None,
subplot_kws=None, heat_kws=None, scatter_kws=None, legend_kws=None):
"""
Plot regression heatmaps of any scikit-learn regressor with 2 to 4D explanatory variables.
Parameters
----------
estimator : estimator object implementing ``fit``
Regression estimator. This is assumed to implement the scikit-learn estimator interface.
x : list[str] or np.ndarray
Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
y : str or np.ndarray
Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None
data: pd.DataFrame
Input data structure.
x_colnames: list[str], optional
Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame
x_heat: list[str], optional
X-axis and y-axis variables of heatmap. If None, use two variables in ``x`` from the front.
scatter_hue : str, optional
Grouping variable that will produce points with different colors. Available only if plot_scatter is set to ``hue``.
pair_sigmarange: float, optional
Set the range of subplots. The lower limit is mean({x3, x4}) - ``pair_sigmarange`` * std({x3, x4}). The higher limit is mean({x3, x4}) + ``pair_sigmarange`` * std({x3, x4}). Available only if len(x) is bigger than 2.
pair_sigmainterval: float, optional
Set the interval of subplots. For example, if ``pair_sigmainterval`` is set to 0.5 and ``pair_sigmarange`` is set to 1.0, The ranges of subplots are lower than μ-1σ, μ-1σ to μ-0.5σ, μ-0.5σ to μ, μ to μ+0.5σ, μ+0.5σ to μ+1σ, and higher than μ+1σ. Available only if len(x) is bigger than 2.
heat_extendsigma: float, optional
Set the axis view limits of the heatmap. The lower limit is min({x1, x2}) - std({x1, x2}) * ``heat_extendsigma``. The higher limit is max({x1, x2}) + std({x1, x2}) * ``heat_extendsigma``
heat_division: int, optional
Resolution of the heatmap.
color_extendsigma: float, optional
Set the colormap limits of the heatmap. The lower limit is min(y_ture) - std(y_ture) * ``color_extendsigma``. The higher limit is max(y_ture) - std(y_ture) * ``color_extendsigma``.
plot_scatter: {'error', 'true', 'hue'}, optional
Color decision of scatter plot. If 'error', to be mapped to colors using error value. If 'true', to be mapped to colors using y_ture value. If 'hue', to be mapped to colors using scatter_hue variable. If None, no scatter.
rounddigit_rank: int, optional
Round a number of error that are in the top posiotions for regression error to a given precision in decimal digits.
rounddigit_x1: int, optional
Round a number of x-axis valiable of the heatmap to a given precision in decimal digits.
rounddigit_x2: int, optional
Round a number of y-axis valiable of the heatmap to a given precision in decimal digits.
rounddigit_x3: int, optional
Round a number of y-axis valiable of subplots to a given precision in decimal digits.
rank_number: int, optional
Number of emphasized data that are in the top posiotions for regression error.
rank_col: str, optional
Variables that are displayed with emphasized data that are in the top posiotions for regression error.
cv : int, cross-validation generator, or an iterable, optional
Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold.
cv_seed : int, optional
Seed for random number generator of cross validation.
cv_group: str, optional
Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split().
display_cv_indices : int or list, optional
Cross validation index or indices to display.
estimator_params : dict, optional
Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
fit_params : dict, optional
Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p.
eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional
Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost.
If "all", use all data in `X` and `y`.
If "train", select train data from `X` and `y` using cv.split().
If "test", select test data from `X` and `y` using cv.split().
If "original", use raw `eval_set`.
If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline.
subplot_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html
heat_kws: dict, optional
Additional parameters passed to sns.heatmap(), e.g. ``cmap``. See https://seaborn.pydata.org/generated/seaborn.heatmap.html
scatter_kws: dict, optional
Additional parameters passed to matplotlib.pyplot.scatter(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html
legend_kws : dict
Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html
"""
# 入力データの形式統一
X, y_true, data, x_colnames, y_colname, cv_group_colname = cls._reshape_input_data(x, y, data,
x_colnames,
cv_group)
# 説明変数xの次元が2~4以外ならエラーを出す
if len(x_colnames) < 2 or len(x_colnames) > 4:
raise Exception('Dimension of x must be 2 to 4')
# display_cv_indicesをList化
if isinstance(display_cv_indices, int):
display_cv_indices = [display_cv_indices]
elif not isinstance(x_colnames, list):
raise Exception('the "cv_display_indices" argument must be int or List[int]')
# 学習器パラメータがあれば適用
if estimator_params is not None:
estimator.set_params(**estimator_params)
# 学習時パラメータがNoneなら空のdictを入力
if fit_params is None:
fit_params = {}
# subplot_kwsがNoneなら空のdictを入力
if subplot_kws is None:
subplot_kws = {}
# heat_kwsがNoneなら空のdictを入力
if heat_kws is None:
heat_kws = {}
# scatter_kwsがNoneなら空のdictを入力
if scatter_kws is None:
scatter_kws = {}
# legend_kwsがNoneなら空のdictを入力
if legend_kws is None:
legend_kws = {}
# ヒートマップ表示用の列を抽出
if x_heat is None: # 列名指定していないとき、前から2列を抽出
x_heat = x_colnames[:2]
x_heat_indices = [0, 1]
else: # 列名指定しているとき、該当列のXにおけるインデックス(0~3)を保持
if len(x_heat) != 2:
raise Exception('length of x_heat must be 2')
x_heat_indices = []
for colname in x_heat:
x_heat_indices.append(x_colnames.index(colname))
# ヒートマップ表示以外の列
x_not_heat = [colname for colname in x_colnames if colname not in x_heat]
# ヒートマップの色分け最大最小値(y_trueの最大最小値 ± y_trueの標準偏差 × color_extendsigma)
y_true_std = np.std(y_true)
vmin = np.min(y_true) - y_true_std * color_extendsigma
vmax = np.max(y_true) + y_true_std * color_extendsigma
# 引数plot_scatter='hue'とscatter_hueが同時指定されていないとき、エラーを出す
if scatter_hue is not None:
if plot_scatter != 'hue' and not isinstance(cv, GroupKFold) and not isinstance(cv, LeaveOneGroupOut):
raise Exception('the "plot_scatter" argument must be "hue" when the argument "scatter_hue" is not None')
elif plot_scatter == 'hue':
raise Exception('the "scatter_hue" argument is required when the argument "plot_scatter" is "hue"')
# 引数plot_scatter='hue'のとき、色分け対象列とカラーマップを紐づけ(色分けを全ての図で統一用)
if plot_scatter == 'hue':
hue_list = data[scatter_hue].values.tolist()
hue_list = sorted(set(hue_list), key=hue_list.index)
scatter_hue_dict = dict(zip(hue_list, cls._HEAT_SCATTER_HUECOLORS[0:len(hue_list)]))
else:
scatter_hue_dict = None
# クロスバリデーション有無で場合分け
# クロスバリデーション未実施時(学習データからプロット&指標算出)
if cv is None:
# 学習と推論
estimator.fit(X, y_true, **fit_params)
y_pred = estimator.predict(X)
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_data = data.index.values
else: # 表示フィールド指定あるとき
rank_col_data = data[rank_col].values
else:
rank_col_data = None
# 誤差最大値
maxerror = np.max(np.abs(y_pred - y_true))
# 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効)
hue_data = data[scatter_hue] if scatter_hue is not None and plot_scatter=='hue' else None
hue_name = scatter_hue if scatter_hue is not None and plot_scatter=='hue' else None
# ヒートマップをプロット
cls._reg_heat_plot(estimator, X, y_pred, y_true, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
pair_sigmarange = pair_sigmarange, pair_sigmainterval=pair_sigmainterval, heat_extendsigma=heat_extendsigma, heat_division=heat_division,
vmin=vmin, vmax=vmax, plot_scatter=plot_scatter, maxerror=maxerror,
rank_number=rank_number, rank_col=rank_col, rank_col_data=rank_col_data, scatter_hue_dict=scatter_hue_dict,
rounddigit_rank=rounddigit_rank, rounddigit_x1=rounddigit_x1, rounddigit_x2=rounddigit_x2, rounddigit_x3=rounddigit_x3,
cv_index=None, subplot_kws=subplot_kws, heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
# クロスバリデーション実施時(分割ごとに別々にプロット&指標算出)
if cv is not None:
# 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割
if isinstance(cv, numbers.Integral):
cv = KFold(n_splits=cv, shuffle=True, random_state=cv_seed)
# LeaveOneOutのときエラーを出す
if isinstance(cv, LeaveOneOut):
raise Exception('"regression_heat_plot" method does not support "LeaveOneOut" cross validation')
# cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等)
split_kws={}
if cv_group_colname is not None:
split_kws['groups'] = data[cv_group_colname].values
elif isinstance(cv, GroupKFold) or isinstance(cv, LeaveOneGroupOut):
raise Exception('"GroupKFold" and "LeaveOneGroupOut" cross validations need ``cv_group`` argument')
# LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定
if isinstance(cv, LeaveOneGroupOut):
cv_num = len(set(data[cv_group_colname].values))
else:
cv_num = cv.n_splits
# fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用)
if eval_set_selection is None:
eval_set_selection = 'test'
fit_params, eval_set_selection = init_eval_set(
eval_set_selection, fit_params, X, y)
# 最終学習器以外の前処理変換器作成
transformer = _make_transformer(eval_set_selection, estimator)
# クロスバリデーション
for i, (train, test) in enumerate(cv.split(X, y_true, **split_kws)):
# 表示対象以外のCVなら飛ばす
if i not in display_cv_indices:
continue
print(f'cv_number={i}/{cv_num}')
# 表示用にテストデータと学習データ分割
X_train = X[train]
y_train = y_true[train]
X_test = X[test]
y_test = y_true[test]
# eval_setの中から学習データ or テストデータのみを抽出
fit_params_modified = _eval_set_selection(eval_set_selection, transformer,
fit_params, train, test)
# 学習と推論
estimator.fit(X_train, y_train, **fit_params_modified)
y_pred = estimator.predict(X_test)
# 誤差上位表示用データ取得
if rank_number is not None:
if rank_col is None: # 表示フィールド指定ないとき、Index使用
rank_col_test = data.index.values[test]
else: # 表示フィールド指定あるとき
rank_col_test = data[rank_col].values[test]
else:
rank_col_test = None
# 誤差最大値
maxerror = np.max(np.abs(y_pred - y_test))
# 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効))
hue_data = data[scatter_hue].values[test] if scatter_hue is not None and plot_scatter=='hue' else None
hue_name = scatter_hue if scatter_hue is not None and plot_scatter=='hue' else None
# ヒートマップをプロット
cls._reg_heat_plot(estimator, X_test, y_pred, y_test, x_heat, x_not_heat, x_heat_indices, hue_data, hue_name,
pair_sigmarange = pair_sigmarange, pair_sigmainterval = pair_sigmainterval, heat_extendsigma=heat_extendsigma, heat_division=heat_division,
vmin=vmin, vmax=vmax, plot_scatter = plot_scatter, maxerror=maxerror,
rank_number=rank_number, rank_col=rank_col, rank_col_data=rank_col_test, scatter_hue_dict=scatter_hue_dict,
rounddigit_rank=rounddigit_rank, rounddigit_x1=rounddigit_x1, rounddigit_x2=rounddigit_x2, rounddigit_x3=rounddigit_x3,
cv_index=i, subplot_kws=subplot_kws, heat_kws=heat_kws, scatter_kws=scatter_kws, legend_kws=legend_kws)
| ja | 0.545925 | # regression_heat_plotメソッド (回帰モデルヒートマップ表示)における、散布図カラーマップ 指定桁数で小数を丸める Parameters ---------- src : float 丸め対象の数値 rounddigit : int フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定) method : int 桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定) 指定桁数でdictの値を丸める Parameters ---------- srcdict : dict[str, float] 丸め対象のdict rounddigit : int フィッティング線の表示範囲(標準偏差の何倍まで表示するか指定) method : int 桁数決定手法('decimal':小数点以下, 'sig':有効数字(Decimal指定), 'format':formatで有効桁数指定) 回帰評価指標を算出してdict化 入力データの形式統一(pd.DataFrame or np.ndarray) # dataがpd.DataFrameのとき # dataがNoneのとき(x, y, cv_groupがnp.ndarray) # x_colnameとXの整合性確認 # cv_group指定時 誤差上位を文字プロット Parameters ---------- y_true : np.ndarray 目的変数実測値 y_pred : np.ndarray 目的変数予測値 rank_number : int 誤差上位何番目までを文字表示するか rank_col : List[str] 誤差上位と一緒に表示するフィールド名 (NoneならIndexを使用) x : np.ndarray 説明変数の値 (Noneなら横軸y_true縦軸y_pred、Noneでなければ横軸x縦軸y_true) ax : matplotlib.axes.Axes 表示対象のax(Noneならmatplotlib.pyplot.plotで1枚ごとにプロット) rounddigit: int 表示指標の小数丸め桁数 # 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用 # 横軸y_true縦軸y_pred (regression_pred_trueメソッド用) # 横軸x縦軸y_true (regression_plot_1dメソッド用) np.ndarrayを入力として散布図表示(scatterplot) # X値とY値を合体してDataFrame化 # 色分け指定しているとき、色分け用のフィールドを追加 # 散布図プロット # 凡例追加 予測値と実測値を、回帰評価指標とともにプロット Parameters ---------- y_true : ndarray 目的変数実測値 y_pred : ndarray 目的変数予測値 hue_data : ndarray 色分け用ラベルデータ hue_name : str 色分け用の列名 ax : matplotlib.axes.Axes 表示対象のax (Noneならmatplotlib.pyplot.plotで1枚ごとにプロット) linecolor : str 予測値=実測値の線の色 linesplit : int フィッティング線の分割数 (カクカクしたら増やす) rounddigit: int 表示指標の小数丸め桁数 score_dict : dict[str, float] 算出した評価指標一覧 scatter_kws : dict Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. 
See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html # 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用 # score_dictがNoneのとき、空のDictを加瀬宇 # scatter_kwsがNoneなら空のdictを入力 # 散布図プロット # 予測値=実測値の線を作成 # 評価指標文字列作成 # 線と文字をプロット Plot prediction vs. true scatter plots of any scikit-learn regression estimator Parameters ---------- estimator : estimator object implementing ``fit`` Regression estimator. This is assumed to implement the scikit-learn estimator interface. x : str or list[str] Explanatory variables. y : str Objective variable. data : pd.DataFrame Input data structure. x_colnames: list[str], optional Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame hue : str, optional Grouping variable that will produce points with different colors. linecolor : str, optional Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html rounddigit: int, optional Round a number of score to a given precision in decimal digits. rank_number : int, optional Number of emphasized data that are in the top posiotions for regression error. rank_col : list[str], optional Variables that are displayed with emphasized data that are in the top posiotions for regression error. scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list, optional Regression score that are displayed at the lower right of the graph. cv_stats : {'mean', 'median', 'max', 'min'}, optional Statistical method of cross validation score that are displayed at the lower right of the graph. cv : int, cross-validation generator, or an iterable, optional Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold. cv_seed : int, optional Seed for random number generator of cross validation. cv_group: str, optional Group variable for the samples used while splitting the dataset into train/test set. 
This argument is passed to ``groups`` argument of cv.split(). ax : {matplotlib.axes.Axes, list[matplotlib.axes.Axes]}, optional Pre-existing axes for the plot or list of it. Otherwise, call matplotlib.pyplot.subplot() internally. estimator_params : dict, optional Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. fit_params : dict, optional Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None. If "all", use all data in `X` and `y`. If "train", select train data from `X` and `y` using cv.split(). If "test", select test data from `X` and `y` using cv.split(). If "original", use raw `eval_set`. If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline. subplot_kws : dict, optional Additional parameters passed to matplotlib.pyplot.subplots(), e.g. figsize. Available only if ``axes`` is None. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html scatter_kws: dict, optional Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html Returns ---------- score_dict : dict Validation scores, e.g. 
r2, mae and rmse # 入力データの形式統一 # scoresの型をListに統一 # 学習器パラメータがあれば適用 # 学習時パラメータがNoneなら空のdictを入力 # subplot_kwsがNoneなら空のdictを入力 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # クロスバリデーション有無で場合分け # クロスバリデーション未実施時(学習データからプロット&指標算出) # 学習と推論 # 評価指標算出 # 色分け用データ取得 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # 予測値と実測値プロット # 誤差上位を文字表示 # クロスバリデーション実施時(分割ごとに別々にプロット&指標算出) # 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割 #LeaveOneOutかどうかを判定 # cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等) # LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定 # fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用) # 最終学習器以外の前処理変換器作成 # スコア種類ごとにクロスバリデーションスコアの算出 # cross_val_scoreでクロスバリデーション # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # 正負を逆にしてルートをとる # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # 表示用のax作成 # LeaveOneOutのとき、クロスバリデーションごとの図は作成せず # LeaveOneOut以外のとき、クロスバリデーションごとに図作成 # クロスバリデーション # 表示用にテストデータと学習データ分割 # 色分け用データ取得(していないときは、クロスバリデーション番号を使用、LeaveOuneOutのときは番号分けない) # 色分け名を'cv_number'に指定 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # eval_setの中から学習データ or テストデータのみを抽出 # 学習と推論 # 学習データスコア算出 # CV内結果をプロット(LeaveOneOutのときはプロットしない) # 全体プロット用データに追加 # 全体プロット用データを合体 # スコアの統計値を計算 # 学習データスコアをdictに追加 # 全体プロット # 誤差上位を文字表示 # figsize (全ての図全体のサイズ)指定 # プロット用のaxes作成 # 全列を走査 # 該当列(グラフのX軸)の値を作成 # 該当列以外を抽出して平均値算出 # 該当列を挿入して説明変数とし、モデルで推論 # 実測値を散布図プロット # 推測値曲線をプロット # 色分け時は凡例表示 Plot relationship between one explanatory variable and predicted value by line graph. Other explanatory variables are fixed to aggregated values such as mean values or median values. Parameters ---------- estimator : estimator object implementing ``fit`` Regression estimator. This is assumed to implement the scikit-learn estimator interface. x : list[str] or np.ndarray Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None y : str or np.ndarray Objective variable. Should be str if ``data`` is pd.DataFrame. 
Should be np.ndarray if ``data`` is None data: pd.DataFrame Input data structure. x_colnames: list[str], optional Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame hue : str, optional Grouping variable that will produce points with different colors. aggregate : {'mean', 'median'}, optional Statistic method of aggregating explanatory variables except x_axis variable. cv : int, cross-validation generator, or an iterable, optional Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold. cv_seed : int, optional Seed for random number generator of cross validation. cv_group: str, optional Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split(). display_cv_indices : int or list, optional Cross validation index or indices to display. estimator_params : dict, optional Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. fit_params : dict, optional Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional Select data passed to `eval_set` in `fit_params`. Available only if `estimator` is LightGBM or XGBoost and `cv` is not None. If "all", use all data in `X` and `y`. If "train", select train data from `X` and `y` using cv.split(). If "test", select test data from `X` and `y` using cv.split(). If "original", use raw `eval_set`. If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline. 
subplot_kws: dict, optional Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html plot_kws: dict, optional Additional parameters passed to matplotlib.axes.Axes.plot(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.plot.html scatter_kws: dict, optional Additional parameters passed to seaborn.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to matplotlib.axes.Axes.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html # 入力データの形式統一 # display_cv_indicesをList化 # 学習器パラメータがあれば適用 # 学習時パラメータがNoneなら空のdictを入力 # subplot_kwsがNoneなら空のdictを入力 # plot_kwsがNoneなら空のdictを入力 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # クロスバリデーション有無で場合分け # クロスバリデーション未実施時(学習データからプロット&指標算出) # 学習と推論 # 平均値 # クロスバリデーション実施時(分割ごとに別々にプロット&指標算出) # 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割 # LeaveOneOutのときエラーを出す # cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等) # LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定 # fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用) # 最終学習器以外の前処理変換器作成 # クロスバリデーション # 表示対象以外のCVなら飛ばす # 表示用にテストデータと学習データ分割 # eval_setの中から学習データ or テストデータのみを抽出 # 学習と推論 # ヒートマップをプロット Plot linear regression line and calculate Pearson correlation coefficient. Parameters ---------- x : str Variable that specify positions on the x. y : str Variable that specify positions on the y. data : pd.DataFrame Input data structure. x_colname: str, optional Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame ax : matplotlib.axes.Axes, optional Pre-existing axes for the plot. Otherwise, call matplotlib.pyplot.gca() internally. hue : str, optional Grouping variable that will produce points with different colors. linecolor : str, optional Color of regression line. 
See https://matplotlib.org/stable/gallery/color/named_colors.html rounddigit: int, optional Round a number of score to a given precision in decimal digits. plot_scores: bool, optional If True, display Pearson correlation coefficient and the p-value. scatter_kws: dict, optional Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html Returns ---------- ax : matplotlib.axes.Axes Returns the Axes object with the plot drawn onto it. # 入力データの形式統一 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # まずは散布図プロット # 凡例追加 # 線形回帰モデル作成 # 回帰線を描画 # 回帰式、ピアソンの相関係数およびp値を表示 # 回帰式 # ピアソン相関係数 # プロット 1次説明変数回帰曲線を、回帰評価指標とともにプロット Parameters ---------- trained_estimator : 学習済の回帰モデル(scikit-learn API) X : ndarray 説明変数 y_true : ndarray 目的変数実測値 hue_data : ndarray 色分け用ラベルデータ hue_name : str 色分け用の列名 ax : matplotlib.axes.Axes 表示対象のax (Noneならplt.plotで1枚ごとにプロット) linecolor : str 予測値=実測値の線の色 linesplit : int フィッティング線の分割数 (カクカクしたら増やす) rounddigit: int 表示指標の小数丸め桁数 score_dict : dict[str, float] 算出した評価指標一覧 scatter_kws: dict, optional Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html # 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用 # score_dictがNoneのとき、空のDictを入力 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # 散布図プロット # 回帰モデルの線を作成 # 回帰線を描画 # 評価指標文字列作成 Plot regression lines of any scikit-learn regressor with 1D explanatory variable. Parameters ---------- estimator : estimator object implementing ``fit`` Regression estimator. This is assumed to implement the scikit-learn estimator interface. 
x : str, or np.ndarray Explanatory variables. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None y : str or np.ndarray Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None data: pd.DataFrame Input data structure. x_colname: str, optional Names of explanatory variable. Available only if ``data`` is NOT pd.DataFrame hue : str, optional Grouping variable that will produce points with different colors. linecolor : str, optional Color of prediction = true line. See https://matplotlib.org/stable/gallery/color/named_colors.html rounddigit: int, optional Round a number of score to a given precision in decimal digits. rank_number : int, optional Number of emphasized data that are in the top positions for regression error. rank_col : list[str], optional Variables that are displayed with emphasized data that are in the top posiotions for regression error. scores : {'r2', 'mae', 'mse', 'rmse', 'rmsle', 'mape', 'max_error'} or list,, optional Regression score that are displayed at the lower right of the graph. cv_stats : {'mean', 'median', 'max', 'min'}, optional Statistical method of cross validation score that are displayed at the lower right of the graph. cv : int, cross-validation generator, or an iterable, optional Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold. cv_seed : int, optional Seed for random number generator of cross validation. cv_group: str, optional Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split(). estimator_params : dict, optional Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. 
fit_params : dict, optional Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. subplot_kws : dict, optional Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost. If "all", use all data in `X` and `y`. If "train", select train data from `X` and `y` using cv.split(). If "test", select test data from `X` and `y` using cv.split(). If "original", use raw `eval_set`. If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline. scatter_kws: dict, optional Additional parameters passed to sns.scatterplot(), e.g. ``alpha``. See https://seaborn.pydata.org/generated/seaborn.scatterplot.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html Returns ---------- score_dict : dict Validation scores, e.g. 
r2, mae and rmse # 入力データの形式統一 # scoresの型をListに統一 # 学習器パラメータがあれば適用 # 学習時パラメータがNoneなら空のdictを入力 # subplot_kwsがNoneなら空のdictを入力 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # クロスバリデーション有無で場合分け # クロスバリデーション未実施時(学習データからプロット&指標算出) # 学習と推論 # 評価指標算出 # 色分け用データ取得 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # 回帰線プロット # 誤差上位を文字表示 # クロスバリデーション実施時(分割ごとに別々にプロット&指標算出) # 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割 #LeaveOneOutのときエラーを出す # cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等) # LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定 # fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用) # 最終学習器以外の前処理変換器作成 # スコア種類ごとにクロスバリデーションスコアの算出 # cross_val_scoreでクロスバリデーション # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # 正負を逆にしてルートをとる # scikit-learnの仕様に合わせ正負を逆に # scikit-learnの仕様に合わせ正負を逆に # 表示用のaxes作成 # クロスバリデーションごとに図作成 # クロスバリデーション # 表示用にテストデータと学習データ分割 # 色分け用データ取得(していないときは、クロスバリデーション番号を使用、LeaveOuneOutのときは番号分けない) # 色分け名を'cv_number'に指定 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # eval_setの中から学習データ or テストデータのみを抽出 # 学習と推論 # 学習データスコア算出 # CV内結果をプロット # 誤差上位を文字表示 # スコアの統計値を計算 # 学習データスコアをdictに追加 # 全体色分け用データ取得 # 全体プロット 回帰予測値ヒートマップと各種散布図の表示 (regression_heat_plotメソッドの描画処理部分) # 描画用axがNoneのとき、matplotlib.pyplot.gca()を使用 # ヒートマップ用グリッドデータを作成 # 推論用に全説明変数を保持したndarrayを作成 (ヒートマップ非使用変数は固定値other_xとして追加) # ヒートマップ使用変数(1個目)を追加 # ヒートマップ使用変数(2個目)を追加 # ヒートマップ非使用変数(1個目)を固定値として追加 # ヒートマップ非使用変数(2個目)を固定値として追加 # グリッドデータに対して学習し、推定値を作成 # グリッドデータ縦軸横軸の表示桁数を調整 # グリッドデータをピボット化 # 横軸の列数がheat_divisionに満たない時、分解能不足のためrounddigit_x1桁数を増やすようエラー表示 # 縦軸の列数がheat_divisionに満たない時、分解能不足のためrounddigit_x2桁数を増やすようエラー表示 # ヒートマップのカラーマップ指定ないとき、YlGnを指定 # ヒートマップをプロット # 誤差散布図をプロット # 軸範囲が0~heat_divisionになっているので、スケール変換 # 色分け # 誤差で色分け # 散布図のカラーマップ指定ないとき、seismicを指定 # 真値で色分け # 散布図のカラーマップ指定ないとき、ヒートマップと同cmap使用 # 線の色指定ないとき、ブラウンを指定 # 散布図プロット (誤差or真値で色分けしたとき) # 散布図プロット (hue列名で色分けしたとき) # 誤差上位を文字表示 # rank_col指定ないとき、indexがfloat型に変換されてしまうので、int型に戻す # 誤差を計算してテキスト化 # 
軸範囲が0~heat_divisionになっているので、スケール変換してプロット 回帰予測値ヒートマップ表示の、説明変数の数に応じた分岐処理 (regression_heat_plotメソッド処理のうち、説明変数の数に応じたデータ分割等を行う) # 説明変数の数 # ヒートマップ使用DataFrame # ヒートマップ非使用DataFrame # 結合&目的変数実測値と予測値追加 # ヒートップ非使用変数を標準化してDataFrameに追加 # 誤差上位表示用IDデータをDataFrameに追加 # 散布図色分け用列をDataFrameに追加(hue_nameがNoneでないときのみ)) # 誤差の順位を計算 # ヒートマップのX1軸およびX2軸の表示範囲(最大最小値 + extendsigma) # プロットする図の数(sigmarange外「2枚」 + sigmarange内「int(pair_sigmarange / pair_sigmainterval) * 2枚」) # ヒートップ非使用変数をプロットする範囲の下限(標準化後) # 説明変数が2次元のとき (図は1枚のみ) # 説明変数が3次元のとき (図はpair_n × 1枚) # 説明変数が4次元のとき (図はpair_n × pair_n枚) # figsize (全ての図全体のサイズ)指定 # プロット用のaxes作成 # 図ごとにプロット # pair縦軸変数(標準化後)の最小値 # ヒートマップ非使用変数指定用の平均値 # ヒートマップ非使用変数指定用の平均値 # pair縦軸変数(標準化後)の最大値 # pair横軸変数(標準化後)の最小値 # ヒートマップ非使用変数指定用の平均値 # ヒートマップ非使用変数指定用の平均値 # pair横軸変数(標準化後)の最大値 # 説明変数が2次元のとき (図は1枚のみ) # 説明変数が3次元のとき (図はpair_n × 1枚) # 縦軸変数範囲内のみのデータを抽出 # ヒートマップ非使用変数の標準化逆変換 # 説明変数が4次元のとき (図はpair_n × pair_n枚) # 縦軸変数範囲内のみのデータを抽出 # 横軸変数範囲内のみのデータを抽出 # ヒートマップ非使用変数の標準化逆変換 # グラフタイトルとして、ヒートマップ非使用変数の範囲を記載(説明変数が3次元以上のとき) # 字が重なるのでtight_layoutにする Plot regression heatmaps of any scikit-learn regressor with 2 to 4D explanatory variables. Parameters ---------- estimator : estimator object implementing ``fit`` Regression estimator. This is assumed to implement the scikit-learn estimator interface. x : list[str] or np.ndarray Explanatory variables. Should be list[str] if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None y : str or np.ndarray Objective variable. Should be str if ``data`` is pd.DataFrame. Should be np.ndarray if ``data`` is None data: pd.DataFrame Input data structure. x_colnames: list[str], optional Names of explanatory variables. Available only if ``data`` is NOT pd.DataFrame x_heat: list[str], optional X-axis and y-axis variables of heatmap. If None, use two variables in ``x`` from the front. scatter_hue : str, optional Grouping variable that will produce points with different colors. Available only if plot_scatter is set to ``hue``. 
pair_sigmarange: float, optional Set the range of subplots. The lower limit is mean({x3, x4}) - ``pair_sigmarange`` * std({x3, x4}). The higher limit is mean({x3, x4}) + ``pair_sigmarange`` * std({x3, x4}). Available only if len(x) is bigger than 2. pair_sigmainterval: float, optional Set the interval of subplots. For example, if ``pair_sigmainterval`` is set to 0.5 and ``pair_sigmarange`` is set to 1.0, The ranges of subplots are lower than μ-1σ, μ-1σ to μ-0.5σ, μ-0.5σ to μ, μ to μ+0.5σ, μ+0.5σ to μ+1σ, and higher than μ+1σ. Available only if len(x) is bigger than 2. heat_extendsigma: float, optional Set the axis view limits of the heatmap. The lower limit is min({x1, x2}) - std({x1, x2}) * ``heat_extendsigma``. The higher limit is max({x1, x2}) + std({x1, x2}) * ``heat_extendsigma`` heat_division: int, optional Resolution of the heatmap. color_extendsigma: float, optional Set the colormap limits of the heatmap. The lower limit is min(y_ture) - std(y_ture) * ``color_extendsigma``. The higher limit is max(y_ture) - std(y_ture) * ``color_extendsigma``. plot_scatter: {'error', 'true', 'hue'}, optional Color decision of scatter plot. If 'error', to be mapped to colors using error value. If 'true', to be mapped to colors using y_ture value. If 'hue', to be mapped to colors using scatter_hue variable. If None, no scatter. rounddigit_rank: int, optional Round a number of error that are in the top posiotions for regression error to a given precision in decimal digits. rounddigit_x1: int, optional Round a number of x-axis valiable of the heatmap to a given precision in decimal digits. rounddigit_x2: int, optional Round a number of y-axis valiable of the heatmap to a given precision in decimal digits. rounddigit_x3: int, optional Round a number of y-axis valiable of subplots to a given precision in decimal digits. rank_number: int, optional Number of emphasized data that are in the top posiotions for regression error. 
rank_col: str, optional Variables that are displayed with emphasized data that are in the top posiotions for regression error. cv : int, cross-validation generator, or an iterable, optional Determines the cross-validation splitting strategy. If None, to use the default 5-fold cross validation. If int, to specify the number of folds in a KFold. cv_seed : int, optional Seed for random number generator of cross validation. cv_group: str, optional Group variable for the samples used while splitting the dataset into train/test set. This argument is passed to ``groups`` argument of cv.split(). display_cv_indices : int or list, optional Cross validation index or indices to display. estimator_params : dict, optional Parameters passed to the regression estimator. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. fit_params : dict, optional Parameters passed to the fit() method of the regression estimator, e.g. ``early_stopping_round`` and ``eval_set`` of XGBRegressor. If the estimator is pipeline, each parameter name must be prefixed such that parameter p for step s has key s__p. eval_set_selection: {'all', 'test', 'train', 'original', 'original_transformed'}, optional Select data passed to `eval_set` in `fit_params`. Available only if "estimator" is LightGBM or XGBoost. If "all", use all data in `X` and `y`. If "train", select train data from `X` and `y` using cv.split(). If "test", select test data from `X` and `y` using cv.split(). If "original", use raw `eval_set`. If "original_transformed", use `eval_set` transformed by fit_transform() of pipeline if `estimater` is pipeline. subplot_kws: dict, optional Additional parameters passed to matplotlib.pyplot.subplots(), e.g. ``figsize``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.subplots.html heat_kws: dict, optional Additional parameters passed to sns.heatmap(), e.g. ``cmap``. 
See https://seaborn.pydata.org/generated/seaborn.heatmap.html scatter_kws: dict, optional Additional parameters passed to matplotlib.pyplot.scatter(), e.g. ``alpha``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html legend_kws : dict Additional parameters passed to ax.legend(), e.g. ``loc``. See https://matplotlib.org/stable/api/_as_gen/matplotlib.axes.Axes.legend.html # 入力データの形式統一 # 説明変数xの次元が2~4以外ならエラーを出す # display_cv_indicesをList化 # 学習器パラメータがあれば適用 # 学習時パラメータがNoneなら空のdictを入力 # subplot_kwsがNoneなら空のdictを入力 # heat_kwsがNoneなら空のdictを入力 # scatter_kwsがNoneなら空のdictを入力 # legend_kwsがNoneなら空のdictを入力 # ヒートマップ表示用の列を抽出 # 列名指定していないとき、前から2列を抽出 # 列名指定しているとき、該当列のXにおけるインデックス(0~3)を保持 # ヒートマップ表示以外の列 # ヒートマップの色分け最大最小値(y_trueの最大最小値 ± y_trueの標準偏差 × color_extendsigma) # 引数plot_scatter='hue'とscatter_hueが同時指定されていないとき、エラーを出す # 引数plot_scatter='hue'のとき、色分け対象列とカラーマップを紐づけ(色分けを全ての図で統一用) # クロスバリデーション有無で場合分け # クロスバリデーション未実施時(学習データからプロット&指標算出) # 学習と推論 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # 誤差最大値 # 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効) # ヒートマップをプロット # クロスバリデーション実施時(分割ごとに別々にプロット&指標算出) # 分割法未指定時、cv_numとseedに基づきKFoldでランダムに分割 # LeaveOneOutのときエラーを出す # cv_groupをグルーピング対象に指定(GroupKFold、LeaveOneGroupOut等) # LeaveOneGroupOutのとき、クロスバリデーション分割数をcv_groupの数に指定 # fit_paramsにeval_metricが入力されており、eval_setが入力されていないときの処理(eval_setにテストデータを使用) # 最終学習器以外の前処理変換器作成 # クロスバリデーション # 表示対象以外のCVなら飛ばす # 表示用にテストデータと学習データ分割 # eval_setの中から学習データ or テストデータのみを抽出 # 学習と推論 # 誤差上位表示用データ取得 # 表示フィールド指定ないとき、Index使用 # 表示フィールド指定あるとき # 誤差最大値 # 散布図色分け用データ取得(plot_scatter='hue'のときのみ有効)) # ヒートマップをプロット | 2.546615 | 3 |
get_repos.py | Mmesek/MFramework.py | 0 | 6622049 | <gh_stars>0
import os, importlib
try:
from git import Repo
HAS_GIT = True
except ImportError:
print("Couldn't find git, repos won't be cloned or updated if missing")
HAS_GIT = False
BASE_PATH = "/repos/"
def make_pth(package: str):
path = BASE_PATH + package + "/" + package
with open(f'{package}.pth','w',encoding='utf-8') as file:
file.write(path)
print(package, "- created .pth file to", path)
def clone_repo(module: str, url: str):
path = BASE_PATH + module
if not os.path.exists(path):
Repo.clone_from(url, path)
print(module, "- Cloned from", url, "to", path)
else:
r = Repo(path).remotes.origin
p = r.pull()
if p[0].commit.hexsha != r.repo.head.commit.hexsha:
print(module, "- pulled new commit: ", p[0].commit.summary)
def check_package(module: str, repo: str):
try:
importlib.import_module(module)
except ModuleNotFoundError:
print(module, "- Module not found")
make_pth(module)
if HAS_GIT:
clone_repo(module, repo)
check_package("mlib", "Mmesek/mlib")
check_package("mdiscord", "Mmesek/mdiscord")
check_package("MFramework", "Mmesek/MFramework.py")
| import os, importlib
try:
from git import Repo
HAS_GIT = True
except ImportError:
print("Couldn't find git, repos won't be cloned or updated if missing")
HAS_GIT = False
BASE_PATH = "/repos/"
def make_pth(package: str):
path = BASE_PATH + package + "/" + package
with open(f'{package}.pth','w',encoding='utf-8') as file:
file.write(path)
print(package, "- created .pth file to", path)
def clone_repo(module: str, url: str):
path = BASE_PATH + module
if not os.path.exists(path):
Repo.clone_from(url, path)
print(module, "- Cloned from", url, "to", path)
else:
r = Repo(path).remotes.origin
p = r.pull()
if p[0].commit.hexsha != r.repo.head.commit.hexsha:
print(module, "- pulled new commit: ", p[0].commit.summary)
def check_package(module: str, repo: str):
try:
importlib.import_module(module)
except ModuleNotFoundError:
print(module, "- Module not found")
make_pth(module)
if HAS_GIT:
clone_repo(module, repo)
check_package("mlib", "Mmesek/mlib")
check_package("mdiscord", "Mmesek/mdiscord")
check_package("MFramework", "Mmesek/MFramework.py") | none | 1 | 2.539119 | 3 | |
Net.py | xuhongzuo/plsd | 1 | 6622050 | import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import logging
class BaseNet(nn.Module):
"""Base class for all neural networks."""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.rep_dim = None # representation dimensionality, i.e. dim of the code layer or last layer
def forward(self, *input):
"""
Forward pass logic
:return: Network output
"""
raise NotImplementedError
def summary(self):
"""Network summary."""
net_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in net_parameters])
self.logger.info('Trainable parameters: {}'.format(params))
self.logger.info(self)
class MLP(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(MLP, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.out(x)
return x
class MLPDrop(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output, drop_p):
super(MLPDrop, self).__init__()
self.classifier = nn.Sequential(
nn.Linear(n_input, n_hidden),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(n_hidden, n_output)
)
def forward(self, x):
x = self.classifier(x)
return x
class MLPDrop2(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output, drop_p):
super(MLPDrop2, self).__init__()
self.fc = nn.Linear(n_input, n_hidden)
self.dropout = nn.Dropout(drop_p)
self.fc2 = nn.Linear(n_hidden, n_output)
def forward(self, x):
outs = {}
x = self.fc(x)
outs["h1"] = x
x = self.dropout(x)
outs["drop_h1"] = x
x = F.leaky_relu(x, 0.2, inplace=True)
outs["relu_h1"] = x
x = self.fc2(x)
return x, outs
class MLP2Drop(torch.nn.Module):
def __init__(self, n_input, n_hidden1, n_hidden2, n_output, drop_p=0.2):
super(MLP2Drop, self).__init__()
self.classifier = nn.Sequential(
nn.Linear(n_input, n_hidden1),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, True),
nn.Linear(n_hidden1, n_hidden2),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, True),
nn.Linear(n_hidden2, n_output)
)
def forward(self, x):
x = self.classifier(x)
return x
| import torch
import torch.nn.functional as F
import torch.nn as nn
import numpy as np
import logging
class BaseNet(nn.Module):
"""Base class for all neural networks."""
def __init__(self):
super().__init__()
self.logger = logging.getLogger(self.__class__.__name__)
self.rep_dim = None # representation dimensionality, i.e. dim of the code layer or last layer
def forward(self, *input):
"""
Forward pass logic
:return: Network output
"""
raise NotImplementedError
def summary(self):
"""Network summary."""
net_parameters = filter(lambda p: p.requires_grad, self.parameters())
params = sum([np.prod(p.size()) for p in net_parameters])
self.logger.info('Trainable parameters: {}'.format(params))
self.logger.info(self)
class MLP(torch.nn.Module):
def __init__(self, n_feature, n_hidden, n_output):
super(MLP, self).__init__()
self.hidden = torch.nn.Linear(n_feature, n_hidden) # hidden layer
self.out = torch.nn.Linear(n_hidden, n_output) # output layer
def forward(self, x):
x = F.relu(self.hidden(x))
x = self.out(x)
return x
class MLPDrop(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output, drop_p):
super(MLPDrop, self).__init__()
self.classifier = nn.Sequential(
nn.Linear(n_input, n_hidden),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, inplace=True),
nn.Linear(n_hidden, n_output)
)
def forward(self, x):
x = self.classifier(x)
return x
class MLPDrop2(torch.nn.Module):
def __init__(self, n_input, n_hidden, n_output, drop_p):
super(MLPDrop2, self).__init__()
self.fc = nn.Linear(n_input, n_hidden)
self.dropout = nn.Dropout(drop_p)
self.fc2 = nn.Linear(n_hidden, n_output)
def forward(self, x):
outs = {}
x = self.fc(x)
outs["h1"] = x
x = self.dropout(x)
outs["drop_h1"] = x
x = F.leaky_relu(x, 0.2, inplace=True)
outs["relu_h1"] = x
x = self.fc2(x)
return x, outs
class MLP2Drop(torch.nn.Module):
def __init__(self, n_input, n_hidden1, n_hidden2, n_output, drop_p=0.2):
super(MLP2Drop, self).__init__()
self.classifier = nn.Sequential(
nn.Linear(n_input, n_hidden1),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, True),
nn.Linear(n_hidden1, n_hidden2),
nn.Dropout(drop_p),
nn.LeakyReLU(0.2, True),
nn.Linear(n_hidden2, n_output)
)
def forward(self, x):
x = self.classifier(x)
return x
| en | 0.524969 | Base class for all neural networks. # representation dimensionality, i.e. dim of the code layer or last layer Forward pass logic :return: Network output Network summary. # hidden layer # output layer | 3.071465 | 3 |