code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# coding: utf-8
import os
import logging.config
from webspider import setting
LOG_FILE_PATH = os.path.join(setting.BASE_DIR, 'log', 'spider_log.txt')
LOGGING_CONFIG = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'default': {
'format': '%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s',
'datefmt': '%Y/%m/%d %H:%M:%S'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'formatter': 'default',
'class': 'logging.StreamHandler'
},
'smtp': {
'level': 'ERROR',
'class': 'logging.handlers.SMTPHandler',
'formatter': 'default',
'mailhost': (setting.SMTP_CONF['host'], setting.SMTP_CONF['port']),
'fromaddr': setting.SMTP_CONF['from_email'],
'toaddrs': [setting.SMTP_CONF['to_email'], ],
'subject': '爬虫系统出现异常',
'credentials': (setting.MAIL_CONF['username'], setting.MAIL_CONF['password'])
},
'file': {
'level': 'ERROR',
'formatter': 'default',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_FILE_PATH,
'encoding': 'utf8'
},
},
'loggers': {
'': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'webspider': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'tornado': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
'tornado.access': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'tornado.application': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'tornado.general': {
'handlers': ['console', 'file'],
'propagate': False,
'level': 'INFO',
},
'sqlalchemy.engine': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'gunicorn': {
'handlers': ['console', 'file'],
'level': 'INFO',
'propagate': False,
},
'celery': {
'handlers': ['console', 'file'],
'level': 'DEBUG',
'propagate': False,
},
},
}
def config_logging():
"""配置日志"""
logging.config.dictConfig(LOGGING_CONFIG)
| [
"os.path.join"
] | [((96, 151), 'os.path.join', 'os.path.join', (['setting.BASE_DIR', '"""log"""', '"""spider_log.txt"""'], {}), "(setting.BASE_DIR, 'log', 'spider_log.txt')\n", (108, 151), False, 'import os\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Generate dummy data for tests/examples
"""
import numpy as np
def dummy_gauss_image(x=None, y=None,
xhalfrng=1.5, yhalfrng=None, xcen=0.5, ycen=0.9,
xnpts=1024, ynpts=None, xsigma=0.55, ysigma=0.25,
noise=0.3):
"""Create a dummy 2D Gaussian image with noise
Parameters
----------
x, y : 1D arrays (optional)
arrays where to generate the image [None -> generated]
xhalfrng : float (optional)
half range of the X axis [1.5]
yhalfrng : float or None (optional)
half range of the Y axis [None -> xhalfrng]
xcen : float (optional)
X center [0.5]
ycen : float (optional)
Y center [0.9]
xnpts : int (optional)
number of points X [1024]
ynpts : int or None (optional)
number of points Y [None -> xnpts]
xsigma : float (optional)
sigma X [0.55]
ysigma : float (optional)
sigma Y [0.25]
noise : float (optional)
random noise level between 0 and 1 [0.3]
Returns
-------
x, y : 1D arrays
signal : 2D array
"""
if yhalfrng is None:
yhalfrng = xhalfrng
if ycen is None:
ycen = xcen
if ynpts is None:
ynpts = xnpts
if x is None:
x = np.linspace(xcen-xhalfrng, xcen+xhalfrng, xnpts)
if y is None:
y = np.linspace(ycen-yhalfrng, ycen+yhalfrng, ynpts)
xx, yy = np.meshgrid(x, y)
signal = np.exp(-((xx-xcen)**2 / (2*xsigma**2) +
((yy-ycen)**2 / (2*ysigma**2))))
# add noise
signal += noise * np.random.random(size=signal.shape)
return x, y, signal
def dummy_gauss_curve(xhalfrng=15, xcen=5, xnpts=512, xsigma=0.65, noise=0.3):
"""Create a dummy 1D Gaussian curve with noise
Parameters
----------
xhalfrng : float (optional)
half range of the X axis [1.5]
xcen : float (optional)
X center [0.5]
xnpts : int (optional)
number of points X [1024]
xsigma : float (optional)
sigma X [0.55]
noise : float (optional)
random noise level between 0 and 1 [0.3]
Returns
-------
x, signal : 1D arrays
"""
x = np.linspace(xcen-xhalfrng, xcen+xhalfrng, xnpts)
signal = np.exp(-((x-xcen)**2 / (2*xsigma**2)))
# add noise
signal += noise * np.random.random(size=signal.shape)
return x, signal
def main():
"""Show two plot windows with dummy data"""
from silx import sx
sx.enable_gui()
from sloth.gui.plot.plot1D import Plot1D
from sloth.gui.plot.plot2D import Plot2D
p1 = Plot1D()
p2 = Plot2D()
x, y = dummy_gauss_curve()
p1.addCurve(x, y, legend="test dummy Gaussian with noise")
p1.show()
x, y, signal = dummy_gauss_image()
p2.addImage(signal, x=x, y=y, legend="test dummy image")
p2.show()
input("Press enter to close windows")
if __name__ == '__main__':
main()
| [
"numpy.random.random",
"numpy.exp",
"silx.sx.enable_gui",
"numpy.linspace",
"numpy.meshgrid",
"sloth.gui.plot.plot1D.Plot1D",
"sloth.gui.plot.plot2D.Plot2D"
] | [((1478, 1495), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1489, 1495), True, 'import numpy as np\n'), ((1509, 1600), 'numpy.exp', 'np.exp', (['(-((xx - xcen) ** 2 / (2 * xsigma ** 2) + (yy - ycen) ** 2 / (2 * ysigma ** 2))\n )'], {}), '(-((xx - xcen) ** 2 / (2 * xsigma ** 2) + (yy - ycen) ** 2 / (2 * \n ysigma ** 2)))\n', (1515, 1600), True, 'import numpy as np\n'), ((2246, 2298), 'numpy.linspace', 'np.linspace', (['(xcen - xhalfrng)', '(xcen + xhalfrng)', 'xnpts'], {}), '(xcen - xhalfrng, xcen + xhalfrng, xnpts)\n', (2257, 2298), True, 'import numpy as np\n'), ((2308, 2354), 'numpy.exp', 'np.exp', (['(-((x - xcen) ** 2 / (2 * xsigma ** 2)))'], {}), '(-((x - xcen) ** 2 / (2 * xsigma ** 2)))\n', (2314, 2354), True, 'import numpy as np\n'), ((2532, 2547), 'silx.sx.enable_gui', 'sx.enable_gui', ([], {}), '()\n', (2545, 2547), False, 'from silx import sx\n'), ((2647, 2655), 'sloth.gui.plot.plot1D.Plot1D', 'Plot1D', ([], {}), '()\n', (2653, 2655), False, 'from sloth.gui.plot.plot1D import Plot1D\n'), ((2665, 2673), 'sloth.gui.plot.plot2D.Plot2D', 'Plot2D', ([], {}), '()\n', (2671, 2673), False, 'from sloth.gui.plot.plot2D import Plot2D\n'), ((1337, 1389), 'numpy.linspace', 'np.linspace', (['(xcen - xhalfrng)', '(xcen + xhalfrng)', 'xnpts'], {}), '(xcen - xhalfrng, xcen + xhalfrng, xnpts)\n', (1348, 1389), True, 'import numpy as np\n'), ((1416, 1468), 'numpy.linspace', 'np.linspace', (['(ycen - yhalfrng)', '(ycen + yhalfrng)', 'ynpts'], {}), '(ycen - yhalfrng, ycen + yhalfrng, ynpts)\n', (1427, 1468), True, 'import numpy as np\n'), ((1642, 1677), 'numpy.random.random', 'np.random.random', ([], {'size': 'signal.shape'}), '(size=signal.shape)\n', (1658, 1677), True, 'import numpy as np\n'), ((2385, 2420), 'numpy.random.random', 'np.random.random', ([], {'size': 'signal.shape'}), '(size=signal.shape)\n', (2401, 2420), True, 'import numpy as np\n')] |
"""
Copyright 2021 Dynatrace LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
from dynatrace.dynatrace_object import DynatraceObject
from typing import List, Optional, Union, Dict, Any
from dynatrace.environment_v2.schemas import EntityType, ManagementZone
from dynatrace.http_client import HttpClient
from dynatrace.pagination import PaginatedList
class NetworkZoneService:
ENDPOINT = "/api/v2/networkZones"
ENDPOINT_GLOBALSETTINGS = "/api/v2/networkZoneSettings"
def __init__(self, http_client: HttpClient):
self.__http_client = http_client
def list(self) -> PaginatedList["NetworkZone"]:
""" Lists all network zones. No params
:return: a list of Network Zones with details
"""
return PaginatedList(NetworkZone, self.__http_client, target_url=self.ENDPOINT, list_item="networkZones")
def get(self, networkzone_id: str):
""" Gets parameters of specified network zone
:param networkzone_id: the ID of the network zone
:return: a Network Zone + details
"""
response = self.__http_client.make_request(f"{self.ENDPOINT}/{networkzone_id}").json()
return NetworkZone(raw_element=response)
def update(self, networkzone_id: str, alternate_zones: Optional[List[str]] = None, description: Optional[str] = None):
""" Updates an existing network zone or creates a new one
:param networkzone_id: the ID of the network zone, if none exists, will create
:param alternate_zones: optional list of text body of alternative network zones
:param description: optional text body for short description of network zone
:return: HTTP response
"""
params = {"alternativeZones": alternate_zones, "description": description}
return self.__http_client.make_request(path=f"{self.ENDPOINT}/{networkzone_id}", params=params, method="PUT")
def delete(self, networkzone_id: str):
""" Deletes the specified network zone
:param networkzone_id: the ID of the network zone
:return: HTTP response
"""
return self.__http_client.make_request(path=f"{self.ENDPOINT}/{networkzone_id}", method="DELETE")
def getGlobalConfig(self):
""" Gets the global configuration of network zones. No params
:return: Network Zone Global Settings object
"""
response = self.__http_client.make_request(path=self.ENDPOINT_GLOBALSETTINGS).json()
return NetworkZoneSettings(raw_element=response)
def updateGlobalConfig(self, configuration: bool):
""" Updates the global configuration of network zones.
:param configuration: boolean setting to enable/disable NZs
:return: HTTP response
"""
params = {"networkZonesEnabled": configuration}
return self.__http_client.make_request(path=self.ENDPOINT_GLOBALSETTINGS, method="PUT", params=params)
class NetworkZone(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, Any]):
self.id: str = raw_element.get("id")
self.description: str = raw_element.get("description")
self.alternative_zones: List[str] = raw_element.get("alternativeZones")
self.num_oneagents_using: int = raw_element.get("numOfOneAgentsUsing")
self.num_oneagents_configured: int = raw_element.get("numOfConfiguredOneAgents")
self.num_oneagents_from_other_zones: int = raw_element.get("numOfOneAgentsFromOtherZones")
self.num_configured_activegates: int = raw_element.get("numOfConfiguredActiveGates")
class NetworkZoneSettings(DynatraceObject):
def _create_from_raw_data(self, raw_element: Dict[str, bool]):
self.network_zones_enabled: bool = raw_element.get("networkZonesEnabled") | [
"dynatrace.pagination.PaginatedList"
] | [((1268, 1370), 'dynatrace.pagination.PaginatedList', 'PaginatedList', (['NetworkZone', 'self.__http_client'], {'target_url': 'self.ENDPOINT', 'list_item': '"""networkZones"""'}), "(NetworkZone, self.__http_client, target_url=self.ENDPOINT,\n list_item='networkZones')\n", (1281, 1370), False, 'from dynatrace.pagination import PaginatedList\n')] |
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose
from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward
from nnlib.utils.derivative import sigmoid_backward, relu_backward
from nnlib.utils.activation import sigmoid, relu
def test_linear_backward():
rand = RandomState(1)
dZ = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
dA_prev, dW, db = linear_backward(dZ, (A, 1, W), alpha=0, keep_prob=1)
assert_allclose(dA_prev, [
[0.51822968, -0.19517421],
[-0.40506361, 0.15255393],
[2.37496825, -0.89445391]])
assert_allclose(dW, [[-0.10076895, 1.40685096, 1.64992505]])
assert_allclose(db, [[0.50629448]])
def test_linear_backward_activation_sigmoid():
rand = RandomState(2)
dA = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
_ = rand.randn(1, 1) # noqa: F841
Z = rand.randn(1, 2)
dA_prev, dW, db = linear_backward_activation(dA, ((A, 1, W), (Z, sigmoid(Z))), sigmoid_backward, alpha=0, keep_prob=1)
assert_allclose(dA_prev, np.array([
[0.11017994, 0.01105339],
[0.09466817, 0.00949723],
[-0.05743092, -0.00576154]]), rtol=1e-05)
assert_allclose(dW, np.array([[0.10266786, 0.09778551, -0.01968084]]), rtol=1e-05)
assert_allclose(db, np.array([[-0.05729622]]), rtol=1e-05)
def test_linear_backward_activation_relu():
rand = RandomState(2)
dA = rand.randn(1, 2)
A = rand.randn(3, 2)
W = rand.randn(1, 3)
_ = rand.randn(1, 1) # noqa: F841
Z = rand.randn(1, 2)
dA_prev, dW, db = linear_backward_activation(dA, ((A, 1, W), (Z, relu(Z))), relu_backward, alpha=0, keep_prob=1)
assert_allclose(dA_prev, np.array([
[0.44090989, 0.],
[0.37883606, 0.],
[-0.2298228, 0.]]), rtol=1e-05)
assert_allclose(dW, np.array([[0.44513824, 0.37371418, -0.10478989]]), rtol=1e-05)
assert_allclose(db, np.array([[-0.20837892]]), rtol=1e-05)
def test_model_backward():
rand = RandomState(3)
AL = rand.randn(1, 2)
Y = np.array([[1, 0]])
X = rand.randn(4, 2)
W1 = rand.randn(3, 4)
b1 = rand.randn(3, 1)
Z1 = rand.randn(3, 2)
A1 = rand.randn(3, 2)
W2 = rand.randn(1, 3)
b2 = rand.randn(1, 1)
Z2 = rand.randn(1, 2)
parameters = dict(
W={1: W1, 2: W2},
b={1: b1, 2: b2}
)
caches = dict(
Z={1: Z1, 2: Z2},
A={0: X, 1: A1, 2: sigmoid(Z2)},
D={0: 1, 1: 1}
)
grads = model_backward(AL, Y, parameters, caches, alpha=0, keep_prob=1)
assert_allclose(
grads["dW"][1],
np.array([
[0.41010002, 0.07807203, 0.13798444, 0.10502167],
[0., 0., 0., 0.],
[0.05283652, 0.01005865, 0.01777766, 0.0135308]]),
rtol=1e-05
)
assert_allclose(
grads["db"][1],
np.array([
[-0.22007063],
[0.],
[-0.02835349]])
)
assert_allclose(
grads["dA"][1],
np.array([
[0.12913162, -0.44014127],
[-0.14175655, 0.48317296],
[0.01663708, -0.05670698]]),
rtol=1e-05
)
def test_model_backward_l2_regularization():
random_state = RandomState(1)
X = random_state.randn(3, 5)
Y = np.array([[1, 1, 0, 1, 0]])
cache = (
np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
[-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]),
np.array([[0., 3.32524635, 2.13994541, 2.60700654, 0.],
[0., 4.1600994, 0.79051021, 1.46493512, 0.]]),
np.array([[-1.09989127, -0.17242821, -0.87785842],
[0.04221375, 0.58281521, -1.10061918]]),
np.array([[1.14472371],
[0.90159072]]),
np.array([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
[-0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075],
[-0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]]),
np.array([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]),
np.array([[0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]]),
np.array([[0.53035547],
[-0.69166075],
[-0.39675353]]),
np.array(
[[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]]),
np.array(
[[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]),
np.array([[-0.6871727, -0.84520564, -0.67124613]]),
np.array([[-0.0126646]])
)
Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, _, W3, b3 = cache
parameters = dict(
W={1: W1, 2: W2, 3: W3},
b={1: b1, 2: b2, 3: b3}
)
caches = dict(
Z={1: Z1, 2: Z2, 3: Z3},
A={0: X, 1: A1, 2: A2, 3: sigmoid(Z3)},
D={0: 1, 1: 1, 2: 1}
)
AL = caches["A"][3]
grads = model_backward(AL, Y, parameters, caches, alpha=0.7, keep_prob=1)
dW1 = np.array([[-0.25604646, 0.12298827, - 0.28297129],
[-0.17706303, 0.34536094, - 0.4410571]])
dW2 = np.array([[0.79276486, 0.85133918],
[-0.0957219, - 0.01720463],
[-0.13100772, - 0.03750433]])
dW3 = np.array([[-1.77691347, - 0.11832879, - 0.09397446]])
assert_allclose(grads['dW'][1], dW1)
assert_allclose(grads['dW'][2], dW2, rtol=1e-05)
assert_allclose(grads['dW'][3], dW3)
def test_model_backward_dropout():
random_state = RandomState(1)
X = random_state.randn(3, 5)
Y = np.array([[1, 1, 0, 1, 0]])
cache = (
np.array([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115],
[-1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]),
np.array([[True, False, True, True, True],
[True, True, True, True, False]],
dtype=bool),
np.array([[0., 0., 4.27989081, 5.21401307, 0.],
[0., 8.32019881, 1.58102041, 2.92987024, 0.]]),
np.array([[-1.09989127, -0.17242821, -0.87785842],
[0.04221375, 0.58281521, -1.10061918]]),
np.array([[1.14472371],
[0.90159072]]),
np.array([[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547],
[-0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075],
[-0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]]),
np.array([[True, False, True, False, True],
[False, True, False, True, True],
[False, False, True, False, False]],
dtype=bool),
np.array([[1.06071093, 0., 8.21049603, 0., 1.06071093],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]),
np.array([[0.50249434, 0.90085595],
[-0.68372786, -0.12289023],
[-0.93576943, -0.26788808]]),
np.array([[0.53035547],
[-0.69166075],
[-0.39675353]]),
np.array([[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]]),
np.array([[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]]),
np.array([[-0.6871727, -0.84520564, -0.67124613]]),
np.array([[-0.0126646]])
)
Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3 = cache
parameters = dict(
W={1: W1, 2: W2, 3: W3},
b={1: b1, 2: b2, 3: b3}
)
caches = dict(
Z={1: Z1, 2: Z2, 3: Z3},
A={0: X, 1: A1, 2: A2, 3: sigmoid(Z3)},
D={0: 1, 1: D1, 2: D2}
)
grads = model_backward(A3, Y, parameters, caches, alpha=0, keep_prob=0.8)
dA1 = np.array([[0.36544439, 0., -0.00188233, 0., -0.17408748],
[0.65515713, 0., -0.00337459, 0., -0.]])
dA2 = np.array([[0.58180856, 0., -0.00299679, 0., -0.27715731],
[0., 0.53159854, -0., 0.53159854, -0.34089673],
[0., 0., -0.00292733, 0., -0., ]])
assert_allclose(grads['dA'][1], dA1, rtol=1e-05)
assert_allclose(grads['dA'][2], dA2, rtol=1e-05)
| [
"nnlib.utils.activation.sigmoid",
"nnlib.l_layer.backward.linear_backward",
"nnlib.utils.activation.relu",
"numpy.testing.assert_allclose",
"numpy.array",
"nnlib.l_layer.backward.model_backward",
"numpy.random.RandomState"
] | [((351, 365), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (362, 365), False, 'from numpy.random import RandomState\n'), ((465, 517), 'nnlib.l_layer.backward.linear_backward', 'linear_backward', (['dZ', '(A, 1, W)'], {'alpha': '(0)', 'keep_prob': '(1)'}), '(dZ, (A, 1, W), alpha=0, keep_prob=1)\n', (480, 517), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((523, 635), 'numpy.testing.assert_allclose', 'assert_allclose', (['dA_prev', '[[0.51822968, -0.19517421], [-0.40506361, 0.15255393], [2.37496825, -\n 0.89445391]]'], {}), '(dA_prev, [[0.51822968, -0.19517421], [-0.40506361, \n 0.15255393], [2.37496825, -0.89445391]])\n', (538, 635), False, 'from numpy.testing import assert_allclose\n'), ((660, 720), 'numpy.testing.assert_allclose', 'assert_allclose', (['dW', '[[-0.10076895, 1.40685096, 1.64992505]]'], {}), '(dW, [[-0.10076895, 1.40685096, 1.64992505]])\n', (675, 720), False, 'from numpy.testing import assert_allclose\n'), ((725, 760), 'numpy.testing.assert_allclose', 'assert_allclose', (['db', '[[0.50629448]]'], {}), '(db, [[0.50629448]])\n', (740, 760), False, 'from numpy.testing import assert_allclose\n'), ((821, 835), 'numpy.random.RandomState', 'RandomState', (['(2)'], {}), '(2)\n', (832, 835), False, 'from numpy.random import RandomState\n'), ((1464, 1478), 'numpy.random.RandomState', 'RandomState', (['(2)'], {}), '(2)\n', (1475, 1478), False, 'from numpy.random import RandomState\n'), ((2058, 2072), 'numpy.random.RandomState', 'RandomState', (['(3)'], {}), '(3)\n', (2069, 2072), False, 'from numpy.random import RandomState\n'), ((2107, 2125), 'numpy.array', 'np.array', (['[[1, 0]]'], {}), '([[1, 0]])\n', (2115, 2125), True, 'import numpy as np\n'), ((2580, 2643), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['AL', 'Y', 'parameters', 'caches'], {'alpha': '(0)', 'keep_prob': '(1)'}), '(AL, Y, parameters, caches, alpha=0, keep_prob=1)\n', (2594, 2643), False, 
'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((3398, 3412), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (3409, 3412), False, 'from numpy.random import RandomState\n'), ((3454, 3481), 'numpy.array', 'np.array', (['[[1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 1, 0]])\n', (3462, 3481), True, 'import numpy as np\n'), ((5316, 5381), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['AL', 'Y', 'parameters', 'caches'], {'alpha': '(0.7)', 'keep_prob': '(1)'}), '(AL, Y, parameters, caches, alpha=0.7, keep_prob=1)\n', (5330, 5381), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((5393, 5486), 'numpy.array', 'np.array', (['[[-0.25604646, 0.12298827, -0.28297129], [-0.17706303, 0.34536094, -0.4410571]]'], {}), '([[-0.25604646, 0.12298827, -0.28297129], [-0.17706303, 0.34536094,\n -0.4410571]])\n', (5401, 5486), True, 'import numpy as np\n'), ((5517, 5612), 'numpy.array', 'np.array', (['[[0.79276486, 0.85133918], [-0.0957219, -0.01720463], [-0.13100772, -\n 0.03750433]]'], {}), '([[0.79276486, 0.85133918], [-0.0957219, -0.01720463], [-0.13100772,\n -0.03750433]])\n', (5525, 5612), True, 'import numpy as np\n'), ((5662, 5713), 'numpy.array', 'np.array', (['[[-1.77691347, -0.11832879, -0.09397446]]'], {}), '([[-1.77691347, -0.11832879, -0.09397446]])\n', (5670, 5713), True, 'import numpy as np\n'), ((5721, 5757), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][1]", 'dW1'], {}), "(grads['dW'][1], dW1)\n", (5736, 5757), False, 'from numpy.testing import assert_allclose\n'), ((5762, 5810), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][2]", 'dW2'], {'rtol': '(1e-05)'}), "(grads['dW'][2], dW2, rtol=1e-05)\n", (5777, 5810), False, 'from numpy.testing import assert_allclose\n'), ((5815, 5851), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dW'][3]", 'dW3'], {}), "(grads['dW'][3], dW3)\n", 
(5830, 5851), False, 'from numpy.testing import assert_allclose\n'), ((5908, 5922), 'numpy.random.RandomState', 'RandomState', (['(1)'], {}), '(1)\n', (5919, 5922), False, 'from numpy.random import RandomState\n'), ((5964, 5991), 'numpy.array', 'np.array', (['[[1, 1, 0, 1, 0]]'], {}), '([[1, 1, 0, 1, 0]])\n', (5972, 5991), True, 'import numpy as np\n'), ((8048, 8113), 'nnlib.l_layer.backward.model_backward', 'model_backward', (['A3', 'Y', 'parameters', 'caches'], {'alpha': '(0)', 'keep_prob': '(0.8)'}), '(A3, Y, parameters, caches, alpha=0, keep_prob=0.8)\n', (8062, 8113), False, 'from nnlib.l_layer.backward import linear_backward, linear_backward_activation, model_backward\n'), ((8125, 8233), 'numpy.array', 'np.array', (['[[0.36544439, 0.0, -0.00188233, 0.0, -0.17408748], [0.65515713, 0.0, -\n 0.00337459, 0.0, -0.0]]'], {}), '([[0.36544439, 0.0, -0.00188233, 0.0, -0.17408748], [0.65515713, \n 0.0, -0.00337459, 0.0, -0.0]])\n', (8133, 8233), True, 'import numpy as np\n'), ((8254, 8409), 'numpy.array', 'np.array', (['[[0.58180856, 0.0, -0.00299679, 0.0, -0.27715731], [0.0, 0.53159854, -0.0, \n 0.53159854, -0.34089673], [0.0, 0.0, -0.00292733, 0.0, -0.0]]'], {}), '([[0.58180856, 0.0, -0.00299679, 0.0, -0.27715731], [0.0, \n 0.53159854, -0.0, 0.53159854, -0.34089673], [0.0, 0.0, -0.00292733, 0.0,\n -0.0]])\n', (8262, 8409), True, 'import numpy as np\n'), ((8442, 8490), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dA'][1]", 'dA1'], {'rtol': '(1e-05)'}), "(grads['dA'][1], dA1, rtol=1e-05)\n", (8457, 8490), False, 'from numpy.testing import assert_allclose\n'), ((8495, 8543), 'numpy.testing.assert_allclose', 'assert_allclose', (["grads['dA'][2]", 'dA2'], {'rtol': '(1e-05)'}), "(grads['dA'][2], dA2, rtol=1e-05)\n", (8510, 8543), False, 'from numpy.testing import assert_allclose\n'), ((1128, 1222), 'numpy.array', 'np.array', (['[[0.11017994, 0.01105339], [0.09466817, 0.00949723], [-0.05743092, -0.00576154]\n ]'], {}), '([[0.11017994, 0.01105339], 
[0.09466817, 0.00949723], [-0.05743092,\n -0.00576154]])\n', (1136, 1222), True, 'import numpy as np\n'), ((1281, 1330), 'numpy.array', 'np.array', (['[[0.10266786, 0.09778551, -0.01968084]]'], {}), '([[0.10266786, 0.09778551, -0.01968084]])\n', (1289, 1330), True, 'import numpy as np\n'), ((1368, 1393), 'numpy.array', 'np.array', (['[[-0.05729622]]'], {}), '([[-0.05729622]])\n', (1376, 1393), True, 'import numpy as np\n'), ((1765, 1832), 'numpy.array', 'np.array', (['[[0.44090989, 0.0], [0.37883606, 0.0], [-0.2298228, 0.0]]'], {}), '([[0.44090989, 0.0], [0.37883606, 0.0], [-0.2298228, 0.0]])\n', (1773, 1832), True, 'import numpy as np\n'), ((1892, 1941), 'numpy.array', 'np.array', (['[[0.44513824, 0.37371418, -0.10478989]]'], {}), '([[0.44513824, 0.37371418, -0.10478989]])\n', (1900, 1941), True, 'import numpy as np\n'), ((1979, 2004), 'numpy.array', 'np.array', (['[[-0.20837892]]'], {}), '([[-0.20837892]])\n', (1987, 2004), True, 'import numpy as np\n'), ((2706, 2841), 'numpy.array', 'np.array', (['[[0.41010002, 0.07807203, 0.13798444, 0.10502167], [0.0, 0.0, 0.0, 0.0], [\n 0.05283652, 0.01005865, 0.01777766, 0.0135308]]'], {}), '([[0.41010002, 0.07807203, 0.13798444, 0.10502167], [0.0, 0.0, 0.0,\n 0.0], [0.05283652, 0.01005865, 0.01777766, 0.0135308]])\n', (2714, 2841), True, 'import numpy as np\n'), ((2982, 3029), 'numpy.array', 'np.array', (['[[-0.22007063], [0.0], [-0.02835349]]'], {}), '([[-0.22007063], [0.0], [-0.02835349]])\n', (2990, 3029), True, 'import numpy as np\n'), ((3153, 3248), 'numpy.array', 'np.array', (['[[0.12913162, -0.44014127], [-0.14175655, 0.48317296], [0.01663708, -\n 0.05670698]]'], {}), '([[0.12913162, -0.44014127], [-0.14175655, 0.48317296], [0.01663708,\n -0.05670698]])\n', (3161, 3248), True, 'import numpy as np\n'), ((3504, 3646), 'numpy.array', 'np.array', (['[[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [-\n 1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]'], {}), '([[-1.52855314, 3.32524635, 
2.13994541, 2.60700654, -0.75942115], [\n -1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])\n', (3512, 3646), True, 'import numpy as np\n'), ((3675, 3785), 'numpy.array', 'np.array', (['[[0.0, 3.32524635, 2.13994541, 2.60700654, 0.0], [0.0, 4.1600994, \n 0.79051021, 1.46493512, 0.0]]'], {}), '([[0.0, 3.32524635, 2.13994541, 2.60700654, 0.0], [0.0, 4.1600994, \n 0.79051021, 1.46493512, 0.0]])\n', (3683, 3785), True, 'import numpy as np\n'), ((3812, 3906), 'numpy.array', 'np.array', (['[[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521, -1.10061918]\n ]'], {}), '([[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521,\n -1.10061918]])\n', (3820, 3906), True, 'import numpy as np\n'), ((3931, 3969), 'numpy.array', 'np.array', (['[[1.14472371], [0.90159072]]'], {}), '([[1.14472371], [0.90159072]])\n', (3939, 3969), True, 'import numpy as np\n'), ((3997, 4213), 'numpy.array', 'np.array', (['[[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [-0.69166075,\n -3.47645987, -2.25194702, -2.65416996, -0.69166075], [-0.39675353, -\n 4.62285846, -2.61101729, -3.22874921, -0.39675353]]'], {}), '([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [-\n 0.69166075, -3.47645987, -2.25194702, -2.65416996, -0.69166075], [-\n 0.39675353, -4.62285846, -2.61101729, -3.22874921, -0.39675353]])\n', (4005, 4213), True, 'import numpy as np\n'), ((4253, 4384), 'numpy.array', 'np.array', (['[[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [0.0, 0.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.53035547, 5.94892323, 2.31780174, 3.16005701, 0.53035547], [\n 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (4261, 4384), True, 'import numpy as np\n'), ((4427, 4524), 'numpy.array', 'np.array', (['[[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-0.93576943, -\n 0.26788808]]'], {}), '([[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-\n 0.93576943, -0.26788808]])\n', (4435, 4524), True, 
'import numpy as np\n'), ((4566, 4620), 'numpy.array', 'np.array', (['[[0.53035547], [-0.69166075], [-0.39675353]]'], {}), '([[0.53035547], [-0.69166075], [-0.39675353]])\n', (4574, 4620), True, 'import numpy as np\n'), ((4666, 4741), 'numpy.array', 'np.array', (['[[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]]'], {}), '([[-0.3771104, -4.10060224, -1.60539468, -2.18416951, -0.3771104]])\n', (4674, 4741), True, 'import numpy as np\n'), ((4764, 4836), 'numpy.array', 'np.array', (['[[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]]'], {}), '([[0.40682402, 0.01629284, 0.16722898, 0.10118111, 0.40682402]])\n', (4772, 4836), True, 'import numpy as np\n'), ((4863, 4913), 'numpy.array', 'np.array', (['[[-0.6871727, -0.84520564, -0.67124613]]'], {}), '([[-0.6871727, -0.84520564, -0.67124613]])\n', (4871, 4913), True, 'import numpy as np\n'), ((4923, 4947), 'numpy.array', 'np.array', (['[[-0.0126646]]'], {}), '([[-0.0126646]])\n', (4931, 4947), True, 'import numpy as np\n'), ((6014, 6156), 'numpy.array', 'np.array', (['[[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [-\n 1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]]'], {}), '([[-1.52855314, 3.32524635, 2.13994541, 2.60700654, -0.75942115], [\n -1.98043538, 4.1600994, 0.79051021, 1.46493512, -0.45506242]])\n', (6022, 6156), True, 'import numpy as np\n'), ((6179, 6271), 'numpy.array', 'np.array', (['[[True, False, True, True, True], [True, True, True, True, False]]'], {'dtype': 'bool'}), '([[True, False, True, True, True], [True, True, True, True, False]],\n dtype=bool)\n', (6187, 6271), True, 'import numpy as np\n'), ((6312, 6416), 'numpy.array', 'np.array', (['[[0.0, 0.0, 4.27989081, 5.21401307, 0.0], [0.0, 8.32019881, 1.58102041, \n 2.92987024, 0.0]]'], {}), '([[0.0, 0.0, 4.27989081, 5.21401307, 0.0], [0.0, 8.32019881, \n 1.58102041, 2.92987024, 0.0]])\n', (6320, 6416), True, 'import numpy as np\n'), ((6434, 6528), 'numpy.array', 'np.array', (['[[-1.09989127, 
-0.17242821, -0.87785842], [0.04221375, 0.58281521, -1.10061918]\n ]'], {}), '([[-1.09989127, -0.17242821, -0.87785842], [0.04221375, 0.58281521,\n -1.10061918]])\n', (6442, 6528), True, 'import numpy as np\n'), ((6553, 6591), 'numpy.array', 'np.array', (['[[1.14472371], [0.90159072]]'], {}), '([[1.14472371], [0.90159072]])\n', (6561, 6591), True, 'import numpy as np\n'), ((6619, 6834), 'numpy.array', 'np.array', (['[[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547], [-0.69166075,\n -1.71413186, -3.81223329, -4.61667916, -0.69166075], [-0.39675353, -\n 2.62563561, -4.82528105, -6.0607449, -0.39675353]]'], {}), '([[0.53035547, 8.02565606, 4.10524802, 5.78975856, 0.53035547], [-\n 0.69166075, -1.71413186, -3.81223329, -4.61667916, -0.69166075], [-\n 0.39675353, -2.62563561, -4.82528105, -6.0607449, -0.39675353]])\n', (6627, 6834), True, 'import numpy as np\n'), ((6870, 7001), 'numpy.array', 'np.array', (['[[True, False, True, False, True], [False, True, False, True, True], [False,\n False, True, False, False]]'], {'dtype': 'bool'}), '([[True, False, True, False, True], [False, True, False, True, True\n ], [False, False, True, False, False]], dtype=bool)\n', (6878, 7001), True, 'import numpy as np\n'), ((7059, 7176), 'numpy.array', 'np.array', (['[[1.06071093, 0.0, 8.21049603, 0.0, 1.06071093], [0.0, 0.0, 0.0, 0.0, 0.0],\n [0.0, 0.0, 0.0, 0.0, 0.0]]'], {}), '([[1.06071093, 0.0, 8.21049603, 0.0, 1.06071093], [0.0, 0.0, 0.0, \n 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]])\n', (7067, 7176), True, 'import numpy as np\n'), ((7205, 7302), 'numpy.array', 'np.array', (['[[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-0.93576943, -\n 0.26788808]]'], {}), '([[0.50249434, 0.90085595], [-0.68372786, -0.12289023], [-\n 0.93576943, -0.26788808]])\n', (7213, 7302), True, 'import numpy as np\n'), ((7343, 7397), 'numpy.array', 'np.array', (['[[0.53035547], [-0.69166075], [-0.39675353]]'], {}), '([[0.53035547], [-0.69166075], [-0.39675353]])\n', (7351, 7397), True, 
'import numpy as np\n'), ((7443, 7516), 'numpy.array', 'np.array', (['[[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]]'], {}), '([[-0.7415562, -0.0126646, -5.65469333, -0.0126646, -0.7415562]])\n', (7451, 7516), True, 'import numpy as np\n'), ((7526, 7598), 'numpy.array', 'np.array', (['[[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]]'], {}), '([[0.32266394, 0.49683389, 0.00348883, 0.49683389, 0.32266394]])\n', (7534, 7598), True, 'import numpy as np\n'), ((7608, 7658), 'numpy.array', 'np.array', (['[[-0.6871727, -0.84520564, -0.67124613]]'], {}), '([[-0.6871727, -0.84520564, -0.67124613]])\n', (7616, 7658), True, 'import numpy as np\n'), ((7668, 7692), 'numpy.array', 'np.array', (['[[-0.0126646]]'], {}), '([[-0.0126646]])\n', (7676, 7692), True, 'import numpy as np\n'), ((1045, 1055), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z'], {}), '(Z)\n', (1052, 1055), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((1688, 1695), 'nnlib.utils.activation.relu', 'relu', (['Z'], {}), '(Z)\n', (1692, 1695), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((2512, 2523), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z2'], {}), '(Z2)\n', (2519, 2523), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((5218, 5229), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z3'], {}), '(Z3)\n', (5225, 5229), False, 'from nnlib.utils.activation import sigmoid, relu\n'), ((7972, 7983), 'nnlib.utils.activation.sigmoid', 'sigmoid', (['Z3'], {}), '(Z3)\n', (7979, 7983), False, 'from nnlib.utils.activation import sigmoid, relu\n')] |
"""Uploaded data to nuuuwan/news_lk:data branch."""
from news_lk import scrape
if __name__ == '__main__':
scrape.scrape_and_dump()
| [
"news_lk.scrape.scrape_and_dump"
] | [((112, 136), 'news_lk.scrape.scrape_and_dump', 'scrape.scrape_and_dump', ([], {}), '()\n', (134, 136), False, 'from news_lk import scrape\n')] |
from schematics import Model
from schematics.types import IntType, UUIDType, StringType, BooleanType
from ingredients_db.models.region import RegionState, Region
from ingredients_http.schematics.types import ArrowType, EnumType
class RequestCreateRegion(Model):
    """Request body schema for creating a region."""
    name = StringType(required=True, min_length=3)  # human-readable region name
    datacenter = StringType(required=True)  # datacenter the region maps onto -- backend-defined meaning
    image_datastore = StringType(required=True)  # datastore used for images
    image_folder = StringType()  # optional sub-folder for images within the datastore
class ParamsRegion(Model):
    """URL parameters identifying a single region."""
    region_id = UUIDType(required=True)  # UUID of the region being addressed
class ParamsListRegion(Model):
    """Query parameters for listing regions (filtering + pagination)."""
    name = StringType()  # optional name filter -- exact semantics set by the handler
    limit = IntType(default=100, max_value=100, min_value=1)  # page size, hard-capped at 100
    marker = UUIDType()  # presumably the id to continue a paged listing from -- confirm against handler
class RequestRegionSchedule(Model):
    """Request body toggling a region's schedulable flag."""
    schedulable = BooleanType(required=True)
class ResponseRegion(Model):
    """Serialized region representation returned by the API."""
    id = UUIDType(required=True)
    name = StringType(required=True, min_length=3)
    datacenter = StringType(required=True)
    image_datastore = StringType(required=True)
    image_folder = StringType()
    schedulable = BooleanType(required=True)
    state = EnumType(RegionState, required=True)
    current_task_id = UUIDType()
    created_at = ArrowType(required=True)
    updated_at = ArrowType(required=True)

    # Names copied verbatim from the Region DB row into the response model.
    _COPIED_FIELDS = (
        'id', 'name', 'datacenter', 'image_datastore', 'image_folder',
        'schedulable', 'state', 'current_task_id', 'created_at', 'updated_at',
    )

    @classmethod
    def from_database(cls, region: Region):
        """Build a ResponseRegion from a Region database object."""
        model = cls()
        for field_name in cls._COPIED_FIELDS:
            setattr(model, field_name, getattr(region, field_name))
        return model
| [
"ingredients_http.schematics.types.EnumType",
"schematics.types.IntType",
"schematics.types.UUIDType",
"schematics.types.StringType",
"schematics.types.BooleanType",
"ingredients_http.schematics.types.ArrowType"
] | [((276, 315), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)', 'min_length': '(3)'}), '(required=True, min_length=3)\n', (286, 315), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((333, 358), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)'}), '(required=True)\n', (343, 358), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((381, 406), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)'}), '(required=True)\n', (391, 406), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((426, 438), 'schematics.types.StringType', 'StringType', ([], {}), '()\n', (436, 438), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((484, 507), 'schematics.types.UUIDType', 'UUIDType', ([], {'required': '(True)'}), '(required=True)\n', (492, 507), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((552, 564), 'schematics.types.StringType', 'StringType', ([], {}), '()\n', (562, 564), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((577, 625), 'schematics.types.IntType', 'IntType', ([], {'default': '(100)', 'max_value': '(100)', 'min_value': '(1)'}), '(default=100, max_value=100, min_value=1)\n', (584, 625), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((639, 649), 'schematics.types.UUIDType', 'UUIDType', ([], {}), '()\n', (647, 649), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((706, 732), 'schematics.types.BooleanType', 'BooleanType', ([], {'required': '(True)'}), '(required=True)\n', (717, 732), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((773, 796), 'schematics.types.UUIDType', 'UUIDType', ([], {'required': '(True)'}), '(required=True)\n', (781, 796), False, 'from schematics.types 
import IntType, UUIDType, StringType, BooleanType\n'), ((808, 847), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)', 'min_length': '(3)'}), '(required=True, min_length=3)\n', (818, 847), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((865, 890), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)'}), '(required=True)\n', (875, 890), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((915, 940), 'schematics.types.StringType', 'StringType', ([], {'required': '(True)'}), '(required=True)\n', (925, 940), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((960, 972), 'schematics.types.StringType', 'StringType', ([], {}), '()\n', (970, 972), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((991, 1017), 'schematics.types.BooleanType', 'BooleanType', ([], {'required': '(True)'}), '(required=True)\n', (1002, 1017), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((1030, 1066), 'ingredients_http.schematics.types.EnumType', 'EnumType', (['RegionState'], {'required': '(True)'}), '(RegionState, required=True)\n', (1038, 1066), False, 'from ingredients_http.schematics.types import ArrowType, EnumType\n'), ((1089, 1099), 'schematics.types.UUIDType', 'UUIDType', ([], {}), '()\n', (1097, 1099), False, 'from schematics.types import IntType, UUIDType, StringType, BooleanType\n'), ((1117, 1141), 'ingredients_http.schematics.types.ArrowType', 'ArrowType', ([], {'required': '(True)'}), '(required=True)\n', (1126, 1141), False, 'from ingredients_http.schematics.types import ArrowType, EnumType\n'), ((1159, 1183), 'ingredients_http.schematics.types.ArrowType', 'ArrowType', ([], {'required': '(True)'}), '(required=True)\n', (1168, 1183), False, 'from ingredients_http.schematics.types import ArrowType, EnumType\n')] |
#!/usr/bin/env python3
from src.cli import Cli
from src.core import Orchestrator
def main():
    """Entry point: validate CLI input, then launch the requested modules."""
    cfg, opts = Cli.parse_and_validate()
    Orchestrator.launch_modules(cfg, opts.modules, opts.targets, opts.audit)


if __name__ == '__main__':
    main()
"src.cli.Cli.parse_and_validate",
"src.core.Orchestrator.launch_modules"
] | [((114, 138), 'src.cli.Cli.parse_and_validate', 'Cli.parse_and_validate', ([], {}), '()\n', (136, 138), False, 'from src.cli import Cli\n'), ((143, 218), 'src.core.Orchestrator.launch_modules', 'Orchestrator.launch_modules', (['config', 'args.modules', 'args.targets', 'args.audit'], {}), '(config, args.modules, args.targets, args.audit)\n', (170, 218), False, 'from src.core import Orchestrator\n')] |
#!/cm/shared/languages/python-3.3.2/bin/python
# submit script for submission of mizuRoute simualtions
# <NAME> Oct 29 2019
#
# call this script from 'run_mizuRoute_templated_mswep050calib.py which creates a qsub job to submit to the HPC queue
# This script is actually called from 'call_pythonscript.sh' (which is needed to load modules before calling the script)
import os,glob,subprocess,sys,shutil,multiprocessing
import datetime
def call_subproc(cmd, logfile):
    """Run *cmd* synchronously, capturing stdout and stderr into *logfile*.

    :param cmd: argument list for the subprocess (e.g. ['time', exe, ctl_file]).
    :param logfile: path of the log file to (over)write.
    """
    # `with` closes the log handle even if subprocess.call raises; the
    # original passed open(logfile, 'w') inline and leaked the handle.
    with open(logfile, 'w') as log:
        subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT)
# Print start time so total run duration can be read off the log.
print('Starting:',datetime.datetime.now())
# Pull the run configuration from environment variables exported by the
# submit script: a ':'-separated list of control files, the log directory,
# the worker count and the mizuRoute executable path.
control_files = os.environ['CONTROL_FLIST'].split(':')
logdir = os.environ['LOGDIR']
ncpus = int(os.environ['NCPUS'])
mizuexe = os.environ['MIZU_EXE']
print('running simulations',len(control_files))
print(os.environ['CONTROL_FLIST'])
# One worker process per requested CPU; simulations run concurrently.
pool = multiprocessing.Pool(processes=ncpus)
for control_file in control_files:
    # TODO: could add a check whether this simulation has already been run.
    # NOTE(review): the [8:-4] slice presumably strips a fixed 8-char prefix
    # and a 4-char extension from the control file name -- confirm the naming
    # convention used by the templating script.
    fname = os.path.basename(control_file)
    sim_name =fname[8:-4]
    logfile = os.path.join(logdir,sim_name+'.log')
    cmd = ['time',mizuexe,control_file]
    print('command',cmd)
    print('log',logfile)
    # Queue the run on the pool; stdout/stderr land in the per-sim log file.
    ret = pool.apply_async(call_subproc,[cmd,logfile])
# No more work to submit; block until every queued simulation finishes.
pool.close()
pool.join()
print('Finished:',datetime.datetime.now())
| [
"datetime.datetime.now",
"os.path.basename",
"os.path.join",
"multiprocessing.Pool"
] | [((874, 911), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {'processes': 'ncpus'}), '(processes=ncpus)\n', (894, 911), False, 'import os, glob, subprocess, sys, shutil, multiprocessing\n'), ((578, 601), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (599, 601), False, 'import datetime\n'), ((1022, 1052), 'os.path.basename', 'os.path.basename', (['control_file'], {}), '(control_file)\n', (1038, 1052), False, 'import os, glob, subprocess, sys, shutil, multiprocessing\n'), ((1087, 1126), 'os.path.join', 'os.path.join', (['logdir', "(sim_name + '.log')"], {}), "(logdir, sim_name + '.log')\n", (1099, 1126), False, 'import os, glob, subprocess, sys, shutil, multiprocessing\n'), ((1477, 1500), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1498, 1500), False, 'import datetime\n')] |
from typing import Mapping, Any, Sequence
import numpy as np
import heapq
import math
from tqdm import tqdm
import scipy.optimize
import cvxpy as cvx
def n_bias(x_count: np.ndarray, bias: float):
    """Total mass left in *x_count* after subtracting *bias* from every
    cell and flooring each cell at zero."""
    excess = np.maximum(x_count - bias, 0)
    return np.sum(excess)
def cost(bs, ns):
    """Objective value: (sum of biases)^2 plus a quarter of sum(ns^2)."""
    bias_term = np.sum(bs) ** 2
    noise_term = np.sum(np.square(ns)) / 4
    return bias_term + noise_term
def opt_cvx(
    x_counts: Sequence[np.ndarray],
    sizes: Sequence[int],
    n_iter: int=10
) -> np.ndarray:
    """Solve for non-negative per-sample biases by convex optimization.

    Minimizes 4*(sum b)^2 + sum_i ((sum of (x_i - b_i)_+)/size_i)^2 with
    cvxpy/ECOS, then rounds the solution.  ``n_iter`` is accepted for
    interface parity with the sequential optimizers and is unused here.
    """
    n = len(sizes)
    biases = cvx.Variable(n)
    # One squared residual term per sample: positive part of (counts - b),
    # summed and normalized by the sample size.
    residual_terms = [
        cvx.square(cvx.sum(cvx.pos(x_counts[i] - biases[i])) / sizes[i])
        for i in range(n)
    ]
    objective = cvx.Minimize(
        4 * cvx.square(cvx.sum(biases)) + sum(residual_terms)
    )
    problem = cvx.Problem(objective, [biases >= 0])
    problem.solve(solver=cvx.ECOS)
    b_values = biases.value
    n_adj = np.zeros(n)
    for i in range(n):
        n_adj[i] = n_bias(x_counts[i], b_values[i]) / sizes[i]
    print("Cost: {}".format(cost(b_values, n_adj)))
    return np.round(b_values)
def n_deriv(x_count, bias, nraw=1, s=1):
    """Cost-derivative helper: (nraw / s^2) times the number of cells in
    *x_count* that are at or above *bias*."""
    cells_above = np.count_nonzero(x_count >= bias)
    return (nraw / s ** 2) * cells_above
base = 2.0


def convert_to_bs(b_pows):
    """Map exponents to biases: base**p, with any value below 1 zeroed.

    Accepts either a scalar exponent or an ndarray of exponents and
    returns the same kind (ndarray values below 1 are zeroed in place).
    """
    result = base ** b_pows
    if isinstance(result, np.ndarray):
        result[result < 1] = 0
    elif result < 1:
        result = 0
    return result
def opt_sequence_2(
    x_counts: Sequence[np.ndarray],
    sizes: Sequence[int],
    n_iter: int=10
) -> np.ndarray:
    """Greedy coordinate descent over per-sample biases.

    Each round pops the sample with the largest cost derivative from a
    max-heap and re-optimizes that single bias with a bounded scalar
    minimization, for at most ``n_iter`` rounds.

    :param x_counts: one count array per sample.
    :param sizes: per-sample normalization constants (same length).
    :param n_iter: maximum number of single-coordinate updates.
    :returns: array of continuous (unrounded) biases, one per sample.
    """
    n = len(x_counts)
    bs = np.zeros(n)
    n_adj = np.zeros(n)
    for i in range(n):
        n_adj[i] = n_bias(x_counts[i], bs[i]) / sizes[i]
    # Max-heap via negated keys: the top entry is the coordinate whose
    # bias currently has the largest derivative n_deriv.
    pq = []
    for s_idx in range(len(x_counts)):
        n_raw = n_adj[s_idx] * sizes[s_idx]
        heapq.heappush(
            pq,
            (-n_deriv(x_counts[s_idx], bs[s_idx], nraw=n_raw, s=sizes[s_idx]), s_idx)
        )
    print("Optimizing Bias")
    for cur_iter in tqdm(range(n_iter)):
        _, opt_idx = heapq.heappop(pq)
        # opt_idx = cur_iter % 3
        # print("bs:{}".format(bs))
        # print("ns:{}".format(n_adj))
        # print("cost: {}".format(old_cost))
        old_cost = cost(bs, n_adj)

        def cost_b_fun(b):
            # Cost as a function of this one coordinate's bias, all other
            # coordinates held fixed (copies avoid mutating bs/n_adj).
            new_bs = bs.copy()
            new_adj = n_adj.copy()
            new_bs[opt_idx] = b
            new_adj[opt_idx] = n_bias(x_counts[opt_idx], b) / sizes[opt_idx]
            return cost(new_bs, new_adj)
        # A bias above the mean count zeroes the sample entirely, so it
        # bounds the search.
        max_b = np.sum(x_counts[opt_idx])/sizes[opt_idx]
        bracket = None
        if bs[opt_idx] > 0:
            # Seed the bracket with the current bias when it is nonzero.
            bracket = (0, bs[opt_idx], max_b)
        res = scipy.optimize.minimize_scalar(
            cost_b_fun,
            bracket=bracket,
            bounds=(0, max_b),
            tol=0.1
        )
        best_b = res.x
        print("best b: {}".format(best_b))
        new_cost = res.fun
        print("Old Cost: {}".format(old_cost))
        print("New Cost: {}".format(new_cost))
        # if (new_cost > old_cost*.98):
        #     break
        # Commit the optimized coordinate and push it back with a
        # refreshed derivative key.
        bs[opt_idx] = best_b
        n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
        n_raw = n_adj[opt_idx] * sizes[opt_idx]
        heapq.heappush(
            pq,
            (-n_deriv(x_counts[opt_idx], bs[opt_idx], nraw=n_raw, s=sizes[opt_idx]), opt_idx)
        )
        print("Heap: {}".format(pq))
    return bs
def opt_sequence(
    x_counts: Sequence[np.ndarray],
    sizes: Sequence[int],
    n_iter: int=10
) -> np.ndarray:
    """Greedy coordinate descent over power-of-`base` biases.

    Biases are parameterized as base**p; each round pops the coordinate
    with the largest derivative and tries shifting its exponent by
    -1/0/+1, keeping the shift with the lowest cost.  Stops early when
    the best move is "no shift".

    :param x_counts: one count array per sample.
    :param sizes: per-sample normalization constants (same length).
    :param n_iter: maximum number of single-coordinate updates.
    :returns: array of discrete biases (powers of `base`, or 0).
    """
    n = len(x_counts)
    # Start all exponents at -1, i.e. an effective bias of 0 (base**-1 < 1
    # is clamped to 0 by convert_to_bs).
    b_pows = np.zeros(n) - 1
    bs = convert_to_bs(b_pows)
    n_adj = np.zeros(n)
    for i in range(n):
        n_adj[i] = n_bias(x_counts[i], bs[i]) / sizes[i]
    # Max-heap via negated keys: top entry has the largest derivative.
    pq = []
    for s_idx in range(len(x_counts)):
        heapq.heappush(
            pq,
            (-n_deriv(x_counts[s_idx], bs[s_idx]), s_idx)
        )
    shifts = np.array([-1, 0, 1])
    print("Optimizing Bias")
    for cur_iter in tqdm(range(n_iter)):
        _, opt_idx = heapq.heappop(pq)
        # print("bs:{}".format(bs))
        # print("ns:{}".format(n_adj))
        # print("cost: {}".format(old_cost))
        # Evaluate the cost of each candidate exponent shift in place.
        new_costs = np.zeros(3)
        for shift_idx, cur_shift in enumerate(shifts):
            cur_b_pow = b_pows[opt_idx] + cur_shift
            bs[opt_idx] = convert_to_bs(cur_b_pow)
            # bs[opt_idx] = math.floor(2.0 ** cur_b_pow)
            n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
            new_costs[shift_idx] = cost(bs, n_adj)
        # print("i:{},b:{},deltas:{}".format(opt_idx, cur_b_pow, new_costs - old_cost))
        best_shift_idx = np.argmin(new_costs)
        print("New Cost: {}".format(new_costs[best_shift_idx]))
        # Commit the winning shift (bs/n_adj currently reflect the +1
        # candidate from the trial loop, so recompute from b_pows).
        b_pows[opt_idx] += shifts[best_shift_idx]
        # bs[opt_idx] = math.floor(2.0 ** b_pows[opt_idx])
        bs[opt_idx] = convert_to_bs(b_pows[opt_idx])
        n_adj[opt_idx] = n_bias(x_counts[opt_idx], bs[opt_idx]) / sizes[opt_idx]
        if shifts[best_shift_idx] == 0:
            # Converged: the best move for the steepest coordinate is to
            # stay put.
            break
        heapq.heappush(
            pq,
            (-n_deriv(x_counts[opt_idx], bs[opt_idx]), opt_idx)
        )
    return bs
"numpy.clip",
"cvxpy.Variable",
"cvxpy.Problem",
"cvxpy.pos",
"cvxpy.sum",
"numpy.sum",
"numpy.zeros",
"numpy.array",
"heapq.heappop",
"numpy.argmin",
"numpy.round"
] | [((263, 307), 'numpy.clip', 'np.clip', (['(x_count - bias)'], {'a_min': '(0)', 'a_max': 'None'}), '(x_count - bias, a_min=0, a_max=None)\n', (270, 307), True, 'import numpy as np\n'), ((319, 334), 'numpy.sum', 'np.sum', (['clipped'], {}), '(clipped)\n', (325, 334), True, 'import numpy as np\n'), ((565, 580), 'cvxpy.Variable', 'cvx.Variable', (['n'], {}), '(n)\n', (577, 580), True, 'import cvxpy as cvx\n'), ((867, 894), 'cvxpy.Problem', 'cvx.Problem', (['o', 'constraints'], {}), '(o, constraints)\n', (878, 894), True, 'import cvxpy as cvx\n'), ((969, 980), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (977, 980), True, 'import numpy as np\n'), ((1130, 1148), 'numpy.round', 'np.round', (['b_values'], {}), '(b_values)\n', (1138, 1148), True, 'import numpy as np\n'), ((1620, 1631), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1628, 1631), True, 'import numpy as np\n'), ((1644, 1655), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1652, 1655), True, 'import numpy as np\n'), ((3657, 3668), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3665, 3668), True, 'import numpy as np\n'), ((3923, 3943), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (3931, 3943), True, 'import numpy as np\n'), ((1216, 1239), 'numpy.sum', 'np.sum', (['(x_count >= bias)'], {}), '(x_count >= bias)\n', (1222, 1239), True, 'import numpy as np\n'), ((2060, 2077), 'heapq.heappop', 'heapq.heappop', (['pq'], {}), '(pq)\n', (2073, 2077), False, 'import heapq\n'), ((3598, 3609), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3606, 3609), True, 'import numpy as np\n'), ((4035, 4052), 'heapq.heappop', 'heapq.heappop', (['pq'], {}), '(pq)\n', (4048, 4052), False, 'import heapq\n'), ((4202, 4213), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (4210, 4213), True, 'import numpy as np\n'), ((4684, 4704), 'numpy.argmin', 'np.argmin', (['new_costs'], {}), '(new_costs)\n', (4693, 4704), True, 'import numpy as np\n'), ((366, 376), 'numpy.sum', 'np.sum', (['bs'], {}), 
'(bs)\n', (372, 376), True, 'import numpy as np\n'), ((395, 410), 'numpy.sum', 'np.sum', (['(ns ** 2)'], {}), '(ns ** 2)\n', (401, 410), True, 'import numpy as np\n'), ((2534, 2559), 'numpy.sum', 'np.sum', (['x_counts[opt_idx]'], {}), '(x_counts[opt_idx])\n', (2540, 2559), True, 'import numpy as np\n'), ((750, 774), 'cvxpy.pos', 'cvx.pos', (['(x_count - Bs[i])'], {}), '(x_count - Bs[i])\n', (757, 774), True, 'import cvxpy as cvx\n'), ((829, 840), 'cvxpy.sum', 'cvx.sum', (['Bs'], {}), '(Bs)\n', (836, 840), True, 'import cvxpy as cvx\n')] |
import json, os
## Base spawner config.
# NOTE(review): `c` is presumably the JupyterHub config object injected by
# the traitlets config loader, and `hub_id` is presumably defined earlier in
# the surrounding config -- neither is defined in this snippet; confirm.
try:
    # The spawner command can be overridden wholesale via a JSON-encoded
    # list in SPAWNER_CMD.  Only KeyError (unset variable) falls through to
    # the default; a malformed JSON value will raise here.
    c.Spawner.cmd = \
        json.loads(os.environ['SPAWNER_CMD'])
except KeyError:
    c.Spawner.cmd = [
        'jupyterhub-singleuser', # OAuth wrapped jupyter instance server
        '--KernelManager.transport=ipc', # -- all kernel comms over UNIX sockets
        '--MappingKernelManager.cull_idle_timeout=0' # -- no kernel culling
    ]
c.Spawner.http_timeout = int(os.environ.get('SPAWNER_HTTP_TIMEOUT', '20')) # grace period for spawner connect back
c.Spawner.default_url = os.environ.get('SPAWNER_DEFAULT_URL', '/lab') # default route to visit once spawned
# Set the jupyter instance base directory (relative to $HOME).
if hub_id.lower() in {'jupyter', 'public', 'pub'}:
    # Public hubs get the whole home directory.
    c.Spawner.notebook_dir = ''
else:
    # restrict to context specific notebook path
    c.Spawner.notebook_dir = f'Workspace/{hub_id}'
    pass
| [
"json.loads",
"os.environ.get"
] | [((531, 576), 'os.environ.get', 'os.environ.get', (['"""SPAWNER_DEFAULT_URL"""', '"""/lab"""'], {}), "('SPAWNER_DEFAULT_URL', '/lab')\n", (545, 576), False, 'import json, os\n'), ((75, 112), 'json.loads', 'json.loads', (["os.environ['SPAWNER_CMD']"], {}), "(os.environ['SPAWNER_CMD'])\n", (85, 112), False, 'import json, os\n'), ((421, 465), 'os.environ.get', 'os.environ.get', (['"""SPAWNER_HTTP_TIMEOUT"""', '"""20"""'], {}), "('SPAWNER_HTTP_TIMEOUT', '20')\n", (435, 465), False, 'import json, os\n')] |
__all__ = ['get_dataset']


def get_dataset(params):
    """Look up and construct the dataset named by ``params['name']``.

    Imports are deferred so only the selected dataset module is loaded.

    :param params: mapping with at least a ``'name'`` key; passed through
        to the dataset constructor.
    :returns: an instantiated ``Dataset``.
    :raises AssertionError: if the name is not recognized.
    """
    name = params['name']
    if name == 'multimodal_points':
        from datasets.multimodal_gaussian_2d import Dataset
        return Dataset(params)
    if name == 'kicks':
        from datasets.kicks import Dataset
        return Dataset(params)
    # The original `assert False and 'Unknown dataset'` folded the message
    # into the condition, so the AssertionError carried no message, and the
    # check disappeared entirely under `python -O`.  Raise explicitly.
    raise AssertionError('Unknown dataset: {}'.format(name))
| [
"datasets.kicks.Dataset"
] | [((174, 189), 'datasets.kicks.Dataset', 'Dataset', (['params'], {}), '(params)\n', (181, 189), False, 'from datasets.kicks import Dataset\n'), ((284, 299), 'datasets.kicks.Dataset', 'Dataset', (['params'], {}), '(params)\n', (291, 299), False, 'from datasets.kicks import Dataset\n')] |
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:<EMAIL>>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
# from memsource_cli.models.async_request_dto import AsyncRequestDto
from memsource_cli.models.async_response_dto import AsyncResponseDto # noqa: F401,E501
from memsource_cli.models.project_reference import ProjectReference # noqa: F401,E501
from memsource_cli.models.user_reference import UserReference # noqa: F401,E501
class AsyncRequestDto(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually -- regenerate from the OpenAPI spec instead.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'created_by': 'UserReference',
        'date_created': 'datetime',
        'action': 'str',
        'async_response': 'AsyncResponseDto',
        'parent': 'AsyncRequestDto',
        'project': 'ProjectReference'
    }

    # Maps snake_case attribute names to the camelCase keys used in the
    # JSON wire format.
    attribute_map = {
        'id': 'id',
        'created_by': 'createdBy',
        'date_created': 'dateCreated',
        'action': 'action',
        'async_response': 'asyncResponse',
        'parent': 'parent',
        'project': 'project'
    }

    def __init__(self, id=None, created_by=None, date_created=None, action=None, async_response=None, parent=None, project=None):  # noqa: E501
        """AsyncRequestDto - a model defined in Swagger"""  # noqa: E501
        self._id = None
        self._created_by = None
        self._date_created = None
        self._action = None
        self._async_response = None
        self._parent = None
        self._project = None
        self.discriminator = None
        if id is not None:
            self.id = id
        if created_by is not None:
            self.created_by = created_by
        if date_created is not None:
            self.date_created = date_created
        if action is not None:
            self.action = action
        if async_response is not None:
            self.async_response = async_response
        if parent is not None:
            self.parent = parent
        if project is not None:
            self.project = project

    @property
    def id(self):
        """Gets the id of this AsyncRequestDto.  # noqa: E501

        :return: The id of this AsyncRequestDto.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this AsyncRequestDto.

        :param id: The id of this AsyncRequestDto.  # noqa: E501
        :type: str
        """
        self._id = id

    @property
    def created_by(self):
        """Gets the created_by of this AsyncRequestDto.  # noqa: E501

        :return: The created_by of this AsyncRequestDto.  # noqa: E501
        :rtype: UserReference
        """
        return self._created_by

    @created_by.setter
    def created_by(self, created_by):
        """Sets the created_by of this AsyncRequestDto.

        :param created_by: The created_by of this AsyncRequestDto.  # noqa: E501
        :type: UserReference
        """
        self._created_by = created_by

    @property
    def date_created(self):
        """Gets the date_created of this AsyncRequestDto.  # noqa: E501

        :return: The date_created of this AsyncRequestDto.  # noqa: E501
        :rtype: datetime
        """
        return self._date_created

    @date_created.setter
    def date_created(self, date_created):
        """Sets the date_created of this AsyncRequestDto.

        :param date_created: The date_created of this AsyncRequestDto.  # noqa: E501
        :type: datetime
        """
        self._date_created = date_created

    @property
    def action(self):
        """Gets the action of this AsyncRequestDto.  # noqa: E501

        :return: The action of this AsyncRequestDto.  # noqa: E501
        :rtype: str
        """
        return self._action

    @action.setter
    def action(self, action):
        """Sets the action of this AsyncRequestDto.

        :param action: The action of this AsyncRequestDto.  # noqa: E501
        :type: str
        """
        # Closed enum of async actions the server recognizes; validated
        # client-side before any request is sent.
        allowed_values = ["PRE_ANALYSE", "POST_ANALYSE", "COMPARE_ANALYSE", "PRE_TRANSLATE", "ASYNC_TRANSLATE", "IMPORT_JOB", "IMPORT_FILE", "ALIGN", "EXPORT_TMX_BY_QUERY", "IMPORT_TMX", "INSERT_INTO_TM", "QA", "UPDATE_CONTINUOUS_JOB"]  # noqa: E501
        if action not in allowed_values:
            raise ValueError(
                "Invalid value for `action` ({0}), must be one of {1}"  # noqa: E501
                .format(action, allowed_values)
            )

        self._action = action

    @property
    def async_response(self):
        """Gets the async_response of this AsyncRequestDto.  # noqa: E501

        :return: The async_response of this AsyncRequestDto.  # noqa: E501
        :rtype: AsyncResponseDto
        """
        return self._async_response

    @async_response.setter
    def async_response(self, async_response):
        """Sets the async_response of this AsyncRequestDto.

        :param async_response: The async_response of this AsyncRequestDto.  # noqa: E501
        :type: AsyncResponseDto
        """
        self._async_response = async_response

    @property
    def parent(self):
        """Gets the parent of this AsyncRequestDto.  # noqa: E501

        :return: The parent of this AsyncRequestDto.  # noqa: E501
        :rtype: AsyncRequestDto
        """
        return self._parent

    @parent.setter
    def parent(self, parent):
        """Sets the parent of this AsyncRequestDto.

        :param parent: The parent of this AsyncRequestDto.  # noqa: E501
        :type: AsyncRequestDto
        """
        self._parent = parent

    @property
    def project(self):
        """Gets the project of this AsyncRequestDto.  # noqa: E501

        :return: The project of this AsyncRequestDto.  # noqa: E501
        :rtype: ProjectReference
        """
        return self._project

    @project.setter
    def project(self, project):
        """Sets the project of this AsyncRequestDto.

        :param project: The project of this AsyncRequestDto.  # noqa: E501
        :type: ProjectReference
        """
        self._project = project

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(AsyncRequestDto, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AsyncRequestDto):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((7024, 7057), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (7037, 7057), False, 'import six\n')] |
import random
def get_random_bag():
    """Return a shuffled "bag" holding exactly one Piece of every shape."""
    order = SHAPES[:]
    random.shuffle(order)
    return [Piece(0, 0, s) for s in order]
class Shape:
    """Description of a tetromino: its board code and its rotation states."""

    def __init__(self, code, blueprints):
        """
        :param code: non-zero integer written into the board grid for this shape.
        :param blueprints: one "picture" per rotation state, each a list of
            equally long strings where any non-space character marks an
            occupied cell.
        """
        self.code = code
        self.rotations = len(blueprints)
        self.blueprints = blueprints
        self.shape_coords = []
        # BUG FIX: width/height were previously taken from the wrong axes --
        # len(blueprints[0]) is the ROW count of the first rotation and
        # len(blueprints) is the number of ROTATIONS.  That only produced
        # correct values because every shipped blueprint grid is square.
        self.width = len(blueprints[0][0])
        self.height = len(blueprints[0])
        for rotation in range(self.rotations):
            self.shape_coords.append(list(self._create_shape_coords(rotation)))

    def _get_blueprint(self, rotation):
        """Returns a list of strings that defines how the shape looks like."""
        return self.blueprints[rotation % self.rotations]

    def get_shape_coords(self, rotation):
        """Returns a list of relative (y, x) coordinates that make up the shape."""
        return self.shape_coords[rotation % self.rotations]

    def _create_shape_coords(self, rotation):
        """Yield (offset_y, offset_x) for every occupied cell of a rotation."""
        blueprint = self._get_blueprint(rotation)
        width = len(blueprint[0])
        height = len(blueprint)
        for offset_y in range(height):
            for offset_x in range(width):
                if blueprint[offset_y][offset_x] != ' ':
                    yield offset_y, offset_x
SHAPE_I = Shape(1, [[
' ',
'####',
' ',
' ',
], [
' # ',
' # ',
' # ',
' # ',
]])
SHAPE_O = Shape(2, [[
'##',
'##',
]])
SHAPE_T = Shape(3, [[
' ',
'###',
' # ',
], [
' # ',
'## ',
' # ',
], [
' # ',
'###',
' ',
], [
' # ',
' ##',
' # ',
]])
SHAPE_S = Shape(4, [[
' ',
' ##',
'## ',
], [
' # ',
' ##',
' #',
]])
SHAPE_Z = Shape(5, [[
' ',
'## ',
' ##',
], [
' #',
' ##',
' # ',
]])
SHAPE_J = Shape(6, [[
' ',
'###',
' #',
], [
' # ',
' # ',
'## ',
], [
'# ',
'###',
' ',
], [
' ##',
' # ',
' # ',
]])
SHAPE_L = Shape(7, [[
' ',
'###',
'# ',
], [
'## ',
' # ',
' # ',
], [
' #',
'###',
' ',
], [
' # ',
' # ',
' ##',
]])
SHAPES = [SHAPE_I, SHAPE_O, SHAPE_T, SHAPE_S, SHAPE_Z, SHAPE_J, SHAPE_L]
class Piece:
    """A tetromino placed on the board at (x, y) with a rotation state."""

    def __init__(self, x, y, shape: Shape, rotation=0):
        self.x = x
        self.y = y
        self.shape = shape
        self.rotation = rotation
        self.shape_coords = None  # lazily computed absolute-coordinate cache

    def rotate(self, dir_rotate):
        """Rotate the piece."""
        self.rotation += dir_rotate
        self.shape_coords = None  # position changed: invalidate the cache

    def move(self, x, y):
        """Move the piece."""
        self.x += x
        self.y += y
        self.shape_coords = None  # position changed: invalidate the cache

    def get_shape_coords(self):
        """Returns a list of coordinates that the piece occupies."""
        if self.shape_coords is None:
            origin_x = self.x - round(self.shape.width / 2)
            origin_y = self.y
            relative = self.shape.get_shape_coords(self.rotation)
            self.shape_coords = [
                (origin_x + dx, origin_y + dy) for dy, dx in relative
            ]
        return self.shape_coords
class Board:
def __init__(self, columns, rows):
self.columns = columns
self.rows = rows
self.pieces_table = [[0 for i in range(columns)] for j in range(rows)]
self.piece = None
self.piece_next = None
self.piece_holding = None
self.piece_last = None
self.can_hold = True
self.bag = get_random_bag()
self.create_piece()
def create_piece(self):
"""The next piece becomes the current piece and spawn it on the board."""
if self.piece_next is not None:
self.piece = self.piece_next
else:
self.piece = self.bag.pop()
self.piece.move(int(self.columns / 2), 0)
self.piece_next = self.bag.pop()
self.can_hold = True
if not self.bag:
self.bag = get_random_bag()
def _place_piece(self):
"""Solidify the current piece onto the board and returns success."""
coords = self.piece.get_shape_coords()
if any(x < 0 or x >= self.columns or y < 0 or y >= self.rows or self.pieces_table[y][x] != 0 for x, y in
coords):
return False
for x, y in coords:
self.pieces_table[y][x] = self.piece.shape.code
self.piece_last = self.piece
self.piece = None
return True
def can_move_piece(self, dir_x, dir_y):
"""Returns true if the piece does not intersect with a non-empty cell when moved."""
for x, y in self.piece.get_shape_coords():
next_x = x + dir_x
next_y = y + dir_y
if next_x < 0 or next_x >= self.columns or next_y < 0 or next_y >= self.rows:
return False
if self.pieces_table[next_y][next_x] != 0:
return False
return True
def move_piece(self, dir_x):
"""Move the piece in a direction and returns success."""
if self.piece is None:
return False
if not self.can_move_piece(dir_x, 0):
return False
self.piece.move(dir_x, 0)
return True
def drop_piece(self):
"""Drop the piece by one cell and returns success."""
if self.piece is None:
return False
if not self.can_move_piece(0, 1):
self._place_piece()
return True
self.piece.move(0, 1)
return False
def rotate_piece(self, dir_rotation):
"""Rotate the current piece and returns success."""
if self.piece is None:
return False
self.piece.rotate(dir_rotation)
if not self.can_move_piece(0, 0):
if not self.move_piece(-1) and not self.move_piece(1):
self.piece.rotate(-dir_rotation)
return False
return True
def is_game_over(self):
"""Returns if the current piece is able to move."""
return self.piece is not None and not self.can_move_piece(0, 0)
def is_row(self, y):
"""Returns if the row is a fully filled one."""
return 0 not in self.pieces_table[y]
def remove_row(self, y):
"""Removes a row from the board."""
removed_row = self.pieces_table.pop(y)
self.pieces_table.insert(0, [0 for i in range(self.columns)])
return removed_row
def insert_row(self, y, row):
"""Inserts a row into the board."""
self.pieces_table.pop(0)
self.pieces_table.insert(y, row)
def move_and_drop(self, x, rotation):
"""Move the piece and drop it as far down as possible and returns success."""
if self.piece is None:
return False
self.piece.rotate(rotation)
return self.can_move_piece(0, 0) and self.move_piece(-self.piece.x + x) and self.drop_piece_fully()
def drop_piece_fully(self):
"""Drops the current piece as far down as possible and returns success."""
if self.piece is None:
return False
while self.can_move_piece(0, 1):
self.piece.move(0, 1)
return self._place_piece()
def hold_piece(self):
"""Switches the piece held with the current and returns success."""
if self.piece is None or not self.can_hold:
return False
piece_current = self.piece
self.piece = self.piece_holding
self.piece_holding = piece_current
self.piece_holding.move(-self.piece_holding.x, -self.piece_holding.y)
if self.piece is None:
self.create_piece()
else:
self.piece.move(int(self.columns / 2), 2)
self.can_hold = False
return True
def get_possible_states(self):
"""Returns all possible states of the board with the corresponding action tuple.
Tries out every possible way to turn and move the current piece.
The action taken and the state of the board is combined into a tuple and added to the returning list
After every try the board is reset to original state.
:rtype: A list with a tuple of (action, state).
action = (column, rotation)
state = return value of `get_info`
"""
if self.piece is None:
return []
states = []
last_piece = self.piece_last
for rotation in range(self.piece.shape.rotations):
for column in range(self.columns + 1):
piece = Piece(self.piece.x, self.piece.y, self.piece.shape, self.piece.rotation)
# Execute
if self.move_and_drop(column, rotation):
rows_cleared = self.get_cleared_rows()
removed_rows = []
for y in rows_cleared:
removed_rows.append((y, self.remove_row(y)))
# Save
states.append(((column, rotation), self.get_info(rows_cleared)))
# Reset
for y, row in reversed(removed_rows):
self.insert_row(y, row)
for x, y in self.piece_last.get_shape_coords():
self.pieces_table[y][x] = 0
self.piece = piece
self.piece_last = last_piece
return states
def get_info(self, rows_cleared):
"""Returns the state of the board using statistics.
0: Rows cleared
1: Bumpiness
2: Holes
3: Landing height
4: Row transitions
5: Column transitions
6: Cumulative wells
7: Eroded piece cells
8: Aggregate height
:rtype: Integer array
"""
if self.piece_last is not None:
last_piece_coords = self.piece_last.get_shape_coords()
eroded_piece_cells = len(rows_cleared) * sum(y in rows_cleared for x, y in last_piece_coords)
landing_height = 0 if self.piece_last is None else 1 + self.rows - max(y for x, y in last_piece_coords)
else:
eroded_piece_cells = 0
landing_height = 0
return [
len(rows_cleared),
self.get_bumpiness(),
self.get_hole_count(),
landing_height,
self.get_row_transitions(),
self.get_column_transitions(),
self.get_cumulative_wells(),
eroded_piece_cells,
self.get_aggregate_height(),
]
def get_cleared_rows(self):
"""Returns the the amount of rows cleared."""
return list(filter(lambda y: self.is_row(y), range(self.rows)))
def get_row_transitions(self):
"""Returns the number of horizontal cell transitions."""
total = 0
for y in range(self.rows):
row_count = 0
last_empty = False
for x in range(self.columns):
empty = self.pieces_table[y][x] == 0
if last_empty != empty:
row_count += 1
last_empty = empty
if last_empty:
row_count += 1
if last_empty and row_count == 2:
continue
total += row_count
return total
def get_column_transitions(self):
"""Returns the number of vertical cell transitions."""
total = 0
for x in range(self.columns):
column_count = 0
last_empty = False
for y in reversed(range(self.rows)):
empty = self.pieces_table[y][x] == 0
if last_empty and not empty:
column_count += 2
last_empty = empty
if last_empty and column_count == 1:
continue
total += column_count
return total
def get_bumpiness(self):
"""Returns the total of the difference between the height of each column."""
bumpiness = 0
last_height = -1
for x in range(self.columns):
current_height = 0
for y in range(self.rows):
if self.pieces_table[y][x] != 0:
current_height = self.rows - y
break
if last_height != -1:
bumpiness += abs(last_height - current_height)
last_height = current_height
return bumpiness
def get_cumulative_wells(self):
"""Returns the sum of all wells."""
wells = [0 for i in range(self.columns)]
for y, row in enumerate(self.pieces_table):
left_empty = True
for x, code in enumerate(row):
if code == 0:
well = False
right_empty = self.columns > x + 1 >= 0 and self.pieces_table[y][x + 1] == 0
if left_empty or right_empty:
well = True
wells[x] = 0 if well else wells[x] + 1
left_empty = True
else:
left_empty = False
return sum(wells)
def get_aggregate_height(self):
"""Returns the sum of the heights of each column."""
aggregate_height = 0
for x in range(self.columns):
for y in range(self.rows):
if self.pieces_table[y][x] != 0:
aggregate_height += self.rows - y
break
return aggregate_height
def get_hole_count(self):
"""returns the number of empty cells covered by a full cell."""
hole_count = 0
for x in range(self.columns):
below = False
for y in range(self.rows):
empty = self.pieces_table[y][x] == 0
if not below and not empty:
below = True
elif below and empty:
hole_count += 1
return hole_count
| [
"random.shuffle"
] | [((136, 165), 'random.shuffle', 'random.shuffle', (['random_shapes'], {}), '(random_shapes)\n', (150, 165), False, 'import random\n')] |
from django.shortcuts import render
# Python functions - user is going to request an url
# Create your views here.
from django.http import HttpResponse
def index(request):
return HttpResponse("<h1> This is the music app homepage</h1>") | [
"django.http.HttpResponse"
] | [((185, 241), 'django.http.HttpResponse', 'HttpResponse', (['"""<h1> This is the music app homepage</h1>"""'], {}), "('<h1> This is the music app homepage</h1>')\n", (197, 241), False, 'from django.http import HttpResponse\n')] |
'''
@author <NAME>
Please contact <EMAIL>
'''
import math
import torch
class PositionalEmbedding(torch.nn.Module):
'''
Implementation of Positional Embedding.
'''
def __init__(self, hidden_size, device=torch.device("cpu")):
super().__init__()
self.hidden_size = hidden_size
self.posEmb = torch.zeros(10000, hidden_size, dtype=torch.float)
self.posEmb.require_grad = False
position = torch.arange(10000, dtype=torch.float).unsqueeze(1)
p_term1 = torch.arange(0, hidden_size, 2, dtype=torch.float)
p_term2 = - math.log(10000.0) / hidden_size
inv_term = torch.exp(p_term1 * p_term2)
posEmb_input = position * inv_term
self.posEmb[:, 0::2] = torch.sin(posEmb_input)
self.posEmb[:, 1::2] = torch.cos(posEmb_input)
self.posEmb = self.posEmb.unsqueeze(0).to(device)
def forward(self, input_):
'''
input_: Input sequence.
'''
seq_len = input_.size(1)
pos_emb = self.posEmb[:, :seq_len]
return pos_emb
| [
"torch.sin",
"torch.exp",
"math.log",
"torch.cos",
"torch.zeros",
"torch.arange",
"torch.device"
] | [((222, 241), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (234, 241), False, 'import torch\n'), ((333, 383), 'torch.zeros', 'torch.zeros', (['(10000)', 'hidden_size'], {'dtype': 'torch.float'}), '(10000, hidden_size, dtype=torch.float)\n', (344, 383), False, 'import torch\n'), ((515, 565), 'torch.arange', 'torch.arange', (['(0)', 'hidden_size', '(2)'], {'dtype': 'torch.float'}), '(0, hidden_size, 2, dtype=torch.float)\n', (527, 565), False, 'import torch\n'), ((637, 665), 'torch.exp', 'torch.exp', (['(p_term1 * p_term2)'], {}), '(p_term1 * p_term2)\n', (646, 665), False, 'import torch\n'), ((741, 764), 'torch.sin', 'torch.sin', (['posEmb_input'], {}), '(posEmb_input)\n', (750, 764), False, 'import torch\n'), ((796, 819), 'torch.cos', 'torch.cos', (['posEmb_input'], {}), '(posEmb_input)\n', (805, 819), False, 'import torch\n'), ((445, 483), 'torch.arange', 'torch.arange', (['(10000)'], {'dtype': 'torch.float'}), '(10000, dtype=torch.float)\n', (457, 483), False, 'import torch\n'), ((586, 603), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (594, 603), False, 'import math\n')] |
from tor4 import tensor
def test_tensor_sum():
a = tensor(data=[-1, 1, 2])
a_sum = a.sum()
assert a_sum.tolist() == 2
assert not a_sum.requires_grad
def test_tensor_sum_backward():
a = tensor(data=[-1, 1, 2.0], requires_grad=True)
a_sum = a.sum()
a_sum.backward()
assert a_sum.tolist() == 2
assert a_sum.requires_grad
assert a.grad.tolist() == [1, 1, 1]
def test_tensor_sum_backward2():
a = tensor(data=[-1, 1, 2.0], requires_grad=True)
a_sum = a.sum()
a_sum.backward(tensor(3))
assert a_sum.tolist() == 2
assert a_sum.requires_grad
assert a.grad.tolist() == [3, 3, 3]
def test_tensor_sum1_backward():
a = tensor(data=[[-1, 1, 2], [1, 2, 3.0]], requires_grad=True)
a_sum = a.sum(dim=1)
a_sum.backward(tensor(data=[2, 3]))
assert a_sum.tolist() == [2, 6]
assert a_sum.requires_grad
assert a.grad.tolist() == [[2, 2, 2], [3, 3, 3]]
def test_tensor_sum2_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum(dim=1)
a_sum.backward(tensor(data=[[2], [3]]))
assert a_sum.tolist() == [[2], [6]]
assert a_sum.requires_grad
assert a.grad.tolist() == [[[2], [2], [2]], [[3], [3], [3]]]
def test_tensor_sum3_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum()
a_sum.backward()
assert a_sum.tolist() == 8
assert a_sum.requires_grad
assert a.grad.tolist() == [[[1], [1], [1]], [[1], [1], [1]]]
def test_tensor_sum4_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum(dim=(1, 0))
a_sum.backward()
assert a_sum.tolist() == [8]
assert a_sum.requires_grad
assert a.grad.tolist() == [[[1], [1], [1]], [[1], [1], [1]]]
def test_tensor_sum_keepdim1_backward():
a = tensor(data=[[-1, 1, 2], [1, 2, 3.0]], requires_grad=True)
a_sum = a.sum(dim=1, keepdim=True)
a_sum.backward(tensor(data=[[2], [3]]))
assert a_sum.tolist() == [[2], [6]]
assert a_sum.requires_grad
assert a.grad.tolist() == [[2, 2, 2], [3, 3, 3]]
def test_tensor_sum_keepdim2_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum(dim=1, keepdim=True)
a_sum.backward(tensor(data=[[[2]], [[3]]]))
assert a_sum.tolist() == [[[2]], [[6]]]
assert a_sum.requires_grad
assert a.grad.tolist() == [[[2], [2], [2]], [[3], [3], [3]]]
def test_tensor_sum_keepdim3_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum()
a_sum.backward()
assert a_sum.tolist() == 8
assert a_sum.requires_grad
assert a.grad.tolist() == [[[1], [1], [1]], [[1], [1], [1]]]
def test_tensor_sum_keepdim4_backward():
a = tensor(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)
a_sum = a.sum(dim=(1, 0), keepdim=True)
a_sum.backward()
assert a_sum.tolist() == [[[8]]]
assert a_sum.requires_grad
assert a.grad.tolist() == [[[1], [1], [1]], [[1], [1], [1]]]
| [
"tor4.tensor"
] | [((57, 80), 'tor4.tensor', 'tensor', ([], {'data': '[-1, 1, 2]'}), '(data=[-1, 1, 2])\n', (63, 80), False, 'from tor4 import tensor\n'), ((210, 255), 'tor4.tensor', 'tensor', ([], {'data': '[-1, 1, 2.0]', 'requires_grad': '(True)'}), '(data=[-1, 1, 2.0], requires_grad=True)\n', (216, 255), False, 'from tor4 import tensor\n'), ((443, 488), 'tor4.tensor', 'tensor', ([], {'data': '[-1, 1, 2.0]', 'requires_grad': '(True)'}), '(data=[-1, 1, 2.0], requires_grad=True)\n', (449, 488), False, 'from tor4 import tensor\n'), ((685, 743), 'tor4.tensor', 'tensor', ([], {'data': '[[-1, 1, 2], [1, 2, 3.0]]', 'requires_grad': '(True)'}), '(data=[[-1, 1, 2], [1, 2, 3.0]], requires_grad=True)\n', (691, 743), False, 'from tor4 import tensor\n'), ((973, 1043), 'tor4.tensor', 'tensor', ([], {'data': '[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (979, 1043), False, 'from tor4 import tensor\n'), ((1293, 1363), 'tor4.tensor', 'tensor', ([], {'data': '[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (1299, 1363), False, 'from tor4 import tensor\n'), ((1576, 1646), 'tor4.tensor', 'tensor', ([], {'data': '[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (1582, 1646), False, 'from tor4 import tensor\n'), ((1879, 1937), 'tor4.tensor', 'tensor', ([], {'data': '[[-1, 1, 2], [1, 2, 3.0]]', 'requires_grad': '(True)'}), '(data=[[-1, 1, 2], [1, 2, 3.0]], requires_grad=True)\n', (1885, 1937), False, 'from tor4 import tensor\n'), ((2197, 2267), 'tor4.tensor', 'tensor', ([], {'data': '[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (2203, 2267), False, 'from tor4 import tensor\n'), ((2547, 2617), 'tor4.tensor', 'tensor', ([], {'data': 
'[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (2553, 2617), False, 'from tor4 import tensor\n'), ((2838, 2908), 'tor4.tensor', 'tensor', ([], {'data': '[[[-1], [1], [2]], [[1], [2], [3.0]]]', 'requires_grad': '(True)'}), '(data=[[[-1], [1], [2]], [[1], [2], [3.0]]], requires_grad=True)\n', (2844, 2908), False, 'from tor4 import tensor\n'), ((528, 537), 'tor4.tensor', 'tensor', (['(3)'], {}), '(3)\n', (534, 537), False, 'from tor4 import tensor\n'), ((788, 807), 'tor4.tensor', 'tensor', ([], {'data': '[2, 3]'}), '(data=[2, 3])\n', (794, 807), False, 'from tor4 import tensor\n'), ((1088, 1111), 'tor4.tensor', 'tensor', ([], {'data': '[[2], [3]]'}), '(data=[[2], [3]])\n', (1094, 1111), False, 'from tor4 import tensor\n'), ((1996, 2019), 'tor4.tensor', 'tensor', ([], {'data': '[[2], [3]]'}), '(data=[[2], [3]])\n', (2002, 2019), False, 'from tor4 import tensor\n'), ((2326, 2353), 'tor4.tensor', 'tensor', ([], {'data': '[[[2]], [[3]]]'}), '(data=[[[2]], [[3]]])\n', (2332, 2353), False, 'from tor4 import tensor\n')] |
"""init module for natcap.invest."""
import dataclasses
import logging
import os
import sys
import pkg_resources
# Package-level logger; a NullHandler avoids "no handler found" warnings
# for library users who have not configured logging.
LOGGER = logging.getLogger('natcap.invest')
LOGGER.addHandler(logging.NullHandler())
__all__ = ['local_dir', ]

try:
    # Resolve the installed distribution's version string at import time.
    __version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
    # package is not installed. Log the exception for debugging.
    LOGGER.exception('Could not load natcap.invest version information')
@dataclasses.dataclass
class _MODELMETA:
"""Dataclass to store frequently used model metadata."""
model_title: str # display name for the model
pyname: str # importable python module name for the model
gui: str # importable python class for the corresponding Qt UI
userguide: str # name of the corresponding built userguide file
aliases: tuple # alternate names for the model, if any
# Registry of InVEST model metadata, keyed by a short model identifier;
# each value's `aliases` tuple lists accepted shorthand spellings.
MODEL_METADATA = {
    'annual_water_yield': _MODELMETA(
        model_title='Annual Water Yield',
        pyname='natcap.invest.annual_water_yield',
        gui='annual_water_yield.AnnualWaterYield',
        userguide='annual_water_yield.html',
        aliases=('hwy', 'awy')),
    'carbon': _MODELMETA(
        model_title='Carbon Storage and Sequestration',
        pyname='natcap.invest.carbon',
        gui='carbon.Carbon',
        userguide='carbonstorage.html',
        aliases=()),
    'coastal_blue_carbon': _MODELMETA(
        model_title='Coastal Blue Carbon',
        pyname='natcap.invest.coastal_blue_carbon.coastal_blue_carbon',
        gui='cbc.CoastalBlueCarbon',
        userguide='coastal_blue_carbon.html',
        aliases=('cbc',)),
    'coastal_blue_carbon_preprocessor': _MODELMETA(
        model_title='Coastal Blue Carbon Preprocessor',
        pyname='natcap.invest.coastal_blue_carbon.preprocessor',
        gui='cbc.CoastalBlueCarbonPreprocessor',
        userguide='coastal_blue_carbon.html',
        aliases=('cbc_pre',)),
    'coastal_vulnerability': _MODELMETA(
        model_title='Coastal Vulnerability',
        pyname='natcap.invest.coastal_vulnerability',
        gui='coastal_vulnerability.CoastalVulnerability',
        userguide='coastal_vulnerability.html',
        aliases=('cv',)),
    'crop_production_percentile': _MODELMETA(
        model_title='Crop Production: Percentile',
        pyname='natcap.invest.crop_production_percentile',
        gui='crop_production.CropProductionPercentile',
        userguide='crop_production.html',
        aliases=('cpp',)),
    'crop_production_regression': _MODELMETA(
        model_title='Crop Production: Regression',
        pyname='natcap.invest.crop_production_regression',
        gui='crop_production.CropProductionRegression',
        userguide='crop_production.html',
        aliases=('cpr',)),
    'delineateit': _MODELMETA(
        model_title='DelineateIt',
        pyname='natcap.invest.delineateit.delineateit',
        gui='delineateit.Delineateit',
        userguide='delineateit.html',
        aliases=()),
    'finfish_aquaculture': _MODELMETA(
        model_title='Finfish Aquaculture',
        pyname='natcap.invest.finfish_aquaculture.finfish_aquaculture',
        gui='finfish.FinfishAquaculture',
        userguide='marine_fish.html',
        aliases=()),
    'fisheries': _MODELMETA(
        model_title='Fisheries',
        pyname='natcap.invest.fisheries.fisheries',
        gui='fisheries.Fisheries',
        userguide='fisheries.html',
        aliases=()),
    'fisheries_hst': _MODELMETA(
        model_title='Fisheries Habitat Scenario Tool',
        pyname='natcap.invest.fisheries.fisheries_hst',
        gui='fisheries.FisheriesHST',
        userguide='fisheries.html',
        aliases=()),
    'forest_carbon_edge_effect': _MODELMETA(
        model_title='Forest Carbon Edge Effect',
        pyname='natcap.invest.forest_carbon_edge_effect',
        gui='forest_carbon.ForestCarbonEdgeEffect',
        userguide='carbon_edge.html',
        aliases=('fc',)),
    'globio': _MODELMETA(
        model_title='GLOBIO',
        pyname='natcap.invest.globio',
        gui='globio.GLOBIO',
        userguide='globio.html',
        aliases=()),
    'habitat_quality': _MODELMETA(
        model_title='Habitat Quality',
        pyname='natcap.invest.habitat_quality',
        gui='habitat_quality.HabitatQuality',
        userguide='habitat_quality.html',
        aliases=('hq',)),
    'habitat_risk_assessment': _MODELMETA(
        model_title='Habitat Risk Assessment',
        pyname='natcap.invest.hra',
        gui='hra.HabitatRiskAssessment',
        userguide='habitat_risk_assessment.html',
        aliases=('hra',)),
    'ndr': _MODELMETA(
        model_title='Nutrient Delivery Ratio',
        pyname='natcap.invest.ndr.ndr',
        gui='ndr.Nutrient',
        userguide='ndr.html',
        aliases=()),
    'pollination': _MODELMETA(
        model_title='Crop Pollination',
        pyname='natcap.invest.pollination',
        gui='pollination.Pollination',
        userguide='croppollination.html',
        aliases=()),
    'recreation': _MODELMETA(
        model_title='Visitation: Recreation and Tourism',
        pyname='natcap.invest.recreation.recmodel_client',
        gui='recreation.Recreation',
        userguide='recreation.html',
        aliases=()),
    'routedem': _MODELMETA(
        model_title='RouteDEM',
        pyname='natcap.invest.routedem',
        gui='routedem.RouteDEM',
        userguide='routedem.html',
        aliases=()),
    'scenario_generator_proximity': _MODELMETA(
        model_title='Scenario Generator: Proximity Based',
        pyname='natcap.invest.scenario_gen_proximity',
        gui='scenario_gen.ScenarioGenProximity',
        userguide='scenario_gen_proximity.html',
        aliases=('sgp',)),
    'scenic_quality': _MODELMETA(
        model_title='Unobstructed Views: Scenic Quality Provision',
        pyname='natcap.invest.scenic_quality.scenic_quality',
        gui='scenic_quality.ScenicQuality',
        userguide='scenic_quality.html',
        aliases=('sq',)),
    'sdr': _MODELMETA(
        model_title='Sediment Delivery Ratio',
        pyname='natcap.invest.sdr.sdr',
        gui='sdr.SDR',
        userguide='sdr.html',
        aliases=()),
    'seasonal_water_yield': _MODELMETA(
        model_title='Seasonal Water Yield',
        pyname='natcap.invest.seasonal_water_yield.seasonal_water_yield',
        gui='seasonal_water_yield.SeasonalWaterYield',
        userguide='seasonal_water_yield.html',
        aliases=('swy',)),
    'wave_energy': _MODELMETA(
        model_title='Wave Energy Production',
        pyname='natcap.invest.wave_energy',
        gui='wave_energy.WaveEnergy',
        userguide='wave_energy.html',
        aliases=()),
    'wind_energy': _MODELMETA(
        model_title='Wind Energy Production',
        pyname='natcap.invest.wind_energy',
        gui='wind_energy.WindEnergy',
        userguide='wind_energy.html',
        aliases=()),
    'urban_flood_risk_mitigation': _MODELMETA(
        model_title='Urban Flood Risk Mitigation',
        pyname='natcap.invest.urban_flood_risk_mitigation',
        gui='urban_flood_risk_mitigation.UrbanFloodRiskMitigation',
        userguide='urban_flood_risk_mitigation.html',
        aliases=('ufrm',)),
    'urban_cooling_model': _MODELMETA(
        model_title='Urban Cooling',
        pyname='natcap.invest.urban_cooling_model',
        gui='urban_cooling_model.UrbanCoolingModel',
        userguide='urban_cooling_model.html',
        aliases=('ucm',)),
}
def local_dir(source_file):
"""Return the path to where `source_file` would be on disk.
If this is frozen (as with PyInstaller), this will be the folder with the
executable in it. If not, it'll just be the foldername of the source_file
being passed in.
"""
source_dirname = os.path.dirname(source_file)
if getattr(sys, 'frozen', False):
# sys.frozen is True when we're in either a py2exe or pyinstaller
# build.
# sys._MEIPASS exists, we're in a Pyinstaller build.
if not getattr(sys, '_MEIPASS', False):
# only one os.path.dirname() results in the path being relative to
# the natcap.invest package, when I actually want natcap/invest to
# be in the filepath.
# relpath would be something like <modelname>/<data_file>
relpath = os.path.relpath(source_file, os.path.dirname(__file__))
pkg_path = os.path.join('natcap', 'invest', relpath)
return os.path.join(
os.path.dirname(sys.executable), os.path.dirname(pkg_path))
else:
# assume that if we're in a frozen build, we're in py2exe. When in
# py2exe, the directory structure is maintained, so we just return
# the source_dirname.
pass
return source_dirname
| [
"logging.getLogger",
"logging.NullHandler",
"os.path.join",
"os.path.dirname",
"pkg_resources.get_distribution"
] | [((125, 159), 'logging.getLogger', 'logging.getLogger', (['"""natcap.invest"""'], {}), "('natcap.invest')\n", (142, 159), False, 'import logging\n'), ((178, 199), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (197, 199), False, 'import logging\n'), ((7822, 7850), 'os.path.dirname', 'os.path.dirname', (['source_file'], {}), '(source_file)\n', (7837, 7850), False, 'import os\n'), ((251, 291), 'pkg_resources.get_distribution', 'pkg_resources.get_distribution', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import pkg_resources\n'), ((8453, 8494), 'os.path.join', 'os.path.join', (['"""natcap"""', '"""invest"""', 'relpath'], {}), "('natcap', 'invest', relpath)\n", (8465, 8494), False, 'import os\n'), ((8403, 8428), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (8418, 8428), False, 'import os\n'), ((8544, 8575), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (8559, 8575), False, 'import os\n'), ((8577, 8602), 'os.path.dirname', 'os.path.dirname', (['pkg_path'], {}), '(pkg_path)\n', (8592, 8602), False, 'import os\n')] |
import math
import sys
from fractions import Fraction
from random import uniform, randint
import decimal as dec
def log10_floor(f):
b, k = 1, -1
while b <= f:
b *= 10
k += 1
return k
def log10_ceil(f):
b, k = 1, 0
while b < f:
b *= 10
k += 1
return k
def log10_floor(f):
if f <= 0: return -1
t, b, k, k_step = 1, 10, 0, 1
while True:
t1 = t * b
if t1 > f:
if k_step == 1:
break
k_step = 1
b = 10
else:
b *= 10
k += k_step
k_step += 1
t = t1
return k
# for i in range(20):
# f = 10 ** i
# print(f'{f}: {log10_floor(f)}, {log10_floor2(f)}')
# print(log10_floor2(100))
# sys.exit(0)
def str_of_pos_float_hi0(prec, x):
assert x > 0
q = Fraction(x)
n = int(q)
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
r, e = n * b + int((q - n) * b), k - prec
else:
k = log10_floor(int(1 / q))
b = 10 ** (k + prec)
r, e = int(q * b), -(k + prec)
if r * Fraction(10) ** e < q:
r += 1
s = str(r)
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_hi1(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
mask = (1 << abs(exp)) - 1
if exp >= 0:
n, rem = m << exp, 0
else:
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
(r, rem2), e = divmod(n, b), k - prec
rem2 = rem2 or rem
else:
b = 10 ** (prec - k)
t = rem * b
t, rem2 = t >> -exp, t & mask
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
t = rem * b
r, rem2, e = t >> -exp, t & mask, -(k + prec)
if rem2:
r += 1
s = str(r)
assert prec <= len(s) <= prec + 1
if len(s) > prec:
s = s[:-1]
e += 1
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
def str_of_pos_float_lo(prec, x):
assert x > 0
m, exp = math.frexp(x)
m, exp = int(math.ldexp(m, 53)), exp - 53
if exp >= 0:
n, rem = m << exp, 0
else:
mask = (1 << abs(exp)) - 1
n, rem = m >> -exp, m & mask
if n > 0:
k = log10_floor(n) + 1
if k >= prec:
b = 10 ** (k - prec)
r, e = n // b, k - prec
else:
b = 10 ** (prec - k)
t = (rem * b) >> -exp
r, e = n * b + t, k - prec
else:
k = log10_floor((1 << -exp) // rem)
b = 10 ** (k + prec)
t = rem * b
r, e = (rem * b) >> -exp, -(k + prec)
s = str(r)
assert len(s) == prec
e += prec - 1
s = f'{s[0]}.{s[1:]}'
if e == 0:
return s
return s + ('e+' if e > 0 else 'e') + str(e)
# print(str_of_pos_float_hi(2, 230454523525e+100))
def decimal_test_hi(prec, x, s=None):
if s is None:
s = str_of_pos_float_hi1(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_UP
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (hi): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def decimal_test_lo(prec, x, s=None):
if s is None:
s = str_of_pos_float_lo(prec, x)
with dec.localcontext() as ctx:
ctx.prec = prec
ctx.rounding = dec.ROUND_DOWN
v = +dec.Decimal(x)
t = +dec.Decimal(s)
if v != t:
print(f'Error (lo): decimal = {v}, my = {s} (prec = {prec}, x = {x})')
def tests(n, a, b):
for _ in range(n):
x = uniform(a, b)
prec = randint(1, 15)
decimal_test_hi(prec, x)
decimal_test_lo(prec, x)
def tests2(n):
for _ in range(n):
prec = randint(1, 15)
t = randint(-100, 100)
decimal_test_hi(prec, 2.0 ** t)
decimal_test_lo(prec, 2.0 ** t)
tests(10000, 1e-300, 1)
tests(10000, 0.5, 1000)
tests(10000, 1e+10, 1e+100)
tests(10000, 1e-300, 1e+300)
tests2(10000)
#print(str_of_pos_float_hi1(1, 0.47))
#print(str_of_pos_float_hi1(1, 0.5))
# print(str_of_pos_float_hi1(100, 0.3))
def check_ocaml_results(fname):
print(f'Checking: {fname}')
with open(fname, 'r') as f:
for line in f:
x, prec, s0, s1, s_lo = line.strip().split(',')
decimal_test_hi(int(prec), float(x), s0)
decimal_test_hi(int(prec), float(x), s1)
decimal_test_lo(int(prec), float(x), s_lo)
check_ocaml_results('out.txt') | [
"random.uniform",
"math.ldexp",
"fractions.Fraction",
"decimal.localcontext",
"math.frexp",
"random.randint",
"decimal.Decimal"
] | [((851, 862), 'fractions.Fraction', 'Fraction', (['x'], {}), '(x)\n', (859, 862), False, 'from fractions import Fraction\n'), ((1539, 1552), 'math.frexp', 'math.frexp', (['x'], {}), '(x)\n', (1549, 1552), False, 'import math\n'), ((2540, 2553), 'math.frexp', 'math.frexp', (['x'], {}), '(x)\n', (2550, 2553), False, 'import math\n'), ((3459, 3477), 'decimal.localcontext', 'dec.localcontext', ([], {}), '()\n', (3475, 3477), True, 'import decimal as dec\n'), ((3811, 3829), 'decimal.localcontext', 'dec.localcontext', ([], {}), '()\n', (3827, 3829), True, 'import decimal as dec\n'), ((4115, 4128), 'random.uniform', 'uniform', (['a', 'b'], {}), '(a, b)\n', (4122, 4128), False, 'from random import uniform, randint\n'), ((4144, 4158), 'random.randint', 'randint', (['(1)', '(15)'], {}), '(1, 15)\n', (4151, 4158), False, 'from random import uniform, randint\n'), ((4279, 4293), 'random.randint', 'randint', (['(1)', '(15)'], {}), '(1, 15)\n', (4286, 4293), False, 'from random import uniform, randint\n'), ((4306, 4324), 'random.randint', 'randint', (['(-100)', '(100)'], {}), '(-100, 100)\n', (4313, 4324), False, 'from random import uniform, randint\n'), ((1570, 1587), 'math.ldexp', 'math.ldexp', (['m', '(53)'], {}), '(m, 53)\n', (1580, 1587), False, 'import math\n'), ((2571, 2588), 'math.ldexp', 'math.ldexp', (['m', '(53)'], {}), '(m, 53)\n', (2581, 2588), False, 'import math\n'), ((3559, 3573), 'decimal.Decimal', 'dec.Decimal', (['x'], {}), '(x)\n', (3570, 3573), True, 'import decimal as dec\n'), ((3587, 3601), 'decimal.Decimal', 'dec.Decimal', (['s'], {}), '(s)\n', (3598, 3601), True, 'import decimal as dec\n'), ((3913, 3927), 'decimal.Decimal', 'dec.Decimal', (['x'], {}), '(x)\n', (3924, 3927), True, 'import decimal as dec\n'), ((3941, 3955), 'decimal.Decimal', 'dec.Decimal', (['s'], {}), '(s)\n', (3952, 3955), True, 'import decimal as dec\n'), ((1240, 1252), 'fractions.Fraction', 'Fraction', (['(10)'], {}), '(10)\n', (1248, 1252), False, 'from fractions import 
Fraction\n')] |
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="example-pkg-vanderbeck", # Replace with your own username
version="0.0.1",
author="<NAME>",
author_email="<EMAIL>",
description="A small example package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Vanderbeck/example_pkg.git",
packages=setuptools.find_packages(),
install_requires=[
'regex'
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
],
python_requires='>=3.5',
# entry_points = {
# 'console_scripts' : ['example_pkg = myscript.myscript:main']
# },
)
| [
"setuptools.find_packages"
] | [((465, 491), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (489, 491), False, 'import setuptools\n')] |
"""
Description: A Python module to use easily the osu!api V1.
Author: LostPy
License: MIT
Date: 2021-01-11
"""
import requests as req
import json
from . import from_json
base_url = 'https://osu.ppy.sh/api'
# Endpoint lookup table for the osu! API v1.
# NOTE(review): each template keeps a trailing '?'; requests builds the query
# string itself from `params`, so the '?' may be redundant -- verify the URLs.
urls = {
    'beatmaps': base_url + '/get_beatmaps?',
    'user': base_url + '/get_user?',
    'scores': base_url + '/get_scores?',
    'user_best': base_url + '/get_user_best?',
    'user_recent': base_url + '/get_user_recent?',
    'match': base_url + '/get_match?',
    'replay': base_url + '/get_replay?'
}
def get_beatmaps(key: str, since: str = None, beatmapset_id: int = None, beatmap_id: int = None, type_return: str = 'dict', **kwargs):
"""Retrieve general beatmap information."""
params = {
'k': key,
'since': since,
's': beatmapset_id,
'b': beatmap_id,
'u': kwargs['user'] if 'user' in kwargs else None,
'type': kwargs['type_'] if 'type_' in kwargs else None,
'mode': kwargs['mode'] if 'mode' in kwargs else None,
'a': kwargs['a'] if 'a' in kwargs else 0,
'h': kwargs['h'] if 'h' in kwargs else None,
'limit': kwargs['limit'] if 'limit' in kwargs else 500,
'mods': kwargs['mods'] if 'mods' in kwargs else None}
r = req.get(urls['beatmaps'], params=params)
return from_json(r.text, type_return)
def get_user(key: str, user: int, type_return: str = 'dict', **kwargs):
"""Retrieve general user information."""
params = {
'k': key,
'u': user,
'm': kwargs['mode'] if 'mode' in kwargs else 0,
'type': kwargs['type_'] if 'type_' in kwargs else None,
'event_days': kwargs['event_days'] if 'event_days' in kwargs else 1}
r = req.get(urls['user'], params=params)
return from_json(r.text, type_return)
def get_scores(key: str, beatmap_id: int, user: int = None, type_return: str = 'dict', **kwargs):
"""Retrieve information about the top 100 scores of a specified beatmap."""
params = {
'k': key,
'b': beatmap_id,
'u': user,
'm': kwargs['mode'] if 'mode' in kwargs else 0,
'mods': kwargs['mods'] if 'mods' in kwargs else 0,
'type': kwargs['type_'] if 'type_' in kwargs else None,
'limit': kwargs['limit'] if 'limit' in kwargs else 50}
r = req.get(urls['scores'], params=params)
return from_json(r.text, type_return)
def get_user_best(key: str, user: int, mode: int = 0, limit: int = 10, type_: str = None, type_return: str = 'dict'):
"""Get the top scores for the specified user."""
params = {
'k': key,
'u': user,
'm': mode,
'limit': limit,
'type': type_}
r = req.get(urls['user_best'], params=params)
return from_json(r.text, type_return)
def get_user_recent(key: str, user: int, mode: int = 0, limit: int = 10, type_: str = None, type_return: str = 'dict'):
"""Gets the user's ten most recent plays over the last 24 hours."""
params = {
'k': key,
'u': user,
'm': mode,
'limit': limit,
'type': type_}
r = req.get(urls['user_recent'], params=params)
return from_json(r.text, type_return)
def get_match(key: str, match_id: int, type_return: str = 'dict'):
"""Retrieve information about multiplayer match."""
r = req.get(urls['match'], {'k': key, 'mp': match_id})
return from_json(r.text, type_return)
def get_replay(key: str, beatmap_id: int, user: int, **kwargs):
"""Get the replay data of a user's score on a map."""
params = {
'k': key,
'b': beatmap_id,
'u': user,
'm': kwargs['mode'] if 'mode' in kwargs else None,
's': kwargs['score_id'] if 'score_id' in kwargs else None,
'type_': kwargs['type_'] if 'type_' in kwargs else None,
'mods': kwargs['mods'] if 'mods' in kwargs else None}
return json.loads(req.get(urls['replay'], params=params).text)
def get_cover_image(beatmapset_id: int):
"""Return url of cover image from beatmapset_id."""
return f"https://assets.ppy.sh/beatmaps/{beatmapset_id}/covers/cover.jpg"
def get_profile_image(user_id: int):
"""Return url of profile image of user."""
return f"http://s.ppy.sh/a/{user_id}"
| [
"requests.get"
] | [((1126, 1166), 'requests.get', 'req.get', (["urls['beatmaps']"], {'params': 'params'}), "(urls['beatmaps'], params=params)\n", (1133, 1166), True, 'import requests as req\n'), ((1538, 1574), 'requests.get', 'req.get', (["urls['user']"], {'params': 'params'}), "(urls['user'], params=params)\n", (1545, 1574), True, 'import requests as req\n'), ((2063, 2101), 'requests.get', 'req.get', (["urls['scores']"], {'params': 'params'}), "(urls['scores'], params=params)\n", (2070, 2101), True, 'import requests as req\n'), ((2396, 2437), 'requests.get', 'req.get', (["urls['user_best']"], {'params': 'params'}), "(urls['user_best'], params=params)\n", (2403, 2437), True, 'import requests as req\n'), ((2753, 2796), 'requests.get', 'req.get', (["urls['user_recent']"], {'params': 'params'}), "(urls['user_recent'], params=params)\n", (2760, 2796), True, 'import requests as req\n'), ((2963, 3013), 'requests.get', 'req.get', (["urls['match']", "{'k': key, 'mp': match_id}"], {}), "(urls['match'], {'k': key, 'mp': match_id})\n", (2970, 3013), True, 'import requests as req\n'), ((3471, 3509), 'requests.get', 'req.get', (["urls['replay']"], {'params': 'params'}), "(urls['replay'], params=params)\n", (3478, 3509), True, 'import requests as req\n')] |
import glob
import matplotlib.pyplot as plt
import numpy as np
import sys
plt.ion()
data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_train.log'))
valid_data_files = list(glob.glob(sys.argv[1]+'/mnist_net_*_valid.log'))
for fname in data_files:
data = np.loadtxt(fname).reshape(-1, 3)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 2], label=name)
for fname in valid_data_files:
data = np.loadtxt(fname).reshape(-1, 2)
name = fname.split('/')[-1]
plt.plot(data[:, 0], 1-data[:, 1], label=name)
plt.legend(loc=1)
raw_input('Press Enter.')
| [
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ion",
"numpy.loadtxt",
"glob.glob"
] | [((75, 84), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (82, 84), True, 'import matplotlib.pyplot as plt\n'), ((527, 544), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(1)'}), '(loc=1)\n', (537, 544), True, 'import matplotlib.pyplot as plt\n'), ((104, 153), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '/mnist_net_*_train.log')"], {}), "(sys.argv[1] + '/mnist_net_*_train.log')\n", (113, 153), False, 'import glob\n'), ((177, 226), 'glob.glob', 'glob.glob', (["(sys.argv[1] + '/mnist_net_*_valid.log')"], {}), "(sys.argv[1] + '/mnist_net_*_valid.log')\n", (186, 226), False, 'import glob\n'), ((326, 374), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '(1 - data[:, 2])'], {'label': 'name'}), '(data[:, 0], 1 - data[:, 2], label=name)\n', (334, 374), True, 'import matplotlib.pyplot as plt\n'), ((479, 527), 'matplotlib.pyplot.plot', 'plt.plot', (['data[:, 0]', '(1 - data[:, 1])'], {'label': 'name'}), '(data[:, 0], 1 - data[:, 1], label=name)\n', (487, 527), True, 'import matplotlib.pyplot as plt\n'), ((261, 278), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (271, 278), True, 'import numpy as np\n'), ((414, 431), 'numpy.loadtxt', 'np.loadtxt', (['fname'], {}), '(fname)\n', (424, 431), True, 'import numpy as np\n')] |
#!/usr/bin/python
# encoding: utf-8
from collections import Counter
from gist import create_workflow
from pprint import pprint as pp
import sys
import workflow
from workflow import Workflow, web
from workflow.background import run_in_background, is_running
def main(wf):
arg = wf.args[0]
wf.add_item(u"Set token", arg=arg, valid=True, icon="icons/token.png")
wf.send_feedback()
if __name__ == '__main__':
wf = create_workflow()
sys.exit(wf.run(main))
| [
"gist.create_workflow"
] | [((431, 448), 'gist.create_workflow', 'create_workflow', ([], {}), '()\n', (446, 448), False, 'from gist import create_workflow\n')] |
import argparse
import glob
import os
import random
import re
from dataclasses import dataclass
from functools import partial
from math import ceil
from typing import List, Optional
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import util
tqdm.monitor_interval = 0
tqdm = partial(tqdm, bar_format="{l_bar}{r_bar}")
TRAIN = "train"
DEV = "dev"
TEST = "test"
class Optimizer(util.NamedEnum):
sgd = "sgd"
adadelta = "adadelta"
adam = "adam"
amsgrad = "amsgrad"
class Scheduler(util.NamedEnum):
reducewhenstuck = "reducewhenstuck"
warmupinvsqr = "warmupinvsqr"
def setup_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
@dataclass
class Evaluation:
filepath: str
devloss: float
evaluation_result: Optional[List[util.Eval]]
class BaseTrainer(object):
"""docstring for Trainer."""
def __init__(self):
super().__init__()
self.parser = argparse.ArgumentParser()
self.set_args()
self.params = self.get_params()
util.maybe_mkdir(self.params.model)
self.logger = util.get_logger(
self.params.model + ".log", log_level=self.params.loglevel
)
for key, value in vars(self.params).items():
self.logger.info("command line argument: %s - %r", key, value)
setup_seed(self.params.seed)
self.data = None
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = None
self.optimizer = None
self.min_lr = 0
self.scheduler = None
self.evaluator = None
self.global_steps = 0
self.last_devloss = float("inf")
self.models: List[Evaluation] = list()
def set_args(self):
"""
get_args
"""
# fmt: off
parser = self.parser
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--train', required=True, type=str, nargs='+')
parser.add_argument('--dev', required=True, type=str, nargs='+')
parser.add_argument('--test', default=None, type=str, nargs='+')
parser.add_argument('--model', required=True, help='dump model filename')
parser.add_argument('--load', default='', help='load model and continue training; with `smart`, recover training automatically')
parser.add_argument('--bs', default=20, type=int, help='training batch size')
parser.add_argument('--epochs', default=20, type=int, help='maximum training epochs')
parser.add_argument('--max_steps', default=0, type=int, help='maximum training steps')
parser.add_argument('--warmup_steps', default=4000, type=int, help='number of warm up steps')
parser.add_argument('--total_eval', default=-1, type=int, help='total number of evaluation')
parser.add_argument('--optimizer', default=Optimizer.adam, type=Optimizer, choices=list(Optimizer))
parser.add_argument('--scheduler', default=Scheduler.reducewhenstuck, type=Scheduler, choices=list(Scheduler))
parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
parser.add_argument('--min_lr', default=1e-5, type=float, help='minimum learning rate')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum of SGD')
parser.add_argument('--beta1', default=0.9, type=float, help='beta1 of Adam')
parser.add_argument('--beta2', default=0.999, type=float, help='beta2 of Adam')
parser.add_argument('--estop', default=1e-8, type=float, help='early stopping criterion')
parser.add_argument('--cooldown', default=0, type=int, help='cooldown of `ReduceLROnPlateau`')
parser.add_argument('--patience', default=0, type=int, help='patience of `ReduceLROnPlateau`')
parser.add_argument('--discount_factor', default=0.5, type=float, help='discount factor of `ReduceLROnPlateau`')
parser.add_argument('--max_norm', default=0, type=float, help='gradient clipping max norm')
parser.add_argument('--gpuid', default=[], nargs='+', type=int, help='choose which GPU to use')
parser.add_argument('--loglevel', default='info', choices=['info', 'debug'])
parser.add_argument('--saveall', default=False, action='store_true', help='keep all models')
parser.add_argument('--shuffle', default=False, action='store_true', help='shuffle the data')
parser.add_argument('--cleanup_anyway', default=False, action='store_true', help='cleanup anyway')
# fmt: on
def get_params(self):
return self.parser.parse_args()
def checklist_before_run(self):
assert self.data is not None, "call load_data before run"
assert self.model is not None, "call build_model before run"
assert self.optimizer is not None, "call setup_training before run"
assert self.scheduler is not None, "call setup_scheduler before run"
assert self.evaluator is not None, "call setup_evalutator before run"
def load_data(self, dataset, train, dev, test):
raise NotImplementedError
def build_model(self):
raise NotImplementedError
def load_model(self, model):
assert self.model is None
self.logger.info("load model in %s", model)
self.model = torch.load(model, map_location=self.device)
self.model = self.model.to(self.device)
epoch = int(model.split("_")[-1])
return epoch
def smart_load_model(self, model_prefix):
assert self.model is None
models = []
for model in glob.glob(f"{model_prefix}.nll*"):
res = re.findall(r"\w*_\d+\.?\d*", model[len(model_prefix) :])
loss_ = res[0].split("_")
evals_ = res[1:-1]
epoch_ = res[-1].split("_")
assert loss_[0] == "nll" and epoch_[0] == "epoch"
loss, epoch = float(loss_[1]), int(epoch_[1])
evals = []
for ev in evals_:
ev = ev.split("_")
evals.append(util.Eval(ev[0], ev[0], float(ev[1])))
models.append((epoch, Evaluation(model, loss, evals)))
self.models = [x[1] for x in sorted(models)]
return self.load_model(self.models[-1].filepath)
def setup_training(self):
assert self.model is not None
params = self.params
if params.optimizer == Optimizer.sgd:
self.optimizer = torch.optim.SGD(
self.model.parameters(), params.lr, momentum=params.momentum
)
elif params.optimizer == Optimizer.adadelta:
self.optimizer = torch.optim.Adadelta(self.model.parameters(), params.lr)
elif params.optimizer == Optimizer.adam:
self.optimizer = torch.optim.Adam(
self.model.parameters(), params.lr, betas=(params.beta1, params.beta2)
)
elif params.optimizer == Optimizer.amsgrad:
self.optimizer = torch.optim.Adam(
self.model.parameters(),
params.lr,
betas=(params.beta1, params.beta2),
amsgrad=True,
)
else:
raise ValueError
self.min_lr = params.min_lr
if params.scheduler == Scheduler.reducewhenstuck:
self.scheduler = ReduceLROnPlateau(
self.optimizer,
"min",
patience=params.patience,
cooldown=params.cooldown,
factor=params.discount_factor,
min_lr=params.min_lr,
)
elif params.scheduler == Scheduler.warmupinvsqr:
self.scheduler = util.WarmupInverseSquareRootSchedule(
self.optimizer, params.warmup_steps
)
else:
raise ValueError
def save_training(self, model_fp):
save_objs = (self.optimizer.state_dict(), self.scheduler.state_dict())
torch.save(save_objs, f"{model_fp}.progress")
def load_training(self, model_fp):
assert self.model is not None
if os.path.isfile(f"{model_fp}.progress"):
optimizer_state, scheduler_state = torch.load(f"{model_fp}.progress")
self.optimizer.load_state_dict(optimizer_state)
self.scheduler.load_state_dict(scheduler_state)
else:
self.logger.warning("cannot find optimizer & scheduler file")
def setup_evalutator(self):
raise NotImplementedError
def get_lr(self):
if isinstance(self.scheduler, ReduceLROnPlateau):
return self.optimizer.param_groups[0]["lr"]
try:
return self.scheduler.get_last_lr()[0]
except AttributeError:
return self.scheduler.get_lr()[0]
def train(self, epoch_idx, batch_size, max_norm):
logger, model = self.logger, self.model
logger.info("At %d-th epoch with lr %f.", epoch_idx, self.get_lr())
model.train()
sampler, nb_batch = self.iterate_batch(TRAIN, batch_size)
losses, cnt = 0, 0
for batch in tqdm(sampler(batch_size), total=nb_batch):
loss = model.get_loss(batch)
self.optimizer.zero_grad()
loss.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
logger.debug(
"loss %f with total grad norm %f",
loss,
util.grad_norm(model.parameters()),
)
self.optimizer.step()
if not isinstance(self.scheduler, ReduceLROnPlateau):
self.scheduler.step()
self.global_steps += 1
losses += loss.item()
cnt += 1
loss = losses / cnt
self.logger.info(f"Running average train loss is {loss} at epoch {epoch_idx}")
return loss
def iterate_batch(self, mode, batch_size):
if mode == TRAIN:
return (self.data.train_batch_sample, ceil(self.data.nb_train / batch_size))
elif mode == DEV:
return (self.data.dev_batch_sample, ceil(self.data.nb_dev / batch_size))
elif mode == TEST:
return (self.data.test_batch_sample, ceil(self.data.nb_test / batch_size))
else:
raise ValueError(f"wrong mode: {mode}")
def calc_loss(self, mode, batch_size, epoch_idx) -> float:
self.model.eval()
sampler, nb_batch = self.iterate_batch(mode, batch_size)
loss, cnt = 0.0, 0
for batch in tqdm(sampler(batch_size), total=nb_batch):
loss += self.model.get_loss(batch).item()
cnt += 1
loss = loss / cnt
self.logger.info(f"Average {mode} loss is {loss} at epoch {epoch_idx}")
return loss
def iterate_instance(self, mode):
if mode == TRAIN:
return self.data.train_sample, self.data.nb_train
elif mode == DEV:
return self.data.dev_sample, self.data.nb_dev
elif mode == TEST:
return self.data.test_sample, self.data.nb_test
else:
raise ValueError(f"wrong mode: {mode}")
def evaluate(self, mode, epoch_idx, decode_fn) -> List[util.Eval]:
raise NotImplementedError
def decode(self, mode, write_fp, decode_fn):
raise NotImplementedError
def update_lr_and_stop_early(self, epoch_idx, devloss, estop):
stop_early = True
if isinstance(self.scheduler, ReduceLROnPlateau):
prev_lr = self.get_lr()
self.scheduler.step(devloss)
curr_lr = self.get_lr()
if (
self.last_devloss - devloss
) < estop and prev_lr == curr_lr == self.min_lr:
self.logger.info(
"Early stopping triggered with epoch %d (previous dev loss: %f, current: %f)",
epoch_idx,
self.last_devloss,
devloss,
)
stop_status = stop_early
else:
stop_status = not stop_early
self.last_devloss = devloss
else:
stop_status = not stop_early
return stop_status
def save_model(
self, epoch_idx, devloss: float, eval_res: List[util.Eval], model_fp
):
eval_tag = "".join(["{}_{}.".format(e.desc, e.res) for e in eval_res])
fp = f"{model_fp}.nll_{devloss:.4f}.{eval_tag}epoch_{epoch_idx}"
torch.save(self.model, fp)
self.models.append(Evaluation(fp, devloss, eval_res))
def select_model(self):
raise NotImplementedError
def reload_and_test(self, model_fp, best_fp, bs, decode_fn):
self.model = None
self.logger.info(f"loading {best_fp} for testing")
self.load_model(best_fp)
self.calc_loss(DEV, bs, -1)
self.logger.info("decoding dev set")
self.decode(DEV, f"{model_fp}.decode", decode_fn)
results = self.evaluate(DEV, -1, decode_fn)
if results:
results = " ".join([f"{r.desc} {r.res}" for r in results])
self.logger.info(f'DEV {model_fp.split("/")[-1]} {results}')
if self.data.test_file is not None:
self.calc_loss(TEST, bs, -1)
self.logger.info("decoding test set")
self.decode(TEST, f"{model_fp}.decode", decode_fn)
results = self.evaluate(TEST, -1, decode_fn)
if results:
results = " ".join([f"{r.desc} {r.res}" for r in results])
self.logger.info(f'TEST {model_fp.split("/")[-1]} {results}')
def cleanup(self, saveall, save_fps, model_fp):
if not saveall:
for model in self.models:
if model.filepath in save_fps:
continue
os.remove(model.filepath)
os.remove(f"{model_fp}.progress")
def run(self, start_epoch, decode_fn=None):
"""
helper for training
"""
self.checklist_before_run()
finish = False
params = self.params
steps_per_epoch = ceil(self.data.nb_train / params.bs)
if params.max_steps > 0:
max_epochs = ceil(params.max_steps / steps_per_epoch)
else:
max_epochs = params.epochs
params.max_steps = max_epochs * steps_per_epoch
self.logger.info(
f"maximum training {params.max_steps} steps ({max_epochs} epochs)"
)
if params.total_eval > 0:
eval_every = max(max_epochs // params.total_eval, 1)
else:
eval_every = 1
self.logger.info(f"evaluate every {eval_every} epochs")
for epoch_idx in range(start_epoch, max_epochs):
self.train(epoch_idx, params.bs, params.max_norm)
if not (
epoch_idx
and (epoch_idx % eval_every == 0 or epoch_idx + 1 == max_epochs)
):
continue
with torch.no_grad():
devloss = self.calc_loss(DEV, params.bs, epoch_idx)
eval_res = self.evaluate(DEV, epoch_idx, decode_fn)
if self.update_lr_and_stop_early(epoch_idx, devloss, params.estop):
finish = True
break
self.save_model(epoch_idx, devloss, eval_res, params.model)
self.save_training(params.model)
if finish or params.cleanup_anyway:
best_fp, save_fps = self.select_model()
with torch.no_grad():
self.reload_and_test(params.model, best_fp, params.bs, decode_fn)
self.cleanup(params.saveall, save_fps, params.model)
| [
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"util.maybe_mkdir",
"math.ceil",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"argparse.ArgumentParser",
"util.get_logger",
"torch.load",
"random.seed",
"os.path.isfile",
"torch.cuda.is_available",
"functools.partial",
"numpy.random.seed",
... | [((340, 382), 'functools.partial', 'partial', (['tqdm'], {'bar_format': '"""{l_bar}{r_bar}"""'}), "(tqdm, bar_format='{l_bar}{r_bar}')\n", (347, 382), False, 'from functools import partial\n'), ((682, 699), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (693, 699), False, 'import random\n'), ((704, 724), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (718, 724), True, 'import numpy as np\n'), ((729, 752), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (746, 752), False, 'import torch\n'), ((760, 785), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (783, 785), False, 'import torch\n'), ((795, 827), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (821, 827), False, 'import torch\n'), ((1081, 1106), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1104, 1106), False, 'import argparse\n'), ((1180, 1215), 'util.maybe_mkdir', 'util.maybe_mkdir', (['self.params.model'], {}), '(self.params.model)\n', (1196, 1215), False, 'import util\n'), ((1238, 1313), 'util.get_logger', 'util.get_logger', (["(self.params.model + '.log')"], {'log_level': 'self.params.loglevel'}), "(self.params.model + '.log', log_level=self.params.loglevel)\n", (1253, 1313), False, 'import util\n'), ((5433, 5476), 'torch.load', 'torch.load', (['model'], {'map_location': 'self.device'}), '(model, map_location=self.device)\n', (5443, 5476), False, 'import torch\n'), ((5710, 5743), 'glob.glob', 'glob.glob', (['f"""{model_prefix}.nll*"""'], {}), "(f'{model_prefix}.nll*')\n", (5719, 5743), False, 'import glob\n'), ((8046, 8091), 'torch.save', 'torch.save', (['save_objs', 'f"""{model_fp}.progress"""'], {}), "(save_objs, f'{model_fp}.progress')\n", (8056, 8091), False, 'import torch\n'), ((8181, 8219), 'os.path.isfile', 'os.path.isfile', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (8195, 8219), False, 'import os\n'), ((12541, 12567), 
'torch.save', 'torch.save', (['self.model', 'fp'], {}), '(self.model, fp)\n', (12551, 12567), False, 'import torch\n'), ((13906, 13939), 'os.remove', 'os.remove', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (13915, 13939), False, 'import os\n'), ((14155, 14191), 'math.ceil', 'ceil', (['(self.data.nb_train / params.bs)'], {}), '(self.data.nb_train / params.bs)\n', (14159, 14191), False, 'from math import ceil\n'), ((7429, 7579), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'ReduceLROnPlateau', (['self.optimizer', '"""min"""'], {'patience': 'params.patience', 'cooldown': 'params.cooldown', 'factor': 'params.discount_factor', 'min_lr': 'params.min_lr'}), "(self.optimizer, 'min', patience=params.patience, cooldown\n =params.cooldown, factor=params.discount_factor, min_lr=params.min_lr)\n", (7446, 7579), False, 'from torch.optim.lr_scheduler import ReduceLROnPlateau\n'), ((8268, 8302), 'torch.load', 'torch.load', (['f"""{model_fp}.progress"""'], {}), "(f'{model_fp}.progress')\n", (8278, 8302), False, 'import torch\n'), ((14250, 14290), 'math.ceil', 'ceil', (['(params.max_steps / steps_per_epoch)'], {}), '(params.max_steps / steps_per_epoch)\n', (14254, 14290), False, 'from math import ceil\n'), ((1572, 1597), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1595, 1597), False, 'import torch\n'), ((7772, 7845), 'util.WarmupInverseSquareRootSchedule', 'util.WarmupInverseSquareRootSchedule', (['self.optimizer', 'params.warmup_steps'], {}), '(self.optimizer, params.warmup_steps)\n', (7808, 7845), False, 'import util\n'), ((10080, 10117), 'math.ceil', 'ceil', (['(self.data.nb_train / batch_size)'], {}), '(self.data.nb_train / batch_size)\n', (10084, 10117), False, 'from math import ceil\n'), ((13872, 13897), 'os.remove', 'os.remove', (['model.filepath'], {}), '(model.filepath)\n', (13881, 13897), False, 'import os\n'), ((15023, 15038), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15036, 15038), False, 'import 
torch\n'), ((15538, 15553), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15551, 15553), False, 'import torch\n'), ((10193, 10228), 'math.ceil', 'ceil', (['(self.data.nb_dev / batch_size)'], {}), '(self.data.nb_dev / batch_size)\n', (10197, 10228), False, 'from math import ceil\n'), ((10306, 10342), 'math.ceil', 'ceil', (['(self.data.nb_test / batch_size)'], {}), '(self.data.nb_test / batch_size)\n', (10310, 10342), False, 'from math import ceil\n')] |
import cv2
import argparse
import numpy as np
def gray2bgr565(input_file, output_file):
img = np.fromfile(input_file, dtype=np.uint16)
img = img.reshape(480, 640)
# img = cv2.imread(input_file, cv2.IMREAD_ANYDEPTH)
ratio = np.amax(img) / 256
img8 = (img / ratio).astype('uint8')
img8 = cv2.cvtColor(img8, cv2.COLOR_GRAY2RGB)
cv2.imwrite(output_file, img8)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Command Usages of ImageHelper')
parser.add_argument("-i", "--input", type=str, help="input image dir")
parser.add_argument("-o", "--output", type=str, help="output image dir")
args = parser.parse_args()
if args.input:
gray2bgr565(args.input, args.output)
else:
parser.print_help() | [
"cv2.imwrite",
"numpy.fromfile",
"argparse.ArgumentParser",
"cv2.cvtColor",
"numpy.amax"
] | [((99, 139), 'numpy.fromfile', 'np.fromfile', (['input_file'], {'dtype': 'np.uint16'}), '(input_file, dtype=np.uint16)\n', (110, 139), True, 'import numpy as np\n'), ((311, 349), 'cv2.cvtColor', 'cv2.cvtColor', (['img8', 'cv2.COLOR_GRAY2RGB'], {}), '(img8, cv2.COLOR_GRAY2RGB)\n', (323, 349), False, 'import cv2\n'), ((354, 384), 'cv2.imwrite', 'cv2.imwrite', (['output_file', 'img8'], {}), '(output_file, img8)\n', (365, 384), False, 'import cv2\n'), ((426, 494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Command Usages of ImageHelper"""'}), "(description='Command Usages of ImageHelper')\n", (449, 494), False, 'import argparse\n'), ((240, 252), 'numpy.amax', 'np.amax', (['img'], {}), '(img)\n', (247, 252), True, 'import numpy as np\n')] |
import math
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
import pdb
import sys
from ilqr.vehicle_model import Model
from ilqr.local_planner import LocalPlanner
from ilqr.constraints import Constraints
class iLQR():
def __init__(self, args, obstacle_bb, verbose=False):
self.args = args
self.Ts = args.timestep
self.N = args.horizon
self.tol = args.tol
self.obstacle_bb = obstacle_bb
self.verbose = verbose
self.global_plan = None
self.local_planner = LocalPlanner(args)
self.vehicle_model = Model(args)
self.constraints = Constraints(args, obstacle_bb)
# initial nominal trajectory
self.control_seq = np.zeros((self.args.num_ctrls, self.args.horizon))
self.control_seq[0, :] = np.ones((self.args.horizon)) * 0.5
self.debug_flag = 0
self.lamb_factor = 10
self.max_lamb = 1000
# self.fig, (self.ax1, self.ax2, self.ax3) = plt.subplots(1,3, num=0, figsize=(20, 5))
def set_global_plan(self, global_plan):
self.global_plan = global_plan
self.local_planner.set_global_planner(self.global_plan)
def get_nominal_trajectory(self, X_0, U):
X = np.zeros((self.args.num_states, self.args.horizon+1))
X[:, 0] = X_0
for i in range(self.args.horizon):
X[:, i+1] = self.vehicle_model.forward_simulate(X[:, i], U[:, i])
return X
def forward_pass(self, X, U, k, K):
X_new = np.zeros((self.args.num_states, self.args.horizon+1))
X_new[:, 0] = X[:, 0]
U_new = np.zeros((self.args.num_ctrls, self.args.horizon))
# Do a forward rollout and get states at all control points
for i in range(self.args.horizon):
U_new[:, i] = U[:, i] + k[:, i] + K[:, :, i] @ (X_new[:, i] - X[:, i])
X_new[:, i+1] = self.vehicle_model.forward_simulate(X_new[:, i], U_new[:, i])
return X_new, U_new
def backward_pass(self, X, U, poly_coeff, x_local_plan, npc_traj, lamb):
# Find control sequence that minimizes Q-value function
# Get derivatives of Q-function wrt to state and control
l_x, l_xx, l_u, l_uu, l_ux = self.constraints.get_cost_derivatives(X[:, 1:], U, poly_coeff, x_local_plan, npc_traj)
df_dx = self.vehicle_model.get_A_matrix(X[2, 1:], X[3, 1:], U[0,:])
df_du = self.vehicle_model.get_B_matrix(X[3, 1:])
# Value function at final timestep is known
V_x = l_x[:,-1]
V_xx = l_xx[:,:,-1]
# Allocate space for feedforward and feeback term
k = np.zeros((self.args.num_ctrls, self.args.horizon))
K = np.zeros((self.args.num_ctrls, self.args.num_states, self.args.horizon))
# Run a backwards pass from N-1 control step
for i in range(self.args.horizon-1,-1,-1):
Q_x = l_x[:,i] + df_dx[:,:,i].T @ V_x
Q_u = l_u[:,i] + df_du[:,:,i].T @ V_x
Q_xx = l_xx[:,:,i] + df_dx[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_ux = l_ux[:,:,i] + df_du[:,:,i].T @ V_xx @ df_dx[:,:,i]
Q_uu = l_uu[:,:,i] + df_du[:,:,i].T @ V_xx @ df_du[:,:,i]
# Q_uu_inv = np.linalg.pinv(Q_uu)
Q_uu_evals, Q_uu_evecs = np.linalg.eig(Q_uu)
Q_uu_evals[Q_uu_evals < 0] = 0.0
Q_uu_evals += lamb
Q_uu_inv = np.dot(Q_uu_evecs,np.dot(np.diag(1.0/Q_uu_evals), Q_uu_evecs.T))
# Calculate feedforward and feedback terms
k[:,i] = -Q_uu_inv @ Q_u
K[:,:,i] = -Q_uu_inv @ Q_ux
# Update value function for next time step
V_x = Q_x - K[:,:,i].T @ Q_uu @ k[:,i]
V_xx = Q_xx - K[:,:,i].T @ Q_uu @ K[:,:,i]
return k, K
def run_step(self, ego_state, npc_traj):
assert self.global_plan is not None, "Set a global plan in iLQR before starting run_step"
self.local_planner.set_ego_state(ego_state)
ref_traj, poly_coeff = self.local_planner.get_local_plan()
X_0 = np.array([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])
# self.control_seq[:, :-1] = self.control_seq[:, 1:]
# self.control_seq[:, -1] = np.zeros((self.args.num_ctrls))
X, U = self.get_optimal_control_seq(X_0, self.control_seq, poly_coeff, ref_traj[:, 0], npc_traj)
traj = X[:2, ::int(self.args.horizon/10)].T
self.control_seq = U
# self.plot(U, X, ref_traj)
return traj, ref_traj, U #self.filter_control(U, X[2,:])
def get_optimal_control_seq(self, X_0, U, poly_coeff, x_local_plan, npc_traj):
X = self.get_nominal_trajectory(X_0, U)
J_old = sys.float_info.max
lamb = 1 # Regularization parameter
# Run iLQR for max iterations
for itr in range(self.args.max_iters):
k, K = self.backward_pass(X, U, poly_coeff, x_local_plan, npc_traj, lamb)
# Get control values at control points and new states again by a forward rollout
X_new, U_new = self.forward_pass(X, U, k, K)
J_new = self.constraints.get_total_cost(X, U, poly_coeff, x_local_plan, npc_traj)
if J_new < J_old:
X = X_new
U = U_new
lamb /= self.lamb_factor
if (abs(J_old - J_new) < self.args.tol):
print("Tolerance reached")
break
else:
lamb *= self.lamb_factor
if lamb > self.max_lamb:
break
J_old = J_new
# print(J_new)
return X, U
def filter_control(self, U, velocity):
U[1] = np.arctan2(self.args.wheelbase*U[1],velocity[:-1])
return U
def plot(self, control, X, ref_traj):
self.ax1.clear()
self.ax1.plot(np.arange(len(control[0])), control[0,:], color='g', label='Acc')
self.ax1.plot(np.arange(len(control[0])), control[1,:], color='b', label='Yaw Rate')
self.ax1.set_ylabel('Values')
self.ax1.set_xlabel('Time')
self.ax1.set_title('Controls',fontsize=18)
# self.ax1.xlim(0, len(control[0]))
# self.ax1.ylim(-6, 6)
# self.ax1.axis('equal')
self.ax1.legend()
self.ax1.grid()
self.ax2.clear()
self.ax2.plot(ref_traj[:, 0], ref_traj[:, 1], color='r', label='Ref Traj')
self.ax2.plot(X[0, :], X[1, :], color='g', label='Real Traj')
self.ax2.set_ylabel('y')
self.ax2.set_xlabel('x')
self.ax2.set_title('Position Trajectory',fontsize=18)
self.ax2.legend()
self.ax2.grid()
# plt.legend()
self.ax3.clear()
self.ax3.plot(np.arange(len(X[0])), X[2, :], color='r', label='Velocity')
self.ax3.plot(np.arange(len(X[0])), X[3, :], color='g', label='Yaw')
self.ax3.set_ylabel('Values')
self.ax3.set_xlabel('Time')
self.ax3.set_title('Traj',fontsize=18)
self.ax3.grid()
self.ax3.legend()
plt.pause(0.001)
| [
"ilqr.constraints.Constraints",
"numpy.ones",
"numpy.linalg.eig",
"ilqr.local_planner.LocalPlanner",
"ilqr.vehicle_model.Model",
"numpy.diag",
"numpy.array",
"numpy.zeros",
"numpy.arctan2",
"matplotlib.pyplot.pause"
] | [((574, 592), 'ilqr.local_planner.LocalPlanner', 'LocalPlanner', (['args'], {}), '(args)\n', (586, 592), False, 'from ilqr.local_planner import LocalPlanner\n'), ((622, 633), 'ilqr.vehicle_model.Model', 'Model', (['args'], {}), '(args)\n', (627, 633), False, 'from ilqr.vehicle_model import Model\n'), ((661, 691), 'ilqr.constraints.Constraints', 'Constraints', (['args', 'obstacle_bb'], {}), '(args, obstacle_bb)\n', (672, 691), False, 'from ilqr.constraints import Constraints\n'), ((765, 815), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (773, 815), True, 'import numpy as np\n'), ((1280, 1335), 'numpy.zeros', 'np.zeros', (['(self.args.num_states, self.args.horizon + 1)'], {}), '((self.args.num_states, self.args.horizon + 1))\n', (1288, 1335), True, 'import numpy as np\n'), ((1551, 1606), 'numpy.zeros', 'np.zeros', (['(self.args.num_states, self.args.horizon + 1)'], {}), '((self.args.num_states, self.args.horizon + 1))\n', (1559, 1606), True, 'import numpy as np\n'), ((1651, 1701), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (1659, 1701), True, 'import numpy as np\n'), ((2655, 2705), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.horizon))\n', (2663, 2705), True, 'import numpy as np\n'), ((2718, 2790), 'numpy.zeros', 'np.zeros', (['(self.args.num_ctrls, self.args.num_states, self.args.horizon)'], {}), '((self.args.num_ctrls, self.args.num_states, self.args.horizon))\n', (2726, 2790), True, 'import numpy as np\n'), ((4081, 4159), 'numpy.array', 'np.array', (['[ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]]'], {}), '([ego_state[0][0], ego_state[0][1], ego_state[1][0], ego_state[2][2]])\n', (4089, 4159), True, 'import numpy as np\n'), ((5739, 5792), 'numpy.arctan2', 'np.arctan2', (['(self.args.wheelbase * U[1])', 
'velocity[:-1]'], {}), '(self.args.wheelbase * U[1], velocity[:-1])\n', (5749, 5792), True, 'import numpy as np\n'), ((7091, 7107), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (7100, 7107), True, 'import matplotlib.pyplot as plt\n'), ((849, 875), 'numpy.ones', 'np.ones', (['self.args.horizon'], {}), '(self.args.horizon)\n', (856, 875), True, 'import numpy as np\n'), ((3289, 3308), 'numpy.linalg.eig', 'np.linalg.eig', (['Q_uu'], {}), '(Q_uu)\n', (3302, 3308), True, 'import numpy as np\n'), ((3433, 3458), 'numpy.diag', 'np.diag', (['(1.0 / Q_uu_evals)'], {}), '(1.0 / Q_uu_evals)\n', (3440, 3458), True, 'import numpy as np\n')] |
import argparse
import itertools
import json
import logging
import os
import pickle
import time
import warnings
from collections import Counter, defaultdict
from typing import Dict, Any, List, Iterable, Tuple, Set
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
import langdetect
import spacy
from gensim import corpora
from gensim.corpora import IndexedCorpus
from gensim.models import HdpModel, LdaMulticore
from gensim.models.basemodel import BaseTopicModel
from langdetect.lang_detect_exception import LangDetectException
from langdetect.language import Language
from spacy.tokens.doc import Doc
from spacy.tokens.token import Token
import text_processing
import util
from data import JsonLinesCorpus, Topic, Document, DocumentCollection
from util import ProgressLog
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logger = logging.getLogger('topic-models')
logger.setLevel(logging.INFO)
#
# goal: go from plaintext PDFs + optional metadata file (result of parser) to id-topic-mapping (input for es index)
# add optional pre-classification (id-class mapping) as first layer of the hierarchical topic model
#
# start: need corpus-id-mapping and metadata-by-doc-id.
# first layer: split by category and write multiple corpora, each with own id-mapping
# subsequent layers: split by assigned topic, add id-mapping, store topics in central id-topic-dict
# side-effect: build topic-tree, store relations in Topic objects (get parents and children, root topic is "main")
#
# so, the id-category-map is a one-time thingy that we don't need to preserve at all. Just write everything
# into the topic tree and document-topic-mapping immediately
#
# steps:
# - calculate topic model from document collection
# - classify documents using this model, store topic labels in document objects
# - create one new model per topic with different hyperparameters and train it with the sub-corpus consisting only of
# the documents in this topic
# - recur
#
# issues:
# - need to build a new mmcorpus and a corpusindex-docid-mapping for each model
#
# data structure: LayeredTopicModel
# - recursive structure, initialize for every subsequent layer
# - the build script requires lots of state and temporary files
# -> maybe have a separate builder, that spits out the final model...
# - the final model consists of multiple topic models + metadata in a single archive
#
# topic model visualization: https://github.com/bmabey/pyLDAvis
#
class TopicModel:
def __init__(self, file_pdf_text: str = None, file_corpus_input: str = None,
file_metadata: str = None, file_output_prefix: str = None, abstracts_only=False,
language_filter: str = None, model: str = "hdp", batch_size=100, n_threads=None,
topic_layers: List[int] = None, topic_limit_per_layer: List[int] = None,
category_layer=False, min_docs_per_topic: int = None, token_min_count=1,
dict_size_limit=10000, document_limit: int = None):
"""
:param file_pdf_text: path to the file containing the parsed PDFs (output of pdf_parser)
:param file_corpus_input: path to the file containing the tokens of the parsed pdfs
(optional, preferred over file_pdf_text)
:param file_metadata: path to the metadata file (output of arxiv_crawler. required,
if the category layer should be used)
:param file_output_prefix: all output files, including temporary files, will be prefixed
with this string. all results will be stored under this prefix aswell.
:param abstracts_only: use only title and abstract for the topic model instead of the
full document text
:param language_filter: filter by the specified language code. the spacy parser we use
currenlty only supports english text, so 'en' is a reasonable value here
(though not a requirement)
:param model: specify the model to use. supported models: "hdp", "lda"
:param batch_size: the batch size of the spacy parser
:param n_threads: the number of threads to use on parallelizable tasks (e.g. spacy)
:param topic_layers: how many topics are to be calculated on each nested topic layer
:param topic_limit_per_layer: how many of those topics should have a fixed limit during
classification (i.e. each document can be only part of up to N topics instead of as
many as the topic model yields)
:param category_layer: use the categories extracted from metadata as the first layer
:param min_docs_per_topic: how many documents are required for each sub-topic to add
(e.g. min_docs = 100, we have 1000 documents, this limits the number of sub-topics to 10)
:param token_min_count: lowest allowed token count for words that may appear in the dictionary
:param dict_size_limit: the total size limit of the dictionary (take the N most frequent terms)
:param document_limit: just process the first N documents (useful for testing)
"""
super().__init__()
# file paths
self.file_pdf_text = file_pdf_text
self.file_corpus_input = file_corpus_input
self.file_metadata = file_metadata
self.file_output_prefix = file_output_prefix
# derived paths
self.file_tasklog = file_output_prefix + '-progress.log'
self.file_corpus_plain = file_corpus_input or file_output_prefix + '-corpus-plain.json.bz2'
self.file_corpus = file_output_prefix + '-corpus.json'
self.file_dict = file_output_prefix + '-lemma.dict.bz2'
self.file_ids = file_output_prefix + '-ids.json'
self.file_docs = file_output_prefix + '-docs.json'
self.file_model = file_output_prefix + '-hdp.pkl.bz2'
self.file_topics = file_output_prefix + '-topics.json.bz2'
# application config
self.abstracts_only = abstracts_only
self.language_filter = language_filter
self.model = model
self.batch_size = batch_size
self.n_threads = n_threads or max(2, int(os.cpu_count() / 2))
self.topic_layers = topic_layers or [10]
self.topic_limit_per_layer = topic_limit_per_layer or [0] * len(topic_layers)
self.category_layer = category_layer
self.min_docs_per_topic = min_docs_per_topic
self.token_min_count = token_min_count
self.dict_size_limit = dict_size_limit
self.document_limit = document_limit
# integrity checks
if not abstracts_only and not file_pdf_text and not file_corpus_input:
raise ValueError("At least one of the parameters 'file_pdf_text' or 'file_token_input' "
"is required, if 'abstracts_only' is not enabled.")
if (category_layer or abstracts_only) and not file_metadata:
raise ValueError("The parameter 'file_metadata' is required, if 'category_layer' "
"or 'abstracts_only' is True.")
if not file_output_prefix:
raise ValueError("The output path must not be empty.")
    def build(self, force=False):
        """
        Run the complete pipeline: token counts + dictionary, reduced corpus, optional
        category layer, and finally the nested topic model with document classification.
        Steps already recorded in the progress log are skipped unless `force` is set.

        :param force: if True, re-run all steps even if the progress log marks them as finished
        """
        # evaluate progress information (no need to do long-running tasks twice)
        progress = ProgressLog(self.file_tasklog)
        if progress.finished:
            logger.info("skipping {} tasks that have already been finished".format(len(progress.finished)))
        # unify declarations
        if isinstance(self.topic_layers, int):
            self.topic_layers = [self.topic_layers]
        # build the corpus (if required) and vocabulary
        if force or 'token_dict' not in progress:
            self.stream_token_dict()
            progress.add('token_dict', "finished calculating the token counts and the global dictionary for all documents")
        # create a reduced version of the corpus based on the provided dictionary
        if force or 'reduced_corpus' not in progress:
            self.stream_reduced_corpus()
            progress.add('reduced_corpus', "")
        # build the category layer (if specified)
        if self.category_layer and (force or 'metadata' not in progress):
            self.stream_metadata()
            progress.add('metadata', "finished extracting categories from document metadata")
        # build the nested topic model and classify documents
        if force or 'topic_model' not in progress:
            self.stream_nested_topic_model()
            progress.add('topic_model', "")
        logger.info("build completed. Classification results have been stored in `{}`".format(self.file_topics))
    def stream_nested_topic_model(self):
        """
        Build the hierarchical topic model layer by layer: start from the root topic
        (or the category layer, if enabled), train one model per parent topic,
        classify that topic's documents into sub-topics and recurse for each layer
        configured in self.topic_layers. Finally serialize the resulting
        DocumentCollection to self.file_topics.
        """
        # initialize data structures
        root_topic = Topic('root', layer=0)
        current_topics = None  # type: List[Topic]
        documents = None  # type: Dict[str, Document]
        dictionary = self.load_dictionary()
        if self.category_layer:
            logger.info("building first topic layer from document metadata...")
            current_topics = self.topics_from_metadata(root_topic)
            documents = self.docs_from_metadata(current_topics)
        else:
            current_topics = [root_topic]
            documents = self.docs_from_ids()
        # build topic model and classify documents
        logger.info("building topic models and classifying documents...")
        for idx, (num_topics, topic_limit) in enumerate(zip(self.topic_layers, self.topic_limit_per_layer)):
            logger.info("Processing layer {} of {}, with {} sub-topics per parent topic{}"
                        .format(idx+1, len(self.topic_layers), num_topics, " (max. {} topics per doc)"
                                .format(topic_limit) if topic_limit else ""))
            # TODO add option to remove temporary data immediately
            # collect topics for the next iteration
            next_topics = []  # type: List[Topic]
            # go through the documents of each topic
            for topic in current_topics:
                logger.info("Processing documents in topic '{}'...".format(topic.topic_id))
                # load the last corpus that was created for this topic's parent
                corpus = self.load_corpus_for_topic(topic.parent if topic != root_topic else topic)
                # reduce the corpus so it only contains the documents we need
                sub_corpus = self.corpus2corpus(corpus, documents, topic) if topic != root_topic else corpus
                if sub_corpus:  # only continue, if there are actually documents with this topic
                    # limit the number of sub-topics, if necessary
                    num_topics_adjusted = min(int(len(sub_corpus) / self.min_docs_per_topic), num_topics) \
                        if self.min_docs_per_topic else num_topics
                    if num_topics_adjusted <= 3:
                        # a model with so few topics is not useful -- skip this branch
                        logger.info("skipping topic {} (too few documents: {})".format(topic.topic_id, len(sub_corpus)))
                    else:
                        # build the topic model
                        self.stream_topic_model(topic, dictionary, sub_corpus, num_topics_adjusted)
                        # classify documents using the topic model
                        sub_topics = self.stream_classify_documents(topic, sub_corpus, documents, topic_limit=topic_limit)
                        # save the sub-topics for the next layer
                        next_topics.extend(sub_topics)
                        logger.info("All {} documents in topic '{}' have been classified".format(len(sub_corpus), topic.topic_id))
                else:
                    logger.warning("there are no documents in topic '{}'. Hint: parent topic '{}' has {} documents"
                                   .format(topic.topic_id, topic.parent.topic_id if topic.parent else "root", len(corpus)))
            # select the topics for the next iteration
            current_topics = next_topics
        logger.info("all {} documents have been classified. storing results...".format(len(documents)))
        topics = {topic.topic_id: topic for topic in root_topic._collect_topics()}
        collection = DocumentCollection(topics, documents)
        util.json_write(collection.to_dict(), self.file_topics, pretty=False)
    def stream_token_dict(self):
        """
        make a single run over the file containing all documents as plaintext.
        Parse all documents using spacy, store the token counts for each document
        and build the global token dict
        """
        if self.file_corpus_input:
            # a pre-tokenized corpus was supplied -- only the dictionary must be built
            logger.info("reading corpus from '{}'".format(self.file_corpus_plain))
            corpus = JsonLinesCorpus(self.file_corpus_input)
            return self.store_gensim_dict(corpus)
        else:
            if self.abstracts_only:
                logger.info("reading abstracts from '{}'".format(self.file_metadata))
                documents = util.json_read_lines(self.file_metadata, self.get_title_and_abstract)
            else:
                logger.info("reading documents from '{}'".format(self.file_pdf_text))
                documents = util.json_read_lines(self.file_pdf_text, self.combine_pages)
            # limit document count (if configured)
            # NOTE(review): `next(documents)` inside a generator expression raises
            # RuntimeError on Python 3.7+ (PEP 479) if fewer than `document_limit`
            # documents exist -- consider itertools.islice; verify before relying on it
            documents_limited = (next(documents) for i in range(self.document_limit)) if self.document_limit else documents
            # filter by document language (if configured)
            documents_filtered = self.filter_by_lang(documents_limited, self.language_filter) if self.language_filter else documents_limited
            # parse documents using spacy
            documents_tokens = self.spacy_parse(documents_filtered, batch_size=self.batch_size, n_threads=self.n_threads)
            # stream intermediate result to disk (in case data does not fit in RAM, which it won't if you're serious about this stuff)
            return self.store_tokens_and_gensim_dict(documents_tokens)
def stream_reduced_corpus(self):
corpus = JsonLinesCorpus(self.file_corpus_plain)
if corpus.has_plain_tokens():
logger.info("building a reduced version of corpus '{}'".format(self.file_corpus_plain))
dictionary = self.load_dictionary()
corpus.convert_tokens_to_ids(self.file_corpus, id2word=dictionary.id2token)
else:
# corpus is already in reduced format. continue...
self.file_corpus = self.file_corpus_plain
    def stream_metadata(self):
        """
        Read the crawled metadata file, attach the top-level categories to every known
        document and persist the document index to self.file_docs.
        """
        # get the IDs of all documents we need
        documents = self.docs_from_ids()
        # read the metadata file and extract all categories for the documents we want
        logger.info("reading metadata from " + self.file_metadata)
        metadata = util.json_read_lines(self.file_metadata)  # type: List[Dict[str,Any]]
        category_count = Counter()
        for meta_dict in metadata:
            # the document id is the last part of the OAI identifier (after the final ':')
            doc_id = meta_dict['header']['identifier'].split(':')[-1]
            # match doc ids
            if doc_id in documents:
                doc = documents[doc_id]
                categories = meta_dict['header']['setSpecs']
                # keep only the top-level category (part before ':'), deduplicated and sorted
                categories_clean = sorted(set(c.split(':')[0] for c in categories))
                doc.categories = categories_clean
                for cat in categories_clean:
                    category_count[cat] += 1
        # integrity check
        for doc in documents.values():
            if doc.categories is None:
                logger.warning("there was no metadata entry for document '{}'".format(doc.doc_id))
        # reading finished. print stats and write to file
        logger.info("categories for {} documents have been read: {}".format(len(documents), category_count.items()))
        util.json_write(Document.store_documents(documents.values()), self.file_docs, pretty=False)
    def stream_topic_model(self, topic: Topic, dictionary: corpora.Dictionary = None,
                           corpus: IndexedCorpus = None, num_topics=20, max_topics_per_doc=5):
        """
        Train a topic model ("lda" or "hdp", selected by self.model) on the documents
        of the given topic and pickle it to the topic's model path.

        :param topic: the topic whose documents form the training corpus
        :param dictionary: the gensim dictionary (loaded from self.file_dict when omitted)
        :param corpus: the training corpus (the full corpus is loaded when omitted)
        :param num_topics: number of topics to calculate (parameter T for hdp)
        :param max_topics_per_doc: hdp only: maximum number of topics per document (K)
        :raises ValueError: if self.model is neither "lda" nor "hdp"
        """
        # load dictionary and corpus, if necessary
        if not dictionary:
            dictionary = self.load_dictionary()
            logger.warning("the default dictionary was loaded from file. "
                           "You should keep an instance in memory instead of calling this in a loop...")
        if not corpus:
            corpus = JsonLinesCorpus(self.file_corpus)
            logger.warning("the default corpus was loaded from file. You should provide a "
                           "reduced corpus to increase performance (see corpus2corpus)")
        # build the model
        logger.info("building a topic model with {} topics for {} documents in topic '{}'"
                    .format(num_topics, len(corpus), topic.topic_id))
        t0 = time.time()
        if self.model == "lda":
            model = LdaMulticore(corpus, id2word=dictionary.id2token, num_topics=num_topics,
                                 passes=2, iterations=50, chunksize=2000, workers=self.n_threads)
        elif self.model == "hdp":
            # T = overall topic limit, K = max topics per document
            model = HdpModel(corpus, id2word=dictionary.id2token, T=num_topics, K=max_topics_per_doc)
        else:
            raise ValueError("Unknown model identifier '{}'".format(self.model))
        t1 = time.time()
        # serialize
        logger.info("building the model took {:.1f} s. Serializing model...".format(t1-t0))
        output_path = self._get_model_path(topic)
        with util.open_by_ext(output_path, 'wb') as fp:
            pickle.dump(model, fp, protocol=4)
        logger.info("model dump finished, took {:.1f} s".format(time.time()-t1))
def stream_classify_documents(self, parent_topic: Topic, corpus: JsonLinesCorpus,
documents: Dict[str, Document], topic_limit=0) -> List[Topic]:
# load the actual topic model
model = self.load_model(self._get_model_path(parent_topic)) # type: HdpModel
# build Topic objects from model
topics = {}
try:
for i in itertools.count():
topic_id = "{}-{}".format(parent_topic.topic_id, i)
show_topic_kwargs = {}
if self.model == "hdp":
show_topic_kwargs = {'num_words': 10, 'formatted': False}
elif self.model == "lda":
show_topic_kwargs = {'topn': 10}
topic_terms = [(term, round(score, 5)) for term, score in model.show_topic(i, **show_topic_kwargs)]
topic = parent_topic.add_child(topic_id, topic_terms)
topics[i] = topic
except IndexError:
pass # most pythonic way to interrupt iteration, if # of elements is unknown...
# calculate the topics for each document
logger.info("classifying {} documents from topic '{}' into {} new categories"
.format(len(corpus), parent_topic.topic_id, len(topics)))
t = time.time()
for i, doc_dict in enumerate(corpus.iter_all()):
if not doc_dict['id'] or doc_dict['id'] not in documents:
logger.warning("Document '{}' at corpus index {} (topic: {}) was not found "
"in the document index and will be skipped"
.format(doc_dict['id'], parent_topic.topic_id, i))
continue
doc_id = doc_dict['id']
tokens = doc_dict['tokens']
document = documents[doc_id]
assert document.topics is None or parent_topic in document.topics, \
"tried to classify a document which is not part of the current topic"
doc_topics = sorted(model[tokens], key=lambda x: x[1], reverse=True) # type: List[Tuple[str, float]]
for topic_idx, score in (doc_topics[:topic_limit] if topic_limit else doc_topics):
if score > 0.10:
document.add_topic(topics[topic_idx], round(score, 5))
if (i+1) % 10000 == 0:
t1 = time.time()
logger.info("{}/{} documents have been classified ({:.2f} doc/min)"
.format(i+1, len(corpus), self.batch_size*60/(t1-t)))
t = t1
return list(topics.values())
def corpus2corpus(self, corpus: JsonLinesCorpus, documents: Dict[str, Document], topic: Topic) -> JsonLinesCorpus:
"""
get a subset of a corpus. It will include all documents that contain
the specified topic.
Writes the reduced corpus to a new file whose name is derived from the document ID
:param corpus: the source corpus
:param documents: the document definition (contains document topics)
:param topic: filter all documents in the corpus by this topic
:return: a new corpus containing only the filtered documents
"""
logger.info("creating a subset of corpus '{}' for topic '{}'".format(corpus.fname, topic.topic_id))
# specify the filter function
def doc_filter(doc_dict: Dict[str, Any]) -> bool:
"""
:return: True, iff this document has the specified topic
"""
doc = documents[doc_dict['id']]
return doc.topics and topic in doc.topics
# build the new corpus
corpus_path = self._get_corpus_path(topic)
return corpus.subset(corpus_path, doc_filter)
def test_model(self, fin_corpus: str, fin_model: str):
model = self.load_model(fin_model)
model.print_topics(num_topics=-1, num_words=10)
corpus = JsonLinesCorpus(fin_corpus)
for tokens in corpus:
topics = model[tokens]
print("dominant topics in https://arxiv.org/abs/{}".format(tokens))
for topic, score in sorted(topics, key=lambda x: x[1], reverse=True):
print("topic {} @ {:.3f}: {}".format(topic, score, model.print_topic(topic)))
def test_document_topics(self):
# get best matching documents + URLs per topic
topic_model = DocumentCollection.from_dict(util.json_read(self.file_topics))
docs_by_first_topic = defaultdict(list)
# group documents by first topic
for id, doc in topic_model.documents.items():
if doc.topics:
topic, score = doc.topics[0]
docs_by_first_topic[topic].append((id, score))
else:
logger.warning("document {} has no topics".format(doc.doc_id))
# sort by score descending
for doc_list in docs_by_first_topic.values():
doc_list.sort(key=lambda x: x[1], reverse=True)
# print highest scoring documents for each topic
for topic in topic_model.topics.values():
print("Topic {}: {}".format(topic.topic_id, topic.tokens))
for doc_id, score in docs_by_first_topic[topic.topic_id][:10]:
print("paper https://arxiv.org/abs/{} with score {}".format(doc_id.replace('-', '/'), score))
def docs_from_ids(self) -> Dict[str, Document]:
return {doc_id: Document(doc_id) for doc_id in util.json_read(self.file_ids)}
def docs_from_metadata(self, topics: List[Topic]) -> Dict[str, Document]:
# restore documents
topic_dict = {t.topic_id: t for t in topics}
documents = Document.restore_documents(util.json_read(self.file_docs), topic_dict)
# add topics to documents (one for each category)
if self.category_layer:
for doc in documents.values():
if doc.categories:
for category in doc.categories:
doc.add_topic(topic_dict[category], 1.0)
else:
logger.warning("Document {} has no categories!".format(doc.doc_id))
return documents
def topics_from_metadata(self, parent_topic: Topic) -> List[Topic]:
# note: some papers do not have categories (especially very old ones)
categories = (doc_dict['categories'] for doc_dict in util.json_read(self.file_docs) if doc_dict['categories'])
topic_ids = sorted(set(util.flatten(categories, generator=True)))
topics = [parent_topic.add_child(topic_id) for topic_id in topic_ids]
return topics
    def load_dictionary(self) -> corpora.Dictionary:
        """Load the gensim dictionary from self.file_dict with id2token initialized."""
        dictionary = corpora.Dictionary.load(self.file_dict)
        dictionary[0]  # forces id2token to be calculated. Probably a bug in gensim...
        return dictionary
def load_corpus_for_topic(self, topic: Topic) -> JsonLinesCorpus:
corpus_path = self._get_corpus_path(topic)
if os.path.isfile(corpus_path):
# load the corpus for this topic (if available)
return JsonLinesCorpus(self._get_corpus_path(topic))
else:
if topic.parent:
# ok, try again with this topic's parent
return self.load_corpus_for_topic(topic.parent)
else:
# no parent left? then use the root corpus
return JsonLinesCorpus(self.file_corpus)
def _get_topic_file_prefix(self, topic: Topic) -> str:
"""
get a file prefix based on the output path of this instance and the topic id
"""
return "{}-topic-{}".format(self.file_output_prefix, topic.topic_id)
def _get_model_path(self, topic: Topic) -> str:
"""
get the path of the model associated with this topic
"""
return self._get_topic_file_prefix(topic) + '-model.pkl.bz2'
def _get_corpus_path(self, topic: Topic) -> str:
"""
get the path of the model associated with this topic
"""
return self._get_topic_file_prefix(topic) + '-corpus.json'
    @staticmethod
    def load_model(file_model: str) -> BaseTopicModel:
        """
        Unpickle a topic model from the given file (transparently decompressed).
        NOTE(review): pickle.load executes arbitrary code on deserialization --
        only ever load model files produced by this pipeline itself.
        """
        logger.debug("loading model from file '{}'...".format(file_model))
        with util.open_by_ext(file_model, 'rb') as fp:
            return pickle.load(fp)
@staticmethod
def filter_by_lang(documents: Iterable[Dict[str, Any]], lang_code: str, threshold=0.8,
broken_codes=['cy', 'ca', 'pt']) -> Iterable[Dict[str, Any]]:
logger.info("will only accept documents in language '{}'".format(lang_code))
counter = Counter()
for i, entry in enumerate(documents):
id = entry['id']
doc = entry['text']
if not doc:
logger.debug("empty document at index %s", i)
continue
sample = doc[5000:6000] if len(doc) >= 6000 else doc[:1000]
try:
langs = langdetect.detect_langs(sample) # type: List[Language]
lang = langs[0].lang
proba = langs[0].prob
if lang != lang_code or proba < threshold:
logger.debug("language: {}, {:.3f}, {}, \"{}\"".format(lang, proba, id, sample[:100].replace('\n', '\\n')))
if proba < threshold or lang in broken_codes:
counter['_failed'] += 1
else:
counter[lang] += 1
else:
counter[lang] += 1
yield entry
except LangDetectException:
logger.warning("language detection failed on document {} (sample: {})".format(id, sample[:1000]), exc_info=1)
logger.info("Results of language detection: {}".format(str(counter.most_common())))
@classmethod
def spacy_parse(cls, documents: Iterable[Dict[str, Any]], batch_size=10, n_threads=1) -> Iterable[Dict[str, Any]]:
logger.debug("loading spacy model...")
t = time.time()
nlp = spacy.load('en', parser=False)
logger.info("loading spacy model took {:.2f}s. Processing documents using spacy...".format(time.time() - t))
# preserve document IDs
gen1, gen2 = itertools.tee(documents)
ids = (x['id'] for x in gen1)
texts = (x['text'] for x in gen2)
docs = nlp.pipe(texts)
# start the actual work and join the results with the IDs again
t = time.time()
count = 0
docs = nlp.pipe(texts, batch_size=batch_size, n_threads=n_threads)
for id, doc in zip(ids, docs): # type: Tuple[str, Doc]
count += 1
if count % batch_size == 0:
t1 = time.time()
logger.info("a total of {} documents has been processed, took {:.2f}s ({:.2f} doc/min, {} thread(s))"
.format(count, t1-t, batch_size*60/(t1-t), n_threads))
t = t1
# skip undesired tokens
tokens = cls.filter_tokens(doc)
lemmata = [token.lemma_ for token in tokens]
yield {'id': id, 'tokens': lemmata}
@staticmethod
def filter_tokens(document: Doc) -> List[Token]:
"""
conditions are
- length > 1
- first character is alpha
- no space or punctuation
- consists of few strange characters
:param document:
:return:
"""
pos_filter = ['SPACE', 'PUNCT']
return [token for token in document if
len(token) > 1 and
token.string[0].isalpha() and
token.pos_ not in pos_filter and
(token.is_alpha or text_processing.has_valid_chars(token.string))]
    def store_tokens_and_gensim_dict(self, documents: Iterable[Dict[str,Any]]):
        """
        process token stream to build dictionary in memory and dump tokens as one json per line to file.
        afterwards, serialize the entire dictionary.

        :param documents: stream of dicts with 'id' and 'tokens' keys
        :return: tuple of (set of document ids, the gensim dictionary)
        """
        logger.info("building the dictionary and storing the corpus...")
        dictionary = corpora.Dictionary()
        doc_ids = set()
        with util.open_by_ext(self.file_corpus_plain, 'wt', encoding='utf-8') as fp:
            for entry in documents:
                doc_id = entry['id']
                tokens = entry['tokens']  # type: List[str]
                token_counts = Counter(tokens)
                doc_ids.add(doc_id)
                result = {'id': doc_id, 'tokens': token_counts}
                # one JSON document per line (jsonlines format)
                fp.write(json.dumps(result, separators=None, indent=None, ensure_ascii=False))
                fp.write('\n')
                dictionary.doc2bow(tokens, allow_update=True)
        # store the document IDs
        util.json_write(sorted(doc_ids), self.file_ids)
        # store the dictionary
        # drop rare terms (< token_min_count docs), very common terms (> 20% of docs)
        # and cap the vocabulary at dict_size_limit entries
        dictionary.filter_extremes(no_below=self.token_min_count, no_above=0.2, keep_n=self.dict_size_limit)
        dictionary.compactify()
        dictionary.save(self.file_dict, pickle_protocol=4)
        return doc_ids, dictionary
    def store_gensim_dict(self, corpus: JsonLinesCorpus) -> Tuple[Set[str], corpora.Dictionary]:
        """
        process token stream to build dictionary in memory, then serialize the entire dictionary.
        also stores document IDs in a separate file.

        :param corpus: a corpus whose entries carry {'id': ..., 'tokens': {token: count}}
        :return: tuple of (set of document ids, the gensim dictionary)
        """
        logger.info("building the dictionary...")
        dictionary = corpora.Dictionary()
        doc_ids = set()
        for i, doc in enumerate(corpus.iter_all()):
            doc_id = doc['id']
            doc_ids.add(doc_id)
            token_counts = doc['tokens']  # type: Dict[str, int]
            # unfortunately, dictionary.doc2bow() does not accept (token,count) tuples
            # therefore we expand the dictionary to a token list again... (yes, this is stupid)
            tokens = util.flatten([token] * count for token, count in token_counts.items())
            dictionary.doc2bow(tokens, allow_update=True)
            if (i+1) % 50000 == 0:
                logger.info("{} documents have been read so far".format(i+1))
        # store the document IDs
        util.json_write(sorted(doc_ids), self.file_ids)
        # store the dictionary
        # drop rare terms (< token_min_count docs), very common terms (> 20% of docs)
        # and cap the vocabulary at dict_size_limit entries
        dictionary.filter_extremes(no_below=self.token_min_count, no_above=0.2, keep_n=self.dict_size_limit)
        dictionary.compactify()
        dictionary.save(self.file_dict, pickle_protocol=4)
        return doc_ids, dictionary
@staticmethod
def combine_pages(entry: Dict[str, Any], **kwargs) -> Dict[str, Any]:
# document IDs might be broken, if they were extracted from file names...
doc_id = text_processing.fix_file_based_id(entry['id'])
raw_text = "\n".join(entry['pages'])
clean_text = text_processing.clean_parsed_text(raw_text)
return {'id': doc_id, 'text': clean_text}
@staticmethod
def get_title_and_abstract(entry: Dict[str, Any], **kwargs) -> Dict[str, Any]:
full_id = entry['header']['identifier']
short_id = full_id[(full_id.rfind(':') + 1):]
title = text_processing.strip_all_whitespace(entry['title'][0])
abstract = text_processing.strip_all_whitespace(entry['description'][0])
return {'id': short_id, 'text': (title + "\n\n" + abstract) }
def topic_stats(topic_file: str):
    """Print how many documents were assigned to each topic in a stored result file."""
    print("gathering stats for topics in", topic_file)
    collection = DocumentCollection.from_dict(util.json_read(topic_file))
    # flatten all per-document topic assignments into a single stream and count them
    assignments = util.flatten((doc.topics or [] for doc in collection.documents.values()), generator=True)
    counts = Counter(assignments)
    for topic, count in counts.most_common():
        print("{}: {} ({})".format(topic.topic_id, count, topic.tokens))
def parse_args(argv: List[str] = None) -> argparse.Namespace:
    """
    Parse the command line arguments for the topic model builder.

    :param argv: the argument list to parse; defaults to sys.argv[1:]
        (parameter added so the function can be called programmatically and tested)
    :return: the parsed and post-processed namespace
    :raises ValueError: if --layers and --limit-classification have different lengths
    """
    parser = argparse.ArgumentParser(description='Build a nested topic model and classify documents')
    parser.add_argument('-p', '--input-pdfs', metavar='FILE', type=str,
                        help='path to the file containing the parsed PDFs (output of pdf_parser)')
    parser.add_argument('-t', '--input-tokens', metavar='FILE', type=str,
                        help='path to the file containing the tokens of the parsed pdfs '
                             '(optional, alternative to --input-pdfs)')
    parser.add_argument('-m', '--input-meta', metavar='FILE', type=str,
                        help='path to the metadata file (output of arxiv_crawler. '
                             'required, if the category layer should be used)')
    parser.add_argument('-o', '--output-prefix', metavar='PATH', type=str, required=True,
                        help='all output files, including temporary files, will be prefixed '
                             'with this string. all results will be stored under this '
                             'prefix aswell.')
    parser.add_argument('-a', '--abstracts-only', action='store_true',
                        help="build topic models based on a paper's abstract only "
                             "(do not use the entire document text)")
    parser.add_argument('-T', '--topic-model', metavar='MODEL', type=str, default="lda",
                        help='the topic model to use. Options: "lda" (default), "hdp")')
    parser.add_argument('-l', '--layers', metavar='LAYERS', type=str, default="10",
                        help='how many nested topic layers are to be used? Example: "10,7,4"')
    parser.add_argument('-c', '--limit-classification', metavar='LIMITS', type=str,
                        help='limits the number of topics that each document can be assigned '
                             'to at each layer during classification. One number per layer, '
                             '0 stands for unlimited. Must have same length as -l. '
                             'Example: "1,2,0"')
    parser.add_argument('-M', '--min-docs-per-topic', metavar='N', type=int,
                        help='require at least N documents per topic on each layer. '
                             'Can reduce the allowed topic count at each layer (but never increase). '
                             'Interrupts the build for a topic, if less than 3*N documents remain '
                             '(a topic model with just two topics does not seem useful)')
    parser.add_argument('-f', '--lang-filter', metavar='LANG', type=str, default="en",
                        help='filter by the specified language code. Defaults to "en" '
                             '(because we can currently only parse english text)')
    parser.add_argument('-v', '--vocab-size', metavar='N', type=int,
                        help='limit the size of the vocabulary, if specified')
    parser.add_argument('-d', '--doc-limit', metavar='N', type=int,
                        help='just process the first N documents (useful for testing)')
    args = parser.parse_args(argv)
    # process list input & convert data types
    if isinstance(args.layers, str):
        args.layers = [int(s.strip()) for s in args.layers.split(",")]
    if isinstance(args.limit_classification, str):
        args.limit_classification = [int(s.strip()) for s in args.limit_classification.split(",")]
    if args.limit_classification and len(args.layers) != len(args.limit_classification):
        raise ValueError("the arguments --layers and --limit-classification must have the "
                         "same length! layers: {}, limits: {}"
                         .format(str(args.layers), str(args.limit_classification)))
    return args
# example args:
# topic_model.py -t "tokens.json.bz2" -m "meta.json.bz2" -o "./data/test" -l "5,5" -v 10000
if __name__ == "__main__":
    # command-line entry point: configure a TopicModel from the CLI arguments and
    # run the full build pipeline
    args = parse_args()
    topic_model = TopicModel(file_pdf_text=args.input_pdfs, file_corpus_input=args.input_tokens,
                             file_metadata=args.input_meta, file_output_prefix=args.output_prefix,
                             abstracts_only=args.abstracts_only, model=args.topic_model,
                             language_filter=args.lang_filter, batch_size=500, n_threads=None,
                             # the category layer is enabled whenever metadata is available
                             category_layer=(args.input_meta is not None),
                             topic_layers=args.layers, topic_limit_per_layer=args.limit_classification,
                             min_docs_per_topic=args.min_docs_per_topic,
                             token_min_count=5, dict_size_limit=args.vocab_size,
                             document_limit=args.doc_limit)
    topic_model.build()
| [
"logging.getLogger",
"gensim.corpora.Dictionary.load",
"data.DocumentCollection",
"text_processing.fix_file_based_id",
"gensim.models.LdaMulticore",
"util.open_by_ext",
"os.cpu_count",
"data.Document",
"argparse.ArgumentParser",
"util.json_read_lines",
"spacy.load",
"gensim.corpora.Dictionary"... | [((215, 294), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'UserWarning', 'module': '"""gensim"""'}), "(action='ignore', category=UserWarning, module='gensim')\n", (238, 294), False, 'import warnings\n'), ((807, 902), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s : %(levelname)s : %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s : %(levelname)s : %(message)s',\n level=logging.INFO)\n", (826, 902), False, 'import logging\n'), ((908, 941), 'logging.getLogger', 'logging.getLogger', (['"""topic-models"""'], {}), "('topic-models')\n", (925, 941), False, 'import logging\n'), ((33086, 33112), 'util.json_read', 'util.json_read', (['topic_file'], {}), '(topic_file)\n', (33100, 33112), False, 'import util\n'), ((33122, 33159), 'data.DocumentCollection.from_dict', 'DocumentCollection.from_dict', (['dc_dict'], {}), '(dc_dict)\n', (33150, 33159), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((33268, 33288), 'collections.Counter', 'Counter', (['flat_topics'], {}), '(flat_topics)\n', (33275, 33288), False, 'from collections import Counter, defaultdict\n'), ((33458, 33551), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Build a nested topic model and classify documents"""'}), "(description=\n 'Build a nested topic model and classify documents')\n", (33481, 33551), False, 'import argparse\n'), ((7333, 7363), 'util.ProgressLog', 'ProgressLog', (['self.file_tasklog'], {}), '(self.file_tasklog)\n', (7344, 7363), False, 'from util import ProgressLog\n'), ((8789, 8811), 'data.Topic', 'Topic', (['"""root"""'], {'layer': '(0)'}), "('root', layer=0)\n", (8794, 8811), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((12244, 12281), 'data.DocumentCollection', 'DocumentCollection', (['topics', 'documents'], {}), '(topics, documents)\n', (12262, 12281), 
False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((14074, 14113), 'data.JsonLinesCorpus', 'JsonLinesCorpus', (['self.file_corpus_plain'], {}), '(self.file_corpus_plain)\n', (14089, 14113), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((14811, 14851), 'util.json_read_lines', 'util.json_read_lines', (['self.file_metadata'], {}), '(self.file_metadata)\n', (14831, 14851), False, 'import util\n'), ((14909, 14918), 'collections.Counter', 'Counter', ([], {}), '()\n', (14916, 14918), False, 'from collections import Counter, defaultdict\n'), ((16838, 16849), 'time.time', 'time.time', ([], {}), '()\n', (16847, 16849), False, 'import time\n'), ((17384, 17395), 'time.time', 'time.time', ([], {}), '()\n', (17393, 17395), False, 'import time\n'), ((19059, 19070), 'time.time', 'time.time', ([], {}), '()\n', (19068, 19070), False, 'import time\n'), ((21685, 21712), 'data.JsonLinesCorpus', 'JsonLinesCorpus', (['fin_corpus'], {}), '(fin_corpus)\n', (21700, 21712), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((22242, 22259), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (22253, 22259), False, 'from collections import Counter, defaultdict\n'), ((24430, 24469), 'gensim.corpora.Dictionary.load', 'corpora.Dictionary.load', (['self.file_dict'], {}), '(self.file_dict)\n', (24453, 24469), False, 'from gensim import corpora\n'), ((24716, 24743), 'os.path.isfile', 'os.path.isfile', (['corpus_path'], {}), '(corpus_path)\n', (24730, 24743), False, 'import os\n'), ((26364, 26373), 'collections.Counter', 'Counter', ([], {}), '()\n', (26371, 26373), False, 'from collections import Counter, defaultdict\n'), ((27753, 27764), 'time.time', 'time.time', ([], {}), '()\n', (27762, 27764), False, 'import time\n'), ((27779, 27809), 'spacy.load', 'spacy.load', (['"""en"""'], {'parser': '(False)'}), "('en', parser=False)\n", (27789, 27809), False, 'import spacy\n'), ((27981, 
28005), 'itertools.tee', 'itertools.tee', (['documents'], {}), '(documents)\n', (27994, 28005), False, 'import itertools\n'), ((28202, 28213), 'time.time', 'time.time', ([], {}), '()\n', (28211, 28213), False, 'import time\n'), ((29828, 29848), 'gensim.corpora.Dictionary', 'corpora.Dictionary', ([], {}), '()\n', (29846, 29848), False, 'from gensim import corpora\n'), ((31127, 31147), 'gensim.corpora.Dictionary', 'corpora.Dictionary', ([], {}), '()\n', (31145, 31147), False, 'from gensim import corpora\n'), ((32347, 32393), 'text_processing.fix_file_based_id', 'text_processing.fix_file_based_id', (["entry['id']"], {}), "(entry['id'])\n", (32380, 32393), False, 'import text_processing\n'), ((32460, 32503), 'text_processing.clean_parsed_text', 'text_processing.clean_parsed_text', (['raw_text'], {}), '(raw_text)\n', (32493, 32503), False, 'import text_processing\n'), ((32774, 32829), 'text_processing.strip_all_whitespace', 'text_processing.strip_all_whitespace', (["entry['title'][0]"], {}), "(entry['title'][0])\n", (32810, 32829), False, 'import text_processing\n'), ((32849, 32910), 'text_processing.strip_all_whitespace', 'text_processing.strip_all_whitespace', (["entry['description'][0]"], {}), "(entry['description'][0])\n", (32885, 32910), False, 'import text_processing\n'), ((12758, 12797), 'data.JsonLinesCorpus', 'JsonLinesCorpus', (['self.file_corpus_input'], {}), '(self.file_corpus_input)\n', (12773, 12797), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((16423, 16456), 'data.JsonLinesCorpus', 'JsonLinesCorpus', (['self.file_corpus'], {}), '(self.file_corpus)\n', (16438, 16456), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((16902, 17043), 'gensim.models.LdaMulticore', 'LdaMulticore', (['corpus'], {'id2word': 'dictionary.id2token', 'num_topics': 'num_topics', 'passes': '(2)', 'iterations': '(50)', 'chunksize': '(2000)', 'workers': 'self.n_threads'}), '(corpus, id2word=dictionary.id2token, 
num_topics=num_topics,\n passes=2, iterations=50, chunksize=2000, workers=self.n_threads)\n', (16914, 17043), False, 'from gensim.models import HdpModel, LdaMulticore\n'), ((17572, 17607), 'util.open_by_ext', 'util.open_by_ext', (['output_path', '"""wb"""'], {}), "(output_path, 'wb')\n", (17588, 17607), False, 'import util\n'), ((17627, 17661), 'pickle.dump', 'pickle.dump', (['model', 'fp'], {'protocol': '(4)'}), '(model, fp, protocol=4)\n', (17638, 17661), False, 'import pickle\n'), ((18152, 18169), 'itertools.count', 'itertools.count', ([], {}), '()\n', (18167, 18169), False, 'import itertools\n'), ((22177, 22209), 'util.json_read', 'util.json_read', (['self.file_topics'], {}), '(self.file_topics)\n', (22191, 22209), False, 'import util\n'), ((23178, 23194), 'data.Document', 'Document', (['doc_id'], {}), '(doc_id)\n', (23186, 23194), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((23447, 23477), 'util.json_read', 'util.json_read', (['self.file_docs'], {}), '(self.file_docs)\n', (23461, 23477), False, 'import util\n'), ((25989, 26023), 'util.open_by_ext', 'util.open_by_ext', (['file_model', '"""rb"""'], {}), "(file_model, 'rb')\n", (26005, 26023), False, 'import util\n'), ((26050, 26065), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (26061, 26065), False, 'import pickle\n'), ((29886, 29950), 'util.open_by_ext', 'util.open_by_ext', (['self.file_corpus_plain', '"""wt"""'], {'encoding': '"""utf-8"""'}), "(self.file_corpus_plain, 'wt', encoding='utf-8')\n", (29902, 29950), False, 'import util\n'), ((13012, 13081), 'util.json_read_lines', 'util.json_read_lines', (['self.file_metadata', 'self.get_title_and_abstract'], {}), '(self.file_metadata, self.get_title_and_abstract)\n', (13032, 13081), False, 'import util\n'), ((13214, 13274), 'util.json_read_lines', 'util.json_read_lines', (['self.file_pdf_text', 'self.combine_pages'], {}), '(self.file_pdf_text, self.combine_pages)\n', (13234, 13274), False, 'import util\n'), 
((17194, 17280), 'gensim.models.HdpModel', 'HdpModel', (['corpus'], {'id2word': 'dictionary.id2token', 'T': 'num_topics', 'K': 'max_topics_per_doc'}), '(corpus, id2word=dictionary.id2token, T=num_topics, K=\n max_topics_per_doc)\n', (17202, 17280), False, 'from gensim.models import HdpModel, LdaMulticore\n'), ((20131, 20142), 'time.time', 'time.time', ([], {}), '()\n', (20140, 20142), False, 'import time\n'), ((23209, 23238), 'util.json_read', 'util.json_read', (['self.file_ids'], {}), '(self.file_ids)\n', (23223, 23238), False, 'import util\n'), ((24123, 24153), 'util.json_read', 'util.json_read', (['self.file_docs'], {}), '(self.file_docs)\n', (24137, 24153), False, 'import util\n'), ((24212, 24252), 'util.flatten', 'util.flatten', (['categories'], {'generator': '(True)'}), '(categories, generator=True)\n', (24224, 24252), False, 'import util\n'), ((25134, 25167), 'data.JsonLinesCorpus', 'JsonLinesCorpus', (['self.file_corpus'], {}), '(self.file_corpus)\n', (25149, 25167), False, 'from data import JsonLinesCorpus, Topic, Document, DocumentCollection\n'), ((26705, 26736), 'langdetect.detect_langs', 'langdetect.detect_langs', (['sample'], {}), '(sample)\n', (26728, 26736), False, 'import langdetect\n'), ((28454, 28465), 'time.time', 'time.time', ([], {}), '()\n', (28463, 28465), False, 'import time\n'), ((30124, 30139), 'collections.Counter', 'Counter', (['tokens'], {}), '(tokens)\n', (30131, 30139), False, 'from collections import Counter, defaultdict\n'), ((27909, 27920), 'time.time', 'time.time', ([], {}), '()\n', (27918, 27920), False, 'import time\n'), ((30265, 30333), 'json.dumps', 'json.dumps', (['result'], {'separators': 'None', 'indent': 'None', 'ensure_ascii': '(False)'}), '(result, separators=None, indent=None, ensure_ascii=False)\n', (30275, 30333), False, 'import json\n'), ((6190, 6204), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (6202, 6204), False, 'import os\n'), ((17730, 17741), 'time.time', 'time.time', ([], {}), '()\n', (17739, 17741), 
False, 'import time\n'), ((29423, 29468), 'text_processing.has_valid_chars', 'text_processing.has_valid_chars', (['token.string'], {}), '(token.string)\n', (29454, 29468), False, 'import text_processing\n')] |
from google.cloud import storage
GCS_CLIENT = storage.Client()
GCS_BUCKET = GCS_CLIENT.get_bucket('senpai-io.appspot.com')
path = 'quandl-stage/backfill_data_jan2015_mar2018.csv'
blob = GCS_BUCKET.blob(path)
blob.upload_from_filename(filename='data_jan2015_mar2018.csv')
| [
"google.cloud.storage.Client"
] | [((47, 63), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (61, 63), False, 'from google.cloud import storage\n')] |
import numpy as np
from spacy.pipeline.sentencizer import Sentencizer
from glob import glob
from spacy.lang.en import English
def metrics(a, b):
from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score
return (accuracy_score(a, b),
recall_score(a, b),
precision_score(a, b),
f1_score(a, b))
def performance(colgate=None):
colgate = colgate if colgate is not None else Sentencizer()
nlp = English()
output = []
for test in glob("marked-*.txt"):
input = test.replace("marked-", "")
txt = open(input).read()
tokens = nlp(open(test).read())
hy_tokens = colgate(nlp(txt))
assert len(tokens) == len(hy_tokens)
y = [False] * len(tokens)
seen_period = False
for i, tok in enumerate(tokens):
is_in_punct_chars = tok.text in Sentencizer.default_punct_chars
if seen_period and not tok.is_punct and not is_in_punct_chars and not tok.is_space:
y[i] = True
seen_period = False
elif tok.is_punct and tok.text == "#":
seen_period = True
y = np.array(y, dtype=bool)
y[0] = True
hy = np.array([x.is_sent_start for x in hy_tokens])
_ = metrics(y, hy)
output.append((test, _, y.sum()))
return output
if __name__ == "__main__":
from hw2 import ColgateSBD
from glob import glob
from spacy.lang.en import English
output = performance(ColgateSBD())
for input, perf, n_sent in output:
print("Input:", input, perf, "Number of sentences:", n_sent)
print("*" * 5, "Sentencizer", "*" * 5)
output = performance()
for input, perf, n_sent in output:
print("Input:", input, perf, "Number of sentences:", n_sent)
| [
"hw2.ColgateSBD",
"sklearn.metrics.f1_score",
"spacy.lang.en.English",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"numpy.array",
"spacy.pipeline.sentencizer.Sentencizer",
"sklearn.metrics.accuracy_score",
"glob.glob"
] | [((471, 480), 'spacy.lang.en.English', 'English', ([], {}), '()\n', (478, 480), False, 'from spacy.lang.en import English\n'), ((514, 534), 'glob.glob', 'glob', (['"""marked-*.txt"""'], {}), "('marked-*.txt')\n", (518, 534), False, 'from glob import glob\n'), ((247, 267), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['a', 'b'], {}), '(a, b)\n', (261, 267), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((281, 299), 'sklearn.metrics.recall_score', 'recall_score', (['a', 'b'], {}), '(a, b)\n', (293, 299), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((313, 334), 'sklearn.metrics.precision_score', 'precision_score', (['a', 'b'], {}), '(a, b)\n', (328, 334), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((348, 362), 'sklearn.metrics.f1_score', 'f1_score', (['a', 'b'], {}), '(a, b)\n', (356, 362), False, 'from sklearn.metrics import f1_score, recall_score, precision_score, accuracy_score\n'), ((447, 460), 'spacy.pipeline.sentencizer.Sentencizer', 'Sentencizer', ([], {}), '()\n', (458, 460), False, 'from spacy.pipeline.sentencizer import Sentencizer\n'), ((1174, 1197), 'numpy.array', 'np.array', (['y'], {'dtype': 'bool'}), '(y, dtype=bool)\n', (1182, 1197), True, 'import numpy as np\n'), ((1231, 1277), 'numpy.array', 'np.array', (['[x.is_sent_start for x in hy_tokens]'], {}), '([x.is_sent_start for x in hy_tokens])\n', (1239, 1277), True, 'import numpy as np\n'), ((1515, 1527), 'hw2.ColgateSBD', 'ColgateSBD', ([], {}), '()\n', (1525, 1527), False, 'from hw2 import ColgateSBD\n')] |
from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler
import socket, traceback
import csv
import struct
import sys, time, string, pygame
import pygame
import pygame.draw
import pygame.time
import numpy as np
from math import sin, cos, acos
from modules.euclid import Vector3, Quaternion
from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen
import math
# from pygame.locals import *
# from ponycube import *
from modules.madgwickahrs import *
import modules.quaternion
from modules.quaternion import QuaternionClass
from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler
from math import atan2, atan
from numpy.linalg import inv
from numpy import linalg as LA
# import euclid
import serial
ser = serial.Serial('/dev/tty.usbmodem14411')
ser.baudrate = 115200
ser.timeout = 3
prev_time = 0
filename = open('/Users/eunsunlee/Documents/NESL/UnderwaterSensorTag/IMU_algorithms/optitrack/imu_movement.txt','w')
# offset_mx = 77.345
# offset_my = -13.725
# offset_mz = -71.64
# scale_mx = 1.1
# scale_my = 1.13
# scale_mz = 0.827
# LAB
offset_mx = 71.12
offset_my = -30.385
offset_mz = -66.24
scale_mx = 1.210645853980839
scale_my = 1.1778152745972439
scale_mz = 0.7547368963031613
dt = 1/10
visualIMU = False
if visualIMU:
pygame.init()
screen = Screen(1600,400,scale=1.5)
cube1 = Cube(40,30,60)
cube2 = Cube(40,30,60)
cube3 = Cube(40,30,60)
cube4 = Cube(40,30,60)
cube5 = Cube(40,30,60)
q1 = Quaternion(1,0,0,0)
q2 = Quaternion(1,0,0,0)
q3 = Quaternion(1,0,0,0)
q4 = Quaternion(1,0,0,0)
q5 = Quaternion(1,0,0,0)
p1 = Vector3(-400,0,0)
p2 = Vector3(-200,0,0)
p3 = Vector3(0,0,0)
p4 = Vector3(200,0,0)
p5 = Vector3(400,0,0)
incr = Quaternion(0.96,0.01,0.01,0).normalized()
cube1.erase(screen)
cube1.draw(screen,q1,p1)
cube2.erase(screen)
cube2.draw(screen,q2,p2)
cube3.erase(screen)
cube3.draw(screen,q3,p3)
cube4.erase(screen)
cube4.draw(screen,q4,p4)
cube5.erase(screen)
cube5.draw(screen,q5,p5)
# Madgwick
Imupredict = MadgwickAHRS();
Imupredict2 = MadgwickAHRS();
# A3
omega0 = [0,0,0]
similaritywindowA3 = 0
Sc = []
Sg = []
C = []
G = []
Eg = 0
quatA3 = QuaternionClass(1, 0, 0, 0)
quatMuseAlg = QuaternionClass(1, 0, 0, 0)
similaritywindowMUSE = 0
initial = 0
update = 0
Ax = []
Ay = []
Az = []
beta = 0.80
quat = QuaternionClass(1,0,0,0)
# 1 Hz - 1000
# 10 Hz - 100
while True:
reading = ser.readline()
print(reading, file = filename)
# print(reading)
sp = str(reading).split(',')
# print(sp)
time = float(sp[0][2:].strip())
# reads in g so multiply by 9.8
ax = float(sp[1].strip())
ay = float(sp[2].strip())
az = float(sp[3].strip())
ax = ax*9.8
ay = ay*9.8
az = az*9.8
gx = float(sp[4].strip())*math.pi/180 #rad/s
gy = float(sp[5].strip())*math.pi/180 #rad/s
gz = float(sp[6].strip())*math.pi/180 #rad/s
#uT
mx = float(sp[7].strip())
my = float(sp[8].strip())
mz = float(sp[9].strip())
mx = mx - offset_mx
my = my - offset_my
mz = mz - offset_mz
mx = mx*scale_mx
my = my*scale_my
mz = mz*scale_mz
qw = float(sp[10].strip())
qx = float(sp[11].strip())
qy = float(sp[12].strip())
qz = float(sp[13].strip())
pitch = float(sp[14].strip())
roll = float(sp[15].strip())
yaw = float(sp[16].strip())
dq = QuaternionClass(0,0,-1,0)
# print("yaw, pitch, roll: ", yaw, pitch, roll)
heading = float(sp[17].split('\\r')[0].strip())
# print("heading: ", heading)
# print(computeheading(mx,my))
# print(yaw, pitch, roll)
accel = [ax, ay, az]
gyro = [gx, gy, gz]
mag = [mx, my, mz]
# print(accel)
a333 = 0
# yawAM, pitchAM, rollAM, quatAM = AccMagOrientation(accel, mag)
# print("ypr: ", yaw, pitch, roll)
# print("ypr: ", yawAM, pitchAM, rollAM)
# print("heading: ", heading)
# print(headingM)
# time_diff = 60
if visualIMU: #quaternion from imu
# yellow area facing straight if imu hold with usbside facing me
# print("yaw: ", yaw)
# q1w = float(sp[10].strip())
# q1x = float(sp[11].strip())
# q1z = -float(sp[12].strip())
# q1y = float(sp[13].split('\\r')[0].strip())
# quatMDP = QuaternionClass(q1w, q1x, q1y, q1z)
# rollMDP, pitchMDP, yawMDP = QuatToEuler(quatMDP)
# print("yawMDP: ", yawMDP)
# quat = QuaternionClass(qw, qx, qy, -qz) *dq
q1.w = quat[0]
q1.x = quat[1]
q1.z = quat[3]
q1.y = quat[2]
q1 = q1.normalized()
cube1.erase(screen)
cube1.draw(screen,q1,p1)
# print("yaw: ", yaw )
if visualIMU: # Madgwick Algorithm
Imupredict.samplePeriod = 0.025#0.1
Imupredict.update(gyro,accel,mag)
quatMad = Imupredict.quaternion
quatMad = qnormalized(quatMad)
Imupredict.quaternion = quatMad
#quatMad = quatNormalized(quatMad)
yawMad, pitchMad, rollMad = QuatToEuler(quatMad)
# print("yawMad: ", yawMad*180/math.pi)
quat = QuaternionClass(quatMad[0], quatMad[1], quatMad[3], quatMad[2])
q2.w = quat[0]
q2.x = quat[1]
q2.z = quat[3]
q2.y = quat[2]
q2 = q2.normalized()
cube2.erase(screen)
cube2.draw(screen,q2,p2)
if False:
# quat = MadgwickQuaternionUpdate(ax, ay, az, gx, gy, gz, mx, my, mz, quat)
# q5.w = quat[0]
# q5.x = quat[1]
# q5.z = -quat[2]
# q5.y = quat[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
yawT, pitchT, rollT, quatT = androidAccMag2Euler(accel, mag)
if yawT > 0:
yawT = 360 - yawT*180/math.pi
else:
yawT = -yawT*180/math.pi
# print("yaw: ",yawT)
q5.w = quatT[0]
q5.x = quatT[1]
q5.z = -quatT[2]
q5.y = quatT[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
# Imupredict2.samplePeriod = 0.1
# Imupredict2.update_imu(gyro,accel)
# quatMad2 = Imupredict2.quaternion
# quatMad2 = qnormalized(quatMad)
# Imupredict2.quaternion = quatMad2
# q5.w = quatMad2[0]
# q5.x = quatMad2[1]
# q5.z = -quatMad2[2]
# q5.y = quatMad2[3]
# q5 = q5.normalized()
# cube5.erase(screen)
# cube5.draw(screen,q5,p5)
# https://stackoverflow.com/questions/32372847/android-algorithms-for-sensormanager-getrotationmatrix-and-sensormanager-getori/35390001#35390001
if visualIMU: #a3
q_a3 = 0
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatA3, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
if yawG < 0:
yawG = -yawG*180/math.pi
else:
yawG = 360 - yawG*180/math.pi
# # print(yawG, pitchG, rollG)
omega0 = omega1
# # # A3 Algorithm - accelerometer, magnetometer calibration
# yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
# # print(yawAM, pitchAM, rollAM)
# # # TODO: Update quaternion if w < 240 degree, a < 2g
w = max(abs(np.array(gyro)))*180/math.pi
a = max(abs(np.array(accel)))
# # # if w < 240 and a < 2*9.8:
# # # print("stable")
# # # else:
# # # print("moving")
# # headingM = headingfromMag(mag)
headingM = computeheading(mx, my)
# print("headingM:" , headingM)
# print("heading: ", headingM)
# print("yawG: ", yawG*180/math.pi)
# # print(headingM)
if similaritywindowA3 > 1:
# print("similaritywindow")
# calculate pc and pg
pc = 1/(2**np.var(np.subtract(Sc,C)))
pg = 1/(2**np.var(np.subtract(Sg,G)))
# print(pc)
# print(pg)
if pc > 0.2 and pg > 0.2:
print("change?")
# TODO: if Ec < Eg, then update quaternion
E1 = -32.14*pc + 19.93
E2 = -12.86*pg + 11.57
Ec = max(E1, E2)
Eg = (Eg + 0.0003*w*dt + 0.001*a*dt)*1000
#print(Ec)
#print(Eg)
if Ec < Eg*1000:
# print(a333)
a333 = a333 + 1
print("A3 reset ")
q_a3 = 1
#quatA3 = quatAM
# # quat = quatAM
# reset values
similaritywindowA3 = 0
C = []
Sc = []
Sg = []
G = []
Eg = 0
else:
# #TODO: update Eg
Eg = Eg + 0.0003*w*dt + 0.001*a*dt
C.append(yawAM)
Sc.append(yawG)
Sg.append(rollG)
G.append(rollAM)
similaritywindowA3 = similaritywindowA3 + dt
if q_a3:
quatA3 = quatAM #QuaternionClass(quatAM[0], quatAM[1], quatAM[2], quatAM[3])
# print("quatAM", quatAM)
else:
quatA3 = quatG
# print("quatG", quatG[0], quatG[1], quatG[2], quatG[3])
# print("quatA3", quatA3[0], quatA3[1], quatA3)
yawA3, pitchA3, rollA3 = QuatToEuler(quatA3)
# print("yawA3: ", yawA3*180/math.pi)
quatA3_temp = QuaternionClass(quatA3[0], quatA3[1], quatA3[3], -quatA3[2])
# quatA3 = quatA3_temp
q3.w = quatA3_temp[0]
q3.x = quatA3_temp[1]
q3.y = quatA3_temp[2]
q3.z = quatA3_temp[3]
q3 = q3.normalized()
cube3.erase(screen)
cube3.draw(screen,q3,p3)
if visualIMU: # MUSE
# # # Initial yaw, pitch, roll from Accelerometer and Magnetometer
#yawAM, pitchAM, rollAM, quatAM = AccMag2Euler(accel, mag)
yawAM, pitchAM, rollAM, quatAM = androidAccMag2Euler(accel, mag)
omega1 = [gx, gy, gz]
quatG = IntegrationRK4(omega0, omega1, quatMuseAlg, dt)
yawG, pitchG, rollG = QuatToEuler(quatG)
omega0 = omega1
headingM = computeheading(mx, my)
# headingM = headingfromMag(mag)
if initial < 30:
quatMuseAlg = quatAM
print("initial")
# O: orientation rotMat from quat
# O-1 : inverse of the rot Mat
# Calculate Ng = O*NL- Equation (1)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# print("N_G")
# print(N_G)
initial = initial + 1
else:
quatMuseAlg = quatAM
# print("similaritywindow: ", similaritywindowMUSE)
if similaritywindowMUSE > 1:
# print("Ax: ", Ax)
# print("Ay: ", Ay)
# print("Az: ", Az)
aAx = abs(np.array(Ax))
aAy = abs(np.array(Ay))
aAz = abs(np.array(Az))
# print("Ax: ", aAx)
# print("Ay: ", aAy)
# print("Az: ", aAz)
agAx = aAx - 9.8
agAy = aAy - 9.8
agAz = aAz - 9.8
# print("agAx: ", agAx)
# print("agAy: ", agAy)
# print("agAz: ", agAz)
aagAx = abs(agAx)
aagAy = abs(agAy)
aagAz = abs(agAz)
# print("aagAx: ", aagAx)
# print("aagAy: ", aagAy)
# print("aagAz: ", aagAz)
x_max = max(aagAx)
y_max = max(aagAy)
z_max = max(aagAz)
# Ax = abs(abs(np.array(Ax))-9.8)
# Ay = abs(abs(np.array(Ax))-9.8)
# Az = abs(abs(np.array(Az))-9.8)
# # print(Az)
# # x_max = max([abs(max(Ax)), abs(min(Ax))])
# # y_max = max([abs(max(Ay)), abs(min(Ay))])
# # z_max = max([abs(max(Az)), abs(min(Az))])
# x_max = max(Ax)
# y_max = max(Ay)
# z_max = max(Az)
# print("x: ", x_max)
# print("y: ", y_max)
# print("z: ", z_max)
xyz_min = min([x_max, y_max, z_max])
# print(xyz_min)
# acceleration roughly measures 9.8m/s2
if xyz_min < 1:
print("yes, update quat with AM")
Oa = QuatToRotMat(quatAM)
Og = QuatToRotMat(quatG)
Ocomp = np.mat(Oa)*(1-beta) + np.mat(Og)*beta
# print("Oa")
# print(Oa)
# print("Og")
# print(Og)
# print("Ocomp")
# print(Ocomp)
quatComp = RotMatToQuat(np.array(np.mat(Ocomp)))
quatMuseAlg = quatComp
update = 1
# Update 3D magnetic vector estimation
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
O = QuatToRotMat(quatAM)
N_G = O*N_L
# reset values
similaritywindowMUSE = 0
Ax = []
Ay = []
Az = []
else:
Ax.append(ax)
Ay.append(ay)
Az.append(az)
similaritywindowMUSE = similaritywindowMUSE + dt
if update == 0:
O_hat = QuatToRotMat(quatG)
Oinv_hat = inv(O_hat)
N_L_hat = Oinv_hat * N_G
# print("N_L_hat")
# print(N_L_hat)
N_L = np.mat([[mx],[my],[mz]])
# print("N_L")
# print(N_L)
N_L_hat = np.array([np.array(N_L_hat)[0][0], np.array(N_L_hat)[1][0], np.array(N_L_hat)[2][0]])
N_L = np.array([mx, my, mz])
RotAxis = np.cross(N_L_hat, N_L)
RotAxis = RotAxis/LA.norm(RotAxis)
# print("RotAxis")
# print(RotAxis/LA.norm(RotAxis))
alpha = 0.01
RotAngle = angle_between(N_L_hat, N_L)
alphaRotAngle = alpha* RotAngle
deltaRotMat = AxisAngleToRotMat(RotAxis, alphaRotAngle)
Onew_hat = np.array(np.mat(inv(deltaRotMat))*np.mat(O_hat))
quatMUSE = RotMatToQuat(Onew_hat)
quatMUSE = quatNormalized(quatMUSE)
quatMuseAlg = QuaternionClass(quatMUSE[0], quatMUSE[1], quatMUSE[2], quatMUSE[3])
#print("update quat with MUSE")
update = 0
yawMUSE, pitchMUSE, rollMUSE = QuatToEuler(quatMuseAlg)
# print("yawMUSE: ", yawMUSE*180/math.pi)
q4.w = quatMuseAlg[0]
q4.x = quatMuseAlg[1]
q4.y = quatMuseAlg[3]
q4.z = -quatMuseAlg[2]
q4 = q4.normalized()
cube4.erase(screen)
cube4.draw(screen,q4,p4)
if visualIMU:
# quatDMP = QuaternionClass(qw, qx, qy, qz)
# yawDMP, pitchDMP, rollDMP = MPU9250_computeEuler(qw, qx, qy, qz)
# print("yprDMP: ", yawDMP, pitchDMP, rollDMP)
# # print("ypr: ", yaw, pitch, roll)
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP = qnormalized(quatDMP)
# print("quatDMP: " , quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# yawDMP, pitchDMP, rollDMP = quaternion_to_euler_angle(quatDMP[0], quatDMP[1], quatDMP[2], quatDMP[3])
# quatDMP1 = Euler2Quat(yawDMP, pitchDMP, rollDMP)
# quatDMP1 = qnormalized(quatDMP1)
# print("quatDMP1: ", quatDMP1[0], quatDMP1[1], quatDMP1[2], quatDMP1[3])
# print("ypr: ", yawDMP*180/math.pi)
# if yaw - 180 > 0 :
# yaw -= 360
# yaw *= math.pi/180
# if roll - 180 > 0 :
# roll -= 360
# roll *= math.pi/180
# if pitch - 180 > 0 :
# pitch -= 360
# pitch *= math.pi/180
# quatDMP = Euler2Quat(yaw, pitch, roll)
# quatDMP = qnormalized(quatDMP)
# q5.w = quatDMP1[0]
# q5.x = quatDMP1[1]
# q5.y = quatDMP1[3]
# q5.z = -quatDMP1[2]
# yawES = math.atan2(mx,my)
# rollES, pitchES = RP_calculate(accel)
# rollES = rollES
# yawES *= 180/math.pi
# if yawES < 0 :
# yawES += 360.0
# rollES *= 180/math.pi
# if rollES < 0 :
# rollES += 360.0
# pitchES *= 180/math.pi
# if pitchES < 0 :
# pitchES += 360.0
# print("yaw, yawES: ", yaw, yawES)
# print("roll, rollES: ", roll, rollES)
# print("pitch, pitchES: ", pitch, pitchES)
# rollES = rollES * 180/math.pi
# if rollES < 0:
# rollES = 360 + rollES
# rollES = (360 - rollES*180/math.pi)
# rollES = rollES * math.pi/180
# yawES = yawES*math.pi/180
# rollES = rollES*math.pi/180
# print("yawES: ", yawES)
#
# quatES = Euler2Quat(yaw*math.pi/180, pitch*math.pi/180, roll*math.pi/180)
# # quatES = Euler2Quat(yawES*math.pi/180, 0, 0)
# quatES = qnormalized(quatES)
# # print("quatES: ", quatES[0], quatES[1], quatES[2], quatES[3]) # 3 - yaw
# q5.w = quatES[0]
# q5.x = quatES[1]
# q5.z = -quatES[2]
# q5.y = quatES[3]
q5 = q5.normalized()
cube5.erase(screen)
cube5.draw(screen,q5,p5)
if visualIMU:
pygame.display.flip()
pygame.time.delay(0)
event = pygame.event.poll()
if event.type == pygame.QUIT \
or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):
break
# print(time)
# print(time-prev_time)
# print(ax)
# print(ay)
# print(az)
# print(gx)
# print(gy)
# print(gz)
# print(mx)
# print(my)
# print(mz)
# sp = reading.split()
# print(float(sp[0][:-1]))
# print(sp[1].split(','))
# # print(float(sp[1][:-1]))
| [
"pygame.event.poll",
"pygame.init",
"numpy.array",
"numpy.linalg.norm",
"modules.euclid.Quaternion",
"modules.quaternion.QuaternionClass",
"numpy.cross",
"pygame.time.delay",
"modules.a3muse.quatNormalized",
"pygame.display.flip",
"numpy.subtract",
"modules.a3muse.RotMatToQuat",
"numpy.mat",... | [((1012, 1051), 'serial.Serial', 'serial.Serial', (['"""/dev/tty.usbmodem14411"""'], {}), "('/dev/tty.usbmodem14411')\n", (1025, 1051), False, 'import serial\n'), ((1556, 1569), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1567, 1569), False, 'import pygame\n'), ((1580, 1608), 'modules.EuclidObjects.Screen', 'Screen', (['(1600)', '(400)'], {'scale': '(1.5)'}), '(1600, 400, scale=1.5)\n', (1586, 1608), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1616, 1632), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1620, 1632), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1640, 1656), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1644, 1656), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1664, 1680), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1668, 1680), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1688, 1704), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1692, 1704), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1712, 1728), 'modules.EuclidObjects.Cube', 'Cube', (['(40)', '(30)', '(60)'], {}), '(40, 30, 60)\n', (1716, 1728), False, 'from modules.EuclidObjects import Cube, Screen, Grid, PerspectiveScreen\n'), ((1734, 1756), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1744, 1756), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1760, 1782), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1770, 1782), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1786, 1808), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 
0)\n', (1796, 1808), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1812, 1834), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1822, 1834), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1838, 1860), 'modules.euclid.Quaternion', 'Quaternion', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (1848, 1860), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1865, 1884), 'modules.euclid.Vector3', 'Vector3', (['(-400)', '(0)', '(0)'], {}), '(-400, 0, 0)\n', (1872, 1884), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1889, 1908), 'modules.euclid.Vector3', 'Vector3', (['(-200)', '(0)', '(0)'], {}), '(-200, 0, 0)\n', (1896, 1908), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1913, 1929), 'modules.euclid.Vector3', 'Vector3', (['(0)', '(0)', '(0)'], {}), '(0, 0, 0)\n', (1920, 1929), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1934, 1952), 'modules.euclid.Vector3', 'Vector3', (['(200)', '(0)', '(0)'], {}), '(200, 0, 0)\n', (1941, 1952), False, 'from modules.euclid import Vector3, Quaternion\n'), ((1957, 1975), 'modules.euclid.Vector3', 'Vector3', (['(400)', '(0)', '(0)'], {}), '(400, 0, 0)\n', (1964, 1975), False, 'from modules.euclid import Vector3, Quaternion\n'), ((2442, 2469), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2457, 2469), False, 'from modules.quaternion import QuaternionClass\n'), ((2485, 2512), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2500, 2512), False, 'from modules.quaternion import QuaternionClass\n'), ((2617, 2644), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['(1)', '(0)', '(0)', '(0)'], {}), '(1, 0, 0, 0)\n', (2632, 2644), False, 'from modules.quaternion import QuaternionClass\n'), ((3556, 3584), 'modules.quaternion.QuaternionClass', 'QuaternionClass', 
(['(0)', '(0)', '(-1)', '(0)'], {}), '(0, 0, -1, 0)\n', (3571, 3584), False, 'from modules.quaternion import QuaternionClass\n'), ((4869, 4889), 'modules.a3muse.qnormalized', 'qnormalized', (['quatMad'], {}), '(quatMad)\n', (4880, 4889), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((4992, 5012), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatMad'], {}), '(quatMad)\n', (5003, 5012), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((5065, 5128), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatMad[0]', 'quatMad[1]', 'quatMad[3]', 'quatMad[2]'], {}), '(quatMad[0], quatMad[1], quatMad[3], quatMad[2])\n', (5080, 5128), False, 'from modules.quaternion import QuaternionClass\n'), ((5565, 5596), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (5584, 5596), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6436, 6478), 'modules.a3muse.IntegrationRK4', 'IntegrationRK4', (['omega0', 'omega1', 'quatA3', 'dt'], {}), '(omega0, omega1, quatA3, dt)\n', (6450, 6478), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6503, 6521), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatG'], {}), '(quatG)\n', (6514, 6521), False, 'from modules.a3muse 
import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((6822, 6853), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (6841, 6853), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((7176, 7198), 'modules.mpulib.computeheading', 'computeheading', (['mx', 'my'], {}), '(mx, my)\n', (7190, 7198), False, 'from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler\n'), ((8513, 8532), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatA3'], {}), '(quatA3)\n', (8524, 8532), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((8591, 8651), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatA3[0]', 'quatA3[1]', 'quatA3[3]', '(-quatA3[2])'], {}), '(quatA3[0], quatA3[1], quatA3[3], -quatA3[2])\n', (8606, 8651), False, 'from modules.quaternion import QuaternionClass\n'), ((9038, 9069), 'modules.a3muse.androidAccMag2Euler', 'androidAccMag2Euler', (['accel', 'mag'], {}), '(accel, mag)\n', (9057, 9069), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9105, 9152), 'modules.a3muse.IntegrationRK4', 'IntegrationRK4', (['omega0', 'omega1', 'quatMuseAlg', 'dt'], {}), '(omega0, 
omega1, quatMuseAlg, dt)\n', (9119, 9152), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9177, 9195), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatG'], {}), '(quatG)\n', (9188, 9195), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((9228, 9250), 'modules.mpulib.computeheading', 'computeheading', (['mx', 'my'], {}), '(mx, my)\n', (9242, 9250), False, 'from modules.mpulib import computeheading, attitudefromCompassGravity, RP_calculate, MadgwickQuaternionUpdate, Euler2Quat, quaternion_to_euler_angle, MPU9250_computeEuler\n'), ((15138, 15159), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (15157, 15159), False, 'import pygame\n'), ((15162, 15182), 'pygame.time.delay', 'pygame.time.delay', (['(0)'], {}), '(0)\n', (15179, 15182), False, 'import pygame\n'), ((15193, 15212), 'pygame.event.poll', 'pygame.event.poll', ([], {}), '()\n', (15210, 15212), False, 'import pygame\n'), ((1983, 2014), 'modules.euclid.Quaternion', 'Quaternion', (['(0.96)', '(0.01)', '(0.01)', '(0)'], {}), '(0.96, 0.01, 0.01, 0)\n', (1993, 2014), False, 'from modules.euclid import Vector3, Quaternion\n'), ((9474, 9500), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (9480, 9500), True, 'import numpy as np\n'), ((9541, 9561), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (9553, 9561), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), 
((12684, 12708), 'modules.a3muse.QuatToEuler', 'QuatToEuler', (['quatMuseAlg'], {}), '(quatMuseAlg)\n', (12695, 12708), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((7008, 7023), 'numpy.array', 'np.array', (['accel'], {}), '(accel)\n', (7016, 7023), True, 'import numpy as np\n'), ((11362, 11388), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (11368, 11388), True, 'import numpy as np\n'), ((11433, 11453), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (11445, 11453), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11719, 11738), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatG'], {}), '(quatG)\n', (11731, 11738), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11756, 11766), 'numpy.linalg.inv', 'inv', (['O_hat'], {}), '(O_hat)\n', (11759, 11766), False, 'from numpy.linalg import inv\n'), ((11858, 11884), 'numpy.mat', 'np.mat', (['[[mx], [my], [mz]]'], {}), '([[mx], [my], [mz]])\n', (11864, 11884), True, 'import numpy as np\n'), ((12030, 12052), 'numpy.array', 'np.array', (['[mx, my, mz]'], {}), '([mx, my, mz])\n', (12038, 12052), True, 'import numpy as np\n'), ((12067, 12089), 'numpy.cross', 'np.cross', (['N_L_hat', 'N_L'], {}), '(N_L_hat, N_L)\n', (12075, 12089), True, 'import numpy as np\n'), ((12229, 12256), 'modules.a3muse.angle_between', 'angle_between', (['N_L_hat', 'N_L'], {}), '(N_L_hat, N_L)\n', (12242, 
12256), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12311, 12352), 'modules.a3muse.AxisAngleToRotMat', 'AxisAngleToRotMat', (['RotAxis', 'alphaRotAngle'], {}), '(RotAxis, alphaRotAngle)\n', (12328, 12352), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12432, 12454), 'modules.a3muse.RotMatToQuat', 'RotMatToQuat', (['Onew_hat'], {}), '(Onew_hat)\n', (12444, 12454), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12470, 12494), 'modules.a3muse.quatNormalized', 'quatNormalized', (['quatMUSE'], {}), '(quatMUSE)\n', (12484, 12494), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12513, 12580), 'modules.quaternion.QuaternionClass', 'QuaternionClass', (['quatMUSE[0]', 'quatMUSE[1]', 'quatMUSE[2]', 'quatMUSE[3]'], {}), '(quatMUSE[0], quatMUSE[1], quatMUSE[2], quatMUSE[3])\n', (12528, 12580), False, 'from modules.quaternion import QuaternionClass\n'), ((9848, 9860), 'numpy.array', 'np.array', (['Ax'], {}), '(Ax)\n', (9856, 9860), True, 'import numpy as np\n'), ((9876, 9888), 'numpy.array', 'np.array', (['Ay'], {}), '(Ay)\n', (9884, 9888), True, 'import numpy as np\n'), ((9904, 9916), 'numpy.array', 'np.array', (['Az'], {}), '(Az)\n', (9912, 9916), True, 'import numpy as np\n'), ((10993, 11013), 
'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatAM'], {}), '(quatAM)\n', (11005, 11013), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((11024, 11043), 'modules.a3muse.QuatToRotMat', 'QuatToRotMat', (['quatG'], {}), '(quatG)\n', (11036, 11043), False, 'from modules.a3muse import androidAccMag2Euler, qnormalized, quatNormalized, IntegrationRK4, EulerToQuat, AccMagOrientation, headingfromMag, QuatToEuler, angle_between, QuatToRotMat, AxisAngleToRotMat, RotMatToQuat, AccMag2Euler\n'), ((12112, 12128), 'numpy.linalg.norm', 'LA.norm', (['RotAxis'], {}), '(RotAxis)\n', (12119, 12128), True, 'from numpy import linalg as LA\n'), ((6965, 6979), 'numpy.array', 'np.array', (['gyro'], {}), '(gyro)\n', (6973, 6979), True, 'import numpy as np\n'), ((7435, 7453), 'numpy.subtract', 'np.subtract', (['Sc', 'C'], {}), '(Sc, C)\n', (7446, 7453), True, 'import numpy as np\n'), ((7476, 7494), 'numpy.subtract', 'np.subtract', (['Sg', 'G'], {}), '(Sg, G)\n', (7487, 7494), True, 'import numpy as np\n'), ((12402, 12415), 'numpy.mat', 'np.mat', (['O_hat'], {}), '(O_hat)\n', (12408, 12415), True, 'import numpy as np\n'), ((11058, 11068), 'numpy.mat', 'np.mat', (['Oa'], {}), '(Oa)\n', (11064, 11068), True, 'import numpy as np\n'), ((11080, 11090), 'numpy.mat', 'np.mat', (['Og'], {}), '(Og)\n', (11086, 11090), True, 'import numpy as np\n'), ((11248, 11261), 'numpy.mat', 'np.mat', (['Ocomp'], {}), '(Ocomp)\n', (11254, 11261), True, 'import numpy as np\n'), ((12384, 12400), 'numpy.linalg.inv', 'inv', (['deltaRotMat'], {}), '(deltaRotMat)\n', (12387, 12400), False, 'from numpy.linalg import inv\n'), ((11944, 11961), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', (11952, 11961), True, 'import numpy as np\n'), ((11969, 11986), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', 
(11977, 11986), True, 'import numpy as np\n'), ((11994, 12011), 'numpy.array', 'np.array', (['N_L_hat'], {}), '(N_L_hat)\n', (12002, 12011), True, 'import numpy as np\n')] |
from rest_framework import generics
from rest_framework import response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.views import APIView
from rest_api.serializers.doc_serializers import (DoctorRegisterSerializer,
DoctorUsersSerializer, DoctorLoginSerializer, DoctorChangePasswordSerializer)
from rest_framework.response import Response
from rest_api.send_mail import password_reset_token_created, send_confirmation_email
from rest_framework import status
from doctorsUser.models import DoctorUser
from drf_yasg.utils import swagger_auto_schema
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework_simplejwt.backends import TokenBackend
from rest_api.permissions import IsOwnerOrReadOnly
import jwt
from config.settings import SECRET_KEY
class DoctorUsersView(generics.ListAPIView):
    """List doctor users, optionally filtered via the ``?search`` query param."""

    queryset = DoctorUser.objects.all()
    serializer_class = DoctorUsersSerializer

    @swagger_auto_schema(operation_description='List doctor users (can add params(?search) for search)', tags=['Doctor User'],
                         security=[])
    def get(self, request):
        # Delegate to ListAPIView's standard list() machinery.
        return self.list(request)

    def get_queryset(self):
        # Narrow the base queryset by full name when a search term is present.
        queryset = super().get_queryset()
        term = self.request.query_params.get("search")
        if term:
            queryset = queryset.filter(fullname__icontains=term)
        return queryset
class Doctor(generics.RetrieveAPIView):
    """Retrieve a single doctor user by primary key."""

    queryset = DoctorUser.objects.all()
    serializer_class = DoctorUsersSerializer
class DoctorUserRegisterView(generics.CreateAPIView):
    """Register a new doctor user and send a confirmation e-mail."""

    serializer_class = DoctorRegisterSerializer

    @swagger_auto_schema(operation_description='Registration doctor users', tags=['Doctor User'],
                         security=[])
    def post(self, request):
        """Validate the payload, create the user and return 201 with its data.

        An invalid payload raises DRF's ``ValidationError`` (rendered as
        HTTP 400) because of ``raise_exception=True``.
        """
        serializer = DoctorRegisterSerializer(data=request.data)
        # With raise_exception=True an invalid payload never falls through,
        # so the success path does not need to be nested under an ``if``.
        # The original nested it anyway, leaving an implicit ``return None``
        # branch that would have produced an empty HTTP 500 response.
        serializer.is_valid(raise_exception=True)
        user = serializer.save()
        if user:
            # Confirmation e-mail for the freshly created account.
            send_confirmation_email(request, user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class DoctorLoginView(TokenObtainPairView):
    """Issue a JWT token pair for doctor credentials (login endpoint)."""

    serializer_class = DoctorLoginSerializer
    permission_classes = [AllowAny]

    @swagger_auto_schema(operation_description='Login doctor users', tags=['Doctor User'],
                         security=[])
    def post(self, request, *args, **kwargs):
        # Override exists only to attach the swagger schema; behaviour is
        # entirely inherited from TokenObtainPairView.
        return super().post(request, *args, **kwargs)
class DoctorChangePasswordView(generics.UpdateAPIView):
    """Change the authenticated doctor's password (old password required)."""

    serializer_class = DoctorChangePasswordSerializer
    model = DoctorUser
    permission_classes = (IsOwnerOrReadOnly,)

    def update(self, request, *args, **kwargs):
        """Validate the old password, set the new one and report the result.

        Returns 400 with field errors for an invalid payload or a wrong old
        password; 200 with a success envelope otherwise.
        """
        user = self.request.user
        serializer: DoctorChangePasswordSerializer = self.get_serializer(data=request.data)
        # Guard clauses keep the success path flat.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        if not user.check_password(serializer.data.get("old_password")):
            # The original returned the anonymiser placeholder "<PASSWORD>";
            # replaced with a real client-facing message.
            return Response({"old_password": "Wrong password."}, status=status.HTTP_400_BAD_REQUEST)
        user.set_password(serializer.data.get("new_password"))
        user.save()
        return Response({
            'status': 'success',
            'code': status.HTTP_200_OK,
            'message': 'Password updated successfully',
            'data': []
        })
class DoctorForgotPasswordView(APIView):
    """Trigger a password-reset e-mail for a doctor user."""

    permission_classes = [IsOwnerOrReadOnly, ]

    @swagger_auto_schema(operation_description='Reset password doctor users', tags=['Doctor User'],
                         security=[])
    def get(self, request, *args, **kwargs):
        """Send the reset e-mail and acknowledge with HTTP 200."""
        password_reset_token_created(request)
        # Fixed the client-facing typo ("Email was sended" -> "Email was sent").
        return response.Response("Email was sent", status=status.HTTP_200_OK)
class DoctorInfo(generics.ListAPIView):
    """Return the authenticated doctor's own profile data.

    The user is resolved manually from the JWT access token carried in the
    ``Authorization: Bearer <token>`` header.
    """

    serializer_class = DoctorUsersSerializer
    model = DoctorUser
    permission_classes = [IsAuthenticated]

    def get(self, request, *args, **kwargs):
        """Decode the bearer token and return the profile, or 401 on failure.

        The original parsed the header, decoded the token and fetched the
        user *outside* the try block, so a missing/malformed header, an
        invalid token or an unknown user id raised an unhandled exception
        (HTTP 500) instead of returning 401; all of it now sits inside.
        """
        try:
            token = request.META.get('HTTP_AUTHORIZATION', " ").split(' ')[1]
            user_id = jwt.decode(token, SECRET_KEY, algorithms=["HS256"])["user_id"]
            request.user = DoctorUser.objects.get(pk=user_id)
            data = {
                "id": str(request.user.id),
                "fullname": str(request.user.fullname),
                "username": str(request.user.username),
                "email": str(request.user.email),
                "phone_number": str(request.user.phone_number),
                "license_image": str(request.user.license_image),
                "avatar": str(request.user.avatar),
                "profession": str(request.user.profession),
                "experience": str(request.user.experience),
                "price": str(request.user.price),
                "company": str(request.user.company),
                "address": str(request.user.address),
                "is_active": str(request.user.is_active),
            }
            return response.Response(data, status=200)
        except Exception:
            # Boundary handler: any token/lookup problem means "not logged in".
            return response.Response("Login does not succeded", status=401)
"jwt.decode",
"rest_api.send_mail.password_reset_token_created",
"rest_api.serializers.doc_serializers.DoctorRegisterSerializer",
"rest_api.send_mail.send_confirmation_email",
"doctorsUser.models.DoctorUser.objects.get",
"drf_yasg.utils.swagger_auto_schema",
"rest_framework.response.Response",
"doctor... | [((885, 909), 'doctorsUser.models.DoctorUser.objects.all', 'DoctorUser.objects.all', ([], {}), '()\n', (907, 909), False, 'from doctorsUser.models import DoctorUser\n'), ((961, 1105), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""List doctor users (can add params(?search) for search)"""', 'tags': "['Doctor User']", 'security': '[]'}), "(operation_description=\n 'List doctor users (can add params(?search) for search)', tags=[\n 'Doctor User'], security=[])\n", (980, 1105), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((1557, 1581), 'doctorsUser.models.DoctorUser.objects.all', 'DoctorUser.objects.all', ([], {}), '()\n', (1579, 1581), False, 'from doctorsUser.models import DoctorUser\n'), ((1691, 1801), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""Registration doctor users"""', 'tags': "['Doctor User']", 'security': '[]'}), "(operation_description='Registration doctor users', tags\n =['Doctor User'], security=[])\n", (1710, 1801), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((2301, 2404), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""Login doctor users"""', 'tags': "['Doctor User']", 'security': '[]'}), "(operation_description='Login doctor users', tags=[\n 'Doctor User'], security=[])\n", (2320, 2404), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((3569, 3680), 'drf_yasg.utils.swagger_auto_schema', 'swagger_auto_schema', ([], {'operation_description': '"""Reset password doctor users"""', 'tags': "['Doctor User']", 'security': '[]'}), "(operation_description='Reset password doctor users',\n tags=['Doctor User'], security=[])\n", (3588, 3680), False, 'from drf_yasg.utils import swagger_auto_schema\n'), ((1872, 1915), 'rest_api.serializers.doc_serializers.DoctorRegisterSerializer', 'DoctorRegisterSerializer', ([], {'data': 'request.data'}), 
'(data=request.data)\n', (1896, 1915), False, 'from rest_api.serializers.doc_serializers import DoctorRegisterSerializer, DoctorUsersSerializer, DoctorLoginSerializer, DoctorChangePasswordSerializer\n'), ((3410, 3473), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (3418, 3473), False, 'from rest_framework.response import Response\n'), ((3755, 3792), 'rest_api.send_mail.password_reset_token_created', 'password_reset_token_created', (['request'], {}), '(request)\n', (3783, 3792), False, 'from rest_api.send_mail import password_reset_token_created, send_confirmation_email\n'), ((3808, 3872), 'rest_framework.response.Response', 'response.Response', (['"""Email was sended"""'], {'status': 'status.HTTP_200_OK'}), "('Email was sended', status=status.HTTP_200_OK)\n", (3825, 3872), False, 'from rest_framework import response\n'), ((4262, 4296), 'doctorsUser.models.DoctorUser.objects.get', 'DoctorUser.objects.get', ([], {'pk': 'user_id'}), '(pk=user_id)\n', (4284, 4296), False, 'from doctorsUser.models import DoctorUser\n'), ((3201, 3320), 'rest_framework.response.Response', 'Response', (["{'status': 'success', 'code': status.HTTP_200_OK, 'message':\n 'Password updated successfully', 'data': []}"], {}), "({'status': 'success', 'code': status.HTTP_200_OK, 'message':\n 'Password updated successfully', 'data': []})\n", (3209, 3320), False, 'from rest_framework.response import Response\n'), ((4176, 4227), 'jwt.decode', 'jwt.decode', (['token', 'SECRET_KEY'], {'algorithms': "['HS256']"}), "(token, SECRET_KEY, algorithms=['HS256'])\n", (4186, 4227), False, 'import jwt\n'), ((5088, 5123), 'rest_framework.response.Response', 'response.Response', (['data'], {'status': '(200)'}), '(data, status=200)\n', (5105, 5123), False, 'from rest_framework import response\n'), ((2044, 2082), 'rest_api.send_mail.send_confirmation_email', 'send_confirmation_email', 
(['request', 'user'], {}), '(request, user)\n', (2067, 2082), False, 'from rest_api.send_mail import password_reset_token_created, send_confirmation_email\n'), ((2106, 2163), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (2114, 2163), False, 'from rest_framework.response import Response\n'), ((3014, 3090), 'rest_framework.response.Response', 'Response', (["{'old_password': '<PASSWORD>'}"], {'status': 'status.HTTP_400_BAD_REQUEST'}), "({'old_password': '<PASSWORD>'}, status=status.HTTP_400_BAD_REQUEST)\n", (3022, 3090), False, 'from rest_framework.response import Response\n'), ((5169, 5225), 'rest_framework.response.Response', 'response.Response', (['"""Login does not succeded"""'], {'status': '(401)'}), "('Login does not succeded', status=401)\n", (5186, 5225), False, 'from rest_framework import response\n')] |
from google.appengine.ext import webapp
from wsgiref.handlers import CGIHandler
from model import Membership
from model import Group
from model import Transaction
class WhatHandler(webapp.RequestHandler):
    """Render a paginated HTML list of recent transactions (20 per page)."""

    def get(self):
        # Parse the ?p= page number; fall back to page 1 on missing or
        # non-numeric input (the original raised ValueError on e.g. ?p=abc)
        # and clamp to 1 so the GQL offset can never go negative.
        raw_page = self.request.get('p')
        try:
            page = int(raw_page)
        except (TypeError, ValueError):
            page = 1
        if page < 1:
            page = 1
        offset = (page - 1) * 20

        # Pager navigation: "Previous" is omitted on the first page.
        if page != 1:
            self.response.out.write("<a href=\"?p=%s\">Previous</a> | " % (page - 1))
        self.response.out.write(" %s " % page)
        self.response.out.write(" | <a href=\"?p=%s\">Next</a>" % (page + 1))
        self.response.out.write("<br/><br/>")

        self.response.out.write("<ul>")
        for tr in Transaction.gql("ORDER BY date DESC LIMIT %s, %s" % (offset, 20)):
            try:
                self.response.out.write("<li>In %s: %s <b>%s</b> %s ($%s due to \"%s\", %s)</li>" % (
                    tr.group.name,
                    tr.fromMember.userNick,
                    tr.type,
                    tr.toMember.userNick,
                    tr.amount,
                    tr.reason,
                    tr.date))
            except Exception:
                # A referenced group/member entity may have been deleted;
                # skip the broken row instead of failing the whole page.
                # (Was a bare ``except:``, which would also swallow
                # SystemExit/KeyboardInterrupt.)
                self.response.out.write("<li style=\"color:blue\">Group must have been deleted...</li>")
        self.response.out.write("</ul>")
def main():
    """Wire the /what route and serve the application through CGI."""
    application = webapp.WSGIApplication([
        ('/what', WhatHandler),
    ], debug=True)
    # The original called CGIHandler().run(application) at module level,
    # but ``application`` was local to main() - that line raised NameError
    # on import.  Running it here (behind the standard guard) fixes it.
    CGIHandler().run(application)


if __name__ == '__main__':
    main()
"model.Transaction.gql",
"google.appengine.ext.webapp.WSGIApplication",
"wsgiref.handlers.CGIHandler"
] | [((1248, 1308), 'google.appengine.ext.webapp.WSGIApplication', 'webapp.WSGIApplication', (["[('/what', WhatHandler)]"], {'debug': '(True)'}), "([('/what', WhatHandler)], debug=True)\n", (1270, 1308), False, 'from google.appengine.ext import webapp\n'), ((710, 775), 'model.Transaction.gql', 'Transaction.gql', (["('ORDER BY date DESC LIMIT %s, %s' % (offset, 20))"], {}), "('ORDER BY date DESC LIMIT %s, %s' % (offset, 20))\n", (725, 775), False, 'from model import Transaction\n'), ((1405, 1417), 'wsgiref.handlers.CGIHandler', 'CGIHandler', ([], {}), '()\n', (1415, 1417), False, 'from wsgiref.handlers import CGIHandler\n')] |
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from torch.autograd import Variable
import argparse
import numpy as np
from torch.optim.lr_scheduler import *
from model.resnet import resnet101
from data_pre.FashionAI import fashion
# ---- command-line configuration -------------------------------------------
parser=argparse.ArgumentParser()
parser.add_argument('--workers',type=int,default=2)
parser.add_argument('--batchSize',type=int,default=64)
parser.add_argument('--nepoch',type=int,default=11)
parser.add_argument('--lr',type=float,default=0.001)
parser.add_argument('--gpu',type=str,default='7')
parser.add_argument('--attr',type=str,default='collar_design_labels')
opt=parser.parse_args()
print(opt)
# Restrict CUDA to the requested GPU id(s) before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"]=opt.gpu
# Training augmentation: resize, random crop + horizontal flip, then
# normalize with the standard ImageNet mean/std (matches the pretrained
# ResNet's training statistics).
transform_train=transforms.Compose([
    transforms.Resize((256,256)),
    transforms.RandomCrop((224,224)),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
])
# Validation: deterministic resize + the same normalization, no augmentation.
transform_val=transforms.Compose([
    transforms.Resize((224,224)),
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406),(0.229,0.224,0.225))
])
trainset=fashion('/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv',transform_train,opt.attr,train=True)
trainloader=torch.utils.data.DataLoader(trainset,batch_size=opt.batchSize,shuffle=True,num_workers=opt.workers)
valset=fashion('/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv',transform_val,opt.attr,train=False)
valloader=torch.utils.data.DataLoader(valset,batch_size=opt.batchSize,shuffle=False,num_workers=opt.workers)
# Number of classes for each FashionAI attribute task (selected via --attr).
AttrNum={
    "coat_length_labels":8,
    "collar_design_labels":5,
    "lapel_design_labels":5,
    "neck_design_labels":5,
    "neckline_design_labels":10,
    "pant_length_labels":6,
    "skirt_length_labels":6,
    "sleeve_length_labels":9
}
# ImageNet-pretrained ResNet-101 with its classifier head replaced so the
# output width matches the selected attribute's class count.
model=resnet101(pretrained=True)
model.fc=nn.Linear(2048,AttrNum[opt.attr])
model.cuda()
optimizer=torch.optim.SGD(model.parameters(),lr=opt.lr,momentum=0.9,weight_decay=5e-4)
# StepLR comes from the wildcard import of torch.optim.lr_scheduler above;
# it decays the learning rate every 3 epochs.
scheduler=StepLR(optimizer,step_size=3)
criterion=nn.CrossEntropyLoss()
criterion.cuda()
def train(epoch):
    """Run one training epoch over ``trainloader`` (uses module-level globals)."""
    print('\nTrain Epoch:%d' % epoch)
    scheduler.step()   # advance the StepLR schedule once per epoch
    model.train()      # enable dropout / batch-norm training mode
    n_batches = len(trainloader)
    for step, (inputs, targets) in enumerate(trainloader):
        inputs = Variable(inputs.cuda())
        targets = Variable(targets.cuda())
        optimizer.zero_grad()
        logits = model(inputs)
        batch_loss = criterion(logits, targets)
        batch_loss.backward()
        optimizer.step()
        # Log every 20th batch.
        if step % 20 == 0:
            print("Epoch: %d [%d:%d] loss: %f" % (epoch, step, n_batches, batch_loss.mean()))
def val(epoch):
    """Evaluate on ``valloader`` and print top-1 accuracy (uses module globals)."""
    print('\nTest Epoch:%d' % epoch)
    model.eval()   # disable dropout / use running batch-norm statistics
    seen = 0
    hits = 0
    for inputs, targets in valloader:
        inputs = Variable(inputs.cuda(), volatile=True)  # no autograd history
        targets = Variable(targets.cuda())
        logits = model(inputs)
        _, predicted = torch.max(logits.data, 1)
        seen += inputs.size(0)
        hits += predicted.eq(targets.data).cpu().sum()
    print("Acc:%f" % ((1.0 * hits) / seen))
# Main loop: one training pass + one validation pass per epoch, then persist
# the final weights for this attribute task under ckp/.
for epoch in range(opt.nepoch):
    train(epoch)
    val(epoch)
torch.save(model.state_dict(),'ckp/model_task_%s.pth' % opt.attr)
| [
"torch.nn.CrossEntropyLoss",
"argparse.ArgumentParser",
"torch.max",
"torchvision.transforms.RandomHorizontalFlip",
"model.resnet.resnet101",
"torchvision.transforms.RandomCrop",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torchvision.transforms.Resize",
"torchvision.transforms.Normalize",
... | [((325, 350), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (348, 350), False, 'import argparse\n'), ((1183, 1324), 'data_pre.FashionAI.fashion', 'fashion', (['"""/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv"""', 'transform_train', 'opt.attr'], {'train': '(True)'}), "(\n '/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv'\n , transform_train, opt.attr, train=True)\n", (1190, 1324), False, 'from data_pre.FashionAI import fashion\n'), ((1325, 1432), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'opt.batchSize', 'shuffle': '(True)', 'num_workers': 'opt.workers'}), '(trainset, batch_size=opt.batchSize, shuffle=\n True, num_workers=opt.workers)\n', (1352, 1432), False, 'import torch\n'), ((1433, 1573), 'data_pre.FashionAI.fashion', 'fashion', (['"""/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv"""', 'transform_val', 'opt.attr'], {'train': '(False)'}), "(\n '/home/yhf/Challenge/FashionAI/STL_FashionAI/data/2base/Annotations/sum_labels.csv'\n , transform_val, opt.attr, train=False)\n", (1440, 1573), False, 'from data_pre.FashionAI import fashion\n'), ((1572, 1677), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['valset'], {'batch_size': 'opt.batchSize', 'shuffle': '(False)', 'num_workers': 'opt.workers'}), '(valset, batch_size=opt.batchSize, shuffle=False,\n num_workers=opt.workers)\n', (1599, 1677), False, 'import torch\n'), ((1912, 1938), 'model.resnet.resnet101', 'resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1921, 1938), False, 'from model.resnet import resnet101\n'), ((1949, 1983), 'torch.nn.Linear', 'nn.Linear', (['(2048)', 'AttrNum[opt.attr]'], {}), '(2048, AttrNum[opt.attr])\n', (1958, 1983), True, 'import torch.nn as nn\n'), ((2137, 2158), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2156, 2158), True, 'import torch.nn 
as nn\n'), ((812, 841), 'torchvision.transforms.Resize', 'transforms.Resize', (['(256, 256)'], {}), '((256, 256))\n', (829, 841), True, 'import torchvision.transforms as transforms\n'), ((844, 877), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224, 224)'], {}), '((224, 224))\n', (865, 877), True, 'import torchvision.transforms as transforms\n'), ((880, 913), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (911, 913), True, 'import torchvision.transforms as transforms\n'), ((917, 938), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (936, 938), True, 'import torchvision.transforms as transforms\n'), ((942, 1008), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (962, 1008), True, 'import torchvision.transforms as transforms\n'), ((1048, 1077), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1065, 1077), True, 'import torchvision.transforms as transforms\n'), ((1080, 1101), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1099, 1101), True, 'import torchvision.transforms as transforms\n'), ((1105, 1171), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.485, 0.456, 0.406)', '(0.229, 0.224, 0.225)'], {}), '((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n', (1125, 1171), True, 'import torchvision.transforms as transforms\n'), ((2864, 2886), 'torch.max', 'torch.max', (['out.data', '(1)'], {}), '(out.data, 1)\n', (2873, 2886), False, 'import torch\n')] |
import arcade
import math
import LevelGenerator
import Textures
import Sounds
from Constants import TILE_SIZE, ROOM_WIDTH, ROOM_HEIGHT
from Mob import Mob
from Projectile import Projectile
class Player(Mob):
    """Keyboard-controlled player character.

    Extends ``Mob`` with input handling (walk/sprint/dash/crawl/jump/attack),
    frame-counter-based sprite animation, and a health system with
    post-hit invincibility frames.
    """

    def __init__(self, x, y, keyboard):
        """Create the player at (x, y), reading input from ``keyboard``."""
        self.keyboard = keyboard
        # Movement tuning.
        self.movespeed = 2.5
        self.jump_height = 4   # vertical speed applied each tick while jumping
        self.jumping = False
        # Attack cooldown, measured in update ticks.
        self.max_attack_speed = 12
        self.curr_attack_speed = 0
        self.attack_dir = 0   # NOTE(review): appears unused within this class
        # Variable-height jump: height accumulated this jump, bounded by
        # min_jump_height / max_jump_height in update().
        self.curr_jump_height = 0
        self.min_jump_height = 8
        self.max_jump_height = 64
        # Animation counters; each "*_frame_speed" is ticks per sprite frame.
        self.walk_count = 0
        self.walk_frame_speed = 8
        self.not_mirrored = True   # True when last facing right
        self.curr_dash_frame = 0
        self.dash_frame_speed = 12
        self.dashing = False
        self.crawling = False
        self.curr_crawl_frame = 0
        self.crawl_frame_speed = 16
        # Health and invincibility-frame state.
        self.health = 9
        self.curr_invis_frame = 0
        self.invis_frame = 150   # invincibility duration started by hurt()
        # Textures
        self.idle_texture = Textures.get_texture(0, 4)
        self.idle_texture_mirrored = Textures.get_texture(0, 5)
        self.walking_textures = Textures.get_textures(1, 4, 4)
        self.walking_textures_mirrored = Textures.get_textures(1, 5, 4)
        self.dash_textures = Textures.get_textures(5, 4, 3)
        self.dash_textures_mirrored = Textures.get_textures(5, 5, 3)
        self.crawl_textures = Textures.get_textures(7, 4, 4)
        self.crawl_textures_mirrored = Textures.get_textures(7, 5, 4)
        super().__init__(self.idle_texture, x, y)

    def update(self):
        """Per-frame tick: read input, resolve movement, select the sprite."""
        speed_mult = 1
        if self.keyboard.is_pressed("sprint"):
            speed_mult = 2
        if self.keyboard.is_pressed("dash"):
            if not self.dashing:
                # Small upward boost at the start of a dash.
                self.change_y += 2
            self.dashing = True
        if self.keyboard.is_pressed("l"):
            pass
            # self.level.reset = True
        if self.keyboard.is_pressed("down"):
            self.change_y -= 0.1
            self.crawling = True
            speed_mult *= 0.5   # crawling halves movement speed
        else:
            self.crawling = False
        if self.keyboard.is_pressed("attack"):
            if self.curr_attack_speed == 0:
                # Holding up/down biases the throw direction vertically.
                extra_y_dir = 0
                if self.keyboard.is_pressed("up"):
                    extra_y_dir = 4
                elif self.keyboard.is_pressed("down"):
                    extra_y_dir = -4
                attack_x = (self.change_x) * 4
                attack_y = (self.change_y + extra_y_dir) * 3
                # Velocity angle in degrees; picks one of eight directional
                # projectile sprites (45-degree buckets) from the sheet.
                attack_angle = int(math.atan2(attack_y, attack_x)/math.pi*180)
                card = Projectile(
                    Textures.SPRITESHEET[3 + int((attack_angle % 360) / 45) + 16],
                    self.center_x,
                    self.center_y,
                    attack_x,
                    attack_y)
                self.level.add_entity_to_list(card, self.level.entities)
                self.curr_attack_speed = self.max_attack_speed
                Sounds.play(Sounds.SHOOT)
        if self.curr_attack_speed > 0:
            self.curr_attack_speed -= 1   # tick down the attack cooldown
        if self.keyboard.is_pressed("jump"):
            # On the ground: start (or continue) the jump.
            if self.level.physics_engine.can_jump(1):
            # if self.level.engine.can_jump(self, 1):
                if not self.jumping:
                    Sounds.play(Sounds.JUMP)
                    self.level.physics_engine.jump(self.jump_height)
                self.jumping = True
            # elif self.level.engine.can_jump(self, -1):
            elif self.level.physics_engine.can_jump(-1):
                self.jumping = False
                self.curr_jump_height = 0
            # Cap the variable-height jump at max_jump_height.
            if self.curr_jump_height > self.max_jump_height:
                self.jumping = False
                self.curr_jump_height = 0
        elif self.curr_jump_height >= self.min_jump_height:
            # Jump key released after reaching the minimum height: stop rising.
            self.jumping = False
            self.curr_jump_height = 0
        if self.jumping:
            self.change_y = self.jump_height
            self.curr_jump_height += self.jump_height
        # Horizontal input; with no input, decelerate toward a stop and
        # remember the last facing direction for the idle sprite.
        if self.keyboard.is_pressed("left"):
            self.change_x = -self.movespeed * speed_mult
        elif self.keyboard.is_pressed("right"):
            self.change_x = self.movespeed * speed_mult
        else:
            if self.change_x > 1:
                self.change_x -= 1
                self.not_mirrored = True
            elif self.change_x < -1:
                self.change_x += 1
                self.not_mirrored = False
            else:
                self.change_x = 0
        # Advance the animation counter for the active movement state.
        if self.dashing:
            if self.change_x > 0:
                self.change_x = self.movespeed * speed_mult * 1.5
            elif self.change_x < 0:
                self.change_x = -self.movespeed * speed_mult * 1.5
            self.curr_dash_frame += 1
            if self.curr_dash_frame >= self.dash_frame_speed * len(self.dash_textures):
                # Dash animation finished; end the dash.
                self.curr_dash_frame = 0
                self.dashing = False
        elif self.crawling:
            self.curr_crawl_frame += 1
            if self.curr_crawl_frame >= self.crawl_frame_speed * len(self.crawl_textures):
                self.curr_crawl_frame = 0
        else:
            self.walk_count += 1
            if self.walk_count >= len(self.walking_textures) * self.walk_frame_speed:
                self.walk_count = 0
        # Sprite selection.  While invincibility frames are active, flicker
        # (blank texture for half of every 12-tick window).
        # NOTE(review): curr_invis_frame is never decremented in this class -
        # presumably handled in Mob.update(); confirm.
        if self.curr_invis_frame > 0 and self.curr_invis_frame % 12 < 6:
            self.texture = Textures.get_texture(15, 15)
        elif self.change_x > 0:
            if self.dashing:
                self.texture = self.dash_textures[self.curr_dash_frame // self.dash_frame_speed]
            elif self.crawling:
                self.texture = self.crawl_textures[self.curr_crawl_frame // self.crawl_frame_speed]
            else:
                self.texture = self.walking_textures[self.walk_count // self.walk_frame_speed]
            # self.player_dir = True
        elif self.change_x < 0:
            if self.dashing:
                self.texture = self.dash_textures_mirrored[self.curr_dash_frame // self.dash_frame_speed]
            elif self.crawling:
                self.texture = self.crawl_textures_mirrored[self.curr_crawl_frame // self.crawl_frame_speed]
            else:
                self.texture = self.walking_textures_mirrored[self.walk_count // self.walk_frame_speed]
            # self.player_dir = False
        else:
            # Standing still: idle or crawl-idle, facing the last direction.
            if self.not_mirrored:
                if self.crawling:
                    self.texture = self.crawl_textures[0]
                else:
                    self.texture = self.idle_texture
            else:
                if self.crawling:
                    self.texture = self.crawl_textures_mirrored[0]
                else:
                    self.texture = self.idle_texture_mirrored
        super().update()

    def collided(self, entity, dx, dy):
        """Collision callback; defers entirely to Mob's handling."""
        super().collided(entity, dx, dy)

    def hurt(self, damage, knockback):
        """Apply ``damage`` and ``knockback`` unless invincibility is active.

        Zero damage is ignored.  A successful hit starts the invincibility
        window and, if health is exhausted, flags game-over on the level.
        """
        if damage == 0:
            return
        if self.curr_invis_frame <= 0:
            self.health -= damage
            self.change_x += knockback
            self.curr_invis_frame = self.invis_frame
            Sounds.play(Sounds.HURT)
            if self.health <= 0:
                self.level.game_over = True
                self.level.game_over_timer = 180
"Textures.get_textures",
"Sounds.play",
"Textures.get_texture",
"math.atan2"
] | [((1010, 1036), 'Textures.get_texture', 'Textures.get_texture', (['(0)', '(4)'], {}), '(0, 4)\n', (1030, 1036), False, 'import Textures\n'), ((1074, 1100), 'Textures.get_texture', 'Textures.get_texture', (['(0)', '(5)'], {}), '(0, 5)\n', (1094, 1100), False, 'import Textures\n'), ((1133, 1163), 'Textures.get_textures', 'Textures.get_textures', (['(1)', '(4)', '(4)'], {}), '(1, 4, 4)\n', (1154, 1163), False, 'import Textures\n'), ((1205, 1235), 'Textures.get_textures', 'Textures.get_textures', (['(1)', '(5)', '(4)'], {}), '(1, 5, 4)\n', (1226, 1235), False, 'import Textures\n'), ((1265, 1295), 'Textures.get_textures', 'Textures.get_textures', (['(5)', '(4)', '(3)'], {}), '(5, 4, 3)\n', (1286, 1295), False, 'import Textures\n'), ((1334, 1364), 'Textures.get_textures', 'Textures.get_textures', (['(5)', '(5)', '(3)'], {}), '(5, 5, 3)\n', (1355, 1364), False, 'import Textures\n'), ((1395, 1425), 'Textures.get_textures', 'Textures.get_textures', (['(7)', '(4)', '(4)'], {}), '(7, 4, 4)\n', (1416, 1425), False, 'import Textures\n'), ((1465, 1495), 'Textures.get_textures', 'Textures.get_textures', (['(7)', '(5)', '(4)'], {}), '(7, 5, 4)\n', (1486, 1495), False, 'import Textures\n'), ((5488, 5516), 'Textures.get_texture', 'Textures.get_texture', (['(15)', '(15)'], {}), '(15, 15)\n', (5508, 5516), False, 'import Textures\n'), ((7225, 7249), 'Sounds.play', 'Sounds.play', (['Sounds.HURT'], {}), '(Sounds.HURT)\n', (7236, 7249), False, 'import Sounds\n'), ((3015, 3040), 'Sounds.play', 'Sounds.play', (['Sounds.SHOOT'], {}), '(Sounds.SHOOT)\n', (3026, 3040), False, 'import Sounds\n'), ((3332, 3356), 'Sounds.play', 'Sounds.play', (['Sounds.JUMP'], {}), '(Sounds.JUMP)\n', (3343, 3356), False, 'import Sounds\n'), ((2570, 2600), 'math.atan2', 'math.atan2', (['attack_y', 'attack_x'], {}), '(attack_y, attack_x)\n', (2580, 2600), False, 'import math\n')] |
import tensorflow as tf
from data_types.training_result import TrainingResult
from data_types.training_set import TrainingSet
from timeseries.build import compile_and_fit
from timeseries.window_generator import WindowGenerator
def evaluate_linear(
training_set: TrainingSet
) -> TrainingResult:
## LINEAR
linear = tf.keras.Sequential([
tf.keras.layers.Dense(units=1)
])
single_step_window = WindowGenerator(
input_width=1,
label_width=1,
shift=1,
training_set=training_set,
label_columns=['T (degC)']
)
print('Input shape:', single_step_window.example[0].shape)
print('Output shape:', linear(single_step_window.example[0]).shape)
compile_and_fit(linear, single_step_window)
wide_window = WindowGenerator(
input_width=24,
label_width=24,
shift=1,
label_columns=['T (degC)'],
training_set=training_set
)
wide_window.plot(linear)
metric_index = linear.metrics_names.index('mean_absolute_error')
return TrainingResult(
performance=linear.evaluate(single_step_window.test, verbose=0)[metric_index],
validation_performance=linear.evaluate(single_step_window.val)[metric_index]
) | [
"tensorflow.keras.layers.Dense",
"timeseries.window_generator.WindowGenerator",
"timeseries.build.compile_and_fit"
] | [((427, 541), 'timeseries.window_generator.WindowGenerator', 'WindowGenerator', ([], {'input_width': '(1)', 'label_width': '(1)', 'shift': '(1)', 'training_set': 'training_set', 'label_columns': "['T (degC)']"}), "(input_width=1, label_width=1, shift=1, training_set=\n training_set, label_columns=['T (degC)'])\n", (442, 541), False, 'from timeseries.window_generator import WindowGenerator\n'), ((724, 767), 'timeseries.build.compile_and_fit', 'compile_and_fit', (['linear', 'single_step_window'], {}), '(linear, single_step_window)\n', (739, 767), False, 'from timeseries.build import compile_and_fit\n'), ((787, 903), 'timeseries.window_generator.WindowGenerator', 'WindowGenerator', ([], {'input_width': '(24)', 'label_width': '(24)', 'shift': '(1)', 'label_columns': "['T (degC)']", 'training_set': 'training_set'}), "(input_width=24, label_width=24, shift=1, label_columns=[\n 'T (degC)'], training_set=training_set)\n", (802, 903), False, 'from timeseries.window_generator import WindowGenerator\n'), ((363, 393), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)'}), '(units=1)\n', (384, 393), True, 'import tensorflow as tf\n')] |
import matplotlib.pyplot as plt
import numpy as np
from gpar.regression import GPARRegressor
from wbml.experiment import WorkingDirectory
import wbml.plot
if __name__ == "__main__":
wd = WorkingDirectory("_experiments", "synthetic", seed=1)
# Create toy data set.
n = 200
x = np.linspace(0, 1, n)
noise = 0.1
# Draw functions depending on each other in complicated ways.
f1 = -np.sin(10 * np.pi * (x + 1)) / (2 * x + 1) - x ** 4
f2 = np.cos(f1) ** 2 + np.sin(3 * x)
f3 = f2 * f1 ** 2 + 3 * x
f = np.stack((f1, f2, f3), axis=0).T
# Add noise and subsample.
y = f + noise * np.random.randn(n, 3)
x_obs, y_obs = x[::8], y[::8]
# Fit and predict GPAR.
model = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
impute=True,
replace=False,
normalise_y=False,
)
model.fit(x_obs, y_obs)
means, lowers, uppers = model.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Fit and predict independent GPs: set `markov=0` in GPAR.
igp = GPARRegressor(
scale=0.1,
linear=True,
linear_scale=10.0,
nonlinear=True,
nonlinear_scale=0.1,
noise=0.1,
markov=0,
normalise_y=False,
)
igp.fit(x_obs, y_obs)
igp_means, igp_lowers, igp_uppers = igp.predict(
x, num_samples=200, credible_bounds=True, latent=True
)
# Plot the result.
plt.figure(figsize=(15, 3))
for i in range(3):
plt.subplot(1, 3, i + 1)
# Plot observations.
plt.scatter(x_obs, y_obs[:, i], label="Observations", style="train")
plt.plot(x, f[:, i], label="Truth", style="test")
# Plot GPAR.
plt.plot(x, means[:, i], label="GPAR", style="pred")
plt.fill_between(x, lowers[:, i], uppers[:, i], style="pred")
# Plot independent GPs.
plt.plot(x, igp_means[:, i], label="IGP", style="pred2")
plt.fill_between(x, igp_lowers[:, i], igp_uppers[:, i], style="pred2")
plt.xlabel("$t$")
plt.ylabel(f"$y_{i + 1}$")
wbml.plot.tweak(legend=i == 2)
plt.tight_layout()
plt.savefig(wd.file("synthetic.pdf"))
| [
"gpar.regression.GPARRegressor",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.fill_between",
"wbml.experiment.WorkingDirectory",
"numpy.linspace",
"matplotlib.pyplot.figure",
"numpy.stack",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"... | [((192, 245), 'wbml.experiment.WorkingDirectory', 'WorkingDirectory', (['"""_experiments"""', '"""synthetic"""'], {'seed': '(1)'}), "('_experiments', 'synthetic', seed=1)\n", (208, 245), False, 'from wbml.experiment import WorkingDirectory\n'), ((294, 314), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (305, 314), True, 'import numpy as np\n'), ((721, 881), 'gpar.regression.GPARRegressor', 'GPARRegressor', ([], {'scale': '(0.1)', 'linear': '(True)', 'linear_scale': '(10.0)', 'nonlinear': '(True)', 'nonlinear_scale': '(0.1)', 'noise': '(0.1)', 'impute': '(True)', 'replace': '(False)', 'normalise_y': '(False)'}), '(scale=0.1, linear=True, linear_scale=10.0, nonlinear=True,\n nonlinear_scale=0.1, noise=0.1, impute=True, replace=False, normalise_y\n =False)\n', (734, 881), False, 'from gpar.regression import GPARRegressor\n'), ((1165, 1302), 'gpar.regression.GPARRegressor', 'GPARRegressor', ([], {'scale': '(0.1)', 'linear': '(True)', 'linear_scale': '(10.0)', 'nonlinear': '(True)', 'nonlinear_scale': '(0.1)', 'noise': '(0.1)', 'markov': '(0)', 'normalise_y': '(False)'}), '(scale=0.1, linear=True, linear_scale=10.0, nonlinear=True,\n nonlinear_scale=0.1, noise=0.1, markov=0, normalise_y=False)\n', (1178, 1302), False, 'from gpar.regression import GPARRegressor\n'), ((1545, 1572), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 3)'}), '(figsize=(15, 3))\n', (1555, 1572), True, 'import matplotlib.pyplot as plt\n'), ((2231, 2249), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2247, 2249), True, 'import matplotlib.pyplot as plt\n'), ((487, 500), 'numpy.sin', 'np.sin', (['(3 * x)'], {}), '(3 * x)\n', (493, 500), True, 'import numpy as np\n'), ((539, 569), 'numpy.stack', 'np.stack', (['(f1, f2, f3)'], {'axis': '(0)'}), '((f1, f2, f3), axis=0)\n', (547, 569), True, 'import numpy as np\n'), ((1605, 1629), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(i + 1)'], {}), '(1, 3, i + 1)\n', 
(1616, 1629), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1736), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_obs', 'y_obs[:, i]'], {'label': '"""Observations"""', 'style': '"""train"""'}), "(x_obs, y_obs[:, i], label='Observations', style='train')\n", (1679, 1736), True, 'import matplotlib.pyplot as plt\n'), ((1745, 1794), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'f[:, i]'], {'label': '"""Truth"""', 'style': '"""test"""'}), "(x, f[:, i], label='Truth', style='test')\n", (1753, 1794), True, 'import matplotlib.pyplot as plt\n'), ((1825, 1877), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'means[:, i]'], {'label': '"""GPAR"""', 'style': '"""pred"""'}), "(x, means[:, i], label='GPAR', style='pred')\n", (1833, 1877), True, 'import matplotlib.pyplot as plt\n'), ((1886, 1947), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'lowers[:, i]', 'uppers[:, i]'], {'style': '"""pred"""'}), "(x, lowers[:, i], uppers[:, i], style='pred')\n", (1902, 1947), True, 'import matplotlib.pyplot as plt\n'), ((1989, 2045), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'igp_means[:, i]'], {'label': '"""IGP"""', 'style': '"""pred2"""'}), "(x, igp_means[:, i], label='IGP', style='pred2')\n", (1997, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2054, 2124), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['x', 'igp_lowers[:, i]', 'igp_uppers[:, i]'], {'style': '"""pred2"""'}), "(x, igp_lowers[:, i], igp_uppers[:, i], style='pred2')\n", (2070, 2124), True, 'import matplotlib.pyplot as plt\n'), ((2134, 2151), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$t$"""'], {}), "('$t$')\n", (2144, 2151), True, 'import matplotlib.pyplot as plt\n'), ((2160, 2186), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['f"""$y_{i + 1}$"""'], {}), "(f'$y_{i + 1}$')\n", (2170, 2186), True, 'import matplotlib.pyplot as plt\n'), ((469, 479), 'numpy.cos', 'np.cos', (['f1'], {}), '(f1)\n', (475, 479), True, 'import numpy as np\n'), ((624, 645), 'numpy.random.randn', 
'np.random.randn', (['n', '(3)'], {}), '(n, 3)\n', (639, 645), True, 'import numpy as np\n'), ((408, 436), 'numpy.sin', 'np.sin', (['(10 * np.pi * (x + 1))'], {}), '(10 * np.pi * (x + 1))\n', (414, 436), True, 'import numpy as np\n')] |
#%%
import numpy as np
import pandas as pd
import bokeh.plotting
import bokeh.io
import bokeh.models
import growth.model
import growth.viz
const = growth.model.load_constants()
colors, palette = growth.viz.bokeh_style()
mapper = growth.viz.load_markercolors()
bokeh.io.output_file('../../figures/interactive/interactive_ecoli_data.html')
# Define constants
gamma_max = const['gamma_max']
phi_O = const['phi_O']
Kd_cpc = const['Kd_cpc']
nu_max= np.arange(0.001, 50, 0.001)
const_phiRb = 0.25
# Load the mass_frac
mass_frac = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv')
elong = pd.read_csv('../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv')
# Add markers and colors to maintain consistency.
markers = [mapper[g]['m_bokeh'] for g in mass_frac['source'].values]
_colors = [mapper[g]['c'] for g in mass_frac['source'].values]
mass_frac['marker'] = markers
mass_frac['color'] = _colors
markers = [mapper[g]['m_bokeh'] for g in elong['source'].values]
_colors = [mapper[g]['c'] for g in elong['source'].values]
elong['marker'] = markers
elong['color'] = _colors
mass_frac = bokeh.models.ColumnDataSource(mass_frac)
elong = bokeh.models.ColumnDataSource(elong)
# Set up the initial scenarios
opt_phiRb = growth.model.phiRb_optimal_allocation(gamma_max, nu_max, Kd_cpc, phi_O)
opt_gamma = growth.model.steady_state_gamma(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
opt_lam = growth.model.steady_state_growth_rate(gamma_max, opt_phiRb, nu_max, Kd_cpc, phi_O)
const_phiRb = const_phiRb * np.ones_like(nu_max)
const_gamma = growth.model.steady_state_gamma(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
const_lam = growth.model.steady_state_growth_rate(gamma_max, const_phiRb, nu_max, Kd_cpc, phi_O)
trans_phiRb = growth.model.phiRb_constant_translation(gamma_max, nu_max, 10, Kd_cpc, phi_O)
trans_gamma = growth.model.steady_state_gamma(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O) * 7459 / 3600
trans_lam = growth.model.steady_state_growth_rate(gamma_max, trans_phiRb, nu_max, Kd_cpc, phi_O)
source = bokeh.models.ColumnDataSource({'phiRb': [const_phiRb, trans_phiRb, opt_phiRb],
'gamma': [const_gamma, trans_gamma, opt_gamma],
'lam': [const_lam, trans_lam, opt_lam],
'color': [colors['primary_black'],
colors['primary_green'],
colors['primary_blue']],
'label': ['scenario I: constant allocation',
'scenario II: constant translation rate',
'scenario III: optimal allocation'],
'filler_xs': [[], [], []],
'filler_ys': [[], [], []]})
# ##############################################################################
# WIDGET DEFINITIONS
# ##############################################################################
phiO_slider = bokeh.models.Slider(start=0, end=0.95, step=0.001, value=phi_O,
title='allocation to other proteins')
gamma_slider = bokeh.models.Slider(start=1, end=25, step=0.001, value=gamma_max * 7459 / 3600,
title='maximum translation speed [AA / s]')
Kd_cpc_slider = bokeh.models.Slider(start=-4, end=-0.0001, step=0.001, value=np.log10(Kd_cpc),
title='log\u2081\u2080 precursor Michaelis-Menten constant')
phiRb_slider = bokeh.models.Slider(start=0.001, end=0.45, step=0.001,
value = 0.25,
title='scenario I: constant ribosomal allocation parameter',
bar_color=colors['primary_black'],
default_size=350)
sc2_cpc_slider = bokeh.models.Slider(start=0, end=0.999, step=0.01,
value = 0.9,
title='scenario II: target translation speed (relative to max)',
bar_color=colors['primary_green'],
default_size=350)
# ##############################################################################
# CANVAS DEFINITION
# ##############################################################################
mass_frac_tooltips = [('source', '@source'),
('ribosomal allocation', '@mass_fraction{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}'),
('method', '@method')]
elong_tooltips = [('source', '@source'),
('translation rate [AA/s]', '@elongation_rate_aa_s{0.2f}'),
('growth rate\n[inv. hr.]', '@growth_rate_hr{0.2f}')]
mass_hover = bokeh.models.HoverTool(names=['data'], tooltips=mass_frac_tooltips)
elong_hover = bokeh.models.HoverTool(names=['data'], tooltips=elong_tooltips)
allocation_axis = bokeh.plotting.figure(width=450, height=400,
x_axis_label='growth rate λ [inv. hr]',
y_axis_label = 'ribosomal allocation',
y_range=[0, 0.35],
x_range=[0, 2],
tools = [mass_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
elongation_axis = bokeh.plotting.figure(width=450, height=400,
y_axis_label='translation speed [AA / s]',
x_axis_label = 'growth rate λ [inv. hr]',
y_range=[5, 20],
x_range = [0, 2],
tools = [elong_hover, 'pan',
'wheel_zoom', 'box_zoom']
)
legend_axis = bokeh.plotting.figure(width=370, height=120, tools=[])
legend_axis.axis.axis_label = None
legend_axis.axis.visible = False
legend_axis.grid.grid_line_color = None
legend_axis.background_fill_color = None
legend_axis.outline_line_color = None
# ##############################################################################
# GLYPH DEFINITION
# ##############################################################################
allocation_axis.scatter(x='growth_rate_hr', y='mass_fraction', marker='marker',
color='color', source=mass_frac, size=10, line_color='black',
alpha=0.75, name='data')
elongation_axis.scatter(x='growth_rate_hr', y='elongation_rate_aa_s', marker='marker',
color='color', source=elong, size=10, line_color='black',
alpha=0.75, name='data')
allocation_axis.multi_line(xs='lam', ys='phiRb', color='color', line_width=2,
source=source)
elongation_axis.multi_line(xs='lam', ys='gamma', color='color', line_width=2,
source=source)
legend_axis.multi_line(xs='filler_xs', ys='filler_ys', line_width=2.5,
line_color='color', legend_field='label' ,
source=source)
##############################################################################
# CALLBACK DEFINITION
# ##############################################################################
args = {'gamma_slider': gamma_slider,
'Kd_cpc_slider': Kd_cpc_slider,
'phiO_slider': phiO_slider,
'phiRb_slider': phiRb_slider,
'source': source,
'nu_max': nu_max,
'sc2_cpc_slider': sc2_cpc_slider}
callback = growth.viz.load_js(['./interactive_ecoli_data.js', './functions.js'],
args=args)
for s in [gamma_slider, Kd_cpc_slider, phiO_slider, phiRb_slider, sc2_cpc_slider]:
s.js_on_change('value', callback)
# ##############################################################################
# LAYOUT
# ##############################################################################
col1 = bokeh.layouts.Column(gamma_slider, phiO_slider)
col2 = bokeh.layouts.Column(Kd_cpc_slider, phiRb_slider, sc2_cpc_slider)
sliders = bokeh.layouts.Row(col1, col2, legend_axis)
row1 = bokeh.layouts.Row(allocation_axis, elongation_axis)
layout = bokeh.layouts.Column(sliders, row1)
bokeh.io.save(layout)
| [
"numpy.ones_like",
"numpy.log10",
"numpy.arange",
"pandas.read_csv"
] | [((450, 477), 'numpy.arange', 'np.arange', (['(0.001)', '(50)', '(0.001)'], {}), '(0.001, 50, 0.001)\n', (459, 477), True, 'import numpy as np\n'), ((532, 619), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv"""'], {}), "(\n '../../data/main_figure_data/Fig4_ecoli_ribosomal_mass_fractions.csv')\n", (543, 619), True, 'import pandas as pd\n'), ((623, 710), 'pandas.read_csv', 'pd.read_csv', (['"""../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv"""'], {}), "(\n '../../data/main_figure_data/Fig4_ecoli_peptide_elongation_rates.csv')\n", (634, 710), True, 'import pandas as pd\n'), ((1563, 1583), 'numpy.ones_like', 'np.ones_like', (['nu_max'], {}), '(nu_max)\n', (1575, 1583), True, 'import numpy as np\n'), ((3525, 3541), 'numpy.log10', 'np.log10', (['Kd_cpc'], {}), '(Kd_cpc)\n', (3533, 3541), True, 'import numpy as np\n')] |
from flask_wtf import FlaskForm
from flask_wtf.file import FileAllowed, FileField
from flask_babel import lazy_gettext as _l
from wtforms import StringField, TextAreaField, SubmitField, PasswordField, BooleanField
from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo
from app.models import User
class RegistrationForm(FlaskForm):
username = StringField(_l('Username',
validators=[DataRequired(), Length(min=2, max=24)]))
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField(_l('Password', validators=[DataRequired()]))
password_confirmation = PasswordField(_l('Confirm Password',
validators=[DataRequired(), EqualTo('password')]))
submit = SubmitField(_l('Sign Up'))
def validate_username(self, username):
user = User.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError(
_l('This username already exists. Please use a different username!')
)
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is not None:
raise ValidationError(_l('This email already exists. Please use a different email!'))
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
password = PasswordField(_l('Password'), validators=[DataRequired()])
remember = BooleanField(_l('Remember Me'))
submit = SubmitField(_l('Sign In'))
class UpdateUserForm(FlaskForm):
username = StringField(_l('Username'),
validators=[DataRequired(), Length(min=2, max=24)])
email = StringField('Email', validators=[DataRequired(), Email()])
about_me = TextAreaField(_l('About me'), validators=[Length(min=0, max=140)])
image_file = FileField(_l('Update profile picture'),
validators=[FileAllowed(['jpg', 'jpeg', 'png'])])
submit = SubmitField(_l('Submit'))
def __init__(self, original_username, original_email, *args, **kwargs):
super(UpdateUserForm, self).__init__(*args, **kwargs)
self.original_username = original_username
self.original_email = original_email
def validate_username(self, username):
if username.data is not self.original_username:
user = User.query.filter_by(username=self.username.data).first()
if user is not None:
raise ValidationError(_l('Please use a different username!'))
def validate_email(self, email):
if email.data is not self.original_email:
user = User.query.filter_by(email=self.email.data).first()
if user is not None:
raise ValidationError(_l('Please use a different email!'))
class RequestResetForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField(_l('Request Password Reset'))
def validate_email(self, email):
user = User.query.filter_by(email=email.data).first()
if user is None:
raise ValidationError('There is no account with this email.')
class ResetPasswordForm(FlaskForm):
password = PasswordField('Password', validators=[DataRequired()])
password_confirmation = PasswordField('<PASSWORD> Password', validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Reset Password')
class DeleteUserForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(min=2, max=24)]
)
submit = SubmitField('Delete User')
| [
"wtforms.validators.Email",
"wtforms.validators.ValidationError",
"flask_wtf.file.FileAllowed",
"wtforms.SubmitField",
"wtforms.validators.Length",
"wtforms.validators.EqualTo",
"flask_babel.lazy_gettext",
"app.models.User.query.filter_by",
"wtforms.validators.DataRequired"
] | [((3492, 3521), 'wtforms.SubmitField', 'SubmitField', (['"""Reset Password"""'], {}), "('Reset Password')\n", (3503, 3521), False, 'from wtforms import StringField, TextAreaField, SubmitField, PasswordField, BooleanField\n'), ((3689, 3715), 'wtforms.SubmitField', 'SubmitField', (['"""Delete User"""'], {}), "('Delete User')\n", (3700, 3715), False, 'from wtforms import StringField, TextAreaField, SubmitField, PasswordField, BooleanField\n'), ((818, 831), 'flask_babel.lazy_gettext', '_l', (['"""Sign Up"""'], {}), "('Sign Up')\n", (820, 831), True, 'from flask_babel import lazy_gettext as _l\n'), ((1489, 1503), 'flask_babel.lazy_gettext', '_l', (['"""Password"""'], {}), "('Password')\n", (1491, 1503), True, 'from flask_babel import lazy_gettext as _l\n'), ((1562, 1579), 'flask_babel.lazy_gettext', '_l', (['"""Remember Me"""'], {}), "('Remember Me')\n", (1564, 1579), True, 'from flask_babel import lazy_gettext as _l\n'), ((1606, 1619), 'flask_babel.lazy_gettext', '_l', (['"""Sign In"""'], {}), "('Sign In')\n", (1608, 1619), True, 'from flask_babel import lazy_gettext as _l\n'), ((1683, 1697), 'flask_babel.lazy_gettext', '_l', (['"""Username"""'], {}), "('Username')\n", (1685, 1697), True, 'from flask_babel import lazy_gettext as _l\n'), ((1878, 1892), 'flask_babel.lazy_gettext', '_l', (['"""About me"""'], {}), "('About me')\n", (1880, 1892), True, 'from flask_babel import lazy_gettext as _l\n'), ((1958, 1986), 'flask_babel.lazy_gettext', '_l', (['"""Update profile picture"""'], {}), "('Update profile picture')\n", (1960, 1986), True, 'from flask_babel import lazy_gettext as _l\n'), ((2090, 2102), 'flask_babel.lazy_gettext', '_l', (['"""Submit"""'], {}), "('Submit')\n", (2092, 2102), True, 'from flask_babel import lazy_gettext as _l\n'), ((3027, 3055), 'flask_babel.lazy_gettext', '_l', (['"""Request Password Reset"""'], {}), "('Request Password Reset')\n", (3029, 3055), True, 'from flask_babel import lazy_gettext as _l\n'), ((3200, 3255), 
'wtforms.validators.ValidationError', 'ValidationError', (['"""There is no account with this email."""'], {}), "('There is no account with this email.')\n", (3215, 3255), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((535, 549), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (547, 549), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((551, 558), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (556, 558), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((892, 936), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'username.data'}), '(username=username.data)\n', (912, 936), False, 'from app.models import User\n'), ((1029, 1097), 'flask_babel.lazy_gettext', '_l', (['"""This username already exists. Please use a different username!"""'], {}), "('This username already exists. Please use a different username!')\n", (1031, 1097), True, 'from flask_babel import lazy_gettext as _l\n'), ((1185, 1223), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (1205, 1223), False, 'from app.models import User\n'), ((1295, 1357), 'flask_babel.lazy_gettext', '_l', (['"""This email already exists. Please use a different email!"""'], {}), "('This email already exists. 
Please use a different email!')\n", (1297, 1357), True, 'from flask_babel import lazy_gettext as _l\n'), ((1434, 1448), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1446, 1448), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1450, 1457), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1455, 1457), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1517, 1531), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1529, 1531), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1738, 1752), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1750, 1752), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1754, 1775), 'wtforms.validators.Length', 'Length', ([], {'min': '(2)', 'max': '(24)'}), '(min=2, max=24)\n', (1760, 1775), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1823, 1837), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1835, 1837), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1839, 1846), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (1844, 1846), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((1906, 1928), 'wtforms.validators.Length', 'Length', ([], {'min': '(0)', 'max': '(140)'}), '(min=0, max=140)\n', (1912, 1928), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((2027, 2062), 'flask_wtf.file.FileAllowed', 'FileAllowed', (["['jpg', 'jpeg', 'png']"], {}), "(['jpg', 'jpeg', 'png'])\n", (2038, 2062), False, 'from flask_wtf.file import FileAllowed, FileField\n'), ((2976, 2990), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (2988, 
2990), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((2992, 2999), 'wtforms.validators.Email', 'Email', ([], {}), '()\n', (2997, 2999), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((3110, 3148), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'email.data'}), '(email=email.data)\n', (3130, 3148), False, 'from app.models import User\n'), ((3347, 3361), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3359, 3361), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((3441, 3455), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3453, 3455), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((3457, 3476), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (3464, 3476), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((3608, 3622), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (3620, 3622), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((3624, 3645), 'wtforms.validators.Length', 'Length', ([], {'min': '(2)', 'max': '(24)'}), '(min=2, max=24)\n', (3630, 3645), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((449, 463), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (461, 463), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((465, 486), 'wtforms.validators.Length', 'Length', ([], {'min': '(2)', 'max': '(24)'}), '(min=2, max=24)\n', (471, 486), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((617, 631), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (629, 
631), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((754, 768), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (766, 768), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((770, 789), 'wtforms.validators.EqualTo', 'EqualTo', (['"""password"""'], {}), "('password')\n", (777, 789), False, 'from wtforms.validators import DataRequired, Email, ValidationError, Length, EqualTo\n'), ((2458, 2507), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'username': 'self.username.data'}), '(username=self.username.data)\n', (2478, 2507), False, 'from app.models import User\n'), ((2587, 2625), 'flask_babel.lazy_gettext', '_l', (['"""Please use a different username!"""'], {}), "('Please use a different username!')\n", (2589, 2625), True, 'from flask_babel import lazy_gettext as _l\n'), ((2734, 2777), 'app.models.User.query.filter_by', 'User.query.filter_by', ([], {'email': 'self.email.data'}), '(email=self.email.data)\n', (2754, 2777), False, 'from app.models import User\n'), ((2857, 2892), 'flask_babel.lazy_gettext', '_l', (['"""Please use a different email!"""'], {}), "('Please use a different email!')\n", (2859, 2892), True, 'from flask_babel import lazy_gettext as _l\n')] |
from helpers import poseRt
from frame import Frame
import time
import numpy as np
import g2o
import json
LOCAL_WINDOW = 20
#LOCAL_WINDOW = None
class Point(object):
# A Point is a 3-D point in the world
# Each Point is observed in multiple Frames
def __init__(self, mapp, loc, color, tid=None):
self.pt = np.array(loc)
self.frames = []
self.idxs = []
self.color = np.copy(color)
self.id = tid if tid is not None else mapp.add_point(self)
def homogeneous(self):
return np.array([self.pt[0], self.pt[1], self.pt[2], 1.0])
def orb(self):
return [f.des[idx] for f,idx in zip(self.frames, self.idxs)]
def delete(self):
for f,idx in zip(self.frames, self.idxs):
f.pts[idx] = None
del self
def add_observation(self, frame, idx):
frame.pts[idx] = self
self.frames.append(frame)
self.idxs.append(idx)
class Map(object):
    """Container for every Frame and Point in the SLAM map, with JSON
    (de)serialization and g2o bundle-adjustment optimization."""

    def __init__(self):
        self.frames = []     # all Frames, in insertion order
        self.points = []     # all Points, in insertion order
        self.max_frame = 0   # next frame id to hand out
        self.max_point = 0   # next point id to hand out

    def serialize(self):
        """Serialize the whole map (points, frames, id counters) to a JSON string."""
        ret = {}
        ret['points'] = [{'id': p.id, 'pt': p.pt.tolist(), 'color': p.color.tolist()} for p in self.points]
        ret['frames'] = []
        for f in self.frames:
            ret['frames'].append({
                'id': f.id, 'K': f.K.tolist(), 'pose': f.pose.tolist(), 'h': f.h, 'w': f.w,
                'kpus': f.kpus.tolist(), 'des': f.des.tolist(),
                # -1 marks a keypoint that has no associated map point
                'pts': [p.id if p is not None else -1 for p in f.pts]})
        ret['max_frame'] = self.max_frame
        ret['max_point'] = self.max_point
        return json.dumps(ret)

    def deserialize(self, s):
        """Rebuild the map in place from a JSON string produced by serialize()."""
        ret = json.loads(s)
        self.max_frame = ret['max_frame']
        self.max_point = ret['max_point']
        self.points = []
        self.frames = []
        pids = {}  # point id -> Point, for relinking frame keypoints to points
        for p in ret['points']:
            pp = Point(self, p['pt'], p['color'], p['id'])
            self.points.append(pp)
            pids[p['id']] = pp
        for f in ret['frames']:
            ff = Frame(self, None, f['K'], f['pose'], f['id'])
            ff.w, ff.h = f['w'], f['h']
            ff.kpus = np.array(f['kpus'])
            ff.des = np.array(f['des'])
            ff.pts = [None] * len(ff.kpus)
            for i,p in enumerate(f['pts']):
                if p != -1:
                    ff.pts[i] = pids[p]
            self.frames.append(ff)

    def add_point(self, point):
        """Register a Point and return its newly assigned id."""
        ret = self.max_point
        self.max_point += 1
        self.points.append(point)
        return ret

    def add_frame(self, frame):
        """Register a Frame and return its newly assigned id."""
        ret = self.max_frame
        self.max_frame += 1
        self.frames.append(frame)
        return ret

    # *** optimizer ***

    def optimize(self, local_window=LOCAL_WINDOW, fix_points=False, verbose=False):
        """Run g2o bundle adjustment over the map.

        Frames outside the trailing `local_window` (and the first two frames)
        are held fixed. When `fix_points` is False, points are re-estimated
        and bad points are culled. Returns the optimizer's active chi2.
        """
        # create g2o optimizer
        opt = g2o.SparseOptimizer()
        solver = g2o.BlockSolverSE3(g2o.LinearSolverCholmodSE3())
        solver = g2o.OptimizationAlgorithmLevenberg(solver)
        opt.set_algorithm(solver)

        # 5.991 is the 95% chi-square threshold for 2 DOF (pixel residuals)
        robust_kernel = g2o.RobustKernelHuber(np.sqrt(5.991))

        if local_window is None:
            local_frames = self.frames
        else:
            local_frames = self.frames[-local_window:]

        # add frames to graph
        for f in self.frames:
            pose = np.linalg.inv(f.pose)
            sbacam = g2o.SBACam(g2o.SE3Quat(pose[0:3, 0:3], pose[0:3, 3]))
            sbacam.set_cam(f.K[0][0], f.K[1][1], f.K[0][2], f.K[1][2], 1.0)

            v_se3 = g2o.VertexCam()
            v_se3.set_id(f.id)
            v_se3.set_estimate(sbacam)
            # anchor the first two frames and everything outside the window
            v_se3.set_fixed(f.id <= 1 or f not in local_frames)
            opt.add_vertex(v_se3)

        # add points to frames; offset keeps point ids disjoint from frame ids
        PT_ID_OFFSET = 0x10000
        for p in self.points:
            if not any([f in local_frames for f in p.frames]):
                continue

            pt = g2o.VertexSBAPointXYZ()
            pt.set_id(p.id + PT_ID_OFFSET)
            pt.set_estimate(p.pt[0:3])
            pt.set_marginalized(True)
            pt.set_fixed(fix_points)
            opt.add_vertex(pt)

            # one reprojection edge per observation of the point
            for f,idx in zip(p.frames, p.idxs):
                edge = g2o.EdgeProjectP2MC()
                edge.set_vertex(0, pt)
                edge.set_vertex(1, opt.vertex(f.id))
                uv = f.kpus[idx]
                edge.set_measurement(uv)
                edge.set_information(np.eye(2))
                edge.set_robust_kernel(robust_kernel)
                opt.add_edge(edge)

        if verbose:
            opt.set_verbose(True)
        opt.initialize_optimization()
        opt.optimize(20)

        # put frames back
        for f in self.frames:
            est = opt.vertex(f.id).estimate()
            R = est.rotation().matrix()
            t = est.translation()
            f.pose = np.linalg.inv(poseRt(R, t))

        # put points back (and cull)
        if not fix_points:
            new_points = []
            for p in self.points:
                vert = opt.vertex(p.id + PT_ID_OFFSET)
                if vert is None:
                    # point was not in the graph (no local observations): keep it
                    new_points.append(p)
                    continue
                est = vert.estimate()

                # <= 3 match point that's old
                old_point = len(p.frames) <= 3 and p.frames[-1] not in local_frames

                # compute reprojection error
                errs = []
                for f,idx in zip(p.frames, p.idxs):
                    uv = f.kpus[idx]
                    proj = np.dot(np.dot(f.K, f.pose[:3]),
                                  np.array([est[0], est[1], est[2], 1.0]))
                    proj = proj[0:2] / proj[2]
                    errs.append(np.linalg.norm(proj-uv))

                # cull: stale under-observed points or reprojection error > 5 px
                if old_point or np.mean(errs) > 5:
                    p.delete()
                    continue

                p.pt = np.array(est)
                new_points.append(p)
            print("Culled: %d points" % (len(self.points) - len(new_points)))
            self.points = new_points

        return opt.active_chi2()
| [
"numpy.sqrt",
"numpy.array",
"g2o.VertexCam",
"numpy.linalg.norm",
"numpy.mean",
"g2o.SparseOptimizer",
"json.dumps",
"numpy.dot",
"g2o.LinearSolverCholmodSE3",
"frame.Frame",
"g2o.OptimizationAlgorithmLevenberg",
"g2o.EdgeProjectP2MC",
"helpers.poseRt",
"json.loads",
"numpy.eye",
"g2o... | [((318, 331), 'numpy.array', 'np.array', (['loc'], {}), '(loc)\n', (326, 331), True, 'import numpy as np\n'), ((389, 403), 'numpy.copy', 'np.copy', (['color'], {}), '(color)\n', (396, 403), True, 'import numpy as np\n'), ((504, 555), 'numpy.array', 'np.array', (['[self.pt[0], self.pt[1], self.pt[2], 1.0]'], {}), '([self.pt[0], self.pt[1], self.pt[2], 1.0])\n', (512, 555), True, 'import numpy as np\n'), ((1510, 1525), 'json.dumps', 'json.dumps', (['ret'], {}), '(ret)\n', (1520, 1525), False, 'import json\n'), ((1565, 1578), 'json.loads', 'json.loads', (['s'], {}), '(s)\n', (1575, 1578), False, 'import json\n'), ((2584, 2605), 'g2o.SparseOptimizer', 'g2o.SparseOptimizer', ([], {}), '()\n', (2603, 2605), False, 'import g2o\n'), ((2681, 2723), 'g2o.OptimizationAlgorithmLevenberg', 'g2o.OptimizationAlgorithmLevenberg', (['solver'], {}), '(solver)\n', (2715, 2723), False, 'import g2o\n'), ((1887, 1932), 'frame.Frame', 'Frame', (['self', 'None', "f['K']", "f['pose']", "f['id']"], {}), "(self, None, f['K'], f['pose'], f['id'])\n", (1892, 1932), False, 'from frame import Frame\n'), ((1983, 2002), 'numpy.array', 'np.array', (["f['kpus']"], {}), "(f['kpus'])\n", (1991, 2002), True, 'import numpy as np\n'), ((2018, 2036), 'numpy.array', 'np.array', (["f['des']"], {}), "(f['des'])\n", (2026, 2036), True, 'import numpy as np\n'), ((2638, 2666), 'g2o.LinearSolverCholmodSE3', 'g2o.LinearSolverCholmodSE3', ([], {}), '()\n', (2664, 2666), False, 'import g2o\n'), ((2797, 2811), 'numpy.sqrt', 'np.sqrt', (['(5.991)'], {}), '(5.991)\n', (2804, 2811), True, 'import numpy as np\n'), ((3001, 3022), 'numpy.linalg.inv', 'np.linalg.inv', (['f.pose'], {}), '(f.pose)\n', (3014, 3022), True, 'import numpy as np\n'), ((3177, 3192), 'g2o.VertexCam', 'g2o.VertexCam', ([], {}), '()\n', (3190, 3192), False, 'import g2o\n'), ((3504, 3527), 'g2o.VertexSBAPointXYZ', 'g2o.VertexSBAPointXYZ', ([], {}), '()\n', (3525, 3527), False, 'import g2o\n'), ((3049, 3090), 'g2o.SE3Quat', 'g2o.SE3Quat', 
(['pose[0:3, 0:3]', 'pose[0:3, 3]'], {}), '(pose[0:3, 0:3], pose[0:3, 3])\n', (3060, 3090), False, 'import g2o\n'), ((3744, 3765), 'g2o.EdgeProjectP2MC', 'g2o.EdgeProjectP2MC', ([], {}), '()\n', (3763, 3765), False, 'import g2o\n'), ((4301, 4313), 'helpers.poseRt', 'poseRt', (['R', 't'], {}), '(R, t)\n', (4307, 4313), False, 'from helpers import poseRt\n'), ((5129, 5142), 'numpy.array', 'np.array', (['est'], {}), '(est)\n', (5137, 5142), True, 'import numpy as np\n'), ((3929, 3938), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (3935, 3938), True, 'import numpy as np\n'), ((4840, 4863), 'numpy.dot', 'np.dot', (['f.K', 'f.pose[:3]'], {}), '(f.K, f.pose[:3])\n', (4846, 4863), True, 'import numpy as np\n'), ((4889, 4928), 'numpy.array', 'np.array', (['[est[0], est[1], est[2], 1.0]'], {}), '([est[0], est[1], est[2], 1.0])\n', (4897, 4928), True, 'import numpy as np\n'), ((4989, 5014), 'numpy.linalg.norm', 'np.linalg.norm', (['(proj - uv)'], {}), '(proj - uv)\n', (5003, 5014), True, 'import numpy as np\n'), ((5054, 5067), 'numpy.mean', 'np.mean', (['errs'], {}), '(errs)\n', (5061, 5067), True, 'import numpy as np\n')] |
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from utils.parse_config import *
from utils.utils import build_targets, to_cpu, non_max_suppression
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class Swish(nn.Module):
    """Swish activation: x * sigmoid(x)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # torch.sigmoid replaces F.sigmoid, which is deprecated and warns.
        return x * torch.sigmoid(x)
class Mish(nn.Module):
    """Mish activation: x * tanh(softplus(x))."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        softplus = F.softplus(x)
        return x * torch.tanh(softplus)
class DepthwiseConv2d(nn.Module):
    """Depthwise-separable convolution: a per-channel (depthwise) convolution
    followed by a 1x1 pointwise convolution that mixes channels."""

    def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False):
        super().__init__()
        # groups=in_channels gives every input channel its own filter.
        self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding,
                               dilation, groups=in_channels, bias=bias)
        # 1x1 convolution projects the per-channel maps to out_channels.
        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)

    def forward(self, x):
        return self.pointwise(self.conv1(x))
def create_modules(module_defs, act_type=0, mobile_yolo=False):
    """
    Constructs module list of layer blocks from module configuration in module_defs
    """
    # module_defs = [{"type":"net", "channels":3, ...},  # each element is a layer block (dtype=dict)
    #                {"type":"convolutional", "batch_normalize":1, ...},
    #                ...]
    hyperparams = module_defs.pop(0)  # global parameters from the [net] block
    output_filters = [int(hyperparams["channels"])]  # starts at 3: RGB input has 3 channels
    module_list = nn.ModuleList()  # one entry per big layer, e.g. a conv block = conv-bn-leaky relu
    for module_i, module_def in enumerate(module_defs):
        modules = nn.Sequential()

        if module_def["type"] == "convolutional":
            bn = int(module_def["batch_normalize"])
            filters = int(module_def["filters"])
            kernel_size = int(module_def["size"])
            pad = (kernel_size - 1) // 2
            # use a depthwise-separable convolution when mobile_yolo is requested
            if mobile_yolo:
                modules.add_module(
                    f"conv_{module_i}",
                    DepthwiseConv2d(
                        in_channels=output_filters[-1],
                        out_channels=filters,
                        kernel_size=kernel_size,
                        stride=int(module_def["stride"]),
                        padding=pad,
                        bias=not bn,
                    ),
                )
            else:
                modules.add_module(
                    f"conv_{module_i}",
                    nn.Conv2d(
                        in_channels=output_filters[-1],
                        out_channels=filters,
                        kernel_size=kernel_size,
                        stride=int(module_def["stride"]),
                        padding=pad,
                        bias=not bn,
                    ),
                )
            if bn:
                modules.add_module(f"batch_norm_{module_i}", nn.BatchNorm2d(filters, momentum=0.9, eps=1e-5))
            # act_type selects the activation: 0 = LeakyReLU, 1 = Swish, 2 = Mish
            if module_def["activation"] == "leaky":
                if int(act_type) == 0:
                    print("Adding LeakyReLU")
                    modules.add_module(f"leaky_{module_i}", nn.LeakyReLU(0.1))
                elif int(act_type) == 1:
                    print("Adding Swish")
                    modules.add_module(f"swish_{module_i}", Swish())
                elif int(act_type) == 2:
                    print("Adding Mish")
                    modules.add_module(f"mish_{module_i}", Mish())

        elif module_def["type"] == "maxpool":
            kernel_size = int(module_def["size"])
            stride = int(module_def["stride"])
            if kernel_size == 2 and stride == 1:
                # pad right/bottom so a 2x2 stride-1 pool keeps the spatial size
                modules.add_module(f"_debug_padding_{module_i}", nn.ZeroPad2d((0, 1, 0, 1)))
            maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=int((kernel_size - 1) // 2))
            modules.add_module(f"maxpool_{module_i}", maxpool)

        elif module_def["type"] == "upsample":
            # Upsample is defined elsewhere in this file
            upsample = Upsample(scale_factor=int(module_def["stride"]), mode="nearest")
            modules.add_module(f"upsample_{module_i}", upsample)

        elif module_def["type"] == "route":
            layers = [int(x) for x in module_def["layers"].split(",")]
            filters = sum([output_filters[1:][i] for i in layers])  # channel counts add up: this is a concat
            modules.add_module(f"route_{module_i}", EmptyLayer())  # placeholder; concat happens in the forward pass

        elif module_def["type"] == "shortcut":
            filters = output_filters[1:][int(module_def["from"])]
            modules.add_module(f"shortcut_{module_i}", EmptyLayer())

        elif module_def["type"] == "yolo":
            # mask: 6,7,8 / 3,4,5 / 0,1,2 <=> small/medium/large feature map <=> large/medium/small objects
            anchor_idxs = [int(x) for x in module_def["mask"].split(",")]
            # Extract anchors
            anchors = [int(x) for x in module_def["anchors"].split(",")]
            anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
            anchors = [anchors[i] for i in anchor_idxs]
            # e.g. for mask 6,7,8 this yields [(116, 90), (156, 198), (373, 326)]
            num_classes = int(module_def["classes"])  # 80
            img_size = int(hyperparams["height"])  # 416
            # Define detection layer
            yolo_layer = YOLOLayer(anchors, num_classes, img_size)
            modules.add_module(f"yolo_{module_i}", yolo_layer)

        # Register module list and number of output filters
        module_list.append(modules)
        output_filters.append(filters)

    return hyperparams, module_list
| [
"torch.nn.BatchNorm2d",
"torch.nn.ZeroPad2d",
"torch.nn.LeakyReLU",
"torch.nn.ModuleList",
"torch.nn.Sequential",
"torch.nn.functional.sigmoid",
"torch.nn.Conv2d",
"torch.nn.functional.softplus"
] | [((1706, 1721), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (1719, 1721), True, 'import torch.nn as nn\n'), ((854, 964), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'in_channels', 'kernel_size', 'stride', 'padding', 'dilation'], {'groups': 'in_channels', 'bias': 'bias'}), '(in_channels, in_channels, kernel_size, stride, padding, dilation,\n groups=in_channels, bias=bias)\n', (863, 964), True, 'import torch.nn as nn\n'), ((1017, 1079), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels', '(1)', '(1)', '(0)', '(1)', '(1)'], {'bias': 'bias'}), '(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)\n', (1026, 1079), True, 'import torch.nn as nn\n'), ((1837, 1852), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1850, 1852), True, 'import torch.nn as nn\n'), ((449, 461), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['x'], {}), '(x)\n', (458, 461), True, 'import torch.nn.functional as F\n'), ((595, 608), 'torch.nn.functional.softplus', 'F.softplus', (['x'], {}), '(x)\n', (605, 608), True, 'import torch.nn.functional as F\n'), ((3136, 3184), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['filters'], {'momentum': '(0.9)', 'eps': '(1e-05)'}), '(filters, momentum=0.9, eps=1e-05)\n', (3150, 3184), True, 'import torch.nn as nn\n'), ((3382, 3399), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.1)'], {}), '(0.1)\n', (3394, 3399), True, 'import torch.nn as nn\n'), ((3960, 3986), 'torch.nn.ZeroPad2d', 'nn.ZeroPad2d', (['(0, 1, 0, 1)'], {}), '((0, 1, 0, 1))\n', (3972, 3986), True, 'import torch.nn as nn\n')] |
# -*- coding: utf-8 -*-
import os,urllib
class Dataset(object):
    """Base class for downloadable datasets.

    Resolves storage paths, downloads archives (optionally through an HTTP
    proxy) and extracts them. Subclasses are expected to override process()
    to convert the raw download into cleaned, formatted files.
    """

    def __init__(self, opt=None):
        if opt is not None:
            self.setup(opt)
            self.http_proxy = opt.__dict__.get("proxy", "null")
        else:
            # defaults used when no options object is supplied
            self.name = "demo"
            self.dirname = "demo"
            self.http_proxy = "null"
        self.urls = []
        self.root = ".data"
        self.saved_path = os.path.join(os.path.join(self.root, "clean"), self.name)
        self.formated_files = None

    def setup(self, opt):
        """Copy dataset name and proxy settings from an options object."""
        self.name = opt.dataset
        self.dirname = opt.dataset
        self.http_proxy = opt.__dict__.get("proxy", "null")

    def process(self):
        """Download and convert raw data; subclasses must override this."""
        dirname = self.download()
        print("processing dirname: " + dirname)
        # Bug fix: the original referenced the undefined global `opt` here,
        # which raised NameError instead of the intended Exception.
        raise Exception("method in father class have been called in processing: {} dataset".format(self.name))

    def getFormatedData(self):
        """Return the cleaned data file paths, processing raw data on first call."""
        if self.formated_files is not None:
            return self.formated_files
        if os.path.exists(self.saved_path):
            return [os.path.join(self.saved_path, filename) for filename in os.listdir(self.saved_path)]
        self.formated_files = self.process()
        return self.formated_files

    def download_from_url(self, url, path, schedule=None):
        """Fetch `url` into `path`, honouring self.http_proxy when configured.

        `schedule` is accepted for interface compatibility but unused.
        """
        if self.http_proxy != "null":
            proxy = urllib.request.ProxyHandler({'http': self.http_proxy, 'https': self.http_proxy})
            # construct a new opener using the proxy settings
            opener = urllib.request.build_opener(proxy)
            # install the opener on the module level
            urllib.request.install_opener(opener)
            print("proxy in %s" % self.http_proxy)
        try:
            urllib.request.urlretrieve(url, path)
        except Exception:
            # narrowed from a bare `except:`; keep the legacy Python 2
            # urllib2 fallback the original attempted
            import urllib2
            urllib2.urlretrieve(url, path)
        return path

    def download(self, check=None):
        """Download and unzip an online archive (.zip, .gz, .tgz or .bz2).

        Arguments:
            check (str or None): Folder whose existence indicates
                that the dataset has already been downloaded, or
                None to check the existence of root/{cls.name}.

        Returns:
            dataset_path (str): Path to extracted dataset.
        """
        import zipfile, tarfile
        path = os.path.join(self.root, self.name)
        check = path if check is None else check
        if not os.path.isdir(check):
            for url in self.urls:
                if isinstance(url, tuple):
                    url, filename = url
                else:
                    filename = os.path.basename(url)
                zpath = os.path.join(path, filename)
                if not os.path.isfile(zpath):
                    if not os.path.exists(os.path.dirname(zpath)):
                        os.makedirs(os.path.dirname(zpath))
                    print('downloading {}'.format(filename))
                    self.download_from_url(url, zpath)
                ext = os.path.splitext(filename)[-1]
                if ext == '.zip':
                    with zipfile.ZipFile(zpath, 'r') as zfile:
                        print('extracting')
                        zfile.extractall(path)
                elif ext in ['.gz', '.tgz', '.bz2']:
                    # Bug fix: 'r:gz' cannot open .bz2 archives; 'r:*' lets
                    # tarfile auto-detect the compression scheme.
                    with tarfile.open(zpath, 'r:*') as tar:
                        dirs = [member for member in tar.getmembers()]
                        tar.extractall(path=path, members=dirs)
        else:
            print("%s do not need to be downloaded" % path)
        return path
| [
"os.path.exists",
"os.listdir",
"tarfile.open",
"urllib2.urlretrieve",
"zipfile.ZipFile",
"urllib.request.urlretrieve",
"urllib.request.install_opener",
"os.path.join",
"urllib.request.ProxyHandler",
"os.path.splitext",
"os.path.isfile",
"os.path.dirname",
"os.path.isdir",
"urllib.request.... | [((1078, 1109), 'os.path.exists', 'os.path.exists', (['self.saved_path'], {}), '(self.saved_path)\n', (1092, 1109), False, 'import os, urllib\n'), ((2717, 2751), 'os.path.join', 'os.path.join', (['self.root', 'self.name'], {}), '(self.root, self.name)\n', (2729, 2751), False, 'import os, urllib\n'), ((427, 459), 'os.path.join', 'os.path.join', (['self.root', '"""clean"""'], {}), "(self.root, 'clean')\n", (439, 459), False, 'import os, urllib\n'), ((1578, 1663), 'urllib.request.ProxyHandler', 'urllib.request.ProxyHandler', (["{'http': self.http_proxy, 'https': self.http_proxy}"], {}), "({'http': self.http_proxy, 'https': self.http_proxy}\n )\n", (1605, 1663), False, 'import os, urllib\n'), ((1734, 1768), 'urllib.request.build_opener', 'urllib.request.build_opener', (['proxy'], {}), '(proxy)\n', (1761, 1768), False, 'import os, urllib\n'), ((1826, 1863), 'urllib.request.install_opener', 'urllib.request.install_opener', (['opener'], {}), '(opener)\n', (1855, 1863), False, 'import os, urllib\n'), ((2100, 2137), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['url', 'path'], {}), '(url, path)\n', (2126, 2137), False, 'import os, urllib\n'), ((2816, 2836), 'os.path.isdir', 'os.path.isdir', (['check'], {}), '(check)\n', (2829, 2836), False, 'import os, urllib\n'), ((1131, 1170), 'os.path.join', 'os.path.join', (['self.saved_path', 'filename'], {}), '(self.saved_path, filename)\n', (1143, 1170), False, 'import os, urllib\n'), ((2193, 2223), 'urllib2.urlretrieve', 'urllib2.urlretrieve', (['url', 'path'], {}), '(url, path)\n', (2212, 2223), False, 'import urllib2\n'), ((3054, 3082), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (3066, 3082), False, 'import os, urllib\n'), ((1186, 1213), 'os.listdir', 'os.listdir', (['self.saved_path'], {}), '(self.saved_path)\n', (1196, 1213), False, 'import os, urllib\n'), ((3008, 3029), 'os.path.basename', 'os.path.basename', (['url'], {}), '(url)\n', (3024, 3029), 
False, 'import os, urllib\n'), ((3106, 3127), 'os.path.isfile', 'os.path.isfile', (['zpath'], {}), '(zpath)\n', (3120, 3127), False, 'import os, urllib\n'), ((3415, 3441), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (3431, 3441), False, 'import os, urllib\n'), ((3505, 3532), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zpath', '"""r"""'], {}), "(zpath, 'r')\n", (3520, 3532), False, 'import zipfile, tarfile\n'), ((3171, 3193), 'os.path.dirname', 'os.path.dirname', (['zpath'], {}), '(zpath)\n', (3186, 3193), False, 'import os, urllib\n'), ((3232, 3254), 'os.path.dirname', 'os.path.dirname', (['zpath'], {}), '(zpath)\n', (3247, 3254), False, 'import os, urllib\n'), ((3711, 3738), 'tarfile.open', 'tarfile.open', (['zpath', '"""r:gz"""'], {}), "(zpath, 'r:gz')\n", (3723, 3738), False, 'import zipfile, tarfile\n')] |
# coding:utf-8
from urllib.parse import urlencode, urljoin
from .client import Client
class API(Client):
    """Base class for concrete API endpoints.

    Subclasses set HOST and PATH; placeholders in PATH are filled from
    `path_args` and `params` become the query string.
    """

    HOST = None
    PATH = None
    TIMEOUT = 30

    @classmethod
    def _build_url(cls, path_args=None, params=None):
        """Join HOST and PATH, substitute path placeholders, append query params."""
        endpoint = urljoin(cls.HOST, cls.PATH)
        if path_args:
            endpoint = endpoint.format(**path_args)
        if params:
            # append with '&' if the URL already carries a query string
            separator = "?" if "?" not in endpoint else "&"
            endpoint = "{}{}{}".format(endpoint, separator, urlencode(params))
        return endpoint

    async def call(self, path_args=None, params=None, data=None, headers=None):
        """Build the endpoint URL and delegate the request to Client.call."""
        url = self._build_url(path_args, params)
        return await super(API, self).call(url, data, headers)
| [
"urllib.parse.urlencode",
"urllib.parse.urljoin"
] | [((244, 271), 'urllib.parse.urljoin', 'urljoin', (['cls.HOST', 'cls.PATH'], {}), '(cls.HOST, cls.PATH)\n', (251, 271), False, 'from urllib.parse import urlencode, urljoin\n'), ((442, 459), 'urllib.parse.urlencode', 'urlencode', (['params'], {}), '(params)\n', (451, 459), False, 'from urllib.parse import urlencode, urljoin\n')] |
import unittest
import io
from unittest import mock
from tests.lib.utils import INSPECT
from custom_image_cli.validation_tool import validation_helper
from custom_image_cli.validation_tool.validation_models.validation_models import \
ImageDetail, ImageManifest, EmrRelease
class TestValidationHelper(unittest.TestCase):
    """Unit tests for validation_helper.validate_all and load_validation_info."""

    def setUp(self) -> None:
        self.inspect = INSPECT
        # A manifest holding a single release that contains one image entry.
        self.manifest = ImageManifest([EmrRelease("release_name", [ImageDetail("image_type", None, [], [])])], [], [])

    # mock.patch decorators apply bottom-up, so the mock parameters below are
    # listed in the reverse of the decorator order (check_envs_constructor first).
    @mock.patch('sys.stdout', new_callable=io.StringIO)
    @mock.patch('custom_image_cli.validation_tool.validation_helper.load_validation_info')
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check")
    @mock.patch("custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__")
    def test_validate_all(self, check_envs_constructor, check_envs, check_files_constructor,
                          check_files, check_manifest_constructor,
                          check_manifest, check_local_job_run, load_info, mock_stdout):
        """validate_all runs each check exactly once and prints the progress line."""
        # patched constructors must return None; every check reports success
        check_envs_constructor.return_value = None
        check_envs.return_value = True
        check_files_constructor.return_value = None
        check_files.return_value = True
        check_manifest_constructor.return_value = None
        check_manifest.return_value = True
        check_local_job_run.return_value = True
        load_info.return_value = ImageDetail("image_type", None, [], []), [], []
        actual = validation_helper.validate_all(self.inspect, "docker_cmd", "docker_image_uri",
                                                self.manifest, "release_name", "image_type", "log")
        self.assertEqual(actual, True)
        check_manifest.assert_called_once()
        check_envs.assert_called_once()
        check_files.assert_called_once()
        check_local_job_run.assert_called_once()
        expected = "... Checking Image Manifest\n"
        self.assertEqual(expected, mock_stdout.getvalue())

    @mock.patch("custom_image_cli.validation_tool.check_inputs.check_version")
    @mock.patch("custom_image_cli.validation_tool.check_inputs.check_image")
    def test_load_validation_info(self, check_image, check_version):
        """load_validation_info returns the matching image plus file/env check lists."""
        value = self.manifest  # NOTE(review): unused local, kept verbatim
        check_version.return_value = None
        check_image.return_value = None
        actual_img, actual_file, actual_env = validation_helper.load_validation_info(self.manifest, "release_name", "image_type", "log")
        self.assertEqual(actual_img, self.manifest.emr_releases[0].images[0])
        self.assertEqual(actual_file, [])
        self.assertEqual(actual_env, [])
        check_version.assert_called_once_with(self.manifest.emr_releases[0], "release_name", "log")
        check_image.assert_called_once_with(self.manifest.emr_releases[0].images[0], "image_type", "log")
| [
"custom_image_cli.validation_tool.validation_models.validation_models.ImageDetail",
"unittest.mock.patch",
"custom_image_cli.validation_tool.validation_helper.validate_all",
"custom_image_cli.validation_tool.validation_helper.load_validation_info"
] | [((511, 561), 'unittest.mock.patch', 'mock.patch', (['"""sys.stdout"""'], {'new_callable': 'io.StringIO'}), "('sys.stdout', new_callable=io.StringIO)\n", (521, 561), False, 'from unittest import mock\n'), ((567, 657), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_helper.load_validation_info"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_helper.load_validation_info')\n", (577, 657), False, 'from unittest import mock\n'), ((658, 774), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_local_job_run.CheckLocalJobRun.check'\n )\n", (668, 774), False, 'from unittest import mock\n'), ((770, 878), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.check'\n )\n", (780, 878), False, 'from unittest import mock\n'), ((874, 985), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_manifest.CheckManifest.__init__'\n )\n", (884, 985), False, 'from unittest import mock\n'), ((981, 1083), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.check'\n )\n", (991, 1083), False, 'from unittest import mock\n'), ((1079, 1184), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_files.CheckFiles.__init__'\n )\n", (1089, 1184), False, 'from unittest import 
mock\n'), ((1180, 1280), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.check'\n )\n", (1190, 1280), False, 'from unittest import mock\n'), ((1276, 1379), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__"""'], {}), "(\n 'custom_image_cli.validation_tool.validation_tests.check_envs.CheckEnvs.__init__'\n )\n", (1286, 1379), False, 'from unittest import mock\n'), ((2555, 2628), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.check_inputs.check_version"""'], {}), "('custom_image_cli.validation_tool.check_inputs.check_version')\n", (2565, 2628), False, 'from unittest import mock\n'), ((2634, 2705), 'unittest.mock.patch', 'mock.patch', (['"""custom_image_cli.validation_tool.check_inputs.check_image"""'], {}), "('custom_image_cli.validation_tool.check_inputs.check_image')\n", (2644, 2705), False, 'from unittest import mock\n'), ((2045, 2179), 'custom_image_cli.validation_tool.validation_helper.validate_all', 'validation_helper.validate_all', (['self.inspect', '"""docker_cmd"""', '"""docker_image_uri"""', 'self.manifest', '"""release_name"""', '"""image_type"""', '"""log"""'], {}), "(self.inspect, 'docker_cmd',\n 'docker_image_uri', self.manifest, 'release_name', 'image_type', 'log')\n", (2075, 2179), False, 'from custom_image_cli.validation_tool import validation_helper\n'), ((2934, 3028), 'custom_image_cli.validation_tool.validation_helper.load_validation_info', 'validation_helper.load_validation_info', (['self.manifest', '"""release_name"""', '"""image_type"""', '"""log"""'], {}), "(self.manifest, 'release_name',\n 'image_type', 'log')\n", (2972, 3028), False, 'from custom_image_cli.validation_tool import validation_helper\n'), ((1979, 2018), 
'custom_image_cli.validation_tool.validation_models.validation_models.ImageDetail', 'ImageDetail', (['"""image_type"""', 'None', '[]', '[]'], {}), "('image_type', None, [], [])\n", (1990, 2018), False, 'from custom_image_cli.validation_tool.validation_models.validation_models import ImageDetail, ImageManifest, EmrRelease\n'), ((453, 492), 'custom_image_cli.validation_tool.validation_models.validation_models.ImageDetail', 'ImageDetail', (['"""image_type"""', 'None', '[]', '[]'], {}), "('image_type', None, [], [])\n", (464, 492), False, 'from custom_image_cli.validation_tool.validation_models.validation_models import ImageDetail, ImageManifest, EmrRelease\n')] |
# -*- coding: utf-8 -*-
import nltk
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
#from nltk import pos_tag
from nltk.tag import StanfordNERTagger
from nltk.tokenize import word_tokenize
style.use('fivethirtyeight')

# Read the article and split it into word tokens for both taggers.
# NOTE(review): the file handle is never closed and "news_article.txt"
# must exist in the working directory -- confirm before running.
raw_text = open("news_article.txt").read()
token_text = word_tokenize(raw_text)
def stanford_tagger(token_text):
    """Tag tokens with the Stanford 3-class NER model.

    Requires the model and jar files to be present in the working directory
    (or on the configured classpath).
    """
    tagger = StanfordNERTagger('english.all.3class.distsim.crf.ser.gz',
                              'stanford-ner.jar')
    return tagger.tag(token_text)
def nltk_tagger(token_text):
    """POS-tag the tokens, then chunk them into named-entity subtrees with nltk."""
    return nltk.ne_chunk(nltk.pos_tag(token_text))
def stanford_main():
    """Run the Stanford tagger on the module-level article tokens and print the result."""
    print (stanford_tagger(token_text))
def nltk_main():
    """Run the nltk tagger on the module-level article tokens and print the result."""
    print (nltk_tagger(token_text))
def time_plot(stanford_total_time, nltk_total_time):
    """Render a bar chart comparing Stanford vs NLTK tagger run times (seconds)."""
    N = 1
    ind = np.arange(N)  # the x locations for the groups
    width = 0.35  # the width of the bars
    stanford_total_time = stanford_total_time
    nltk_total_time = nltk_total_time
    fig, ax = plt.subplots()
    rects1 = ax.bar(ind, stanford_total_time, width, color='r')
    rects2 = ax.bar(ind+width, nltk_total_time, width, color='y')

    # Add text for labels, title and axes ticks
    ax.set_xlabel('Classifier')
    ax.set_ylabel('Time (in seconds)')
    ax.set_title('Speed by NER Classifier')
    ax.set_xticks(ind+width)
    ax.set_xticklabels( ('') )
    ax.legend( (rects1[0], rects2[0]), ('Stanford', 'NLTK'), bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0. )

    def autolabel(rects):
        # attach each bar's height as a text label just above the bar
        for rect in rects:
            height = rect.get_height()
            ax.text(rect.get_x()+rect.get_width()/2., 1.02*height, '%10.2f' % float(height),
                    ha='center', va='bottom')

    autolabel(rects1)
    autolabel(rects2)
    plt.show()
if __name__ == '__main__':
    # os.times()[4] is the elapsed real (wall-clock) time in seconds, so
    # each delta below measures how long one tagger run took end to end.
    stanford_t0 = os.times()[4]
    stanford_main()
    stanford_t1 = os.times()[4]
    stanford_total_time = stanford_t1 - stanford_t0

    nltk_t0 = os.times()[4]
    nltk_main()
    nltk_t1 = os.times()[4]
    nltk_total_time = nltk_t1 - nltk_t0

    # compare the two timings in a bar chart
    time_plot(stanford_total_time, nltk_total_time)
| [
"nltk.pos_tag",
"os.times",
"nltk.ne_chunk",
"nltk.tokenize.word_tokenize",
"matplotlib.style.use",
"nltk.tag.StanfordNERTagger",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((233, 261), 'matplotlib.style.use', 'style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (242, 261), False, 'from matplotlib import style\n'), ((336, 359), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['raw_text'], {}), '(raw_text)\n', (349, 359), False, 'from nltk.tokenize import word_tokenize\n'), ((403, 481), 'nltk.tag.StanfordNERTagger', 'StanfordNERTagger', (['"""english.all.3class.distsim.crf.ser.gz"""', '"""stanford-ner.jar"""'], {}), "('english.all.3class.distsim.crf.ser.gz', 'stanford-ner.jar')\n", (420, 481), False, 'from nltk.tag import StanfordNERTagger\n'), ((611, 635), 'nltk.pos_tag', 'nltk.pos_tag', (['token_text'], {}), '(token_text)\n', (623, 635), False, 'import nltk\n'), ((649, 676), 'nltk.ne_chunk', 'nltk.ne_chunk', (['tagged_words'], {}), '(tagged_words)\n', (662, 676), False, 'import nltk\n'), ((886, 898), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (895, 898), True, 'import numpy as np\n'), ((1081, 1095), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1093, 1095), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1900, 1902), True, 'import matplotlib.pyplot as plt\n'), ((1950, 1960), 'os.times', 'os.times', ([], {}), '()\n', (1958, 1960), False, 'import os\n'), ((1996, 2006), 'os.times', 'os.times', ([], {}), '()\n', (2004, 2006), False, 'import os\n'), ((2072, 2082), 'os.times', 'os.times', ([], {}), '()\n', (2080, 2082), False, 'import os\n'), ((2110, 2120), 'os.times', 'os.times', ([], {}), '()\n', (2118, 2120), False, 'import os\n')] |
import streamlit as st
from streamlit import caching
import os
import torch
from src.core.detect import Detector
from src.core.utils import utils
from PIL import Image
import cv2
# Page header
st.title('1stDayKit Object Detection')
st.write('1stDayKit is a high-level Deep Learning toolkit for solving generic tasks.')

# Image upload widget; accepts PNG/JPG files.
uploaded_file = st.file_uploader("Choose an image...", type=["png","jpg"])

if uploaded_file is not None:
    # Run detection inside a spinner while the model loads and predicts.
    # (Removed a stray bare `st.spinner()` call that created and discarded
    # a context manager without ever displaying anything.)
    with st.spinner(text='Loading...'):
        det = Detector(name="DemoDet")
        img = Image.open(uploaded_file)
        img_cv = utils.pil_to_cv2(img)
        output = det.predict(img_cv)
        out_img = det.visualize(img_cv, output, figsize=(18, 18))
        # Persist the annotated image so st.image can display it.
        cv2.imwrite('tempImage.jpg', out_img)
        st.image('tempImage.jpg', width=700)
"cv2.imwrite",
"streamlit.image",
"PIL.Image.open",
"streamlit.file_uploader",
"streamlit.write",
"streamlit.spinner",
"src.core.utils.utils.pil_to_cv2",
"src.core.detect.Detector",
"streamlit.title"
] | [((180, 218), 'streamlit.title', 'st.title', (['"""1stDayKit Object Detection"""'], {}), "('1stDayKit Object Detection')\n", (188, 218), True, 'import streamlit as st\n'), ((219, 315), 'streamlit.write', 'st.write', (['"""1stDayKit is a high-level Deep Learning toolkit for solving generic tasks."""'], {}), "(\n '1stDayKit is a high-level Deep Learning toolkit for solving generic tasks.'\n )\n", (227, 315), True, 'import streamlit as st\n'), ((323, 382), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose an image..."""'], {'type': "['png', 'jpg']"}), "('Choose an image...', type=['png', 'jpg'])\n", (339, 382), True, 'import streamlit as st\n'), ((416, 428), 'streamlit.spinner', 'st.spinner', ([], {}), '()\n', (426, 428), True, 'import streamlit as st\n'), ((438, 467), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading..."""'}), "(text='Loading...')\n", (448, 467), True, 'import streamlit as st\n'), ((483, 507), 'src.core.detect.Detector', 'Detector', ([], {'name': '"""DemoDet"""'}), "(name='DemoDet')\n", (491, 507), False, 'from src.core.detect import Detector\n'), ((522, 547), 'PIL.Image.open', 'Image.open', (['uploaded_file'], {}), '(uploaded_file)\n', (532, 547), False, 'from PIL import Image\n'), ((565, 586), 'src.core.utils.utils.pil_to_cv2', 'utils.pil_to_cv2', (['img'], {}), '(img)\n', (581, 586), False, 'from src.core.utils import utils\n'), ((695, 732), 'cv2.imwrite', 'cv2.imwrite', (['"""tempImage.jpg"""', 'out_img'], {}), "('tempImage.jpg', out_img)\n", (706, 732), False, 'import cv2\n'), ((741, 777), 'streamlit.image', 'st.image', (['"""tempImage.jpg"""'], {'width': '(700)'}), "('tempImage.jpg', width=700)\n", (749, 777), True, 'import streamlit as st\n')] |
"""An Http API Client to interact with meross devices"""
from email import header
import logging
from types import MappingProxyType
from typing import List, MappingView, Optional, Dict, Any, Callable, Union
from enum import Enum
from uuid import uuid4
from hashlib import md5
from time import time
from json import (
dumps as json_dumps,
loads as json_loads,
)
import aiohttp
from yarl import URL
import socket
import asyncio
import async_timeout
from . import const as mc
KeyType = Union[dict, Optional[str]] # pylint: disable=unsubscriptable-object
def build_payload(namespace:str, method:str, payload:dict = None, key:KeyType = None, device_id:str = None)-> dict:
    """
    Build a Meross protocol message (header + payload) dict.

    namespace/method: protocol namespace and verb (GET/SET/...).
    payload: message payload; defaults to an empty dict. (A None default is
        used to avoid the shared-mutable-default-argument pitfall.)
    key: either the device key (str or None) used to sign the message, or a
        header dict when using the 'reply key' workflow — in that case the
        received header is mutated in place and reused as the new header.
    device_id: optional device uuid used to build the 'from' topic.
    """
    if payload is None:
        payload = {}
    if isinstance(key, dict):
        # reply-key workflow: reuse (and mutate) the received header
        key[mc.KEY_NAMESPACE] = namespace
        key[mc.KEY_METHOD] = method
        key[mc.KEY_PAYLOADVERSION] = 1
        key[mc.KEY_FROM] = mc.TOPIC_RESPONSE.format(device_id or mc.MANUFACTURER)
        return {
            mc.KEY_HEADER: key,
            mc.KEY_PAYLOAD: payload
        }
    messageid = uuid4().hex
    timestamp = int(time())
    return {
        mc.KEY_HEADER: {
            mc.KEY_MESSAGEID: messageid,
            mc.KEY_NAMESPACE: namespace,
            mc.KEY_METHOD: method,
            mc.KEY_PAYLOADVERSION: 1,
            mc.KEY_FROM: mc.TOPIC_RESPONSE.format(device_id or mc.MANUFACTURER),
            mc.KEY_TIMESTAMP: timestamp,
            mc.KEY_TIMESTAMPMS: 0,
            # sign = md5(messageid + key + timestamp), as expected by the device
            mc.KEY_SIGN: md5((messageid + (key or "") + str(timestamp)).encode('utf-8')).hexdigest()
        },
        mc.KEY_PAYLOAD: payload
    }
def get_replykey(header: dict, key:KeyType = None) -> KeyType:
    """
    Verify the header signature against 'key'.

    Returns the key itself when the signature matches; otherwise returns the
    full header dict ({"messageId", "timestamp", "sign", ...}) so it can be
    reused in a reply scheme.

    UPDATE 28-03-2021: the 'reply scheme' hack doesn't work over mqtt but
    works on http; this code is kept since it still behaves correctly when
    the key is right and could be reused in a future attempt.
    """
    if isinstance(key, dict):
        # already keying through the reply-key workflow — nothing to verify
        return header
    computed = md5(
        (header[mc.KEY_MESSAGEID] + (key or "") + str(header[mc.KEY_TIMESTAMP])).encode('utf-8')
    ).hexdigest()
    return key if computed == header[mc.KEY_SIGN] else header
def get_productname(type: str) -> str:
    """Map a device type string to its friendly product name; the raw type
    string is returned unchanged when no known prefix matches."""
    for prefix, productname in mc.TYPE_NAME_MAP.items():
        if type.startswith(prefix):
            return productname
    return type
def get_productnameuuid(type: str, uuid: str) -> str:
    """Return a display string of the form 'ProductName (uuid)'."""
    productname = get_productname(type)
    return "{} ({})".format(productname, uuid)
def get_productnametype(type: str) -> str:
    """Return 'ProductName (type)' when the type is known, else the raw type."""
    name = get_productname(type)
    # identity check is intentional: get_productname returns the very same
    # string object when no mapping matched
    return type if name is type else f"{name} ({type})"
class MerossDeviceDescriptor:
    """
    Utility class to extract various info from Appliance.System.All
    device descriptor.

    Attributes listed in _dynamicattrs are computed lazily on first access
    (via __getattr__), cached on the instance, and invalidated by update().
    """
    all = dict()  # last known Appliance.System.All payload (class-level default)

    # name -> extractor; each callable pulls the value out of the cached payload
    _dynamicattrs = {
        mc.KEY_SYSTEM: lambda _self: _self.all.get(mc.KEY_SYSTEM, {}),
        mc.KEY_HARDWARE: lambda _self: _self.system.get(mc.KEY_HARDWARE, {}),
        mc.KEY_FIRMWARE: lambda _self: _self.system.get(mc.KEY_FIRMWARE, {}),
        mc.KEY_TYPE: lambda _self: _self.hardware.get(mc.KEY_TYPE, mc.MANUFACTURER),
        mc.KEY_UUID: lambda _self: _self.hardware.get(mc.KEY_UUID),
        mc.KEY_MACADDRESS: lambda _self: _self.hardware.get(mc.KEY_MACADDRESS, mc.MEROSS_MACADDRESS),
        mc.KEY_INNERIP: lambda _self: _self.firmware.get(mc.KEY_INNERIP),
        mc.KEY_TIME: lambda _self: _self.system.get(mc.KEY_TIME, {}),
        mc.KEY_TIMEZONE: lambda _self: _self.time.get(mc.KEY_TIMEZONE),
        'productname': lambda _self: get_productnameuuid(_self.type, _self.uuid),
        'productmodel': lambda _self: f"{_self.type} {_self.hardware.get(mc.KEY_VERSION, '')}"
    }

    def __init__(self, payload: dict):
        self.ability = payload.get(mc.KEY_ABILITY, {})
        self.update(payload)

    def __getattr__(self, name):
        # compute the attribute lazily and cache it via setattr so subsequent
        # reads bypass __getattr__ entirely
        value = MerossDeviceDescriptor._dynamicattrs[name](self)
        setattr(self, name, value)
        return value

    def update(self, payload: dict):
        """
        Store a new payload and reset the cached lazy attributes.
        """
        self.all = payload.get(mc.KEY_ALL, self.all)
        self.digest = self.all.get(mc.KEY_DIGEST, {})
        for key in MerossDeviceDescriptor._dynamicattrs.keys():
            try:
                # drop any cached value so it is recomputed on next access
                delattr(self, key)
            except Exception:
                continue
class MerossHttpClient:
    """
    Async HTTP client for the local Meross device API (POST to /config).
    """

    DEFAULT_TIMEOUT = 5

    def __init__(self,
                 host: str,
                 key: str = None,
                 session: aiohttp.client.ClientSession = None,
                 logger: logging.Logger = None
                 ):
        """
        host: device IP address or hostname.
        key: device key used to sign messages (None to rely on the
             key-reply hack).
        session: optional shared aiohttp session; one is created if omitted.
        logger: optional logger; the module logger is used if omitted.
        """
        self._host = host
        self._requesturl = URL(f"http://{host}/config")
        self.key = key
        self.replykey = None  # last verified header, used by the key-reply hack
        self._session = session or aiohttp.ClientSession()
        self._logger = logger or logging.getLogger(__name__)

    def set_host_key(self, host: str, key: str) -> None:
        """Update the target host (rebuilding the request URL) and signing key."""
        if host != self._host:
            self._host = host
            self._requesturl = URL(f"http://{host}/config")
        self.key = key

    async def async_request(
        self,
        namespace: str,
        method: str = mc.METHOD_GET,
        payload: dict = None,
        timeout=DEFAULT_TIMEOUT
    ) -> dict:
        """
        Send a namespaced request and return the parsed response dict.

        On signature error (error code 5001) the request is retried once,
        echoing back the messageId/timestamp/sign from the error response
        (the 'key-reply' hack).

        payload defaults to an empty dict; a None default is used to avoid
        the shared-mutable-default-argument pitfall.
        """
        if payload is None:
            payload = {}
        self._logger.debug("MerossHttpClient(%s): HTTP POST method:(%s) namespace:(%s)", self._host, method, namespace)
        request: dict = build_payload(namespace, method, payload, self.key or self.replykey)
        response: dict = await self.async_raw_request(request, timeout)
        if response.get(mc.KEY_PAYLOAD, {}).get(mc.KEY_ERROR, {}).get(mc.KEY_CODE) == 5001:
            # sign error... hack and fool
            self._logger.debug(
                "Key error on %s (%s:%s) -> retrying with key-reply hack",
                self._host, method, namespace)
            req_header = request[mc.KEY_HEADER]
            resp_header = response[mc.KEY_HEADER]
            req_header[mc.KEY_MESSAGEID] = resp_header[mc.KEY_MESSAGEID]
            req_header[mc.KEY_TIMESTAMP] = resp_header[mc.KEY_TIMESTAMP]
            req_header[mc.KEY_SIGN] = resp_header[mc.KEY_SIGN]
            response = await self.async_raw_request(request, timeout)
        return response

    async def async_raw_request(self, payload: dict, timeout=DEFAULT_TIMEOUT) -> dict:
        """POST an already-built message dict and return the parsed JSON body.

        Also refreshes self.replykey from the response header. Any transport
        or JSON error is logged and re-raised.
        """
        try:
            with async_timeout.timeout(timeout):
                response = await self._session.post(
                    url=self._requesturl,
                    data=json_dumps(payload)
                )
                response.raise_for_status()
                text_body = await response.text()
                self._logger.debug("MerossHttpClient(%s): HTTP Response (%s)", self._host, text_body)
                json_body:dict = json_loads(text_body)
                self.replykey = get_replykey(json_body.get(mc.KEY_HEADER), self.key)
        except Exception as e:
            self._logger.debug("MerossHttpClient(%s): HTTP Exception (%s)", self._host, str(e) or type(e).__name__)
            raise e
        return json_body
| [
"logging.getLogger",
"aiohttp.ClientSession",
"json.loads",
"json.dumps",
"async_timeout.timeout",
"uuid.uuid4",
"yarl.URL",
"time.time"
] | [((5651, 5679), 'yarl.URL', 'URL', (['f"""http://{host}/config"""'], {}), "(f'http://{host}/config')\n", (5654, 5679), False, 'from yarl import URL\n'), ((1032, 1039), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1037, 1039), False, 'from uuid import uuid4\n'), ((1068, 1074), 'time.time', 'time', ([], {}), '()\n', (1072, 1074), False, 'from time import time\n'), ((5767, 5790), 'aiohttp.ClientSession', 'aiohttp.ClientSession', ([], {}), '()\n', (5788, 5790), False, 'import aiohttp\n'), ((5824, 5851), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (5841, 5851), False, 'import logging\n'), ((6003, 6031), 'yarl.URL', 'URL', (['f"""http://{host}/config"""'], {}), "(f'http://{host}/config')\n", (6006, 6031), False, 'from yarl import URL\n'), ((7740, 7761), 'json.loads', 'json_loads', (['text_body'], {}), '(text_body)\n', (7750, 7761), True, 'from json import dumps as json_dumps, loads as json_loads\n'), ((7332, 7362), 'async_timeout.timeout', 'async_timeout.timeout', (['timeout'], {}), '(timeout)\n', (7353, 7362), False, 'import async_timeout\n'), ((7484, 7503), 'json.dumps', 'json_dumps', (['payload'], {}), '(payload)\n', (7494, 7503), True, 'from json import dumps as json_dumps, loads as json_loads\n')] |
import os
import json
import pytest
import pandas as pd
# TODO: revise the following constants when using new or revised CPS/PUF data
CPS_START_YEAR = 2014  # start year for CPS data (per fixture name; confirm against data docs)
PUF_START_YEAR = 2011  # start year for PUF data (per fixture name; confirm against data docs)
PUF_COUNT = 248591  # expected record count of puf.csv (validated by the puf_count fixture)
LAST_YEAR = 2027  # last year provided by the fixtures (presumably last projection year)
@pytest.fixture(scope='session')
def test_path():
    """Absolute path of the directory containing this test module."""
    directory = os.path.dirname(__file__)
    return os.path.abspath(directory)
@pytest.fixture(scope='session')
def growfactors(test_path):
    """Growth-factors table from puf_stage1, indexed by YEAR."""
    csv_path = os.path.join(test_path, '../puf_stage1/growfactors.csv')
    return pd.read_csv(csv_path, index_col='YEAR')
@pytest.fixture(scope='session')
def metadata(test_path):
    """Parsed contents of records_metadata.json."""
    json_path = os.path.join(test_path, 'records_metadata.json')
    with open(json_path, 'r') as mdfile:
        return json.load(mdfile)
@pytest.fixture(scope='session')
def cps(test_path):
    """CPS data as a pandas DataFrame."""
    csv_path = os.path.join(test_path, '../cps_data/cps.csv.gz')
    return pd.read_csv(csv_path)
@pytest.fixture(scope='session')
def cps_count(test_path):
    """Number of records in the CPS data file."""
    csv_path = os.path.join(test_path, '../cps_data/cps.csv.gz')
    return len(pd.read_csv(csv_path))
@pytest.fixture(scope='session')
def cps_start_year():
    """First year of the CPS data (see the CPS_START_YEAR constant)."""
    return CPS_START_YEAR
@pytest.fixture(scope='session')
def puf_path(test_path):
    """Expected location of the (possibly absent) puf.csv file."""
    puf_file = os.path.join(test_path, '../puf_data/puf.csv')
    return puf_file
@pytest.fixture(scope='session')
def puf(puf_path):
    """PUF data as a DataFrame, or None when puf.csv is not available."""
    return pd.read_csv(puf_path) if os.path.isfile(puf_path) else None
@pytest.fixture(scope='session')
def puf_count(puf_path):
    """Record count of puf.csv, validated against PUF_COUNT when present."""
    if not os.path.isfile(puf_path):
        # no local PUF file: fall back to the known expected count
        return PUF_COUNT
    count = len(pd.read_csv(puf_path))
    if count != PUF_COUNT:
        msg = 'puf.shape[0] = {} not equal to PUF_COUNT = {}'
        raise ValueError(msg.format(count, PUF_COUNT))
    return count
@pytest.fixture(scope='session')
def puf_start_year():
    """First year of the PUF data (see the PUF_START_YEAR constant)."""
    return PUF_START_YEAR
@pytest.fixture(scope='session')
def last_year():
    """Last data year (see the LAST_YEAR constant)."""
    return LAST_YEAR
@pytest.fixture(scope='session')
def cps_weights(test_path):
    """CPS weights table from cps_stage2."""
    weights_path = os.path.join(test_path, '../cps_stage2/cps_weights.csv.gz')
    return pd.read_csv(weights_path)
@pytest.fixture(scope='session')
def puf_weights(test_path):
    """PUF weights table from puf_stage2."""
    weights_path = os.path.join(test_path, '../puf_stage2/puf_weights.csv.gz')
    return pd.read_csv(weights_path)
@pytest.fixture(scope='session')
def cps_ratios(test_path):
    """Placeholder: CPS ratios are not currently produced, so return None."""
    # cpsr_path = os.path.join(test_path, '../cps_stage3/cps_ratios.csv')
    # return pd.read_csv(cpsr_path, index_col=0)
    return None
@pytest.fixture(scope='session')
def puf_ratios(test_path):
    """PUF ratios table from puf_stage3, indexed by its first column."""
    ratios_path = os.path.join(test_path, '../puf_stage3/puf_ratios.csv')
    return pd.read_csv(ratios_path, index_col=0)
| [
"pandas.read_csv",
"os.path.join",
"os.path.isfile",
"os.path.dirname",
"json.load",
"pytest.fixture"
] | [((219, 250), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (233, 250), False, 'import pytest\n'), ((325, 356), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (339, 356), False, 'import pytest\n'), ((509, 540), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (523, 540), False, 'import pytest\n'), ((698, 729), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (712, 729), False, 'import pytest\n'), ((851, 882), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (865, 882), False, 'import pytest\n'), ((1039, 1070), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1053, 1070), False, 'import pytest\n'), ((1122, 1153), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1136, 1153), False, 'import pytest\n'), ((1240, 1271), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1254, 1271), False, 'import pytest\n'), ((1394, 1425), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1408, 1425), False, 'import pytest\n'), ((1767, 1798), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1781, 1798), False, 'import pytest\n'), ((1850, 1881), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1864, 1881), False, 'import pytest\n'), ((1923, 1954), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (1937, 1954), False, 'import pytest\n'), ((2096, 2127), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2110, 2127), False, 'import pytest\n'), ((2269, 2300), 'pytest.fixture', 'pytest.fixture', ([], {'scope': 
'"""session"""'}), "(scope='session')\n", (2283, 2300), False, 'import pytest\n'), ((2470, 2501), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (2484, 2501), False, 'import pytest\n'), ((399, 455), 'os.path.join', 'os.path.join', (['test_path', '"""../puf_stage1/growfactors.csv"""'], {}), "(test_path, '../puf_stage1/growfactors.csv')\n", (411, 455), False, 'import os\n'), ((467, 505), 'pandas.read_csv', 'pd.read_csv', (['gf_path'], {'index_col': '"""YEAR"""'}), "(gf_path, index_col='YEAR')\n", (478, 505), True, 'import pandas as pd\n'), ((580, 628), 'os.path.join', 'os.path.join', (['test_path', '"""records_metadata.json"""'], {}), "(test_path, 'records_metadata.json')\n", (592, 628), False, 'import os\n'), ((765, 814), 'os.path.join', 'os.path.join', (['test_path', '"""../cps_data/cps.csv.gz"""'], {}), "(test_path, '../cps_data/cps.csv.gz')\n", (777, 814), False, 'import os\n'), ((826, 847), 'pandas.read_csv', 'pd.read_csv', (['cps_path'], {}), '(cps_path)\n', (837, 847), True, 'import pandas as pd\n'), ((924, 973), 'os.path.join', 'os.path.join', (['test_path', '"""../cps_data/cps.csv.gz"""'], {}), "(test_path, '../cps_data/cps.csv.gz')\n", (936, 973), False, 'import os\n'), ((987, 1008), 'pandas.read_csv', 'pd.read_csv', (['cps_path'], {}), '(cps_path)\n', (998, 1008), True, 'import pandas as pd\n'), ((1190, 1236), 'os.path.join', 'os.path.join', (['test_path', '"""../puf_data/puf.csv"""'], {}), "(test_path, '../puf_data/puf.csv')\n", (1202, 1236), False, 'import os\n'), ((1298, 1322), 'os.path.isfile', 'os.path.isfile', (['puf_path'], {}), '(puf_path)\n', (1312, 1322), False, 'import os\n'), ((1458, 1482), 'os.path.isfile', 'os.path.isfile', (['puf_path'], {}), '(puf_path)\n', (1472, 1482), False, 'import os\n'), ((1999, 2058), 'os.path.join', 'os.path.join', (['test_path', '"""../cps_stage2/cps_weights.csv.gz"""'], {}), "(test_path, '../cps_stage2/cps_weights.csv.gz')\n", (2011, 2058), False, 'import os\n'), 
((2070, 2092), 'pandas.read_csv', 'pd.read_csv', (['cpsw_path'], {}), '(cpsw_path)\n', (2081, 2092), True, 'import pandas as pd\n'), ((2172, 2231), 'os.path.join', 'os.path.join', (['test_path', '"""../puf_stage2/puf_weights.csv.gz"""'], {}), "(test_path, '../puf_stage2/puf_weights.csv.gz')\n", (2184, 2231), False, 'import os\n'), ((2243, 2265), 'pandas.read_csv', 'pd.read_csv', (['pufw_path'], {}), '(pufw_path)\n', (2254, 2265), True, 'import pandas as pd\n'), ((2545, 2600), 'os.path.join', 'os.path.join', (['test_path', '"""../puf_stage3/puf_ratios.csv"""'], {}), "(test_path, '../puf_stage3/puf_ratios.csv')\n", (2557, 2600), False, 'import os\n'), ((2612, 2647), 'pandas.read_csv', 'pd.read_csv', (['pufr_path'], {'index_col': '(0)'}), '(pufr_path, index_col=0)\n', (2623, 2647), True, 'import pandas as pd\n'), ((295, 320), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (310, 320), False, 'import os\n'), ((680, 694), 'json.load', 'json.load', (['mdf'], {}), '(mdf)\n', (689, 694), False, 'import json\n'), ((1339, 1360), 'pandas.read_csv', 'pd.read_csv', (['puf_path'], {}), '(puf_path)\n', (1350, 1360), True, 'import pandas as pd\n'), ((1501, 1522), 'pandas.read_csv', 'pd.read_csv', (['puf_path'], {}), '(puf_path)\n', (1512, 1522), True, 'import pandas as pd\n')] |
from math import sqrt
import re
from curious.commands import Context
from curious.commands.exc import ConversionFailedError
from typing import Tuple
# Matches an optional '#' or '0x' prefix followed by 1-6 hex-ish characters;
# group 2 is the digits part (int(..., 16) later rejects non-hex letters).
colour_pattern = re.compile(r'(#|0x)?([A-Za-z0-9]{1,6})')

RGB = Tuple[int, int, int]


class Colour:
    """
    A class that represents a 24-bit RGB colour stored as a single int.
    """

    def __init__(self, value: int):
        self.value = value

    def _get_part(self, part) -> int:
        """Return one byte of the colour: part 0 = red, 1 = green, 2 = blue."""
        string = f'{self.value:06x}'
        piece = slice(part * 2, part * 2 + 2)
        return int(string[piece], base=16)

    @property
    def red(self) -> int:
        return self._get_part(0)

    r = red

    @property
    def green(self) -> int:
        return self._get_part(1)

    g = green

    @property
    def blue(self) -> int:
        return self._get_part(2)

    b = blue

    @property
    def rgb(self) -> RGB:
        return self.r, self.g, self.b

    def distance(self, other: 'Colour'):
        """Euclidean distance between two colours in RGB space."""
        # Taken from some wikipedia article I'm too lazy to dig it up
        r1, g1, b1 = self.rgb
        r2, g2, b2 = other.rgb
        return sqrt((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 2)

    def luminance(self) -> float:
        """
        Calculate the luminance of the colour.

        Based on information from https://www.w3.org/TR/WCAG20-TECHS/G18.html
        """
        def convert(value):
            value /= 255
            if value <= 0.03928:
                return value / 12.92
            else:
                return ((value + 0.055) / 1.055) ** 2.4

        r, g, b = map(convert, self.rgb)
        return r * 0.2126 + g * 0.7152 + b * 0.0722

    def contrast(self, other: 'Colour'):
        """
        Calculate the contrast between two colours.

        Based on information from https://www.w3.org/TR/WCAG20-TECHS/G18.html
        """
        # TODO make x.contrast(y) the same as y.contrast(x) instead of x/y y/x
        return (self.luminance() + 0.05) / (other.luminance() + 0.05)

    def __repr__(self):
        return '{0.__class__.__name__}({0.value})'.format(self)

    def __str__(self):
        return f'#{self.value:06x}'

    def __eq__(self, other):
        # Return NotImplemented for non-Colour operands instead of raising
        # AttributeError on other.value (bug fix).
        if not isinstance(other, Colour):
            return NotImplemented
        return self.value == other.value

    def __hash__(self):
        return hash(self.value)
def convert_hex_colour(annotation, ctx: Context, arg: str) -> Colour:
    """
    Converts a string representation of a hex colour (e.g. '#ff0000',
    '0xFF0000' or 'ff0000') into an instance of Colour.
    """
    digits = colour_pattern.sub(r'\2', arg)
    try:
        value = int(digits, base=16)
    except ValueError:
        raise ConversionFailedError(ctx, digits, annotation, 'Invalid value.')
    return annotation(value)
class RGBPart:
    """Marker annotation for a hex value in the unsigned-char range (0-255)."""
def valid_unsigned_char(annotation, ctx: Context, arg: str):
    """
    Parse *arg* as an integer and check it lies in the domain [0, 255].

    255 = 0xFF, which is the largest value for any component of an RGB(A)
    number.
    """
    try:
        value = int(arg)
    except ValueError:
        raise ConversionFailedError(ctx, arg, annotation, 'Invalid number.')
    if value < 0 or value > 255:
        raise ConversionFailedError(ctx, arg, annotation, 'Value must be within range (0 - 255)')
    return value
# Register the converters with curious so command parameters annotated with
# Colour / RGBPart are parsed by the functions above.
Context.add_converter(Colour, convert_hex_colour)
Context.add_converter(RGBPart, valid_unsigned_char)
| [
"math.sqrt",
"curious.commands.exc.ConversionFailedError",
"curious.commands.Context.add_converter",
"re.compile"
] | [((168, 207), 're.compile', 're.compile', (['"""(#|0x)?([A-Za-z0-9]{1,6})"""'], {}), "('(#|0x)?([A-Za-z0-9]{1,6})')\n", (178, 207), False, 'import re\n'), ((3251, 3300), 'curious.commands.Context.add_converter', 'Context.add_converter', (['Colour', 'convert_hex_colour'], {}), '(Colour, convert_hex_colour)\n', (3272, 3300), False, 'from curious.commands import Context\n'), ((3301, 3352), 'curious.commands.Context.add_converter', 'Context.add_converter', (['RGBPart', 'valid_unsigned_char'], {}), '(RGBPart, valid_unsigned_char)\n', (3322, 3352), False, 'from curious.commands import Context\n'), ((1069, 1123), 'math.sqrt', 'sqrt', (['((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 2)'], {}), '((r2 - r1) ** 2 + (g2 - g1) ** 2 + (b2 - b1) ** 2)\n', (1073, 1123), False, 'from math import sqrt\n'), ((2517, 2578), 'curious.commands.exc.ConversionFailedError', 'ConversionFailedError', (['ctx', 'arg', 'annotation', '"""Invalid value."""'], {}), "(ctx, arg, annotation, 'Invalid value.')\n", (2538, 2578), False, 'from curious.commands.exc import ConversionFailedError\n'), ((3022, 3084), 'curious.commands.exc.ConversionFailedError', 'ConversionFailedError', (['ctx', 'arg', 'annotation', '"""Invalid number."""'], {}), "(ctx, arg, annotation, 'Invalid number.')\n", (3043, 3084), False, 'from curious.commands.exc import ConversionFailedError\n'), ((3147, 3234), 'curious.commands.exc.ConversionFailedError', 'ConversionFailedError', (['ctx', 'arg', 'annotation', '"""Value must be within range (0 - 255)"""'], {}), "(ctx, arg, annotation,\n 'Value must be within range (0 - 255)')\n", (3168, 3234), False, 'from curious.commands.exc import ConversionFailedError\n')] |
"""
Created on 9 Aug 2016
@author: <NAME> (<EMAIL>)
"""
import _csv
import sys
# --------------------------------------------------------------------------------------------------------------------
class Histogram(object):
    """
    A fixed-range histogram with equally-sized bins, serialisable to CSV.
    Data in the closed interval [minimum, maximum] is accepted.
    """

    __HEADER_BIN =          ".bin"
    __HEADER_COUNT =        ".count"


    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self, minimum, maximum, bin_count, path):
        """
        Constructor.

        minimum / maximum: inclusive bounds of accepted data.
        bin_count: number of equally-sized bins.
        path: label used as the prefix of the CSV column headers.
        """
        self.__minimum = minimum
        self.__maximum = maximum
        self.__bin_count = bin_count
        self.__path = path

        self.__counts = [0] * bin_count
        self.__max_count = int(0)
        self.__delta = (maximum - minimum) / bin_count


    def __len__(self):
        return self.__bin_count


    # ----------------------------------------------------------------------------------------------------------------

    def append(self, datum):
        """
        Add a datum and return (bin_index, new_count_for_that_bin).
        Raises ValueError when datum is outside [minimum, maximum].
        """
        # reject out-of-range
        if datum < self.__minimum or datum > self.__maximum:
            raise ValueError("datum out of range:%f" % datum)

        # compute index - clamp into the last bin so that datum == maximum
        # (accepted by the range check above) does not index one past the
        # end of the counts list (bug fix)
        offset = datum - self.__minimum
        index = min(int(offset // self.__delta), self.__bin_count - 1)

        # update counts...
        self.__counts[index] += 1

        if self.__counts[index] > self.__max_count:
            self.__max_count = int(self.__counts[index])

        return index, self.__counts[index]


    def to_csv(self, filename=None):
        """
        Write '<path>.bin,<path>.count' header and one row per bin to the
        given file, or to stdout when filename is None.
        """
        file = sys.stdout if filename is None else open(filename, "w")
        try:
            writer = _csv.writer(file)

            writer.writerow((self.__path + Histogram.__HEADER_BIN, self.__path + Histogram.__HEADER_COUNT))

            for i in range(self.bin_count):
                writer.writerow((format(self.__bin(i), '.6f'), self.__counts[i]))
        finally:
            # close only files we opened ourselves; never close stdout
            if filename is not None:
                file.close()


    # ----------------------------------------------------------------------------------------------------------------

    @property
    def bins(self):
        # lower bound of each bin
        return [self.__bin(i) for i in range(self.__bin_count)]


    @property
    def minimum(self):
        return self.__minimum


    @property
    def maximum(self):
        return self.__maximum


    @property
    def bin_count(self):
        return self.__bin_count


    @property
    def path(self):
        return self.__path


    @property
    def delta(self):
        # width of one bin
        return self.__delta


    @property
    def max_count(self):
        return self.__max_count


    @property
    def counts(self):
        return self.__counts


    # ----------------------------------------------------------------------------------------------------------------

    def __bin(self, index):
        """Return the lower bound of the bin at the given index."""
        return self.__minimum + (index * self.__delta)


    # ----------------------------------------------------------------------------------------------------------------

    def __str__(self, *args, **kwargs):
        return "Histogram:{minimum:%0.6f, maximum:%0.6f, bin_count:%d, delta:%0.6f, max_count:%d, counts:%s, " \
               "path:%s}" % \
                    (self.minimum, self.maximum, self.bin_count, self.delta, self.max_count, self.counts,
                     self.path)
| [
"_csv.writer"
] | [((1618, 1635), '_csv.writer', '_csv.writer', (['file'], {}), '(file)\n', (1629, 1635), False, 'import _csv\n')] |
import importlib
import sys
import pituophis
def option_value(short, long_name, default=None):
    """Return the argument following *short* or *long_name* in sys.argv,
    or *default* when neither flag is present.

    Previously only the short flag's position was looked up even when the
    long form was detected, so e.g. '--host 1.2.3.4' crashed with
    ValueError (bug fix).
    """
    for flag in (short, long_name):
        if flag in sys.argv:
            return sys.argv[sys.argv.index(flag) + 1]
    return default


# check if the user is running the script with the correct number of arguments
if len(sys.argv) < 2:
    # if not, print the usage (a stray 'cd' token was removed from the line)
    print('usage: pituophis [command] [options]')
    print('Commands:')
    print('  serve [options]')
    print('  fetch [url] [options]')
    print('Server Options:')
    print('  -H, --host=HOST\t\tAdvertised host (default: 127.0.0.1)')
    print('  -p, --port=PORT\t\tPort to bind to (default: 70)')
    print('  -a, --advertised-port=PORT\tPort to advertise')
    print('  -d, --directory=DIR\t\tDirectory to serve (default: pub/)')
    print('  -A, --alt-handler=HANDLER\tAlternate handler to use if 404 error is generated (python file with it defined as "def alt(request):")')
    print('  -s, --send-period\t\tSend a period at the end of each response (default: False)')
    print('  -D, --debug\t\t\tPrint requests as they are received (default: False)')
    print('  -v, --version\t\t\tPrint version')
    print('Fetch Options:')
    print('  -o, --output=FILE\t\tFile to write to (default: stdout)')
else:
    # check if the user is serving or fetching
    if sys.argv[1] == 'serve':
        # gather options, falling back to defaults
        host = option_value('-H', '--host', '127.0.0.1')
        port = int(option_value('-p', '--port', 70))
        advertised_port = option_value('-a', '--advertised-port')
        if advertised_port is not None:
            advertised_port = int(advertised_port)
        pub_dir = option_value('-d', '--directory', 'pub/')
        alt_handler = option_value('-A', '--alt-handler', False)
        if alt_handler:
            # load the handler function from the named module
            alt_handler = getattr(
                importlib.import_module(alt_handler), 'handler')
        send_period = '-s' in sys.argv or '--send-period' in sys.argv
        debug = '-D' in sys.argv or '--debug' in sys.argv
        # start the server
        pituophis.serve(host=host, port=port, advertised_port=advertised_port,
                        handler=pituophis.handle, pub_dir=pub_dir,
                        alt_handler=alt_handler, send_period=send_period,
                        debug=debug)
    elif sys.argv[1] == 'fetch':
        # url
        url = sys.argv[2]
        # output file
        output = option_value('-o', '--output', 'stdout')
        # start the fetch
        o = pituophis.get(url)
        if output == 'stdout':
            sys.stdout.buffer.write(o.binary)
        else:
            # the with-statement closes the file; the redundant explicit
            # close() was removed
            with open(output, 'wb') as f:
                f.write(o.binary)
"importlib.import_module",
"pituophis.get",
"sys.stdout.buffer.write",
"pituophis.serve",
"sys.argv.index"
] | [((2471, 2655), 'pituophis.serve', 'pituophis.serve', ([], {'host': 'host', 'port': 'port', 'advertised_port': 'advertised_port', 'handler': 'pituophis.handle', 'pub_dir': 'pub_dir', 'alt_handler': 'alt_handler', 'send_period': 'send_period', 'debug': 'debug'}), '(host=host, port=port, advertised_port=advertised_port,\n handler=pituophis.handle, pub_dir=pub_dir, alt_handler=alt_handler,\n send_period=send_period, debug=debug)\n', (2486, 2655), False, 'import pituophis\n'), ((2972, 2990), 'pituophis.get', 'pituophis.get', (['url'], {}), '(url)\n', (2985, 2990), False, 'import pituophis\n'), ((2120, 2156), 'importlib.import_module', 'importlib.import_module', (['alt_handler'], {}), '(alt_handler)\n', (2143, 2156), False, 'import importlib\n'), ((3034, 3067), 'sys.stdout.buffer.write', 'sys.stdout.buffer.write', (['o.binary'], {}), '(o.binary)\n', (3057, 3067), False, 'import sys\n'), ((1329, 1349), 'sys.argv.index', 'sys.argv.index', (['"""-H"""'], {}), "('-H')\n", (1343, 1349), False, 'import sys\n'), ((1825, 1845), 'sys.argv.index', 'sys.argv.index', (['"""-d"""'], {}), "('-d')\n", (1839, 1845), False, 'import sys\n'), ((2002, 2022), 'sys.argv.index', 'sys.argv.index', (['"""-A"""'], {}), "('-A')\n", (2016, 2022), False, 'import sys\n'), ((1473, 1493), 'sys.argv.index', 'sys.argv.index', (['"""-p"""'], {}), "('-p')\n", (1487, 1493), False, 'import sys\n'), ((1664, 1684), 'sys.argv.index', 'sys.argv.index', (['"""-a"""'], {}), "('-a')\n", (1678, 1684), False, 'import sys\n'), ((2908, 2928), 'sys.argv.index', 'sys.argv.index', (['"""-o"""'], {}), "('-o')\n", (2922, 2928), False, 'import sys\n')] |
"""
Every name reference is swapped for a call to ``__autoimport__``, which
will check if it's part of the locals or globals, falling back to trying
an import before giving up.
"""
from importlib import import_module
from ast import (AST, Call, Constant, Load, Name, NodeTransformer, Str,
                 copy_location, fix_missing_locations, keyword)
from typing import Any, Union, Dict
__all__ = ['__autoimport__']
class AutoImportTransformer(NodeTransformer):
    """AST transformer that rewrites every loaded name ``x`` into
    ``__autoimport__('x')`` so unresolved names can fall back to imports."""

    def visit_Name(self, node: Name) -> Union[Name, Call]:
        # only rewrite loads; store/delete contexts must keep their Name nodes
        if not isinstance(node.ctx, Load):
            return node
        delegate = Call(
            func=Name(id='__autoimport__', ctx=Load()),
            args=[
                # ast.Str is deprecated since 3.8 and removed in Python 3.12;
                # ast.Constant is the supported replacement (bug fix)
                Constant(value=node.id)
            ],
            keywords=[])
        copy_location(delegate, node)
        fix_missing_locations(delegate)
        return delegate
def __autoimport__(name: str) -> Any:
    """Resolve *name* from the calling frame's locals or globals, falling
    back to importing a module of that name; raise NameError otherwise."""
    import inspect
    caller = inspect.currentframe().f_back  # type: ignore
    for scope in (caller.f_locals, caller.f_globals):
        if name in scope:
            return scope[name]
    try:
        return import_module(name)
    except ImportError:
        pass
    raise NameError(name)
def parser(node: AST) -> AST:
    """Run the auto-import transformation over *node* and return the result."""
    transformer = AutoImportTransformer()
    return transformer.visit(node)
| [
"ast.Load",
"importlib.import_module",
"ast.copy_location",
"inspect.currentframe",
"ast.fix_missing_locations",
"ast.Str"
] | [((756, 785), 'ast.copy_location', 'copy_location', (['delegate', 'node'], {}), '(delegate, node)\n', (769, 785), False, 'from ast import NodeTransformer, copy_location, fix_missing_locations, AST, Call, Name, Load, Str, keyword\n'), ((794, 825), 'ast.fix_missing_locations', 'fix_missing_locations', (['delegate'], {}), '(delegate)\n', (815, 825), False, 'from ast import NodeTransformer, copy_location, fix_missing_locations, AST, Call, Name, Load, Str, keyword\n'), ((922, 944), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (942, 944), False, 'import inspect\n'), ((1134, 1153), 'importlib.import_module', 'import_module', (['name'], {}), '(name)\n', (1147, 1153), False, 'from importlib import import_module\n'), ((692, 706), 'ast.Str', 'Str', ([], {'s': 'node.id'}), '(s=node.id)\n', (695, 706), False, 'from ast import NodeTransformer, copy_location, fix_missing_locations, AST, Call, Name, Load, Str, keyword\n'), ((648, 654), 'ast.Load', 'Load', ([], {}), '()\n', (652, 654), False, 'from ast import NodeTransformer, copy_location, fix_missing_locations, AST, Call, Name, Load, Str, keyword\n')] |
from PyQt5 import QtWidgets, QtCore, QtGui
from pyqtgraph import SignalProxy
class TaskWidget(QtWidgets.QWidget):
def __init__(self, task, rate_limit=0.01, parent=None):
super().__init__(parent=parent)
self.task = task
self.init_ui()
proxy_config = {
'signal': self.task.progressed,
'delay': 0.01,
'rateLimit': rate_limit,
'slot': self.update,
}
self.task.exception_raised.connect(lambda: self.update_run_state(state='error'))
self.task.running.connect(lambda r: self.update_run_state(state='run' if r else 'stop'))
self.proxy = SignalProxy(**proxy_config)
self.running = False
return
def init_ui(self):
task_label = QtWidgets.QLabel('<h3>{}</h3>'.format(self.task.name))
control_layout = QtWidgets.QHBoxLayout()
play_icon = self.style().standardIcon(QtWidgets.QStyle.SP_MediaPlay)
stop_icon = self.style().standardIcon(QtWidgets.QStyle.SP_MediaStop)
start_button = QtWidgets.QToolButton()
stop_button = QtWidgets.QToolButton()
start_button.clicked.connect(lambda: self.run_state(state='run'))
stop_button.clicked.connect(lambda: self.run_state(state='stop'))
start_button.setIcon(play_icon)
stop_button.setIcon(stop_icon)
control_layout.addWidget(start_button)
control_layout.addWidget(stop_button)
top_layout = QtWidgets.QHBoxLayout()
top_layout.addWidget(task_label)
top_layout.addLayout(control_layout)
self.progress_bar = QtWidgets.QProgressBar()
self.progress_bar.setTextVisible(True)
self.time_label = QtWidgets.QLabel()
self.state_label = QtWidgets.QLabel()
bottom_layout = QtWidgets.QHBoxLayout()
bottom_layout.addWidget(self.time_label)
bottom_layout.addStretch()
bottom_layout.addWidget(self.state_label)
outer_layout = QtWidgets.QVBoxLayout()
outer_layout.addLayout(top_layout)
outer_layout.addWidget(self.progress_bar)
outer_layout.addLayout(bottom_layout)
self.setLayout(outer_layout)
return
@QtCore.pyqtSlot(object)
def update(self, args):
if not self.running:
return
depth, n, total, elapsed = args
if total is not None:
iter_word = 'iterations' if total > 1 else 'iteration'
pct = 100 * n / total
if n:
tot_time = (elapsed / n) * total
rem_time = tot_time - elapsed
else:
rem_time = None
progfmt = "{completed} of {total} {iter_word} complete ({pct:1.1f}%)"
self.progress_bar.setFormat(progfmt.format(**{
'completed': n,
'total': total,
'iter_word': iter_word,
'pct': pct,
}))
self.progress_bar.setRange(0, 100)
self.progress_bar.setValue(pct)
timefmt = "{elapsed:s} elapsed" if rem_time is None else "{elapsed:s} elapsed; {rem:s} remaining"
self.time_label.setText(timefmt.format(**{
'elapsed': readable_seconds(elapsed),
'rem': readable_seconds(rem_time) if rem_time is not None else None,
}))
else:
iter_word = 'iterations' if n > 1 else 'iteration'
progfmt = "{completed} {iter_word} complete"
self.progress_bar.setRange(0, 0)
self.progress_bar.setValue(0)
self.progress_bar.setFormat(progfmt.format(**{
'completed': n,
'iter_word': iter_word,
}))
timefmt = "{elapsed:s} elapsed"
self.time_label.setText(timefmt.format(**{
'elapsed': readable_seconds(elapsed),
}))
return
def run_state(self, state):
if state == 'run':
self.task()
self.running = True
if state == 'error':
pass
if state == 'stop':
self.task.stop()
self.running = False
self.update_run_state(state)
return
def update_run_state(self, state):
if state == 'run':
self.state_label.setText('Running')
if state == 'error':
self.state_label.setText('Exception encountered')
if state == 'stop':
self.state_label.setText('Stopped')
self.progress_bar.setRange(0, 100)
self.progress_bar.setValue(0)
return
def readable_seconds(seconds):
seconds = int(seconds)
if not seconds:
return '0 s'
hours = seconds // 3600
mins = (seconds % 3600) // 60
secs = seconds % 60
htext = '{} h'.format(hours) if hours else ''
mtext = '{} m'.format(mins) if mins else ''
stext = '{} s'.format(secs) if secs else ''
readable = ' '.join(v for v in (htext, mtext, stext) if v)
return readable
| [
"PyQt5.QtWidgets.QToolButton",
"PyQt5.QtCore.pyqtSlot",
"PyQt5.QtWidgets.QHBoxLayout",
"PyQt5.QtWidgets.QProgressBar",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QVBoxLayout",
"pyqtgraph.SignalProxy"
] | [((2192, 2215), 'PyQt5.QtCore.pyqtSlot', 'QtCore.pyqtSlot', (['object'], {}), '(object)\n', (2207, 2215), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((647, 674), 'pyqtgraph.SignalProxy', 'SignalProxy', ([], {}), '(**proxy_config)\n', (658, 674), False, 'from pyqtgraph import SignalProxy\n'), ((846, 869), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (867, 869), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1047, 1070), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', ([], {}), '()\n', (1068, 1070), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1093, 1116), 'PyQt5.QtWidgets.QToolButton', 'QtWidgets.QToolButton', ([], {}), '()\n', (1114, 1116), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1461, 1484), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1482, 1484), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1600, 1624), 'PyQt5.QtWidgets.QProgressBar', 'QtWidgets.QProgressBar', ([], {}), '()\n', (1622, 1624), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1699, 1717), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (1715, 1717), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1745, 1763), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ([], {}), '()\n', (1761, 1763), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1788, 1811), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ([], {}), '()\n', (1809, 1811), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n'), ((1970, 1993), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ([], {}), '()\n', (1991, 1993), False, 'from PyQt5 import QtWidgets, QtCore, QtGui\n')] |
# Generated by Django 2.1.5 on 2019-08-28 07:41
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0016_friends'),
]
operations = [
migrations.AlterModelOptions(
name='friends',
options={'verbose_name': 'Friend List', 'verbose_name_plural': 'Friend List'},
),
migrations.AlterModelOptions(
name='messagedata',
options={'verbose_name': 'Messages', 'verbose_name_plural': 'Messages'},
),
migrations.RenameField(
model_name='friends',
old_name='friends',
new_name='friendList',
),
]
| [
"django.db.migrations.AlterModelOptions",
"django.db.migrations.RenameField"
] | [((214, 341), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""friends"""', 'options': "{'verbose_name': 'Friend List', 'verbose_name_plural': 'Friend List'}"}), "(name='friends', options={'verbose_name':\n 'Friend List', 'verbose_name_plural': 'Friend List'})\n", (242, 341), False, 'from django.db import migrations\n'), ((382, 507), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""messagedata"""', 'options': "{'verbose_name': 'Messages', 'verbose_name_plural': 'Messages'}"}), "(name='messagedata', options={'verbose_name':\n 'Messages', 'verbose_name_plural': 'Messages'})\n", (410, 507), False, 'from django.db import migrations\n'), ((548, 640), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""friends"""', 'old_name': '"""friends"""', 'new_name': '"""friendList"""'}), "(model_name='friends', old_name='friends', new_name=\n 'friendList')\n", (570, 640), False, 'from django.db import migrations\n')] |
import tensorflow as tf
from tensorflow.compat.v1 import logging
logging.set_verbosity("INFO")
logging.info("TF Version:{}".format(tf.__version__))
try:
import horovod.tensorflow as hvd
no_horovod = False
except ModuleNotFoundError:
logging.warning("No horvod module, cannot perform distributed training")
no_horovod = True
import os
import six
from types import SimpleNamespace
import pprint
import time
import functools
import numpy as np
from tensorflow.python.profiler import profiler_v2 as profiler
from graph_nets import utils_tf
from graph_nets import utils_np
import sonnet as snt
from root_gnn.utils import load_yaml
from root_gnn.src.datasets import graph
from root_gnn import model as all_models
from root_gnn import losses
verbosities = ['DEBUG','ERROR', "FATAL", "INFO", "WARN"]
printer = pprint.PrettyPrinter(indent=2)
def read_dataset(filenames):
"""
Read dataset...
"""
AUTO = tf.data.experimental.AUTOTUNE
tr_filenames = tf.io.gfile.glob(filenames)
n_files = len(tr_filenames)
dataset = tf.data.TFRecordDataset(tr_filenames)
dataset = dataset.map(graph.parse_tfrec_function, num_parallel_calls=AUTO)
return dataset, tr_filenames
def loop_dataset(datasets, batch_size):
if batch_size > 0:
in_list = []
target_list = []
for dataset in datasets:
inputs_tr, targets_tr = dataset
in_list.append(inputs_tr)
target_list.append(targets_tr)
if len(in_list) == batch_size:
inputs_tr = utils_tf.concat(in_list, axis=0)
targets_tr = utils_tf.concat(target_list, axis=0)
yield (inputs_tr, targets_tr)
else:
for dataset in datasets:
yield dataset
class TrainerBase(object):
def __init__(self, input_dir, output_dir, lr,
batch_size, num_epochs,
num_iters,
decay_lr=True, # if to use decay learning rate...
decay_lr_start_epoch=10,
patterns='*', distributed=False, verbose="INFO", *args, **kwargs):
self.model = None
self.loss_fcn = None
self.num_iters = num_iters
# datasets
self.input_dir = input_dir
self.output_dir = output_dir
# create optimizer
self.init_lr = lr
self.lr = tf.Variable(lr, trainable=False, name='lr', dtype=tf.float32)
self.optimizer = snt.optimizers.Adam(learning_rate=self.lr)
self.num_epochs = tf.constant(num_epochs, dtype=tf.int32)
self.decay_lr_start_epoch = tf.constant(decay_lr_start_epoch, dtype=tf.int32)
self.decay_lr = decay_lr # if use decay lr
# perform distributed training
self.distributed = distributed
# calcualte metrics to be recorded
self.metric_dict = {}
def setup_training_loop(self, model, loss_fcn):
input_signature = self.input_signature()
def update_step(inputs, targets):
print("Tracing update_step")
with tf.GradientTape() as tape:
output_ops = model(inputs, self.num_iters)
loss_ops_tr = loss_fcn(targets, output_ops)
loss_op_tr = tf.math.reduce_sum(loss_ops_tr) / tf.constant(self.num_iters, dtype=tf.float32)
gradients = tape.gradient(loss_op_tr, model.trainable_variables)
self.optimizer.apply(gradients, model.trainable_variables)
return loss_op_tr
self.training_step = tf.function(update_step, input_signature=input_signature)
def update_step(self, model, loss_fcn):
self.setup_training_loop()
self.train_one_epoch()
self.ckpt_manager.save()
self.after_train_one_epoch()
def eval(self, model):
raise NotImplementedError
def after_train_one_epoch(self):
pass
def validate_one_epoch(self):
for data in loop_dataset(self.data_val):
inputs, targets = data
outputs = self.apply(inputs)
if len(outputs) > 1:
outputs = outputs[-1]
self.update_metrics(targets, outputs)
def load_training_data(self, filenames):
self.data_train, _ = read_dataset(filenames)
self.ngraphs_train = sum([1 for _ in self.data_train])
def load_validating_data(self, filenames):
self.data_val, _ = read_dataset(filenames)
self.ngraphs_val = sum([1 for _ in self.data_val])
def load_testing_data(self, filenames):
self.data_test, _ = read_dataset(filenames)
self.ngraphs_test = sum([1 for _ in self.data_test])
def optimizer(self, lr):
self.optimizer = snt.optimizers.Adam(lr)
def input_signature(self):
with_batch_dim = False
input_list = []
target_list = []
for dd in self.data_train.take(self.train_batch_size).as_numpy_iterator():
input_list.append(dd[0])
target_list.append(dd[1])
inputs = utils_tf.concat(input_list, axis=0)
targets = utils_tf.concat(target_list, axis=0)
input_signature = (
graph.specs_from_graphs_tuple(inputs, with_batch_dim),
graph.specs_from_graphs_tuple(targets, with_batch_dim),
)
return input_signature
def train_one_epoch(self):
num_batches = 0
total_loss = 0
for inputs in loop_dataset(self.data_train):
inputs_tr, targets_tr = inputs
total_loss += self.training_step(inputs_tr, targets_tr).numpy()
num_batches += 1
return total_loss, num_batches
| [
"tensorflow.data.TFRecordDataset",
"tensorflow.io.gfile.glob",
"tensorflow.Variable",
"sonnet.optimizers.Adam",
"tensorflow.compat.v1.logging.warning",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.GradientTape",
"graph_nets.utils_tf.concat",
"tensorflow.constant",
"pprint.PrettyPrinte... | [((65, 94), 'tensorflow.compat.v1.logging.set_verbosity', 'logging.set_verbosity', (['"""INFO"""'], {}), "('INFO')\n", (86, 94), False, 'from tensorflow.compat.v1 import logging\n'), ((825, 855), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(2)'}), '(indent=2)\n', (845, 855), False, 'import pprint\n'), ((982, 1009), 'tensorflow.io.gfile.glob', 'tf.io.gfile.glob', (['filenames'], {}), '(filenames)\n', (998, 1009), True, 'import tensorflow as tf\n'), ((1057, 1094), 'tensorflow.data.TFRecordDataset', 'tf.data.TFRecordDataset', (['tr_filenames'], {}), '(tr_filenames)\n', (1080, 1094), True, 'import tensorflow as tf\n'), ((245, 317), 'tensorflow.compat.v1.logging.warning', 'logging.warning', (['"""No horvod module, cannot perform distributed training"""'], {}), "('No horvod module, cannot perform distributed training')\n", (260, 317), False, 'from tensorflow.compat.v1 import logging\n'), ((2350, 2411), 'tensorflow.Variable', 'tf.Variable', (['lr'], {'trainable': '(False)', 'name': '"""lr"""', 'dtype': 'tf.float32'}), "(lr, trainable=False, name='lr', dtype=tf.float32)\n", (2361, 2411), True, 'import tensorflow as tf\n'), ((2437, 2479), 'sonnet.optimizers.Adam', 'snt.optimizers.Adam', ([], {'learning_rate': 'self.lr'}), '(learning_rate=self.lr)\n', (2456, 2479), True, 'import sonnet as snt\n'), ((2506, 2545), 'tensorflow.constant', 'tf.constant', (['num_epochs'], {'dtype': 'tf.int32'}), '(num_epochs, dtype=tf.int32)\n', (2517, 2545), True, 'import tensorflow as tf\n'), ((2582, 2631), 'tensorflow.constant', 'tf.constant', (['decay_lr_start_epoch'], {'dtype': 'tf.int32'}), '(decay_lr_start_epoch, dtype=tf.int32)\n', (2593, 2631), True, 'import tensorflow as tf\n'), ((3503, 3560), 'tensorflow.function', 'tf.function', (['update_step'], {'input_signature': 'input_signature'}), '(update_step, input_signature=input_signature)\n', (3514, 3560), True, 'import tensorflow as tf\n'), ((4684, 4707), 'sonnet.optimizers.Adam', 
'snt.optimizers.Adam', (['lr'], {}), '(lr)\n', (4703, 4707), True, 'import sonnet as snt\n'), ((4996, 5031), 'graph_nets.utils_tf.concat', 'utils_tf.concat', (['input_list'], {'axis': '(0)'}), '(input_list, axis=0)\n', (5011, 5031), False, 'from graph_nets import utils_tf\n'), ((5050, 5086), 'graph_nets.utils_tf.concat', 'utils_tf.concat', (['target_list'], {'axis': '(0)'}), '(target_list, axis=0)\n', (5065, 5086), False, 'from graph_nets import utils_tf\n'), ((5127, 5180), 'root_gnn.src.datasets.graph.specs_from_graphs_tuple', 'graph.specs_from_graphs_tuple', (['inputs', 'with_batch_dim'], {}), '(inputs, with_batch_dim)\n', (5156, 5180), False, 'from root_gnn.src.datasets import graph\n'), ((5194, 5248), 'root_gnn.src.datasets.graph.specs_from_graphs_tuple', 'graph.specs_from_graphs_tuple', (['targets', 'with_batch_dim'], {}), '(targets, with_batch_dim)\n', (5223, 5248), False, 'from root_gnn.src.datasets import graph\n'), ((1547, 1579), 'graph_nets.utils_tf.concat', 'utils_tf.concat', (['in_list'], {'axis': '(0)'}), '(in_list, axis=0)\n', (1562, 1579), False, 'from graph_nets import utils_tf\n'), ((1609, 1645), 'graph_nets.utils_tf.concat', 'utils_tf.concat', (['target_list'], {'axis': '(0)'}), '(target_list, axis=0)\n', (1624, 1645), False, 'from graph_nets import utils_tf\n'), ((3039, 3056), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3054, 3056), True, 'import tensorflow as tf\n'), ((3214, 3245), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['loss_ops_tr'], {}), '(loss_ops_tr)\n', (3232, 3245), True, 'import tensorflow as tf\n'), ((3248, 3293), 'tensorflow.constant', 'tf.constant', (['self.num_iters'], {'dtype': 'tf.float32'}), '(self.num_iters, dtype=tf.float32)\n', (3259, 3293), True, 'import tensorflow as tf\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Project: Hangman
File: hangman.py
Author: <NAME>
Created: 2017-11-24
IDE: PyCharm Community Edition
Synopsis:
hangman.py [ARGUMENT]
Description:
A simple hangman game that runs in the command line.
To play the game, figure out a proverb by guessing
letters. If you guess a letter that's not in the
proverb and you've already guessed it, you get
a penalty. The game ends when you guess all letters
correctly or when the hangman is finished.
Optional arguments
-h, --help
show this docstring and exit
Notes:
The proverbs come from a text file in the resources folder.
The proverb file's first line contains an alphabet
including all the single characters the proverbs have
but excluding punctuation marks and characters not used
in non-contracted words like quotation marks or apostrophes.
For example for English it's abcdefghijklmnopqrstuvwxyz
and for Hungarian it's aábcdeéfghiíjklmnoóöőpqrstuúüűvwxyz
Each proverb is in a new line and there are no blank
lines in the file. Blank lines would cause the game
not to work properly.
The file ends with the END_OF_FILE string, after that
everything's ignored.
Exit codes:
0: Program exited without errors
1: one or modules couldn't be loaded
2: incorrect argument passed in command line
"""
import os
import random
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from lib import get_ui_strings
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2017., <NAME>"
__license__ = "Apache 2.0"
__version__ = "1.1"
__email__ = "<EMAIL>"
__status__ = "Development"
# TODO: test on Windows
def get_proverb(filename):
"""
This function reads a random line from a given file.
:param filename: absolute or relative path to a file
:type filename: str
:return: a proverb
:rtype: str
"""
_ = 0 # initialize here
with open(filename) as f:
for _, line in enumerate(f):
if "END_OF_FILE" in line:
_ -= 1
break
# starts with index 1 because the first (index 0) line contains
# the alphabet with all the letters used in proverbs in the file
x = random.randint(1, _)
with open(filename) as f:
for _, line in enumerate(f):
if _ == x:
return line[:-1]
def get_alphabet(filename):
"""
This function returns the alphabet, all the letters
used in the proverbs in the file. The alphabet is
the first line of the file.
:param filename: the proverbs file
:type filename: str
:return: uppercase alphabet
:rtype: str
"""
with open(filename) as f:
return f.readline().strip().upper()
def draw_hangman(x):
"""
Creates a simple ASCII art of a hangman step by step from 0 to 10,
then returns it as a string.
:param x: current step
:type x: int
:return: simple ASCII art
:rtype: str
"""
if x == 0:
img = "\n"
img += " " + "—" * 15 + "\n"
img += " |" + " " * 9 + "|\n"
img += " |\n" * 6
img += " " + "—" * 8
return img
#imgplot = plt.imshow(mpimg.imread('turtle7.jpg'))
#plt.show()
elif x == 1:
imgplot = plt.imshow(mpimg.imread('turtle6.jpg'))
plt.show()
elif x == 2:
imgplot = plt.imshow(mpimg.imread('turtle5.jpg'))
plt.show()
elif x == 3:
imgplot = plt.imshow(mpimg.imread('turtle4.jpg'))
plt.show()
elif x == 4:
imgplot = plt.imshow(mpimg.imread('turtle3.jpg'))
plt.show()
elif x == 5:
imgplot = plt.imshow(mpimg.imread('turtle2.jpg'))
plt.show()
else:
imgplot = plt.imshow(mpimg.imread('turtle1.jpg'))
plt.show()
# return img
def incomplete_proverb(pvb, lst, abc):
"""
Returns a string where the unknown letters are replaced with
underscores.
Assumes everything is uppercase.
:param abc: the alphabet used in the proverbs
:type abc: str
:param pvb: a proverb
:type pvb: str
:param lst: known letters
:type lst: list
:return: proverb with underscores replacing unknown letters
:rtype: str
"""
ret = ""
for c in pvb:
if c in abc and c not in lst:
ret += "_"
else:
ret += c
return ret
def wrong_guesses_to_display(lst):
"""
Make a string from a list
:param lst: list of strings
:type lst: list
:return: a string
:rtype: str
"""
ret = ""
for _ in lst:
if len(_) == 1:
if len(ret) > 0:
ret += ", " + _
else:
ret += _
return ret
def complete_proverb(pvb):
"""
Checks if the proverb is complete.
Assumes the proverb is converted to have underscores replacing
unknown letters.
Assumes everything is uppercase.
:param pvb: a proverb
:type pvb: str
:return: True | False
:rtype: bool
"""
if "_" not in pvb:
return True
return False
def letter_only(guess, abc):
"""
Checks if the player's guess is a single ASCII letter only.
:param abc: the alphabet used in the proverbs
:type abc: str
:param guess: the player's guess
:type guess: str
:return: True | False
:rtype: bool
"""
if len(guess) == 1 and guess.upper() in abc:
return True
return False
def used_letters(guess, pvb, lst):
"""
Checks if the player's guess is in the proverb. Adds it to the
list of used letters if it's not.
Assumes everything is uppercase.
:param guess: the player's guess, a single letter
:type guess: str
:param pvb: the proverb
:type pvb: str
:param lst: known letters
:type lst: list
:return: known letters updated and sorted
:rtype: list
"""
if guess not in pvb:
lst.append(guess)
return sorted(lst)
def in_proverb(guess, pvb):
"""
Checks if the player's guess is in the proverb.
Assumes everything is uppercase.
:param guess: a single letter
:type guess: str
:param pvb: the proverb
:type pvb: str
:return: True | False
:rtype: bool
"""
if guess in pvb:
return True
return False
def already_guessed(guess, lst):
"""
Checks if the player's guess was already made.
Assumes everything is uppercase.
:param guess: a single letter
:type guess: str
:param lst: the list of guesses
:type lst: list
:return: True | False
:rtype: bool
"""
if guess in lst:
return True
return False
def get_max_guess_number():
"""
Returns the number of guesses the player has
:return: max guess number
:rtype: int
"""
return 6
if __name__ == '__main__':
# Wrong argument message
message = "Argument unrecognized.\n" \
"Usage:\n" \
" game.py\n" \
" game.py -h\n" \
" game.py --help"
# Check arguments
if len(sys.argv) == 1:
pass
elif len(sys.argv) > 2:
print(message)
sys.exit(2)
elif sys.argv[1] == "-h" or sys.argv[1] == "--help":
print(__doc__)
sys.exit(2)
else:
print(message)
sys.exit(2)
language_file = os.path.join("resources", "lang.csv")
language_list = get_ui_strings.get_language_list(language_file)
# Set a string to clear the command line
# Tested only on Linux
cls = "\033[H\033[J"
# Clear command line
print(cls, end="")
# Ask player to choose language
for i, l in enumerate(language_list):
print(f" {i + 1}: {l}")
selection = 0
while selection < 1 or selection > len(language_list):
selection = input("--> ")
if selection == "exit" or selection == "quit":
sys.exit(0)
try:
selection = int(selection)
except ValueError:
pass
language = language_list[selection - 1]
# Get the strings corresponding to selected language
# used in-game from lang.csv
string_list = get_ui_strings.get_strings(language_file, language)
# File name and path of proverbs file
prv_file = string_list[1]
prv_path = os.path.join("resources", prv_file)
# Get proverb
proverb = get_proverb(prv_path)
# Get alphabet
alphabet = get_alphabet(prv_path)
# Welcome message
print(cls, end="")
print(string_list[4])
input()
# Bye message
bye = string_list[5]
# Uppercase proverb
proverb = proverb.upper()
# List of the letters guessed and not in the proverb
non_matches = []
# List of the letters guessed and in the proverb
matches = []
# The proverb with underscores replacing unknown letters
incomplete = incomplete_proverb(proverb, matches, alphabet)
message = ""
# Continue asking for input until the hangman
# or the game is finished
while len(non_matches) < get_max_guess_number():
print(cls, end="")
print(draw_hangman(len(non_matches)))
inc_guesses = wrong_guesses_to_display(sorted(non_matches))
# Print list of incorrect guesses
print(f"{string_list[6]}".replace("VARIABLE", f"{inc_guesses}"))
print(f"{string_list[7]}".replace("VARIABLE", f"{incomplete}"))
print(message)
# Get player input
g = None
while g is None:
# ask player for guess
g = input(f"{string_list[8]}")
if letter_only(g, alphabet) is False:
if g == "exit" or g == "quit":
print(bye)
sys.exit(0)
g = None
# print invalid input message
print(f"{string_list[9]}")
else:
g = g.upper()
# Check guess
if already_guessed(g, matches):
# correct guess already given
message = f"{string_list[10]}"
elif already_guessed(g, non_matches):
# incorrect guess already given
message = f"{string_list[11]}"
# append "penalty" to non_matches
non_matches.append("+1")
elif in_proverb(g, proverb):
matches.append(g)
message = ""
else:
non_matches.append(g)
message = ""
# recreate var incomplete with new data
incomplete = incomplete_proverb(proverb, matches, alphabet)
if complete_proverb(incomplete):
print("\n")
print(incomplete, "\n")
# win message
print(f"{string_list[12]}")
print(bye)
sys.exit(0)
print(cls, end="")
print(draw_hangman(len(non_matches)), "\n")
print(proverb.upper(), "\n")
# lose message
print(f"{string_list[13]}")
print(bye)
sys.exit(0) | [
"lib.get_ui_strings.get_language_list",
"lib.get_ui_strings.get_strings",
"matplotlib.image.imread",
"os.path.join",
"sys.exit",
"random.randint",
"matplotlib.pyplot.show"
] | [((2589, 2609), 'random.randint', 'random.randint', (['(1)', '_'], {}), '(1, _)\n', (2603, 2609), False, 'import random\n'), ((7938, 7975), 'os.path.join', 'os.path.join', (['"""resources"""', '"""lang.csv"""'], {}), "('resources', 'lang.csv')\n", (7950, 7975), False, 'import os\n'), ((7997, 8044), 'lib.get_ui_strings.get_language_list', 'get_ui_strings.get_language_list', (['language_file'], {}), '(language_file)\n', (8029, 8044), False, 'from lib import get_ui_strings\n'), ((8772, 8823), 'lib.get_ui_strings.get_strings', 'get_ui_strings.get_strings', (['language_file', 'language'], {}), '(language_file, language)\n', (8798, 8823), False, 'from lib import get_ui_strings\n'), ((8916, 8951), 'os.path.join', 'os.path.join', (['"""resources"""', 'prv_file'], {}), "('resources', prv_file)\n", (8928, 8951), False, 'import os\n'), ((11620, 11631), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (11628, 11631), False, 'import sys\n'), ((3735, 3745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3743, 3745), True, 'import matplotlib.pyplot as plt\n'), ((7744, 7755), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7752, 7755), False, 'import sys\n'), ((8500, 8511), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8508, 8511), False, 'import sys\n'), ((11421, 11432), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (11429, 11432), False, 'import sys\n'), ((3697, 3724), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle6.jpg"""'], {}), "('turtle6.jpg')\n", (3709, 3724), True, 'import matplotlib.image as mpimg\n'), ((3832, 3842), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3840, 3842), True, 'import matplotlib.pyplot as plt\n'), ((7847, 7858), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7855, 7858), False, 'import sys\n'), ((7903, 7914), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (7911, 7914), False, 'import sys\n'), ((3794, 3821), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle5.jpg"""'], {}), "('turtle5.jpg')\n", (3806, 
3821), True, 'import matplotlib.image as mpimg\n'), ((3929, 3939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3937, 3939), True, 'import matplotlib.pyplot as plt\n'), ((10366, 10377), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (10374, 10377), False, 'import sys\n'), ((3891, 3918), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle4.jpg"""'], {}), "('turtle4.jpg')\n", (3903, 3918), True, 'import matplotlib.image as mpimg\n'), ((4026, 4036), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4034, 4036), True, 'import matplotlib.pyplot as plt\n'), ((3988, 4015), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle3.jpg"""'], {}), "('turtle3.jpg')\n", (4000, 4015), True, 'import matplotlib.image as mpimg\n'), ((4123, 4133), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4131, 4133), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4221, 4223), True, 'import matplotlib.pyplot as plt\n'), ((4085, 4112), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle2.jpg"""'], {}), "('turtle2.jpg')\n", (4097, 4112), True, 'import matplotlib.image as mpimg\n'), ((4175, 4202), 'matplotlib.image.imread', 'mpimg.imread', (['"""turtle1.jpg"""'], {}), "('turtle1.jpg')\n", (4187, 4202), True, 'import matplotlib.image as mpimg\n')] |
import json
import os
import typing
import codecs
import typing
import os
import json
import dill
from dataclasses import dataclass, field
ENCODED_PICKLE = "encodedpickle"
class TutorialJsonIOManager(typing.List[str]):
"""
TutorialJsonIOManager will read step results from a json file
"""
def __init__(
self,
# NOTE Encoding scheme is a union of literals, not of type str
# but typing.Literal does not exist in python 3.7
# allow any type for now, to avoid mypy errors
encoding_scheme="base64",
file_name: str = "result.json",
field_name: str = "result",
):
self.encoding_scheme = encoding_scheme
self.file_name = file_name
self.field_name = field_name
def deserialize(self, val: typing.Any) -> typing.Any:
"""
If val is type List[str], then assume it is an encoded pickle
(3) encode string as bytes (2) base64 decode (1) unpickle to python object
Else if val is type str and of format `/app/_.json`
first read the data from the file and run deserialize on contents
Else assume it is just a raw value
return val
"""
def is_type_list_str(val: typing.Any) -> bool:
if not isinstance(val, list):
return False
for element in val:
if not isinstance(element, str):
return False
return True
# if val is type List[str], then assume it is an encoded pickle
if is_type_list_str(val):
val = "".join(val)
return dill.loads(codecs.decode(val.encode(), self.encoding_scheme))
# if val is `/app/_.json`, then it is a file we need to read before deserializing
if (
isinstance(val, str)
and val.startswith(os.sep + "app")
and val.endswith(".json")
):
return self.read(val)
# otherwise simply return val
return val
def read(self, file_path: str) -> typing.Any:
"""
files must be valid json, so actual value is embedded in the result field
e.g. {"type": "encodedpickle", "result": "__what we want as base64 encoded pickle__"}
"""
with open(file_path, "r") as f:
r = json.load(f)
return self.deserialize(r[self.field_name])
@dataclass
class TaskResult:
# map of input name to value
inputs: typing.Dict[str, typing.Any] = field(default_factory=dict)
result: typing.Optional[typing.Any] = None
@dataclass
class TutorialWorkflowResult:
# map of task name to TaskResult
tasks: typing.Dict[str, TaskResult] = field(default_factory=dict)
def __str__(self) -> str:
s = "\n" + " ┌" + "-" * (os.get_terminal_size().columns - 2) + "\n"
for step_name, step_result in self.tasks.items():
s += f" ├ {step_name:15s} : {step_result.result}\n"
s += " └" + "-" * (os.get_terminal_size().columns - 2) + "\n"
return s
def _deserialize_result(io_manager: TutorialJsonIOManager, result: typing.Dict[str, typing.Any]):
if result["type"] == ENCODED_PICKLE:
return io_manager.deserialize(result["result"])
else:
return result["result"]
def _deserialize_inputs(
io_manager: TutorialJsonIOManager, inputs: typing.Dict, workflow_result_json: typing.Dict
):
inputs_result = {}
for k in inputs:
# when input is raw value
if "type" in inputs[k]:
if inputs[k]["type"] == ENCODED_PICKLE:
inputs_result[k] = io_manager.deserialize(inputs[k]["value"])
else:
inputs_result[k] = inputs[k]["value"]
# when input is a result from a previous step
elif "sourceArtifactName" in inputs[k]:
inputs_result[k] = _deserialize_result(
io_manager,
workflow_result_json[inputs[k]["sourceStepID"]]["result"],
)
return inputs_result
def _load_workflowresult_from_dict(workflow_result_json: dict) -> TutorialWorkflowResult:
    """Build a TutorialWorkflowResult from the raw cached-workflow JSON mapping."""
    io_manager = TutorialJsonIOManager()
    workflow = TutorialWorkflowResult()
    for step_id, step in workflow_result_json.items():
        decoded_inputs = (
            _deserialize_inputs(io_manager, step["inputs"], workflow_result_json)
            if "inputs" in step
            else {}
        )
        decoded_result = (
            _deserialize_result(io_manager, step["result"]) if "result" in step else None
        )
        workflow.tasks[step["stepName"]] = TaskResult(
            inputs=decoded_inputs,
            result=decoded_result,
        )
    return workflow
def load_cached_workflowresult(file_path: str) -> TutorialWorkflowResult:
    """Read a cached workflow-result JSON file and decode it into task results."""
    with open(file_path) as cache_file:
        raw = json.load(cache_file)
    return _load_workflowresult_from_dict(raw)
| [
"json.load",
"os.get_terminal_size",
"dataclasses.field"
] | [((2495, 2522), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2500, 2522), False, 'from dataclasses import dataclass, field\n'), ((2692, 2719), 'dataclasses.field', 'field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2697, 2719), False, 'from dataclasses import dataclass, field\n'), ((4893, 4905), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4902, 4905), False, 'import json\n'), ((2319, 2331), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2328, 2331), False, 'import json\n'), ((2786, 2808), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (2806, 2808), False, 'import os\n'), ((2978, 3000), 'os.get_terminal_size', 'os.get_terminal_size', ([], {}), '()\n', (2998, 3000), False, 'import os\n')] |
import sys
sys.path.append('../')
sys.path.append('../..')
import cmbnncs.utils as utils
import cmbnncs.spherical as spherical
import cmbnncs.simulator as simulator
import numpy as np
import time
start_time = time.time()
def sim_Dust(dust_seed, frequ, amplitude_randn, spectralIndex_randn, temp_randn):
    """Simulate one thermal-dust sky realization and write its maps.

    Reads the module-level ``nside``. Returns the simulator output
    (I, Q, U maps) for the requested frequencies ``frequ``.
    """
    # dust component, model 1
    dust = simulator.DustComponents(nside, 1)
    # draw a fresh set of dust model parameters for this realization
    dust.ParametersSampling()
    print (dust.paramsample, '\n')
    dust.RealizationSampling(
        seed=int(dust_seed),
        amplitude_randn=amplitude_randn,
        spectralIndex_randn=spectralIndex_randn,
        temp_randn=temp_randn,
    )
    dust.WriteMap(frequencies=frequ)
    return dust.out_put
#%% generate the Dust full map - training (test) data
# Simulation settings: the commented alternatives below are other randomization
# modes for amplitude / spectral index / temperature used for other data sets.
nside = 512
# temp_randn = '0'
temp_randn = '0.05Multi'
# amplitude_randn = '0'; spectralIndex_randn = '0' #training set: 1000 #
amplitude_randn = '0'; spectralIndex_randn = '0.1One' #training set: 1000 ##
# amplitude_randn = '0'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0.1One' #training set: 1000 #
# amplitude_randn = '0.1One'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0.1One' #training set: 1000 #
# amplitude_randn = '0.1Multi'; spectralIndex_randn = '0.1Multi' #training set: 1000 #
# Which slice of the sample set this run generates: samples
# [part_n*part_size, (part_n+1)*part_size) of the pre-drawn seeds.
part_n = 0 #0,1,...
part_size = 1000
frequencies = [100, 143, 217, 353] #for Planck
# frequencies = [85, 95, 145, 155, 220, 270] #for CMB-S4
print ('dust_freqs: %s'%frequencies, 'part_n: %s'%part_n, 'part_size: %s'%part_size, 'start_n: %s'%(part_n*part_size))
# Fixed master seed so every run/part draws the same pool of per-sample seeds.
np.random.seed(2)#note!!!
Dustseed = np.random.choice(1000000, 50000, replace=False)
for i in range(part_size):
    for freq in frequencies:
        # one realization per (sample, frequency); maps come back as I/Q/U spheres
        map_I, map_Q, map_U = sim_Dust(Dustseed[i+part_n*part_size], [freq], amplitude_randn,
                                       spectralIndex_randn, temp_randn=temp_randn)
        # reproject each spherical map onto the flat "piece plane" used by the CNN
        map_I_piece = spherical.sphere2piecePlane(map_I, nside=nside)
        map_Q_piece = spherical.sphere2piecePlane(map_Q, nside=nside)
        map_U_piece = spherical.sphere2piecePlane(map_U, nside=nside)
        utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
                      'Dust_%s'%(i+part_n*part_size), map_I_piece, dtype=np.float32)
        utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
                      'Dust_%s'%(i+part_n*part_size), map_Q_piece, dtype=np.float32)
        utils.savenpy('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'%(nside,amplitude_randn,spectralIndex_randn,temp_randn,freq),
                      'Dust_%s'%(i+part_n*part_size), map_U_piece, dtype=np.float32)
#%%
print ('\n', "Time elapsed: %.3f" %((time.time()-start_time)/60), "mins")
| [
"numpy.random.choice",
"cmbnncs.simulator.DustComponents",
"cmbnncs.utils.savenpy",
"numpy.random.seed",
"cmbnncs.spherical.sphere2piecePlane",
"time.time",
"sys.path.append"
] | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((34, 58), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (49, 58), False, 'import sys\n'), ((209, 220), 'time.time', 'time.time', ([], {}), '()\n', (218, 220), False, 'import time\n'), ((2007, 2024), 'numpy.random.seed', 'np.random.seed', (['(2)'], {}), '(2)\n', (2021, 2024), True, 'import numpy as np\n'), ((2044, 2091), 'numpy.random.choice', 'np.random.choice', (['(1000000)', '(50000)'], {'replace': '(False)'}), '(1000000, 50000, replace=False)\n', (2060, 2091), True, 'import numpy as np\n'), ((371, 405), 'cmbnncs.simulator.DustComponents', 'simulator.DustComponents', (['nside', '(1)'], {}), '(nside, 1)\n', (395, 405), True, 'import cmbnncs.simulator as simulator\n'), ((2357, 2404), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_I'], {'nside': 'nside'}), '(map_I, nside=nside)\n', (2384, 2404), True, 'import cmbnncs.spherical as spherical\n'), ((2427, 2474), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_Q'], {'nside': 'nside'}), '(map_Q, nside=nside)\n', (2454, 2474), True, 'import cmbnncs.spherical as spherical\n'), ((2497, 2544), 'cmbnncs.spherical.sphere2piecePlane', 'spherical.sphere2piecePlane', (['map_U'], {'nside': 'nside'}), '(map_U, nside=nside)\n', (2524, 2544), True, 'import cmbnncs.spherical as spherical\n'), ((2562, 2807), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_I_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_I'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_I_piece, dtype=np.float32)\n", (2575, 2807), 
True, 'import cmbnncs.utils as utils\n'), ((2811, 3056), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_Q_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_Q'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_Q_piece, dtype=np.float32)\n", (2824, 3056), True, 'import cmbnncs.utils as utils\n'), ((3060, 3305), 'cmbnncs.utils.savenpy', 'utils.savenpy', (["('samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq))", "('Dust_%s' % (i + part_n * part_size))", 'map_U_piece'], {'dtype': 'np.float32'}), "(\n 'samples/full_map_nside%s/Foregrounds_oneModel/Dust/Dust_A%s_Beta%s_T%s_%sGHz_U'\n % (nside, amplitude_randn, spectralIndex_randn, temp_randn, freq), \n 'Dust_%s' % (i + part_n * part_size), map_U_piece, dtype=np.float32)\n", (3073, 3305), True, 'import cmbnncs.utils as utils\n'), ((3344, 3355), 'time.time', 'time.time', ([], {}), '()\n', (3353, 3355), False, 'import time\n')] |
# Generated by Django 3.2.4 on 2021-07-22 09:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding double-counting certificate tables.

    Creates ``double_counting_registrations`` and its per-(biofuel, feedstock)
    scope table ``double_counting_registrations_scope``.

    NOTE(review): "DoubleCouting" (missing the "n") is the actual generated
    model name — renaming it would require a separate rename migration.
    """

    dependencies = [
        ('core', '0175_lotv2_lots_v2_year_87d135_idx'),
        ('certificates', '0010_auto_20210509_1038'),
    ]
    operations = [
        migrations.CreateModel(
            name='DoubleCoutingRegistration',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('certificate_id', models.CharField(max_length=64)),
                ('certificate_holder', models.CharField(max_length=256)),
                ('registered_address', models.TextField()),
                ('valid_from', models.DateField()),
                ('valid_until', models.DateField()),
            ],
            options={
                'verbose_name': 'Certificat Double Compte',
                'verbose_name_plural': 'Certificats Double Compte',
                'db_table': 'double_counting_registrations',
            },
        ),
        migrations.CreateModel(
            name='DoubleCoutingRegistrationInputOutput',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('biofuel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.biocarburant')),
                ('certificate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='certificates.doublecoutingregistration')),
                ('feedstock', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.matierepremiere')),
            ],
            options={
                'verbose_name': 'Périmètre Certificat Double Compte',
                'verbose_name_plural': 'Périmètres Certificats Double Compte',
                'db_table': 'double_counting_registrations_scope',
            },
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BigAutoField",
"django.db.models.CharField"
] | [((443, 539), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (462, 539), False, 'from django.db import migrations, models\n'), ((573, 604), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (589, 604), False, 'from django.db import migrations, models\n'), ((646, 678), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(256)'}), '(max_length=256)\n', (662, 678), False, 'from django.db import migrations, models\n'), ((720, 738), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (736, 738), False, 'from django.db import migrations, models\n'), ((772, 790), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (788, 790), False, 'from django.db import migrations, models\n'), ((825, 843), 'django.db.models.DateField', 'models.DateField', ([], {}), '()\n', (841, 843), False, 'from django.db import migrations, models\n'), ((1231, 1327), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1250, 1327), False, 'from django.db import migrations, models\n'), ((1354, 1445), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.biocarburant"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'core.biocarburant')\n", (1371, 1445), False, 'from django.db import migrations, models\n'), ((1475, 1587), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""certificates.doublecoutingregistration"""'}), "(on_delete=django.db.models.deletion.CASCADE, 
to=\n 'certificates.doublecoutingregistration')\n", (1492, 1587), False, 'from django.db import migrations, models\n'), ((1615, 1732), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""core.matierepremiere"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='core.matierepremiere')\n", (1632, 1732), False, 'from django.db import migrations, models\n')] |
from .datafetcher import fetch_measure_levels
from .stationdata import build_station_list, update_water_levels
from .flood import stations_highest_rel_level
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
from floodsystem.station import inconsistent_typical_range_stations
import datetime
stations = build_station_list()

def run():
    """Return the names of the six stations with the highest relative water level."""
    station_list = build_station_list()
    update_water_levels(station_list)
    ranked = stations_highest_rel_level(station_list, 6)
    # each entry is (station_name, relative_level); keep only the names
    return [entry[0] for entry in ranked]
# Names of the six highest-relative-level stations.
stations_at_risk = run()
# NOTE(review): drops the very highest station from the plot list — presumably
# deliberate (e.g. known-bad data), confirm with the author.
stations_at_risk.pop(0)
y = inconsistent_typical_range_stations(stations)
print(y)
update_water_levels(stations)
def plot_water_levels(station, dates, levels):
    """Plot a station's water-level history against its typical range.

    Args:
        station: station name, used as the plot title.
        dates: sequence of measurement dates (x-axis).
        levels: water level in metres at each date.

    Reads the module-level ``typical_range`` tuple set from
    ``station.typical_range``, assumed to be (typical low, typical high)
    per the floodsystem convention.

    Bug fix: the original appended ``typical_range[0]`` (the low bound) into
    the *high* list and ``[1]`` into the low list, swapping the two lines.
    """
    # constant reference lines spanning the whole date range
    typical_range_low = [typical_range[0] for _ in dates]
    typical_range_high = [typical_range[1] for _ in dates]
    plt.plot(dates , levels , label="water level")
    plt.xlabel("data")
    plt.ylabel("water level (m)")
    plt.xticks(rotation=45);
    plt.title(station)
    plt.tight_layout()
    plt.plot(dates , typical_range_high , "-y" , label="typical high")
    plt.plot(dates , typical_range_low , "-o" , label="typical low")
    plt.legend()
    plt.show()
# Plot the recent level history for each at-risk station.
counter = 0
for i in stations:
    if i.name in stations_at_risk:
        # fetch the last 10 days of measurements for this station
        dt = 10
        dates, levels = fetch_measure_levels(i.measure_id , dt = datetime.timedelta(days=dt))
        # plot_water_levels reads this module-level variable
        typical_range = i.typical_range
        plot_water_levels(i.name , dates , levels)
        counter = counter + 1
        # safety stop once more than 5 plots have been shown
        if counter > 5:
            raise RuntimeError("All of the 5 stations have displayed the outputs")
def plot_water_level_with_fit(station, dates, levels, p):
    """Plot water levels together with a degree-``p`` polynomial fit.

    Args:
        station: station name, used as the plot title.
        dates: x-axis values. NOTE(review): np.polyfit requires numeric x;
            if these are datetime objects they must be converted first
            (e.g. matplotlib.dates.date2num) — confirm against callers.
        levels: water level at each date.
        p: degree of the polynomial to fit.

    Returns:
        The fitted numpy.poly1d polynomial.
    """
    x = dates
    y = levels
    p_coeff = np.polyfit(x , y , p)
    poly = np.poly1d(p_coeff)
    plt.plot(x , y , '.')
    plt.xlabel("time")
    plt.ylabel("water level")
    plt.xticks(rotation=45);
    plt.title(station)
    # evaluate the fit at 30 evenly spaced points across the data range
    x1 = np.linspace(x[0] , x[-1] , 30)
    plt.plot(x1 , poly(x1))
    plt.show()
    return poly
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.polyfit",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"datetime.timedelta",
"numpy.linspace",
"matplotlib.pyplot.tight_layout",
"numpy.poly1d",
"matplotlib.pyplot.title",
"floodsystem.station.inconsistent_typical_range_stat... | [((714, 759), 'floodsystem.station.inconsistent_typical_range_stations', 'inconsistent_typical_range_stations', (['stations'], {}), '(stations)\n', (749, 759), False, 'from floodsystem.station import inconsistent_typical_range_stations\n'), ((1054, 1098), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'levels'], {'label': '"""water level"""'}), "(dates, levels, label='water level')\n", (1062, 1098), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1124), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""data"""'], {}), "('data')\n", (1116, 1124), True, 'import matplotlib.pyplot as plt\n'), ((1129, 1158), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level (m)"""'], {}), "('water level (m)')\n", (1139, 1158), True, 'import matplotlib.pyplot as plt\n'), ((1163, 1186), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (1173, 1186), True, 'import matplotlib.pyplot as plt\n'), ((1192, 1210), 'matplotlib.pyplot.title', 'plt.title', (['station'], {}), '(station)\n', (1201, 1210), True, 'import matplotlib.pyplot as plt\n'), ((1215, 1233), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1231, 1233), True, 'import matplotlib.pyplot as plt\n'), ((1243, 1306), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'typical_range_high', '"""-y"""'], {'label': '"""typical high"""'}), "(dates, typical_range_high, '-y', label='typical high')\n", (1251, 1306), True, 'import matplotlib.pyplot as plt\n'), ((1314, 1375), 'matplotlib.pyplot.plot', 'plt.plot', (['dates', 'typical_range_low', '"""-o"""'], {'label': '"""typical low"""'}), "(dates, typical_range_low, '-o', label='typical low')\n", (1322, 1375), True, 'import matplotlib.pyplot as plt\n'), ((1388, 1400), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1398, 1400), True, 'import matplotlib.pyplot as plt\n'), ((1405, 1415), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1413, 
1415), True, 'import matplotlib.pyplot as plt\n'), ((1925, 1944), 'numpy.polyfit', 'np.polyfit', (['x', 'y', 'p'], {}), '(x, y, p)\n', (1935, 1944), True, 'import numpy as np\n'), ((1958, 1976), 'numpy.poly1d', 'np.poly1d', (['p_coeff'], {}), '(p_coeff)\n', (1967, 1976), True, 'import numpy as np\n'), ((1981, 2000), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', '"""."""'], {}), "(x, y, '.')\n", (1989, 2000), True, 'import matplotlib.pyplot as plt\n'), ((2007, 2025), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (2017, 2025), True, 'import matplotlib.pyplot as plt\n'), ((2030, 2055), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""water level"""'], {}), "('water level')\n", (2040, 2055), True, 'import matplotlib.pyplot as plt\n'), ((2060, 2083), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(45)'}), '(rotation=45)\n', (2070, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2107), 'matplotlib.pyplot.title', 'plt.title', (['station'], {}), '(station)\n', (2098, 2107), True, 'import matplotlib.pyplot as plt\n'), ((2117, 2145), 'numpy.linspace', 'np.linspace', (['x[0]', 'x[-1]', '(30)'], {}), '(x[0], x[-1], 30)\n', (2128, 2145), True, 'import numpy as np\n'), ((2180, 2190), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2188, 2190), True, 'import matplotlib.pyplot as plt\n'), ((1564, 1591), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'dt'}), '(days=dt)\n', (1582, 1591), False, 'import datetime\n')] |
#!/usr/bin/env python
"""
LIVE STREAM TO YOUTUBE LIVE using FFMPEG -- from webcam
https://www.scivision.co/youtube-live-ffmpeg-livestream/
https://support.google.com/youtube/answer/2853702
Windows: get DirectShow device list from:
ffmpeg -list_devices true -f dshow -i dummy
"""
from youtubelive_ffmpeg import youtubelive
import sys
#
# Pick the audio/video capture device names for the current OS.
# Windows uses DirectShow device names; list them with:
#   ffmpeg -list_devices true -f dshow -i dummy
if sys.platform.startswith('win'):
    audiochan = 'audio="Internal Microphone"'
    videochan = 'video="Integrated Camera"'
elif sys.platform.startswith('darwin'):
    audiochan = 'default'
    videochan = 'default'
elif sys.platform.startswith('linux'):
    audiochan = 'default'
    videochan = '/dev/video0'
else:
    # Bug fix: on any other platform (e.g. BSD) the original left
    # audiochan/videochan unbound, causing a NameError at use time.
    audiochan = 'default'
    videochan = 'default'
if __name__ == '__main__':
    import signal
    # restore default Ctrl-C behaviour so the stream can be stopped cleanly
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('-fps',default=30, type=int)
    args = parser.parse_args()

    # stream the local camera to YouTube Live at the requested frame rate
    youtubelive({'fps': args.fps,
                 'audiochan': audiochan,
                 'videochan': videochan,
                 'vidsource': 'camera',
                 })
| [
"signal.signal",
"sys.platform.startswith",
"youtubelive_ffmpeg.youtubelive",
"argparse.ArgumentParser"
] | [((343, 373), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (366, 373), False, 'import sys\n'), ((470, 503), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (493, 503), False, 'import sys\n'), ((702, 746), 'signal.signal', 'signal.signal', (['signal.SIGINT', 'signal.SIG_DFL'], {}), '(signal.SIGINT, signal.SIG_DFL)\n', (715, 746), False, 'import signal\n'), ((796, 812), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (810, 812), False, 'from argparse import ArgumentParser\n'), ((1025, 1039), 'youtubelive_ffmpeg.youtubelive', 'youtubelive', (['P'], {}), '(P)\n', (1036, 1039), False, 'from youtubelive_ffmpeg import youtubelive\n'), ((562, 594), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (585, 594), False, 'import sys\n')] |
import unittest
from unittest.mock import patch, Mock
from werkzeug.datastructures import FileStorage
import io
import json
from app import app
from app.models.base import db
from app.models.user import User
from app.auth.views import UserPassportphotoView
from app.auth import views
class AuthUploadPassportPhotoTestCase(unittest.TestCase):
    """Tests for the passport-photo upload endpoint (/api/v1/auth/upload)."""
    def setUp(self):
        # fresh test client and a clean database with one admin user
        self.app = app.test_client()
        app.testing = True
        self.user_data = {
            "username": "john123",
            "email": "<EMAIL>",
            "password": "<PASSWORD>",
        }
        with app.app_context():
            db.drop_all()
            db.create_all()
            # create admin user
            user = User(
                username="john123",
                email="<EMAIL>",
                password="<PASSWORD>",
                role=True,
            )
            user.save()
    @patch.object(views.UserPassportphotoView, "post")
    def test_upload_passport_photo(self, mock_post):
        """A valid multipart upload should succeed (view's post is mocked)."""
        upload = UserPassportphotoView()
        mock_post.return_value.status_code = 200
        res = upload.post(
            "/api/v1/auth/upload",
            data=dict(file=(io.BytesIO(b"abcdef"), "test.jpg")),
            headers={"Content-Type": "multipart/form-data"},
        )
        self.assertEqual(res.status_code, 200)
    def test_upload_photo_with_non_allowed_ext(self):
        """Uploading a file with a non-image extension should be rejected (400)."""
        # log in to obtain an access token for the upload request
        res = self.app.post(
            "/api/v1/auth/login",
            data=json.dumps(self.user_data),
            headers={"Content-Type": "application/json"},
        )
        token = json.loads(res.data.decode())["access_token"]
        data = {"file": (io.BytesIO(b'my file contents'), 'hello.txt')}
        result = self.app.post(
            "/api/v1/auth/upload", buffered=True,
            headers={
                "Authorization": token,
                "Content-Type" : 'multipart/form-data',
            },
            data=data,
        )
        self.assertEqual(result.status_code, 400)
    def test_no_photo_upload(self):
        """Submitting the form with no file at all should be rejected (400)."""
        res = self.app.post(
            "/api/v1/auth/login",
            data=json.dumps(self.user_data),
            headers={"Content-Type": "application/json"},
        )
        token = json.loads(res.data.decode())["access_token"]
        result = self.app.post(
            "/api/v1/auth/upload", buffered=True,
            headers={
                "Authorization": token,
                "Content-Type" : 'multipart/form-data',
            },
            data={},
        )
        self.assertEqual(result.status_code, 400)
| [
"app.auth.views.UserPassportphotoView",
"app.models.base.db.drop_all",
"app.models.user.User",
"json.dumps",
"app.app.test_client",
"io.BytesIO",
"app.app.app_context",
"app.models.base.db.create_all",
"unittest.mock.patch.object"
] | [((899, 948), 'unittest.mock.patch.object', 'patch.object', (['views.UserPassportphotoView', '"""post"""'], {}), "(views.UserPassportphotoView, 'post')\n", (911, 948), False, 'from unittest.mock import patch, Mock\n'), ((387, 404), 'app.app.test_client', 'app.test_client', ([], {}), '()\n', (402, 404), False, 'from app import app\n'), ((1019, 1042), 'app.auth.views.UserPassportphotoView', 'UserPassportphotoView', ([], {}), '()\n', (1040, 1042), False, 'from app.auth.views import UserPassportphotoView\n'), ((589, 606), 'app.app.app_context', 'app.app_context', ([], {}), '()\n', (604, 606), False, 'from app import app\n'), ((620, 633), 'app.models.base.db.drop_all', 'db.drop_all', ([], {}), '()\n', (631, 633), False, 'from app.models.base import db\n'), ((646, 661), 'app.models.base.db.create_all', 'db.create_all', ([], {}), '()\n', (659, 661), False, 'from app.models.base import db\n'), ((714, 789), 'app.models.user.User', 'User', ([], {'username': '"""john123"""', 'email': '"""<EMAIL>"""', 'password': '"""<PASSWORD>"""', 'role': '(True)'}), "(username='john123', email='<EMAIL>', password='<PASSWORD>', role=True)\n", (718, 789), False, 'from app.models.user import User\n'), ((1473, 1499), 'json.dumps', 'json.dumps', (['self.user_data'], {}), '(self.user_data)\n', (1483, 1499), False, 'import json\n'), ((1657, 1688), 'io.BytesIO', 'io.BytesIO', (["b'my file contents'"], {}), "(b'my file contents')\n", (1667, 1688), False, 'import io\n'), ((2121, 2147), 'json.dumps', 'json.dumps', (['self.user_data'], {}), '(self.user_data)\n', (2131, 2147), False, 'import json\n'), ((1182, 1203), 'io.BytesIO', 'io.BytesIO', (["b'abcdef'"], {}), "(b'abcdef')\n", (1192, 1203), False, 'import io\n')] |
import os
import numpy as np
import pytest
from pennylane import qchem
from openfermion.hamiltonians import MolecularData
# Directory holding the pre-computed reference molecular data files.
ref_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_ref_files")
# Reference two-particle matrix-element table for H2 (full orbital space).
# Each row appears to be four spin-orbital indices followed by the matrix
# element, matching the output of qchem.two_particle — see
# test_table_two_particle below.
table_1 = np.array(
    [
        [0.0, 0.0, 0.0, 0.0, 0.68238953],
        [0.0, 1.0, 1.0, 0.0, 0.68238953],
        [1.0, 0.0, 0.0, 1.0, 0.68238953],
        [1.0, 1.0, 1.0, 1.0, 0.68238953],
        [0.0, 0.0, 2.0, 2.0, 0.17900058],
        [0.0, 1.0, 3.0, 2.0, 0.17900058],
        [1.0, 0.0, 2.0, 3.0, 0.17900058],
        [1.0, 1.0, 3.0, 3.0, 0.17900058],
        [0.0, 2.0, 0.0, 2.0, 0.17900058],
        [0.0, 3.0, 1.0, 2.0, 0.17900058],
        [1.0, 2.0, 0.0, 3.0, 0.17900058],
        [1.0, 3.0, 1.0, 3.0, 0.17900058],
        [0.0, 2.0, 2.0, 0.0, 0.67073278],
        [0.0, 3.0, 3.0, 0.0, 0.67073278],
        [1.0, 2.0, 2.0, 1.0, 0.67073278],
        [1.0, 3.0, 3.0, 1.0, 0.67073278],
        [2.0, 0.0, 0.0, 2.0, 0.67073278],
        [2.0, 1.0, 1.0, 2.0, 0.67073278],
        [3.0, 0.0, 0.0, 3.0, 0.67073278],
        [3.0, 1.0, 1.0, 3.0, 0.67073278],
        [2.0, 0.0, 2.0, 0.0, 0.17900058],
        [2.0, 1.0, 3.0, 0.0, 0.17900058],
        [3.0, 0.0, 2.0, 1.0, 0.17900058],
        [3.0, 1.0, 3.0, 1.0, 0.17900058],
        [2.0, 2.0, 0.0, 0.0, 0.17900058],
        [2.0, 3.0, 1.0, 0.0, 0.17900058],
        [3.0, 2.0, 0.0, 1.0, 0.17900058],
        [3.0, 3.0, 1.0, 1.0, 0.17900058],
        [2.0, 2.0, 2.0, 2.0, 0.70510563],
        [2.0, 3.0, 3.0, 2.0, 0.70510563],
        [3.0, 2.0, 2.0, 3.0, 0.70510563],
        [3.0, 3.0, 3.0, 3.0, 0.70510563],
    ]
)
# Reference table for H2 with orbital 0 frozen as core (core=[0]).
table_2 = np.array(
    [
        [0.0, 0.0, 0.0, 0.0, 0.70510563],
        [0.0, 1.0, 1.0, 0.0, 0.70510563],
        [1.0, 0.0, 0.0, 1.0, 0.70510563],
        [1.0, 1.0, 1.0, 1.0, 0.70510563],
    ]
)
# Reference table for LiH with core=[0] and active=[1, 2].
table_3 = np.array(
    [
        [0.0, 0.0, 0.0, 0.0, 0.48731097],
        [0.0, 1.0, 1.0, 0.0, 0.48731097],
        [1.0, 0.0, 0.0, 1.0, 0.48731097],
        [1.0, 1.0, 1.0, 1.0, 0.48731097],
        [0.0, 0.0, 0.0, 2.0, -0.04857958],
        [0.0, 1.0, 1.0, 2.0, -0.04857958],
        [1.0, 0.0, 0.0, 3.0, -0.04857958],
        [1.0, 1.0, 1.0, 3.0, -0.04857958],
        [0.0, 0.0, 2.0, 0.0, -0.04857958],
        [0.0, 1.0, 3.0, 0.0, -0.04857958],
        [1.0, 0.0, 2.0, 1.0, -0.04857958],
        [1.0, 1.0, 3.0, 1.0, -0.04857958],
        [0.0, 0.0, 2.0, 2.0, 0.01306398],
        [0.0, 1.0, 3.0, 2.0, 0.01306398],
        [1.0, 0.0, 2.0, 3.0, 0.01306398],
        [1.0, 1.0, 3.0, 3.0, 0.01306398],
        [0.0, 2.0, 0.0, 0.0, -0.04857958],
        [0.0, 3.0, 1.0, 0.0, -0.04857958],
        [1.0, 2.0, 0.0, 1.0, -0.04857958],
        [1.0, 3.0, 1.0, 1.0, -0.04857958],
        [0.0, 2.0, 0.0, 2.0, 0.01306398],
        [0.0, 3.0, 1.0, 2.0, 0.01306398],
        [1.0, 2.0, 0.0, 3.0, 0.01306398],
        [1.0, 3.0, 1.0, 3.0, 0.01306398],
        [0.0, 2.0, 2.0, 0.0, 0.22361004],
        [0.0, 3.0, 3.0, 0.0, 0.22361004],
        [1.0, 2.0, 2.0, 1.0, 0.22361004],
        [1.0, 3.0, 3.0, 1.0, 0.22361004],
        [0.0, 2.0, 2.0, 2.0, 0.00748417],
        [0.0, 3.0, 3.0, 2.0, 0.00748417],
        [1.0, 2.0, 2.0, 3.0, 0.00748417],
        [1.0, 3.0, 3.0, 3.0, 0.00748417],
        [2.0, 0.0, 0.0, 0.0, -0.04857958],
        [2.0, 1.0, 1.0, 0.0, -0.04857958],
        [3.0, 0.0, 0.0, 1.0, -0.04857958],
        [3.0, 1.0, 1.0, 1.0, -0.04857958],
        [2.0, 0.0, 0.0, 2.0, 0.22361004],
        [2.0, 1.0, 1.0, 2.0, 0.22361004],
        [3.0, 0.0, 0.0, 3.0, 0.22361004],
        [3.0, 1.0, 1.0, 3.0, 0.22361004],
        [2.0, 0.0, 2.0, 0.0, 0.01306398],
        [2.0, 1.0, 3.0, 0.0, 0.01306398],
        [3.0, 0.0, 2.0, 1.0, 0.01306398],
        [3.0, 1.0, 3.0, 1.0, 0.01306398],
        [2.0, 0.0, 2.0, 2.0, 0.00748417],
        [2.0, 1.0, 3.0, 2.0, 0.00748417],
        [3.0, 0.0, 2.0, 3.0, 0.00748417],
        [3.0, 1.0, 3.0, 3.0, 0.00748417],
        [2.0, 2.0, 0.0, 0.0, 0.01306398],
        [2.0, 3.0, 1.0, 0.0, 0.01306398],
        [3.0, 2.0, 0.0, 1.0, 0.01306398],
        [3.0, 3.0, 1.0, 1.0, 0.01306398],
        [2.0, 2.0, 0.0, 2.0, 0.00748417],
        [2.0, 3.0, 1.0, 2.0, 0.00748417],
        [3.0, 2.0, 0.0, 3.0, 0.00748417],
        [3.0, 3.0, 1.0, 3.0, 0.00748417],
        [2.0, 2.0, 2.0, 0.0, 0.00748417],
        [2.0, 3.0, 3.0, 0.0, 0.00748417],
        [3.0, 2.0, 2.0, 1.0, 0.00748417],
        [3.0, 3.0, 3.0, 1.0, 0.00748417],
        [2.0, 2.0, 2.0, 2.0, 0.33788228],
        [2.0, 3.0, 3.0, 2.0, 0.33788228],
        [3.0, 2.0, 2.0, 3.0, 0.33788228],
        [3.0, 3.0, 3.0, 3.0, 0.33788228],
    ]
)
# Each case: (molecule file, core orbitals, active orbitals,
#             expected table, expected core contribution).
@pytest.mark.parametrize(
    ("name", "core", "active", "table_exp", "v_core_exp"),
    [
        ("h2_pyscf", None, None, table_1, 0),
        ("h2_pyscf", [0], None, table_2, 0.6823895331520422),
        ("h2_pyscf", None, [0, 1], table_1, 0),
        ("h2_pyscf", [0], [1], table_2, 0.6823895331520422),
        ("lih", [0], [1, 2], table_3, 1.6585666870874103),
    ],
)
def test_table_two_particle(name, core, active, table_exp, v_core_exp, tol):
    r"""Test the table of two-particle matrix elements and the contribution of core orbitals
    as implemented in the `two_particle` function of the `obs` module"""
    # load the pre-computed molecular data shipped with the test suite
    hf_data = MolecularData(filename=os.path.join(ref_dir, name))
    table, v_core = qchem.two_particle(hf_data.two_body_integrals, core=core, active=active)
    assert np.allclose(table, table_exp, **tol)
    assert np.allclose(v_core, v_core_exp, **tol)
# Malformed / out-of-range inputs used to trigger the validation errors below.
v_me_1D = np.array([1, 2, 3, 4])
v_me_4D = np.full((2, 2, 2, 2), 0.5)
# Each case: (matrix elements, core, active, expected error-message fragment).
@pytest.mark.parametrize(
    ("v_me", "core", "active", "msg_match"),
    [
        (v_me_1D, [0], None, "'matrix_elements' must be a 4D array"),
        (v_me_4D, [-1, 0, 1, 2], None, "Indices of core orbitals must be between 0 and"),
        (v_me_4D, [0, 1, 2, 3], None, "Indices of core orbitals must be between 0 and"),
        (v_me_4D, None, [-1, 0], "Indices of active orbitals must be between 0 and"),
        (v_me_4D, None, [2, 6], "Indices of active orbitals must be between 0 and"),
    ],
)
def test_exceptions_two_particle(v_me, core, active, msg_match):
    """Test that the function `'two_particle'` throws an exception
    if the dimension of the matrix elements array is not a 4D array or
    if the indices of core and/or active orbitals are out of range."""
    with pytest.raises(ValueError, match=msg_match):
        qchem.two_particle(v_me, core=core, active=active)
| [
"numpy.allclose",
"pennylane.qchem.two_particle",
"os.path.join",
"os.path.realpath",
"numpy.array",
"pytest.mark.parametrize",
"pytest.raises",
"numpy.full"
] | [((222, 1390), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.68238953], [0.0, 1.0, 1.0, 0.0, 0.68238953], [1.0, \n 0.0, 0.0, 1.0, 0.68238953], [1.0, 1.0, 1.0, 1.0, 0.68238953], [0.0, 0.0,\n 2.0, 2.0, 0.17900058], [0.0, 1.0, 3.0, 2.0, 0.17900058], [1.0, 0.0, 2.0,\n 3.0, 0.17900058], [1.0, 1.0, 3.0, 3.0, 0.17900058], [0.0, 2.0, 0.0, 2.0,\n 0.17900058], [0.0, 3.0, 1.0, 2.0, 0.17900058], [1.0, 2.0, 0.0, 3.0, \n 0.17900058], [1.0, 3.0, 1.0, 3.0, 0.17900058], [0.0, 2.0, 2.0, 0.0, \n 0.67073278], [0.0, 3.0, 3.0, 0.0, 0.67073278], [1.0, 2.0, 2.0, 1.0, \n 0.67073278], [1.0, 3.0, 3.0, 1.0, 0.67073278], [2.0, 0.0, 0.0, 2.0, \n 0.67073278], [2.0, 1.0, 1.0, 2.0, 0.67073278], [3.0, 0.0, 0.0, 3.0, \n 0.67073278], [3.0, 1.0, 1.0, 3.0, 0.67073278], [2.0, 0.0, 2.0, 0.0, \n 0.17900058], [2.0, 1.0, 3.0, 0.0, 0.17900058], [3.0, 0.0, 2.0, 1.0, \n 0.17900058], [3.0, 1.0, 3.0, 1.0, 0.17900058], [2.0, 2.0, 0.0, 0.0, \n 0.17900058], [2.0, 3.0, 1.0, 0.0, 0.17900058], [3.0, 2.0, 0.0, 1.0, \n 0.17900058], [3.0, 3.0, 1.0, 1.0, 0.17900058], [2.0, 2.0, 2.0, 2.0, \n 0.70510563], [2.0, 3.0, 3.0, 2.0, 0.70510563], [3.0, 2.0, 2.0, 3.0, \n 0.70510563], [3.0, 3.0, 3.0, 3.0, 0.70510563]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.68238953], [0.0, 1.0, 1.0, 0.0, 0.68238953\n ], [1.0, 0.0, 0.0, 1.0, 0.68238953], [1.0, 1.0, 1.0, 1.0, 0.68238953],\n [0.0, 0.0, 2.0, 2.0, 0.17900058], [0.0, 1.0, 3.0, 2.0, 0.17900058], [\n 1.0, 0.0, 2.0, 3.0, 0.17900058], [1.0, 1.0, 3.0, 3.0, 0.17900058], [0.0,\n 2.0, 0.0, 2.0, 0.17900058], [0.0, 3.0, 1.0, 2.0, 0.17900058], [1.0, 2.0,\n 0.0, 3.0, 0.17900058], [1.0, 3.0, 1.0, 3.0, 0.17900058], [0.0, 2.0, 2.0,\n 0.0, 0.67073278], [0.0, 3.0, 3.0, 0.0, 0.67073278], [1.0, 2.0, 2.0, 1.0,\n 0.67073278], [1.0, 3.0, 3.0, 1.0, 0.67073278], [2.0, 0.0, 0.0, 2.0, \n 0.67073278], [2.0, 1.0, 1.0, 2.0, 0.67073278], [3.0, 0.0, 0.0, 3.0, \n 0.67073278], [3.0, 1.0, 1.0, 3.0, 0.67073278], [2.0, 0.0, 2.0, 0.0, \n 0.17900058], [2.0, 1.0, 3.0, 0.0, 0.17900058], [3.0, 0.0, 2.0, 1.0, \n 
0.17900058], [3.0, 1.0, 3.0, 1.0, 0.17900058], [2.0, 2.0, 0.0, 0.0, \n 0.17900058], [2.0, 3.0, 1.0, 0.0, 0.17900058], [3.0, 2.0, 0.0, 1.0, \n 0.17900058], [3.0, 3.0, 1.0, 1.0, 0.17900058], [2.0, 2.0, 2.0, 2.0, \n 0.70510563], [2.0, 3.0, 3.0, 2.0, 0.70510563], [3.0, 2.0, 2.0, 3.0, \n 0.70510563], [3.0, 3.0, 3.0, 3.0, 0.70510563]])\n', (230, 1390), True, 'import numpy as np\n'), ((1601, 1752), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.70510563], [0.0, 1.0, 1.0, 0.0, 0.70510563], [1.0, \n 0.0, 0.0, 1.0, 0.70510563], [1.0, 1.0, 1.0, 1.0, 0.70510563]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.70510563], [0.0, 1.0, 1.0, 0.0, 0.70510563\n ], [1.0, 0.0, 0.0, 1.0, 0.70510563], [1.0, 1.0, 1.0, 1.0, 0.70510563]])\n', (1609, 1752), True, 'import numpy as np\n'), ((1804, 4156), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.48731097], [0.0, 1.0, 1.0, 0.0, 0.48731097], [1.0, \n 0.0, 0.0, 1.0, 0.48731097], [1.0, 1.0, 1.0, 1.0, 0.48731097], [0.0, 0.0,\n 0.0, 2.0, -0.04857958], [0.0, 1.0, 1.0, 2.0, -0.04857958], [1.0, 0.0, \n 0.0, 3.0, -0.04857958], [1.0, 1.0, 1.0, 3.0, -0.04857958], [0.0, 0.0, \n 2.0, 0.0, -0.04857958], [0.0, 1.0, 3.0, 0.0, -0.04857958], [1.0, 0.0, \n 2.0, 1.0, -0.04857958], [1.0, 1.0, 3.0, 1.0, -0.04857958], [0.0, 0.0, \n 2.0, 2.0, 0.01306398], [0.0, 1.0, 3.0, 2.0, 0.01306398], [1.0, 0.0, 2.0,\n 3.0, 0.01306398], [1.0, 1.0, 3.0, 3.0, 0.01306398], [0.0, 2.0, 0.0, 0.0,\n -0.04857958], [0.0, 3.0, 1.0, 0.0, -0.04857958], [1.0, 2.0, 0.0, 1.0, -\n 0.04857958], [1.0, 3.0, 1.0, 1.0, -0.04857958], [0.0, 2.0, 0.0, 2.0, \n 0.01306398], [0.0, 3.0, 1.0, 2.0, 0.01306398], [1.0, 2.0, 0.0, 3.0, \n 0.01306398], [1.0, 3.0, 1.0, 3.0, 0.01306398], [0.0, 2.0, 2.0, 0.0, \n 0.22361004], [0.0, 3.0, 3.0, 0.0, 0.22361004], [1.0, 2.0, 2.0, 1.0, \n 0.22361004], [1.0, 3.0, 3.0, 1.0, 0.22361004], [0.0, 2.0, 2.0, 2.0, \n 0.00748417], [0.0, 3.0, 3.0, 2.0, 0.00748417], [1.0, 2.0, 2.0, 3.0, \n 0.00748417], [1.0, 3.0, 3.0, 3.0, 0.00748417], [2.0, 0.0, 0.0, 0.0, -\n 0.04857958], [2.0, 
1.0, 1.0, 0.0, -0.04857958], [3.0, 0.0, 0.0, 1.0, -\n 0.04857958], [3.0, 1.0, 1.0, 1.0, -0.04857958], [2.0, 0.0, 0.0, 2.0, \n 0.22361004], [2.0, 1.0, 1.0, 2.0, 0.22361004], [3.0, 0.0, 0.0, 3.0, \n 0.22361004], [3.0, 1.0, 1.0, 3.0, 0.22361004], [2.0, 0.0, 2.0, 0.0, \n 0.01306398], [2.0, 1.0, 3.0, 0.0, 0.01306398], [3.0, 0.0, 2.0, 1.0, \n 0.01306398], [3.0, 1.0, 3.0, 1.0, 0.01306398], [2.0, 0.0, 2.0, 2.0, \n 0.00748417], [2.0, 1.0, 3.0, 2.0, 0.00748417], [3.0, 0.0, 2.0, 3.0, \n 0.00748417], [3.0, 1.0, 3.0, 3.0, 0.00748417], [2.0, 2.0, 0.0, 0.0, \n 0.01306398], [2.0, 3.0, 1.0, 0.0, 0.01306398], [3.0, 2.0, 0.0, 1.0, \n 0.01306398], [3.0, 3.0, 1.0, 1.0, 0.01306398], [2.0, 2.0, 0.0, 2.0, \n 0.00748417], [2.0, 3.0, 1.0, 2.0, 0.00748417], [3.0, 2.0, 0.0, 3.0, \n 0.00748417], [3.0, 3.0, 1.0, 3.0, 0.00748417], [2.0, 2.0, 2.0, 0.0, \n 0.00748417], [2.0, 3.0, 3.0, 0.0, 0.00748417], [3.0, 2.0, 2.0, 1.0, \n 0.00748417], [3.0, 3.0, 3.0, 1.0, 0.00748417], [2.0, 2.0, 2.0, 2.0, \n 0.33788228], [2.0, 3.0, 3.0, 2.0, 0.33788228], [3.0, 2.0, 2.0, 3.0, \n 0.33788228], [3.0, 3.0, 3.0, 3.0, 0.33788228]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.48731097], [0.0, 1.0, 1.0, 0.0, 0.48731097\n ], [1.0, 0.0, 0.0, 1.0, 0.48731097], [1.0, 1.0, 1.0, 1.0, 0.48731097],\n [0.0, 0.0, 0.0, 2.0, -0.04857958], [0.0, 1.0, 1.0, 2.0, -0.04857958], [\n 1.0, 0.0, 0.0, 3.0, -0.04857958], [1.0, 1.0, 1.0, 3.0, -0.04857958], [\n 0.0, 0.0, 2.0, 0.0, -0.04857958], [0.0, 1.0, 3.0, 0.0, -0.04857958], [\n 1.0, 0.0, 2.0, 1.0, -0.04857958], [1.0, 1.0, 3.0, 1.0, -0.04857958], [\n 0.0, 0.0, 2.0, 2.0, 0.01306398], [0.0, 1.0, 3.0, 2.0, 0.01306398], [1.0,\n 0.0, 2.0, 3.0, 0.01306398], [1.0, 1.0, 3.0, 3.0, 0.01306398], [0.0, 2.0,\n 0.0, 0.0, -0.04857958], [0.0, 3.0, 1.0, 0.0, -0.04857958], [1.0, 2.0, \n 0.0, 1.0, -0.04857958], [1.0, 3.0, 1.0, 1.0, -0.04857958], [0.0, 2.0, \n 0.0, 2.0, 0.01306398], [0.0, 3.0, 1.0, 2.0, 0.01306398], [1.0, 2.0, 0.0,\n 3.0, 0.01306398], [1.0, 3.0, 1.0, 3.0, 0.01306398], [0.0, 2.0, 2.0, 0.0,\n 0.22361004], 
[0.0, 3.0, 3.0, 0.0, 0.22361004], [1.0, 2.0, 2.0, 1.0, \n 0.22361004], [1.0, 3.0, 3.0, 1.0, 0.22361004], [0.0, 2.0, 2.0, 2.0, \n 0.00748417], [0.0, 3.0, 3.0, 2.0, 0.00748417], [1.0, 2.0, 2.0, 3.0, \n 0.00748417], [1.0, 3.0, 3.0, 3.0, 0.00748417], [2.0, 0.0, 0.0, 0.0, -\n 0.04857958], [2.0, 1.0, 1.0, 0.0, -0.04857958], [3.0, 0.0, 0.0, 1.0, -\n 0.04857958], [3.0, 1.0, 1.0, 1.0, -0.04857958], [2.0, 0.0, 0.0, 2.0, \n 0.22361004], [2.0, 1.0, 1.0, 2.0, 0.22361004], [3.0, 0.0, 0.0, 3.0, \n 0.22361004], [3.0, 1.0, 1.0, 3.0, 0.22361004], [2.0, 0.0, 2.0, 0.0, \n 0.01306398], [2.0, 1.0, 3.0, 0.0, 0.01306398], [3.0, 0.0, 2.0, 1.0, \n 0.01306398], [3.0, 1.0, 3.0, 1.0, 0.01306398], [2.0, 0.0, 2.0, 2.0, \n 0.00748417], [2.0, 1.0, 3.0, 2.0, 0.00748417], [3.0, 0.0, 2.0, 3.0, \n 0.00748417], [3.0, 1.0, 3.0, 3.0, 0.00748417], [2.0, 2.0, 0.0, 0.0, \n 0.01306398], [2.0, 3.0, 1.0, 0.0, 0.01306398], [3.0, 2.0, 0.0, 1.0, \n 0.01306398], [3.0, 3.0, 1.0, 1.0, 0.01306398], [2.0, 2.0, 0.0, 2.0, \n 0.00748417], [2.0, 3.0, 1.0, 2.0, 0.00748417], [3.0, 2.0, 0.0, 3.0, \n 0.00748417], [3.0, 3.0, 1.0, 3.0, 0.00748417], [2.0, 2.0, 2.0, 0.0, \n 0.00748417], [2.0, 3.0, 3.0, 0.0, 0.00748417], [3.0, 2.0, 2.0, 1.0, \n 0.00748417], [3.0, 3.0, 3.0, 1.0, 0.00748417], [2.0, 2.0, 2.0, 2.0, \n 0.33788228], [2.0, 3.0, 3.0, 2.0, 0.33788228], [3.0, 2.0, 2.0, 3.0, \n 0.33788228], [3.0, 3.0, 3.0, 3.0, 0.33788228]])\n', (1812, 4156), True, 'import numpy as np\n'), ((4535, 4868), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('name', 'core', 'active', 'table_exp', 'v_core_exp')", "[('h2_pyscf', None, None, table_1, 0), ('h2_pyscf', [0], None, table_2, \n 0.6823895331520422), ('h2_pyscf', None, [0, 1], table_1, 0), (\n 'h2_pyscf', [0], [1], table_2, 0.6823895331520422), ('lih', [0], [1, 2],\n table_3, 1.6585666870874103)]"], {}), "(('name', 'core', 'active', 'table_exp',\n 'v_core_exp'), [('h2_pyscf', None, None, table_1, 0), ('h2_pyscf', [0],\n None, table_2, 0.6823895331520422), ('h2_pyscf', None, [0, 1], 
table_1,\n 0), ('h2_pyscf', [0], [1], table_2, 0.6823895331520422), ('lih', [0], [\n 1, 2], table_3, 1.6585666870874103)])\n", (4558, 4868), False, 'import pytest\n'), ((5425, 5447), 'numpy.array', 'np.array', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (5433, 5447), True, 'import numpy as np\n'), ((5458, 5484), 'numpy.full', 'np.full', (['(2, 2, 2, 2)', '(0.5)'], {}), '((2, 2, 2, 2), 0.5)\n', (5465, 5484), True, 'import numpy as np\n'), ((5488, 5958), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('v_me', 'core', 'active', 'msg_match')", '[(v_me_1D, [0], None, "\'matrix_elements\' must be a 4D array"), (v_me_4D, [-\n 1, 0, 1, 2], None, \'Indices of core orbitals must be between 0 and\'), (\n v_me_4D, [0, 1, 2, 3], None,\n \'Indices of core orbitals must be between 0 and\'), (v_me_4D, None, [-1,\n 0], \'Indices of active orbitals must be between 0 and\'), (v_me_4D, None,\n [2, 6], \'Indices of active orbitals must be between 0 and\')]'], {}), '((\'v_me\', \'core\', \'active\', \'msg_match\'), [(v_me_1D,\n [0], None, "\'matrix_elements\' must be a 4D array"), (v_me_4D, [-1, 0, 1,\n 2], None, \'Indices of core orbitals must be between 0 and\'), (v_me_4D,\n [0, 1, 2, 3], None, \'Indices of core orbitals must be between 0 and\'),\n (v_me_4D, None, [-1, 0],\n \'Indices of active orbitals must be between 0 and\'), (v_me_4D, None, [2,\n 6], \'Indices of active orbitals must be between 0 and\')])\n', (5511, 5958), False, 'import pytest\n'), ((5241, 5313), 'pennylane.qchem.two_particle', 'qchem.two_particle', (['hf_data.two_body_integrals'], {'core': 'core', 'active': 'active'}), '(hf_data.two_body_integrals, core=core, active=active)\n', (5259, 5313), False, 'from pennylane import qchem\n'), ((5326, 5362), 'numpy.allclose', 'np.allclose', (['table', 'table_exp'], {}), '(table, table_exp, **tol)\n', (5337, 5362), True, 'import numpy as np\n'), ((5374, 5412), 'numpy.allclose', 'np.allclose', (['v_core', 'v_core_exp'], {}), '(v_core, v_core_exp, **tol)\n', (5385, 
5412), True, 'import numpy as np\n'), ((164, 190), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (180, 190), False, 'import os\n'), ((6277, 6319), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg_match'}), '(ValueError, match=msg_match)\n', (6290, 6319), False, 'import pytest\n'), ((6329, 6379), 'pennylane.qchem.two_particle', 'qchem.two_particle', (['v_me'], {'core': 'core', 'active': 'active'}), '(v_me, core=core, active=active)\n', (6347, 6379), False, 'from pennylane import qchem\n'), ((5191, 5218), 'os.path.join', 'os.path.join', (['ref_dir', 'name'], {}), '(ref_dir, name)\n', (5203, 5218), False, 'import os\n')] |
# Generated by Django 2.0 on 2018-02-14 13:39
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Article`` model.

    ``Article`` combines ``model_utils`` field helpers (auto-managed
    created/modified timestamps, a draft/published/archived status with
    a change-time monitor) with the content fields proper (unique
    title, slug, body) and an optional start/end date range.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
                ('status', model_utils.fields.StatusField(choices=[('draft', 'draft'), ('published', 'published'), ('archived', 'archived')], default='draft', max_length=100, no_check_for_status=True, verbose_name='status')),
                ('status_changed', model_utils.fields.MonitorField(default=django.utils.timezone.now, monitor='status', verbose_name='status changed')),
                ('start', models.DateField(blank=True, null=True, verbose_name='start date')),
                ('end', models.DateField(blank=True, null=True, verbose_name='end date')),
                ('title', models.CharField(max_length=50, unique=True)),
                ('slug', models.SlugField()),
                ('body', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| [
"django.db.models.DateField",
"django.db.models.TextField",
"django.db.models.SlugField",
"django.db.models.AutoField",
"django.db.models.CharField"
] | [((356, 449), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (372, 449), False, 'from django.db import migrations, models\n'), ((1142, 1208), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""start date"""'}), "(blank=True, null=True, verbose_name='start date')\n", (1158, 1208), False, 'from django.db import migrations, models\n'), ((1235, 1299), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""end date"""'}), "(blank=True, null=True, verbose_name='end date')\n", (1251, 1299), False, 'from django.db import migrations, models\n'), ((1328, 1372), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'unique': '(True)'}), '(max_length=50, unique=True)\n', (1344, 1372), False, 'from django.db import migrations, models\n'), ((1400, 1418), 'django.db.models.SlugField', 'models.SlugField', ([], {}), '()\n', (1416, 1418), False, 'from django.db import migrations, models\n'), ((1446, 1464), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1462, 1464), False, 'from django.db import migrations, models\n')] |
from django.contrib import admin
from django import forms
from establishment.accounts.utils import get_user_search_fields
from .models import SocialApp, SocialAccount, SocialToken, SocialProvider
class SocialAppForm(forms.ModelForm):
    """Admin form for SocialApp with widened inputs for credential fields."""
    class Meta:
        model = SocialApp
        exclude = []
        # Render each credential field as a 100-character-wide text box.
        widgets = {
            field_name: forms.TextInput(attrs={"size": "100"})
            for field_name in ("client_id", "key", "secret")
        }
class SocialAppAdmin(admin.ModelAdmin):
    """Admin for SocialApp registrations."""
    # Custom form widens the credential text inputs.
    form = SocialAppForm
    list_display = ("name", "provider_instance",)
    # Two-pane selector for the many-to-many "sites" relation.
    filter_horizontal = ("sites",)
class SocialAccountAdmin(admin.ModelAdmin):
    """Admin for linked social accounts, searchable via the related user."""
    search_fields = []
    raw_id_fields = ("user",)
    list_display = ("user", "uid", "provider_instance")
    list_filter = ("provider_instance",)

    def get_search_fields(self, request):
        # Search across the related user's searchable fields (user__<field>).
        return ["user__{0}".format(attr) for attr in get_user_search_fields()]
class SocialTokenAdmin(admin.ModelAdmin):
    """Admin for OAuth tokens; the changelist shows a shortened token."""
    raw_id_fields = ("app", "account",)
    list_display = ("app", "account", "truncated_token", "expires_at")
    list_filter = ("app", "app__provider_instance", "expires_at")

    def truncated_token(self, token):
        # Display at most 40 characters of the raw token value.
        raw = token.token
        if len(raw) <= 40:
            return raw
        return raw[:40] + "...(truncated)"

    truncated_token.short_description = "Token"
# Register the social-auth models with the default admin site.
admin.site.register(SocialApp, SocialAppAdmin)
admin.site.register(SocialToken, SocialTokenAdmin)
admin.site.register(SocialAccount, SocialAccountAdmin)
# SocialProvider uses the stock ModelAdmin.
admin.site.register(SocialProvider)
| [
"establishment.accounts.utils.get_user_search_fields",
"django.contrib.admin.site.register",
"django.forms.TextInput"
] | [((1442, 1488), 'django.contrib.admin.site.register', 'admin.site.register', (['SocialApp', 'SocialAppAdmin'], {}), '(SocialApp, SocialAppAdmin)\n', (1461, 1488), False, 'from django.contrib import admin\n'), ((1489, 1539), 'django.contrib.admin.site.register', 'admin.site.register', (['SocialToken', 'SocialTokenAdmin'], {}), '(SocialToken, SocialTokenAdmin)\n', (1508, 1539), False, 'from django.contrib import admin\n'), ((1540, 1594), 'django.contrib.admin.site.register', 'admin.site.register', (['SocialAccount', 'SocialAccountAdmin'], {}), '(SocialAccount, SocialAccountAdmin)\n', (1559, 1594), False, 'from django.contrib import admin\n'), ((1595, 1630), 'django.contrib.admin.site.register', 'admin.site.register', (['SocialProvider'], {}), '(SocialProvider)\n', (1614, 1630), False, 'from django.contrib import admin\n'), ((346, 384), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'size': '100'}"}), "(attrs={'size': '100'})\n", (361, 384), False, 'from django import forms\n'), ((405, 443), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'size': '100'}"}), "(attrs={'size': '100'})\n", (420, 443), False, 'from django import forms\n'), ((467, 505), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'size': '100'}"}), "(attrs={'size': '100'})\n", (482, 505), False, 'from django import forms\n'), ((951, 975), 'establishment.accounts.utils.get_user_search_fields', 'get_user_search_fields', ([], {}), '()\n', (973, 975), False, 'from establishment.accounts.utils import get_user_search_fields\n')] |
"""GitHub tap class."""
from typing import List
from singer_sdk import Tap, Stream
from singer_sdk import typing as th # JSON schema typing helpers
from tap_github.streams import (
CommitsStream,
CommunityProfileStream,
IssueCommentsStream,
IssueEventsStream,
IssuesStream,
PullRequestsStream,
ReadmeStream,
RepositoryStream,
)
class TapGitHub(Tap):
    """Singer tap for the GitHub API."""

    name = "tap-github"

    # JSON schema describing the accepted tap configuration keys.
    config_jsonschema = th.PropertiesList(
        th.Property("user_agent", th.StringType),
        th.Property("metrics_log_level", th.StringType),
        th.Property("auth_token", th.StringType),
        th.Property(
            "searches",
            th.ArrayType(
                th.ObjectType(
                    th.Property("name", th.StringType, required=True),
                    th.Property("query", th.StringType, required=True),
                )
            ),
        ),
        th.Property("repositories", th.ArrayType(th.StringType)),
        th.Property("start_date", th.DateTimeType),
        th.Property("stream_maps", th.ObjectType()),
        th.Property("stream_map_config", th.ObjectType()),
    ).to_dict()

    def discover_streams(self) -> List[Stream]:
        """Instantiate one stream object per supported GitHub endpoint."""
        stream_types = (
            CommitsStream,
            CommunityProfileStream,
            IssueCommentsStream,
            IssueEventsStream,
            IssuesStream,
            PullRequestsStream,
            ReadmeStream,
            RepositoryStream,
        )
        return [stream_cls(tap=self) for stream_cls in stream_types]
# CLI Execution:
# Expose the tap's command-line entry point for console-script wiring.
cli = TapGitHub.cli
| [
"tap_github.streams.CommunityProfileStream",
"tap_github.streams.RepositoryStream",
"tap_github.streams.IssueEventsStream",
"tap_github.streams.IssuesStream",
"singer_sdk.typing.Property",
"singer_sdk.typing.ObjectType",
"tap_github.streams.IssueCommentsStream",
"singer_sdk.typing.ArrayType",
"tap_g... | [((1306, 1329), 'tap_github.streams.CommitsStream', 'CommitsStream', ([], {'tap': 'self'}), '(tap=self)\n', (1319, 1329), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1343, 1375), 'tap_github.streams.CommunityProfileStream', 'CommunityProfileStream', ([], {'tap': 'self'}), '(tap=self)\n', (1365, 1375), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1389, 1418), 'tap_github.streams.IssueCommentsStream', 'IssueCommentsStream', ([], {'tap': 'self'}), '(tap=self)\n', (1408, 1418), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1432, 1459), 'tap_github.streams.IssueEventsStream', 'IssueEventsStream', ([], {'tap': 'self'}), '(tap=self)\n', (1449, 1459), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1473, 1495), 'tap_github.streams.IssuesStream', 'IssuesStream', ([], {'tap': 'self'}), '(tap=self)\n', (1485, 1495), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1509, 1537), 'tap_github.streams.PullRequestsStream', 'PullRequestsStream', ([], {'tap': 'self'}), '(tap=self)\n', (1527, 1537), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1551, 1573), 'tap_github.streams.ReadmeStream', 'ReadmeStream', ([], {'tap': 
'self'}), '(tap=self)\n', (1563, 1573), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((1587, 1613), 'tap_github.streams.RepositoryStream', 'RepositoryStream', ([], {'tap': 'self'}), '(tap=self)\n', (1603, 1613), False, 'from tap_github.streams import CommitsStream, CommunityProfileStream, IssueCommentsStream, IssueEventsStream, IssuesStream, PullRequestsStream, ReadmeStream, RepositoryStream\n'), ((493, 533), 'singer_sdk.typing.Property', 'th.Property', (['"""user_agent"""', 'th.StringType'], {}), "('user_agent', th.StringType)\n", (504, 533), True, 'from singer_sdk import typing as th\n'), ((543, 590), 'singer_sdk.typing.Property', 'th.Property', (['"""metrics_log_level"""', 'th.StringType'], {}), "('metrics_log_level', th.StringType)\n", (554, 590), True, 'from singer_sdk import typing as th\n'), ((600, 640), 'singer_sdk.typing.Property', 'th.Property', (['"""auth_token"""', 'th.StringType'], {}), "('auth_token', th.StringType)\n", (611, 640), True, 'from singer_sdk import typing as th\n'), ((1005, 1047), 'singer_sdk.typing.Property', 'th.Property', (['"""start_date"""', 'th.DateTimeType'], {}), "('start_date', th.DateTimeType)\n", (1016, 1047), True, 'from singer_sdk import typing as th\n'), ((967, 994), 'singer_sdk.typing.ArrayType', 'th.ArrayType', (['th.StringType'], {}), '(th.StringType)\n', (979, 994), True, 'from singer_sdk import typing as th\n'), ((1084, 1099), 'singer_sdk.typing.ObjectType', 'th.ObjectType', ([], {}), '()\n', (1097, 1099), True, 'from singer_sdk import typing as th\n'), ((1143, 1158), 'singer_sdk.typing.ObjectType', 'th.ObjectType', ([], {}), '()\n', (1156, 1158), True, 'from singer_sdk import typing as th\n'), ((764, 813), 'singer_sdk.typing.Property', 'th.Property', (['"""name"""', 'th.StringType'], {'required': '(True)'}), "('name', th.StringType, required=True)\n", (775, 813), True, 'from 
singer_sdk import typing as th\n'), ((835, 885), 'singer_sdk.typing.Property', 'th.Property', (['"""query"""', 'th.StringType'], {'required': '(True)'}), "('query', th.StringType, required=True)\n", (846, 885), True, 'from singer_sdk import typing as th\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-10 08:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a nullable ``org`` foreign key from Event to org.Org."""
    dependencies = [
        ('org', '0001_initial'),
        ('event', '0002_auto_20180102_2143'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='org',
            # Nullable so pre-existing Event rows remain valid; deleting an
            # Org cascades to its events.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='org', to='org.Org'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((459, 570), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""org"""', 'to': '"""org.Org"""'}), "(null=True, on_delete=django.db.models.deletion.CASCADE,\n related_name='org', to='org.Org')\n", (476, 570), False, 'from django.db import migrations, models\n')] |
import digitalio
import pulseio
from smbusslave import SMBusSlave
# MCP23008 register addresses, as used by readreg()/writereg() below.
IODIR = 0x00    # I/O direction: 1 bit = input, 0 bit = output
IPOL = 0x01     # input polarity inversion (rejected below if set)
GPINTEN = 0x02  # interrupt-on-change enable per pin
DEFVAL = 0x03   # default compare value (accepted but unused here)
INTCON = 0x04   # interrupt control (accepted but unused here)
IOCON = 0x05    # device configuration register
GPPU = 0x06     # pull-up enable per pin
INTF = 0x07     # interrupt flags (read-only)
INTCAP = 0x08   # port state captured at interrupt time (read-only)
GPIO = 0x09     # port value
OLAT = 0x0a     # output latch
# Bit masks within IOCON.
IOCON_SEQOP = 1 << 5   # sequential-operation disable (unsupported here)
IOCON_ODR = 1 << 2     # open-drain INT output
IOCON_INTPOL = 1 << 1  # INT polarity: 1 = active high
# Pull up on interrupt pins are not supported
# Interrupts are not working yet, need PulseIn.value
class MCP23008Slave(SMBusSlave):
    """Emulates an MCP23008 8-bit I/O expander as an SMBus slave.

    Each of the 8 expander pins is backed by a Pin object wrapping an
    optional physical board pin; slots beyond the supplied pins are
    filled with dummies.  An optional ``intpin`` mirrors the chip's INT
    output (polarity/drive mode controlled through IOCON).
    """
    def __init__(self, pins, intpin=None):
        # pins: list of up to 8 board pins (mutated in place: padded
        # with None to length 8).  intpin: optional INT output pin.
        if len(pins) == 0:
            raise ValueError('pins is empty')
        super().__init__()
        pins.extend([None] * (8 - len(pins))) # Fill up with dummies
        self.pins = [Pin(pin, i) for i, pin in enumerate(pins)]
        self.int = None
        if intpin:
            self.int = digitalio.DigitalInOut(intpin)
            # Idle high: default INTPOL is 0 (active low).
            self.int.switch_to_output(True)
        self.protocol = SMBusSlave.SMBUS_BYTE_SEQ
        self.max_reg = 0x0a
        self.regs = [0] * (self.max_reg + 1)
        # Power-on default: all pins are inputs.
        self.regs[IODIR] = 0xff
        self.debug2 = False

    def check_events(self):
        """Poll pins for edges; latch INTF/INTCAP and raise INT on a new event."""
        prev_intf = self.regs[INTF]
        val = 0
        for i in range(8):
            val |= self.pins[i].interrupt << i
        self.regs[INTF] = val
        # Only assert INT on a fresh interrupt (none was pending before).
        if self.regs[INTF] and not prev_intf:
            val = 0
            for i in range(8):
                val |= self.pins[i].value << i
            val |= self.regs[INTF] # In case we're slow and have lost it. Revisit if IPOL is supported
            self.regs[INTCAP] = val
            self.set_interrupt()

    def readreg(self, reg):
        """Return the value of register ``reg``.

        GPIO is computed live from pin state (outputs read the latch);
        reading GPIO or INTCAP clears a pending interrupt, matching the
        chip's behavior.
        """
        if reg == GPIO:
            val = 0
            for i in range(8):
                if self.regs[IODIR] & (1 << i): # Is this an input?
                    val |= self.pins[i].value << i
                else:
                    val |= self.regs[OLAT] & (1 << i)
            if self.regs[INTF]:
                self.regs[INTF] = 0
                self.clear_interrupt()
        elif reg == INTCAP:
            val = self.regs[INTCAP]
            if self.regs[INTF]:
                self.regs[INTF] = 0
                self.clear_interrupt()
        else:
            val = self.regs[reg]
        if self.debug2:
            print("  0x%02x==0x%02x" % (reg, val))
        return val

    def writereg(self, reg, val):
        """Write ``val`` to register ``reg``, applying side effects.

        Direction/interrupt/pull-up writes reconfigure the affected
        pins; GPIO/OLAT writes drive output pins; INTF/INTCAP are
        read-only; IPOL and IOCON:SEQOP are unsupported and raise.
        """
        if self.debug2:
            print("  0x%02x=0x%02x" % (reg, val))
        # Bits that differ from the current register value.
        changed = self.regs[reg] ^ val
        if reg == IODIR:
            self.regs[IODIR] = val
            self.setpinmode(changed)
        elif reg == IPOL:
            if val:
                # Not used by the Linux driver
                raise NotImplementedError('IPOL is not implemented')
        elif reg == GPINTEN:
            self.regs[GPINTEN] = val
            self.setpinmode(changed)
        elif reg == DEFVAL:
            pass
        elif reg == INTCON:
            pass
        elif reg == IOCON:
            # Keep only the bits the chip implements.
            val &= 0b00111110
            if val & IOCON_SEQOP:
                # Not used by the Linux driver
                raise NotImplementedError('IOCON:SEQOP is not implemented')
            if self.int:
                if changed & IOCON_ODR:
                    if val & IOCON_ODR:
                        self.int.drive_mode = digitalio.DriveMode.OPEN_DRAIN
                    else:
                        self.int.drive_mode = digitalio.DriveMode.PUSH_PULL
                if changed & IOCON_INTPOL:
                    # Re-idle the INT line for the new polarity.
                    self.int.value = not val & IOCON_INTPOL
        elif reg == GPPU:
            self.regs[GPPU] = val
            self.setpinmode(changed)
        elif reg == INTF:
            return # Read only
        elif reg == INTCAP:
            return # Read only
        elif reg == GPIO or reg == OLAT:
            if reg == GPIO:
                self.regs[OLAT] = val
            for i in range(8):
                mask = 1 << i
                if changed & mask and not self.regs[IODIR] & mask: # Changed and not input
                    self.pins[i].value = val & mask
        # Store the (possibly masked) value; read-only regs returned above.
        self.regs[reg] = val

    def setpinmode(self, changed):
        """Reconfigure every pin whose mode bits changed (IODIR/GPINTEN/GPPU)."""
        for i in range(8):
            mask = 1 << i
            if changed & mask:
                if self.regs[IODIR] & mask:
                    interrupt = self.regs[GPINTEN] & mask
                    pull = digitalio.Pull.UP if self.regs[GPPU] & mask else None
                    self.pins[i].switch_to_input(pull, interrupt)
                else:
                    val = self.regs[OLAT] & mask
                    self.pins[i].switch_to_output(val)

    def set_interrupt(self):
        """Drive the INT line to its active level (per IOCON:INTPOL)."""
        if self.debug2:
            print('\nset_interrupt: INTF=%02x INTCAP=%02x\n' % (self.regs[INTF], self.regs[INTCAP]))
        if self.int:
            active = bool(self.regs[IOCON] & IOCON_INTPOL)
            self.int.value = active

    def clear_interrupt(self):
        """Return the INT line to its idle level (per IOCON:INTPOL)."""
        if self.debug2:
            print('\nclear_interrupt\n')
        if self.int:
            active = bool(self.regs[IOCON] & IOCON_INTPOL)
            self.int.value = not active
# Doubles as a DigitalInOut and PulseIn dummy for the Pin class
class DummyIO:
def __init__(self):
self.direction = digitalio.Direction.INPUT
self.drive_mode = digitalio.DriveMode.PUSH_PULL
self.value = False
self.pull = None
def switch_to_output(self, value=False, drive_mode=digitalio.DriveMode.PUSH_PULL):
self.direction = digitalio.Direction.OUTPUT
self.value = value
self.drive_mode = drive_mode
def switch_to_input(self, pull=None):
self.direction = digitalio.Direction.INPUT
self.pull = pull
if pull == digitalio.Pull.UP:
self.value = True
else:
self.value = False
def __len__(self):
return 0
class Pin:
    """One emulated expander pin, optionally backed by a physical pin.

    Plain inputs and outputs use digitalio.DigitalInOut; inputs with
    interrupt-on-change enabled are handed to pulseio.PulseIn so edges
    are counted between polls.  For a real pin exactly one of self.io /
    self.pulseio is active at a time; a slot created with pin=None uses
    one shared DummyIO for both roles.
    """
    def __init__(self, pin, index):
        self.pin = pin              # physical board pin, or None for a dummy slot
        self.index = index          # bit position 0-7 within the emulated port
        self.io = None              # active DigitalInOut, if any
        self.pulseio = None         # active PulseIn, if any
        self.pulseio_val = None     # last known logic level; None = unknown
        self.pulseio_maxlen = 10    # PulseIn capture buffer size
        self._interrupt = False     # edge latched since the last interrupt read
        self.debug = False
        if self.pin is None:
            # No physical pin: one DummyIO serves as both interfaces.
            self.io = DummyIO()
            self.pulseio = self.io
            self.pulseio_val = False
        else:
            self._ensure_io()

    def switch_to_output(self, value=False, drive_mode=digitalio.DriveMode.PUSH_PULL):
        """Drive the pin as an output with the given initial value."""
        self._ensure_io()
        if self.debug:
            print('%d.switch_to_output(%r)' % (self.index, value,))
        self.io.switch_to_output(value)

    # Edge/level?
    def switch_to_input(self, pull=None, interrupt=False):
        """Configure the pin as an input, optionally edge-monitored.

        With interrupt=True the pin is owned by PulseIn; pull-ups are
        not supported in that mode (see the module-level note).
        """
        if interrupt:
            self._ensure_pulseio()
        else:
            self._ensure_io()
        if self.debug:
            print('%s.switch_to_input(%r)' % (self.index, pull,))
        # BUGFIX: in interrupt mode on a real pin, _ensure_pulseio() sets
        # self.io to None (PulseIn has no switch_to_input), so the
        # unconditional call here used to raise AttributeError.  Only
        # configure the pull when a DigitalInOut (or DummyIO) is active.
        if self.io is not None:
            self.io.switch_to_input(pull)

    @property
    def value(self):
        """Current logic level of the pin (bool)."""
        if self.io is not None:
            val = bool(self.io.value)
            if self.debug and self.pin:
                print('%s.value == %r' % (self.index, val,))
            return val
        if self.pulseio is not None:
            val = self._get_pulseio_value()
            if val is not None:
                if self.debug:
                    print('%s.value == %r (%d)' % (self.index, val, len(self.pulseio)))
                return val
            # Unable to determine value so look at the pin directly:
            # temporarily reclaim it as a digital input, then restore PulseIn.
            self.pulseio.deinit()
            tmp = digitalio.DigitalInOut(self.pin)
            tmp.switch_to_input(None)
            val = tmp.value
            tmp.deinit()
            self.pulseio = None
            self._ensure_pulseio()
            if self.debug:
                print('%s.value(DIG) == %r' % (self.index, val,))
            return val
        raise ValueError('bug: neither io nor pulseio is set')

    @value.setter
    def value(self, val):
        if self.io is None or self.io.direction == digitalio.Direction.INPUT:
            raise AttributeError('Cannot set value when direction is input.')
        val = bool(val)
        self.io.value = val
        if self.debug:
            print('%s.value = %r' % (self.index, val,))

    @property
    def interrupt(self):
        """Return and clear the latched edge-seen flag (False if not monitored)."""
        if self.pulseio is None:
            return False
        val = self._interrupt
        self._interrupt = False
        return val

    def _get_pulseio_value(self):
        """Derive the current level from the captured pulse records.

        Drains the PulseIn buffer and toggles the last known level once
        per record (parity).  Returns None when the level cannot be
        determined, i.e. the buffer overflowed and records were lost.
        """
        pulses = [self.pulseio.popleft() for _ in range(len(self.pulseio))]
        num_pulses = len(pulses)
        if num_pulses == 0:
            return self.pulseio_val
        self._interrupt = True
        if num_pulses == self.pulseio_maxlen:
            # Buffer was full: transitions may have been dropped.
            return None
        if self.pulseio_val is None:
            self.pulseio_val = False
            num_pulses += 1  # The 'missing' first edge
        val = bool(self.pulseio_val ^ bool(num_pulses % 2))
        self.pulseio_val = val
        return val

    def _ensure_io(self):
        """Make DigitalInOut own the pin, releasing PulseIn if necessary."""
        if self.pin is None:
            return
        if self.pulseio is not None:
            if self.debug:
                print('%s.PulseIn(%r).deinit()' % (self.index, self.pin,))
            self.pulseio.deinit()
            self.pulseio = None
        if self.io is None:
            if self.debug:
                print('%d = DigitalInOut(%r)' % (self.index, self.pin,))
            self.io = digitalio.DigitalInOut(self.pin)

    def _ensure_pulseio(self):
        """Make PulseIn own the pin, releasing DigitalInOut if necessary."""
        if self.pin is None:
            return
        if self.io is not None:
            if self.debug:
                print('%s.DigitalInOut(%r).deinit()' % (self.index, self.pin,))
            self.io.deinit()
            self.io = None
        if self.pulseio is None:
            if self.debug:
                print('%s = PulseIn(%r, maxlen=%d)' % (self.index, self.pin, self.pulseio_maxlen,))
            self.pulseio = pulseio.PulseIn(self.pin, maxlen=self.pulseio_maxlen)  # , idle_state=False)
            self.pulseio_val = None
| [
"digitalio.DigitalInOut",
"pulseio.PulseIn"
] | [((752, 782), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['intpin'], {}), '(intpin)\n', (774, 782), False, 'import digitalio\n'), ((7334, 7366), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['self.pin'], {}), '(self.pin)\n', (7356, 7366), False, 'import digitalio\n'), ((9199, 9231), 'digitalio.DigitalInOut', 'digitalio.DigitalInOut', (['self.pin'], {}), '(self.pin)\n', (9221, 9231), False, 'import digitalio\n'), ((9694, 9747), 'pulseio.PulseIn', 'pulseio.PulseIn', (['self.pin'], {'maxlen': 'self.pulseio_maxlen'}), '(self.pin, maxlen=self.pulseio_maxlen)\n', (9709, 9747), False, 'import pulseio\n')] |
###############################
# MODULE: Object settings #
# AUTHOR: <NAME> #
# LAST UPDATE: 08/04/2019 #
###############################
import copy
from enum import Enum, IntEnum
from direct.gui.OnscreenText import TransparencyAttrib
# RGBA tint colors applied to the black/white figure models.
BLACK = (0.15, 0.15, 0.15, 1)
#BLACK = (0.0, 0.0, 0.0, 1)
WHITE = (0.75, 0.75, 0.75, 1)
class RenderState(Enum):
    """High-level state of the renderer (text input, menu, in-game)."""
    DEFAULT = -1
    INPUT = 0
    MENU = 1
    GAME = 2
class RenderModels(Enum):
    """Kinds of reusable geometry; only the flat plane is defined."""
    PLANE = 0
class RenderObject(IntEnum):
    """Identifiers for every renderable asset.

    Ordered so a white figure's value equals the matching black
    figure's value plus 6 (relied on by figure_as_render_object).
    """
    BLACK_KING = 0
    BLACK_QUEEN = 1
    BLACK_BISHOP = 2
    BLACK_KNIGHT = 3
    BLACK_ROOK = 4
    BLACK_PAWN = 5
    WHITE_KING = 6
    WHITE_QUEEN = 7
    WHITE_BISHOP = 8
    WHITE_KNIGHT = 9
    WHITE_ROOK = 10
    WHITE_PAWN = 11
    PLANE = 12
def figure_as_render_object(figure_latter):
    """Map a FEN-style figure letter to its RenderObject.

    Lowercase letters denote black figures; uppercase denote white ones
    (white RenderObject values are offset by 6). Supported letters are
    k, q, b, n, r, p in either case.

    :param figure_latter: one-character figure code, e.g. 'K' or 'p'.
    :return: the corresponding RenderObject member.
    :raises ValueError: if the letter is not a known figure code
        (previously such input silently mapped to a king).
    """
    offsets = {'k': 0, 'q': 1, 'b': 2, 'n': 3, 'r': 4, 'p': 5}
    lower = figure_latter.lower()
    if lower not in offsets:
        raise ValueError('unknown figure letter: %r' % (figure_latter,))
    res = offsets[lower]
    if figure_latter.isupper():
        res += 6
    return RenderObject(res)
class FigureMngr:
    """Loads and caches chess figure models/textures for two style packs.

    NOTE(review): relies on the global ``loader`` — presumably Panda3D's
    ShowBase asset loader injected into builtins; confirm against the
    application bootstrap.
    """
    def __init__(self, blackside_pack, whiteside_pack):
        # Pack names select per-side asset directories under chess_figures/.
        self.data_path = "ChessRender/data/"
        self.whiteside_pack_name = self.data_path + "chess_figures/" + whiteside_pack + "/"
        self.blackside_pack_name = self.data_path + "chess_figures/" + blackside_pack + "/"
        # 2D sprite textures, one per figure/side.
        self.textures = dict({
            RenderObject.BLACK_KING : loader.loadTexture(self.blackside_pack_name + "bK.png"),
            RenderObject.BLACK_QUEEN : loader.loadTexture(self.blackside_pack_name + "bQ.png"),
            RenderObject.BLACK_BISHOP : loader.loadTexture(self.blackside_pack_name + "bB.png"),
            RenderObject.BLACK_KNIGHT : loader.loadTexture(self.blackside_pack_name + "bN.png"),
            RenderObject.BLACK_ROOK : loader.loadTexture(self.blackside_pack_name + "bR.png"),
            RenderObject.BLACK_PAWN : loader.loadTexture(self.blackside_pack_name + "bP.png"),
            RenderObject.WHITE_KING : loader.loadTexture(self.whiteside_pack_name + "wK.png"),
            RenderObject.WHITE_QUEEN : loader.loadTexture(self.whiteside_pack_name + "wQ.png"),
            RenderObject.WHITE_BISHOP : loader.loadTexture(self.whiteside_pack_name + "wB.png"),
            RenderObject.WHITE_KNIGHT : loader.loadTexture(self.whiteside_pack_name + "wN.png"),
            RenderObject.WHITE_ROOK : loader.loadTexture(self.whiteside_pack_name + "wR.png"),
            RenderObject.WHITE_PAWN : loader.loadTexture(self.whiteside_pack_name + "wP.png"),
        })
        # 3D models, one per figure/side, plus the shared textured plane.
        self.modeles = dict({
            RenderObject.WHITE_KING: loader.loadModel(self.whiteside_pack_name + "king.egg"),
            RenderObject.WHITE_QUEEN: loader.loadModel(self.whiteside_pack_name + "queen.egg"),
            RenderObject.WHITE_BISHOP: loader.loadModel(self.whiteside_pack_name + "bishop.egg"),
            RenderObject.WHITE_KNIGHT: loader.loadModel(self.whiteside_pack_name + "knight.egg"),
            RenderObject.WHITE_ROOK: loader.loadModel(self.whiteside_pack_name + "rook.egg"),
            RenderObject.WHITE_PAWN: loader.loadModel(self.whiteside_pack_name + "pawn.egg"),
            RenderObject.BLACK_KING: loader.loadModel(self.blackside_pack_name + "king.egg"),
            RenderObject.BLACK_QUEEN: loader.loadModel(self.blackside_pack_name + "queen.egg"),
            RenderObject.BLACK_BISHOP: loader.loadModel(self.blackside_pack_name + "bishop.egg"),
            RenderObject.BLACK_KNIGHT: loader.loadModel(self.blackside_pack_name + "knight.egg"),
            RenderObject.BLACK_ROOK: loader.loadModel(self.blackside_pack_name + "rook.egg"),
            RenderObject.BLACK_PAWN: loader.loadModel(self.blackside_pack_name + "pawn.egg"),
            RenderObject.PLANE: loader.loadModel(self.data_path + "plane.egg")
        })

    def load_figure_model(self, figure_latter):
        """Return a deep copy of the 3D model for a figure letter, tinted by side."""
        render_obj = figure_as_render_object(figure_latter)
        # Deep-copied so each board square owns an independent node.
        obj = copy.deepcopy(self.modeles[render_obj])
        if RenderObject.BLACK_KING <= RenderObject(render_obj) <= RenderObject.BLACK_PAWN:
            obj.setColor(BLACK)
        else:
            obj.setColor(WHITE)
        return obj

    def load_figure_model_2D(self, figure_latter):
        """Return a textured plane (2D sprite) for a figure letter."""
        render_obj = figure_as_render_object(figure_latter)
        return self.load_plane_object(render_obj)

    def load_plane_object(self, render_object):
        """Return a copy of the plane model textured with the given sprite."""
        obj = copy.deepcopy(self.modeles[RenderObject.PLANE])
        texture = copy.deepcopy(self.textures[render_object])
        obj.set_texture(texture)
        # Sprites use the PNG alpha channel for transparency.
        obj.setTransparency(TransparencyAttrib.MAlpha)
        return obj

    def load_cube(self):
        """Load and return the shared cube model."""
        self.data_path = "ChessRender/data/"
        obj = loader.loadModel(self.data_path + "cube.egg")
        return obj

    def load_plane_textured(self, texture_path):
        """Return a plane copy textured from an arbitrary file path (or untextured)."""
        obj = copy.deepcopy(self.modeles[RenderObject.PLANE])
        if texture_path is not None:
            texture = loader.loadTexture(texture_path)
            obj.set_texture(texture)
        obj.setTransparency(TransparencyAttrib.MAlpha)
        return obj

    def load_skybox_white_side(self):
        """Load the skybox model for the white-side pack."""
        return loader.loadModel(self.whiteside_pack_name + "cubemap.bam")

    def load_skybox_black_side(self):
        """Load the skybox model for the black-side pack."""
        return loader.loadModel(self.blackside_pack_name + "cubemap.bam")
| [
"copy.deepcopy"
] | [((4084, 4123), 'copy.deepcopy', 'copy.deepcopy', (['self.modeles[render_obj]'], {}), '(self.modeles[render_obj])\n', (4097, 4123), False, 'import copy\n'), ((4537, 4584), 'copy.deepcopy', 'copy.deepcopy', (['self.modeles[RenderObject.PLANE]'], {}), '(self.modeles[RenderObject.PLANE])\n', (4550, 4584), False, 'import copy\n'), ((4603, 4646), 'copy.deepcopy', 'copy.deepcopy', (['self.textures[render_object]'], {}), '(self.textures[render_object])\n', (4616, 4646), False, 'import copy\n'), ((4968, 5015), 'copy.deepcopy', 'copy.deepcopy', (['self.modeles[RenderObject.PLANE]'], {}), '(self.modeles[RenderObject.PLANE])\n', (4981, 5015), False, 'import copy\n')] |
# encoding: utf-8
'''
Match articles with annotated data
'''
from collections import defaultdict
import argparse
from blamepipeline.preprocess.dataloader import Dataset
case1, case2 = 0, 0
def match_data(source):
dataset = Dataset(source)
articles = dataset.get_articles()
entries = dataset.get_entries()
date_articles = defaultdict(list)
for article in articles:
date_articles[article['date']].append(article)
print('{} dates of {} articles loaded.'.format(len(date_articles), len(articles)))
print('{} entries loaded.'.format(len(entries)))
title_match = 0
subtitle_match = 0
pairs = []
def matches(entry_title, article_title):
if not entry_title or len(entry_title) < 10:
return False
elif entry_title and article_title and entry_title == article_title:
return True
elif entry_title and entry_title in article_title:
return True
return False
for entry in entries:
for article in date_articles[entry['date']]:
if matches(entry['title'], article['title']):
title_match += 1
pairs.append((entry, article))
break
elif matches(entry['title'], article['subtitle']):
subtitle_match += 1
pairs.append((entry, article))
break
print('title match:', title_match)
print('subtitle match:', subtitle_match)
return pairs
def main(args):
if args.source == 'all':
sources = ['FOX']
else:
sources = [args.source.upper()]
for source in sources:
print(source)
pairs = match_data(source)
print('matched pairs:', len(pairs))
print('---')
global case1, case2
print(f'{case1}, {case2}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='match articles and entries')
parser.add_argument('--source', type=str, choices=['all', 'fox'], default='all')
args = parser.parse_args()
main(args)
| [
"blamepipeline.preprocess.dataloader.Dataset",
"collections.defaultdict",
"argparse.ArgumentParser"
] | [((233, 248), 'blamepipeline.preprocess.dataloader.Dataset', 'Dataset', (['source'], {}), '(source)\n', (240, 248), False, 'from blamepipeline.preprocess.dataloader import Dataset\n'), ((344, 361), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (355, 361), False, 'from collections import defaultdict\n'), ((1855, 1920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""match articles and entries"""'}), "(description='match articles and entries')\n", (1878, 1920), False, 'import argparse\n')] |
import tensorflow as tf
i = tf.compat.v1.constant(0, name="Hole")
c = lambda i: tf.compat.v1.less(i, 10)
b = lambda i: tf.compat.v1.add(i, 1)
r = tf.compat.v1.while_loop(c, b, [i], name="While")
| [
"tensorflow.compat.v1.while_loop",
"tensorflow.compat.v1.less",
"tensorflow.compat.v1.constant",
"tensorflow.compat.v1.add"
] | [((29, 66), 'tensorflow.compat.v1.constant', 'tf.compat.v1.constant', (['(0)'], {'name': '"""Hole"""'}), "(0, name='Hole')\n", (50, 66), True, 'import tensorflow as tf\n'), ((148, 196), 'tensorflow.compat.v1.while_loop', 'tf.compat.v1.while_loop', (['c', 'b', '[i]'], {'name': '"""While"""'}), "(c, b, [i], name='While')\n", (171, 196), True, 'import tensorflow as tf\n'), ((82, 106), 'tensorflow.compat.v1.less', 'tf.compat.v1.less', (['i', '(10)'], {}), '(i, 10)\n', (99, 106), True, 'import tensorflow as tf\n'), ((121, 143), 'tensorflow.compat.v1.add', 'tf.compat.v1.add', (['i', '(1)'], {}), '(i, 1)\n', (137, 143), True, 'import tensorflow as tf\n')] |
import unittest
import decimal
import prosperpy
def get_prices():
prices = ['90.704', '92.900', '92.978', '91.802', '92.665', '92.684', '92.302', '92.773', '92.537', '92.949',
'93.204', '91.067', '89.832', '89.744', '90.399', '90.739', '88.018', '88.087', '88.844', '90.778',
'90.542', '91.389', '90.650']
return [decimal.Decimal(price) for price in prices]
class TestSimpleMovingAverage(unittest.TestCase):
def test_simple_moving_average(self):
prices = get_prices()
data = [('91.2422', '94.53214587189516', '87.95225412810484', '6.57989174379032'),
('91.16665', '94.36908071900080', '87.96421928099920', '6.40486143800160'),
('91.05025', '94.14840337741694', '87.95209662258306', '6.19630675483388')]
data = [(decimal.Decimal(item[0]), decimal.Decimal(item[1]), decimal.Decimal(item[2])) for item in data]
bollinger_bands = prosperpy.overlays.BollingerBands(prices[:20])
self.assertEqual(bollinger_bands.moving_average.value, decimal.Decimal('91.2503'))
self.assertEqual(bollinger_bands.upper, decimal.Decimal('94.53410225348604'))
self.assertEqual(bollinger_bands.lower, decimal.Decimal('87.96649774651396'))
self.assertEqual(bollinger_bands.bandwidth, decimal.Decimal('6.56760450697208'))
for price, value in zip(prices[20:], data):
bollinger_bands.add(price)
self.assertEqual(bollinger_bands.moving_average.value, value[0])
self.assertEqual(bollinger_bands.upper, value[1])
self.assertEqual(bollinger_bands.lower, value[2])
| [
"prosperpy.overlays.BollingerBands",
"decimal.Decimal"
] | [((353, 375), 'decimal.Decimal', 'decimal.Decimal', (['price'], {}), '(price)\n', (368, 375), False, 'import decimal\n'), ((936, 982), 'prosperpy.overlays.BollingerBands', 'prosperpy.overlays.BollingerBands', (['prices[:20]'], {}), '(prices[:20])\n', (969, 982), False, 'import prosperpy\n'), ((1046, 1072), 'decimal.Decimal', 'decimal.Decimal', (['"""91.2503"""'], {}), "('91.2503')\n", (1061, 1072), False, 'import decimal\n'), ((1122, 1158), 'decimal.Decimal', 'decimal.Decimal', (['"""94.53410225348604"""'], {}), "('94.53410225348604')\n", (1137, 1158), False, 'import decimal\n'), ((1208, 1244), 'decimal.Decimal', 'decimal.Decimal', (['"""87.96649774651396"""'], {}), "('87.96649774651396')\n", (1223, 1244), False, 'import decimal\n'), ((1298, 1333), 'decimal.Decimal', 'decimal.Decimal', (['"""6.56760450697208"""'], {}), "('6.56760450697208')\n", (1313, 1333), False, 'import decimal\n'), ((813, 837), 'decimal.Decimal', 'decimal.Decimal', (['item[0]'], {}), '(item[0])\n', (828, 837), False, 'import decimal\n'), ((839, 863), 'decimal.Decimal', 'decimal.Decimal', (['item[1]'], {}), '(item[1])\n', (854, 863), False, 'import decimal\n'), ((865, 889), 'decimal.Decimal', 'decimal.Decimal', (['item[2]'], {}), '(item[2])\n', (880, 889), False, 'import decimal\n')] |
import os
import math
from scapy.all import *
from pytest_main import eth_config
from pytest_main import dump_eth_config
def getstatusoutput(cmd):
pipe = os.popen(cmd + " 2>&1", 'r')
text = pipe.read()
sts = pipe.close()
if sts is None: sts=0
if text[-1:] == "\n": text = text[:-1]
return sts, text
def getoutput(cmd):
result = getstatusoutput(cmd)
return result[1]
def send_all_pkts( pkts_dir , pkts_list):
for pkt_name in pkts_list:
# print("send pkt : " + pkts_dir + pkts_list)
ret = getoutput("/bin/bash ./tools/send_pkt_raw.sh " + eth_config + " " + pkts_dir + pkt_name)
ret_value = ret.split('\n')[-1]
# print(ret_value)
assert int(ret_value) == 0
tcpdump_output="./result/tresult.pcap"
def send_pkts_with_tcpdump( pkts_dir , pkt_name , waittime = 1):
sh_str_tmp = "/bin/bash ./tools/send_pkt_with_tcpdump.sh {0} {1} {2} {3} {4} 2>&1; echo send_pkt_with_tcpdump_result:$?"
sh_str = sh_str_tmp.format(eth_config , tcpdump_output , pkts_dir+pkt_name , dump_eth_config, waittime)
print(sh_str)
ret = getoutput(sh_str)
ret_value_list = ret.split('\n')
ret_value = -1
for line in ret_value_list:
if line.find("send_pkt_with_tcpdump_result") != -1:
tmp = line.split(":")
ret_value = int(tmp[1])
# print(ret)
assert int(ret_value) == 0
#this function user tcpdump options -Q so:tcpdump version must >4.9.1
def send_pkts_with_tcpdump_with_direction( pkts_dir , pkt_name , direction = "inout" ,waittime = 1):
sh_str_tmp = "/bin/bash ./tools/send_pkt_with_tcpdump_with_direction.sh {0} {1} {2} {3} {4} {5} 2>&1; echo send_pkt_with_tcpdump_result_with_direction:$?"
sh_str = sh_str_tmp.format(eth_config , tcpdump_output , pkts_dir+pkt_name , dump_eth_config, direction ,waittime)
print(sh_str)
ret = getoutput(sh_str)
ret_value_list = ret.split('\n')
ret_value = -1
for line in ret_value_list:
if line.find("send_pkt_with_tcpdump_result_with_direction") != -1:
tmp = line.split(":")
ret_value = int(tmp[1])
assert int(ret_value) == 0
def pkt_tcpdump_count(count = 1):
sh_str = "/bin/bash ./tools/pkt_tcpdump.sh {0} {1} {2} 2>&1; echo pkt_tcp_dump_result:$?".format( dump_eth_config, tcpdump_output, count)
print(sh_str)
ret = getoutput(sh_str)
ret_value_list = ret.split('\n')
ret_value = -1
for line in ret_value_list:
if line.find("pkt_tcp_dump_result") != -1:
tmp = line.split(":")
ret_value = int(tmp[1])
assert int(ret_value) == 0
def check_pkt_content(captured_pkt_list , check_pkts):
print(captured_pkt_list)
print(check_pkts)
assert len(captured_pkt_list) == len(check_pkts)
for i in range(len(check_pkts)):
a = raw(captured_pkt_list[i])
b = raw(check_pkts[i])
assert a == b
# editcap_num : New packages are generated from the <editcap_num> package
def editcap_pkt_num(pkt, begin_editcap_num, end_edtcap_num = None):
if end_edtcap_num != None:
editcap_str = "editcap -r {0} {0} {1}-{2}".format(pkt, begin_editcap_num, end_edtcap_num)
else:
editcap_str = "editcap -r {0} {0} {1}".format(pkt, begin_editcap_num)
ret = getstatusoutput(editcap_str)
assert ret[0] >= 0
| [
"os.popen"
] | [((161, 189), 'os.popen', 'os.popen', (["(cmd + ' 2>&1')", '"""r"""'], {}), "(cmd + ' 2>&1', 'r')\n", (169, 189), False, 'import os\n')] |
import os
import sys
import time
import torch
import torch.nn as nn
import random
import numpy as np
import torchvision.transforms as transforms
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_ROOT = os.path.join(FILE_DIR, '../../../data')
sys.path.append(os.path.join(FILE_DIR, '../'))
sys.path.append(os.path.join(FILE_DIR, '../../'))
from dataset import CIFAR10, CIFAR100
from utils import BaseTrainer, Partition
class CIFARTrainer(BaseTrainer):
def set_dataloader(self):
"""The function to set the dataset parameters"""
if self.args.dataset == 'CIFAR10':
self.dataset = CIFAR10
self.num_classes = 10
self.dataset_size = 60000
elif self.args.dataset == 'CIFAR100':
self.dataset = CIFAR100
self.num_classes = 100
self.dataset_size = 60000
if self.args.if_data_augmentation:
print('With data augmentation')
transform_train = transforms.Compose([transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(), transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
else:
print('Without data augmentation')
transform_train = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465),
(0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
self.transform_train = transform_train
self.transform_test = transform_test
### Set partition
if self.args.partition == 'target':
indices = np.arange(self.dataset_size).astype(int)
np.random.shuffle(indices)
np.save(os.path.join(self.save_dir, 'full_idx'), indices)
partition = Partition(dataset_size=self.dataset_size, indices=indices)
self.partition = partition
self.trainset_idx, self.testset_idx = partition.get_target_indices()
elif self.args.partition == 'shadow':
try:
target_path = os.path.join(self.save_dir.replace("shadow", ""), 'full_idx.npy')
indices = np.load(target_path)
print('Load indices from target model:', target_path)
except:
print('Cannot find target model, reinitialize indices')
indices = np.arange(self.dataset_size).astype(int)
np.random.shuffle(indices)
np.save(os.path.join(self.save_dir, 'full_idx'), indices)
partition = Partition(dataset_size=self.dataset_size, indices=indices)
self.partition = partition
self.trainset_idx, self.testset_idx = partition.get_shadow_indices()
## Set dataloader
trainset = self.dataset(root=self.data_root, indices=self.trainset_idx,
download=True, transform=self.transform_train)
testset = self.dataset(root=self.data_root, indices=self.testset_idx,
download=True, transform=self.transform_test)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.args.train_batchsize,
shuffle=True, num_workers=self.args.num_workers)
testloader = torch.utils.data.DataLoader(testset, batch_size=self.args.test_batchsize,
shuffle=False, num_workers=self.args.num_workers)
self.trainset = trainset
self.trainloader = trainloader
self.testset = testset
self.testloader = testloader
| [
"utils.Partition",
"os.path.join",
"torchvision.transforms.RandomHorizontalFlip",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"os.path.abspath",
"torchvision.transforms.ToTensor",
"numpy.load",
"numpy.arange",
"numpy.random.shuffle"
] | [((212, 251), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../../../data"""'], {}), "(FILE_DIR, '../../../data')\n", (224, 251), False, 'import os\n'), ((173, 198), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (188, 198), False, 'import os\n'), ((268, 297), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../"""'], {}), "(FILE_DIR, '../')\n", (280, 297), False, 'import os\n'), ((315, 347), 'os.path.join', 'os.path.join', (['FILE_DIR', '"""../../"""'], {}), "(FILE_DIR, '../../')\n", (327, 347), False, 'import os\n'), ((3511, 3639), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['trainset'], {'batch_size': 'self.args.train_batchsize', 'shuffle': '(True)', 'num_workers': 'self.args.num_workers'}), '(trainset, batch_size=self.args.train_batchsize,\n shuffle=True, num_workers=self.args.num_workers)\n', (3538, 3639), False, 'import torch\n'), ((3707, 3834), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['testset'], {'batch_size': 'self.args.test_batchsize', 'shuffle': '(False)', 'num_workers': 'self.args.num_workers'}), '(testset, batch_size=self.args.test_batchsize,\n shuffle=False, num_workers=self.args.num_workers)\n', (3734, 3834), False, 'import torch\n'), ((2093, 2119), 'numpy.random.shuffle', 'np.random.shuffle', (['indices'], {}), '(indices)\n', (2110, 2119), True, 'import numpy as np\n'), ((2214, 2272), 'utils.Partition', 'Partition', ([], {'dataset_size': 'self.dataset_size', 'indices': 'indices'}), '(dataset_size=self.dataset_size, indices=indices)\n', (2223, 2272), False, 'from utils import BaseTrainer, Partition\n'), ((1711, 1732), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1730, 1732), True, 'import torchvision.transforms as transforms\n'), ((1779, 1850), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1799, 1850), True, 
'import torchvision.transforms as transforms\n'), ((2140, 2179), 'os.path.join', 'os.path.join', (['self.save_dir', '"""full_idx"""'], {}), "(self.save_dir, 'full_idx')\n", (2152, 2179), False, 'import os\n'), ((2969, 3027), 'utils.Partition', 'Partition', ([], {'dataset_size': 'self.dataset_size', 'indices': 'indices'}), '(dataset_size=self.dataset_size, indices=indices)\n', (2978, 3027), False, 'from utils import BaseTrainer, Partition\n'), ((993, 1029), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(32)'], {'padding': '(4)'}), '(32, padding=4)\n', (1014, 1029), True, 'import torchvision.transforms as transforms\n'), ((1081, 1114), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1112, 1114), True, 'import torchvision.transforms as transforms\n'), ((1116, 1137), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1135, 1137), True, 'import torchvision.transforms as transforms\n'), ((1189, 1260), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1209, 1260), True, 'import torchvision.transforms as transforms\n'), ((1446, 1467), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1465, 1467), True, 'import torchvision.transforms as transforms\n'), ((1519, 1590), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.4914, 0.4822, 0.4465)', '(0.2023, 0.1994, 0.201)'], {}), '((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))\n', (1539, 1590), True, 'import torchvision.transforms as transforms\n'), ((2040, 2068), 'numpy.arange', 'np.arange', (['self.dataset_size'], {}), '(self.dataset_size)\n', (2049, 2068), True, 'import numpy as np\n'), ((2578, 2598), 'numpy.load', 'np.load', (['target_path'], {}), '(target_path)\n', (2585, 2598), True, 'import numpy as np\n'), ((2844, 2870), 'numpy.random.shuffle', 
'np.random.shuffle', (['indices'], {}), '(indices)\n', (2861, 2870), True, 'import numpy as np\n'), ((2895, 2934), 'os.path.join', 'os.path.join', (['self.save_dir', '"""full_idx"""'], {}), "(self.save_dir, 'full_idx')\n", (2907, 2934), False, 'import os\n'), ((2787, 2815), 'numpy.arange', 'np.arange', (['self.dataset_size'], {}), '(self.dataset_size)\n', (2796, 2815), True, 'import numpy as np\n')] |
# HANGMAN GAME
from collections import namedtuple
import main
game_board = namedtuple('game_board', ['board', 'mistakes', 'letters', 'status'])
def welcome():
"""Starts the game."""
print("Welcome")
word = main._choose_word()
_print_start_game()
_print_start_spaces(word)
game_board.letters = []
game_board.mistakes = -1
game_board.status = True
while game_board.status:
user_input = input("Guess a letter. To get a hint, type hint. To quit, type QUIT: \n").lower()
if user_input != 'QUIT' and user_input != 'hint' and user_input != "\n":
print('You guessed:', user_input, '\n')
_check_input(user_input, word)
_update_blank_spaces(user_input, word)
elif user_input == 'hint':
hint = main._hint(word)
print(hint.upper())
_print_board()
_update_blank_spaces(user_input, word)
else:
print("Thanks for playing!")
game_board.status = False
print("Your word was: ", word)
print('GAME OVER')
def _print_start_game() -> None:
"""Prints the starting game board."""
top = ' _____\n'
hang1 = '| |\n'
hang2 = '| |\n'
leg1 = ' |\n'
leg2 = ' |\n'
leg3 = ' |\n'
stand = '______\n'
game_board.board = [top + hang1 + hang2 + leg1 + leg2 + leg3 + stand]
_print_board()
def _print_start_spaces(word) -> None:
for spaces in word:
if spaces == " ":
print(" ", end='')
else:
print("_ ", end='')
print()
print()
def _check_input(user_input: str, word: str):
"""Checks if there is or isn't a wrong answer."""
count_letters = 0
for letters in word:
if user_input == letters:
count_letters += 1
if count_letters > 1:
print('You guessed correctly:', count_letters, 'letters\n')
else:
print('You guessed correctly:', count_letters, 'letter\n')
if count_letters == 0:
_wrong_answers()
else:
game_board.letters.append(user_input)
_print_board()
def _wrong_answers():
"""Prints the man on the hangman board."""
game_board.mistakes += 1
top = ' ____\n'
hang1 = ' | |\n'
hang2 = ' | |\n'
top_body = top + hang1 + hang2
wrong_answers = [' o |\n',' | |\n', '\| |\n', '\|/ |\n', '/ |\n', '/ \ |\n', ' _____\n']
rest_of_body = [' |\n', ' |\n', ' _____\n']
if game_board.mistakes == 0:
game_board.board = [top_body + wrong_answers[0] + rest_of_body[0] + rest_of_body[1] + rest_of_body[2]]
_print_board()
elif game_board.mistakes == 1:
game_board.board = [top_body + wrong_answers[0] + wrong_answers[1]+ rest_of_body[1] + rest_of_body[2]]
_print_board()
elif game_board.mistakes == 2:
game_board.board = [top_body + wrong_answers[0] + wrong_answers[2] + rest_of_body[1] + rest_of_body[2]]
_print_board()
elif game_board.mistakes == 3:
game_board.board = [top_body + wrong_answers[0] + wrong_answers[3] + rest_of_body[1] + rest_of_body[2]]
_print_board()
elif game_board.mistakes == 4:
game_board.board = [top_body + wrong_answers[0] + wrong_answers[3] + wrong_answers[4] + rest_of_body[2]]
_print_board()
elif game_board.mistakes == 5:
game_board.board = [top_body + wrong_answers[0] + wrong_answers[3] + wrong_answers[5] + rest_of_body[2]]
_print_board()
_game_over()
def _update_blank_spaces(user_input, word):
"""Prints out the letter spaces."""
for letter in word:
if letter == user_input:
print(letter, end='')
elif letter in game_board.letters:
print(letter, end='')
elif letter == ' ':
print(" ", end='')
else:
print('_ ', end='')
print()
print()
_check_winner(word)
def _print_board():
"""Prints the board."""
for piece in game_board.board:
print(piece)
def _check_winner(word):
"""Checks if there is a winner."""
how_many = 0
for letter in word:
if letter in game_board.letters:
how_many += 1
if letter == " ":
how_many += 1
if how_many == len(word):
print('WINNER')
game_board.status = False
def _game_over():
"""Ends game."""
game_board.status = False
if __name__ == "__main__":
welcome()
| [
"main._hint",
"collections.namedtuple",
"main._choose_word"
] | [((76, 144), 'collections.namedtuple', 'namedtuple', (['"""game_board"""', "['board', 'mistakes', 'letters', 'status']"], {}), "('game_board', ['board', 'mistakes', 'letters', 'status'])\n", (86, 144), False, 'from collections import namedtuple\n'), ((219, 238), 'main._choose_word', 'main._choose_word', ([], {}), '()\n', (236, 238), False, 'import main\n'), ((792, 808), 'main._hint', 'main._hint', (['word'], {}), '(word)\n', (802, 808), False, 'import main\n')] |
# ------------------------------------------------------------------------------
# Joyous events models
# ------------------------------------------------------------------------------
import datetime as dt
from django.db import models
from django.db.models.query import ModelIterable
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from wagtail.core.models import Page
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
from ..utils.telltime import (todayUtc, getAwareDatetime, getLocalDatetime,
getLocalDate, getLocalTime)
from ..utils.telltime import timeFormat
from ..edit_handlers import TimePanel
from ..forms import FormDefender
from .groups import get_group_model_string
from .event_base import (ThisEvent, EventsByDayList,
EventManager, EventQuerySet, EventPageForm, EventBase)
# ------------------------------------------------------------------------------
# Helper types and constants
# ------------------------------------------------------------------------------
_1day = dt.timedelta(days=1)
_2days = dt.timedelta(days=2)
# ------------------------------------------------------------------------------
# Event models
# ------------------------------------------------------------------------------
class SimpleEventQuerySet(EventQuerySet):
def current(self):
qs = super().current()
return qs.filter(date__gte = todayUtc() - _1day)
def future(self):
qs = super().future()
return qs.filter(date__gte = todayUtc() - _1day)
def past(self):
qs = super().past()
return qs.filter(date__lte = todayUtc() + _1day)
def byDay(self, fromDate, toDate):
request = self.request
class ByDayIterable(ModelIterable):
def __iter__(self):
evods = EventsByDayList(fromDate, toDate)
for page in super().__iter__():
pageFromDate = getLocalDate(page.date,
page.time_from, page.tz)
pageToDate = getLocalDate(page.date,
page.time_to, page.tz)
thisEvent = ThisEvent(page, url=page.get_url(request))
evods.add(thisEvent, pageFromDate, pageToDate)
yield from evods
qs = self._clone()
qs._iterable_class = ByDayIterable
return qs.filter(date__range=(fromDate - _2days, toDate + _2days))
class SimpleEventPage(EventBase, Page, metaclass=FormDefender):
events = EventManager.from_queryset(SimpleEventQuerySet)()
class Meta:
verbose_name = _("event page")
verbose_name_plural = _("event pages")
default_manager_name = "objects"
parent_page_types = ["joyous.CalendarPage",
"joyous.SpecificCalendarPage",
"joyous.GeneralCalendarPage",
get_group_model_string()]
subpage_types = []
base_form_class = EventPageForm
date = models.DateField(_("date"), default=dt.date.today)
content_panels = Page.content_panels + [
FieldPanel('category'),
ImageChooserPanel('image'),
FieldPanel('date'),
TimePanel('time_from'),
TimePanel('time_to'),
FieldPanel('tz'),
] + EventBase.content_panels1
# Anything inheriting from models.Model needs its own __init__ or
# modeltranslation patch_constructor may break it
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def when(self):
"""
A string describing when the event occurs (in the local time zone).
"""
return self._getLocalWhen(self.date)
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date, self.time_from, self.tz)
def _getFromDt(self):
"""
Datetime that the event starts (in the local time zone).
"""
return getLocalDatetime(self.date, self.time_from, self.tz)
def _getToDt(self):
"""
Datetime that the event ends (in the local time zone).
"""
return getLocalDatetime(self.date, self.time_to, self.tz)
# ------------------------------------------------------------------------------
class MultidayEventQuerySet(EventQuerySet):
def current(self):
qs = super().current()
return qs.filter(date_to__gte = todayUtc() - _1day)
def future(self):
qs = super().future()
return qs.filter(date_from__gte = todayUtc() - _1day)
def past(self):
qs = super().past()
return qs.filter(date_from__lte = todayUtc() + _1day)
def byDay(self, fromDate, toDate):
request = self.request
class ByDayIterable(ModelIterable):
def __iter__(self):
evods = EventsByDayList(fromDate, toDate)
for page in super().__iter__():
pageFromDate = getLocalDate(page.date_from,
page.time_from, page.tz)
pageToDate = getLocalDate(page.date_to,
page.time_to, page.tz)
thisEvent = ThisEvent(page, url=page.get_url(request))
evods.add(thisEvent, pageFromDate, pageToDate)
yield from evods
qs = self._clone()
qs._iterable_class = ByDayIterable
return qs.filter(date_to__gte = fromDate - _2days) \
.filter(date_from__lte = toDate + _2days)
class MultidayEventPageForm(EventPageForm):
def _checkStartBeforeEnd(self, cleaned_data):
startDate = cleaned_data.get('date_from', dt.date.min)
endDate = cleaned_data.get('date_to', dt.date.max)
if startDate > endDate:
self.add_error('date_to', _("Event cannot end before it starts"))
elif startDate == endDate:
super()._checkStartBeforeEnd(cleaned_data)
class MultidayEventPage(EventBase, Page, metaclass=FormDefender):
events = EventManager.from_queryset(MultidayEventQuerySet)()
class Meta:
verbose_name = _("multiday event page")
verbose_name_plural = _("multiday event pages")
default_manager_name = "objects"
parent_page_types = ["joyous.CalendarPage",
"joyous.SpecificCalendarPage",
"joyous.GeneralCalendarPage",
get_group_model_string()]
subpage_types = []
base_form_class = MultidayEventPageForm
date_from = models.DateField(_("start date"), default=dt.date.today)
date_to = models.DateField(_("end date"), default=dt.date.today)
content_panels = Page.content_panels + [
FieldPanel('category'),
ImageChooserPanel('image'),
FieldPanel('date_from'),
TimePanel('time_from'),
FieldPanel('date_to'),
TimePanel('time_to'),
FieldPanel('tz'),
] + EventBase.content_panels1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def when(self):
"""
A string describing when the event occurs (in the local time zone).
"""
return self._getLocalWhen(self.date_from, self.date_to)
def _getFromTime(self, atDate=None):
"""
Time that the event starts (in the local time zone).
"""
return getLocalTime(self.date_from, self.time_from, self.tz)
def _getFromDt(self):
"""
Datetime that the event starts (in the local time zone).
"""
return getLocalDatetime(self.date_from, self.time_from, self.tz)
def _getToDt(self):
"""
Datetime that the event ends (in the local time zone).
"""
return getLocalDatetime(self.date_to, self.time_to, self.tz)
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
| [
"wagtail.admin.edit_handlers.FieldPanel",
"wagtail.images.edit_handlers.ImageChooserPanel",
"datetime.timedelta",
"django.utils.translation.gettext_lazy"
] | [((1105, 1125), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1117, 1125), True, 'import datetime as dt\n'), ((1135, 1155), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(2)'}), '(days=2)\n', (1147, 1155), True, 'import datetime as dt\n'), ((2707, 2722), 'django.utils.translation.gettext_lazy', '_', (['"""event page"""'], {}), "('event page')\n", (2708, 2722), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2753, 2769), 'django.utils.translation.gettext_lazy', '_', (['"""event pages"""'], {}), "('event pages')\n", (2754, 2769), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3113, 3122), 'django.utils.translation.gettext_lazy', '_', (['"""date"""'], {}), "('date')\n", (3114, 3122), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6306, 6330), 'django.utils.translation.gettext_lazy', '_', (['"""multiday event page"""'], {}), "('multiday event page')\n", (6307, 6330), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6361, 6386), 'django.utils.translation.gettext_lazy', '_', (['"""multiday event pages"""'], {}), "('multiday event pages')\n", (6362, 6386), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6740, 6755), 'django.utils.translation.gettext_lazy', '_', (['"""start date"""'], {}), "('start date')\n", (6741, 6755), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6811, 6824), 'django.utils.translation.gettext_lazy', '_', (['"""end date"""'], {}), "('end date')\n", (6812, 6824), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3201, 3223), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""category"""'], {}), "('category')\n", (3211, 3223), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((3233, 3259), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (3250, 3259), False, 'from 
wagtail.images.edit_handlers import ImageChooserPanel\n'), ((3269, 3287), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""date"""'], {}), "('date')\n", (3279, 3287), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((3359, 3375), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""tz"""'], {}), "('tz')\n", (3369, 3375), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((6004, 6042), 'django.utils.translation.gettext_lazy', '_', (['"""Event cannot end before it starts"""'], {}), "('Event cannot end before it starts')\n", (6005, 6042), True, 'from django.utils.translation import gettext_lazy as _\n'), ((6903, 6925), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""category"""'], {}), "('category')\n", (6913, 6925), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((6935, 6961), 'wagtail.images.edit_handlers.ImageChooserPanel', 'ImageChooserPanel', (['"""image"""'], {}), "('image')\n", (6952, 6961), False, 'from wagtail.images.edit_handlers import ImageChooserPanel\n'), ((6971, 6994), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""date_from"""'], {}), "('date_from')\n", (6981, 6994), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((7036, 7057), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""date_to"""'], {}), "('date_to')\n", (7046, 7057), False, 'from wagtail.admin.edit_handlers import FieldPanel\n'), ((7097, 7113), 'wagtail.admin.edit_handlers.FieldPanel', 'FieldPanel', (['"""tz"""'], {}), "('tz')\n", (7107, 7113), False, 'from wagtail.admin.edit_handlers import FieldPanel\n')] |
# coding: utf-8
from unittest import TestCase
import os
import ibm_watson
import pytest
import json
import time
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions
@pytest.mark.skipif(os.getenv('NATURAL_LANGUAGE_UNDERSTANDING_APIKEY') is None,
reason='requires NATURAL_LANGUAGE_UNDERSTANDING_APIKEY')
class TestNaturalLanguageUnderstandingV1(TestCase):
def setUp(self):
self.natural_language_understanding = ibm_watson.NaturalLanguageUnderstandingV1(version='2018-03-16')
self.natural_language_understanding.set_default_headers({
'X-Watson-Learning-Opt-Out': '1',
'X-Watson-Test': '1'
})
def test_analyze(self):
response = self.natural_language_understanding.analyze(
text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
'Superman fears not Banner, but Wayne.',
features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result()
assert response is not None
| [
"ibm_watson.NaturalLanguageUnderstandingV1",
"ibm_watson.natural_language_understanding_v1.EntitiesOptions",
"os.getenv",
"ibm_watson.natural_language_understanding_v1.KeywordsOptions"
] | [((490, 553), 'ibm_watson.NaturalLanguageUnderstandingV1', 'ibm_watson.NaturalLanguageUnderstandingV1', ([], {'version': '"""2018-03-16"""'}), "(version='2018-03-16')\n", (531, 553), False, 'import ibm_watson\n'), ((233, 283), 'os.getenv', 'os.getenv', (['"""NATURAL_LANGUAGE_UNDERSTANDING_APIKEY"""'], {}), "('NATURAL_LANGUAGE_UNDERSTANDING_APIKEY')\n", (242, 283), False, 'import os\n'), ((967, 984), 'ibm_watson.natural_language_understanding_v1.EntitiesOptions', 'EntitiesOptions', ([], {}), '()\n', (982, 984), False, 'from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n'), ((995, 1012), 'ibm_watson.natural_language_understanding_v1.KeywordsOptions', 'KeywordsOptions', ([], {}), '()\n', (1010, 1012), False, 'from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions\n')] |
from . import common as cmmn
import logging
import uuid
from typing import Optional
from instauto.api.structs import Surface
logger = logging.getLogger(__name__)
class _Base(cmmn.Base):
_csrftoken: str = None
radio_type: str = 'wifi-none'
device_id: str = None
_uid: str = None
_uuid: str = None
user_id: str = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._enable_datapoint_from_client('_csrftoken')
self._enable_datapoint_from_client('device_id')
self._enable_datapoint_from_client('_uid')
self._enable_datapoint_from_client('_uuid')
self._custom_data['uuid'] = self.State.required
self._custom_data['user_id'] = self.State.required
self._custom_data['endpoint'] = self.State.required
self._custom_data['surface'] = self.State.optional
class Create(_Base):
def __init__(self, user_id: str, **kwargs):
"""Use this to create a friendship, i.e. follow a user."""
super().__init__(**kwargs)
self._data['endpoint'] = 'create'
self._data['user_id'] = user_id
class Destroy(_Base):
def __init__(self, user_id: str, surface: Optional[Surface] = None, **kwargs):
"""Use this to 'destroy' a friendship, i.e. unfollow."""
super().__init__(**kwargs)
self._data['endpoint'] = 'destroy'
self._data['user_id'] = user_id
self._data['surface'] = surface
self._defaults['surface'] = surface.profile
class Remove(_Base):
def __init__(self, user_id: str, **kwargs):
super().__init__(**kwargs)
self._data['endpoint'] = 'remove_follower'
self._data['user_id'] = user_id
class Show(cmmn.Base):
"""Retrieves the following information for a friendship:
{
"blocking": False,
"followed_by": False,
"following": False,
"incoming_request": False,
"is_bestie": False,
"is_blocking_reel": False,
"is_muting_reel": False,
"is_private": False,
"is_restricted": False,
"muting": False,
"outgoing_request": False,
"status": "ok"
}
"""
def __init__(self, user_id: str, **kwargs):
super().__init__(**kwargs)
self._custom_data['user_id'] = self.State.required
self._custom_data['endpoint'] = self.State.required
self._data['user_id'] = user_id
self._data['endpoint'] = cmmn.Base.State.required
class GetBase(cmmn.Base):
def __init__(self, user_id: str, surface: Optional[Surface] = None, **kwargs):
super().__init__(**kwargs)
self._custom_data['user_id'] = self.State.required
self._custom_data['rank_token'] = self.State.required
self._custom_data['search_surface'] = self.State.required
self._custom_data['max_id'] = self.State.required
self._custom_data['page'] = self.State.required
self._data['user_id'] = user_id
self._data['search_surface'] = surface
self._defaults['search_surface'] = Surface.follow_list
self._defaults['rank_token'] = uuid.uuid4()
self._defaults['max_id'] = None
self._defaults['page'] = 0
# The requests for getting followers and your following, look exactly the same
# but we want to keep them in seperate structs for clarity.
GetFollowers = GetFollowing = GetBase
class PendingRequests:
def __init__(self):
pass
class ApproveRequest(cmmn.Base):
def __init__(self, user_id: str, **kwargs):
super().__init__(**kwargs)
self._enable_datapoint_from_client('_csrftoken')
self._enable_datapoint_from_client('_uid')
self._enable_datapoint_from_client('_uuid')
self._custom_data['radio_type'] = self.State.required
self._custom_data['surface'] = self.State.required
self._custom_data['user_id'] = self.State.required
self._data['user_id'] = user_id
self._defaults['surface'] = Surface.follow_requests
self._defaults['radio_type'] = 'wifi-none'
| [
"logging.getLogger",
"uuid.uuid4"
] | [((136, 163), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (153, 163), False, 'import logging\n'), ((3079, 3091), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3089, 3091), False, 'import uuid\n')] |
from kivymd.uix.boxlayout import MDBoxLayout
from kivymd.uix.toolbar import MDToolbar
class Menus():
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self):
box_central = MDBoxLayout(orientation='vertical')
# criar componentes
toolbar = MDToolbar(title='App Salva')
# navigation = NavegationMenu()()
#add componentes
box_central.add_widget(toolbar)
# box_central.add_widget(navigation)
return box_central | [
"kivymd.uix.toolbar.MDToolbar",
"kivymd.uix.boxlayout.MDBoxLayout"
] | [((233, 268), 'kivymd.uix.boxlayout.MDBoxLayout', 'MDBoxLayout', ([], {'orientation': '"""vertical"""'}), "(orientation='vertical')\n", (244, 268), False, 'from kivymd.uix.boxlayout import MDBoxLayout\n'), ((315, 343), 'kivymd.uix.toolbar.MDToolbar', 'MDToolbar', ([], {'title': '"""App Salva"""'}), "(title='App Salva')\n", (324, 343), False, 'from kivymd.uix.toolbar import MDToolbar\n')] |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 13 23:29:22 2022
@author: Tommaso
"""
from setuptools import setup
VERSION = '0.2.8'
DESCRIPTION = 'A python package for bspline curve approximation using deep learning'
# Setting up
setup(
name='deep-b-spline-approximation',
packages=['deep_b_spline_approximation'],
version=VERSION,
author="<NAME>",
author_email="<<EMAIL>>",
description=DESCRIPTION,
long_description_content_type="text/markdown",
url='https://github.com/t-ceccarini/deep-b-spline-approximation',
download_url='https://github.com/t-ceccarini/deep-b-spline-approximation/archive/refs/tags/v_0.2.8.tar.gz',
install_requires=['torch','prettytable','numpy','scipy','matplotlib'],
keywords=['python', 'deep learning', 'mlp', 'cnn', 'cagd', 'bspline', 'bezier'],
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
]
)
| [
"setuptools.setup"
] | [((233, 1255), 'setuptools.setup', 'setup', ([], {'name': '"""deep-b-spline-approximation"""', 'packages': "['deep_b_spline_approximation']", 'version': 'VERSION', 'author': '"""<NAME>"""', 'author_email': '"""<<EMAIL>>"""', 'description': 'DESCRIPTION', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/t-ceccarini/deep-b-spline-approximation"""', 'download_url': '"""https://github.com/t-ceccarini/deep-b-spline-approximation/archive/refs/tags/v_0.2.8.tar.gz"""', 'install_requires': "['torch', 'prettytable', 'numpy', 'scipy', 'matplotlib']", 'keywords': "['python', 'deep learning', 'mlp', 'cnn', 'cagd', 'bspline', 'bezier']", 'classifiers': "['Development Status :: 1 - Planning', 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9', 'Operating System :: Unix',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows']"}), "(name='deep-b-spline-approximation', packages=[\n 'deep_b_spline_approximation'], version=VERSION, author='<NAME>',\n author_email='<<EMAIL>>', description=DESCRIPTION,\n long_description_content_type='text/markdown', url=\n 'https://github.com/t-ceccarini/deep-b-spline-approximation',\n download_url=\n 'https://github.com/t-ceccarini/deep-b-spline-approximation/archive/refs/tags/v_0.2.8.tar.gz'\n , install_requires=['torch', 'prettytable', 'numpy', 'scipy',\n 'matplotlib'], keywords=['python', 'deep learning', 'mlp', 'cnn',\n 'cagd', 'bspline', 'bezier'], classifiers=[\n 'Development Status :: 1 - Planning', 'Intended Audience :: Developers',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9', 'Operating System :: Unix',\n 
'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows'])\n", (238, 1255), False, 'from setuptools import setup\n')] |
from argparse import ArgumentParser
import os
from displ.pwscf.parseScf import fermi_from_scf
from displ.wannier.wannier_util import global_config
from displ.wannier.build import Update_Disentanglement
def _main():
parser = ArgumentParser(description="Update disentanglement window in W90 input")
parser.add_argument('--subdir', type=str, default=None,
help="Subdirectory under work_base for all job dirs")
parser.add_argument('prefix', type=str,
help="Prefix of system to update")
parser.add_argument('outer_min', type=float,
help="Distance below E_F to start outer window")
parser.add_argument('outer_max', type=float,
help="Distance above E_F to stop outer window")
parser.add_argument('inner_min', type=float,
help="Distance below E_F to start inner window")
parser.add_argument('inner_max', type=float,
help="Distance above E_F to stop inner window")
args = parser.parse_args()
gconf = global_config()
base_path = os.path.expandvars(gconf["work_base"])
if args.subdir is not None:
base_path = os.path.join(base_path, args.subdir)
wandir = os.path.join(base_path, args.prefix, "wannier")
scf_path = os.path.join(wandir, "scf.out")
E_Fermi = fermi_from_scf(scf_path)
outer = [args.outer_min, args.outer_max]
inner = [args.inner_min, args.inner_max]
win_path = os.path.join(wandir, "{}.win".format(args.prefix))
Update_Disentanglement(win_path, E_Fermi, outer, inner)
if __name__ == "__main__":
_main()
| [
"displ.pwscf.parseScf.fermi_from_scf",
"displ.wannier.wannier_util.global_config",
"argparse.ArgumentParser",
"os.path.expandvars",
"os.path.join",
"displ.wannier.build.Update_Disentanglement"
] | [((229, 301), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""Update disentanglement window in W90 input"""'}), "(description='Update disentanglement window in W90 input')\n", (243, 301), False, 'from argparse import ArgumentParser\n'), ((1005, 1020), 'displ.wannier.wannier_util.global_config', 'global_config', ([], {}), '()\n', (1018, 1020), False, 'from displ.wannier.wannier_util import global_config\n'), ((1037, 1075), 'os.path.expandvars', 'os.path.expandvars', (["gconf['work_base']"], {}), "(gconf['work_base'])\n", (1055, 1075), False, 'import os\n'), ((1179, 1226), 'os.path.join', 'os.path.join', (['base_path', 'args.prefix', '"""wannier"""'], {}), "(base_path, args.prefix, 'wannier')\n", (1191, 1226), False, 'import os\n'), ((1242, 1273), 'os.path.join', 'os.path.join', (['wandir', '"""scf.out"""'], {}), "(wandir, 'scf.out')\n", (1254, 1273), False, 'import os\n'), ((1288, 1312), 'displ.pwscf.parseScf.fermi_from_scf', 'fermi_from_scf', (['scf_path'], {}), '(scf_path)\n', (1302, 1312), False, 'from displ.pwscf.parseScf import fermi_from_scf\n'), ((1475, 1530), 'displ.wannier.build.Update_Disentanglement', 'Update_Disentanglement', (['win_path', 'E_Fermi', 'outer', 'inner'], {}), '(win_path, E_Fermi, outer, inner)\n', (1497, 1530), False, 'from displ.wannier.build import Update_Disentanglement\n'), ((1128, 1164), 'os.path.join', 'os.path.join', (['base_path', 'args.subdir'], {}), '(base_path, args.subdir)\n', (1140, 1164), False, 'import os\n')] |
import unittest
from credentialData import CredentialData
class TestCredentials(unittest.TestCase):
def setUp(self):
"""
setUp method
"""
self.new_credential = CredentialData("Instagram", "mimi", "mireille")
def test_init(self):
"""
testing initialization
"""
self.assertEqual(self.new_credential.platform, "Instagram")
self.assertEqual(self.new_credential.username, "mimi")
self.assertEqual(self.new_credential.password, "<PASSWORD>")
def tearDown(self):
CredentialData.credentials = []
def test_save_credential(self):
"""
test if credential is saved in the credentials list
"""
self.new_credential.save_credential()
self.assertEqual(len(CredentialData.credentials), 1)
def test_display_credentials(self):
"""
test display credentials method
"""
self.assertEqual(CredentialData.display_credentials(),CredentialData.credentials)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"credentialData.CredentialData.display_credentials",
"credentialData.CredentialData"
] | [((1056, 1071), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1069, 1071), False, 'import unittest\n'), ((199, 246), 'credentialData.CredentialData', 'CredentialData', (['"""Instagram"""', '"""mimi"""', '"""mireille"""'], {}), "('Instagram', 'mimi', 'mireille')\n", (213, 246), False, 'from credentialData import CredentialData\n'), ((959, 995), 'credentialData.CredentialData.display_credentials', 'CredentialData.display_credentials', ([], {}), '()\n', (993, 995), False, 'from credentialData import CredentialData\n')] |
import numbers
import random
import math
import torch
from scripts.study_case.ID_13.torch_geometric.transforms import LinearTransformation
class RandomRotate(object):
def __init__(self, degrees, axis=0):
if isinstance(degrees, numbers.Number):
degrees = (-abs(degrees), abs(degrees))
assert isinstance(degrees, (tuple, list)) and len(degrees) == 2
self.degrees = degrees
self.axis = axis
def __call__(self, data):
degree = math.pi * random.uniform(*self.degrees) / 180.0
sin, cos = math.sin(degree), math.cos(degree)
if data.pos.size(1) == 2:
matrix = [[cos, sin], [-sin, cos]]
else:
if self.axis == 0:
matrix = [[1, 0, 0], [0, cos, sin], [0, -sin, cos]]
elif self.axis == 1:
matrix = [[cos, 0, -sin], [0, 1, 0], [sin, 0, cos]]
else:
matrix = [[cos, sin, 0], [-sin, cos, 0], [0, 0, 1]]
return LinearTransformation(torch.tensor(matrix))(data)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.degrees)
| [
"math.cos",
"torch.tensor",
"random.uniform",
"math.sin"
] | [((554, 570), 'math.sin', 'math.sin', (['degree'], {}), '(degree)\n', (562, 570), False, 'import math\n'), ((572, 588), 'math.cos', 'math.cos', (['degree'], {}), '(degree)\n', (580, 588), False, 'import math\n'), ((497, 526), 'random.uniform', 'random.uniform', (['*self.degrees'], {}), '(*self.degrees)\n', (511, 526), False, 'import random\n'), ((1007, 1027), 'torch.tensor', 'torch.tensor', (['matrix'], {}), '(matrix)\n', (1019, 1027), False, 'import torch\n')] |
from flask import Flask, render_template, Response
from .OverlayCamera import OverlayCamera
from .settings import ROUTE
app = Flask(__name__)
def gen(camera):
"""Video streaming generator function."""
while True:
frame = camera.get_frame()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
@app.route(ROUTE)
def video_feed():
"""Video streaming route. Put this in the src attribute of an img tag."""
return Response(gen(OverlayCamera()),
mimetype='multipart/x-mixed-replace; boundary=frame')
| [
"flask.Flask"
] | [((130, 145), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (135, 145), False, 'from flask import Flask, render_template, Response\n')] |
"""Implements the :py:class:`SearchClient` class."""
from typing import Any, List
from datetime import datetime, timezone
from pymongo.collation import Collation
import pymongo
__all__ = ['SearchClient']
class SearchClient:
"""This class executes search queries."""
def __init__(self, *, db: pymongo.database.Database, client: Any):
self.db = db
self.client = client
def execute(self, user_netid: str, query: Any) -> Any: # pylint: disable=too-many-branches,too-many-statements
"""Execute a search query
:param user_netid: The NetID of the user performing the search
:param query: The search query. See :py:mod:`shrunk.api.search` for
the search query format
"""
# We're going to build up an aggregation pipeline based on the submitted query.
# This pipeline will be executed on the organizations collection if set.set == 'org',
# or on the urls collection otherwise.
pipeline: List[Any] = []
# Filter based on search string, if provided.
if 'query' in query and query['query'] != '' and query['set']['set'] != 'shared':
pipeline += [
{'$match': {'$text': {'$search': query['query']}}},
{'$addFields': {'text_search_score': {'$meta': 'textScore'}}},
]
# Filter the appropriate links set.
if query['set']['set'] == 'user': # search within `user_netid`'s links
pipeline.append({'$match': {'netid': user_netid}})
elif query['set']['set'] == 'shared':
# If the set is 'shared', the pipeline will be executed against the 'organizations'
# collection instead of the 'urls' collection.
if 'query' in query and query['query'] != '':
pipeline += [
{'$match': {'members.netid': user_netid}},
{'$lookup': {
'from': 'urls',
'let': {'org_id':'$_id'},
'pipeline' : [
{'$match': {'$text': {'$search': query['query']}}},
{'$addFields': {'text_search_score': {'$meta': 'textScore'}}},
{'$unwind': '$viewers'},
{'$match': {'$expr':{'$eq':['$viewers._id','$$org_id']}}},
{'$match': {'text_search_score': {'$gt': 0.5}}},
],
'as': 'shared_urls',
}},
{'$unwind': '$shared_urls'},
{'$replaceRoot': {'newRoot': '$shared_urls'}},
{'$unionWith': {
'coll': 'urls',
'pipeline': [{'$match': {'$text': {'$search': query['query']}}},
{'$addFields': {'text_search_score': {'$meta': 'textScore'}}},
{'$match': {'viewers._id': user_netid}},
{'$match': {'text_search_score': {'$gt': 0.5}}}]
}}]
else:
pipeline += [
{'$match': {'members.netid': user_netid}},
{'$lookup': {
'from': 'urls',
'localField': '_id',
'foreignField': 'viewers._id',
'as': 'shared_urls',
}},
{'$unwind': '$shared_urls'},
{'$replaceRoot': {'newRoot': '$shared_urls'}},
{'$unionWith': {
'coll': 'urls',
'pipeline': [{'$match': {'viewers._id': user_netid}}]
}}]
elif query['set']['set'] == 'org': # search within the given org
pipeline.append({'$match': {'viewers.type': 'org', 'viewers._id': query['set']['org']}})
# Sort results.
sort_order = 1 if query['sort']['order'] == 'ascending' else -1
if query['sort']['key'] == 'created_time':
sort_key = 'timeCreated'
elif query['sort']['key'] == 'title':
sort_key = 'title'
elif query['sort']['key'] == 'visits':
sort_key = 'visits'
elif query['sort']['key'] == 'relevance':
sort_key = 'text_search_score'
else:
# This should never happen
raise RuntimeError(f'Bad sort key {query["sort"]["key"]}')
pipeline.append({'$sort': {sort_key: sort_order, '_id': sort_order}})
# Add is_expired field
now = datetime.now(timezone.utc)
pipeline.append({
'$addFields': {
'is_expired': {
'$and': [
{'$toBool': '$expiration_time'},
{'$gte': [now, '$expiration_time']},
],
},
},
})
if not query.get('show_deleted_links', False):
pipeline.append({'$match': {'deleted': {'$ne': True}}})
if not query.get('show_expired_links', False):
pipeline.append({'$match': {'is_expired': False}})
if 'begin_time' in query:
pipeline.append({'$match': {'timeCreated': {'$gte': query['begin_time']}}})
if 'end_time' in query:
pipeline.append({'$match': {'timeCreated': {'$lte': query['end_time']}}})
# Pagination.
facet = {
'count': [{'$count': 'count'}],
'result': [{'$skip': 0}],
}
if 'pagination' in query:
facet['result'] = [
{'$skip': query['pagination']['skip']},
{'$limit': query['pagination']['limit']},
]
pipeline.append({'$facet': facet})
# Execute the query. Make sure we use the 'en' collation so strings
# are sorted properly (e.g. wrt case and punctuation).
if query['set']['set'] == 'shared':
cursor = self.db.organizations.aggregate(pipeline, collation=Collation('en'))
else:
cursor = self.db.urls.aggregate(pipeline, collation=Collation('en'))
def prepare_result(res: Any) -> Any:
"""Turn a result from the DB into something than can be JSON-serialized."""
def is_alias_visible(alias: Any) -> bool:
if query.get('show_deleted_links', False):
return True
return not alias['deleted']
if res.get('expiration_time'):
expiration_time = res['expiration_time']
else:
expiration_time = None
prepared = {
'id': res['_id'],
'title': res['title'],
'long_url': res['long_url'],
'created_time': res['timeCreated'],
'expiration_time': expiration_time,
'visits': res['visits'],
'unique_visits': res.get('unique_visits', 0),
'owner': res['netid'],
'aliases': [alias for alias in res['aliases'] if is_alias_visible(alias)],
'is_expired': res['is_expired'],
'may_edit': self.client.links.may_edit(res['_id'], user_netid),
}
if res.get('deleted'):
prepared['deletion_info'] = {
'deleted_by': res['deleted_by'],
'deleted_time': res['deleted_time'],
}
return prepared
result = next(cursor)
count = result['count'][0]['count'] if result['count'] else 0
results = [prepare_result(res) for res in result['result']]
# Remove possible duplicates in results and update total count
unique = { each['id'] : each for each in results}.values()
unique_results = list(unique)
diff = len(results) - len(unique_results)
count = count - diff
return {
'count': count,
'results': unique_results,
} | [
"datetime.datetime.now",
"pymongo.collation.Collation"
] | [((4670, 4696), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (4682, 4696), False, 'from datetime import datetime, timezone\n'), ((6111, 6126), 'pymongo.collation.Collation', 'Collation', (['"""en"""'], {}), "('en')\n", (6120, 6126), False, 'from pymongo.collation import Collation\n'), ((6206, 6221), 'pymongo.collation.Collation', 'Collation', (['"""en"""'], {}), "('en')\n", (6215, 6221), False, 'from pymongo.collation import Collation\n')] |
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.rendering
from enum import IntEnum
from typing import TYPE_CHECKING
from ooo.oenv.env_const import UNO_ENVIRONMENT, UNO_RUNTIME
_DYNAMIC = False
if (not TYPE_CHECKING) and UNO_RUNTIME and UNO_ENVIRONMENT:
_DYNAMIC = True
if not TYPE_CHECKING and _DYNAMIC:
from com.sun.star.rendering import RepaintResult as RepaintResult
if hasattr(RepaintResult, '_constants') and isinstance(RepaintResult._constants, dict):
RepaintResult._constants['__ooo_ns__'] = 'com.sun.star.rendering'
RepaintResult._constants['__ooo_full_ns__'] = 'com.sun.star.rendering.RepaintResult'
RepaintResult._constants['__ooo_type_name__'] = 'const'
def build_enum():
global RepaintResultEnum
ls = [f for f in dir(RepaintResult) if not callable(getattr(RepaintResult, f)) and not f.startswith('__')]
_dict = {}
for name in ls:
_dict[name] = getattr(RepaintResult, name)
RepaintResultEnum = IntEnum('RepaintResultEnum', _dict)
build_enum()
else:
from ...lo.rendering.repaint_result import RepaintResult as RepaintResult
class RepaintResultEnum(IntEnum):
"""
Enum of Const Class RepaintResult
These constants specify the result of the XCachedPrimitive render operation.
**since**
OOo 2.0
"""
REDRAWN = RepaintResult.REDRAWN
"""
Repaint succeeded, primitive has been exactly reproduced.
"""
DRAFTED = RepaintResult.DRAFTED
"""
Repaint succeeded, primitive has been reproduced in preview quality.
"""
FAILED = RepaintResult.FAILED
"""
Repaint failed altogether.
"""
__all__ = ['RepaintResult', 'RepaintResultEnum']
| [
"enum.IntEnum"
] | [((1680, 1715), 'enum.IntEnum', 'IntEnum', (['"""RepaintResultEnum"""', '_dict'], {}), "('RepaintResultEnum', _dict)\n", (1687, 1715), False, 'from enum import IntEnum\n')] |
############################################################
import pytesseract
import os
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
#############################################################
R_percentage = 0
if "TESSERACT_EXEC" in os.environ:
pytesseract.pytesseract.tesseract_cmd = os.environ["TESSERACT_EXEC"]
#############################################################
def real(t):
return "".join([c for c in t if c.isalnum()])
def Read(abs_folder_in, abs_folder_out, abs_folder_out_pdf, lang, debug, name):
global R_percentage
app_folder = os.path.dirname(__file__)
s = sorted([int(i[:-4]) for i in os.listdir(abs_folder_in) if i.endswith(".jpg")])
images_list = [os.path.join(abs_folder_in, str(i) + ".jpg") for i in s]
for c, img_name in enumerate(images_list, 0):
if debug: print("Creating hOCR")
pytesseract.pytesseract.run_tesseract(img_name, os.path.join(abs_folder_in, str(s[c])), lang = lang, extension = "", config = "hocr")
if debug: print("Done ", c+1, " of ", len(images_list))
R_percentage += 1 / len(images_list)
if debug: print("Creating Pdf from Hocr and images")
os.system("hocr-pdf --savefile " + os.path.join(abs_folder_out_pdf, name + ".pdf" ) + " " + abs_folder_in)
if debug: print("Moving the hocr to their folder")
for i, n in zip(images_list, s):
os.rename(i[:-4]+".hocr", os.path.join(abs_folder_out, str(n)+".hocr"))
R_percentage = 0
def get_percentage():
global R_percentage
return R_percentage
###############################################################
| [
"os.path.dirname",
"os.listdir",
"os.path.join"
] | [((690, 715), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (705, 715), False, 'import os\n'), ((753, 778), 'os.listdir', 'os.listdir', (['abs_folder_in'], {}), '(abs_folder_in)\n', (763, 778), False, 'import os\n'), ((1317, 1364), 'os.path.join', 'os.path.join', (['abs_folder_out_pdf', "(name + '.pdf')"], {}), "(abs_folder_out_pdf, name + '.pdf')\n", (1329, 1364), False, 'import os\n')] |
"""
pdict.py -- Implement a (remote) persistent dictionary that is accessed over
a socket. The backend dictionary could be dbshelve, BSDDB, or
even a relational table of (key, blob).
*** The purpose is to have a bullet-proof, separate-process, persistent dictionary
that is very fast, globally shared by many processes, and can't be harmed by
process segfaults.
***
"""
from twisted.internet import reactor
from twisted.internet.protocol import ServerFactory
from twisted.protocols.basic import LineReceiver
from twisted.python import log
import os
import sys
import socket
from bsddb3 import dbshelve
import pickle as pickle
try:
from UserDict import DictMixin
except ImportError:
class DictMixin:
pass
# retrieve work unit cache dir and file from user configuration
from sciflo.utils import ScifloConfigParser, validateDirectory
scp = ScifloConfigParser()
WorkUnitCacheDir = scp.getParameter("cacheHome")
WorkUnitCacheFile = scp.getParameter("cacheDb")
WorkUnitCachePort = int(scp.getParameter("cachePort"))
WorkUnitCache = os.path.join(WorkUnitCacheDir, WorkUnitCacheFile)
WorkUnitCacheLog = os.path.join(sys.prefix, 'log', '%s.log' %
os.path.splitext(WorkUnitCacheFile)[0])
DEBUG = False
# Registry of named (shareable) dictionaries
NamedDicts = {'WorkUnitCache':
{'dbFile': WorkUnitCache, 'port': WorkUnitCachePort,
'logFile': WorkUnitCacheLog},
'EventStore':
{'dbFile': '/tmp/EventStore/eventStore.db', 'port': 8002,
'logFile': 'eventStoreServer.log'},
'Test':
{'dbFile': None, 'port': 8009, 'logFile': '/tmp/Test.log'},
}
# String constants for client/server protocol across wire
NNL = '\r\n' # network newline
MsgPrefix = '#!#'
OkMsg = MsgPrefix + 'ok'
NoneMsg = MsgPrefix + 'None'
ErrorMsg = MsgPrefix + 'error: '
EndMsg = MsgPrefix + 'end'
EndToken = EndMsg + NNL
_TestDict = {'foo': 'bar', 'bush': 'sucks', 'fool': 'no money'}
class PersistentDictProtocol(LineReceiver):
"""A twisted server to allow access to a persistent dictionary (e.g. bsddb)
from multiple remote clients. The line-oriented protocol accepts the commands:
- ping<NNL> : see if the server is up on a given port)
- get<NNL>key<NNL> : get the string value of a string key)
- delete<NNL>key<NNL> : delete a key/value pair from the dictionary
- insert<NNL>key<NNL>val<EndMsg><NNL> : insert a multi-line string value under that key)
- length<NNL> : return number of keys in dict (**CURRENTLY BROKEN, returns zero**)
Notes:
- Keys cannot contain network newlines, NNL = '\r\n'.
- Values can be multi-line strings (python pickles or XML).
- Newlines are used to separate parts of the commands so that the cmd can be
parsed using LineReceiver
"""
def __init__(self, state='start'):
self.state = state # state of FSM = 'start', 'get', 'delete', 'insert', or 'getval'
self.key = None # key to insert value under
self.val = None # value to insert
def connectionMade(self):
if DEBUG:
print('PersistentDict: connection made.')
def lineReceived(self, line):
"""Simple finite state machine to process the four possible commands.
"""
dic = self.factory.dict # get dictionary opened in factory init()
if DEBUG:
print(('**', line, '**'))
if self.state == 'start':
if line == 'ping':
print('ping')
self.sendline(OkMsg)
elif line == 'length':
print('length')
self.sendline('1')
# self.sendline( str(len(dic)) )
elif line in ('get', 'delete', 'insert'):
if DEBUG:
print(('Change state to', line))
self.state = line
elif self.state == 'get':
print(('get', line))
val = dic.get(line, NoneMsg)
self.sendline(val + EndMsg)
self.state = 'start'
elif self.state == 'delete':
print(('delete', line))
if line in dic:
del dic[line]
self.sendline(OkMsg)
self.state = 'start'
elif self.state == 'insert':
print(('insert', line))
self.key = line
self.val = ''
self.state = 'getval'
elif self.state == 'getval':
if DEBUG:
print(('Adding to val:', line))
self.val += line
if line.endswith(EndMsg):
val = self.val[:-len(EndMsg)]
dic[self.key] = val
if DEBUG:
print('Inserted:')
if DEBUG:
print(val)
self.sendline(OkMsg)
self.state = 'start'
def sendline(self, line):
self.transport.write(line + NNL)
class PersistentDictFactoryException(RuntimeError):
pass
class PersistentDictFactory(ServerFactory):
protocol = PersistentDictProtocol
def __init__(self, dictName, dictRegistry=NamedDicts):
"""Set up for the protocol by opening the named persistent dictionary.
"""
self.dictName = dictName
try:
self.dbFile = dictRegistry[dictName]['dbFile']
self.port = dictRegistry[dictName]['port']
if self.dbFile:
dbHome = os.path.split(self.dbFile)[0]
if not os.path.exists(dbHome):
os.makedirs(dbHome, 0o777)
self.dbHome = dbHome
logFile = dictRegistry[dictName]['logFile']
if not logFile.startswith('/'):
logFile = os.path.join(dbHome, logFile)
self.logFile = logFile
except:
raise PersistentDictFactoryException(
'Error, no dict of that name: %s' % dictName)
validateDirectory(os.path.dirname(self.logFile))
log.startLogging(open(self.logFile, 'w'))
if dictName == 'Test':
self.dict = _TestDict
else:
self.dict = dbshelve.open(self.dbFile)
os.chmod(self.dbFile, 0o666)
class PersistentDictClientException(RuntimeError):
    """Raised by PersistentDictClient on unknown dict names or socket failures."""
    pass
class PersistentDictClient:
    """A simple client to call a persistent dictionary (e.g. bsddb) across a socket.
    The client only has four useful methods: ping, get, delete, insert.

    NOTE(review): commands are sent as str; on Python 3, socket.sendall
    requires bytes — confirm NNL/encoding handling upstream.
    """
    def __init__(self, dictName, dictRegistry=NamedDicts, pickleVals=False, timeout=3.0, bufsize=4096):
        """Connect to the local server for *dictName* and verify it with a ping.

        :param dictName: registry key of the dict to talk to
        :param dictRegistry: mapping name -> {'port': ...}
        :param pickleVals: pickle/unpickle values on insert/get
        :param timeout: socket timeout in seconds
        :param bufsize: recv buffer size in bytes
        :raises PersistentDictClientException: unknown name or unreachable server
        """
        self.dictName = dictName
        self.pickleVals = pickleVals
        self.timeout = timeout
        self.bufsize = bufsize
        try:
            self.port = dictRegistry[dictName]['port']
        except KeyError:
            raise PersistentDictClientException(
                'Error, no dict of that name: %s' % dictName)
        self.soc = self._openLocalSocket(self.port)
        if not self.ping():
            raise PersistentDictClientException(
                'Error, server for %s on port %s does not return ping' % (dictName, self.port))

    def close(self):
        """Close the socket connection to the server."""
        self.soc.close()
        if DEBUG:
            print(('PersistentDictClient: Closed socket connection to dictName, port: %s, %d' % (
                self.dictName, self.port)))

    def ping(self):
        """Ping server to ensure it's alive."""
        try:
            return self._sendCmd('ping')
        except Exception:
            return False

    def get(self, key, default=None):
        """Get value of a string key, or default value if missing."""
        soc = self.soc
        cmd = 'get' + NNL + key + NNL
        try:
            soc.sendall(cmd)
        except socket.error as msg:
            soc.close()
            raise PersistentDictClientException(
                'Error, cannot send to socket: %s' % cmd)
        data = ''
        firstTry = True
        # accumulate until the end-of-value marker arrives
        while not data.endswith(EndToken):
            try:
                data += soc.recv(self.bufsize)
            except socket.error as msg:
                soc.close()
                raise PersistentDictClientException(
                    'Error, no data received from socket, sent: %s' % cmd)
            # NoneMsg (or an immediately-empty reply) means missing key
            if data.startswith(NoneMsg) or (firstTry and len(data) == 0):
                return default
            firstTry = False
        data = data[:-len(EndToken)]
        if self.pickleVals:
            return pickle.loads(data)
        else:
            return data

    def delete(self, key):
        """Delete a key and its value from persistent dict."""
        cmd = 'delete' + NNL + key + NNL
        try:
            return self._sendCmd(cmd)
        except Exception:
            return False

    def insert(self, key, val):
        """Insert or change the value of a key."""
        if self.pickleVals:
            val = pickle.dumps(val)
        cmd = 'insert' + NNL + key + NNL + val + EndToken
        try:
            return self._sendCmd(cmd)
        except Exception:
            return False

    def length(self):
        """Return number of keys in dict."""
        try:
            return int(self._sendCmd('length'))
        except Exception:
            return 0

    def _openLocalSocket(self, port):
        """Open a port on localhost and send a ping command to ensure server is alive."""
        try:
            soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            soc.connect(('127.0.0.1', port))
            soc.settimeout(self.timeout)
        except socket.error as e:
            soc.close()
            print(
                ('PersistentDictClient: Error, cannot connect socket to local port: %s' % port))
            raise e
        return soc

    def _sendCmd(self, cmd):
        """Send a command and check for returned 'ok' message."""
        soc = self.soc
        # ensure the command is terminated
        if cmd[-2:] != NNL:
            cmd += NNL
        try:
            soc.sendall(cmd)
        except socket.error as msg:
            soc.close()
            raise RuntimeError(
                'PersistentDictClient: Error, cannot send to socket: %s' % cmd)
        try:
            data = soc.recv(self.bufsize)
        except socket.error as e:
            soc.close()
            print(
                ('PersistentDictClient: Error, no data received from socket, sent: %s' % cmd))
            raise e
        # BUGFIX: strip the trailing terminator instead of keeping only it.
        # The original 'data = data[-len(NNL):]' reduced every reply to the
        # terminator itself, so 'data == OkMsg' could never be true and
        # length()'s int() conversion always failed (returning 0). This now
        # mirrors how get() strips EndToken.
        data = data[:-len(NNL)]
        if data == OkMsg:
            data = True
        return data
class PersistentDictException(RuntimeError):
    """Raised by PersistentDict for the unsupported keys()/items()/values() operations."""
    pass
class PersistentDict(DictMixin):
    """Presents the usual dict interface, accessing a *named*, shared, persistent dictionary,
    and hides the (socket) client and (twisted) server classes from view.
    """
    def __init__(self, dictName, pickleVals=False):
        # name of the registered dict to connect to
        self.dictName = dictName
        # set db to None first so __del__ is safe if the client constructor raises
        self.db = None
        self.db = PersistentDictClient(dictName, pickleVals=pickleVals)
    def __del__(self):
        # close the socket connection when the wrapper is garbage-collected
        if self.db:
            self.db.close()
    def __getattr__(self, name):
        """Many methods we can just pass through to the DB object."""
        return getattr(self.db, name)
    # dictionary access methods
    def __len__(self):
        return self.db.length()
    def __getitem__(self, key):
        # NOTE: returns the client's default (None) for a missing key rather
        # than raising KeyError, so it is not a perfect dict emulation
        return self.db.get(key)
    def __setitem__(self, key, val):
        self.db.insert(key, val)
    def __delitem__(self, key):
        self.db.delete(key)
    # keys/items/values are deliberately unsupported: the socket protocol
    # visible here only offers ping/get/insert/delete/length, no enumeration
    def keys(self, txn=None):
        raise PersistentDictException(
            'Error, class does not implement keys() method.')
    def items(self, txn=None):
        raise PersistentDictException(
            'Error, class does not implement items() method.')
    def values(self, txn=None):
        raise PersistentDictException(
            'Error, class does not implement values() method.')
def startPersistentDictServer():
    """This code belongs in a twisted tac file (at toplevel)."""
    from twisted.application import internet, service
    from .pdict import NamedDicts, PersistentDictFactory
    dict_name = "EventStore"
    listen_port = NamedDicts[dict_name]['port']
    application = service.Application("pdict")
    # build the factory for the named dict and attach a TCP server for it
    # to the application's service collection
    server = internet.TCPServer(listen_port, PersistentDictFactory(dict_name))
    server.setServiceParent(service.IServiceCollection(application))
def testClientSimple():
    """Minimal round-trip against the 'Test' dict: read, delete, insert, read back."""
    store = PersistentDict("Test")
    print((store['foo']))
    del store['foo']
    store['you'] = 'tube'
    print((store['you']))
    del store
def testClient():
    """Smoke-test the 'EventStore' dict: length, reads, inserts, delete."""
    store = PersistentDict("EventStore")
    print((len(store)))
    print((store['foo']))
    store['foo'] = 'bar'
    store['bush'] = 'sucks'
    store['fool'] = 'no money'
    print((store['foo']))
    del store['foo']
    store['you'] = 'tube'
    print((store['you']))
    print((len(store)))
def main():
    """Script entry point: run the EventStore client smoke test."""
    testClient()
if __name__ == '__main__':
    # executed as a script: run the client smoke test
    main()
| [
"os.path.exists",
"twisted.application.service.Application",
"twisted.application.service.IServiceCollection",
"socket.socket",
"bsddb3.dbshelve.open",
"pickle.dumps",
"os.makedirs",
"os.path.join",
"os.path.splitext",
"os.chmod",
"os.path.split",
"os.path.dirname",
"twisted.application.inte... | [((890, 910), 'sciflo.utils.ScifloConfigParser', 'ScifloConfigParser', ([], {}), '()\n', (908, 910), False, 'from sciflo.utils import ScifloConfigParser, validateDirectory\n'), ((1079, 1128), 'os.path.join', 'os.path.join', (['WorkUnitCacheDir', 'WorkUnitCacheFile'], {}), '(WorkUnitCacheDir, WorkUnitCacheFile)\n', (1091, 1128), False, 'import os\n'), ((12188, 12216), 'twisted.application.service.Application', 'service.Application', (['"""pdict"""'], {}), "('pdict')\n", (12207, 12216), False, 'from twisted.application import internet, service\n'), ((12283, 12316), 'twisted.application.internet.TCPServer', 'internet.TCPServer', (['port', 'factory'], {}), '(port, factory)\n', (12301, 12316), False, 'from twisted.application import internet, service\n'), ((12351, 12390), 'twisted.application.service.IServiceCollection', 'service.IServiceCollection', (['application'], {}), '(application)\n', (12377, 12390), False, 'from twisted.application import internet, service\n'), ((1223, 1258), 'os.path.splitext', 'os.path.splitext', (['WorkUnitCacheFile'], {}), '(WorkUnitCacheFile)\n', (1239, 1258), False, 'import os\n'), ((6007, 6036), 'os.path.dirname', 'os.path.dirname', (['self.logFile'], {}), '(self.logFile)\n', (6022, 6036), False, 'import os\n'), ((6191, 6217), 'bsddb3.dbshelve.open', 'dbshelve.open', (['self.dbFile'], {}), '(self.dbFile)\n', (6204, 6217), False, 'from bsddb3 import dbshelve\n'), ((6230, 6256), 'os.chmod', 'os.chmod', (['self.dbFile', '(438)'], {}), '(self.dbFile, 438)\n', (6238, 6256), False, 'import os\n'), ((8556, 8574), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (8568, 8574), True, 'import pickle as pickle\n'), ((8967, 8984), 'pickle.dumps', 'pickle.dumps', (['val'], {}), '(val)\n', (8979, 8984), True, 'import pickle as pickle\n'), ((9461, 9510), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (9474, 9510), False, 'import 
socket\n'), ((5485, 5511), 'os.path.split', 'os.path.split', (['self.dbFile'], {}), '(self.dbFile)\n', (5498, 5511), False, 'import os\n'), ((5538, 5560), 'os.path.exists', 'os.path.exists', (['dbHome'], {}), '(dbHome)\n', (5552, 5560), False, 'import os\n'), ((5582, 5606), 'os.makedirs', 'os.makedirs', (['dbHome', '(511)'], {}), '(dbHome, 511)\n', (5593, 5606), False, 'import os\n'), ((5784, 5813), 'os.path.join', 'os.path.join', (['dbHome', 'logFile'], {}), '(dbHome, logFile)\n', (5796, 5813), False, 'import os\n')] |
import timeit

# On IPython, the %timeit magic provides the same functionality.
t_begin = timeit.default_timer()
"""
your code here
"""
t_end = timeit.default_timer()
print(t_end - t_begin)
| [
"timeit.default_timer"
] | [((96, 118), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (116, 118), False, 'import timeit\n'), ((154, 176), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (174, 176), False, 'import timeit\n')] |
import os
import re
from subprocess import call
from time import sleep

# Restart every hortiradar worker managed by supervisor, one at a time,
# sleeping a minute between restarts so they don't all go down at once.
supervisor_dir = "/etc/supervisor/conf.d/"
# raw string so \d and \. are regex escapes (non-raw form is a
# SyntaxWarning on modern Python); compiled once before the loop
worker_re = re.compile(r"(hortiradar-worker\d)\.conf")

_, _, files = next(os.walk(supervisor_dir))
for f in files:
    m = worker_re.match(f)
    if m:
        worker = m.group(1)
        call(["supervisorctl", "restart", worker])
        sleep(60)
| [
"time.sleep",
"re.match",
"subprocess.call",
"os.walk"
] | [((136, 159), 'os.walk', 'os.walk', (['supervisor_dir'], {}), '(supervisor_dir)\n', (143, 159), False, 'import os\n'), ((186, 230), 're.match', 're.match', (['"""(hortiradar-worker\\\\d)\\\\.conf"""', 'f'], {}), "('(hortiradar-worker\\\\d)\\\\.conf', f)\n", (194, 230), False, 'import re\n'), ((275, 317), 'subprocess.call', 'call', (["['supervisorctl', 'restart', worker]"], {}), "(['supervisorctl', 'restart', worker])\n", (279, 317), False, 'from subprocess import call\n'), ((326, 335), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (331, 335), False, 'from time import sleep\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import default_proxy_handler, get_content
from ykdl.util.match import match1, matchall
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
from ykdl.compact import install_opener, build_opener, HTTPCookieProcessor
import json
import sys
import base64
import uuid
import time
# Python 2/3 compatibility shims: the bytes translation-table factory and
# bytearray -> str conversion differ between the two versions.
py3 = sys.version_info[0] == 3
if py3:
    maketrans = bytes.maketrans
    bytearray2str = bytearray.decode
else:
    from string import maketrans
    bytearray2str = str

# base64 alphabet substitution ('+/=' -> '_~-') applied to the encoded
# tk2 token in generate_did_tk2()
encode_translation = maketrans(b'+/=', b'_~-')
def generate_did_tk2():
    """Build a random device id plus the matching 'tk2' token: the payload is
    base64-encoded, has '+/=' substituted via encode_translation, and is
    finally reversed."""
    did = str(uuid.uuid4())
    payload = 'pno=1000|ver=0.3.0001|did={}|clit={}'.format(did, int(time.time()))
    if not isinstance(payload, bytes):
        payload = payload.encode()
    token = bytearray(base64.b64encode(payload).translate(encode_translation))
    token.reverse()
    return did, bytearray2str(token)
class Hunantv(VideoExtractor):
    """Extractor for mgtv.com (芒果TV): resolves a page/video id to m3u8 stream URLs."""
    name = u"芒果TV (HunanTV)"
    supported_stream_profile = [ u'蓝光', u'超清', u'高清', u'标清' ]
    supported_stream_types = [ 'BD', 'TD', 'HD', 'SD' ]
    profile_2_types = { u'蓝光': 'BD', u'超清': 'TD', u'高清': 'HD', u'标清': 'SD' }
    def prepare(self):
        # install an opener that keeps cookies (plus the proxy, if configured)
        handlers = [HTTPCookieProcessor()]
        if default_proxy_handler:
            handlers += default_proxy_handler
        install_opener(build_opener(*handlers))
        info = VideoInfo(self.name)
        if self.url and not self.vid:
            # try to pull the video id from the URL, else scrape it from the page
            self.vid = match1(self.url, 'https?://www.mgtv.com/b/\d+/(\d+).html')
            if self.vid is None:
                html = get_content(self.url)
                self.vid = match1(html, 'vid=(\d+)', 'vid=\"(\d+)', 'vid: (\d+)')
        did, tk2 = generate_did_tk2()
        # first API call: video metadata (title) and the pm2 token needed below
        api_info_url = 'https://pcweb.api.mgtv.com/player/video?video_id={}&did={}&tk2={}'.format(self.vid, did, tk2)
        meta = json.loads(get_content(api_info_url))
        assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
        assert meta['data'], '[Failed] Video info not found.'
        pm2 = meta['data']['atc']['pm2']
        info.title = meta['data']['info']['title']
        # second API call: list of stream qualities with their relative URLs
        api_source_url = 'https://pcweb.api.mgtv.com/player/getSource?video_id={}&did={}&pm2={}&tk2={}'.format(self.vid, did, pm2, tk2)
        meta = json.loads(get_content(api_source_url))
        assert meta['code'] == 200, '[failed] code: {}, msg: {}'.format(meta['code'], meta['msg'])
        assert meta['data'], '[Failed] Video source not found.'
        data = meta['data']
        domain = data['stream_domain'][0]
        for lstream in data['stream']:
            if lstream['url']:
                # resolve each stream's final m3u8 URL
                url = json.loads(get_content(domain + lstream['url']))['info']
                info.streams[self.profile_2_types[lstream['name']]] = {'container': 'm3u8', 'video_profile': lstream['name'], 'src' : [url]}
                info.stream_types.append(self.profile_2_types[lstream['name']])
        # order qualities best-first (BD > TD > HD > SD)
        info.stream_types= sorted(info.stream_types, key = self.supported_stream_types.index)
        return info
    def prepare_list(self):
        # playlist page: collect links to the individual video pages
        html = get_content(self.url, headers={})
        return matchall(html, ['"a-pic-play" href="([^"]+)"'])
site = Hunantv()
| [
"string.maketrans",
"ykdl.compact.build_opener",
"base64.b64encode",
"ykdl.util.match.matchall",
"uuid.uuid4",
"ykdl.videoinfo.VideoInfo",
"ykdl.util.match.match1",
"ykdl.compact.HTTPCookieProcessor",
"time.time",
"ykdl.util.html.get_content"
] | [((565, 590), 'string.maketrans', 'maketrans', (["b'+/='", "b'_~-'"], {}), "(b'+/=', b'_~-')\n", (574, 590), False, 'from string import maketrans\n'), ((630, 642), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (640, 642), False, 'import uuid\n'), ((1367, 1387), 'ykdl.videoinfo.VideoInfo', 'VideoInfo', (['self.name'], {}), '(self.name)\n', (1376, 1387), False, 'from ykdl.videoinfo import VideoInfo\n'), ((3089, 3122), 'ykdl.util.html.get_content', 'get_content', (['self.url'], {'headers': '{}'}), '(self.url, headers={})\n', (3100, 3122), False, 'from ykdl.util.html import default_proxy_handler, get_content\n'), ((3139, 3186), 'ykdl.util.match.matchall', 'matchall', (['html', '[\'"a-pic-play" href="([^"]+)"\']'], {}), '(html, [\'"a-pic-play" href="([^"]+)"\'])\n', (3147, 3186), False, 'from ykdl.util.match import match1, matchall\n'), ((707, 718), 'time.time', 'time.time', ([], {}), '()\n', (716, 718), False, 'import time\n'), ((1200, 1221), 'ykdl.compact.HTTPCookieProcessor', 'HTTPCookieProcessor', ([], {}), '()\n', (1219, 1221), False, 'from ykdl.compact import install_opener, build_opener, HTTPCookieProcessor\n'), ((1326, 1349), 'ykdl.compact.build_opener', 'build_opener', (['*handlers'], {}), '(*handlers)\n', (1338, 1349), False, 'from ykdl.compact import install_opener, build_opener, HTTPCookieProcessor\n'), ((1449, 1509), 'ykdl.util.match.match1', 'match1', (['self.url', '"""https?://www.mgtv.com/b/\\\\d+/(\\\\d+).html"""'], {}), "(self.url, 'https?://www.mgtv.com/b/\\\\d+/(\\\\d+).html')\n", (1455, 1509), False, 'from ykdl.util.match import match1, matchall\n'), ((1851, 1876), 'ykdl.util.html.get_content', 'get_content', (['api_info_url'], {}), '(api_info_url)\n', (1862, 1876), False, 'from ykdl.util.html import default_proxy_handler, get_content\n'), ((2296, 2323), 'ykdl.util.html.get_content', 'get_content', (['api_source_url'], {}), '(api_source_url)\n', (2307, 2323), False, 'from ykdl.util.html import default_proxy_handler, get_content\n'), ((795, 814), 
'base64.b64encode', 'base64.b64encode', (['s'], {}), '(s)\n', (811, 814), False, 'import base64\n'), ((1564, 1585), 'ykdl.util.html.get_content', 'get_content', (['self.url'], {}), '(self.url)\n', (1575, 1585), False, 'from ykdl.util.html import default_proxy_handler, get_content\n'), ((1613, 1669), 'ykdl.util.match.match1', 'match1', (['html', '"""vid=(\\\\d+)"""', '"""vid="(\\\\d+)"""', '"""vid: (\\\\d+)"""'], {}), '(html, \'vid=(\\\\d+)\', \'vid="(\\\\d+)\', \'vid: (\\\\d+)\')\n', (1619, 1669), False, 'from ykdl.util.match import match1, matchall\n'), ((2663, 2699), 'ykdl.util.html.get_content', 'get_content', (["(domain + lstream['url'])"], {}), "(domain + lstream['url'])\n", (2674, 2699), False, 'from ykdl.util.html import default_proxy_handler, get_content\n')] |
from instagram.client import InstagramAPI
from pprint import pprint

# SECURITY NOTE(review): an API access token is hard-coded in source; it
# should be loaded from the environment/config and this one revoked.
test_access_token = "1890268.7c3f1ab.e1c64ca8df38410099d98bff8a868bb6"
api = InstagramAPI(access_token=test_access_token)
# fetch and pretty-print the authenticated user's recent media
pprint( api.user_recent_media())
| [
"instagram.client.InstagramAPI"
] | [((147, 191), 'instagram.client.InstagramAPI', 'InstagramAPI', ([], {'access_token': 'test_access_token'}), '(access_token=test_access_token)\n', (159, 191), False, 'from instagram.client import InstagramAPI\n')] |
""" Actor registry for rodario framework """
# local
from rodario import get_redis_connection
from rodario.exceptions import RegistrationException
# pylint: disable=C1001
class _RegistrySingleton(object):

    """ Singleton for actor registry """

    def __init__(self, prefix=None):
        """
        Initialize the registry.

        :param str prefix: Optional prefix for redis key names
        """
        self._redis = get_redis_connection()
        # Treat a missing prefix as empty: the original formatted None into
        # the key, yielding 'Noneactors' instead of 'actors'.
        self._list = '{prefix}actors'.format(prefix=prefix or '')

    @property
    def actors(self):
        """
        Retrieve a list of registered actors.

        :rtype: :class:`set`
        """
        return self._redis.smembers(self._list)

    def register(self, uuid):
        """
        Register a new actor.

        :param str uuid: The UUID of the actor to register
        :raises RegistrationException: if the UUID is already in the set
        """
        # sadd returns 0 when the member was already present
        if self._redis.sadd(self._list, uuid) == 0:
            raise RegistrationException('Failed adding member to set')

    def unregister(self, uuid):
        """
        Unregister an existing actor.

        :param str uuid: The UUID of the actor to unregister
        """
        self._redis.srem(self._list, uuid)

    def exists(self, uuid):
        """
        Test whether an actor exists in the registry.

        :param str uuid: UUID of the actor to check for
        :rtype: :class:`bool`
        """
        return self._redis.sismember(self._list, uuid) == 1

    # pylint: disable=R0201
    def get_proxy(self, uuid):
        """
        Return an ActorProxy for the given UUID.

        :param str uuid: The UUID to return a proxy object for
        :rtype: :class:`rodario.actors.ActorProxy`
        """
        # deferred import avoids a cyclic dependency with rodario.actors
        proxy_module = __import__('rodario.actors',
                                  fromlist=('ActorProxy',))
        return proxy_module.ActorProxy(uuid=uuid)
# pylint: disable=R0903
class Registry(object):
    """ Actor registry class (singleton wrapper) """
    # cached singleton instance shared by all Registry() calls
    _instance = None
    def __new__(cls, prefix=None):
        """
        Retrieve the singleton instance for Registry.

        NOTE: `prefix` only takes effect on the first construction; later
        calls return the cached instance and silently ignore the argument.

        :param str prefix: Optional prefix for redis key names
        :rtype: :class:`rodario.registry._RegistrySingleton`
        """
        if not cls._instance:
            cls._instance = _RegistrySingleton(prefix=prefix)
        return cls._instance
| [
"rodario.get_redis_connection",
"rodario.exceptions.RegistrationException"
] | [((432, 454), 'rodario.get_redis_connection', 'get_redis_connection', ([], {}), '()\n', (452, 454), False, 'from rodario import get_redis_connection\n'), ((917, 969), 'rodario.exceptions.RegistrationException', 'RegistrationException', (['"""Failed adding member to set"""'], {}), "('Failed adding member to set')\n", (938, 969), False, 'from rodario.exceptions import RegistrationException\n')] |
#!/usr/bin/env python3
# Copyright 2019, <NAME> <<EMAIL>>, <NAME> <<EMAIL>>
# SPDX-License-Identifier: BSD-2-Clause
# Reads ms task measurement results from CSV files.
#
# If the path to the results is not given it is read from the following environment variables:
# * SCHED_MSRESULTS
# * SCHED_RESULTS and SCHED_HOST: $SCHED_RESULTS/$SCHED_HOST/ms_results
#
# Use MSResults.load_results() get a MSResults object containing the results.
import os
import os.path
import re
import csv
# Regex for format of CSV files
# e.g. "ms_markov(1024)@IntelXeon_energy.csv"
# e.g. "ms_gaussblur(512)@NvidiaTesla_time.csv"
# Raw string so \( \) \. are regex escapes (non-raw backslashes warn on
# modern Python), and the dot before 'csv' is now matched literally.
re_msresult_filename = re.compile(r"^ms_([^(]+)\(([0-9]+)\)@([^_]+)_(energy|time)\.csv")
# Contains measurement results read from energy and time CSV files
class MSResult:
    """Measurement rows for one (task, size, resource) combination read from
    the *_time.csv and *_energy.csv files, plus per-column averages."""

    def __init__(self, task, size, res):
        self.task = task
        self.size = size
        self.res = res
        self.time = []       # all rows of the *_time.csv file
        self.energy = []     # all rows of the *_energy.csv file
        self.avgtime = []    # per-column means over self.time
        self.avgenergy = []  # per-column means over self.energy

    def avg_time(self):
        """Average of column 3 of the time measurements."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[3]

    def avg_init(self):
        """Average of column 4 of the time measurements."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[4]

    def avg_fini(self):
        """Average of column 6 of the time measurements."""
        if len(self.avgtime) == 0:
            self.computeAvg()
        return self.avgtime[6]

    def avg_energy(self):
        """Average of column 2 of the energy measurements."""
        if len(self.avgenergy) == 0:
            self.computeAvg()
        return self.avgenergy[2]

    @staticmethod
    def _read_rows(csvpath):
        """Parse one measurement CSV: skip the two header rows and return the
        remaining rows as lists of floats (empty cells dropped)."""
        rows = []
        with open(csvpath, "r") as f:
            csvr = csv.reader(f, delimiter=";")
            for ix, row in enumerate(csvr):
                if ix < 2:
                    continue
                rows.append([float(s) for s in row if s != ""])
        return rows

    @staticmethod
    def read(task, size, resource, mspath):
        """Read the time and energy CSVs for (task, size, resource) from
        mspath and return an MSResult, or None if neither file yielded rows.
        Historically called as MSResult.read(...); the @staticmethod
        decorator makes that calling convention explicit. A parse error in a
        file discards that file's rows and is reported on stdout."""
        res = MSResult(task, size, resource)
        base = "ms_" + task + "(" + str(size) + ")@" + resource
        time = []
        energy = []
        try:
            time = MSResult._read_rows(os.path.join(mspath, base + "_time.csv"))
        except Exception as e:
            print("MSResults read failed:", e, "time", task, resource, size)
        try:
            energy = MSResult._read_rows(os.path.join(mspath, base + "_energy.csv"))
        except Exception as e:
            print("MSResults read failed:", e, "energy", task, resource, size)
        if len(time) == 0 and len(energy) == 0:
            return None
        res.time = time
        res.energy = energy
        res.computeAvg()
        return res

    @staticmethod
    def _column_means(rows, ncols=7):
        """Per-column arithmetic means over rows; 0.0 for an empty column."""
        means = []
        for c in range(ncols):
            vals = [r[c] for r in rows]
            means.append(sum(vals) / len(vals) if vals else 0.0)
        return means

    def computeAvg(self):
        """(Re)compute the 7 per-column averages for time and energy rows."""
        self.avgtime = MSResult._column_means(self.time)
        self.avgenergy = MSResult._column_means(self.energy)
# Contains list of available results and loaded task measurments
# MSResult objects are created lazily on first access
class MSResults:
    """Index of the (task, size, resource) measurement results found under a
    directory; MSResult objects are loaded lazily on first access."""

    def __init__(self, mspath=None):
        self.mspath = mspath
        self.result_list = []  # available (task, size, resource) tuples
        self.results = {}      # cache: (task, size, resource) -> MSResult

    def task_sizes(self, task):
        """Return the sorted list of available sizes for $task (any resource)."""
        sizes = []
        for t in self.result_list:
            if t[0] == task and t[1] not in sizes:
                sizes.append(t[1])
        return sorted(sizes)

    def task_res_sizes(self, task, res):
        """Return the sorted list of available sizes for $task on resource $res."""
        sizes = []
        for t in self.result_list:
            if t[0] == task and t[2] == res and t[1] not in sizes:
                sizes.append(t[1])
        return sorted(sizes)

    @staticmethod
    def load_results(mspath=None):
        """Scan a ms_results directory and return an MSResults index, or None.
        If $mspath is not given it comes from SCHED_MSRESULTS, or from
        SCHED_RESULTS/SCHED_HOST/ms_results. Historically called as
        MSResults.load_results(); @staticmethod makes that explicit."""
        if mspath is None:
            if "SCHED_MSRESULTS" in os.environ:
                mspath = os.environ["SCHED_MSRESULTS"]
            elif "SCHED_RESULTS" in os.environ and "SCHED_HOST" in os.environ:
                # both variables are required to build the path (the original
                # used 'or' here and then raised KeyError if only one was set)
                mspath = os.path.join(os.environ["SCHED_RESULTS"],
                                      os.environ["SCHED_HOST"], "ms_results")
        if mspath is None:
            print("Error: SCHED_MSRESULTS or SCHED_RESULTS and SCHED_HOST environment variables not defined, can't locate ms results")
            return None
        if not os.path.isdir(mspath):
            print("Error: ms results path seems not to exist: ", mspath)
            print("Check SCHED_MSRESULTS, SCHED_RESULTS and SCHED_HOST environment variables")
            return None
        msres = MSResults(mspath=mspath)
        allresults = []
        results = []
        # Collect files matching the $re_msresult_filename regex
        for dirname, dirnames, filenames in os.walk(mspath):
            for name in filenames:
                match = re_msresult_filename.match(name)
                if match is None:
                    continue
                task, size, res, restype = match.groups()
                entry = (task, int(size), res)
                if entry not in results:
                    results.append(entry)
                allresults.append(match.groups())
        # Keep only entries for which BOTH the energy and the time CSV exist
        for entry in results:
            task, size, res = entry
            eentry = (task, str(size), res, "energy")
            tentry = (task, str(size), res, "time")
            if eentry in allresults and tentry in allresults:
                msres.result_list.append(entry)
        return msres

    def result(self, task, size, res):
        """Return (and cache) the MSResult for $task/$size/$res, or None if
        that combination is not in result_list."""
        if (task, size, res) not in self.result_list:
            print("task", task, size, res, "not in ms results" )
            return None
        # load lazily and memoize
        if (task, size, res) not in self.results:
            self.results[(task, size, res)] = MSResult.read(task, size, res, self.mspath)
        return self.results[(task, size, res)]
| [
"re.compile",
"os.path.join",
"os.path.isdir",
"csv.reader",
"os.walk"
] | [((637, 702), 're.compile', 're.compile', (['"""^ms_([^(]+)\\\\(([0-9]+)\\\\)@([^_]+)_(energy|time).csv"""'], {}), "('^ms_([^(]+)\\\\(([0-9]+)\\\\)@([^_]+)_(energy|time).csv')\n", (647, 702), False, 'import re\n'), ((4642, 4657), 'os.walk', 'os.walk', (['mspath'], {}), '(mspath)\n', (4649, 4657), False, 'import os\n'), ((1708, 1736), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (1718, 1736), False, 'import csv\n'), ((2182, 2210), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""";"""'}), "(f, delimiter=';')\n", (2192, 2210), False, 'import csv\n'), ((4260, 4281), 'os.path.isdir', 'os.path.isdir', (['mspath'], {}), '(mspath)\n', (4273, 4281), False, 'import os\n'), ((4007, 4092), 'os.path.join', 'os.path.join', (["os.environ['SCHED_RESULTS']", "os.environ['SCHED_HOST']", '"""ms_results"""'], {}), "(os.environ['SCHED_RESULTS'], os.environ['SCHED_HOST'],\n 'ms_results')\n", (4019, 4092), False, 'import os\n')] |
import time
from prometheus_client import start_http_server, Gauge, Enum
from temper import Temper
# NOTE(review): no __main__ guard calling main() is visible in this chunk;
# confirm the exporter is actually started somewhere.
def main():
    """Run a Prometheus exporter for TEMPer USB sensors on port 9204.

    Polls the sensors in an endless loop and republishes internal
    temperature/humidity per device, labelled by its USB identifiers.
    """
    port = 9204
    t = Temper()
    # labels identifying the individual USB device in each metric
    label_names = ['vendorid','productid','busnum','devnum']
    temp_c = Gauge('temper_internal_temperature_celsius', 'Temperature in °C', label_names)
    humid = Gauge('temper_internal_humidity_percent', 'Humidity in percent', label_names)
    report_time = Gauge('temper_time', 'Time of report', label_names)
    print('Listening on port %d' % port)
    start_http_server(port)
    while True:
        # assumes Temper.read() yields dicts keyed by the label names plus
        # 'internal temperature'/'internal humidity' -- per temper library
        data = t.read()
        # print(data)
        for d in data:
            l = []
            for label in label_names:
                l.append(str(d[label]))
            # print(l)
            temp_c.labels(*l).set(d['internal temperature'])
            humid.labels(*l).set(d['internal humidity'])
            report_time.labels(*l).set_to_current_time()
        time.sleep(500)  # poll interval: 500 seconds between sensor reads
| [
"prometheus_client.start_http_server",
"temper.Temper",
"time.sleep",
"prometheus_client.Gauge"
] | [((136, 144), 'temper.Temper', 'Temper', ([], {}), '()\n', (142, 144), False, 'from temper import Temper\n'), ((219, 297), 'prometheus_client.Gauge', 'Gauge', (['"""temper_internal_temperature_celsius"""', '"""Temperature in °C"""', 'label_names'], {}), "('temper_internal_temperature_celsius', 'Temperature in °C', label_names)\n", (224, 297), False, 'from prometheus_client import start_http_server, Gauge, Enum\n'), ((310, 387), 'prometheus_client.Gauge', 'Gauge', (['"""temper_internal_humidity_percent"""', '"""Humidity in percent"""', 'label_names'], {}), "('temper_internal_humidity_percent', 'Humidity in percent', label_names)\n", (315, 387), False, 'from prometheus_client import start_http_server, Gauge, Enum\n'), ((406, 457), 'prometheus_client.Gauge', 'Gauge', (['"""temper_time"""', '"""Time of report"""', 'label_names'], {}), "('temper_time', 'Time of report', label_names)\n", (411, 457), False, 'from prometheus_client import start_http_server, Gauge, Enum\n'), ((503, 526), 'prometheus_client.start_http_server', 'start_http_server', (['port'], {}), '(port)\n', (520, 526), False, 'from prometheus_client import start_http_server, Gauge, Enum\n'), ((915, 930), 'time.sleep', 'time.sleep', (['(500)'], {}), '(500)\n', (925, 930), False, 'import time\n')] |
#!/usr/bin/env python
"""SPECFIT.PY - Generic stellar abundance determination software
"""
from __future__ import print_function
__authors__ = '<NAME> <<EMAIL>>'
__version__ = '20200711' # yyyymmdd
import os
import shutil
import contextlib, io, sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.table import Table
from dlnpyutils.minpack import curve_fit
from dlnpyutils.least_squares import least_squares
from scipy.interpolate import interp1d
from dlnpyutils import utils as dln, bindata, astro
import doppler
from doppler.spec1d import Spec1D
from doppler import (cannon,utils,reader)
import copy
import logging
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.legend import Legend
import tempfile
from . import models
from synple import synple
# Suppress the spurious numpy "size changed" binary-compatibility warnings
# (emitted by extensions built against a different numpy; upstream bug).
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5  # speed of light in km/s
def synmodel(spec,params,alinefile=None,mlinefile=None,verbose=False,normalize=True):
    """
    Synthetic spectrum model.

    Parameters
    ----------
    spec : Spec1D object or str
       The observed Spec1D spectrum to match or the name of a spectrum file.
    params : dict
       Dictionary of initial values to use or parameters/elements to hold fixed.
    normalize : bool, optional
       Renormalize the model spectrum using the observed spectrum's continuum function.  The
       synthetic spectrum will already have been normalized using the "true" continuum.  This
       step is to simulate any systematic effects of the spectrum normalization algorithm that
       the observed spectrum undergoes.  Default is True.
    verbose : int, optional
       Verbosity level (0, 1, or 2).  The default is 0 and verbose=2 is for debugging.
    alinefile : str, optional
       The atomic linelist to use.  Default is None which means the default synple linelist is used.
    mlinefile : str, optional
       The molecular linelist to use.  Default is None which means the default synple linelist is used.

    Returns
    -------
    model : Spec1D object
       The synthetic spectrum.  The "true" continuum is in model.cont.

    Example
    -------
    .. code-block:: python

         model = synmodel(spec,params)

    """
    # Read in the spectrum if a filename was given
    if type(spec) is str:
        filename = spec
        spec = doppler.read(filename)
        if spec is None:
            print('Problem loading '+filename)
            return
    params = dict((key.upper(), value) for (key, value) in params.items())  # all keys upper-case
    # Initialize the fitter with a single "dummy" fitting variable; we only
    # want one synthetic spectrum at the given TEFF, not an actual fit
    fitparams = ['TEFF']
    spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
                          alinefile=alinefile,mlinefile=mlinefile)
    spfitter.norm = normalize   # renormalize the synthetic spectrum like the data
    model = spfitter.model(spec.wave.flatten(),params['TEFF'],retobj=True)
    model.instrument = 'Model'
    return model
class SpecFitter:
    def __init__ (self,spec,params,fitparams=None,norm=True,verbose=False,
                  alinefile=None,mlinefile=None):
        """Set up a fitter for an observed spectrum.

        spec : Spec1D observed spectrum.  params : dict of initial/fixed
        parameters (keys upper-cased by the property setter).  fitparams :
        names of parameters to fit (defaults to all of params).  norm :
        renormalize synthetic spectra.  alinefile/mlinefile : optional
        atomic/molecular linelist files.
        """
        # Parameters
        self.params = params
        if fitparams is not None:
            self.fitparams = fitparams
        else:
            self.fitparams = list(params.keys()) # by default fit all parameters
        self.nsynfev = 0 # number of synthetic spectra made
        self.njac = 0 # number of times jacobian called
        # Save spectrum information
        self.spec = spec.copy()
        self.flux = spec.flux.flatten()
        self.err = spec.err.flatten()
        self.wave = spec.wave.flatten()
        self.lsf = spec.lsf.copy()
        self.lsf.wavevac = spec.wavevac # need this later for synspec prep
        self.wavevac = spec.wavevac
        self.verbose = verbose
        self.norm = norm # normalize
        self.continuum_func = spec.continuum_func
        self.alinefile = alinefile
        self.mlinefile = mlinefile
        # Convert vacuum to air wavelengths
        # synspec uses air wavelengths
        if spec.wavevac is True:
            wave = astro.vactoair(spec.wave.copy().flatten()).reshape(spec.wave.shape)
        else:
            wave = spec.wave.copy()
        # force a 2-D (npix, norder) wavelength array
        if wave.ndim==1:
            wave = np.atleast_2d(wave).T
        # Figure out the wavelength parameters
        npix = spec.npix
        norder = spec.norder
        xp = np.arange(npix//20)*20   # sample the LSF every 20 pixels
        wr = np.zeros((spec.lsf.norder,2),np.float64)   # per-order wavelength range
        dw = np.zeros(spec.lsf.norder,np.float64)       # per-order median dispersion
        mindw = np.zeros(norder,np.float64)             # per-order finest step needed
        for o in range(spec.norder):
            dw[o] = np.median(dln.slope(wave[:,o]))
            wr[o,0] = np.min(wave[:,o])
            wr[o,1] = np.max(wave[:,o])
            fwhm = spec.lsf.fwhm(wave[xp,o],xtype='Wave',order=o)
            # FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
            if spec.lsf.xtype.lower().find('pix')>-1:
                fwhm *= np.abs(dw[o])
            # need at least ~4 pixels per LSF FWHM across the spectrum
            # using 3 affects the final profile shape
            mindw[o] = np.min(fwhm/4)
        # model wavelength grid parameters used by mkinputs()
        self._dwair = np.min(mindw) # IN AIR WAVELENGTHS!!
        self._w0air = np.min(wave)
        self._w1air = np.max(wave)
        # parameters to save
        self._all_pars = []
        self._all_model = []
        self._all_chisq = []
        self._jac_array = None
    @property
    def params(self):
        """Dictionary of initial/fixed parameters (keys stored upper-case)."""
        return self._params

    @params.setter
    def params(self,params):
        """ Dictionary, keys must be all CAPS."""
        self._params = dict((key.upper(), value) for (key, value) in params.items()) # upper-case keys on assignment
    @property
    def fitparams(self):
        """List of parameter names being fitted (stored upper-case)."""
        return self._fitparams

    @fitparams.setter
    def fitparams(self,fitparams):
        """ list, keys must be all CAPS."""
        self._fitparams = [v.upper() for v in fitparams] # upper-case names on assignment
def mkinputs(self,args):
    """Build the INPUTS dictionary for one synthetic-spectrum evaluation.

    Starts from the fixed/initial parameter dictionary, overwrites the
    fitted parameters with the values in ``args`` (ordered as in
    FITPARAMS), and appends the wavelength-grid parameters
    (W0/W1/DW, air wavelengths) derived from the observed spectrum.
    """
    inputs = self.params.copy()            # fixed/initial values
    for k, name in enumerate(self.fitparams):
        inputs[name] = args[k]             # fitted values take precedence
    # Wavelength grid for the synthesis
    inputs['DW'] = self._dwair
    inputs['W0'] = self._w0air
    inputs['W1'] = self._w1air
    return inputs
def chisq(self,model):
    """Return sqrt of the mean squared, error-weighted residual vs ``model``."""
    resid = (self.flux - model) / self.err
    return np.sqrt(np.sum(resid**2) / len(self.flux))
def model(self, xx, *args, retobj=False):
    """ Return a model spectrum flux with the given input arguments.

    Parameters
    ----------
    xx : numpy array
        Wavelength array; not used directly, kept for the curve_fit
        calling convention.
    *args : float
        Values of the fitted parameters, in FITPARAMS order.
    retobj : bool, optional
        If True return the prepared Spec1D object instead of the
        flattened flux array.  Default is False.

    Returns
    -------
    numpy array or Spec1D
        Flattened model flux (default) or the full spectrum object.
    """
    # The input arguments correspond to FITPARAMS
    # This corrects for air/vacuum wavelength differences
    if self.verbose:
        print(args)
    # The arguments correspond to the fitting parameters
    inputs = self.mkinputs(args)
    if self.verbose:
        print(inputs)
    # Create the synthetic spectrum
    synspec = model_spectrum(inputs,verbose=self.verbose, # always returns air wavelengths
                             alinefile=self.alinefile,mlinefile=self.mlinefile)
    self.nsynfev += 1   # count of synthetic-spectrum generations
    # Convolve with the LSF and do air/vacuum wave conversion
    pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
                              continuum_func=self.continuum_func)
    # Save models/pars/chisq for later inspection/debugging
    self._all_pars.append(list(args).copy())
    self._all_model.append(pspec.flux.flatten().copy())
    self._all_chisq.append(self.chisq(pspec.flux.flatten()))
    # Return flattened spectrum
    if retobj:
        return pspec
    else:
        return pspec.flux.flatten()
def getstep(self,name,val,relstep=0.02):
    """Return the finite-difference step size for a parameter.

    Steps are absolute values in each parameter's natural units; they do
    not depend on the current value.

    Parameters
    ----------
    name : str
        Parameter name (all CAPS), e.g. 'TEFF', 'RV', 'VROT', 'MG_H'.
    val : float
        Current parameter value (unused; kept for interface compatibility).
    relstep : float, optional
        Relative step size (unused; kept for interface compatibility).
        Default is 0.02.

    Returns
    -------
    step : float
        Absolute step size for this parameter.
    """
    # Fixed absolute steps per parameter type.
    # (Removed: dead commented-out relative-step logic and a duplicated,
    # unreachable second "return step".)
    if name=='TEFF':
        step = 5.0
    elif name=='RV':
        step = 0.1
    elif name=='VROT':
        step = 0.5
    elif name=='VMICRO':
        step = 0.5
    elif name.endswith('_H'):      # any elemental abundance [X/H]
        step = 0.01
    else:
        step = 0.01
    return step
def jac(self,x,*args):
    """ Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
    is the partial derivative of f[i] with respect to x[j]).

    Uses one-sided finite differences.  For RV/VROT/VMICRO the base
    synthetic spectrum is reused and only re-smoothed/shifted, which
    avoids a full re-synthesis.

    Parameters
    ----------
    x : numpy array
        Flattened observed wavelength array (sets the number of rows).
    *args : float
        Current values of the fitted parameters, in FITPARAMS order.

    Returns
    -------
    jac : numpy array
        (npix, npar) Jacobian matrix, also cached in self._jac_array.
    """
    # Fix: removed a stray no-op "self" expression statement (leftover
    # debugging) that previously sat after the chisq computation.
    if hasattr(self,'logger') is False:
        logger = dln.basiclogger()
    else:
        logger = self.logger
    logger.info(args)
    if self.verbose:
        logger.info(' ')
        logger.info('##### Calculating Jacobian Matrix #####')
        logger.info(' ')
    # A new synthetic spectrum does not need to be generated RV, vmicro or vsini.
    # Some time can be saved by not remaking those.
    # Use a one-sided derivative.
    # Boundaries
    lbounds,ubounds = mkbounds(self.fitparams)
    relstep = 0.02
    npix = len(x)
    npar = len(args)
    # Get INPUTS dictionary and make keys all CAPS
    inputs = self.mkinputs(args)
    inputs = dict((key.upper(), value) for (key, value) in inputs.items())
    # Some important parameters
    w0 = inputs['W0']
    w1 = inputs['W1']
    dw = inputs['DW']
    rv = inputs.get('RV')
    vrot = inputs.get('VROT')
    vmicro = inputs.get('VMICRO')
    # Create synthetic spectrum at current values
    # set vrot=vmicro=rv=0, will modify later if necessary
    if self.verbose:
        logger.info('--- Current values ---')
        logger.info(args)
    tinputs = inputs.copy()
    tinputs['VMICRO'] = 0
    tinputs['VROT'] = 0
    tinputs['RV'] = 0
    origspec = model_spectrum(tinputs,keepextend=True, # always air wavelengths
                              alinefile=self.alinefile,mlinefile=self.mlinefile)
    self.nsynfev += 1
    # Smooth and shift
    smorigspec = smoothshift_spectrum(origspec,vrot=vrot,vmicro=vmicro,rv=rv)
    # Trim to final wavelengths
    smorigspec = trim_spectrum(smorigspec,w0,w1)
    # Convolve with the LSF and do air/vacuum wave conversion
    pspec = prepare_synthspec(smorigspec,self.lsf,norm=self.norm,
                              continuum_func=self.continuum_func)
    # Flatten the spectrum
    f0 = pspec.flux.flatten()
    # Save models/pars/chisq
    self._all_pars.append(list(args).copy())
    self._all_model.append(f0.copy())
    self._all_chisq.append(self.chisq(f0))
    chisq = np.sqrt( np.sum( (self.flux-f0)**2/self.err**2 )/len(self.flux) )
    if self.verbose:
        logger.info('chisq = '+str(chisq))
    # MASK PIXELS!?
    # Initialize jacobian matrix
    jac = np.zeros((npix,npar),np.float64)
    # Loop over parameters
    for i in range(npar):
        pars = np.array(copy.deepcopy(args))
        step = self.getstep(self.fitparams[i],pars[i],relstep)
        # Check boundaries, if above upper boundary
        # go the opposite way
        if pars[i]>ubounds[i]:
            step *= -1
        pars[i] += step
        tinputs = self.mkinputs(pars)
        if self.verbose:
            logger.info(' ')
            logger.info('--- '+str(i+1)+' '+self.fitparams[i]+' '+str(pars[i])+' ---')
            logger.info(pars)
        # VROT/VMICRO/RV, just shift/smooth original spectrum
        if self.fitparams[i]=='VROT' or self.fitparams[i]=='VMICRO' or self.fitparams[i]=='RV':
            tvrot = tinputs.get('VROT')
            tvmicro = tinputs.get('VMICRO')
            trv = tinputs.get('RV')
            # Smooth and shift
            synspec = smoothshift_spectrum(origspec,vrot=tvrot,vmicro=tvmicro,rv=trv)
            # Trim to final wavelengths
            synspec = trim_spectrum(synspec,w0,w1)
        else:
            synspec = model_spectrum(tinputs,alinefile=self.alinefile,
                                     mlinefile=self.mlinefile) # always returns air wavelengths
            self.nsynfev += 1
        # Convert to vacuum wavelengths if necessary
        if self.wavevac:
            synspec.wave = astro.airtovac(synspec.wave)
            synspec.wavevac = True
        # Convolve with the LSF and do air/vacuum wave conversion
        pspec = prepare_synthspec(synspec,self.lsf,norm=self.norm,
                                  continuum_func=self.continuum_func)
        # Flatten the spectrum
        f1 = pspec.flux.flatten()
        # Save models/pars/chisq
        self._all_pars.append(list(pars).copy())
        self._all_model.append(f1.copy())
        self._all_chisq.append(self.chisq(f1))
        # NOTE(review): deliberate debugger drop-in on non-finite model values
        if np.sum(~np.isfinite(f1))>0:
            print('some nans/infs')
            import pdb; pdb.set_trace()
        jac[:,i] = (f1-f0)/step
    if np.sum(~np.isfinite(jac))>0:
        print('some nans/infs')
        import pdb; pdb.set_trace()
    self._jac_array = jac.copy()   # keep a copy
    self.njac += 1
    return jac
def trim_spectrum(spec,w0,w1):
    """Return a copy of a single-order synthetic spectrum trimmed to [w0,w1].

    The trim indices are the pixels closest to w0 and w1.  If those are
    already the first/last pixels, the input object is returned as-is
    (no copy is made).
    """
    _, ilo = dln.closest(spec.wave, w0)
    _, ihi = dln.closest(spec.wave, w1)
    # Already spans exactly the requested range -> nothing to do
    if ilo == 0 and ihi == spec.npix - 1:
        return spec
    keep = slice(ilo, ihi + 1)
    out = spec.copy()
    out.flux = out.flux[keep]
    out.wave = out.wave[keep]
    if out.err is not None:
        out.err = out.err[keep]
    if out.mask is not None:
        out.mask = out.mask[keep]
    if getattr(out, 'cont', None) is not None:
        out.cont = out.cont[keep]
    out.npix = len(out.flux)
    return out
def getabund(inputs,verbose=False):
    """ Grab the abundances out of the input file and return array of abundances.

    Parameters
    ----------
    inputs : dict
        Input parameters.  Must contain 'FEH' or 'FE_H' and 'modelfile';
        may contain per-element [X/H] values (keys ending in '_H') and
        'ALPHA_H'.  NOTE: this function mutates ``inputs`` by inserting
        the individual alpha-element keys when ALPHA_H is given.
    verbose : bool, optional
        Print each individual abundance offset.  Default is False.

    Returns
    -------
    abu : numpy array
        99-element abundance array in linear N(X)/N(Htot) units, with
        H and He taken from the model atmosphere.
    """
    # Create the input 99-element abundance array
    codedir = os.path.dirname(os.path.abspath(__file__))
    pertab = Table.read(codedir+'/data/periodic_table.txt',format='ascii')
    feh = inputs.get('FEH')
    if feh is None:
        feh = inputs.get('FE_H')
    if feh is None:
        raise ValueError('FE_H missing from inputs')
    # Read model atmosphere
    modelfile = inputs.get('modelfile')
    if modelfile is None:
        raise ValueError('modelfile missing from inputs')
    atmostype, teff, logg, vmicro2, mabu, nd, atmos = synple.read_model(modelfile,verbose=verbose)
    mlines = dln.readlines(modelfile)
    # solar abundances (log10 N(X)/N(H))
    # first two are Teff and logg
    # last two are Hydrogen and Helium
    solar_abund = np.array([ 4750., 2.5,
                             -10.99, -10.66, -9.34, -3.61, -4.21,
                             -3.35, -7.48, -4.11, -5.80, -4.44,
                             -5.59, -4.53, -6.63, -4.92, -6.54,
                             -5.64, -7.01, -5.70, -8.89, -7.09,
                             -8.11, -6.40, -6.61, -4.54, -7.05,
                             -5.82, -7.85, -7.48, -9.00, -8.39,
                             -9.74, -8.70, -9.50, -8.79, -9.52,
                             -9.17, -9.83, -9.46, -10.58, -10.16,
                             -20.00, -10.29, -11.13, -10.47, -11.10,
                             -10.33, -11.24, -10.00, -11.03, -9.86,
                             -10.49, -9.80, -10.96, -9.86, -10.94,
                             -10.46, -11.32, -10.62, -20.00, -11.08,
                             -11.52, -10.97, -11.74, -10.94, -11.56,
                             -11.12, -11.94, -11.20, -11.94, -11.19,
                             -12.16, -11.19, -11.78, -10.64, -10.66,
                             -10.42, -11.12, -10.87, -11.14, -10.29,
                             -11.39, -20.00, -20.00, -20.00, -20.00,
                             -20.00, -20.00, -12.02, -20.00, -12.58,
                             -20.00, -20.00, -20.00, -20.00, -20.00,
                             -20.00, -20.00])
    # Deal with alpha abundances
    # only add the individual alpha abundance if it's not already there
    # sometimes we might fit a single alpha element but want to use
    # ALPHA_H to set the rest of them
    if inputs.get('ALPHA_H') is not None:
        alpha = inputs['ALPHA_H']
        elem = ['O','MG','SI','S','CA','TI']
        for k in range(len(elem)):
            if inputs.get(elem[k]+'_H') is None:
                inputs[elem[k]+'_H'] = alpha
    # Scale global metallicity (everything except Teff/logg/H/He slots)
    abu = solar_abund.copy()
    abu[2:] += feh
    # Now offset the elements with [X/Fe], [X/Fe]=[X/H]-[Fe/H]
    g, = np.where( (np.char.array(list(inputs.keys())).find('_H') != -1) &
                   (np.char.array(list(inputs.keys())) != 'FE_H') )
    if len(g)>0:
        # Match the X_H input keys to periodic-table symbols
        ind1,ind2 = dln.match(np.char.array(list(inputs.keys()))[g],np.char.array(pertab['symbol']).upper()+'_H')
        for k in range(len(ind1)):
            key1 = np.char.array(list(inputs.keys()))[g[ind1[k]]]
            # [X/H] input replaces the global [Fe/H] scaling for this element
            abu[ind2[k]] += float(inputs[key1]) - feh
            if verbose:
                print('%s %f' % (key1,float(inputs[key1])))
    # convert to linear
    abu[2:] = 10**abu[2:]
    # Divide by N(H) taken from the model atmosphere's ABUNDANCE SCALE line
    g, = np.where(np.char.array(mlines).find('ABUNDANCE SCALE') != -1)
    nhtot = np.float64(mlines[g[0]].split()[6])
    abu[2:] /= nhtot
    # use model values for H and He
    abu[0:2] = mabu[0:2]
    return abu
def synple_wrapper(inputs,verbose=False,tmpbase='/tmp',alinefile=None,mlinefile=None):
    """ This is a wrapper around synple to generate a new synthetic spectrum.

    Wavelengths are all AIR.  Runs synple inside a temporary working
    directory which is always removed, and the original working
    directory is always restored, even if the synthesis fails.

    Parameters
    ----------
    inputs : dict
        Teff, logg, [Fe/H], optional [X/H] values, and the wavelength
        parameters W0, W1, DW (keys are upper-cased internally).
    verbose : bool, optional
        Verbose output.  Default is False.
    tmpbase : str, optional
        Base directory for the temporary working dir.  Default '/tmp'.
    alinefile, mlinefile : str, optional
        Atomic/molecular linelist filenames overriding the defaults.

    Returns
    -------
    (wave, flux, cont) : tuple of numpy arrays
        Air wavelengths, flux, and continuum from synple.
    """
    # Make temporary directory for synple to work in
    curdir = os.path.abspath(os.curdir)
    tdir = os.path.abspath(tempfile.mkdtemp(prefix="syn",dir=tmpbase))
    os.chdir(tdir)
    # Fix: on an exception the process used to stay chdir'ed into the
    # (never-removed) temp dir; the try/finally guarantees cleanup.
    try:
        # Linelists to use
        linelist = ['gfallx3_bpo.19','kmol3_0.01_30.20']  # default values
        if alinefile is not None:    # atomic linelist input
            linelist[0] = alinefile
        if mlinefile is not None:    # molecular linelist input
            linelist[1] = mlinefile
        if verbose:
            print('Using linelist: ',linelist)
        # Make key names all CAPS
        inputs = dict((key.upper(), value) for (key, value) in inputs.items())
        # Make the model atmosphere file
        teff = inputs['TEFF']
        logg = inputs['LOGG']
        metal = inputs['FE_H']
        tid,modelfile = tempfile.mkstemp(prefix="mod",dir=".")
        os.close(tid)  # close the open file
        # Limit values
        # of course the logg/feh ranges vary with Teff
        mteff = dln.limit(teff,3500.0,60000.0)
        mlogg = dln.limit(logg,0.0,5.0)
        mmetal = dln.limit(metal,-2.5,0.5)
        model, header, tail = models.mkmodel(mteff,mlogg,mmetal,modelfile)
        inputs['modelfile'] = modelfile
        # NOTE(review): deliberate debugger drop-in if the model file is missing
        if os.path.exists(modelfile) is False or os.stat(modelfile).st_size==0:
            print('model atmosphere file does NOT exist')
            import pdb; pdb.set_trace()
        # Create the synspec synthetic spectrum
        w0 = inputs['W0']
        w1 = inputs['W1']
        dw = inputs['DW']
        vmicro = inputs.get('VMICRO')
        vrot = inputs.get('VROT')
        if vrot is None:
            vrot = 0.0
        # Get the abundances
        abu = getabund(inputs,verbose=verbose)
        wave,flux,cont = synple.syn(modelfile,(w0,w1),dw,vmicro=vmicro,vrot=vrot,
                                    abu=list(abu),verbose=verbose,linelist=linelist)
    finally:
        # Always restore cwd first, then delete the temporary directory
        os.chdir(curdir)
        shutil.rmtree(tdir)
    return (wave,flux,cont)
def smoothshift_spectrum(inpspec,vmicro=None,vrot=None,rv=None):
    """ This smoothes the spectrum by Vrot+Vmicro and
    shifts it by RV.

    Parameters
    ----------
    inpspec : Spec1D
        Input spectrum; never modified, a copy is returned.
    vmicro : float, optional
        Gaussian (microturbulent) broadening in km/s.
    vrot : float, optional
        Rotational broadening (vsini) in km/s.
    rv : float, optional
        Doppler shift in km/s.

    Returns
    -------
    spec : Spec1D
        Broadened and shifted copy of the input spectrum.
    """
    #vmicro = inputs.get('VMICRO')
    #vrot = inputs.get('VROT')
    #rv = inputs.get('RV')
    # Nothing to do
    if vmicro is None and vrot is None and rv is None:
        return inpspec.copy()
    # Initialize output spectrum
    spec = inpspec.copy()
    # Some broadening
    if vmicro is not None or vrot is not None:
        flux = utils.broaden(spec.wave,spec.flux,vgauss=vmicro,vsini=vrot)
        spec.flux = flux
    ## Vrot/Vsini (km/s) and Vmicro (in km/s)
    #if vrot is not None or vmicro is not None:
    #    wave, flux = synple.call_rotin(wave, flux, vrot, fwhm, space, steprot, stepfwhm, clean=False, reuseinputfiles=True)
    # Doppler shift only (in km/s)
    if rv is not None:
        if rv != 0.0:
            shiftwave = spec.wave*(1+rv/cspeed)
            # gd = pixels covered by the shifted grid, bd = uncovered pixels
            gd,ngd,bd,nbd = dln.where( (spec.wave >= np.min(shiftwave)) & (spec.wave <= np.max(shiftwave)), comp=True)
            # Doppler shift and interpolate onto wavelength array
            if hasattr(spec,'cont'):
                cont = synple.interp_spl(spec.wave[gd], shiftwave, spec.cont)
                spec.cont *= 0
                spec.cont[gd] = cont
                # interpolate the continuum to the missing pixels
                if nbd>0:
                    contmissing = dln.interp(spec.wave[gd],spec.cont[gd],spec.wave[bd],kind='linear',assume_sorted=False)
                    spec.cont[bd] = contmissing
            flux = synple.interp_spl(spec.wave[gd], shiftwave, spec.flux)
            spec.flux *= 0
            spec.flux[gd] = flux
            if nbd>0:
                # Fill in missing values with interpolated values
                # (quadratic polynomial fit to the good pixels)
                if np.sum(np.isfinite(spec.flux[gd]))>0:
                    coef = dln.poly_fit(spec.wave[gd],spec.flux[gd],2)
                    fluxmissing = dln.poly(spec.wave[bd],coef)
                    spec.flux[bd] = fluxmissing
                # Mask these pixels
                if spec.mask is None:
                    spec.mask = np.zeros(len(spec.flux),bool)
                spec.mask[bd] = True
    return spec
def model_spectrum(inputs,verbose=False,keepextend=False,alinefile=None,mlinefile=None):
    """
    This creates a model spectrum given the inputs:
    RV, Teff, logg, vmicro, vsini, [Fe/H], [X/Fe], w0, w1, dw.
    This creates the new synthetic spectrum and then convolves with vmicro, vsini and
    shifts to velocity RV.
    The returned spectrum always uses AIR wavelengths!!!

    Parameters
    ----------
    inputs : dictionary
        Input parameters, stellar parameters, abundances.
    keepextend : bool, optional
        Keep the extensions on the ends.  Default is False.
    alinefile : str, optional
        Atomic linelist filename.  Default is None (use synple's default one).
    mlinefile : str, optional
        Molecular linelist filename.  Default is None (use synple's default one).
    verbose : bool, optional
        Verbose output.  Default is False.

    Returns
    -------
    synspec : Spec1D
        The synthetic spectrum as Spec1D object.
    """
    # Make key names all CAPS
    inputs = dict((key.upper(), value) for (key, value) in inputs.items())
    # Extend on the ends for RV/convolution purposes
    w0 = inputs['W0']
    w1 = inputs['W1']
    dw = inputs['DW']
    rv = inputs.get('RV')
    vrot = inputs.get('VROT')
    vmicro = inputs.get('VMICRO')
    inputsext = inputs.copy()
    if rv is not None or vrot is not None or vmicro is not None:
        # Pad the synthesis range so a later ~1500 km/s shift/convolution
        # still has data at the edges.
        # NOTE(review): numext = ceil(w1*1500/c) is a wavelength span in
        # Angstroms, but it is then used as a *pixel* count (numext*dw) --
        # confirm a division by dw was not intended here.
        numext = int(np.ceil(w1*(1.0+1500/cspeed)-w1))
        inputsext['W0'] = w0-numext*dw
        inputsext['W1'] = w1+numext*dw
        if verbose:
            print('Extending wavelength by '+str(numext)+' pixels on each end')
    # Create the synthetic spectrum
    # set vrot=vmicro=0, will convolve later if necessary
    inputsext['VMICRO'] = 0
    inputsext['VROT'] = 0
    wave1,flux1,cont1 = synple_wrapper(inputsext,verbose=verbose,alinefile=alinefile,
                                       mlinefile=mlinefile)
    # Get final wavelength array
    wv1, ind1 = dln.closest(wave1,w0)
    wv2, ind2 = dln.closest(wave1,w1)
    # Continuum-normalized flux; lsfpars is a placeholder (set downstream)
    synspec = Spec1D(flux1/cont1,err=flux1*0,wave=wave1,lsfpars=np.array(0.0))
    synspec.cont = cont1
    synspec.wavevac = False   # synple works in AIR wavelengths
    # Smooth and shift
    if rv is not None or vrot is not None or vmicro is not None:
        synspec = smoothshift_spectrum(synspec,vrot=vrot,vmicro=vmicro,rv=rv)
    # Trim to final wavelengths
    if keepextend is False:
        synspec = trim_spectrum(synspec,w0,w1)
    return synspec
def prepare_synthspec(synspec,lsf,norm=True,continuum_func=None):
    """ Prepare a synthetic spectrum to be compared to an observed spectrum.

    Converts air<->vacuum wavelengths to match the observed spectrum,
    rebins the model if it heavily oversamples the observed LSF,
    convolves with the LSF, interpolates onto the observed wavelength
    grid, and optionally renormalizes.

    Parameters
    ----------
    synspec : Spec1D
        Synthetic spectrum (modified in place for the wavelength conversion).
    lsf : LSF object
        LSF of the observed spectrum; supplies the wavelength grid and wavevac.
    norm : bool, optional
        Renormalize using the continuum function.  Default is True.
    continuum_func : callable, optional
        Continuum-normalization function attached to the output spectrum.

    Returns
    -------
    pspec : Spec1D
        Prepared spectrum on the observed wavelength grid.
    """
    # Convolve with LSF and do air<->vacuum wavelength conversion
    # Convert wavelength from air->vacuum or vice versa
    if synspec.wavevac != lsf.wavevac:
        # Air -> Vacuum
        if synspec.wavevac is False:
            synspec.wave = astro.airtovac(synspec.wave)
            synspec.wavevac = True
        # Vacuum -> Air
        else:
            # NOTE(review): this assigns .dispersion while the branch above
            # assigns .wave -- confirm the asymmetry is intended.
            synspec.dispersion = astro.vactoair(synspec.wave)
            synspec.wavevac = False
    # Initialize the output spectrum
    if lsf.wave.ndim==2:
        npix,norder = lsf.wave.shape
    else:
        npix = len(lsf.wave)
        norder = 1
    pspec = Spec1D(np.zeros((npix,norder),np.float32),err=np.zeros((npix,norder),np.float32),
                   wave=lsf.wave,lsfpars=lsf.pars,lsftype=lsf.lsftype,lsfxtype=lsf.xtype)
    pspec.cont = np.zeros((npix,norder),np.float32)
    if continuum_func is not None:
        pspec.continuum_func = continuum_func
    # Loop over orders
    if lsf.wave.ndim==1:
        wave = np.atleast_2d(lsf.wave.copy()).T
    else:
        wave = lsf.wave.copy()
    for o in range(lsf.norder):
        wobs = wave[:,o]
        dw = np.median(dln.slope(wobs))
        # Trim the model to this order's range (plus a 2-pixel buffer)
        wv1,ind1 = dln.closest(synspec.wave,np.min(wobs)-2*np.abs(dw))
        wv2,ind2 = dln.closest(synspec.wave,np.max(wobs)+2*np.abs(dw))
        modelflux = synspec.flux[ind1:ind2+1]
        modelwave = synspec.wave[ind1:ind2+1]
        modelcont = synspec.cont[ind1:ind2+1]
        # Rebin, if necessary
        # get LSF FWHM (A) for a handful of positions across the spectrum
        xp = np.arange(npix//20)*20
        fwhm = lsf.fwhm(wobs[xp],xtype='Wave',order=o)
        # FWHM is in units of lsf.xtype, convert to wavelength/angstroms, if necessary
        if lsf.xtype.lower().find('pix')>-1:
            fwhm *= np.abs(dw)
        # convert FWHM (A) in number of model pixels at those positions
        dwmod = dln.slope(modelwave)
        dwmod = np.hstack((dwmod,dwmod[-1]))
        xpmod = dln.interp(modelwave,np.arange(len(modelwave)),wobs[xp],kind='cubic',assume_sorted=False,extrapolate=True)
        xpmod = np.round(xpmod).astype(int)
        fwhmpix = np.abs(fwhm/dwmod[xpmod])
        # need at least ~4 pixels per LSF FWHM across the spectrum
        # using 3 affects the final profile shape
        nbin = np.round(np.min(fwhmpix)//4).astype(int)
        if np.min(fwhmpix) < 3.7:
            warnings.warn('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
        if np.min(fwhmpix) < 2.8:
            raise Exception('Model has lower resolution than the observed spectrum. Only '+str(np.min(fwhmpix))+' model pixels per resolution element')
        if nbin>1:
            # NOTE(review): npix2 is computed from the *full* synspec.flux
            # length, not the trimmed modelflux -- confirm this is intended.
            npix2 = np.round(len(synspec.flux) // nbin).astype(int)
            modelflux = dln.rebin(modelflux[0:npix2*nbin],npix2)
            modelwave = dln.rebin(modelwave[0:npix2*nbin],npix2)
            modelcont = dln.rebin(modelcont[0:npix2*nbin],npix2)
        # Convolve
        lsf2d = lsf.anyarray(modelwave,xtype='Wave',order=o,original=False)
        cflux = utils.convolve_sparse(modelflux,lsf2d)
        # Interpolate onto final wavelength array
        flux = synple.interp_spl(wobs, modelwave, cflux)
        cont = synple.interp_spl(wobs, modelwave, modelcont)
        pspec.flux[:,o] = flux
        pspec.cont[:,o] = cont
    pspec.normalized = True
    # Normalize
    if norm is True:
        newcont = pspec.continuum_func(pspec)
        pspec.flux /= newcont
        pspec.cont *= newcont
    return pspec
def mkbounds(params,paramlims=None):
    """ Make lower and upper boundaries for parameters.

    Parameters
    ----------
    params : list of str
        Parameter names (case-insensitive), e.g. ['TEFF','LOGG','RV','MG_H'].
    paramlims : dict, optional
        (lower, upper) limits that override the defaults, keyed by
        parameter name (case-insensitive).

    Returns
    -------
    bounds : tuple of numpy arrays
        (lbounds, ubounds), each of length len(params), in input order.
        Unrecognized parameters get (0, 0).
    """
    # Default limits for the known stellar parameters
    # (replaces six duplicated np.where blocks with one table-driven loop)
    default_limits = {'TEFF':   (3500, 60000),
                      'LOGG':   (0, 5),
                      'FE_H':   (-3, 1),
                      'VMICRO': (0, 5),
                      'VROT':   (0, 500),
                      'RV':     (-1500, 1500)}
    params = np.char.array(params).upper()
    n = len(params)
    lbounds = np.zeros(n,np.float64)
    ubounds = np.zeros(n,np.float64)
    # User-supplied limits, keys forced to upper case
    userlims = {}
    if paramlims is not None:
        userlims = dict((key.upper(), value) for (key, value) in paramlims.items())
    for i,name in enumerate(params):
        if name in userlims:
            # Explicit user limits always win
            lbounds[i],ubounds[i] = userlims[name][0],userlims[name][1]
        elif name in default_limits:
            lbounds[i],ubounds[i] = default_limits[name]
        elif '_H' in name and name != 'FE_H':
            # Any other elemental abundance [X/H]
            lbounds[i],ubounds[i] = -3,10
        # anything else keeps the (0, 0) initialization
    bounds = (lbounds,ubounds)
    return bounds
def mkdxlim(fitparams):
    """Return the per-parameter convergence thresholds for curve_fit.

    One entry per name in ``fitparams``; fitting stops when every
    parameter changes by less than its threshold.
    """
    # Known parameters get specific thresholds; abundances ('*_H') use
    # 0.005 and anything else falls back to 0.01.
    thresholds = {'TEFF': 1.0, 'LOGG': 0.005, 'VMICRO': 0.1,
                  'VROT': 0.1, 'RV': 0.01}
    dx_lim = np.zeros(len(fitparams), float)
    for k, name in enumerate(fitparams):
        if name in thresholds:
            dx_lim[k] = thresholds[name]
        elif name.endswith('_H'):
            dx_lim[k] = 0.005
        else:
            dx_lim[k] = 0.01
    return dx_lim
def initpars(params,fitparams,bounds=None):
    """Build the initial parameter vector for the names in FITPARAMS.

    Values present in PARAMS are used directly; otherwise sensible
    defaults are substituted (abundances fall back to FE_H when
    available).  If ``bounds`` is given, each value is clipped to its
    (lower, upper) limits.
    """
    pdict = {key.upper(): value for key, value in params.items()}   # all CAPS
    fitlist = [v.upper() for v in fitparams]                        # all CAPS
    npars = len(fitlist)
    # Fallback starting values for parameters absent from PARAMS
    defaults = {'RV': 0.0, 'VMICRO': 2.0, 'VROT': 0.0,
                'TEFF': 5000.0, 'LOGG': 3.0}
    pinit = np.zeros(npars,np.float64)
    for k, name in enumerate(fitlist):
        if name in pdict:
            pinit[k] = pdict[name]
        elif name in defaults:
            pinit[k] = defaults[name]
        elif name.endswith('_H'):
            # Abundances, use FE_H if possible
            pinit[k] = pdict.get('FE_H', 0.0)
        else:
            pinit[k] = 0.0
    # Make sure initial parameters are within the boundary limits
    if bounds is not None:
        for k in range(npars):
            pinit[k] = dln.limit(pinit[k],bounds[0][k],bounds[1][k])
    return pinit
def specfigure(figfile,spec,fmodel,out,original=None,verbose=True,figsize=10):
    """ Make diagnostic figure.

    Plots the observed (masked) spectrum against the best-fit model,
    one panel per order, and saves it to ``figfile``.

    Parameters
    ----------
    figfile : str
        Output figure filename; removed first if it already exists.
    spec : Spec1D
        Observed (masked) spectrum.
    fmodel : Spec1D
        Best-fit model spectrum.
    out : numpy structured array
        Fit results; currently only used by the commented-out annotation.
    original : Spec1D, optional
        Unmasked original spectrum to overplot in green.
    verbose : bool, optional
        Print the saved filename.  Default is True.
    figsize : int, optional
        Figure width in inches.  Default is 10.
    """
    #import matplotlib
    matplotlib.use('Agg')   # non-interactive backend, file output only
    #import matplotlib.pyplot as plt
    if os.path.exists(figfile): os.remove(figfile)
    norder = spec.norder
    nlegcol = 2
    if original is not None: nlegcol=3
    # Single-order plot
    if norder==1:
        fig,ax = plt.subplots()
        fig.set_figheight(figsize*0.5)
        fig.set_figwidth(figsize)
        if original is not None:
            plt.plot(original.wave,original.flux,color='green',label='Original',linewidth=1)
        plt.plot(spec.wave,spec.flux,'b',label='Masked Data',linewidth=0.5)
        plt.plot(fmodel.wave,fmodel.flux,'r',label='Model',linewidth=0.5,alpha=0.8)
        leg = ax.legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
        plt.xlabel('Wavelength (Angstroms)')
        plt.ylabel('Normalized Flux')
        xr = dln.minmax(spec.wave)
        # y-range spans data and model, padded, then clamped to [-0.2, 2.0]
        yr = [np.min([spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        if original is not None:
            yr = [np.min([original.flux,spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
        yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.005]
        yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
        plt.xlim(xr)
        plt.ylim(yr)
        snr = np.nanmedian(spec.flux/spec.err)
        plt.title(spec.filename)
        #ax.annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
        #            (snr, out['TEFF'], out['tefferr'], out['LOGG'], out['loggerr'], out['FE_H'], out['feherr'], out['RV'], out['vrelerr'], out['chisq']),
        #            xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    # Multi-order plot
    else:
        fig,ax = plt.subplots(norder)
        fig.set_figheight(figsize)
        fig.set_figwidth(figsize)
        for i in range(norder):
            if original is not None:
                ax[i].plot(original.wave[:,i],original.flux[:,i],color='green',label='Original',linewidth=1)
            ax[i].plot(spec.wave[:,i],spec.flux[:,i],'b',label='Masked Data',linewidth=0.5)
            ax[i].plot(fmodel.wave[:,i],fmodel.flux[:,i],'r',label='Model',linewidth=0.5,alpha=0.8)
            if i==0:
                leg = ax[i].legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
            ax[i].set_xlabel('Wavelength (Angstroms)')
            ax[i].set_ylabel('Normalized Flux')
            xr = dln.minmax(spec.wave[:,i])
            yr = [np.min([spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            if original is not None:
                yr = [np.min([original.flux[:,i],spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
            yr = [yr[0]-dln.valrange(yr)*0.05,yr[1]+dln.valrange(yr)*0.05]
            if i==0:
                # extra headroom for the legend in the first panel
                yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.05]
            yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
            ax[i].set_xlim(xr)
            ax[i].set_ylim(yr)
            # legend
            if i==0:
                snr = np.nanmedian(spec.flux/spec.err)
                ax[i].set_title(spec.filename)
                #ax[i].annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
                #               (snr,out['teff'],out['tefferr'],out['logg'],out['loggerr'],out['feh'],out['feherr'],out['vrel'],out['vrelerr'],out['chisq']),
                #               xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
    plt.savefig(figfile,bbox_inches='tight')
    plt.close(fig)
    if verbose is True: print('Figure saved to '+figfile)
def dopvrot_lsq(spec,models=None,initpar=None,verbose=False,logger=None):
    """
    Least Squares fitting with forward modeling of the spectrum.

    Parameters
    ----------
    spec : Spec1D object
        The observed spectrum to match.
    models : list of Cannon models, optional
        A list of Cannon models to use.  The default is to load all of the Cannon
        models in the data/ directory and use those.
    initpar : numpy array, optional
        Initial estimate for [teff, logg, feh, RV, vsini], optional.
    verbose : bool, optional
        Verbose output of the various steps.  This is False by default.
    logger : logging object, optional
        Logging object; a basic logger is created when None.

    Returns
    -------
    out : numpy structured array
        The output structured array of the final derived RVs, stellar parameters and errors.
    bmodel : Spec1D object
        The best-fitting Cannon model spectrum (as Spec1D object).

    Example
    -------
    .. code-block:: python

         out, bmodel = fit_lsq(spec)
    """
    if logger is None:
        logger = dln.basiclogger()
    # Load and prepare the Cannon models
    #-------------------------------------------
    if models is None:
        models = cannon.models.copy()
        models.prepare(spec)
    # Get initial estimates
    if initpar is None:
        initpar = np.array([6000.0, 2.5, -0.5, 0.0, 0.0])
    initpar = np.array(initpar).flatten()
    # Calculate the bounds: Teff/logg/[Fe/H] from the union of the model
    # grid ranges; RV in +-1000 km/s; vsini in [0, 500] km/s
    lbounds = np.zeros(5,float)+1e5
    ubounds = np.zeros(5,float)-1e5
    for p in models:
        lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
        ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
    lbounds[3] = -1000
    ubounds[3] = 1000
    lbounds[4] = 0.0
    ubounds[4] = 500.0
    bounds = (lbounds, ubounds)
    # function to use with curve_fit
    def spec_interp_vsini(x,teff,logg,feh,rv,vsini):
        """ This returns the interpolated model for a given spectrum."""
        # The "models" and "spec" must already exist outside of this function
        m = models(teff=teff,logg=logg,feh=feh,rv=rv)
        if m is None:   # there was a problem; huge flux pushes curve_fit away
            return np.zeros(spec.flux.shape,float).flatten()+1e30
        # Broaden to vsini
        if spec.norder>1:
            smflux = spec.flux*0
            for k in range(spec.norder):
                smflux[:,k] = utils.broaden(m.wave[:,k],m.flux[:,k],vsini=vsini)
        else:
            smflux = utils.broaden(m.wave.flatten(),m.flux.flatten(),vsini=vsini)
        return smflux.flatten()
    def spec_interp_vsini_jac(x,*args):
        """ Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
        is the partial derivative of f[i] with respect to x[j]). """
        # One-sided finite differences with a 2% relative step
        relstep = 0.02
        npix = len(x)
        npar = len(args)
        # Current values
        f0 = spec_interp_vsini(x,*args)
        # Initialize jacobian matrix
        jac = np.zeros((npix,npar),np.float64)
        # Loop over parameters
        for i in range(npar):
            pars = np.array(copy.deepcopy(args))
            step = relstep*pars[i]
            if step<=0.0:
                step = 0.02   # floor for zero/negative parameter values
            pars[i] += step
            f1 = spec_interp_vsini(x,*pars)
            jac[:,i] = (f1-f0)/step
        return jac
    # Use curve_fit
    lspars, lscov = curve_fit(spec_interp_vsini, spec.wave.flatten(), spec.flux.flatten(), sigma=spec.err.flatten(),
                              p0=initpar, bounds=bounds, jac=spec_interp_vsini_jac)
    # If it hits a boundary then the solution won't change much compared to initpar
    # setting absolute_sigma=True gives crazy low lsperror values
    lsperror = np.sqrt(np.diag(lscov))
    if verbose is True:
        logger.info('Least Squares RV and stellar parameters:')
        for k,n in enumerate(['Teff','logg','[Fe/H]','RV','Vsini']):
            logger.info('%s = %f' % (n,lspars[k]))
    lsmodel = spec_interp_vsini(spec.wave,teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=lspars[3],vsini=lspars[4])
    lschisq = np.sqrt(np.sum(((spec.flux.flatten()-lsmodel)/spec.err.flatten())**2)/len(lsmodel))
    if verbose is True: logger.info('chisq = %5.2f' % lschisq)
    # Put it into the output structure
    npar = len(lspars)
    dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
    out = np.zeros(1,dtype=dtype)
    out['pars'] = lspars
    out['parerr'] = lsperror
    out['parcov'] = lscov
    out['chisq'] = lschisq
    return out, lsmodel
def fit_elem(spec,params,elem,verbose=0,alinefile=None,mlinefile=None,logger=None):
    """ Fit an individual element.

    Performs a 1-D grid search in abundance from -2 with step 1.0 until
    past the chi-square minimum (or +10), then refines the minimum twice
    by bisection and finally with a quadratic interpolation.

    Parameters
    ----------
    spec : Spec1D object
        Observed spectrum (assumed normalized).
    params : dict
        Fixed parameter/abundance values.
    elem : list of str
        Name(s) of the abundance parameter(s) to fit (e.g. ['MG_H']);
        all are stepped together with a single abundance value.
    verbose : int, optional
        Verbosity level (0, 1, or 2).  Default is 0.
    alinefile, mlinefile : str, optional
        Atomic/molecular linelist filenames overriding the defaults.
    logger : logging object, optional
        Logging object; a basic logger is created when None.

    Returns
    -------
    out : numpy structured array
        Best abundance, chi-square and number of synthesis calls.
    model : numpy array
        Best-fit model flux (flattened).
    """
    t0 = time.time()
    if logger is None:
        logger = dln.basiclogger()
    # Create fitparams
    #fitparams = [e+'_H' for e in elem]
    fitparams = elem.copy()
    if verbose>0:
        logger.info('Fitting: '+', '.join(fitparams))
    # Initialize the fitter
    spfitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
                          alinefile=alinefile,mlinefile=mlinefile)
    spfitter.logger = logger
    spfitter.norm = True   # normalize the synthetic spectrum
    #spfitter.verbose = True
    bounds = mkbounds(elem)
    pinit = initpars(params,elem,bounds)
    # Initalize output
    npar = len(fitparams)
    dtyp = []
    for f in fitparams:
        dtyp += [(f,float)]
    dtyp += [('pars',float,npar),('chisq',float),('nsynfev',int)]
    dtype = np.dtype(dtyp)
    out = np.zeros(1,dtype=dtype)
    # Loop over elemental abundances
    flag = 0
    abund = -2.0
    dabund = 1.0
    count = 0
    abundarr = []
    chisq = []
    modelarr = []
    # Loop from -2 to +1 or until we get through the minimum
    while (flag==0):
        model = spfitter.model(spec.wave.flatten(),abund)
        chisq1 = spfitter.chisq(model)
        abundarr.append(abund)
        modelarr.append(model)
        chisq.append(chisq1)
        if verbose>0:
            logger.info('%f %f' % (abund,chisq1))
        # Are we done?  (past +1 and chisq no longer at its minimum)
        if (abund>=1) and (chisq1 != np.min(np.array(chisq))):
            flag = 1
        if (abund >= 10):
            flag = 1
        # Increment the abundance
        abund += dabund
        count += 1
    # Best value is at the end, just return that value
    # (minimum at a grid edge: cannot bracket it, so no refinement)
    bestind = np.argmin(chisq)
    if (bestind==0) or (bestind==len(chisq)-1):
        bestabund = abundarr[bestind]
        for k,f in enumerate(fitparams):
            out[f] = bestabund
        out['pars'] = bestabund
        out['chisq'] = np.min(chisq)
        out['nsynfev'] = spfitter.nsynfev
        model = modelarr[bestind]
        if verbose>0:
            logger.info('%f %f' % (bestabund,np.min(chisq)))
            logger.info('nfev = %i' % spfitter.nsynfev)
            logger.info('dt = %.2f sec.' % (time.time()-t0))
            logger.info(' ')
        return out, model
    # Now refine twice: sample the midpoints on each side of the minimum
    for i in range(2):
        # Get best value
        bestind = np.argmin(np.array(chisq))
        # get values half-way to left and right
        # Left
        lftind = bestind-1
        lftabund = np.mean([abundarr[lftind],abundarr[bestind]])
        lftmodel = spfitter.model(spec.wave.flatten(),lftabund)
        lftchisq = spfitter.chisq(lftmodel)
        abundarr.append(lftabund)
        modelarr.append(lftmodel)
        chisq.append(lftchisq)
        if verbose>0:
            logger.info('%f %f' % (lftabund,lftchisq))
        # Right
        rgtind = bestind+1
        rgtabund = np.mean([abundarr[bestind],abundarr[rgtind]])
        rgtmodel = spfitter.model(spec.wave.flatten(),rgtabund)
        rgtchisq = spfitter.chisq(rgtmodel)
        abundarr.append(rgtabund)
        modelarr.append(rgtmodel)
        chisq.append(rgtchisq)
        if verbose>0:
            logger.info('%f %f' % (rgtabund,rgtchisq))
        # Sort arrays (keep abundance/chisq/model triplets aligned)
        si = np.argsort(abundarr)
        abundarr = [abundarr[k] for k in si]
        chisq = [chisq[k] for k in si]
        modelarr = [modelarr[k] for k in si]
    # Now interpolate to find the best value
    abundarr2 = np.linspace(np.min(abundarr),np.max(abundarr),1000)
    chisq2 = interp1d(abundarr,chisq,kind='quadratic')(abundarr2)
    bestind = np.argmin(chisq2)
    bestabund = abundarr2[bestind]
    # Get the model at the best value
    model = spfitter.model(spec.wave.flatten(),bestabund)
    bestchisq = spfitter.chisq(model)
    # Populate output structure
    for k,f in enumerate(fitparams):
        out[f] = bestabund
    out['pars'] = bestabund
    out['chisq'] = bestchisq
    out['nsynfev'] = spfitter.nsynfev
    if verbose>0:
        logger.info('%f %f' % (bestabund,bestchisq))
        logger.info('nfev = %i' % spfitter.nsynfev)
        logger.info('dt = %.2f sec.' % (time.time()-t0))
        logger.info(' ')
    return out, model
def fit_lsq(spec,params,fitparams=None,fparamlims=None,verbose=0,alinefile=None,mlinefile=None,logger=None):
    """
    Determine stellar parameters and abundances by least-squares fitting of a
    synspec synthetic spectrum to an observed spectrum.

    Parameters
    ----------
    spec : Spec1D object
       The observed spectrum to match.
    params : dict
       Dictionary of initial values to use or parameters/elements to hold fixed.
    fitparams : list, optional
       List of parameter names to fit (e.g., TEFF, LOGG, FE_H, RV).  By default
         all values in PARAMS are fit.
    fparamlims : dict, optional
       Dictionary of lower and upper limits for each of the fitparams.
    verbose : int, optional
       Verbosity level (0, 1, or 2).  The default is 0 and verbose=2 is for debugging.
    alinefile : str, optional
       The atomic linelist to use.  Default is None which means the default synple linelist is used.
    mlinefile : str, optional
       The molecular linelist to use.  Default is None which means the default synple linelist is used.
    logger : logging object, optional
       Logging object.

    Returns
    -------
    out : numpy structured array
       Catalog of best-fit values.
    model : numpy array
       The best-fit synthetic stellar spectrum.

    Example
    -------
    .. code-block:: python

         spec = doppler.read(file)
         params = {'teff':5500,'logg':3.0,'fe_h':-1.0,'rv':0.0,'ca_h':-1.0}
         fitparams = ['teff','logg','fe_h','rv','ca_h']
         out,model = specfit.fit_lsq(spec,params,fitparams=fitparams)
    """
    start = time.time()
    if logger is None:
        logger = dln.basiclogger()
    # The fit is done against the normalized spectrum
    if spec.normalized==False:
        spec.normalize()
    # Parameter names are used in all CAPS internally
    params = {name.upper(): value for name,value in params.items()}
    # By default, fit every parameter given in PARAMS
    if fitparams is None:
        fitparams = list(params.keys())
    fitparams = [p.upper() for p in fitparams]
    nfitpars = len(fitparams)
    # Set up the fitting object
    fitter = SpecFitter(spec,params,fitparams=fitparams,verbose=(verbose>=2),
                        alinefile=alinefile,mlinefile=mlinefile)
    fitter.logger = logger
    fitter.norm = True   # normalize the synthetic spectrum
    bounds = mkbounds(fitparams,fparamlims)
    pinit = initpars(params,fitparams,bounds)
    if verbose>0:
        logger.info('Fitting: '+', '.join(fitparams))
    # Solve with curve_fit, using the fitter's analytic Jacobian
    dx_lim = mkdxlim(fitparams)
    bestpars, bestcov = curve_fit(fitter.model,fitter.wave,fitter.flux,dx_lim=dx_lim,
                                 sigma=fitter.err,p0=pinit,bounds=bounds,jac=fitter.jac)
    perror = np.sqrt(np.diag(bestcov))
    if verbose>0:
        logger.info('Best values:')
        for name,val,err in zip(fitparams,bestpars,perror):
            logger.info('%s = %.3f +/- %.3f' % (name,val,err))
    # Evaluate the best-fit model and its reduced chi value
    model = fitter.model(fitter.wave,*bestpars)
    chisq = np.sqrt(np.sum(((fitter.flux-model)/fitter.err)**2)/len(model))
    if verbose>0:
        logger.info('chisq = %.2f' % chisq)
        logger.info('nfev = %i' % fitter.nsynfev)
        logger.info('dt = %.2f sec.' % (time.time()-start))
    # Assemble the output catalog: one scalar+error column per fitted
    # parameter, plus array columns for the full solution
    dtype_list = []
    for name in fitparams:
        dtype_list += [(name,float),(name+'_ERR',float)]
    dtype_list += [('pars',float,nfitpars),('parerr',float,nfitpars),
                   ('parcov',float,(nfitpars,nfitpars)),('chisq',float),('nsynfev',int)]
    out = np.zeros(1,dtype=np.dtype(dtype_list))
    for k,name in enumerate(fitparams):
        out[name] = bestpars[k]
        out[name+'_ERR'] = perror[k]
    out['pars'] = bestpars
    out['parerr'] = perror
    out['parcov'] = bestcov
    out['chisq'] = chisq
    out['nsynfev'] = fitter.nsynfev
    # Reshape the final model spectrum to match the observed flux array
    model = model.reshape(spec.flux.shape)
    return out, model
def fit(spec,params=None,elem=None,figfile=None,fitvsini=False,fitvmicro=False,
        fparamlims=None,verbose=1,alinefile=None,mlinefile=None,logger=None):
    """
    Fit a spectrum with a synspec synthetic spectrum and determine stellar parameters and
    abundances using a multi-step iterative method.
    Step 1: Fit Teff/logg/[Fe/H]/RV using Doppler
    Step 2: Fit Teff/logg/[Fe/H]/RV + vsini with Doppler model
    Step 3: Fit stellar parameters (Teff/logg/[Fe/H]/[alpha/H]), RV and broadening (Vrot/Vmicro)
    Step 4: Fit each element one at a time holding everything else fixed.
    Step 5: Fit everything simultaneously

    Parameters
    ----------
    spec : Spec1D object
       The observed spectrum to match.
    params : dict, optional
       Dictionary of initial values to use or parameters/elements to hold fixed.
    elem : list, optional
       List of elements to fit.  The default is:
         elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','SR','CE','ND']
       Input an empty list [] to fit no elements.
    figfile : string, optional
       The filename for a diagnostic plot showing the observed spectrum and model spectrum.
    fitvsini : bool, optional
       Fit rotational velocity (vsini).  By default, Vsini will be fit initially with a Doppler
         model, but only included in the final fit if it improved chisq.
    fitvmicro : bool, optional
       Fit Vmicro.  Default is False.  By default, Vmicro is set (if not included in PARAMS)
         logg>=3.8:  vmicro = 2.0
         logg<3.8:   vmicro = 10^(0.226-0.0228*logg+0.0297*(logg)^2-0.0113*(logg)^3)
    fparamlims : dict, optional
       Dictionary of lower and upper limits for each of the fitted parameters.
       For example, if params is {'teff': 9000, 'logg': 4.00, 'rv': -16.124}, fparamlims
       could be {'teff': [8000,10000], 'logg': [3.50,4.50], 'rv': [-20.124,-12.124]}.
    verbose : int, optional
       Verbosity level (0, 1, or 2).  The default is 1 and verbose=2 is for debugging.
    alinefile : str, optional
       The atomic linelist to use.  Default is None which means the default synple linelist is used.
    mlinefile : str, optional
       The molecular linelist to use.  Default is None which means the default synple linelist is used.
    logger : logging object, optional
       Logging object.

    Returns
    -------
    out : numpy structured array
       Catalog of best-fit values.
    model : Spec1D object
       The best-fit synthetic stellar spectrum.

    Example
    -------
    .. code-block:: python

         spec = doppler.read(file)
         out,model = specfit.fit(spec)
    """
    t0 = time.time()
    if logger is None:
        # Only configure the handler of a logger we create ourselves;
        # a caller-supplied logger is left untouched (the old code
        # reconfigured and redirected the caller's first handler).
        logger = dln.basiclogger()
        logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
        logger.handlers[0].setStream(sys.stdout)
    # Default set of elements
    if elem is None:
        elem = ['C','N','O','NA','MG','AL','SI','K','CA','TI','V','CR','MN','CO','NI','CU','SR','CE','ND']
    # Normalize the spectrum
    if spec.normalized==False:
        spec.normalize()
    # Print out inputs
    if verbose>0:
        logger.info('Inputs:')
        if params is not None:
            logger.info('PARAMS:')
            for k,n in enumerate(params.keys()):
                logger.info('%s = %f' % (n,params[n]))
        else:
            logger.info('PARAMS: None')
        if fitvmicro:
            logger.info('Fitting VMICRO')
        if fitvsini:
            logger.info('Fitting VSINI')
        if len(elem)>0:
            logger.info('Elements to fit: '+', '.join(elem))
        else:
            logger.info('No elements to fit')
        logger.info(' ')
    # Input linelists
    # logging uses lazy %-formatting: the filename must have a %s placeholder
    # (the old code passed it as a bare extra argument, which never formatted)
    if verbose and alinefile is not None:
        logger.info('Using input atomic linelist: %s',alinefile)
    if verbose and mlinefile is not None:
        logger.info('Using input molecular linelist: %s',mlinefile)

    # 1) Doppler (Teff, logg, feh, RV)
    #---------------------------------
    t1 = time.time()
    if verbose>0:
        logger.info('Step 1: Running Doppler')
    # Use Doppler to get initial guess of stellar parameters and RV
    dopout, dopfmodel, dopspecm = doppler.fit(spec)
    if verbose>0:
        logger.info('Teff = %.2f +/- %.2f' % (dopout['teff'][0],dopout['tefferr'][0]))
        logger.info('logg = %.3f +/- %.3f' % (dopout['logg'][0],dopout['loggerr'][0]))
        logger.info('[Fe/H] = %.3f +/- %.3f' % (dopout['feh'][0],dopout['feherr'][0]))
        logger.info('Vrel = %.4f +/- %.4f' % (dopout['vrel'][0],dopout['vrelerr'][0]))
        logger.info('chisq = %.3f' % dopout['chisq'][0])
        logger.info('dt = %.2f sec.' % (time.time()-t1))
    # typically 5 sec

    # 2) Fit vsini as well with Doppler model
    #-----------------------------------------
    t2 = time.time()
    if verbose>0:
        logger.info(' ')
        logger.info('Step 2: Fitting vsini with Doppler model')
    # For APOGEE resolution you need vsini~4 km/s or greater to see an effect
    initpar2 = [dopout['teff'][0], dopout['logg'][0], dopout['feh'][0], dopout['vrel'][0], 5.0]
    out2, model2 = dopvrot_lsq(spec,initpar=initpar2,verbose=verbose,logger=logger)
    if verbose>0:
        logger.info('Teff = %.2f +/- %.2f' % (out2['pars'][0][0],out2['parerr'][0][0]))
        logger.info('logg = %.3f +/- %.3f' % (out2['pars'][0][1],out2['parerr'][0][1]))
        logger.info('[Fe/H] = %.3f +/- %.3f' % (out2['pars'][0][2],out2['parerr'][0][2]))
        logger.info('Vrel = %.4f +/- %.4f' % (out2['pars'][0][3],out2['parerr'][0][3]))
        logger.info('Vsini = %.3f +/- %.3f' % (out2['pars'][0][4],out2['parerr'][0][4]))
        logger.info('chisq = %.3f' % out2['chisq'][0])
        logger.info('dt = %.2f sec.' % (time.time()-t2))
    # typically 5 sec
    # Fall back to the Vrot=0 Doppler solution if adding vsini did not help
    if out2['chisq'][0] > dopout['chisq'][0]:
        if verbose>0:
            logger.info('Doppler Vrot=0 chisq is better')
        out2['pars'][0] = [dopout['teff'][0],dopout['logg'][0],dopout['feh'][0],dopout['vrel'][0],0.0]

    # Initialize params
    if params is None:
        params = {}
    else:
        params = dict((key.upper(), value) for (key, value) in params.items())  # all CAPS
    # Using input values when possible, otherwise Doppler values
    for k,name in enumerate(['TEFF','LOGG','FE_H','RV','VROT']):
        if params.get(name) is None:
            params[name] = out2['pars'][0][k]

    # Get Vmicro using Teff/logg relation
    # APOGEE DR14 vmicro relation (Holtzman et al. 2018)
    # for stars with [M/H]>-1 and logg<3.8
    # vmicro = 10^(0.226-0.0228*logg+0.0297*(logg)^2-0.0113*(logg)^3)
    # coef = [0.226,0.0228,0.0297,-0.0113]
    # only giants, was fit in dwarfs
    if params.get('VMICRO') is None:
        vmicro = 2.0  # default
        if params['LOGG']<3.8:
            vmcoef = [0.226,0.0228,0.0297,-0.0113]
            vmicro = 10**dln.poly(params['LOGG'],vmcoef[::-1])
        params['VMICRO'] = vmicro
    # for giants
    # vmacro = 10^(0.741-0.0998*logg-0.225[M/H])
    # maximum of 15 km/s

    # 3) Fit stellar parameters (Teff, logg, feh, alpha, RV, Vsini)
    #--------------------------------------------------------------
    t3 = time.time()
    if verbose>0:
        logger.info(' ')
        logger.info('Step 3: Fitting stellar parameters, RV and broadening')
    params3 = params.copy()
    fitparams3 = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
    if params3['VROT']>0 or fitvsini is True:
        fitparams3.append('VROT')
    # Fit Vmicro as well if it's a dwarf
    if params3['LOGG']>3.8 or params3['TEFF']>8000 or fitvmicro is True:
        fitparams3.append('VMICRO')
    out3, model3 = fit_lsq(spec,params3,fitparams3,fparamlims,verbose=verbose,
                           alinefile=alinefile,mlinefile=mlinefile,logger=logger)
    # typically 9 min.
    # Should we fit C_H and N_H as well??

    # Tweak the continuum
    # (old code gated this on "verbose is not None", which is always true
    # for the int default; use the same verbose>0 gate as everywhere else)
    if verbose>0:
        logger.info('Tweaking continuum using best-fit synthetic model')
    tmodel = Spec1D(model3,wave=spec.wave.copy(),lsfpars=np.array(0.0))
    spec = doppler.rv.tweakcontinuum(spec,tmodel)

    # 4) Fit each element separately
    #-------------------------------
    t4 = time.time()
    if verbose>0:
        logger.info(' ')
        logger.info('Step 4: Fitting each element separately')
    params4 = params3.copy()
    for k in range(len(fitparams3)):
        params4[fitparams3[k]] = out3['pars'][0][k]
    nelem = len(elem)
    if nelem>0:
        if verbose>0:
            logger.info('Elements: '+', '.join(elem))
        # 'U10' replaces the np.str alias that was removed in NumPy 1.24
        elemcat = np.zeros(nelem,dtype=np.dtype([('name','U10'),('par',np.float64),('parerr',np.float64)]))
        elemcat['name'] = elem
        for k in range(nelem):
            t4b = time.time()
            parselem = params4.copy()
            # Alpha elements start from the fitted [alpha/H], others from [Fe/H]
            if elem[k] in ['O','MG','SI','S','CA','TI']:
                parselem[elem[k]+'_H'] = params4['ALPHA_H']
            else:
                parselem[elem[k]+'_H'] = params4['FE_H']
            fitparselem = [elem[k]+'_H']
            out4, model4 = fit_elem(spec,parselem,fitparselem,verbose=verbose,
                                     alinefile=alinefile,mlinefile=mlinefile,logger=logger)
            elemcat['par'][k] = out4['pars'][0]
            #elemcat['parerr'][k] = out4['parerr'][0]
            if verbose>0:
                # per-element timing (old code printed cumulative t4 and left t4b unused)
                logger.info('dt = %f sec.' % (time.time()-t4b))
                logger.info(' ')
    else:
        if verbose>0:
            logger.info('No elements to fit')
    # about 50 min.

    # 5) Fit all parameters simultaneously
    #---------------------------------------
    # if NO elements to fit, then nothing to do
    if nelem>0:
        if verbose>0:
            logger.info('Step 5: Fit all parameters simultaneously')
        params5 = params4.copy()
        for k in range(nelem):
            params5[elem[k]+'_H'] = elemcat['par'][k]
        if params5.get('ALPHA_H') is not None:
            del params5['ALPHA_H']
        fitparams5 = ['TEFF','LOGG','FE_H','RV']
        if 'VROT' in fitparams3 or fitvsini is True:
            fitparams5.append('VROT')
        if 'VMICRO' in fitparams3 or fitvmicro is True:
            fitparams5.append('VMICRO')
        fitparams5 = fitparams5+list(np.char.array(elem)+'_H')
        out5, model5 = fit_lsq(spec,params5,fitparams5,fparamlims,verbose=verbose,
                               alinefile=alinefile,mlinefile=mlinefile,logger=logger)
    else:
        out5 = out3
        model5 = model3
        fitparams5 = fitparams3

    # Make final structure and save the figure
    dtyp = []
    npar = len(fitparams5)
    for f in fitparams5:
        dtyp += [(f,float),(f+'_ERR',float)]
    dtyp += [('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float),('vhelio',float)]
    dtype = np.dtype(dtyp)
    out = np.zeros(1,dtype=dtype)
    for k,f in enumerate(fitparams5):
        out[f] = out5['pars'][0][k]
        out[f+'_ERR'] = out5['parerr'][0][k]
    out['pars'] = out5['pars'][0]
    out['parerr'] = out5['parerr'][0]
    out['parcov'] = out5['parcov'][0]
    out['chisq'] = out5['chisq'][0]
    out['vhelio'] = out5['RV']+spec.barycorr()
    if verbose>0:
        logger.info('Vhelio = %.3f' % out['vhelio'])
    # Final model
    model = Spec1D(model5,wave=spec.wave.copy(),lsfpars=np.array(0.0))
    model.lsf = spec.lsf.copy()
    # Make figure
    if figfile is not None:
        specfigure(figfile,spec,model,out,verbose=(verbose>=2))
    if verbose>0:
        logger.info('dt = %.2f sec.' % (time.time()-t0))
    return out, model
| [
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"scipy.interpolate.interp1d",
"dlnpyutils.utils.basiclogger",
"numpy.array",
"numpy.argsort",
"dlnpyutils.utils.interp",
"numpy.isfinite",
"copy.deepcopy",
"synple.synple.read_model",
"doppler.rv.tweakcontinuum",
"numpy.arange",
"os.remove",
"os.... | [((681, 702), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (695, 702), False, 'import matplotlib\n'), ((872, 941), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.dtype size changed"""'}), "('ignore', message='numpy.dtype size changed')\n", (895, 941), False, 'import warnings\n'), ((942, 1011), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'message': '"""numpy.ufunc size changed"""'}), "('ignore', message='numpy.ufunc size changed')\n", (965, 1011), False, 'import warnings\n'), ((14280, 14306), 'dlnpyutils.utils.closest', 'dln.closest', (['spec.wave', 'w0'], {}), '(spec.wave, w0)\n', (14291, 14306), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((14322, 14348), 'dlnpyutils.utils.closest', 'dln.closest', (['spec.wave', 'w1'], {}), '(spec.wave, w1)\n', (14333, 14348), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((15139, 15203), 'astropy.table.Table.read', 'Table.read', (["(codedir + '/data/periodic_table.txt')"], {'format': '"""ascii"""'}), "(codedir + '/data/periodic_table.txt', format='ascii')\n", (15149, 15203), False, 'from astropy.table import Table\n'), ((15571, 15616), 'synple.synple.read_model', 'synple.read_model', (['modelfile'], {'verbose': 'verbose'}), '(modelfile, verbose=verbose)\n', (15588, 15616), False, 'from synple import synple\n'), ((15629, 15653), 'dlnpyutils.utils.readlines', 'dln.readlines', (['modelfile'], {}), '(modelfile)\n', (15642, 15653), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((15769, 16552), 'numpy.array', 'np.array', (['[4750.0, 2.5, -10.99, -10.66, -9.34, -3.61, -4.21, -3.35, -7.48, -4.11, -\n 5.8, -4.44, -5.59, -4.53, -6.63, -4.92, -6.54, -5.64, -7.01, -5.7, -\n 8.89, -7.09, -8.11, -6.4, -6.61, -4.54, -7.05, -5.82, -7.85, -7.48, -\n 9.0, -8.39, -9.74, -8.7, -9.5, -8.79, -9.52, -9.17, -9.83, -9.46, -\n 10.58, -10.16, -20.0, -10.29, -11.13, -10.47, -11.1, -10.33, 
-11.24, -\n 10.0, -11.03, -9.86, -10.49, -9.8, -10.96, -9.86, -10.94, -10.46, -\n 11.32, -10.62, -20.0, -11.08, -11.52, -10.97, -11.74, -10.94, -11.56, -\n 11.12, -11.94, -11.2, -11.94, -11.19, -12.16, -11.19, -11.78, -10.64, -\n 10.66, -10.42, -11.12, -10.87, -11.14, -10.29, -11.39, -20.0, -20.0, -\n 20.0, -20.0, -20.0, -20.0, -12.02, -20.0, -12.58, -20.0, -20.0, -20.0, \n -20.0, -20.0, -20.0, -20.0]'], {}), '([4750.0, 2.5, -10.99, -10.66, -9.34, -3.61, -4.21, -3.35, -7.48, -\n 4.11, -5.8, -4.44, -5.59, -4.53, -6.63, -4.92, -6.54, -5.64, -7.01, -\n 5.7, -8.89, -7.09, -8.11, -6.4, -6.61, -4.54, -7.05, -5.82, -7.85, -\n 7.48, -9.0, -8.39, -9.74, -8.7, -9.5, -8.79, -9.52, -9.17, -9.83, -9.46,\n -10.58, -10.16, -20.0, -10.29, -11.13, -10.47, -11.1, -10.33, -11.24, -\n 10.0, -11.03, -9.86, -10.49, -9.8, -10.96, -9.86, -10.94, -10.46, -\n 11.32, -10.62, -20.0, -11.08, -11.52, -10.97, -11.74, -10.94, -11.56, -\n 11.12, -11.94, -11.2, -11.94, -11.19, -12.16, -11.19, -11.78, -10.64, -\n 10.66, -10.42, -11.12, -10.87, -11.14, -10.29, -11.39, -20.0, -20.0, -\n 20.0, -20.0, -20.0, -20.0, -12.02, -20.0, -12.58, -20.0, -20.0, -20.0, \n -20.0, -20.0, -20.0, -20.0])\n', (15777, 16552), True, 'import numpy as np\n'), ((18937, 18963), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (18952, 18963), False, 'import os\n'), ((19040, 19054), 'os.chdir', 'os.chdir', (['tdir'], {}), '(tdir)\n', (19048, 19054), False, 'import os\n'), ((19646, 19685), 'tempfile.mkstemp', 'tempfile.mkstemp', ([], {'prefix': '"""mod"""', 'dir': '"""."""'}), "(prefix='mod', dir='.')\n", (19662, 19685), False, 'import tempfile\n'), ((19689, 19702), 'os.close', 'os.close', (['tid'], {}), '(tid)\n', (19697, 19702), False, 'import os\n'), ((19809, 19841), 'dlnpyutils.utils.limit', 'dln.limit', (['teff', '(3500.0)', '(60000.0)'], {}), '(teff, 3500.0, 60000.0)\n', (19818, 19841), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((19852, 19877), 'dlnpyutils.utils.limit', 
'dln.limit', (['logg', '(0.0)', '(5.0)'], {}), '(logg, 0.0, 5.0)\n', (19861, 19877), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((19889, 19916), 'dlnpyutils.utils.limit', 'dln.limit', (['metal', '(-2.5)', '(0.5)'], {}), '(metal, -2.5, 0.5)\n', (19898, 19916), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((20673, 20692), 'shutil.rmtree', 'shutil.rmtree', (['tdir'], {}), '(tdir)\n', (20686, 20692), False, 'import shutil\n'), ((20697, 20713), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (20705, 20713), False, 'import os\n'), ((24976, 24998), 'dlnpyutils.utils.closest', 'dln.closest', (['wave1', 'w0'], {}), '(wave1, w0)\n', (24987, 24998), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((25014, 25036), 'dlnpyutils.utils.closest', 'dln.closest', (['wave1', 'w1'], {}), '(wave1, w1)\n', (25025, 25036), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((26430, 26466), 'numpy.zeros', 'np.zeros', (['(npix, norder)', 'np.float32'], {}), '((npix, norder), np.float32)\n', (26438, 26466), True, 'import numpy as np\n'), ((29528, 29551), 'numpy.zeros', 'np.zeros', (['n', 'np.float64'], {}), '(n, np.float64)\n', (29536, 29551), True, 'import numpy as np\n'), ((29565, 29588), 'numpy.zeros', 'np.zeros', (['n', 'np.float64'], {}), '(n, np.float64)\n', (29573, 29588), True, 'import numpy as np\n'), ((29612, 29638), 'numpy.where', 'np.where', (["(params == 'TEFF')"], {}), "(params == 'TEFF')\n", (29620, 29638), True, 'import numpy as np\n'), ((29733, 29759), 'numpy.where', 'np.where', (["(params == 'LOGG')"], {}), "(params == 'LOGG')\n", (29741, 29759), True, 'import numpy as np\n'), ((29851, 29877), 'numpy.where', 'np.where', (["(params == 'FE_H')"], {}), "(params == 'FE_H')\n", (29859, 29877), True, 'import numpy as np\n'), ((29975, 30003), 'numpy.where', 'np.where', (["(params == 'VMICRO')"], {}), "(params == 'VMICRO')\n", (29983, 30003), True, 'import numpy as np\n'), ((30105, 30131), 'numpy.where', 
'np.where', (["(params == 'VROT')"], {}), "(params == 'VROT')\n", (30113, 30131), True, 'import numpy as np\n'), ((30227, 30251), 'numpy.where', 'np.where', (["(params == 'RV')"], {}), "(params == 'RV')\n", (30235, 30251), True, 'import numpy as np\n'), ((30967, 30988), 'numpy.zeros', 'np.zeros', (['npar', 'float'], {}), '(npar, float)\n', (30975, 30988), True, 'import numpy as np\n'), ((31781, 31808), 'numpy.zeros', 'np.zeros', (['npars', 'np.float64'], {}), '(npars, np.float64)\n', (31789, 31808), True, 'import numpy as np\n'), ((33082, 33103), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (33096, 33103), False, 'import matplotlib\n'), ((33148, 33171), 'os.path.exists', 'os.path.exists', (['figfile'], {}), '(figfile)\n', (33162, 33171), False, 'import os\n'), ((36675, 36716), 'matplotlib.pyplot.savefig', 'plt.savefig', (['figfile'], {'bbox_inches': '"""tight"""'}), "(figfile, bbox_inches='tight')\n", (36686, 36716), True, 'import matplotlib.pyplot as plt\n'), ((36720, 36734), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (36729, 36734), True, 'import matplotlib.pyplot as plt\n'), ((41088, 41201), 'numpy.dtype', 'np.dtype', (["[('pars', float, npar), ('parerr', float, npar), ('parcov', float, (npar,\n npar)), ('chisq', float)]"], {}), "([('pars', float, npar), ('parerr', float, npar), ('parcov', float,\n (npar, npar)), ('chisq', float)])\n", (41096, 41201), True, 'import numpy as np\n'), ((41197, 41221), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (41205, 41221), True, 'import numpy as np\n'), ((41491, 41502), 'time.time', 'time.time', ([], {}), '()\n', (41500, 41502), False, 'import time\n'), ((42297, 42311), 'numpy.dtype', 'np.dtype', (['dtyp'], {}), '(dtyp)\n', (42305, 42311), True, 'import numpy as np\n'), ((42322, 42346), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (42330, 42346), True, 'import numpy as np\n'), ((43143, 43159), 'numpy.argmin', 
'np.argmin', (['chisq'], {}), '(chisq)\n', (43152, 43159), True, 'import numpy as np\n'), ((45081, 45098), 'numpy.argmin', 'np.argmin', (['chisq2'], {}), '(chisq2)\n', (45090, 45098), True, 'import numpy as np\n'), ((47324, 47335), 'time.time', 'time.time', ([], {}), '()\n', (47333, 47335), False, 'import time\n'), ((48333, 48470), 'dlnpyutils.minpack.curve_fit', 'curve_fit', (['spfitter.model', 'spfitter.wave', 'spfitter.flux'], {'dx_lim': 'dx_lim', 'sigma': 'spfitter.err', 'p0': 'pinit', 'bounds': 'bounds', 'jac': 'spfitter.jac'}), '(spfitter.model, spfitter.wave, spfitter.flux, dx_lim=dx_lim,\n sigma=spfitter.err, p0=pinit, bounds=bounds, jac=spfitter.jac)\n', (48342, 48470), False, 'from dlnpyutils.minpack import curve_fit\n'), ((49245, 49259), 'numpy.dtype', 'np.dtype', (['dtyp'], {}), '(dtyp)\n', (49253, 49259), True, 'import numpy as np\n'), ((49270, 49294), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (49278, 49294), True, 'import numpy as np\n'), ((52337, 52348), 'time.time', 'time.time', ([], {}), '()\n', (52346, 52348), False, 'import time\n'), ((53761, 53772), 'time.time', 'time.time', ([], {}), '()\n', (53770, 53772), False, 'import time\n'), ((53948, 53965), 'doppler.fit', 'doppler.fit', (['spec'], {}), '(spec)\n', (53959, 53965), False, 'import doppler\n'), ((54580, 54591), 'time.time', 'time.time', ([], {}), '()\n', (54589, 54591), False, 'import time\n'), ((56971, 56982), 'time.time', 'time.time', ([], {}), '()\n', (56980, 56982), False, 'import time\n'), ((57892, 57931), 'doppler.rv.tweakcontinuum', 'doppler.rv.tweakcontinuum', (['spec', 'tmodel'], {}), '(spec, tmodel)\n', (57917, 57931), False, 'import doppler\n'), ((58025, 58036), 'time.time', 'time.time', ([], {}), '()\n', (58034, 58036), False, 'import time\n'), ((60798, 60812), 'numpy.dtype', 'np.dtype', (['dtyp'], {}), '(dtyp)\n', (60806, 60812), True, 'import numpy as np\n'), ((60823, 60847), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, 
dtype=dtype)\n', (60831, 60847), True, 'import numpy as np\n'), ((2512, 2534), 'doppler.read', 'doppler.read', (['filename'], {}), '(filename)\n', (2524, 2534), False, 'import doppler\n'), ((4640, 4682), 'numpy.zeros', 'np.zeros', (['(spec.lsf.norder, 2)', 'np.float64'], {}), '((spec.lsf.norder, 2), np.float64)\n', (4648, 4682), True, 'import numpy as np\n'), ((4694, 4731), 'numpy.zeros', 'np.zeros', (['spec.lsf.norder', 'np.float64'], {}), '(spec.lsf.norder, np.float64)\n', (4702, 4731), True, 'import numpy as np\n'), ((4747, 4775), 'numpy.zeros', 'np.zeros', (['norder', 'np.float64'], {}), '(norder, np.float64)\n', (4755, 4775), True, 'import numpy as np\n'), ((5392, 5405), 'numpy.min', 'np.min', (['mindw'], {}), '(mindw)\n', (5398, 5405), True, 'import numpy as np\n'), ((5454, 5466), 'numpy.min', 'np.min', (['wave'], {}), '(wave)\n', (5460, 5466), True, 'import numpy as np\n'), ((5489, 5501), 'numpy.max', 'np.max', (['wave'], {}), '(wave)\n', (5495, 5501), True, 'import numpy as np\n'), ((11586, 11620), 'numpy.zeros', 'np.zeros', (['(npix, npar)', 'np.float64'], {}), '((npix, npar), np.float64)\n', (11594, 11620), True, 'import numpy as np\n'), ((15099, 15124), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (15114, 15124), False, 'import os\n'), ((18992, 19035), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""syn"""', 'dir': 'tmpbase'}), "(prefix='syn', dir=tmpbase)\n", (19008, 19035), False, 'import tempfile\n'), ((20172, 20187), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (20185, 20187), False, 'import pdb\n'), ((21245, 21307), 'doppler.utils.broaden', 'utils.broaden', (['spec.wave', 'spec.flux'], {'vgauss': 'vmicro', 'vsini': 'vrot'}), '(spec.wave, spec.flux, vgauss=vmicro, vsini=vrot)\n', (21258, 21307), False, 'from doppler import cannon, utils, reader\n'), ((26248, 26284), 'numpy.zeros', 'np.zeros', (['(npix, norder)', 'np.float32'], {}), '((npix, norder), np.float32)\n', (26256, 26284), True, 'import numpy as 
np\n'), ((27518, 27538), 'dlnpyutils.utils.slope', 'dln.slope', (['modelwave'], {}), '(modelwave)\n', (27527, 27538), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((27555, 27584), 'numpy.hstack', 'np.hstack', (['(dwmod, dwmod[-1])'], {}), '((dwmod, dwmod[-1]))\n', (27564, 27584), True, 'import numpy as np\n'), ((27769, 27796), 'numpy.abs', 'np.abs', (['(fwhm / dwmod[xpmod])'], {}), '(fwhm / dwmod[xpmod])\n', (27775, 27796), True, 'import numpy as np\n'), ((28771, 28810), 'doppler.utils.convolve_sparse', 'utils.convolve_sparse', (['modelflux', 'lsf2d'], {}), '(modelflux, lsf2d)\n', (28792, 28810), False, 'from doppler import cannon, utils, reader\n'), ((28875, 28916), 'synple.synple.interp_spl', 'synple.interp_spl', (['wobs', 'modelwave', 'cflux'], {}), '(wobs, modelwave, cflux)\n', (28892, 28916), False, 'from synple import synple\n'), ((28932, 28977), 'synple.synple.interp_spl', 'synple.interp_spl', (['wobs', 'modelwave', 'modelcont'], {}), '(wobs, modelwave, modelcont)\n', (28949, 28977), False, 'from synple import synple\n'), ((33173, 33191), 'os.remove', 'os.remove', (['figfile'], {}), '(figfile)\n', (33182, 33191), False, 'import os\n'), ((33331, 33345), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (33343, 33345), True, 'import matplotlib.pyplot as plt\n'), ((33553, 33624), 'matplotlib.pyplot.plot', 'plt.plot', (['spec.wave', 'spec.flux', '"""b"""'], {'label': '"""Masked Data"""', 'linewidth': '(0.5)'}), "(spec.wave, spec.flux, 'b', label='Masked Data', linewidth=0.5)\n", (33561, 33624), True, 'import matplotlib.pyplot as plt\n'), ((33629, 33714), 'matplotlib.pyplot.plot', 'plt.plot', (['fmodel.wave', 'fmodel.flux', '"""r"""'], {'label': '"""Model"""', 'linewidth': '(0.5)', 'alpha': '(0.8)'}), "(fmodel.wave, fmodel.flux, 'r', label='Model', linewidth=0.5, alpha=0.8\n )\n", (33637, 33714), True, 'import matplotlib.pyplot as plt\n'), ((33799, 33835), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Wavelength 
(Angstroms)"""'], {}), "('Wavelength (Angstroms)')\n", (33809, 33835), True, 'import matplotlib.pyplot as plt\n'), ((33844, 33873), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalized Flux"""'], {}), "('Normalized Flux')\n", (33854, 33873), True, 'import matplotlib.pyplot as plt\n'), ((33887, 33908), 'dlnpyutils.utils.minmax', 'dln.minmax', (['spec.wave'], {}), '(spec.wave)\n', (33897, 33908), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((34269, 34281), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xr'], {}), '(xr)\n', (34277, 34281), True, 'import matplotlib.pyplot as plt\n'), ((34290, 34302), 'matplotlib.pyplot.ylim', 'plt.ylim', (['yr'], {}), '(yr)\n', (34298, 34302), True, 'import matplotlib.pyplot as plt\n'), ((34317, 34351), 'numpy.nanmedian', 'np.nanmedian', (['(spec.flux / spec.err)'], {}), '(spec.flux / spec.err)\n', (34329, 34351), True, 'import numpy as np\n'), ((34358, 34382), 'matplotlib.pyplot.title', 'plt.title', (['spec.filename'], {}), '(spec.filename)\n', (34367, 34382), True, 'import matplotlib.pyplot as plt\n'), ((34810, 34830), 'matplotlib.pyplot.subplots', 'plt.subplots', (['norder'], {}), '(norder)\n', (34822, 34830), True, 'import matplotlib.pyplot as plt\n'), ((37825, 37842), 'dlnpyutils.utils.basiclogger', 'dln.basiclogger', ([], {}), '()\n', (37840, 37842), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((37974, 37994), 'doppler.cannon.models.copy', 'cannon.models.copy', ([], {}), '()\n', (37992, 37994), False, 'from doppler import cannon, utils, reader\n'), ((38099, 38138), 'numpy.array', 'np.array', (['[6000.0, 2.5, -0.5, 0.0, 0.0]'], {}), '([6000.0, 2.5, -0.5, 0.0, 0.0])\n', (38107, 38138), True, 'import numpy as np\n'), ((38227, 38245), 'numpy.zeros', 'np.zeros', (['(5)', 'float'], {}), '(5, float)\n', (38235, 38245), True, 'import numpy as np\n'), ((38263, 38281), 'numpy.zeros', 'np.zeros', (['(5)', 'float'], {}), '(5, float)\n', (38271, 38281), True, 'import numpy as np\n'), ((39725, 39759), 
'numpy.zeros', 'np.zeros', (['(npix, npar)', 'np.float64'], {}), '((npix, npar), np.float64)\n', (39733, 39759), True, 'import numpy as np\n'), ((40507, 40521), 'numpy.diag', 'np.diag', (['lscov'], {}), '(lscov)\n', (40514, 40521), True, 'import numpy as np\n'), ((41548, 41565), 'dlnpyutils.utils.basiclogger', 'dln.basiclogger', ([], {}), '()\n', (41563, 41565), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((43373, 43386), 'numpy.min', 'np.min', (['chisq'], {}), '(chisq)\n', (43379, 43386), True, 'import numpy as np\n'), ((43948, 43994), 'numpy.mean', 'np.mean', (['[abundarr[lftind], abundarr[bestind]]'], {}), '([abundarr[lftind], abundarr[bestind]])\n', (43955, 43994), True, 'import numpy as np\n'), ((44348, 44394), 'numpy.mean', 'np.mean', (['[abundarr[bestind], abundarr[rgtind]]'], {}), '([abundarr[bestind], abundarr[rgtind]])\n', (44355, 44394), True, 'import numpy as np\n'), ((44721, 44741), 'numpy.argsort', 'np.argsort', (['abundarr'], {}), '(abundarr)\n', (44731, 44741), True, 'import numpy as np\n'), ((44961, 44977), 'numpy.min', 'np.min', (['abundarr'], {}), '(abundarr)\n', (44967, 44977), True, 'import numpy as np\n'), ((44978, 44994), 'numpy.max', 'np.max', (['abundarr'], {}), '(abundarr)\n', (44984, 44994), True, 'import numpy as np\n'), ((45014, 45057), 'scipy.interpolate.interp1d', 'interp1d', (['abundarr', 'chisq'], {'kind': '"""quadratic"""'}), "(abundarr, chisq, kind='quadratic')\n", (45022, 45057), False, 'from scipy.interpolate import interp1d\n'), ((47381, 47398), 'dlnpyutils.utils.basiclogger', 'dln.basiclogger', ([], {}), '()\n', (47396, 47398), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((48507, 48519), 'numpy.diag', 'np.diag', (['cov'], {}), '(cov)\n', (48514, 48519), True, 'import numpy as np\n'), ((52394, 52411), 'dlnpyutils.utils.basiclogger', 'dln.basiclogger', ([], {}), '()\n', (52409, 52411), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((59624, 59635), 'time.time', 
'time.time', ([], {}), '()\n', (59633, 59635), False, 'import time\n'), ((4604, 4625), 'numpy.arange', 'np.arange', (['(npix // 20)'], {}), '(npix // 20)\n', (4613, 4625), True, 'import numpy as np\n'), ((4886, 4904), 'numpy.min', 'np.min', (['wave[:, o]'], {}), '(wave[:, o])\n', (4892, 4904), True, 'import numpy as np\n'), ((4926, 4944), 'numpy.max', 'np.max', (['wave[:, o]'], {}), '(wave[:, o])\n', (4932, 4944), True, 'import numpy as np\n'), ((5355, 5371), 'numpy.min', 'np.min', (['(fwhm / 4)'], {}), '(fwhm / 4)\n', (5361, 5371), True, 'import numpy as np\n'), ((9144, 9161), 'dlnpyutils.utils.basiclogger', 'dln.basiclogger', ([], {}), '()\n', (9159, 9161), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((13984, 13999), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13997, 13999), False, 'import pdb\n'), ((20029, 20054), 'os.path.exists', 'os.path.exists', (['modelfile'], {}), '(modelfile)\n', (20043, 20054), False, 'import os\n'), ((22349, 22403), 'synple.synple.interp_spl', 'synple.interp_spl', (['spec.wave[gd]', 'shiftwave', 'spec.flux'], {}), '(spec.wave[gd], shiftwave, spec.flux)\n', (22366, 22403), False, 'from synple import synple\n'), ((24406, 24446), 'numpy.ceil', 'np.ceil', (['(w1 * (1.0 + 1500 / cspeed) - w1)'], {}), '(w1 * (1.0 + 1500 / cspeed) - w1)\n', (24413, 24446), True, 'import numpy as np\n'), ((25100, 25113), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (25108, 25113), True, 'import numpy as np\n'), ((25863, 25891), 'dlnpyutils.astro.airtovac', 'astro.airtovac', (['synspec.wave'], {}), '(synspec.wave)\n', (25877, 25891), False, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((25998, 26026), 'dlnpyutils.astro.vactoair', 'astro.vactoair', (['synspec.wave'], {}), '(synspec.wave)\n', (26012, 26026), False, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((26287, 26323), 'numpy.zeros', 'np.zeros', (['(npix, norder)', 'np.float32'], {}), '((npix, norder), np.float32)\n', (26295, 26323), True, 
'import numpy as np\n'), ((26772, 26787), 'dlnpyutils.utils.slope', 'dln.slope', (['wobs'], {}), '(wobs)\n', (26781, 26787), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((27188, 27209), 'numpy.arange', 'np.arange', (['(npix // 20)'], {}), '(npix // 20)\n', (27197, 27209), True, 'import numpy as np\n'), ((27418, 27428), 'numpy.abs', 'np.abs', (['dw'], {}), '(dw)\n', (27424, 27428), True, 'import numpy as np\n'), ((27989, 28004), 'numpy.min', 'np.min', (['fwhmpix'], {}), '(fwhmpix)\n', (27995, 28004), True, 'import numpy as np\n'), ((28173, 28188), 'numpy.min', 'np.min', (['fwhmpix'], {}), '(fwhmpix)\n', (28179, 28188), True, 'import numpy as np\n'), ((28468, 28511), 'dlnpyutils.utils.rebin', 'dln.rebin', (['modelflux[0:npix2 * nbin]', 'npix2'], {}), '(modelflux[0:npix2 * nbin], npix2)\n', (28477, 28511), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((28533, 28576), 'dlnpyutils.utils.rebin', 'dln.rebin', (['modelwave[0:npix2 * nbin]', 'npix2'], {}), '(modelwave[0:npix2 * nbin], npix2)\n', (28542, 28576), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((28598, 28641), 'dlnpyutils.utils.rebin', 'dln.rebin', (['modelcont[0:npix2 * nbin]', 'npix2'], {}), '(modelcont[0:npix2 * nbin], npix2)\n', (28607, 28641), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((29369, 29390), 'numpy.char.array', 'np.char.array', (['params'], {}), '(params)\n', (29382, 29390), True, 'import numpy as np\n'), ((30599, 30621), 'numpy.where', 'np.where', (['(limkeys == f)'], {}), '(limkeys == f)\n', (30607, 30621), True, 'import numpy as np\n'), ((32870, 32917), 'dlnpyutils.utils.limit', 'dln.limit', (['pinit[k]', 'bounds[0][k]', 'bounds[1][k]'], {}), '(pinit[k], bounds[0][k], bounds[1][k])\n', (32879, 32917), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((33464, 33552), 'matplotlib.pyplot.plot', 'plt.plot', (['original.wave', 'original.flux'], {'color': '"""green"""', 'label': '"""Original"""', 
'linewidth': '(1)'}), "(original.wave, original.flux, color='green', label='Original',\n linewidth=1)\n", (33472, 33552), True, 'import matplotlib.pyplot as plt\n'), ((33923, 33955), 'numpy.min', 'np.min', (['[spec.flux, fmodel.flux]'], {}), '([spec.flux, fmodel.flux])\n', (33929, 33955), True, 'import numpy as np\n'), ((33956, 33988), 'numpy.max', 'np.max', (['[spec.flux, fmodel.flux]'], {}), '([spec.flux, fmodel.flux])\n', (33962, 33988), True, 'import numpy as np\n'), ((34218, 34239), 'numpy.max', 'np.max', (['[yr[0], -0.2]'], {}), '([yr[0], -0.2])\n', (34224, 34239), True, 'import numpy as np\n'), ((34240, 34260), 'numpy.min', 'np.min', (['[yr[1], 2.0]'], {}), '([yr[1], 2.0])\n', (34246, 34260), True, 'import numpy as np\n'), ((35520, 35547), 'dlnpyutils.utils.minmax', 'dln.minmax', (['spec.wave[:, i]'], {}), '(spec.wave[:, i])\n', (35530, 35547), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((38153, 38170), 'numpy.array', 'np.array', (['initpar'], {}), '(initpar)\n', (38161, 38170), True, 'import numpy as np\n'), ((38353, 38377), 'numpy.min', 'np.min', (['p.ranges'], {'axis': '(1)'}), '(p.ranges, axis=1)\n', (38359, 38377), True, 'import numpy as np\n'), ((38425, 38449), 'numpy.max', 'np.max', (['p.ranges'], {'axis': '(1)'}), '(p.ranges, axis=1)\n', (38431, 38449), True, 'import numpy as np\n'), ((43822, 43837), 'numpy.array', 'np.array', (['chisq'], {}), '(chisq)\n', (43830, 43837), True, 'import numpy as np\n'), ((48754, 48807), 'numpy.sum', 'np.sum', (['(((spfitter.flux - model) / spfitter.err) ** 2)'], {}), '(((spfitter.flux - model) / spfitter.err) ** 2)\n', (48760, 48807), True, 'import numpy as np\n'), ((52452, 52517), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(levelname)-5.5s] %(message)s"""'], {}), "('%(asctime)s [%(levelname)-5.5s] %(message)s')\n", (52469, 52517), False, 'import logging\n'), ((57858, 57871), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (57866, 57871), True, 'import numpy as np\n'), 
((58571, 58582), 'time.time', 'time.time', ([], {}), '()\n', (58580, 58582), False, 'import time\n'), ((61312, 61325), 'numpy.array', 'np.array', (['(0.0)'], {}), '(0.0)\n', (61320, 61325), True, 'import numpy as np\n'), ((4468, 4487), 'numpy.atleast_2d', 'np.atleast_2d', (['wave'], {}), '(wave)\n', (4481, 4487), True, 'import numpy as np\n'), ((4842, 4863), 'dlnpyutils.utils.slope', 'dln.slope', (['wave[:, o]'], {}), '(wave[:, o])\n', (4851, 4863), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((5191, 5204), 'numpy.abs', 'np.abs', (['dw[o]'], {}), '(dw[o])\n', (5197, 5204), True, 'import numpy as np\n'), ((6749, 6797), 'numpy.sum', 'np.sum', (['((self.flux - model) ** 2 / self.err ** 2)'], {}), '((self.flux - model) ** 2 / self.err ** 2)\n', (6755, 6797), True, 'import numpy as np\n'), ((11351, 11396), 'numpy.sum', 'np.sum', (['((self.flux - f0) ** 2 / self.err ** 2)'], {}), '((self.flux - f0) ** 2 / self.err ** 2)\n', (11357, 11396), True, 'import numpy as np\n'), ((11717, 11736), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (11730, 11736), False, 'import copy\n'), ((13162, 13190), 'dlnpyutils.astro.airtovac', 'astro.airtovac', (['synspec.wave'], {}), '(synspec.wave)\n', (13176, 13190), False, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((13830, 13845), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (13843, 13845), False, 'import pdb\n'), ((20067, 20085), 'os.stat', 'os.stat', (['modelfile'], {}), '(modelfile)\n', (20074, 20085), False, 'import os\n'), ((21944, 21998), 'synple.synple.interp_spl', 'synple.interp_spl', (['spec.wave[gd]', 'shiftwave', 'spec.cont'], {}), '(spec.wave[gd], shiftwave, spec.cont)\n', (21961, 21998), False, 'from synple import synple\n'), ((26833, 26845), 'numpy.min', 'np.min', (['wobs'], {}), '(wobs)\n', (26839, 26845), True, 'import numpy as np\n'), ((26904, 26916), 'numpy.max', 'np.max', (['wobs'], {}), '(wobs)\n', (26910, 26916), True, 'import numpy as np\n'), ((27723, 27738), 
'numpy.round', 'np.round', (['xpmod'], {}), '(xpmod)\n', (27731, 27738), True, 'import numpy as np\n'), ((34040, 34087), 'numpy.min', 'np.min', (['[original.flux, spec.flux, fmodel.flux]'], {}), '([original.flux, spec.flux, fmodel.flux])\n', (34046, 34087), True, 'import numpy as np\n'), ((34087, 34119), 'numpy.max', 'np.max', (['[spec.flux, fmodel.flux]'], {}), '([spec.flux, fmodel.flux])\n', (34093, 34119), True, 'import numpy as np\n'), ((35565, 35609), 'numpy.min', 'np.min', (['[spec.flux[:, i], fmodel.flux[:, i]]'], {}), '([spec.flux[:, i], fmodel.flux[:, i]])\n', (35571, 35609), True, 'import numpy as np\n'), ((35608, 35652), 'numpy.max', 'np.max', (['[spec.flux[:, i], fmodel.flux[:, i]]'], {}), '([spec.flux[:, i], fmodel.flux[:, i]])\n', (35614, 35652), True, 'import numpy as np\n'), ((36020, 36041), 'numpy.max', 'np.max', (['[yr[0], -0.2]'], {}), '([yr[0], -0.2])\n', (36026, 36041), True, 'import numpy as np\n'), ((36042, 36062), 'numpy.min', 'np.min', (['[yr[1], 2.0]'], {}), '([yr[1], 2.0])\n', (36048, 36062), True, 'import numpy as np\n'), ((36189, 36223), 'numpy.nanmedian', 'np.nanmedian', (['(spec.flux / spec.err)'], {}), '(spec.flux / spec.err)\n', (36201, 36223), True, 'import numpy as np\n'), ((39143, 39197), 'doppler.utils.broaden', 'utils.broaden', (['m.wave[:, k]', 'm.flux[:, k]'], {'vsini': 'vsini'}), '(m.wave[:, k], m.flux[:, k], vsini=vsini)\n', (39156, 39197), False, 'from doppler import cannon, utils, reader\n'), ((39856, 39875), 'copy.deepcopy', 'copy.deepcopy', (['args'], {}), '(args)\n', (39869, 39875), False, 'import copy\n'), ((56651, 56689), 'dlnpyutils.utils.poly', 'dln.poly', (["params['LOGG']", 'vmcoef[::-1]'], {}), "(params['LOGG'], vmcoef[::-1])\n", (56659, 56689), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((58418, 58495), 'numpy.dtype', 'np.dtype', (["[('name', np.str, 10), ('par', np.float64), ('parerr', np.float64)]"], {}), "([('name', np.str, 10), ('par', np.float64), ('parerr', np.float64)])\n", (58426, 
58495), True, 'import numpy as np\n'), ((13903, 13919), 'numpy.isfinite', 'np.isfinite', (['jac'], {}), '(jac)\n', (13914, 13919), True, 'import numpy as np\n'), ((18325, 18346), 'numpy.char.array', 'np.char.array', (['mlines'], {}), '(mlines)\n', (18338, 18346), True, 'import numpy as np\n'), ((22194, 22289), 'dlnpyutils.utils.interp', 'dln.interp', (['spec.wave[gd]', 'spec.cont[gd]', 'spec.wave[bd]'], {'kind': '"""linear"""', 'assume_sorted': '(False)'}), "(spec.wave[gd], spec.cont[gd], spec.wave[bd], kind='linear',\n assume_sorted=False)\n", (22204, 22289), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((22636, 22681), 'dlnpyutils.utils.poly_fit', 'dln.poly_fit', (['spec.wave[gd]', 'spec.flux[gd]', '(2)'], {}), '(spec.wave[gd], spec.flux[gd], 2)\n', (22648, 22681), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((22714, 22743), 'dlnpyutils.utils.poly', 'dln.poly', (['spec.wave[bd]', 'coef'], {}), '(spec.wave[bd], coef)\n', (22722, 22743), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((26848, 26858), 'numpy.abs', 'np.abs', (['dw'], {}), '(dw)\n', (26854, 26858), True, 'import numpy as np\n'), ((26919, 26929), 'numpy.abs', 'np.abs', (['dw'], {}), '(dw)\n', (26925, 26929), True, 'import numpy as np\n'), ((34152, 34168), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (34164, 34168), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((34180, 34196), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (34192, 34196), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((35710, 35775), 'numpy.min', 'np.min', (['[original.flux[:, i], spec.flux[:, i], fmodel.flux[:, i]]'], {}), '([original.flux[:, i], spec.flux[:, i], fmodel.flux[:, i]])\n', (35716, 35775), True, 'import numpy as np\n'), ((35772, 35816), 'numpy.max', 'np.max', (['[spec.flux[:, i], fmodel.flux[:, i]]'], {}), '([spec.flux[:, i], fmodel.flux[:, i]])\n', (35778, 35816), True, 'import numpy 
as np\n'), ((42909, 42924), 'numpy.array', 'np.array', (['chisq'], {}), '(chisq)\n', (42917, 42924), True, 'import numpy as np\n'), ((45624, 45635), 'time.time', 'time.time', ([], {}), '()\n', (45633, 45635), False, 'import time\n'), ((48968, 48979), 'time.time', 'time.time', ([], {}), '()\n', (48977, 48979), False, 'import time\n'), ((54429, 54440), 'time.time', 'time.time', ([], {}), '()\n', (54438, 54440), False, 'import time\n'), ((55521, 55532), 'time.time', 'time.time', ([], {}), '()\n', (55530, 55532), False, 'import time\n'), ((60200, 60219), 'numpy.char.array', 'np.char.array', (['elem'], {}), '(elem)\n', (60213, 60219), True, 'import numpy as np\n'), ((61528, 61539), 'time.time', 'time.time', ([], {}), '()\n', (61537, 61539), False, 'import time\n'), ((13742, 13757), 'numpy.isfinite', 'np.isfinite', (['f1'], {}), '(f1)\n', (13753, 13757), True, 'import numpy as np\n'), ((17951, 17982), 'numpy.char.array', 'np.char.array', (["pertab['symbol']"], {}), "(pertab['symbol'])\n", (17964, 17982), True, 'import numpy as np\n'), ((21752, 21769), 'numpy.min', 'np.min', (['shiftwave'], {}), '(shiftwave)\n', (21758, 21769), True, 'import numpy as np\n'), ((21787, 21804), 'numpy.max', 'np.max', (['shiftwave'], {}), '(shiftwave)\n', (21793, 21804), True, 'import numpy as np\n'), ((22578, 22604), 'numpy.isfinite', 'np.isfinite', (['spec.flux[gd]'], {}), '(spec.flux[gd])\n', (22589, 22604), True, 'import numpy as np\n'), ((27937, 27952), 'numpy.min', 'np.min', (['fwhmpix'], {}), '(fwhmpix)\n', (27943, 27952), True, 'import numpy as np\n'), ((35839, 35855), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (35851, 35855), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((35867, 35883), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (35879, 35883), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((38939, 38971), 'numpy.zeros', 'np.zeros', (['spec.flux.shape', 'float'], {}), '(spec.flux.shape, 
float)\n', (38947, 38971), True, 'import numpy as np\n'), ((43530, 43543), 'numpy.min', 'np.min', (['chisq'], {}), '(chisq)\n', (43536, 43543), True, 'import numpy as np\n'), ((43646, 43657), 'time.time', 'time.time', ([], {}), '()\n', (43655, 43657), False, 'import time\n'), ((59295, 59306), 'time.time', 'time.time', ([], {}), '()\n', (59304, 59306), False, 'import time\n'), ((28105, 28120), 'numpy.min', 'np.min', (['fwhmpix'], {}), '(fwhmpix)\n', (28111, 28120), True, 'import numpy as np\n'), ((28291, 28306), 'numpy.min', 'np.min', (['fwhmpix'], {}), '(fwhmpix)\n', (28297, 28306), True, 'import numpy as np\n'), ((35939, 35955), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (35951, 35955), True, 'from dlnpyutils import utils as dln, bindata, astro\n'), ((35967, 35983), 'dlnpyutils.utils.valrange', 'dln.valrange', (['yr'], {}), '(yr)\n', (35979, 35983), True, 'from dlnpyutils import utils as dln, bindata, astro\n')] |
# author: <NAME>
# date: 2020-11-25
"""
This script Splits the raw cleaned data to train and test splits
based on the user input and saves them into two separate csv files
Usage: clean_data.py --input_file_path=<input_file_path> --saving_path_train=<saving_path_train> --saving_path_test=<saving_path_test> --test_size=<test_size>
Options:
--input_file_path=<file_path> Path to the cleaned input data file
--saving_path_train=<saving_path_train> Path the training data file must be saved as csv file
--saving_path_test=<saving_path_test> Path the testing data file must be saved as csv file
--test_size=<saving_path_test> The proportion of test data to all the data. must be between 0 and 1.
"""
import pandas as pd
from docopt import docopt
from sklearn.model_selection import train_test_split

# Parse the command-line arguments declared in the module docstring above
# (docopt convention); `opt` maps option names to their string values.
opt = docopt(__doc__)
def main(input_file_path, saving_path_train, saving_path_test, test_size):
    """
    Split the cleaned data into train and test portions for the
    predictive model and save each portion as a csv file.

    Args:
        input_file_path (string): the file path to cleaned data file
        saving_path_train (string): the file path the script will
        save the train data to the csv file.
        saving_path_test (string): the file path the script will
        save the test data to the csv file.
        test_size (float) : the test portion of the data. must be
        between 0 and 1.
    Returns:
        0 if main was successful
        -1 if main failed.
    """
    # read in data
    try:
        df = pd.read_csv(input_file_path)
    except Exception as e:
        print(f"The script failed to open the cleaned data file with the error {e}")
        return -1
    # Check test size is valid
    try:
        test_size = float(test_size)
        if test_size < 0 or test_size > 1:
            print("The test_size argument must be between 0 and 1")
            return -1
    # BUGFIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only conversion failures should be handled here.
    except (TypeError, ValueError):
        print("The test_size argument must be a numeric number")
        return -1
    # Split dataframe (fixed random_state so the split is reproducible)
    try:
        train_data, test_data = train_test_split(
            df, test_size=test_size, random_state=123
        )
    except Exception as e:
        print(f"The script failed to split data with error {e}")
        return -1
    # Save data
    try:
        # save train portion
        train_data.to_csv(saving_path_train, index_label=False, index=False)
        # save test portion
        test_data.to_csv(saving_path_test, index_label=False, index=False)
    except Exception as e:
        print(f"The script failed to save the save train or test with the error {e}")
        return -1
    return 0
if __name__ == "__main__":
    # Entry point: forward the parsed docopt options to main() in the
    # order its parameters are declared.
    option_names = (
        "--input_file_path",
        "--saving_path_train",
        "--saving_path_test",
        "--test_size",
    )
    main(*(opt[name] for name in option_names))
| [
"sklearn.model_selection.train_test_split",
"docopt.docopt",
"pandas.read_csv"
] | [((814, 829), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (820, 829), False, 'from docopt import docopt\n'), ((1543, 1571), 'pandas.read_csv', 'pd.read_csv', (['input_file_path'], {}), '(input_file_path)\n', (1554, 1571), True, 'import pandas as pd\n'), ((2072, 2131), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': 'test_size', 'random_state': '(123)'}), '(df, test_size=test_size, random_state=123)\n', (2088, 2131), False, 'from sklearn.model_selection import train_test_split\n')] |
import base64
import json
import os
import tempfile
import uuid
import zipfile
from io import BytesIO
import werkzeug
from flask import Blueprint, jsonify, request
from ..config import get_config
from ..dataset import convert_ndarray_to_image, import_csv_as_mdp_dataset
from ..models.dataset import Dataset, DatasetSchema
from .generator import generate_for_model
# Flask blueprint grouping every dataset endpoint defined in this module.
dataset_route = Blueprint("dataset", __name__)
# Register the generic model routes for Dataset/DatasetSchema on this
# blueprint (see .generator.generate_for_model).
generate_for_model(dataset_route, Dataset, DatasetSchema)
@dataset_route.route("/upload", methods=["POST"])
def upload_dataset():
# validation
if "dataset" not in request.files:
return jsonify({"status": "dataset is empty"}), 400
# save uploaded files and create MDPDataset
with tempfile.TemporaryDirectory() as dname:
# save file
file = request.files["dataset"]
file_name = werkzeug.utils.secure_filename(file.filename)
file_path = os.path.join(dname, file_name)
file.save(file_path)
# save image files
is_image = request.form.get("is_image") == "true"
if is_image:
# save zip file
zip_file = request.files["zip_file"]
zip_file_name = werkzeug.utils.secure_filename(zip_file.filename)
zip_file_path = os.path.join(dname, zip_file_name)
zip_file.save(zip_file_path)
# decompress zip file
with zipfile.ZipFile(zip_file_path) as zip_fd:
zip_fd.extractall(dname)
# convert uploaded data to MDPDataset
try:
mdp_dataset = import_csv_as_mdp_dataset(file_path, image=is_image)
except ValueError:
return jsonify({"status": "dataset conversion failed."}), 400
# save MDPDataset object.
dataset_name = str(uuid.uuid1()) + ".h5"
dataset_path = os.path.join(get_config("DATASET_DIR"), dataset_name)
mdp_dataset.dump(dataset_path)
# get dataset size
data_size = os.path.getsize(dataset_path)
episode_size = len(mdp_dataset)
step_size = sum(map(len, mdp_dataset))
# compute statistics
stats = mdp_dataset.compute_stats()
stats["observation_shape"] = mdp_dataset.get_observation_shape()
stats["action_size"] = mdp_dataset.get_action_size()
# handle ndarray serialization
stats_json = json.dumps(jsonify(stats).json)
# insert record
dataset = Dataset.create(
file_name,
dataset_name,
episode_size,
step_size,
data_size,
is_image,
mdp_dataset.is_action_discrete(),
stats_json,
)
# return json
return jsonify(DatasetSchema().dump(dataset))
@dataset_route.route("/<dataset_id>/example", methods=["GET"])
def get_example_vector_observation(dataset_id):
dataset = Dataset.get(dataset_id, raise_404=True)
# take care of computational cost
mdp_dataset = dataset.load_mdp_dataset()
if dataset.is_image:
# take first 3 samples
ndarrays = mdp_dataset.observations[:3]
observations = []
for ndarray in ndarrays:
image = convert_ndarray_to_image(ndarray)
# encode image to base64
buffer = BytesIO()
image.save(buffer, format="PNG")
encoded_image = base64.b64encode(buffer.getvalue())
# return in string
observations.append(encoded_image.decode().replace("'", ""))
else:
# take first 100 samples
n_steps = min(100, mdp_dataset.observations.shape[0])
observations = mdp_dataset.observations[:n_steps]
return jsonify({"observations": observations})
| [
"tempfile.TemporaryDirectory",
"os.path.getsize",
"zipfile.ZipFile",
"os.path.join",
"io.BytesIO",
"flask.request.form.get",
"uuid.uuid1",
"werkzeug.utils.secure_filename",
"flask.Blueprint",
"flask.jsonify"
] | [((383, 413), 'flask.Blueprint', 'Blueprint', (['"""dataset"""', '__name__'], {}), "('dataset', __name__)\n", (392, 413), False, 'from flask import Blueprint, jsonify, request\n'), ((1947, 1976), 'os.path.getsize', 'os.path.getsize', (['dataset_path'], {}), '(dataset_path)\n', (1962, 1976), False, 'import os\n'), ((3567, 3606), 'flask.jsonify', 'jsonify', (["{'observations': observations}"], {}), "({'observations': observations})\n", (3574, 3606), False, 'from flask import Blueprint, jsonify, request\n'), ((721, 750), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (748, 750), False, 'import tempfile\n'), ((841, 886), 'werkzeug.utils.secure_filename', 'werkzeug.utils.secure_filename', (['file.filename'], {}), '(file.filename)\n', (871, 886), False, 'import werkzeug\n'), ((907, 937), 'os.path.join', 'os.path.join', (['dname', 'file_name'], {}), '(dname, file_name)\n', (919, 937), False, 'import os\n'), ((618, 657), 'flask.jsonify', 'jsonify', (["{'status': 'dataset is empty'}"], {}), "({'status': 'dataset is empty'})\n", (625, 657), False, 'from flask import Blueprint, jsonify, request\n'), ((1014, 1042), 'flask.request.form.get', 'request.form.get', (['"""is_image"""'], {}), "('is_image')\n", (1030, 1042), False, 'from flask import Blueprint, jsonify, request\n'), ((1179, 1228), 'werkzeug.utils.secure_filename', 'werkzeug.utils.secure_filename', (['zip_file.filename'], {}), '(zip_file.filename)\n', (1209, 1228), False, 'import werkzeug\n'), ((1257, 1291), 'os.path.join', 'os.path.join', (['dname', 'zip_file_name'], {}), '(dname, zip_file_name)\n', (1269, 1291), False, 'import os\n'), ((2311, 2325), 'flask.jsonify', 'jsonify', (['stats'], {}), '(stats)\n', (2318, 2325), False, 'from flask import Blueprint, jsonify, request\n'), ((3168, 3177), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3175, 3177), False, 'from io import BytesIO\n'), ((1384, 1414), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_file_path'], {}), '(zip_file_path)\n', 
(1399, 1414), False, 'import zipfile\n'), ((1769, 1781), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1779, 1781), False, 'import uuid\n'), ((1652, 1701), 'flask.jsonify', 'jsonify', (["{'status': 'dataset conversion failed.'}"], {}), "({'status': 'dataset conversion failed.'})\n", (1659, 1701), False, 'from flask import Blueprint, jsonify, request\n')] |
"""
Update release numbers in various places, according to a release.ini file places at the project root
"""
import configparser
import logging
import sys
from configparser import ConfigParser
from pathlib import Path
from typing import Optional, Tuple
import click
from bump_release import helpers
from bump_release.helpers import split_version
# region Globals
# Package version; also reported by the CLI's --version flag below.
__version__ = VERSION = "0.9.6"
# Resolved path of the release.ini file; assigned by bump_release().
RELEASE_FILE: Optional[Path] = None
# Parsed release.ini configuration; assigned by bump_release().
RELEASE_CONFIG: Optional[ConfigParser] = None
# endregion Globals
@click.command()
@click.option(
    "-r",
    "--release-file",
    "release_file",
    help="Release file path, default `./release.ini`",
)
@click.option(
    "-n",
    "--dry-run",
    "dry_run",
    is_flag=True,
    help="If set, no operation are performed on files",
    default=False,
)
@click.option(
    "-d",
    "--debug",
    "debug",
    is_flag=True,
    help="If set, more traces are printed for users",
    default=False,
)
@click.version_option(version=__version__)
@click.argument("release")
def bump_release(
    release: str,
    release_file: Optional[str] = None,
    dry_run: bool = False,
    debug: bool = False,
) -> int:
    """
    Updates the files according to the release.ini file

    :param release: Version number, as "X.X.X"
    :param release_file: path to the release.ini config file (default: ./release.ini)
    :param dry_run: If `True`, no operation performed
    :param debug: If `True`, more traces !
    :return: 0 on success, 1 if the release file is missing, 2 on any update error
    """
    # Loads the release.ini file
    global RELEASE_CONFIG, RELEASE_FILE
    if release_file is None:
        RELEASE_FILE = Path.cwd() / "release.ini"
    else:
        RELEASE_FILE = Path(release_file)
    if not RELEASE_FILE.exists():
        # BUGFIX: report the path actually searched — the previous message
        # always blamed the current directory, even when an explicit
        # --release-file path was given.
        print(f"Unable to find release file {RELEASE_FILE}", file=sys.stderr)
        return 1
    RELEASE_CONFIG = helpers.load_release_file(release_file=RELEASE_FILE)
    try:
        return process_update(release_file=RELEASE_FILE, release=release, dry_run=dry_run, debug=debug)
    except Exception as e:
        # CLI boundary: translate any unexpected failure into exit code 2.
        print(f"ERROR: {e}", file=sys.stderr)
        return 2
def process_update(release_file: Path, release: str, dry_run: bool, debug: bool = False) -> int:
    """
    Run every configured version update for the given release number.

    Each optional target (main project file, sonar properties, setup.py,
    Sphinx docs, node package, ansible vars) is attempted in turn; a
    missing release.ini section only logs a warning and the remaining
    targets are still processed. The release.ini file itself is always
    updated last.

    :param release_file: path to the release.ini file (updated at the end)
    :param release: version number, as "X.X.X"
    :param dry_run: If `True`, no operation performed
    :param debug: If `True`, DEBUG-level logging is enabled
    :return: 0 (errors other than NothingToDoException propagate)
    """
    version = split_version(release)
    # Initialize the logging
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)
    # REFACTOR: the six copy-pasted try/except sections are driven by one
    # table of (release.ini section label, updater callable) pairs.
    updaters = (
        ("main_project", update_main_file),
        ("sonar", update_sonar_properties),
        ("setup", update_setup_file),
        ("docs", update_docs_conf),
        ("node", update_node_package),
        ("ansible", update_ansible_vars),
    )
    for label, updater in updaters:
        try:
            new_row = updater(version=version, dry_run=dry_run)
            if new_row is not None:
                logging.debug(f"process_update() `{label}`: new_row = {new_row.strip()}")
        except helpers.NothingToDoException as e:
            # A missing/incomplete section is not an error: skip this target.
            logging.warning(f"process_update() No release section for `{label}`: {e}")
    # Updates the release.ini file with the new release number
    new_row = update_release_ini(path=release_file, version=version, dry_run=dry_run)
    if new_row is not None:
        logging.warning(f"process_update() `release.ini`: new_row = {new_row.strip()}")
    return 0
def update_main_file(version: Tuple[str, str, str], dry_run: bool = True) -> Optional[str]:
    """
    Updates the main django settings file, or a python script.

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("main_project"):
        raise helpers.NothingToDoException("No `main_project` section in release.ini file")
    try:
        section = RELEASE_CONFIG["main_project"]
        raw_path = section.get("path")
        # A section without a `path` key means there is nothing to update.
        if raw_path is None:
            raise helpers.NothingToDoException("No action to perform for main project: No path provided.")
        target = Path(raw_path)
        # Quoted values in release.ini override the built-in defaults.
        search_pattern = section.get("pattern", "").strip('"') or helpers.MAIN_PROJECT_PATTERN
        line_template = section.get("template", "").strip('"') or helpers.MAIN_PROJECT_TEMPLATE
    except configparser.Error as exc:
        raise helpers.NothingToDoException("Unable to update main project file", exc)
    return helpers.update_file(
        path=target, pattern=search_pattern, template=line_template, version=version, dry_run=dry_run
    )
def update_setup_file(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the setup.py file

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    :raises helpers.NothingToDoException: if the `setup` section is absent,
        incomplete or unreadable
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("setup"):
        raise helpers.NothingToDoException("No `setup` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["setup"].get("path")
        # BUGFIX (consistency with update_main_file): a missing `path` key
        # used to leak a TypeError from Path(None); treat it as "nothing to do".
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for setup file: No path provided.")
        path = Path(_path)
        pattern = RELEASE_CONFIG["setup"].get("pattern", "").strip('"') or helpers.SETUP_PATTERN
        template = RELEASE_CONFIG["setup"].get("template", "").strip('"') or helpers.SETUP_TEMPLATE
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for setup file", e)
    return helpers.update_file(path=path, pattern=pattern, template=template, version=version, dry_run=dry_run)
def update_sonar_properties(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the sonar-project.properties file with the new release number

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    :raises helpers.NothingToDoException: if the `sonar` section is absent,
        incomplete or unreadable
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("sonar"):
        raise helpers.NothingToDoException("No `sonar` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["sonar"].get("path")
        # BUGFIX (consistency with update_main_file): a missing `path` key
        # used to leak a TypeError from Path(None); treat it as "nothing to do".
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for sonar file: No path provided.")
        path = Path(_path)
        pattern = RELEASE_CONFIG["sonar"].get("pattern", "").strip('"') or helpers.SONAR_PATTERN
        template = RELEASE_CONFIG["sonar"].get("template", "").strip('"') or helpers.SONAR_TEMPLATE
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for sonar file", e)
    return helpers.update_file(path=path, pattern=pattern, template=template, version=version, dry_run=dry_run)
def update_docs_conf(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the Sphinx conf.py file with the new release number.

    Sphinx keeps two numbers (`release` and `version`), so two
    pattern/template pairs are applied to the same file.

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: the concatenated changed lines, or None if nothing changed
    :raises helpers.NothingToDoException: if the `docs` section is absent,
        incomplete or unreadable
    """
    assert RELEASE_CONFIG is not None
    if not RELEASE_CONFIG.has_section("docs"):
        raise helpers.NothingToDoException("No `docs` section in release.ini file")
    try:
        _path = RELEASE_CONFIG["docs"].get("path")
        # BUGFIX (consistency with update_main_file): a missing `path` key
        # used to leak a TypeError from Path(None); treat it as "nothing to do".
        if _path is None:
            raise helpers.NothingToDoException("No action to perform for docs file: No path provided.")
        path = Path(_path)
        pattern_release = RELEASE_CONFIG["docs"].get("pattern_release", "").strip('"') or helpers.DOCS_RELEASE_PATTERN
        template_release = RELEASE_CONFIG["docs"].get("template_release", "").strip('"') or helpers.DOCS_RELEASE_FORMAT
        pattern_version = RELEASE_CONFIG["docs"].get("pattern_version", "").strip('"') or helpers.DOCS_VERSION_PATTERN
        template_version = RELEASE_CONFIG["docs"].get("template_version", "").strip('"') or helpers.DOCS_VERSION_FORMAT
    except configparser.Error as e:
        raise helpers.NothingToDoException("No action to perform for docs file", e)
    update_release = helpers.update_file(
        path=path,
        pattern=pattern_release,
        template=template_release,
        version=version,
        dry_run=dry_run,
    )
    update_version = helpers.update_file(
        path=path,
        pattern=pattern_version,
        template=template_version,
        version=version,
        dry_run=dry_run,
    )
    # BUGFIX: `str(a) + str(b)` produced the literal text "None" whenever an
    # update changed nothing; only join the real results.
    changed = [part for part in (update_release, update_version) if part is not None]
    return "".join(changed) if changed else None
def update_node_package(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the nodejs package file with the new release number.

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    """
    assert RELEASE_CONFIG is not None
    try:
        # A missing [node] section or `path` option surfaces here as a
        # configparser.Error and is converted below.
        package_path = Path(RELEASE_CONFIG.get("node", "path"))
        version_key = RELEASE_CONFIG.get("node", "key", fallback=helpers.NODE_KEY)  # noqa
    except configparser.Error as exc:
        raise helpers.NothingToDoException("No action to perform for node packages file", exc)
    return helpers.update_node_packages(path=package_path, version=version, key=version_key, dry_run=dry_run)
def update_ansible_vars(version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the ansible project variables file with the new release number.

    :param version: Release number tuple (major, minor, release)
    :param dry_run: If `True`, no operation performed
    :return: changed string
    """
    assert RELEASE_CONFIG is not None
    try:
        # A missing [ansible] section or `path` option surfaces here as a
        # configparser.Error and is converted below.
        vars_path = Path(RELEASE_CONFIG.get("ansible", "path"))
        version_key = RELEASE_CONFIG.get("ansible", "key", fallback=helpers.ANSIBLE_KEY)  # noqa
    except configparser.Error as exc:
        raise helpers.NothingToDoException("No action to perform for ansible file", exc)
    return helpers.updates_yaml_file(path=vars_path, version=version, key=version_key, dry_run=dry_run)
def update_release_ini(path: Path, version: Tuple[str, str, str], dry_run: bool = False) -> Optional[str]:
    """
    Updates the release.ini file with the new release number.

    :param path: Release file path
    :param version: release number, as (<major>, <minor>, <release>)
    :param dry_run: If `True`, the operation WILL NOT be performed
    :return: Updated lines
    """
    # The pattern/template pair for release.ini itself is fixed in helpers,
    # so no configuration lookup is needed here.
    update_kwargs = {
        "path": path,
        "pattern": helpers.RELEASE_INI_PATTERN,
        "template": helpers.RELEASE_INI_TEMPLATE,
        "version": version,
        "dry_run": dry_run,
    }
    return helpers.update_file(**update_kwargs)
| [
"logging.basicConfig",
"bump_release.helpers.update_file",
"click.argument",
"logging.debug",
"bump_release.helpers.load_release_file",
"pathlib.Path",
"click.option",
"bump_release.helpers.updates_yaml_file",
"pathlib.Path.cwd",
"logging.warning",
"bump_release.helpers.NothingToDoException",
... | [((503, 518), 'click.command', 'click.command', ([], {}), '()\n', (516, 518), False, 'import click\n'), ((520, 628), 'click.option', 'click.option', (['"""-r"""', '"""--release-file"""', '"""release_file"""'], {'help': '"""Release file path, default `./release.ini`"""'}), "('-r', '--release-file', 'release_file', help=\n 'Release file path, default `./release.ini`')\n", (532, 628), False, 'import click\n'), ((644, 772), 'click.option', 'click.option', (['"""-n"""', '"""--dry-run"""', '"""dry_run"""'], {'is_flag': '(True)', 'help': '"""If set, no operation are performed on files"""', 'default': '(False)'}), "('-n', '--dry-run', 'dry_run', is_flag=True, help=\n 'If set, no operation are performed on files', default=False)\n", (656, 772), False, 'import click\n'), ((796, 918), 'click.option', 'click.option', (['"""-d"""', '"""--debug"""', '"""debug"""'], {'is_flag': '(True)', 'help': '"""If set, more traces are printed for users"""', 'default': '(False)'}), "('-d', '--debug', 'debug', is_flag=True, help=\n 'If set, more traces are printed for users', default=False)\n", (808, 918), False, 'import click\n'), ((942, 983), 'click.version_option', 'click.version_option', ([], {'version': '__version__'}), '(version=__version__)\n', (962, 983), False, 'import click\n'), ((985, 1010), 'click.argument', 'click.argument', (['"""release"""'], {}), "('release')\n", (999, 1010), False, 'import click\n'), ((1841, 1893), 'bump_release.helpers.load_release_file', 'helpers.load_release_file', ([], {'release_file': 'RELEASE_FILE'}), '(release_file=RELEASE_FILE)\n', (1866, 1893), False, 'from bump_release import helpers\n'), ((2210, 2232), 'bump_release.helpers.split_version', 'split_version', (['release'], {}), '(release)\n', (2223, 2232), False, 'from bump_release.helpers import split_version\n'), ((6155, 6260), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'pattern', 'template': 'template', 'version': 'version', 'dry_run': 
'dry_run'}), '(path=path, pattern=pattern, template=template, version=\n version, dry_run=dry_run)\n', (6174, 6260), False, 'from bump_release import helpers\n'), ((7136, 7241), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'pattern', 'template': 'template', 'version': 'version', 'dry_run': 'dry_run'}), '(path=path, pattern=pattern, template=template, version=\n version, dry_run=dry_run)\n', (7155, 7241), False, 'from bump_release import helpers\n'), ((8166, 8271), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'pattern', 'template': 'template', 'version': 'version', 'dry_run': 'dry_run'}), '(path=path, pattern=pattern, template=template, version=\n version, dry_run=dry_run)\n', (8185, 8271), False, 'from bump_release import helpers\n'), ((9469, 9590), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'pattern_release', 'template': 'template_release', 'version': 'version', 'dry_run': 'dry_run'}), '(path=path, pattern=pattern_release, template=\n template_release, version=version, dry_run=dry_run)\n', (9488, 9590), False, 'from bump_release import helpers\n'), ((9654, 9775), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'pattern_version', 'template': 'template_version', 'version': 'version', 'dry_run': 'dry_run'}), '(path=path, pattern=pattern_version, template=\n template_version, version=version, dry_run=dry_run)\n', (9673, 9775), False, 'from bump_release import helpers\n'), ((10523, 10610), 'bump_release.helpers.update_node_packages', 'helpers.update_node_packages', ([], {'path': 'path', 'version': 'version', 'key': 'key', 'dry_run': 'dry_run'}), '(path=path, version=version, key=key, dry_run=\n dry_run)\n', (10551, 10610), False, 'from bump_release import helpers\n'), ((11272, 11351), 'bump_release.helpers.updates_yaml_file', 'helpers.updates_yaml_file', ([], {'path': 'path', 
'version': 'version', 'key': 'key', 'dry_run': 'dry_run'}), '(path=path, version=version, key=key, dry_run=dry_run)\n', (11297, 11351), False, 'from bump_release import helpers\n'), ((11748, 11892), 'bump_release.helpers.update_file', 'helpers.update_file', ([], {'path': 'path', 'pattern': 'helpers.RELEASE_INI_PATTERN', 'template': 'helpers.RELEASE_INI_TEMPLATE', 'version': 'version', 'dry_run': 'dry_run'}), '(path=path, pattern=helpers.RELEASE_INI_PATTERN,\n template=helpers.RELEASE_INI_TEMPLATE, version=version, dry_run=dry_run)\n', (11767, 11892), False, 'from bump_release import helpers\n'), ((1643, 1661), 'pathlib.Path', 'Path', (['release_file'], {}), '(release_file)\n', (1647, 1661), False, 'from pathlib import Path\n'), ((2285, 2325), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2304, 2325), False, 'import logging\n'), ((2344, 2383), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (2363, 2383), False, 'import logging\n'), ((5492, 5569), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No `main_project` section in release.ini file"""'], {}), "('No `main_project` section in release.ini file')\n", (5520, 5569), False, 'from bump_release import helpers\n'), ((5787, 5798), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (5791, 5798), False, 'from pathlib import Path\n'), ((6646, 6716), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No `setup` section in release.ini file"""'], {}), "('No `setup` section in release.ini file')\n", (6674, 6716), False, 'from bump_release import helpers\n'), ((6794, 6805), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (6798, 6805), False, 'from pathlib import Path\n'), ((7677, 7747), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No `sonar` section in release.ini file"""'], {}), "('No `sonar` 
section in release.ini file')\n", (7705, 7747), False, 'from bump_release import helpers\n'), ((7825, 7836), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (7829, 7836), False, 'from pathlib import Path\n'), ((8689, 8758), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No `docs` section in release.ini file"""'], {}), "('No `docs` section in release.ini file')\n", (8717, 8758), False, 'from bump_release import helpers\n'), ((8835, 8846), 'pathlib.Path', 'Path', (['_path'], {}), '(_path)\n', (8839, 8846), False, 'from pathlib import Path\n'), ((1583, 1593), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1591, 1593), False, 'from pathlib import Path\n'), ((2749, 2828), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `main_project`: {e}"""'], {}), "(f'process_update() No release section for `main_project`: {e}')\n", (2764, 2828), False, 'import logging\n'), ((3147, 3219), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `sonar`: {e}"""'], {}), "(f'process_update() No release section for `sonar`: {e}')\n", (3162, 3219), False, 'import logging\n'), ((3521, 3593), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `setup`: {e}"""'], {}), "(f'process_update() No release section for `setup`: {e}')\n", (3536, 3593), False, 'import logging\n'), ((3891, 3962), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `docs`: {e}"""'], {}), "(f'process_update() No release section for `docs`: {e}')\n", (3906, 3962), False, 'import logging\n'), ((4145, 4207), 'logging.debug', 'logging.debug', (['f"""process_update() `node`: new_row = {new_row}"""'], {}), "(f'process_update() `node`: new_row = {new_row}')\n", (4158, 4207), False, 'import logging\n'), ((4293, 4364), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `node`: {e}"""'], {}), "(f'process_update() No release 
section for `node`: {e}')\n", (4308, 4364), False, 'import logging\n'), ((4666, 4740), 'logging.warning', 'logging.warning', (['f"""process_update() No release section for `ansible`: {e}"""'], {}), "(f'process_update() No release section for `ansible`: {e}')\n", (4681, 4740), False, 'import logging\n'), ((5683, 5776), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No action to perform for main project: No path provided."""'], {}), "(\n 'No action to perform for main project: No path provided.')\n", (5711, 5776), False, 'from bump_release import helpers\n'), ((6074, 6143), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""Unable to update main project file"""', 'e'], {}), "('Unable to update main project file', e)\n", (6102, 6143), False, 'from bump_release import helpers\n'), ((7054, 7124), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No action to perform for setup file"""', 'e'], {}), "('No action to perform for setup file', e)\n", (7082, 7124), False, 'from bump_release import helpers\n'), ((8084, 8154), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No action to perform for sonar file"""', 'e'], {}), "('No action to perform for sonar file', e)\n", (8112, 8154), False, 'from bump_release import helpers\n'), ((9377, 9446), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No action to perform for docs file"""', 'e'], {}), "('No action to perform for docs file', e)\n", (9405, 9446), False, 'from bump_release import helpers\n'), ((10433, 10511), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', (['"""No action to perform for node packages file"""', 'e'], {}), "('No action to perform for node packages file', e)\n", (10461, 10511), False, 'from bump_release import helpers\n'), ((11188, 11260), 'bump_release.helpers.NothingToDoException', 'helpers.NothingToDoException', 
(['"""No action to perform for ansible file"""', 'e'], {}), "('No action to perform for ansible file', e)\n", (11216, 11260), False, 'from bump_release import helpers\n'), ((1771, 1781), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (1779, 1781), False, 'from pathlib import Path\n')] |
import numpy as np
import pandas as pd
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
import matplotlib.pyplot as plt
import contractions # Expanding contractions
from nltk.stem.wordnet import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
print(' ------------------------------------------')
print('| Classifying Gender Dysphoria Disclosures |')
print('| on Social Media with Machine Learning. |')
print(' ------------------------------------------')
print()
print('Team members: <NAME>')
print(' <NAME> ')
print(' <NAME>')
print()
print('Data Processing....')
print()
#num_of_lines = 2
dataset = pd.read_csv('df_truth.csv')
dataset.tail()
#print('Dataset size: ',dataset.shape)
# ------ ORIGINAL DATA --------
#print('Original Dataset: \n',dataset)
headers = list(dataset.columns.values)
#print(headers)
text = dataset.iloc[:,1] # text = dataset['text']
#print(text.shape)
#print(text)
# ---------------- EXPANDING CONTRACTIONS -------------------
# Lower-case every post and expand contractions word by word
# (e.g. "don't" -> "do not") so later tokenization is uniform.
# Rewritten to iterate the Series directly instead of the
# `for i in range(len(text))` index pattern, and to drop the
# shared accumulator list that had to be cleared on every pass.
n_text = []
for raw_post in text:
    words = str(raw_post).lower().split()
    n_text.append(' '.join(contractions.fix(w) for w in words))
# Downstream cleaning steps iterate this Series.
mySeries = pd.Series(n_text)
#print(mySeries)
# ----------------------------------------------------------
new_text = []
w_stopwords_text = []
# Characters stripped from every post (punctuation plus stray mojibake);
# hoisted out of the loop — it never changes between posts.
punc = '''!()-[]{};:'"\,“”<>’./?@#$%^&*ðÿ˜=∆+_~'''
for entry in mySeries:
    s = str(entry)
    # ----------------- REMOVING NUMBERS --------
    s = ''.join(ch for ch in s if not ch.isdigit())
    # -------- REMOVING SPECIAL CHARACTERS AND PUNCTUATION --------
    s = ''.join(ch for ch in s if ch not in punc)
    new_text.append(s)
# -------------------- REMOVING STOP WORDS -------------------
# Build the stop-word set ONCE: the original called
# stopwords.words('english') for EVERY token, re-reading the corpus
# list each time. Set membership is also O(1) vs O(n) for the list.
stop_words = set(stopwords.words('english'))
for sentence in new_text:
    tokens = word_tokenize(sentence)
    w_stopwords_text.append(' '.join(t for t in tokens if t not in stop_words))
#print(w_stopwords_text)
# -------------------------------- NORMALIZING WORDS VIA LEMMATIZATION ---------------------------------
# Fixes two defects of the original code:
#  * a dead `col_text`/`final_text` assignment that was immediately
#    overwritten after the loop has been removed;
#  * a fresh WordNetLemmatizer was constructed for EVERY word — one
#    shared instance is enough.
lemmatizer = WordNetLemmatizer()
lemmatized = []
for sent in w_stopwords_text:
    words = str(sent).split()
    lemmatized.append(' '.join(lemmatizer.lemmatize(w) for w in words))
# --------------- CLEANED DATA PLACED IN COLUMN #2 -----------
final_text = pd.DataFrame(lemmatized)[0]
dataset.insert(2,'new_text',final_text)
#print('Clean Dataset: \n',dataset['new_text'].values)
print('1. Text Preprocessing Done!')
# --- Train/test split and TF-IDF vectorisation -------------------------------
X = dataset['new_text'].values  # cleaned text produced by the preprocessing above
y = dataset['dysphoria'].values  # target label
y_labels = np.unique(y)  # distinct class labels (currently unused)
# 70/30 split with a fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)
#print(X_train.shape)
#print(X_test.shape)
# Fit TF-IDF on the training text only, then apply the same vocabulary to test
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train)
X_test = vectorizer.transform(X_test)
# ---------------------------------------------------------------------------------
print('2. Classifiers')
print()
# ---------------------------------------------------------------------------------
print('2.1. Support Vector Machine (SVM - RBF)')
print()
svm = SVC(kernel = 'rbf', gamma = 0.1, C = 10.0, random_state = 1)
svm.fit(X_train,y_train)
y_pred = svm.predict(X_test)
svm_predictions = svm.predict(X_test)  # NOTE(review): duplicate predict call — identical to y_pred
print(' Misclassified samples (linear model): %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.3f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, svm_predictions))
# ---------------------------------------------------------------------------------
print('2.2. Decision Tree')
print()
dt = DecisionTreeClassifier(criterion="entropy", random_state = 1)
dt.fit(X_train,y_train)
y_pred = dt.predict(X_test)
dt_predictions = dt.predict(X_test)  # NOTE(review): duplicate predict call — identical to y_pred
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, dt_predictions))
print()
# ---------------------------------------------------------------------------------
print('2.3. Logistic Regression')
print()
log_reg = LogisticRegression(penalty='l2', C = 10, random_state = 1)
log_reg.fit(X_train, y_train)
y_pred = log_reg.predict(X_test)
log_reg_predictions = log_reg.predict(X_test)  # NOTE(review): duplicate predict call — identical to y_pred
print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
print(classification_report(y_test, log_reg_predictions))
print()
# ---------------------------------------------------------------------------------
#print('2.4. Linear Regression')
#print()
#lr = LogisticRegression()
#lr.fit(X_train, y_train)
#y_pred = lr.predict(X_test)
#lr_predictions = lr.predict(X_test)
#print(' Misclassified samples: %d'%(y_test!=y_pred).sum())
#print(' Accuracy: %.2f'%accuracy_score(y_test,y_pred))
#print(classification_report(y_test, lr_predictions))
#print()
# ---------------------------------------------------------------------------------
| [
"pandas.Series",
"numpy.unique",
"pandas.read_csv",
"nltk.corpus.stopwords.words",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.classification_report",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.linear_model.LogisticRegression",
"nltk.tokenize.word_tokenize",
"sklearn.feature_... | [((1164, 1191), 'pandas.read_csv', 'pd.read_csv', (['"""df_truth.csv"""'], {}), "('df_truth.csv')\n", (1175, 1191), True, 'import pandas as pd\n'), ((2005, 2022), 'pandas.Series', 'pd.Series', (['n_text'], {}), '(n_text)\n', (2014, 2022), True, 'import pandas as pd\n'), ((3065, 3095), 'pandas.DataFrame', 'pd.DataFrame', (['w_stopwords_text'], {}), '(w_stopwords_text)\n', (3077, 3095), True, 'import pandas as pd\n'), ((3617, 3634), 'pandas.DataFrame', 'pd.DataFrame', (['yyy'], {}), '(yyy)\n', (3629, 3634), True, 'import pandas as pd\n'), ((3933, 3945), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (3942, 3945), True, 'import numpy as np\n'), ((3982, 4035), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(1)'}), '(X, y, test_size=0.3, random_state=1)\n', (3998, 4035), False, 'from sklearn.model_selection import train_test_split\n'), ((4094, 4111), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (4109, 4111), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((4460, 4512), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""rbf"""', 'gamma': '(0.1)', 'C': '(10.0)', 'random_state': '(1)'}), "(kernel='rbf', gamma=0.1, C=10.0, random_state=1)\n", (4463, 4512), False, 'from sklearn.svm import SVC\n'), ((4931, 4990), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': '"""entropy"""', 'random_state': '(1)'}), "(criterion='entropy', random_state=1)\n", (4953, 4990), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((5402, 5456), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': '"""l2"""', 'C': '(10)', 'random_state': '(1)'}), "(penalty='l2', C=10, random_state=1)\n", (5420, 5456), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2803, 2829), 'nltk.tokenize.word_tokenize', 'word_tokenize', (['new_text[j]'], {}), '(new_text[j])\n', 
(2816, 2829), False, 'from nltk.tokenize import word_tokenize\n'), ((4757, 4803), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'svm_predictions'], {}), '(y_test, svm_predictions)\n', (4778, 4803), False, 'from sklearn.metrics import classification_report\n'), ((5210, 5255), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'dt_predictions'], {}), '(y_test, dt_predictions)\n', (5231, 5255), False, 'from sklearn.metrics import classification_report\n'), ((5699, 5749), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'log_reg_predictions'], {}), '(y_test, log_reg_predictions)\n', (5720, 5749), False, 'from sklearn.metrics import classification_report\n'), ((3430, 3449), 'nltk.stem.wordnet.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3447, 3449), False, 'from nltk.stem.wordnet import WordNetLemmatizer\n'), ((4720, 4750), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4734, 4750), False, 'from sklearn.metrics import accuracy_score\n'), ((5173, 5203), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5187, 5203), False, 'from sklearn.metrics import accuracy_score\n'), ((5662, 5692), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5676, 5692), False, 'from sklearn.metrics import accuracy_score\n'), ((1751, 1770), 'contractions.fix', 'contractions.fix', (['h'], {}), '(h)\n', (1767, 1770), False, 'import contractions\n'), ((2899, 2925), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (2914, 2925), False, 'from nltk.corpus import stopwords\n')] |
from prefect import task, Flow, Parameter
from prefect.tasks.prefect import StartFlowRun
from prefect.storage import GitHub
# Minimal Prefect flow used to test a token/storage setup: its only task
# kicks off the already-registered "flow_must_fail" flow in the "testing"
# project when this flow runs.
with Flow("token-test") as flow:
    StartFlowRun(project_name="testing", flow_name="flow_must_fail")()
# Flow source is stored in (and pulled from) this GitHub repo path.
flow.storage = GitHub(repo="kvnkho/demos", path="/prefect/token_test.py")
flow.register("testing") | [
"prefect.tasks.prefect.StartFlowRun",
"prefect.Flow",
"prefect.storage.GitHub"
] | [((245, 303), 'prefect.storage.GitHub', 'GitHub', ([], {'repo': '"""kvnkho/demos"""', 'path': '"""/prefect/token_test.py"""'}), "(repo='kvnkho/demos', path='/prefect/token_test.py')\n", (251, 303), False, 'from prefect.storage import GitHub\n'), ((130, 148), 'prefect.Flow', 'Flow', (['"""token-test"""'], {}), "('token-test')\n", (134, 148), False, 'from prefect import task, Flow, Parameter\n'), ((162, 226), 'prefect.tasks.prefect.StartFlowRun', 'StartFlowRun', ([], {'project_name': '"""testing"""', 'flow_name': '"""flow_must_fail"""'}), "(project_name='testing', flow_name='flow_must_fail')\n", (174, 226), False, 'from prefect.tasks.prefect import StartFlowRun\n')] |
import torch
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
# --- Inference setup: load hyperparameters and a trained checkpoint ----------
hparams_file, overrides = 'train.yaml',''
PATH = './results/4234/save/CKPT+2021-04-17+16-05-06+00/model.ckpt'
# Load the hyper-parameter file
with open(hparams_file) as fin:
    hparams = load_hyperpyyaml(fin, overrides)
# Load the model and switch it to evaluation mode
model=hparams["model"]
model=model.eval()
state_dict = torch.load(PATH)
model.load_state_dict(state_dict)
# Input audio file (a clean LibriSpeech test-clean utterance; Windows-style path)
wav = ".\\data\\LibriSpeech\\test-clean\\1089\\134686\\1089-134686-0000.flac"
# Generate a noisy version of the file
def generat_noisy(wav):
    """Read the audio file at *wav* and return it with environmental
    corruption (noise) applied by the hparams `env_corruption` module."""
    clean = sb.dataio.dataio.read_audio(wav)
    batched = clean.unsqueeze(0)  # add a batch dimension for the corruptor
    # second argument is torch.ones(1) — presumably relative lengths; confirm
    corrupted = hparams["env_corruption"](batched, torch.ones(1))
    return corrupted.squeeze(0)  # drop the batch dimension again
noisy_wav = generat_noisy(wav)
# Save the noisy file (16 kHz)
tmpfile = './noisy.wav'
sb.dataio.dataio.write_audio(tmpfile, noisy_wav, 16000)
# Compute spectral features
def compute_feats(wavs):
    """Returns corresponding log-spectral features of the input waveforms.

    Arguments
    ---------
    wavs : torch.Tensor
        The batch of waveforms to convert to log-spectral features.
    """
    stft = hparams['compute_STFT'](wavs)
    magnitude = sb.processing.features.spectral_magnitude(stft, power=0.5)
    # log1p compresses the dynamic range, de-emphasising tiny magnitude
    # differences.
    return torch.log1p(magnitude)
noisy_wav = noisy_wav.unsqueeze(0)  # add a batch dimension
inputdata = compute_feats(noisy_wav)
# Run the enhancement model (no gradients needed at inference time)
with torch.no_grad():
    output = model(inputdata)
# Apply the model output to the noisy spectrum (element-wise product —
# presumably the model predicts a mask; confirm against training code)
predict_spec = torch.mul(output, inputdata)
# Resynthesize the time-domain waveform; expm1 inverts the log1p applied
# in compute_feats before resynthesis
predict_wav =hparams['resynth'](
    torch.expm1(predict_spec), noisy_wav
)
predict_wav = predict_wav.squeeze(0)  # drop the batch dimension
# Save the enhanced file (16 kHz)
tmpfile_au = './agument.wav'
sb.dataio.dataio.write_audio(tmpfile_au, predict_wav, 16000)
| [
"torch.mul",
"torch.expm1",
"torch.ones",
"speechbrain.dataio.dataio.write_audio",
"speechbrain.dataio.dataio.read_audio",
"torch.load",
"speechbrain.processing.features.spectral_magnitude",
"torch.no_grad",
"hyperpyyaml.load_hyperpyyaml",
"torch.log1p"
] | [((342, 358), 'torch.load', 'torch.load', (['PATH'], {}), '(PATH)\n', (352, 358), False, 'import torch\n'), ((757, 812), 'speechbrain.dataio.dataio.write_audio', 'sb.dataio.dataio.write_audio', (['tmpfile', 'noisy_wav', '(16000)'], {}), '(tmpfile, noisy_wav, 16000)\n', (785, 812), True, 'import speechbrain as sb\n'), ((1452, 1480), 'torch.mul', 'torch.mul', (['output', 'inputdata'], {}), '(output, inputdata)\n', (1461, 1480), False, 'import torch\n'), ((1647, 1707), 'speechbrain.dataio.dataio.write_audio', 'sb.dataio.dataio.write_audio', (['tmpfile_au', 'predict_wav', '(16000)'], {}), '(tmpfile_au, predict_wav, 16000)\n', (1675, 1707), True, 'import speechbrain as sb\n'), ((246, 278), 'hyperpyyaml.load_hyperpyyaml', 'load_hyperpyyaml', (['fin', 'overrides'], {}), '(fin, overrides)\n', (262, 278), False, 'from hyperpyyaml import load_hyperpyyaml\n'), ((533, 565), 'speechbrain.dataio.dataio.read_audio', 'sb.dataio.dataio.read_audio', (['wav'], {}), '(wav)\n', (560, 565), True, 'import speechbrain as sb\n'), ((1133, 1192), 'speechbrain.processing.features.spectral_magnitude', 'sb.processing.features.spectral_magnitude', (['feats'], {'power': '(0.5)'}), '(feats, power=0.5)\n', (1174, 1192), True, 'import speechbrain as sb\n'), ((1260, 1278), 'torch.log1p', 'torch.log1p', (['feats'], {}), '(feats)\n', (1271, 1278), False, 'import torch\n'), ((1382, 1397), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1395, 1397), False, 'import torch\n'), ((1531, 1556), 'torch.expm1', 'torch.expm1', (['predict_spec'], {}), '(predict_spec)\n', (1542, 1556), False, 'import torch\n'), ((641, 654), 'torch.ones', 'torch.ones', (['(1)'], {}), '(1)\n', (651, 654), False, 'import torch\n')] |