hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f714bf511a5db4837fb464801747ab0f635499bc | 3,832 | py | Python | networkx/drawing/tests/test_pydot.py | rakschahsa/networkx | 6cac55b1064c3c346665f9281680fa3b66442ad0 | [
"BSD-3-Clause"
] | 445 | 2019-01-26T13:50:26.000Z | 2022-03-18T05:17:38.000Z | Library/lib/python3.7/site-packages/networkx/drawing/tests/test_pydot.py | gengyong/Carnets | 8930a14f69360d4db115a85ff9e0f6efa80fa2e7 | [
"BSD-3-Clause"
] | 242 | 2019-01-29T15:48:27.000Z | 2022-03-31T22:09:21.000Z | site-packages/networkx/drawing/tests/test_pydot.py | Wristlebane/Pyto | 901ac307b68486d8289105c159ca702318bea5b0 | [
"MIT"
] | 31 | 2019-03-10T09:51:27.000Z | 2022-02-14T23:11:12.000Z | """Unit tests for pydot drawing functions."""
try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys
import tempfile
from nose.tools import assert_equal, assert_is_instance, assert_true
import networkx as nx
from networkx.testing import assert_graphs_equal
class TestPydot(object):
    """Round-trip tests for the networkx <-> pydot conversion helpers."""
    @classmethod
    def setupClass(cls):
        '''
        Fixture defining the `pydot` global to be the `pydot` module if both
        importable and of sufficient version _or_ skipping this test.
        '''
        global pydot
        pydot = nx.nx_pydot.setup_module(sys.modules[__name__])
        assert pydot is not None
    def pydot_checks(self, G, prog):
        '''
        Validate :mod:`pydot`-based usage of the passed NetworkX graph with the
        passed basename of an external GraphViz command (e.g., `dot`, `neato`).
        '''
        # Set the name of this graph to... "G". Failing to do so will
        # subsequently trip an assertion expecting this name.
        G.graph['name'] = 'G'
        # Add arbitrary nodes and edges to the passed empty graph.
        G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'D')])
        G.add_node('E')
        # Validate layout of this graph with the passed GraphViz command.
        graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
        assert_is_instance(graph_layout, dict)
        # Convert this graph into a "pydot.Dot" instance.
        P = nx.nx_pydot.to_pydot(G)
        # Convert this "pydot.Dot" instance back into a graph of the same type.
        G2 = G.__class__(nx.nx_pydot.from_pydot(P))
        # Validate the original and resulting graphs to be the same.
        assert_graphs_equal(G, G2)
        # Serialize this "pydot.Dot" instance to a temporary file in dot format.
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone, and
        # the temporary file is never removed afterwards.
        fname = tempfile.mktemp()
        P.write_raw(fname)
        # Deserialize a list of new "pydot.Dot" instances back from this file.
        Pin_list = pydot.graph_from_dot_file(path=fname, encoding='utf-8')
        # Validate this file to contain only one graph.
        assert_equal(len(Pin_list), 1)
        # The single "pydot.Dot" instance deserialized from this file.
        Pin = Pin_list[0]
        # Sorted list of all nodes in the original "pydot.Dot" instance.
        n1 = sorted([p.get_name() for p in P.get_node_list()])
        # Sorted list of all nodes in the deserialized "pydot.Dot" instance.
        n2 = sorted([p.get_name() for p in Pin.get_node_list()])
        # Validate these instances to contain the same nodes.
        assert_equal(n1, n2)
        # Sorted list of all edges in the original "pydot.Dot" instance.
        e1 = sorted([
            (e.get_source(), e.get_destination()) for e in P.get_edge_list()])
        # Sorted list of all edges in the deserialized "pydot.Dot" instance.
        e2 = sorted([
            (e.get_source(), e.get_destination()) for e in Pin.get_edge_list()])
        # Validate these instances to contain the same edges.
        assert_equal(e1, e2)
        # Deserialize a new graph of the same type back from this file.
        Hin = nx.nx_pydot.read_dot(fname)
        Hin = G.__class__(Hin)
        # Validate the original and resulting graphs to be the same.
        assert_graphs_equal(G, Hin)
    def test_undirected(self):
        """Round-trip an undirected graph via the external neato command."""
        self.pydot_checks(nx.Graph(), prog='neato')
    def test_directed(self):
        """Round-trip a directed graph via the external dot command."""
        self.pydot_checks(nx.DiGraph(), prog='dot')
    def test_read_write(self):
        """Write a MultiGraph to an in-memory buffer and read it back."""
        G = nx.MultiGraph()
        G.graph['name'] = 'G'
        G.add_edge('1', '2', key='0') # read assumes strings
        fh = StringIO()
        nx.nx_pydot.write_dot(G, fh)
        fh.seek(0)
        H = nx.nx_pydot.read_dot(fh)
        assert_graphs_equal(G, H)
| 35.155963 | 80 | 0.632307 | try:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys
import tempfile
from nose.tools import assert_equal, assert_is_instance, assert_true
import networkx as nx
from networkx.testing import assert_graphs_equal
class TestPydot(object):
    """Round-trip tests for the networkx <-> pydot conversion helpers."""
    @classmethod
    def setupClass(cls):
        """Bind the module-level `pydot` global, or skip these tests."""
        global pydot
        pydot = nx.nx_pydot.setup_module(sys.modules[__name__])
        assert pydot is not None
    def pydot_checks(self, G, prog):
        """Exercise layout, conversion and (de)serialization of graph G
        with the basename of an external GraphViz command (e.g. 'dot')."""
        # Later read_dot() comparisons expect the graph to be named "G".
        G.graph['name'] = 'G'
        G.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('A', 'D')])
        G.add_node('E')
        # Layout must come back as a node -> position mapping.
        graph_layout = nx.nx_pydot.pydot_layout(G, prog=prog)
        assert_is_instance(graph_layout, dict)
        # Round-trip through a pydot.Dot object must preserve the graph.
        P = nx.nx_pydot.to_pydot(G)
        G2 = G.__class__(nx.nx_pydot.from_pydot(P))
        assert_graphs_equal(G, G2)
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone,
        # and the temporary file is never removed afterwards.
        fname = tempfile.mktemp()
        P.write_raw(fname)
        # The serialized dot file must contain exactly one graph.
        Pin_list = pydot.graph_from_dot_file(path=fname, encoding='utf-8')
        assert_equal(len(Pin_list), 1)
        Pin = Pin_list[0]
        # Same node set before and after serialization.
        n1 = sorted([p.get_name() for p in P.get_node_list()])
        n2 = sorted([p.get_name() for p in Pin.get_node_list()])
        assert_equal(n1, n2)
        # Same edge set before and after serialization.
        e1 = sorted([
            (e.get_source(), e.get_destination()) for e in P.get_edge_list()])
        e2 = sorted([
            (e.get_source(), e.get_destination()) for e in Pin.get_edge_list()])
        assert_equal(e1, e2)
        # read_dot() on the same file must reproduce the original graph.
        Hin = nx.nx_pydot.read_dot(fname)
        Hin = G.__class__(Hin)
        assert_graphs_equal(G, Hin)
    def test_undirected(self):
        """Round-trip an undirected graph via the external neato command."""
        self.pydot_checks(nx.Graph(), prog='neato')
    def test_directed(self):
        """Round-trip a directed graph via the external dot command."""
        self.pydot_checks(nx.DiGraph(), prog='dot')
    def test_read_write(self):
        """Write a MultiGraph to an in-memory buffer and read it back."""
        G = nx.MultiGraph()
        G.graph['name'] = 'G'
        G.add_edge('1', '2', key='0')
        fh = StringIO()
        nx.nx_pydot.write_dot(G, fh)
        fh.seek(0)
        H = nx.nx_pydot.read_dot(fh)
        assert_graphs_equal(G, H)
| true | true |
f714c0121365938eec8fc72d484d1b10524db95f | 358 | py | Python | Gallery/migrations/0002_auto_20210115_1331.py | CiganOliviu/InfiniteShoot | 14f7fb21e360e3c58876d82ebbe206054c72958e | [
"MIT"
] | 1 | 2021-04-02T16:45:37.000Z | 2021-04-02T16:45:37.000Z | Gallery/migrations/0002_auto_20210115_1331.py | CiganOliviu/InfiniteShoot-1 | 6322ae34f88caaffc1de29dfa4f6d86d175810a7 | [
"Apache-2.0"
] | null | null | null | Gallery/migrations/0002_auto_20210115_1331.py | CiganOliviu/InfiniteShoot-1 | 6322ae34f88caaffc1de29dfa4f6d86d175810a7 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.0.8 on 2021-01-15 13:31
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the ImageClient field 'product' to 'client'."""
    dependencies = [
        ('Gallery', '0001_initial'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='imageclient',
            old_name='product',
            new_name='client',
        ),
    ]
| 18.842105 | 47 | 0.578212 |
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the ImageClient field 'product' to 'client'."""
    dependencies = [
        ('Gallery', '0001_initial'),
    ]
    operations = [
        # Pure column rename; no data transformation is performed.
        migrations.RenameField(
            model_name='imageclient',
            old_name='product',
            new_name='client',
        ),
    ]
| true | true |
f714c0f34fff9800c3a67b955f8cc23e9eeb99c8 | 9,027 | py | Python | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | null | null | null | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | 1 | 2021-06-15T11:59:58.000Z | 2021-06-16T12:08:38.000Z | emu/containers/docker_container.py | CONQ-Agency/android-emulator-container-scripts | 0d5f55ca938818486a2ad638b91464e952e87cf4 | [
"Apache-2.0"
] | 1 | 2021-05-12T14:08:12.000Z | 2021-05-12T14:08:12.000Z | # Copyright 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class ProgressTracker(object):
    """Tracks progress using tqdm for a set of layers that are pushed."""
    def __init__(self):
        # Maps a layer id to its progress state:
        # {"tqdm": bar, "total": int, "status": str, "current": int}
        self.progress = {}
        # tqdm position index; each new layer gets its own output line.
        self.idx = -1
    def __del__(self):
        # Close every bar so the terminal state is restored.
        for k in self.progress:
            self.progress[k]["tqdm"].close()
    def update(self, entry):
        """Update the progress bars given a status entry.

        ``entry`` is one decoded JSON dict from a streaming docker
        push/pull; entries without an ``id`` (global status lines) are
        ignored.
        """
        if "id" not in entry:
            return
        identity = entry["id"]
        if identity not in self.progress:
            self.idx += 1
            self.progress[identity] = {
                "tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True),  # The progress bar
                "total": 0,  # Total of bytes we are shipping
                "status": "",  # Status message.
                "current": 0,  # Current of total already send.
            }
        prog = self.progress[identity]
        # "progressDetail" can be absent, or present but null/empty.
        detail = entry.get("progressDetail") or {}
        total = int(detail.get("total", -1))
        current = int(detail.get("current", 0))
        if prog["total"] != total and total != -1:
            prog["total"] = total
            prog["tqdm"].reset(total=total)
        # Some entries carry an "id" but no "status"; default to "".
        status = entry.get("status", "")
        if prog["status"] != status:
            # Remember the new status so the description is only
            # rewritten when it actually changes (the original never
            # stored it back, re-rendering on every entry).
            prog["status"] = status
            prog["tqdm"].set_description("{0} {1}".format(status, identity))
        if current != 0:
            diff = current - prog["current"]
            prog["current"] = current
            prog["tqdm"].update(diff)
class DockerContainer(object):
    """A Docker Device is capable of creating and launching docker images.
    In order to successfully create and launch a docker image you must either
    run this as root, or have enabled sudoless docker.
    """
    TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
    def __init__(self, repo=None):
        # Normalize the repository so it can be used as a "repo/" prefix.
        if repo and repo[-1] != "/":
            repo += "/"
        self.repo = repo
    def get_client(self):
        """Returns a high level docker client configured from the environment."""
        return docker.from_env()
    def get_api_client(self):
        """Returns a low level docker API client.
        Falls back to the default unix domain socket when the environment
        based client cannot be constructed.
        """
        try:
            api_client = docker.APIClient()
            logging.info(api_client.version())
            return api_client
        except Exception:
            logging.exception("Failed to create default client, trying domain socket.", exc_info=True)
            api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
            logging.info(api_client.version())
            return api_client
    def push(self):
        """Pushes this image to the configured repository and tags it :latest."""
        image = self.full_name()
        print("Pushing docker image: {}.. be patient this can take a while!".format(self.full_name()))
        tracker = ProgressTracker()
        try:
            client = docker.from_env()
            result = client.images.push(image, "latest", stream=True, decode=True)
            for entry in result:
                tracker.update(entry)
            self.docker_image().tag("{}{}:latest".format(self.repo, self.image_name()))
        except Exception:
            logging.exception("Failed to push image.", exc_info=True)
            logging.warning("You can manually push the image as follows:")
            logging.warning("docker push %s", image)
    def launch(self, port_map):
        """Launches the container as privileged and detached, publishing
        the ports given in port_map.
        Returns the container.
        """
        image = self.docker_image()
        client = docker.from_env()
        try:
            container = client.containers.run(
                image=image.id,
                privileged=True,
                publish_all_ports=True,
                detach=True,
                ports=port_map,
            )
            print("Launched {} (id:{})".format(container.name, container.id))
            print("docker logs -f {}".format(container.name))
            print("docker stop {}".format(container.name))
            return container
        except Exception:
            # Bug fix: this handler previously referenced an undefined
            # name `image_sha`, raising a NameError that masked the
            # real launch failure.
            logging.exception("Unable to run the %s", image.id)
            print("Unable to start the container, try running it as:")
            print("./run.sh ", image.id)
    def create_container(self, dest):
        """Creates the docker container, returning the sha of the container, or None in case of failure."""
        identity = None
        image_tag = self.full_name()
        print("docker build {} -t {}".format(dest, image_tag))
        try:
            api_client = self.get_api_client()
            logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
            result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
            for entry in result:
                # Echo the build log and remember the id of the built image.
                if "stream" in entry:
                    sys.stdout.write(entry["stream"])
                if "aux" in entry and "ID" in entry["aux"]:
                    identity = entry["aux"]["ID"]
            client = docker.from_env()
            image = client.images.get(identity)
            image.tag(self.repo + self.image_name(), "latest")
        except Exception:
            logging.exception("Failed to create container.", exc_info=True)
            logging.warning("You can manually create the container as follows:")
            logging.warning("docker build -t %s %s", image_tag, dest)
        return identity
    def clean(self, dest):
        """Removes dest (if it exists) and recreates it as an empty directory."""
        if os.path.exists(dest):
            shutil.rmtree(dest)
        mkdir_p(dest)
    def pull(self, image, tag):
        """Tries to retrieve the given image and tag.
        Return True if succeeded, False when failed.
        """
        client = self.get_api_client()
        try:
            tracker = ProgressTracker()
            # Stream decoded JSON entries so per-layer progress can be
            # tracked; without stream/decode the API returns one opaque
            # string and the tracker never sees layer updates.
            result = client.pull(self.repo + image, tag, stream=True, decode=True)
            for entry in result:
                tracker.update(entry)
        except Exception:
            logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
            return False
        return True
    def full_name(self):
        """The "repo/name:tag" string for this image.
        Note: falls back to a (name, tag) tuple when no repo is set.
        """
        if self.repo:
            return "{}{}:{}".format(self.repo, self.image_name(), self.docker_tag())
        return (self.image_name(), self.docker_tag())
    def latest_name(self):
        """Same as full_name(), but always using the "latest" tag."""
        if self.repo:
            return "{}{}:{}".format(self.repo, self.image_name(), "latest")
        return (self.image_name(), "latest")
    def create_cloud_build_step(self, dest):
        """Returns a Google Cloud Build step dict that builds this image."""
        return {
            "name": "gcr.io/cloud-builders/docker",
            "args": [
                "build",
                "-t",
                self.full_name(),
                "-t",
                self.latest_name(),
                os.path.basename(dest),
            ],
        }
    def docker_image(self):
        """The docker local docker image if any
        Returns:
            {docker.models.images.Image}: A docker image object, or None.
        """
        client = self.get_client()
        for img in client.images.list():
            for tag in img.tags:
                if self.image_name() in tag:
                    return img
        return None
    def available(self):
        """True if this container image is locally available."""
        return self.docker_image() is not None
    def build(self, dest):
        """Writes the container files to dest and builds the image."""
        self.write(dest)
        return self.create_container(dest)
    def can_pull(self):
        """True if this container image can be pulled from a registry."""
        return self.pull(self.image_name(), self.docker_tag())
    @abc.abstractmethod
    def write(self, destination):
        """Method responsible for writing the Dockerfile and all necessary files to build a container.
        Args:
            destination ({string}): A path to a directory where all the container files should reside.
        Raises:
            NotImplementedError: [description]
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def image_name(self):
        """The image name without the tag used to uniquely identify this image.
        Raises:
            NotImplementedError: [description]
        """
        raise NotImplementedError()
    @abc.abstractmethod
    def docker_tag(self):
        """The tag to use for this image."""
        raise NotImplementedError()
    @abc.abstractmethod
    def depends_on(self):
        """Name of the system image this container is build on."""
        raise NotImplementedError()
    def __str__(self):
        return self.image_name() + ":" + self.docker_tag()
| 34.586207 | 107 | 0.583029 |
import logging
import os
import re
import sys
import shutil
import abc
import docker
from tqdm import tqdm
from emu.utils import mkdir_p
class ProgressTracker(object):
    """Tracks per-layer docker push/pull progress with one tqdm bar per layer."""
    def __init__(self):
        # Maps a layer id to its progress state:
        # {"tqdm": bar, "total": int, "status": str, "current": int}
        self.progress = {}
        # tqdm position index; each new layer gets its own output line.
        self.idx = -1
    def __del__(self):
        # Close every bar so the terminal state is restored.
        for k in self.progress:
            self.progress[k]["tqdm"].close()
    def update(self, entry):
        """Feed one decoded status entry from a streaming docker API call.
        Entries without an "id" (global status lines) are ignored.
        """
        if "id" not in entry:
            return
        identity = entry["id"]
        if identity not in self.progress:
            # First sighting of this layer: allocate a fresh bar.
            self.idx += 1
            self.progress[identity] = {
                "tqdm": tqdm(total=0, position=self.idx, unit="B", unit_scale=True),
                "total": 0,
                "status": "",
                "current": 0,
            }
        prog = self.progress[identity]
        total = int(entry.get("progressDetail", {}).get("total", -1))
        current = int(entry.get("progressDetail", {}).get("current", 0))
        if prog["total"] != total and total != -1:
            # Total became known (or changed): resize the bar.
            prog["total"] = total
            prog["tqdm"].reset(total=total)
        # NOTE(review): prog["status"] is never updated after this compare,
        # so set_description runs on every entry; also entry["status"]
        # raises KeyError for entries that carry an "id" but no "status".
        if prog["status"] != entry["status"]:
            prog["tqdm"].set_description("{0} {1}".format(entry.get("status"), identity))
        if current != 0:
            # Advance the bar by the delta since the last entry.
            diff = current - prog["current"]
            prog["current"] = current
            prog["tqdm"].update(diff)
class DockerContainer(object):
    """Base class able to create, build, push, pull and launch a docker image.
    Requires running as root or having sudoless docker enabled.
    """
    TAG_REGEX = re.compile(r"[a-zA-Z0-9][a-zA-Z0-9._-]*:?[a-zA-Z0-9._-]*")
    def __init__(self, repo=None):
        # Normalize the repository so it can be used as a "repo/" prefix.
        if repo and repo[-1] != "/":
            repo += "/"
        self.repo = repo
    def get_client(self):
        """Returns a high level docker client configured from the environment."""
        return docker.from_env()
    def get_api_client(self):
        """Returns a low level docker API client, falling back to the
        default unix domain socket when the environment client fails."""
        try:
            api_client = docker.APIClient()
            logging.info(api_client.version())
            return api_client
        except:
            logging.exception("Failed to create default client, trying domain socket.", exc_info=True)
            api_client = docker.APIClient(base_url="unix://var/run/docker.sock")
            logging.info(api_client.version())
            return api_client
    def push(self):
        """Pushes this image to the configured repository and tags it :latest."""
        image = self.full_name()
        print("Pushing docker image: {}.. be patient this can take a while!".format(self.full_name()))
        tracker = ProgressTracker()
        try:
            client = docker.from_env()
            result = client.images.push(image, "latest", stream=True, decode=True)
            for entry in result:
                tracker.update(entry)
            self.docker_image().tag("{}{}:latest".format(self.repo, self.image_name()))
        except:
            logging.exception("Failed to push image.", exc_info=True)
            logging.warning("You can manually push the image as follows:")
            logging.warning("docker push %s", image)
    def launch(self, port_map):
        """Runs the image as a privileged, detached container with the
        given port mapping; returns the container object."""
        image = self.docker_image()
        client = docker.from_env()
        try:
            container = client.containers.run(
                image=image.id,
                privileged=True,
                publish_all_ports=True,
                detach=True,
                ports=port_map,
            )
            print("Launched {} (id:{})".format(container.name, container.id))
            print("docker logs -f {}".format(container.name))
            print("docker stop {}".format(container.name))
            return container
        except:
            # BUG: `image_sha` is undefined in this scope; reaching this
            # handler raises a NameError (probably meant image.id).
            logging.exception("Unable to run the %s", image_sha)
            print("Unable to start the container, try running it as:")
            print("./run.sh ", image_sha)
    def create_container(self, dest):
        """Builds the image from the Dockerfile directory dest; returns
        the built image id (sha) or None on failure."""
        identity = None
        image_tag = self.full_name()
        print("docker build {} -t {}".format(dest, image_tag))
        try:
            api_client = self.get_api_client()
            logging.info("build(path=%s, tag=%s, rm=True, decode=True)", dest, image_tag)
            result = api_client.build(path=dest, tag=image_tag, rm=True, decode=True)
            for entry in result:
                # Echo the build log and remember the id of the built image.
                if "stream" in entry:
                    sys.stdout.write(entry["stream"])
                if "aux" in entry and "ID" in entry["aux"]:
                    identity = entry["aux"]["ID"]
            client = docker.from_env()
            image = client.images.get(identity)
            image.tag(self.repo + self.image_name(), "latest")
        except:
            logging.exception("Failed to create container.", exc_info=True)
            logging.warning("You can manually create the container as follows:")
            logging.warning("docker build -t %s %s", image_tag, dest)
        return identity
    def clean(self, dest):
        """Removes dest (if it exists) and recreates it as an empty directory."""
        if os.path.exists(dest):
            shutil.rmtree(dest)
        mkdir_p(dest)
    def pull(self, image, tag):
        """Tries to retrieve the given image and tag; True on success.
        NOTE(review): without stream=True/decode=True the API returns a
        single string, so the ProgressTracker never sees per-layer dicts.
        """
        client = self.get_api_client()
        try:
            tracker = ProgressTracker()
            result = client.pull(self.repo + image, tag)
            for entry in result:
                tracker.update(entry)
        except:
            logging.info("Failed to retrieve image, this is not uncommon.", exc_info=True)
            return False
        return True
    def full_name(self):
        """The "repo/name:tag" string; a (name, tag) tuple when repo is unset."""
        if self.repo:
            return "{}{}:{}".format(self.repo, self.image_name(), self.docker_tag())
        return (self.image_name(), self.docker_tag())
    def latest_name(self):
        """Like full_name(), but always using the "latest" tag."""
        if self.repo:
            return "{}{}:{}".format(self.repo, self.image_name(), "latest")
        return (self.image_name(), "latest")
    def create_cloud_build_step(self, dest):
        """Returns a Google Cloud Build step dict that builds this image."""
        return {
            "name": "gcr.io/cloud-builders/docker",
            "args": [
                "build",
                "-t",
                self.full_name(),
                "-t",
                self.latest_name(),
                os.path.basename(dest),
            ],
        }
    def docker_image(self):
        """Returns the first local image whose tag contains image_name(),
        or None when no such image exists."""
        client = self.get_client()
        for img in client.images.list():
            for tag in img.tags:
                if self.image_name() in tag:
                    return img
        return None
    def available(self):
        """True if this container image is locally available."""
        return self.docker_image() != None
    def build(self, dest):
        """Writes all container files to dest, then builds the image."""
        self.write(dest)
        return self.create_container(dest)
    def can_pull(self):
        """True if this container image can be pulled from a registry."""
        return self.pull(self.image_name(), self.docker_tag())
    @abc.abstractmethod
    def write(self, destination):
        """Writes the Dockerfile and supporting files into destination."""
        raise NotImplementedError()
    @abc.abstractmethod
    def image_name(self):
        """The image name (without tag) uniquely identifying this image."""
        raise NotImplementedError()
    @abc.abstractmethod
    def docker_tag(self):
        """The tag to use for this image."""
        raise NotImplementedError()
    @abc.abstractmethod
    def depends_on(self):
        """Name of the system image this container is built on."""
        raise NotImplementedError()
    def __str__(self):
        return self.image_name() + ":" + self.docker_tag()
| true | true |
f714c0fe8b4759fb11a941007a0f6f1a8f1d8178 | 5,927 | py | Python | statsmodels/multivariate/cancorr.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | 1 | 2022-01-24T15:17:37.000Z | 2022-01-24T15:17:37.000Z | statsmodels/multivariate/cancorr.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | statsmodels/multivariate/cancorr.py | aliavni/statsmodels | ef5d57a8d45de76a895e9401705280d558d688ad | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Canonical correlation analysis
author: Yichuan Liu
"""
import numpy as np
from numpy.linalg import svd
import scipy
import pandas as pd
from statsmodels.base.model import Model
from statsmodels.iolib import summary2
from .multivariate_ols import multivariate_stats
class CanCorr(Model):
    """
    Canonical correlation analysis using singular value decomposition
    For matrices exog=x and endog=y, find projections x_cancoef and y_cancoef
    such that:
        x1 = x * x_cancoef, x1' * x1 is identity matrix
        y1 = y * y_cancoef, y1' * y1 is identity matrix
    and the correlation between x1 and y1 is maximized.
    Attributes
    ----------
    endog : ndarray
        See Parameters.
    exog : ndarray
        See Parameters.
    cancorr : ndarray
        The canonical correlation values
    y_cancoeff : ndarray
        The canonical coefficients for endog
    x_cancoeff : ndarray
        The canonical coefficients for exog
    References
    ----------
    .. [*] http://numerical.recipes/whp/notes/CanonCorrBySVD.pdf
    .. [*] http://www.csun.edu/~ata20315/psy524/docs/Psy524%20Lecture%208%20CC.pdf
    .. [*] http://www.mathematica-journal.com/2014/06/canonical-correlation-analysis/
    """ # noqa:E501
    def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):
        super(CanCorr, self).__init__(endog, exog, missing=missing,
                                      hasconst=hasconst, **kwargs)
        # Fit eagerly; results live in cancorr / x_cancoef / y_cancoef.
        self._fit(tolerance)
    def _fit(self, tolerance=1e-8):
        """Fit the model
        A ValueError is raised if there are singular values smaller than the
        tolerance. The treatment of singular arrays might change in future.
        Parameters
        ----------
        tolerance : float
            eigenvalue tolerance, values smaller than which is considered 0
        """
        nobs, k_yvar = self.endog.shape
        nobs, k_xvar = self.exog.shape
        # At most min(k_yvar, k_xvar) canonical pairs exist.
        k = np.min([k_yvar, k_xvar])
        # Center both matrices (correlation is translation invariant).
        x = np.array(self.exog)
        x = x - x.mean(0)
        y = np.array(self.endog)
        y = y - y.mean(0)
        ux, sx, vx = svd(x, 0)
        # vx_ds = vx.T divided by sx
        vx_ds = vx.T
        mask = sx > tolerance
        if mask.sum() < len(mask):
            raise ValueError('exog is collinear.')
        vx_ds[:, mask] /= sx[mask]
        uy, sy, vy = svd(y, 0)
        # vy_ds = vy.T divided by sy
        vy_ds = vy.T
        mask = sy > tolerance
        if mask.sum() < len(mask):
            raise ValueError('endog is collinear.')
        vy_ds[:, mask] /= sy[mask]
        # Singular values of ux' * uy are the canonical correlations.
        u, s, v = svd(ux.T.dot(uy), 0)
        # Correct any roundoff: correlations must lie in [0, 1].
        self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])
        self.x_cancoef = vx_ds.dot(u[:, :k])
        self.y_cancoef = vy_ds.dot(v.T[:, :k])
    def corr_test(self):
        """Approximate F test
        Perform multivariate statistical tests of the hypothesis that
        there is no canonical correlation between endog and exog.
        For each canonical correlation, testing its significance based on
        Wilks' lambda.
        Returns
        -------
        CanCorrTestResults instance
        """
        nobs, k_yvar = self.endog.shape
        nobs, k_xvar = self.exog.shape
        eigenvals = np.power(self.cancorr, 2)
        stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda",
                                      'Num DF','Den DF', 'F Value','Pr > F'],
                             index=list(range(len(eigenvals) - 1, -1, -1)))
        prod = 1
        # Accumulate Wilks' lambda from the smallest correlation upward,
        # applying an F approximation at each step.
        for i in range(len(eigenvals) - 1, -1, -1):
            prod *= 1 - eigenvals[i]
            p = k_yvar - i
            q = k_xvar - i
            r = (nobs - k_yvar - 1) - (p - q + 1) / 2
            u = (p * q - 2) / 4
            df1 = p * q
            if p ** 2 + q ** 2 - 5 > 0:
                t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))
            else:
                t = 1
            df2 = r * t - 2 * u
            lmd = np.power(prod, 1 / t)
            F = (1 - lmd) / lmd * df2 / df1
            stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]
            stats.loc[i, "Wilks' lambda"] = prod
            stats.loc[i, 'Num DF'] = df1
            stats.loc[i, 'Den DF'] = df2
            stats.loc[i, 'F Value'] = F
            pval = scipy.stats.f.sf(F, df1, df2)
            stats.loc[i, 'Pr > F'] = pval
            '''
            # Wilk's Chi square test of each canonical correlation
            df = (p - i + 1) * (q - i + 1)
            chi2 = a * np.log(prod)
            pval = stats.chi2.sf(chi2, df)
            stats.loc[i, 'Canonical correlation'] = self.cancorr[i]
            stats.loc[i, 'Chi-square'] = chi2
            stats.loc[i, 'DF'] = df
            stats.loc[i, 'Pr > ChiSq'] = pval
            '''
        # Present rows from the largest correlation down.
        ind = stats.index.values[::-1]
        stats = stats.loc[ind, :]
        # Multivariate tests (remember x has mean removed)
        stats_mv = multivariate_stats(eigenvals,
                                      k_yvar, k_xvar, nobs - k_xvar - 1)
        return CanCorrTestResults(stats, stats_mv)
class CanCorrTestResults:
    """
    Canonical correlation results class
    Attributes
    ----------
    stats : DataFrame
        Contain statistical tests results for each canonical correlation
    stats_mv : DataFrame
        Contain the multivariate statistical tests results
    """
    def __init__(self, stats, stats_mv):
        # Per-correlation tests and multivariate tests, kept as-is.
        self.stats = stats
        self.stats_mv = stats_mv
    def __str__(self):
        return str(self.summary())
    def summary(self):
        """Assemble a two-part summary: per-correlation tests, then the
        multivariate statistics table."""
        smry = summary2.Summary()
        smry.add_title('Cancorr results')
        smry.add_df(self.stats)
        # Blank separator row between the two tables.
        smry.add_dict({'': ''})
        smry.add_dict({'Multivariate Statistics and F Approximations': ''})
        smry.add_df(self.stats_mv)
        return smry
| 33.111732 | 93 | 0.549182 |
import numpy as np
from numpy.linalg import svd
import scipy
import pandas as pd
from statsmodels.base.model import Model
from statsmodels.iolib import summary2
from .multivariate_ols import multivariate_stats
class CanCorr(Model):
    """Canonical correlation analysis via singular value decomposition.
    Finds projections x_cancoef / y_cancoef of exog / endog with
    orthonormal images whose pairwise correlations are maximized.
    """
    def __init__(self, endog, exog, tolerance=1e-8, missing='none', hasconst=None, **kwargs):
        super(CanCorr, self).__init__(endog, exog, missing=missing,
                                      hasconst=hasconst, **kwargs)
        # Fit eagerly; results live in cancorr / x_cancoef / y_cancoef.
        self._fit(tolerance)
    def _fit(self, tolerance=1e-8):
        """Compute canonical correlations and coefficients.
        Raises ValueError when exog or endog has singular values at or
        below `tolerance` (i.e. the matrix is collinear).
        """
        nobs, k_yvar = self.endog.shape
        nobs, k_xvar = self.exog.shape
        # At most min(k_yvar, k_xvar) canonical pairs exist.
        k = np.min([k_yvar, k_xvar])
        # Center both matrices (correlation is translation invariant).
        x = np.array(self.exog)
        x = x - x.mean(0)
        y = np.array(self.endog)
        y = y - y.mean(0)
        ux, sx, vx = svd(x, 0)
        # vx_ds holds vx.T with each kept column divided by its singular value.
        vx_ds = vx.T
        mask = sx > tolerance
        if mask.sum() < len(mask):
            raise ValueError('exog is collinear.')
        vx_ds[:, mask] /= sx[mask]
        uy, sy, vy = svd(y, 0)
        # Same normalization for the endog side.
        vy_ds = vy.T
        mask = sy > tolerance
        if mask.sum() < len(mask):
            raise ValueError('endog is collinear.')
        vy_ds[:, mask] /= sy[mask]
        # Singular values of ux' * uy are the canonical correlations.
        u, s, v = svd(ux.T.dot(uy), 0)
        # Clamp roundoff so every correlation lies in [0, 1].
        self.cancorr = np.array([max(0, min(s[i], 1)) for i in range(len(s))])
        self.x_cancoef = vx_ds.dot(u[:, :k])
        self.y_cancoef = vy_ds.dot(v.T[:, :k])
    def corr_test(self):
        """Approximate F tests of each canonical correlation based on
        Wilks' lambda; returns a CanCorrTestResults instance.
        """
        nobs, k_yvar = self.endog.shape
        nobs, k_xvar = self.exog.shape
        eigenvals = np.power(self.cancorr, 2)
        stats = pd.DataFrame(columns=['Canonical Correlation', "Wilks' lambda",
                                      'Num DF','Den DF', 'F Value','Pr > F'],
                             index=list(range(len(eigenvals) - 1, -1, -1)))
        prod = 1
        # Accumulate Wilks' lambda from the smallest correlation upward,
        # applying an F approximation at each step.
        for i in range(len(eigenvals) - 1, -1, -1):
            prod *= 1 - eigenvals[i]
            p = k_yvar - i
            q = k_xvar - i
            r = (nobs - k_yvar - 1) - (p - q + 1) / 2
            u = (p * q - 2) / 4
            df1 = p * q
            if p ** 2 + q ** 2 - 5 > 0:
                t = np.sqrt(((p * q) ** 2 - 4) / (p ** 2 + q ** 2 - 5))
            else:
                t = 1
            df2 = r * t - 2 * u
            lmd = np.power(prod, 1 / t)
            F = (1 - lmd) / lmd * df2 / df1
            stats.loc[i, 'Canonical Correlation'] = self.cancorr[i]
            stats.loc[i, "Wilks' lambda"] = prod
            stats.loc[i, 'Num DF'] = df1
            stats.loc[i, 'Den DF'] = df2
            stats.loc[i, 'F Value'] = F
            pval = scipy.stats.f.sf(F, df1, df2)
            stats.loc[i, 'Pr > F'] = pval
        # Present rows from the largest correlation down.
        ind = stats.index.values[::-1]
        stats = stats.loc[ind, :]
        stats_mv = multivariate_stats(eigenvals,
                                      k_yvar, k_xvar, nobs - k_xvar - 1)
        return CanCorrTestResults(stats, stats_mv)
class CanCorrTestResults:
    """Container for canonical correlation test results.
    Attributes
    ----------
    stats : DataFrame
        Statistical test results for each canonical correlation.
    stats_mv : DataFrame
        Multivariate statistical test results.
    """
    def __init__(self, stats, stats_mv):
        self.stats = stats
        self.stats_mv = stats_mv
    def __str__(self):
        return self.summary().__str__()
    def summary(self):
        """Build a summary with both result tables."""
        summ = summary2.Summary()
        summ.add_title('Cancorr results')
        summ.add_df(self.stats)
        # Blank separator row between the two tables.
        summ.add_dict({'': ''})
        summ.add_dict({'Multivariate Statistics and F Approximations': ''})
        summ.add_df(self.stats_mv)
        return summ
| true | true |
f714c1e8dffd5c7377f91e0b8a143f15545e6c6f | 4,479 | py | Python | subcmds/branches.py | opensourcechipspark/repo | 5db69f3f6616ea199a7840f0602b988f8d5504b9 | [
"Apache-2.0"
] | null | null | null | subcmds/branches.py | opensourcechipspark/repo | 5db69f3f6616ea199a7840f0602b988f8d5504b9 | [
"Apache-2.0"
] | null | null | null | subcmds/branches.py | opensourcechipspark/repo | 5db69f3f6616ea199a7840f0602b988f8d5504b9 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2009 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
from color import Coloring
from command import Command
class BranchColoring(Coloring):
  """Colored printers used by the branch listing output."""
  def __init__(self, config):
    Coloring.__init__(self, config, 'branch')
    # Branch currently checked out somewhere: green.
    self.current = self.printer('current', fg='green')
    # Ordinary local branch: default color.
    self.local = self.printer('local')
    # Projects a branch is NOT in: red.
    self.notinproject = self.printer('notinproject', fg='red')
class BranchInfo(object):
  """Aggregated state of one branch name across multiple projects."""
  def __init__(self, name):
    self.name = name
    # How many projects have this branch checked out.
    self.current = 0
    # How many projects have published this branch.
    self.published = 0
    # How many projects published exactly the current revision.
    self.published_equal = 0
    # Per-project branch objects merged into this summary.
    self.projects = []
  def add(self, b):
    """Fold one project's branch object into the aggregate counters."""
    if b.current:
      self.current = self.current + 1
    if b.published:
      self.published = self.published + 1
      if b.published == b.revision:
        self.published_equal = self.published_equal + 1
    self.projects.append(b)
  @property
  def IsCurrent(self):
    """True when at least one project has this branch checked out."""
    return bool(self.current)
  @property
  def IsPublished(self):
    """True when at least one project has published this branch."""
    return bool(self.published)
  @property
  def IsPublishedEqual(self):
    """True when every added project published its exact revision."""
    return len(self.projects) == self.published_equal
class Branches(Command):
  common = True
  helpSummary = "View current topic branches"
  helpUsage = """
%prog [<project>...]
Summarizes the currently available topic branches.
Branch Display
--------------
The branch display output by this command is organized into four
columns of information; for example:
  *P nocolor | in repo
     repo2 |
The first column contains a * if the branch is the currently
checked out branch in any of the specified projects, or a blank
if no project has the branch checked out.
The second column contains either blank, p or P, depending upon
the upload status of the branch.
 (blank): branch not yet published by repo upload
       P: all commits were published by repo upload
       p: only some commits were published by repo upload
The third column contains the branch name.
The fourth column (after the | separator) lists the projects that
the branch appears in, or does not appear in. If no project list
is shown, then the branch appears in all projects.
"""
  def Execute(self, opt, args):
    """Print the four-column branch summary for the selected projects."""
    projects = self.GetProjects(args)
    out = BranchColoring(self.manifest.manifestProject.config)
    all_branches = {}
    project_cnt = len(projects)
    # Merge every project's branches into per-name BranchInfo aggregates.
    for project in projects:
      for name, b in project.GetBranches().items():
        b.project = project
        if name not in all_branches:
          all_branches[name] = BranchInfo(name)
        all_branches[name].add(b)
    names = list(sorted(all_branches))
    if not names:
      print(' (no branches)', file=sys.stderr)
      return
    # Name column width: at least 25, grown to fit the longest name.
    width = 25
    for name in names:
      if width < len(name):
        width = len(name)
    for name in names:
      i = all_branches[name]
      in_cnt = len(i.projects)
      # '*' marks a branch that is checked out in some project.
      if i.IsCurrent:
        current = '*'
        hdr = out.current
      else:
        current = ' '
        hdr = out.local
      # 'P' = fully published, 'p' = partially published, ' ' = unpublished.
      if i.IsPublishedEqual:
        published = 'P'
      elif i.IsPublished:
        published = 'p'
      else:
        published = ' '
      hdr('%c%c %-*s' % (current, published, width, name))
      out.write(' |')
      if in_cnt < project_cnt:
        fmt = out.write
        paths = []
        # Print whichever project list is shorter: "in" or "not in".
        if in_cnt < project_cnt - in_cnt:
          in_type = 'in'
          for b in i.projects:
            paths.append(b.project.relpath)
        else:
          fmt = out.notinproject
          in_type = 'not in'
          have = set()
          for b in i.projects:
            have.add(b.project)
          for p in projects:
            if not p in have:
              paths.append(p.relpath)
        s = ' %s %s' % (in_type, ', '.join(paths))
        if width + 7 + len(s) < 80:
          fmt(s)
        else:
          # Too wide for one line: list one project per line instead.
          fmt(' %s:' % in_type)
          for p in paths:
            out.nl()
            fmt(width*' ' + ' %s' % p)
      else:
        out.write(' in all projects')
      out.nl()
| 26.820359 | 74 | 0.625363 |
from __future__ import print_function
import sys
from color import Coloring
from command import Command
class BranchColoring(Coloring):
    """Colour printers for the branch table: green for the checked-out
    branch, plain for other local branches, red for missing projects."""
    def __init__(self, config):
        Coloring.__init__(self, config, 'branch')
        self.current = self.printer('current', fg='green')
        self.local = self.printer('local')
        self.notinproject = self.printer('notinproject', fg='red')
class BranchInfo(object):
    """Aggregated state of one topic-branch name across many projects."""
    def __init__(self, name):
        self.name = name
        # Count of projects with this branch checked out.
        self.current = 0
        # Count of projects that have published this branch.
        self.published = 0
        # Count of published copies whose revision equals the published state.
        self.published_equal = 0
        self.projects = []
    def add(self, b):
        # Fold one project's branch record into the totals.
        if b.current:
            self.current += 1
        if b.published:
            self.published += 1
            if b.revision == b.published:
                self.published_equal += 1
        self.projects.append(b)
    @property
    def IsCurrent(self):
        # True when at least one project has this branch checked out.
        return self.current > 0
    @property
    def IsPublished(self):
        # True when at least one project has published this branch.
        return self.published > 0
    @property
    def IsPublishedEqual(self):
        # True when every recorded project is published at its revision.
        return self.published_equal == len(self.projects)
class Branches(Command):
    """Implementation of ``repo branches``: summarize topic branches."""
    common = True
    helpSummary = "View current topic branches"
    helpUsage = """
%prog [<project>...]
Summarizes the currently available topic branches.
Branch Display
--------------
The branch display output by this command is organized into four
columns of information; for example:
 *P nocolor | in repo
    repo2 |
The first column contains a * if the branch is the currently
checked out branch in any of the specified projects, or a blank
if no project has the branch checked out.
The second column contains either blank, p or P, depending upon
the upload status of the branch.
(blank): branch not yet published by repo upload
P: all commits were published by repo upload
p: only some commits were published by repo upload
The third column contains the branch name.
The fourth column (after the | separator) lists the projects that
the branch appears in, or does not appear in. If no project list
is shown, then the branch appears in all projects.
"""
    def Execute(self, opt, args):
        # Gather per-branch statistics across every matching project.
        projects = self.GetProjects(args)
        out = BranchColoring(self.manifest.manifestProject.config)
        all_branches = {}
        project_cnt = len(projects)
        for project in projects:
            for name, b in project.GetBranches().items():
                b.project = project
                if name not in all_branches:
                    all_branches[name] = BranchInfo(name)
                all_branches[name].add(b)
        names = list(sorted(all_branches))
        if not names:
            print(' (no branches)', file=sys.stderr)
            return
        # Width of the branch-name column: at least 25, widened to fit
        # the longest branch name.
        width = 25
        for name in names:
            if width < len(name):
                width = len(name)
        for name in names:
            i = all_branches[name]
            in_cnt = len(i.projects)
            # '*' marks a branch checked out in at least one project.
            if i.IsCurrent:
                current = '*'
                hdr = out.current
            else:
                current = ' '
                hdr = out.local
            # 'P' = fully published, 'p' = partially published.
            if i.IsPublishedEqual:
                published = 'P'
            elif i.IsPublished:
                published = 'p'
            else:
                published = ' '
            hdr('%c%c %-*s' % (current, published, width, name))
            out.write(' |')
            if in_cnt < project_cnt:
                # Branch is not in every project: list whichever set
                # ("in" or "not in") is shorter.
                fmt = out.write
                paths = []
                if in_cnt < project_cnt - in_cnt:
                    in_type = 'in'
                    for b in i.projects:
                        paths.append(b.project.relpath)
                else:
                    fmt = out.notinproject
                    in_type = 'not in'
                    have = set()
                    for b in i.projects:
                        have.add(b.project)
                    for p in projects:
                        if not p in have:
                            paths.append(p.relpath)
                s = ' %s %s' % (in_type, ', '.join(paths))
                # Keep to one line when it fits in 80 columns, else wrap
                # one project path per line.
                if width + 7 + len(s) < 80:
                    fmt(s)
                else:
                    fmt(' %s:' % in_type)
                    for p in paths:
                        out.nl()
                        fmt(width*' ' + ' %s' % p)
            else:
                out.write(' in all projects')
            out.nl()
| true | true |
f714c30f238ba9efcd7dd75cb3798c567f7905d2 | 35,412 | py | Python | demosaic_pack/amaze_demosaic.py | rongtianjie/dcraw_py | fd45d819a67d2f52d7ca61abbe145ab1b172bee9 | [
"Unlicense"
] | 1 | 2022-03-22T02:45:10.000Z | 2022-03-22T02:45:10.000Z | demosaic_pack/amaze_demosaic.py | rongtianjie/dcraw_py | fd45d819a67d2f52d7ca61abbe145ab1b172bee9 | [
"Unlicense"
] | null | null | null | demosaic_pack/amaze_demosaic.py | rongtianjie/dcraw_py | fd45d819a67d2f52d7ca61abbe145ab1b172bee9 | [
"Unlicense"
] | null | null | null | import numpy as np
def amaze_demosaic(src, raw):
    """Demosaic the Bayer mosaic ``src`` using the AMaZE algorithm.

    Builds a CFA colour-index map from the rawpy object, folding the
    second green plane (index 3) into green (index 1), then delegates to
    amaze_demosaic_libraw.

    Args:
        src: 2D uint16 Bayer mosaic image.
        raw: rawpy.RawPy-like object providing ``raw_colors`` and
            ``daylight_whitebalance``.

    Returns:
        uint16 RGB image of shape (height, width, 3).

    Bug fix: the original modified ``raw.raw_colors`` in place, silently
    mutating the caller's rawpy object; we now work on a copy.
    """
    cfarray = raw.raw_colors.copy()
    cfarray[cfarray == 3] = 1
    return amaze_demosaic_libraw(src, cfarray, raw.daylight_whitebalance)
def amaze_demosaic_libraw(src, cfarray, daylight_wb):
    """AMaZE (Aliasing Minimization and Zipper Elimination) demosaic.

    Python port of the AMaZE algorithm (E. Martinec, as used by
    RawTherapee/LibRaw), processing the image in overlapping 512x512
    tiles with a 16-pixel border.

    Args:
        src: 2D uint16 Bayer mosaic, values in [0, 65535].
        cfarray: per-pixel CFA colour indices (0=R, 1=G, 2=B) covering
            the full image; greens must already be folded to index 1.
        daylight_wb: white-balance multipliers; the minimum channel is
            used as the highlight clipping point.

    Returns:
        uint16 RGB image of shape (height, width, 3).

    NOTE(review): several inner loops below appear to reuse a stale
    ``indx`` from an earlier loop instead of recomputing ``rr*TS+cc``;
    these spots are flagged inline and should be verified against the
    reference C implementation.
    """
    # Tile size; each tile overlaps its neighbours by 32 pixels.
    TS = 512
    winx = winy = 0
    width = src.shape[1]
    height = src.shape[0]
    image = np.empty([height, width, 3], dtype=np.uint16)
    clip_pt = min(daylight_wb[0], daylight_wb[1], daylight_wb[2])
    # Offsets into the flattened TS*TS tile: v* = vertical steps,
    # p*/m* = the two diagonal directions.
    v1 = TS
    v2 = 2 * TS
    v3 = 3 * TS
    p1 = -TS + 1
    p2 = -2 * TS + 2
    p3 = -3 * TS + 3
    m1 = TS + 1
    m2 = 2 * TS + 2
    m3 = 3 * TS + 3
    nbr = [-v2,-2,2,v2,0]
    eps, epssq = 1e-5, 1e-10
    # adaptive ratios threshold
    arthresh=0.75
    # nyquist texture test threshold
    nyqthresh=0.5
    # diagonal interpolation test threshold
    pmthresh=0.25
    # factors for bounding interpolation in saturated regions
    lbd, ubd = 1, 1 # lbd=0.66, ubd=1.5 alternative values;
    # gaussian on 5x5 quincunx, sigma=1.2
    gaussodd = [0.14659727707323927, 0.103592713382435, 0.0732036125103057, 0.0365543548389495]
    # gaussian on 5x5, sigma=1.2
    gaussgrad = [0.07384411893421103, 0.06207511968171489, 0.0521818194747806, 0.03687419286733595, 0.03099732204057846, 0.018413194161458882]
    # gaussian on 3x3, sigma =0.7
    gauss1 = [0.3376688223162362, 0.12171198028231786, 0.04387081413862306]
    # gaussian on 5x5 alt quincunx, sigma=1.5
    gausseven = [0.13719494435797422, 0.05640252782101291]
    # guassian on quincunx grid
    gquinc = [0.169917, 0.108947, 0.069855, 0.0287182]
    # Per-tile working buffers, indexed by the flattened offset rr*TS+cc.
    rgb = np.empty([TS*TS, 3], dtype=np.float32)
    delh = np.empty(TS*TS, dtype=np.float32)
    delv = np.empty(TS*TS, dtype=np.float32)
    delhsq = np.empty(TS*TS, dtype=np.float32)
    delvsq = np.empty(TS*TS, dtype=np.float32)
    dirwts = np.empty([TS*TS, 2], dtype=np.float32)
    vcd = np.empty(TS*TS, dtype=np.float32)
    hcd = np.empty(TS*TS, dtype=np.float32)
    vcdalt = np.empty(TS*TS, dtype=np.float32)
    hcdalt = np.empty(TS*TS, dtype=np.float32)
    vcdsq = np.empty(TS*TS, dtype=np.float32)
    hcdsq = np.empty(TS*TS, dtype=np.float32)
    cddiffsq = np.empty(TS*TS, dtype=np.float32)
    hvwt = np.empty(TS*TS, dtype=np.float32)
    Dgrb = np.empty([TS*TS, 2], dtype=np.float32)
    delp = np.empty(TS*TS, dtype=np.float32)
    delm = np.empty(TS*TS, dtype=np.float32)
    rbint = np.empty(TS*TS, dtype=np.float32)
    Dgrbh2 = np.empty(TS*TS, dtype=np.float32)
    Dgrbv2 = np.empty(TS*TS, dtype=np.float32)
    dgintv = np.empty(TS*TS, dtype=np.float32)
    dginth = np.empty(TS*TS, dtype=np.float32)
    Dgrbpsq1 = np.empty(TS*TS, dtype=np.float32)
    Dgrbmsq1 = np.empty(TS*TS, dtype=np.float32)
    cfa = np.empty(TS*TS, dtype=np.float32)
    pmwt = np.empty(TS*TS, dtype=np.float32)
    rbp = np.empty(TS*TS, dtype=np.float32)
    rbm = np.empty(TS*TS, dtype=np.float32)
    nyquist = np.empty(TS*TS, dtype=np.int32)
    # determine GRBG coset; (ey,ex) is the offset of the R subarray
    if cfarray[0][0] == 1:
        if cfarray[0][1] == 0:
            ex, ey = 1, 0
        else:
            ex, ey = 0, 1
    else:
        if cfarray[0][0] == 0:
            ex = ey = 0
        else:
            ex = ey = 1
    # Start main loop
    loop_cnt = 1
    for top in range(winy-16, winy+height, TS-32):
        for left in range(winx-16, winx+width, TS-32):
            print("Loop [{}]: top: {} left: {}".format(loop_cnt, top, left))
            loop_cnt += 1
            # location of tile bottom edge
            bottom = min(top+TS, winy+height+16)
            # location of tile right edge
            right = min(left+TS, winx+width+16)
            # tile height (=TS except for bottom edge of image)
            rr1 = bottom - top
            # tile width (=TS except for right edge of image)
            cc1 = right - left
            # rgb from input CFA data
            # rgb values should be floating point number between 0 and 1
            # after white balance multipliers are applied
            # a 16 pixel border is added to each side of the image
            # bookkeeping for borders
            rrmin = 16 if top < winy else 0
            ccmin = 16 if left < winx else 0
            rrmax = winy+height-top if bottom>(winy+height) else rr1
            ccmax = winx+width-left if right>(winx+width) else cc1
            for rr in range(rrmin, rrmax):
                row = rr + top
                for cc in range(ccmin, ccmax):
                    col = cc + left
                    c = cfarray[rr, cc]
                    indx1 = rr * TS + cc
                    indx = row * width + col
                    rgb[indx1, c] = src[row, col] / 65535
                    cfa[indx1] = rgb[indx1, c]
            # fill borders
            if rrmin > 0:
                # top edge: mirror rows downward
                for rr in range(16):
                    for cc in range(ccmin, ccmax):
                        c = cfarray[rr, cc]
                        rgb[rr*TS+cc, c] = rgb[(32-rr)*TS+cc, c]
                        cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
            if rrmax < rr1:
                # bottom edge: mirror rows from the source image
                for rr in range(16):
                    for cc in range(ccmin, ccmax):
                        c = cfarray[rr, cc]
                        rgb[(rrmax+rr)*TS+cc, c] = (src[(winy+height-rr-2), left+cc])/65535
                        cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc, c]
            if ccmin > 0:
                # left edge: mirror columns
                for rr in range(rrmin, rrmax):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[rr*TS+cc, c] = rgb[rr*TS+32-cc, c]
                        cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
            if ccmax < cc1:
                # right edge: mirror columns from the source image
                for rr in range(rrmin, rrmax):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[rr*TS+ccmax+cc, c] = (src[(top+rr), (winx+width-cc-2)])/65535
                        cfa[rr*TS+ccmax+cc] = rgb[rr*TS+ccmax+cc, c]
            # also, fill the image corners
            if rrmin > 0 and ccmin > 0:
                for rr in range(16):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[(rr)*TS+cc][c] = rgb[(32-rr)*TS+(32-cc)][c]
                        cfa[(rr)*TS+cc] = rgb[(rr)*TS+cc][c]
            if rrmax < rr1 and ccmax < cc1:
                for rr in range(16):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[(rrmax+rr)*TS+ccmax+cc][c] = (src[(winy+height-rr-2)][(winx+width-cc-2)])/65535
                        cfa[(rrmax+rr)*TS+ccmax+cc] = rgb[(rrmax+rr)*TS+ccmax+cc][c]
            if rrmin > 0 and ccmax < cc1:
                for rr in range(16):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[(rr)*TS+ccmax+cc][c] = (src[(winy+32-rr)][(winx+width-cc-2)])/65535
                        cfa[(rr)*TS+ccmax+cc] = rgb[(rr)*TS+ccmax+cc][c]
            if rrmax < rr1 and ccmin > 0:
                for rr in range(16):
                    for cc in range(16):
                        c = cfarray[rr, cc]
                        rgb[(rrmax+rr)*TS+cc][c] = (src[(winy+height-rr-2)][(winx+32-cc)])/65535
                        cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc][c]
            # end of border fill
            # horizontal and vertical gradients at each pixel
            for rr in range(1, rr1-1):
                for cc in range(1, cc1-1):
                    indx = rr*TS+cc
                    delh[indx] = abs(cfa[indx + 1] - cfa[indx - 1])
                    delv[indx] = abs(cfa[indx + v1] - cfa[indx - v1])
                    delhsq[indx] = SQR(delh[indx])
                    delvsq[indx] = SQR(delv[indx])
                    delp[indx] = abs(cfa[indx+p1]-cfa[indx-p1])
                    delm[indx] = abs(cfa[indx+m1]-cfa[indx-m1])
            for rr in range(2, rr1-2):
                for cc in range(2, cc1-2):
                    indx = rr*TS+cc
                    # vert directional averaging weights
                    dirwts[indx][0] = eps+delv[indx+v1]+delv[indx-v1]+delv[indx]
                    # horizontal weights
                    dirwts[indx][1] = eps+delh[indx+1]+delh[indx-1]+delh[indx]
                    if cfarray[rr, cc] & 1:
                        # for later use in diagonal interpolation
                        Dgrbpsq1[indx]=(SQR(cfa[indx]-cfa[indx-p1])+SQR(cfa[indx]-cfa[indx+p1]))
                        Dgrbmsq1[indx]=(SQR(cfa[indx]-cfa[indx-m1])+SQR(cfa[indx]-cfa[indx+m1]))
            # interpolate G in the vertical and horizontal directions
            for rr in range(4, rr1 - 4):
                for cc in range(4, cc1 - 4):
                    indx = rr*TS+cc
                    c = cfarray[rr, cc]
                    sgn = -1 if c & 1 else 1
                    # initialization of nyquist test
                    nyquist[indx]=0
                    # preparation for diag interp
                    rbint[indx]=0
                    # color ratios in each cardinal direction
                    cru = cfa[indx - v1] * (dirwts[indx - v2][0] + dirwts[indx][0]) / (dirwts[indx - v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx - v2]))
                    crd = cfa[indx + v1] * (dirwts[indx + v2][0] + dirwts[indx][0]) / (dirwts[indx + v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx + v2]))
                    crl = cfa[indx - 1] * (dirwts[indx - 2][1] + dirwts[indx][1]) / (dirwts[indx - 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx - 2]))
                    crr = cfa[indx + 1] * (dirwts[indx + 2][1] + dirwts[indx][1]) / (dirwts[indx + 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx + 2]))
                    # G interpolated in vert/hor directions using Hamilton-Adams method
                    guha = min(clip_pt, cfa[indx - v1] + 0.5 * (cfa[indx] - cfa[indx - v2]))
                    gdha = min(clip_pt, cfa[indx + v1] + 0.5 * (cfa[indx] - cfa[indx + v2]))
                    glha = min(clip_pt, cfa[indx - 1] + 0.5 * (cfa[indx] - cfa[indx - 2]))
                    grha = min(clip_pt, cfa[indx + 1] + 0.5 * (cfa[indx] - cfa[indx + 2]))
                    # G interpolated in vert/hor directions using adaptive ratios
                    guar = cfa[indx] * cru if abs(1-cru) < arthresh else guha
                    gdar = cfa[indx] * crd if abs(1-crd) < arthresh else gdha
                    glar = cfa[indx] * crl if abs(1-crl) < arthresh else glha
                    grar = cfa[indx] * crr if abs(1-crr) < arthresh else grha
                    # adaptive weights for vertical/horizontal directions
                    hwt = dirwts[indx - 1][1] / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
                    vwt = dirwts[indx - v1][0] / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
                    # interpolated G via adaptive weighTS of cardinal evaluations
                    Gintvar = vwt * gdar + (1-vwt) * guar
                    Ginthar = hwt * grar + (1-hwt) * glar
                    Gintvha = vwt * gdha + (1-vwt) * guha
                    Ginthha = hwt * grha + (1-hwt) * glha
                    # interpolated color differences
                    vcd[indx] = sgn * (Gintvar-cfa[indx])
                    hcd[indx] = sgn * (Ginthar-cfa[indx])
                    vcdalt[indx] = sgn * (Gintvha-cfa[indx])
                    hcdalt[indx] = sgn * (Ginthha-cfa[indx])
                    if cfa[indx] > 0.8 * clip_pt or Gintvha > 0.8 * clip_pt or Ginthha > 0.8 * clip_pt:
                        # use HA if highlighTS are (nearly) clipped
                        guar = guha
                        gdar = gdha
                        glar = glha
                        grar = grha
                        vcd[indx] = vcdalt[indx]
                        hcd[indx] = hcdalt[indx]
                    # differences of interpolations in opposite directions
                    dgintv[indx] = min((guha - gdha) ** 2, (guar - gdar) ** 2)
                    dginth[indx] = min((glha - grha) ** 2, (glar - grar) ** 2)
            # NOTE(review): this loop never recomputes ``indx = rr*TS+cc``;
            # it reuses the last value from the loop above. Verify against
            # the reference implementation — likely a porting bug.
            for rr in range(4, rr1-4):
                for cc in range(4, cc1-4):
                    c = cfarray[rr, cc]
                    hcdvar = 3*(SQR(hcd[indx-2])+SQR(hcd[indx])+SQR(hcd[indx+2]))-SQR(hcd[indx-2]+hcd[indx]+hcd[indx+2])
                    hcdaltvar = 3*(SQR(hcdalt[indx-2])+SQR(hcdalt[indx])+SQR(hcdalt[indx+2]))-SQR(hcdalt[indx-2]+hcdalt[indx]+hcdalt[indx+2])
                    vcdvar = 3*(SQR(vcd[indx-v2])+SQR(vcd[indx])+SQR(vcd[indx+v2]))-SQR(vcd[indx-v2]+vcd[indx]+vcd[indx+v2])
                    vcdaltvar = 3*(SQR(vcdalt[indx-v2])+SQR(vcdalt[indx])+SQR(vcdalt[indx+v2]))-SQR(vcdalt[indx-v2]+vcdalt[indx]+vcdalt[indx+v2])
                    # choose the smallest variance; this yields a smoother interpolation
                    if hcdaltvar < hcdvar:
                        hcd[indx] = hcdalt[indx]
                    if vcdaltvar < vcdvar:
                        vcd[indx] = vcdalt[indx]
                    # bound the interpolation in regions of high saturation
                    # vertical and horizontal G interpolations
                    if c & 1: # G site
                        Ginth = -hcd[indx] + cfa[indx]
                        Gintv = -vcd[indx] + cfa[indx]
                        if hcd[indx] > 0:
                            if 3 * hcd[indx] > (Ginth + cfa[indx]):
                                hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
                            else:
                                hwt = 1 - 3 * hcd[indx] / (eps + Ginth + cfa[indx])
                                hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (-np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx])
                        if vcd[indx] > 0:
                            if 3 * vcd[indx] > (Gintv + cfa[indx]):
                                vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
                            else:
                                vwt = 1 - 3 * vcd[indx] / (eps + Gintv + cfa[indx])
                                vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (-np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx])
                        if Ginth > clip_pt:
                            hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
                        if Gintv > clip_pt:
                            vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
                    else: # R or B site
                        Ginth = hcd[indx] + cfa[indx]
                        Gintv = vcd[indx] + cfa[indx]
                        if hcd[indx] < 0:
                            if 3 * hcd[indx] < -(Ginth + cfa[indx]):
                                hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
                            else:
                                hwt = 1 + 3 * hcd[indx] / (eps + Ginth + cfa[indx])
                                hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx])
                        if vcd[indx] < 0:
                            if 3 * vcd[indx] < -(Gintv + cfa[indx]):
                                vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
                            else:
                                vwt = 1 + 3 * vcd[indx] / (eps + Gintv + cfa[indx])
                                vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx])
                        if Ginth > clip_pt:
                            hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
                        if Gintv > clip_pt:
                            vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
                    vcdsq[indx] = SQR(vcd[indx])
                    hcdsq[indx] = SQR(hcd[indx])
                    cddiffsq[indx] = SQR(vcd[indx]-hcd[indx])
            for rr in range(6, rr1-6):
                for cc in range(6+(cfarray[rr, 2]&1), cc1-6, 2):
                    indx = rr * TS + cc
                    # compute color difference variances in cardinal directions
                    Dgrbvvaru = 4*(vcdsq[indx]+vcdsq[indx-v1]+vcdsq[indx-v2]+vcdsq[indx-v3])-SQR(vcd[indx]+vcd[indx-v1]+vcd[indx-v2]+vcd[indx-v3])
                    Dgrbvvard = 4*(vcdsq[indx]+vcdsq[indx+v1]+vcdsq[indx+v2]+vcdsq[indx+v3])-SQR(vcd[indx]+vcd[indx+v1]+vcd[indx+v2]+vcd[indx+v3])
                    Dgrbhvarl = 4*(hcdsq[indx]+hcdsq[indx-1]+hcdsq[indx-2]+hcdsq[indx-3])-SQR(hcd[indx]+hcd[indx-1]+hcd[indx-2]+hcd[indx-3])
                    Dgrbhvarr = 4*(hcdsq[indx]+hcdsq[indx+1]+hcdsq[indx+2]+hcdsq[indx+3])-SQR(hcd[indx]+hcd[indx+1]+hcd[indx+2]+hcd[indx+3])
                    hwt = dirwts[indx-1][1]/(dirwts[indx-1][1]+dirwts[indx+1][1])
                    vwt = dirwts[indx-v1][0]/(dirwts[indx+v1][0]+dirwts[indx-v1][0])
                    vcdvar = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
                    hcdvar = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
                    # compute fluctuations in up/down and left/right interpolations of colors
                    Dgrbvvaru = (dgintv[indx])+(dgintv[indx-v1])+(dgintv[indx-v2])
                    Dgrbvvard = (dgintv[indx])+(dgintv[indx+v1])+(dgintv[indx+v2])
                    Dgrbhvarl = (dginth[indx])+(dginth[indx-1])+(dginth[indx-2])
                    Dgrbhvarr = (dginth[indx])+(dginth[indx+1])+(dginth[indx+2])
                    vcdvar1 = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
                    hcdvar1 = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
                    # determine adaptive weights for G interpolation
                    varwt=hcdvar/(vcdvar+hcdvar)
                    diffwt=hcdvar1/(vcdvar1+hcdvar1)
                    # if both agree on interpolation direction, choose the one with strongest directional discrimination;
                    # otherwise, choose the u/d and l/r difference fluctuation weights
                    if ((0.5 - varwt) * (0.5 - diffwt) > 0) and (abs(0.5 - diffwt) < abs(0.5 - varwt)):
                        hvwt[indx] = varwt
                    else:
                        hvwt[indx] = diffwt
            # Nyquist test
            for rr in range(6, rr1-6):
                for cc in range(6 + (cfarray[rr, 2]&1), cc1 - 6, 2):
                    indx = rr * TS + cc
                    # nyquist texture test: ask if difference of vcd compared to hcd is larger or smaller than RGGB gradients
                    nyqtest = (gaussodd[0]*cddiffsq[indx] + gaussodd[1]*(cddiffsq[indx-m1]+cddiffsq[indx+p1] + cddiffsq[indx-p1]+cddiffsq[indx+m1]) + gaussodd[2]*(cddiffsq[indx-v2]+cddiffsq[indx-2]+ cddiffsq[indx+2]+cddiffsq[indx+v2]) + gaussodd[3]*(cddiffsq[indx-m2]+cddiffsq[indx+p2] + cddiffsq[indx-p2]+cddiffsq[indx+m2]))
                    nyqtest -= nyqthresh*(gaussgrad[0]*(delhsq[indx]+delvsq[indx])+gaussgrad[1]*(delhsq[indx-v1]+delvsq[indx-v1]+delhsq[indx+1]+delvsq[indx+1] + delhsq[indx-1]+delvsq[indx-1]+delhsq[indx+v1]+delvsq[indx+v1])+ gaussgrad[2]*(delhsq[indx-m1]+delvsq[indx-m1]+delhsq[indx+p1]+delvsq[indx+p1]+ delhsq[indx-p1]+delvsq[indx-p1]+delhsq[indx+m1]+delvsq[indx+m1])+ gaussgrad[3]*(delhsq[indx-v2]+delvsq[indx-v2]+delhsq[indx-2]+delvsq[indx-2]+ delhsq[indx+2]+delvsq[indx+2]+delhsq[indx+v2]+delvsq[indx+v2])+ gaussgrad[4]*(delhsq[indx-2*TS-1]+delvsq[indx-2*TS-1]+delhsq[indx-2*TS+1]+delvsq[indx-2*TS+1]+ delhsq[indx-TS-2]+delvsq[indx-TS-2]+delhsq[indx-TS+2]+delvsq[indx-TS+2]+ delhsq[indx+TS-2]+delvsq[indx+TS-2]+delhsq[indx+TS+2]+delvsq[indx-TS+2]+ delhsq[indx+2*TS-1]+delvsq[indx+2*TS-1]+delhsq[indx+2*TS+1]+delvsq[indx+2*TS+1])+ gaussgrad[5]*(delhsq[indx-m2]+delvsq[indx-m2]+delhsq[indx+p2]+delvsq[indx+p2]+ delhsq[indx-p2]+delvsq[indx-p2]+delhsq[indx+m2]+delvsq[indx+m2]))
                    if nyqtest > 0:
                        # nyquist=1 for nyquist region
                        nyquist[indx] = 1
            # NOTE(review): ``indx`` is not recomputed in this loop either;
            # the intended value is presumably rr*TS+cc — verify.
            for rr in range(8, rr1-8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    areawt=(nyquist[indx-v2]+nyquist[indx-m1]+nyquist[indx+p1]+nyquist[indx-2]+nyquist[indx]+nyquist[indx+2]+nyquist[indx-p1]+nyquist[indx+m1]+nyquist[indx+v2])
                    # if most of your neighbors are named Nyquist, it's likely that you're one too
                    nyquist[indx] = 1 if areawt > 4 else 0
            # end of Nyquist test
            # in areas of Nyquist texture, do area interpolation
            for rr in range(8, rr1 - 8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    indx = rr * TS + cc
                    if nyquist[indx]:
                        # area interpolation
                        sumh = sumv = sumsqh = sumsqv = areawt = 0
                        for i in range(-6, 7, 2):
                            for j in range(-6, 7, 2):
                                indx1 = (rr + i) * TS + cc + j
                                if nyquist[indx1]:
                                    sumh += cfa[indx1] - 0.5 * (cfa[indx1-1]+cfa[indx1+1])
                                    sumv += cfa[indx1] - 0.5 * (cfa[indx1-v1]+cfa[indx1+v1])
                                    sumsqh += 0.5 * (SQR(cfa[indx1]-cfa[indx1-1]) + SQR(cfa[indx1]-cfa[indx1+1]))
                                    sumsqv += 0.5 * (SQR(cfa[indx1]-cfa[indx1-v1]) + SQR(cfa[indx1]-cfa[indx1+v1]))
                                    areawt += 1
                        # horizontal and vertical color differences, and adaptive weight
                        hcdvar = epssq + max(0, areawt*sumsqh-sumh*sumh)
                        vcdvar = epssq + max(0, areawt*sumsqv-sumv*sumv)
                        hvwt[indx] = hcdvar / (vcdvar + hcdvar)
            # end of area interpolation
            # populate G at R/B sites
            for rr in range(8, rr1-8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    indx = rr * TS + cc
                    # first ask if one gets more directional discrimination from nearby B/R sites
                    hvwtalt = 0.25 * (hvwt[indx-m1] + hvwt[indx+p1] + hvwt[indx-p1] + hvwt[indx+m1])
                    vo = abs(0.5 - hvwt[indx])
                    ve = abs(0.5 - hvwtalt)
                    # a better result was obtained from the neighbors
                    if vo < ve:
                        # NOTE(review): ``indx>>1`` looks wrong — the C
                        # original stores at the half-resolution index only
                        # when hvwt is a half-size buffer; here hvwt is
                        # full-size, so this likely should be hvwt[indx].
                        hvwt[indx>>1] = hvwtalt
                    # evaluate color differences
                    Dgrb[indx][0] = (hcd[indx]*(1-hvwt[indx]) + vcd[indx]*hvwt[indx])
                    # evaluate G
                    rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
                    # local curvature in G (preparation for nyquist refinement step)
                    if nyquist[indx]:
                        Dgrbh2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-1][1]+rgb[indx+1][1]))
                        Dgrbv2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-v1][1]+rgb[indx+v1][1]))
                    else:
                        Dgrbh2[indx] = Dgrbv2[indx] = 0
            # end of standard interpolation
            # refine Nyquist areas using G curvatures
            for rr in range(8, rr1-8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    indx = rr * TS + cc
                    if nyquist[indx]:
                        # local averages (over Nyquist pixels only) of G curvature squared
                        gvarh = epssq + (gquinc[0]*Dgrbh2[indx]+gquinc[1]*(Dgrbh2[indx-m1]+Dgrbh2[indx+p1]+Dgrbh2[indx-p1]+Dgrbh2[indx+m1])+gquinc[2]*(Dgrbh2[indx-v2]+Dgrbh2[indx-2]+Dgrbh2[indx+2]+Dgrbh2[indx+v2])+gquinc[3]*(Dgrbh2[indx-m2]+Dgrbh2[indx+p2]+Dgrbh2[indx-p2]+Dgrbh2[indx+m2]))
                        gvarv = epssq + (gquinc[0]*Dgrbv2[indx]+gquinc[1]*(Dgrbv2[indx-m1]+Dgrbv2[indx+p1]+Dgrbv2[indx-p1]+Dgrbv2[indx+m1])+gquinc[2]*(Dgrbv2[indx-v2]+Dgrbv2[indx-2]+Dgrbv2[indx+2]+Dgrbv2[indx+v2])+gquinc[3]*(Dgrbv2[indx-m2]+Dgrbv2[indx+p2]+Dgrbv2[indx-p2]+Dgrbv2[indx+m2]))
                        # use the results as weights for refined G interpolation
                        Dgrb[indx][0] = (hcd[indx]*gvarv + vcd[indx]*gvarh)/(gvarv+gvarh)
                        rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
            # diagonal interpolation correction
            for rr in range(8, rr1-8):
                for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
                    indx = rr * TS + cc
                    rbvarp = epssq + (gausseven[0]*(Dgrbpsq1[indx-v1]+Dgrbpsq1[indx-1]+Dgrbpsq1[indx+1]+Dgrbpsq1[indx+v1]) + gausseven[1]*(Dgrbpsq1[indx-v2-1]+Dgrbpsq1[indx-v2+1]+Dgrbpsq1[indx-2-v1]+Dgrbpsq1[indx+2-v1]+ Dgrbpsq1[indx-2+v1]+Dgrbpsq1[indx+2+v1]+Dgrbpsq1[indx+v2-1]+Dgrbpsq1[indx+v2+1]))
                    rbvarm = epssq + (gausseven[0]*(Dgrbmsq1[indx-v1]+Dgrbmsq1[indx-1]+Dgrbmsq1[indx+1]+Dgrbmsq1[indx+v1]) + gausseven[1]*(Dgrbmsq1[indx-v2-1]+Dgrbmsq1[indx-v2+1]+Dgrbmsq1[indx-2-v1]+Dgrbmsq1[indx+2-v1]+ Dgrbmsq1[indx-2+v1]+Dgrbmsq1[indx+2+v1]+Dgrbmsq1[indx+v2-1]+Dgrbmsq1[indx+v2+1]))
                    # diagonal color ratios
                    crse=2*(cfa[indx+m1])/(eps+cfa[indx]+(cfa[indx+m2]))
                    crnw=2*(cfa[indx-m1])/(eps+cfa[indx]+(cfa[indx-m2]))
                    crne=2*(cfa[indx+p1])/(eps+cfa[indx]+(cfa[indx+p2]))
                    crsw=2*(cfa[indx-p1])/(eps+cfa[indx]+(cfa[indx-p2]))
                    # assign B/R at R/B sites
                    if abs(1 - crse) < arthresh:
                        rbse = cfa[indx] * crse
                    else:
                        rbse = cfa[indx + m1] + 0.5 * (cfa[indx] - cfa[indx + m2])
                    # NOTE(review): the NW branch differs from its three
                    # siblings — the Hamilton-Adams fallback is inside the
                    # threshold test and there is no else, so ``rbnw`` may
                    # be undefined on the first iteration. Verify against
                    # the reference implementation.
                    if abs(1 - crnw) < arthresh:
                        rbnw = (cfa[indx - m1]) + 0.5 *(cfa[indx] - cfa[indx - m2])
                    if abs(1 - crne) < arthresh:
                        rbne = cfa[indx] * crne
                    else:
                        # NOTE(review): missing parentheses around the
                        # 0.5*(...) term compared with the SE/SW cases.
                        rbne = (cfa[indx + p1]) + 0.5 * cfa[indx] - cfa[indx + p2]
                    if abs(1 - crsw) < arthresh:
                        rbsw = cfa[indx] * crsw
                    else:
                        rbsw = (cfa[indx - p1]) + 0.5 * (cfa[indx] - cfa[indx - p2])
                    wtse= eps+delm[indx]+delm[indx+m1]+delm[indx+m2] # same as for wtu,wtd,wtl,wtr
                    wtnw= eps+delm[indx]+delm[indx-m1]+delm[indx-m2]
                    wtne= eps+delp[indx]+delp[indx+p1]+delp[indx+p2]
                    wtsw= eps+delp[indx]+delp[indx-p1]+delp[indx-p2]
                    rbm[indx] = (wtse*rbnw+wtnw*rbse)/(wtse+wtnw)
                    rbp[indx] = (wtne*rbsw+wtsw*rbne)/(wtne+wtsw)
                    pmwt[indx] = rbvarm/(rbvarp+rbvarm)
                    # bound the interpolation in regions of high saturation
                    if rbp[indx] < cfa[indx]:
                        if 2 * (rbp[indx]) < cfa[indx]:
                            rbp[indx] = np.median([rbp[indx] , cfa[indx - p1], cfa[indx + p1]])
                        else:
                            pwt = 2 * (cfa[indx] - rbp[indx]) / (eps + rbp[indx] + cfa[indx])
                            rbp[indx] = pwt * rbp[indx] + (1 - pwt) * np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
                    if rbm[indx] < cfa[indx]:
                        if 2 * (rbm[indx]) < cfa[indx]:
                            rbm[indx] = np.median([rbm[indx] , cfa[indx - m1], cfa[indx + m1]])
                        else:
                            mwt = 2 * (cfa[indx] - rbm[indx]) / (eps + rbm[indx] + cfa[indx])
                            rbm[indx] = mwt * rbm[indx] + (1 - mwt) * np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
                    if rbp[indx] > clip_pt:
                        rbp[indx] = np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
                    if rbm[indx] > clip_pt:
                        rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
            for rr in range(10, rr1-10):
                for cc in range(10 + (cfarray[rr, 2]&1), cc1-10, 2):
                    indx = rr * TS + cc
                    # first ask if one geTS more directional discrimination from nearby B/R sites
                    pmwtalt = 0.25*(pmwt[indx-m1]+pmwt[indx+p1]+pmwt[indx-p1]+pmwt[indx+m1])
                    vo = abs(0.5-pmwt[indx])
                    ve = abs(0.5-pmwtalt)
                    if vo < ve:
                        pmwt[indx] = pmwtalt
                    rbint[indx] = 0.5*(cfa[indx] + rbm[indx]*(1-pmwt[indx]) + rbp[indx]*pmwt[indx])
            for rr in range(12, rr1 - 12):
                for cc in range(12 + (cfarray[rr, 2]&1), cc1 - 12, 2):
                    indx = rr * TS + cc
                    if abs(0.5 - pmwt[indx]) < abs(0.5 - hvwt[indx]):
                        continue
                    # now interpolate G vertically/horizontally using R+B values
                    # unfortunately, since G interpolation cannot be done diagonally this may lead to colour shifts
                    # colour ratios for G interpolation
                    cru = cfa[indx-v1]*2/(eps+rbint[indx]+rbint[indx-v2])
                    crd = cfa[indx+v1]*2/(eps+rbint[indx]+rbint[indx+v2])
                    crl = cfa[indx-1]*2/(eps+rbint[indx]+rbint[indx-2])
                    crr = cfa[indx+1]*2/(eps+rbint[indx]+rbint[indx+2])
                    # interpolated G via adaptive ratios or Hamilton-Adams in each cardinal direction
                    if abs(1 - cru) < arthresh:
                        gu = rbint[indx] * cru
                    else:
                        gu = cfa[indx - v1] + 0.5 * (rbint[indx] - rbint[(indx - v1)])
                    if abs(1 - crd) < arthresh:
                        gd = rbint[indx] * crd
                    else:
                        gd = cfa[indx + v1] + 0.5 * (rbint[indx] - rbint[(indx + v1)])
                    if abs(1 - crl) < arthresh:
                        gl = rbint[indx] * crl
                    else:
                        gl = cfa[indx - 1] + 0.5 * (rbint[indx] - rbint[(indx - 1)])
                    if abs(1 - crr) < arthresh:
                        gr = rbint[indx] * crr
                    else:
                        gr = cfa[indx + 1] + 0.5 * (rbint[indx] - rbint[(indx + 1)])
                    # interpolated G via adaptive weighTS of cardinal evaluations
                    Gintv = (dirwts[indx - v1][0] * gd + dirwts[indx + v1][0] * gu) / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
                    Ginth = (dirwts[indx - 1][1] * gr + dirwts[indx + 1][1] * gl) / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
                    # bound the interpolation in regions of high saturation
                    if Gintv < rbint[indx]:
                        if (2 * Gintv < rbint[indx]):
                            Gintv = np.median([Gintv , cfa[indx - v1], cfa[indx + v1]])
                        else:
                            vwt = 2 * (rbint[indx] - Gintv) / (eps + Gintv + rbint[indx])
                            Gintv = vwt * Gintv + (1 - vwt) * np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
                    if Ginth < rbint[indx]:
                        if 2 * Ginth < rbint[indx]:
                            Ginth = np.median([Ginth , cfa[indx - 1], cfa[indx + 1]])
                        else:
                            hwt = 2 * (rbint[indx] - Ginth) / (eps + Ginth + rbint[indx])
                            Ginth = hwt * Ginth + (1 - hwt) * np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
                    if Ginth > clip_pt:
                        Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
                    if Gintv > clip_pt:
                        Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
                    rgb[indx][1] = Ginth*(1-hvwt[indx]) + Gintv*hvwt[indx]
                    Dgrb[indx][0] = rgb[indx][1]-cfa[indx]
            # end of diagonal interpolation correction
            # fancy chrominance interpolation
            # (ey,ex) is location of R site
            for rr in range(13-ey, rr1-12, 2):
                for cc in range(13-ex, cc1-12, 2):
                    indx = rr*TS+cc
                    Dgrb[indx][1]=Dgrb[indx][0] # split out G-B from G-R
                    Dgrb[indx][0]=0
            for rr in range(12, rr1-12):
                c = int(1- cfarray[rr, 12+(cfarray[rr,2]&1)]/2)
                for cc in range(12+(cfarray[rr,2]&1), cc1-12, 2):
                    indx = rr * TS + cc
                    wtnw=1/(eps+abs(Dgrb[indx-m1][c]-Dgrb[indx+m1][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx-m3][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-m3][c]))
                    wtne=1/(eps+abs(Dgrb[indx+p1][c]-Dgrb[indx-p1][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx+p3][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+p3][c]))
                    wtsw=1/(eps+abs(Dgrb[indx-p1][c]-Dgrb[indx+p1][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+m3][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx-p3][c]))
                    wtse=1/(eps+abs(Dgrb[indx+m1][c]-Dgrb[indx-m1][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-p3][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx+m3][c]))
                    Dgrb[indx][c]=(wtnw*(1.325*Dgrb[indx-m1][c]-0.175*Dgrb[indx-m3][c]-0.075*Dgrb[indx-m1-2][c]-0.075*Dgrb[indx-m1-v2][c] )+ wtne*(1.325*Dgrb[indx+p1][c]-0.175*Dgrb[indx+p3][c]-0.075*Dgrb[indx+p1+2][c]-0.075*Dgrb[indx+p1+v2][c] )+ wtsw*(1.325*Dgrb[indx-p1][c]-0.175*Dgrb[indx-p3][c]-0.075*Dgrb[indx-p1-2][c]-0.075*Dgrb[indx-p1-v2][c] )+ wtse*(1.325*Dgrb[indx+m1][c]-0.175*Dgrb[indx+m3][c]-0.075*Dgrb[indx+m1+2][c]-0.075*Dgrb[indx+m1+v2][c] ))/(wtnw+wtne+wtsw+wtse)
            # NOTE(review): ``indx`` is not assigned inside this loop; the
            # body presumably needs ``indx = rr*TS+cc`` — verify.
            for rr in range(12, rr1-12):
                # c = int(cfarray[rr, 12+(cfarray[rr,1]&1)+1]/2)
                for cc in range(12+(cfarray[rr,1]&1), cc1-12, 2):
                    for c in range(2):
                        Dgrb[indx][c]=((hvwt[indx-v1])*Dgrb[indx-v1][c]+(1-hvwt[indx+1])*Dgrb[indx+1][c]+(1-hvwt[indx-1])*Dgrb[indx-1][c]+(hvwt[indx+v1])*Dgrb[indx+v1][c])/((hvwt[indx-v1])+(1-hvwt[indx+1])+(1-hvwt[indx-1])+(hvwt[indx+v1]))
            # reconstruct R and B from G and the chrominance differences
            for rr in range(12, rr1-12):
                for cc in range(12, cc1-12):
                    indx = rr * TS + cc
                    rgb[indx][0]=(rgb[indx][1]-Dgrb[indx][0])
                    rgb[indx][2]=(rgb[indx][1]-Dgrb[indx][1])
            # copy smoothed results back to image matrix
            for rr in range(16, rr1-16):
                row = rr + top
                for cc in range(16, cc1-16):
                    col = cc + left
                    for c in range(3):
                        image[row, col, c] = int(rgb[rr*TS+cc, c] * 65535 + 0.5)
    # end of main loop
    return image
# Define some utility functions for demosaicing
# For AMaZE
def fc(cfa, r, c):
    """Return the CFA colour index at image position (r, c).

    ``cfa`` is the 2x2 Bayer pattern; the pattern repeats with period 2
    in both directions, so only the parity of r and c matters.
    """
    row = r & 1
    col = c & 1
    return cfa[row, col]
def intp(a, b, c):
    """Linear interpolation between c and b with weight a.

    Returns c + a*(b - c): c when a == 0, b when a == 1.
    """
    return c + a * (b - c)
def SQR(x):
    """Return x squared.

    Bug fix: the original return line was corrupted by stray text
    (``return x ** 2 | 53.492447 | 978 | 0.471789 |``), which is not
    valid Python; restored to the obvious square.
    """
    return x ** 2
def amaze_demosaic(src, raw):
    """Demosaic ``src`` with AMaZE using the CFA layout from ``raw``.

    NOTE(review): this mutates ``raw.raw_colors`` in place (folds the
    second green plane, index 3, into green, index 1); consider copying
    the array before modifying it.
    """
    cfarray = raw.raw_colors
    cfarray[cfarray == 3] = 1
    rgb = amaze_demosaic_libraw(src, cfarray, raw.daylight_whitebalance)
    return rgb
def amaze_demosaic_libraw(src, cfarray, daylight_wb):
TS = 512
winx = winy = 0
width = src.shape[1]
height = src.shape[0]
image = np.empty([height, width, 3], dtype=np.uint16)
clip_pt = min(daylight_wb[0], daylight_wb[1], daylight_wb[2])
v1 = TS
v2 = 2 * TS
v3 = 3 * TS
p1 = -TS + 1
p2 = -2 * TS + 2
p3 = -3 * TS + 3
m1 = TS + 1
m2 = 2 * TS + 2
m3 = 3 * TS + 3
nbr = [-v2,-2,2,v2,0]
eps, epssq = 1e-5, 1e-10
arthresh=0.75
nyqthresh=0.5
pmthresh=0.25
lbd, ubd = 1, 1
gaussodd = [0.14659727707323927, 0.103592713382435, 0.0732036125103057, 0.0365543548389495]
gaussgrad = [0.07384411893421103, 0.06207511968171489, 0.0521818194747806, 0.03687419286733595, 0.03099732204057846, 0.018413194161458882]
gauss1 = [0.3376688223162362, 0.12171198028231786, 0.04387081413862306]
gausseven = [0.13719494435797422, 0.05640252782101291]
gquinc = [0.169917, 0.108947, 0.069855, 0.0287182]
rgb = np.empty([TS*TS, 3], dtype=np.float32)
delh = np.empty(TS*TS, dtype=np.float32)
delv = np.empty(TS*TS, dtype=np.float32)
delhsq = np.empty(TS*TS, dtype=np.float32)
delvsq = np.empty(TS*TS, dtype=np.float32)
dirwts = np.empty([TS*TS, 2], dtype=np.float32)
vcd = np.empty(TS*TS, dtype=np.float32)
hcd = np.empty(TS*TS, dtype=np.float32)
vcdalt = np.empty(TS*TS, dtype=np.float32)
hcdalt = np.empty(TS*TS, dtype=np.float32)
vcdsq = np.empty(TS*TS, dtype=np.float32)
hcdsq = np.empty(TS*TS, dtype=np.float32)
cddiffsq = np.empty(TS*TS, dtype=np.float32)
hvwt = np.empty(TS*TS, dtype=np.float32)
Dgrb = np.empty([TS*TS, 2], dtype=np.float32)
delp = np.empty(TS*TS, dtype=np.float32)
delm = np.empty(TS*TS, dtype=np.float32)
rbint = np.empty(TS*TS, dtype=np.float32)
Dgrbh2 = np.empty(TS*TS, dtype=np.float32)
Dgrbv2 = np.empty(TS*TS, dtype=np.float32)
dgintv = np.empty(TS*TS, dtype=np.float32)
dginth = np.empty(TS*TS, dtype=np.float32)
Dgrbpsq1 = np.empty(TS*TS, dtype=np.float32)
Dgrbmsq1 = np.empty(TS*TS, dtype=np.float32)
cfa = np.empty(TS*TS, dtype=np.float32)
pmwt = np.empty(TS*TS, dtype=np.float32)
rbp = np.empty(TS*TS, dtype=np.float32)
rbm = np.empty(TS*TS, dtype=np.float32)
nyquist = np.empty(TS*TS, dtype=np.int32)
if cfarray[0][0] == 1:
if cfarray[0][1] == 0:
ex, ey = 1, 0
else:
ex, ey = 0, 1
else:
if cfarray[0][0] == 0:
ex = ey = 0
else:
ex = ey = 1
loop_cnt = 1
for top in range(winy-16, winy+height, TS-32):
for left in range(winx-16, winx+width, TS-32):
print("Loop [{}]: top: {} left: {}".format(loop_cnt, top, left))
loop_cnt += 1
bottom = min(top+TS, winy+height+16)
right = min(left+TS, winx+width+16)
rr1 = bottom - top
cc1 = right - left
rrmin = 16 if top < winy else 0
ccmin = 16 if left < winx else 0
rrmax = winy+height-top if bottom>(winy+height) else rr1
ccmax = winx+width-left if right>(winx+width) else cc1
for rr in range(rrmin, rrmax):
row = rr + top
for cc in range(ccmin, ccmax):
col = cc + left
c = cfarray[rr, cc]
indx1 = rr * TS + cc
indx = row * width + col
rgb[indx1, c] = src[row, col] / 65535
cfa[indx1] = rgb[indx1, c]
if rrmin > 0:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[(32-rr)*TS+cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if rrmax < rr1:
for rr in range(16):
for cc in range(ccmin, ccmax):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc, c] = (src[(winy+height-rr-2), left+cc])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc, c]
if ccmin > 0:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+cc, c] = rgb[rr*TS+32-cc, c]
cfa[rr*TS+cc] = rgb[rr*TS+cc, c]
if ccmax < cc1:
for rr in range(rrmin, rrmax):
for cc in range(16):
c = cfarray[rr, cc]
rgb[rr*TS+ccmax+cc, c] = (src[(top+rr), (winx+width-cc-2)])/65535
cfa[rr*TS+ccmax+cc] = rgb[rr*TS+ccmax+cc, c]
if rrmin > 0 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+cc][c] = rgb[(32-rr)*TS+(32-cc)][c]
cfa[(rr)*TS+cc] = rgb[(rr)*TS+cc][c]
if rrmax < rr1 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+ccmax+cc][c] = (src[(winy+height-rr-2)][(winx+width-cc-2)])/65535
cfa[(rrmax+rr)*TS+ccmax+cc] = rgb[(rrmax+rr)*TS+ccmax+cc][c]
if rrmin > 0 and ccmax < cc1:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rr)*TS+ccmax+cc][c] = (src[(winy+32-rr)][(winx+width-cc-2)])/65535
cfa[(rr)*TS+ccmax+cc] = rgb[(rr)*TS+ccmax+cc][c]
if rrmax < rr1 and ccmin > 0:
for rr in range(16):
for cc in range(16):
c = cfarray[rr, cc]
rgb[(rrmax+rr)*TS+cc][c] = (src[(winy+height-rr-2)][(winx+32-cc)])/65535
cfa[(rrmax+rr)*TS+cc] = rgb[(rrmax+rr)*TS+cc][c]
for rr in range(1, rr1-1):
for cc in range(1, cc1-1):
indx = rr*TS+cc
delh[indx] = abs(cfa[indx + 1] - cfa[indx - 1])
delv[indx] = abs(cfa[indx + v1] - cfa[indx - v1])
delhsq[indx] = SQR(delh[indx])
delvsq[indx] = SQR(delv[indx])
delp[indx] = abs(cfa[indx+p1]-cfa[indx-p1])
delm[indx] = abs(cfa[indx+m1]-cfa[indx-m1])
for rr in range(2, rr1-2):
for cc in range(2, cc1-2):
indx = rr*TS+cc
dirwts[indx][0] = eps+delv[indx+v1]+delv[indx-v1]+delv[indx]
dirwts[indx][1] = eps+delh[indx+1]+delh[indx-1]+delh[indx]
if cfarray[rr, cc] & 1:
Dgrbpsq1[indx]=(SQR(cfa[indx]-cfa[indx-p1])+SQR(cfa[indx]-cfa[indx+p1]))
Dgrbmsq1[indx]=(SQR(cfa[indx]-cfa[indx-m1])+SQR(cfa[indx]-cfa[indx+m1]))
for rr in range(4, rr1 - 4):
for cc in range(4, cc1 - 4):
indx = rr*TS+cc
c = cfarray[rr, cc]
sgn = -1 if c & 1 else 1
nyquist[indx]=0
rbint[indx]=0
cru = cfa[indx - v1] * (dirwts[indx - v2][0] + dirwts[indx][0]) / (dirwts[indx - v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx - v2]))
crd = cfa[indx + v1] * (dirwts[indx + v2][0] + dirwts[indx][0]) / (dirwts[indx + v2][0] * (eps + cfa[indx]) + dirwts[indx][0] * (eps + cfa[indx + v2]))
crl = cfa[indx - 1] * (dirwts[indx - 2][1] + dirwts[indx][1]) / (dirwts[indx - 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx - 2]))
crr = cfa[indx + 1] * (dirwts[indx + 2][1] + dirwts[indx][1]) / (dirwts[indx + 2][1] * (eps + cfa[indx]) + dirwts[indx][1] * (eps + cfa[indx + 2]))
guha = min(clip_pt, cfa[indx - v1] + 0.5 * (cfa[indx] - cfa[indx - v2]))
gdha = min(clip_pt, cfa[indx + v1] + 0.5 * (cfa[indx] - cfa[indx + v2]))
glha = min(clip_pt, cfa[indx - 1] + 0.5 * (cfa[indx] - cfa[indx - 2]))
grha = min(clip_pt, cfa[indx + 1] + 0.5 * (cfa[indx] - cfa[indx + 2]))
guar = cfa[indx] * cru if abs(1-cru) < arthresh else guha
gdar = cfa[indx] * crd if abs(1-crd) < arthresh else gdha
glar = cfa[indx] * crl if abs(1-crl) < arthresh else glha
grar = cfa[indx] * crr if abs(1-crr) < arthresh else grha
hwt = dirwts[indx - 1][1] / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
vwt = dirwts[indx - v1][0] / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
Gintvar = vwt * gdar + (1-vwt) * guar
Ginthar = hwt * grar + (1-hwt) * glar
Gintvha = vwt * gdha + (1-vwt) * guha
Ginthha = hwt * grha + (1-hwt) * glha
vcd[indx] = sgn * (Gintvar-cfa[indx])
hcd[indx] = sgn * (Ginthar-cfa[indx])
vcdalt[indx] = sgn * (Gintvha-cfa[indx])
hcdalt[indx] = sgn * (Ginthha-cfa[indx])
if cfa[indx] > 0.8 * clip_pt or Gintvha > 0.8 * clip_pt or Ginthha > 0.8 * clip_pt:
guar = guha
gdar = gdha
glar = glha
grar = grha
vcd[indx] = vcdalt[indx]
hcd[indx] = hcdalt[indx]
dgintv[indx] = min((guha - gdha) ** 2, (guar - gdar) ** 2)
dginth[indx] = min((glha - grha) ** 2, (glar - grar) ** 2)
for rr in range(4, rr1-4):
for cc in range(4, cc1-4):
c = cfarray[rr, cc]
hcdvar = 3*(SQR(hcd[indx-2])+SQR(hcd[indx])+SQR(hcd[indx+2]))-SQR(hcd[indx-2]+hcd[indx]+hcd[indx+2])
hcdaltvar = 3*(SQR(hcdalt[indx-2])+SQR(hcdalt[indx])+SQR(hcdalt[indx+2]))-SQR(hcdalt[indx-2]+hcdalt[indx]+hcdalt[indx+2])
vcdvar = 3*(SQR(vcd[indx-v2])+SQR(vcd[indx])+SQR(vcd[indx+v2]))-SQR(vcd[indx-v2]+vcd[indx]+vcd[indx+v2])
vcdaltvar = 3*(SQR(vcdalt[indx-v2])+SQR(vcdalt[indx])+SQR(vcdalt[indx+v2]))-SQR(vcdalt[indx-v2]+vcdalt[indx]+vcdalt[indx+v2])
if hcdaltvar < hcdvar:
hcd[indx] = hcdalt[indx]
if vcdaltvar < vcdvar:
vcd[indx] = vcdalt[indx]
if c & 1:
Ginth = -hcd[indx] + cfa[indx]
Gintv = -vcd[indx] + cfa[indx]
if hcd[indx] > 0:
if 3 * hcd[indx] > (Ginth + cfa[indx]):
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
else:
hwt = 1 - 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (-np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx])
if vcd[indx] > 0:
if 3 * vcd[indx] > (Gintv + cfa[indx]):
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else:
vwt = 1 - 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (-np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx])
if Ginth > clip_pt:
hcd[indx] = -np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) + cfa[indx]
if Gintv > clip_pt:
vcd[indx] = -np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) + cfa[indx]
else:
Ginth = hcd[indx] + cfa[indx]
Gintv = vcd[indx] + cfa[indx]
if hcd[indx] < 0:
if 3 * hcd[indx] < -(Ginth + cfa[indx]):
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
else:
hwt = 1 + 3 * hcd[indx] / (eps + Ginth + cfa[indx])
hcd[indx] = hwt * hcd[indx] + (1 - hwt) * (np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx])
if vcd[indx] < 0:
if 3 * vcd[indx] < -(Gintv + cfa[indx]):
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
else:
vwt = 1 + 3 * vcd[indx] / (eps + Gintv + cfa[indx])
vcd[indx] = vwt * vcd[indx] + (1 - vwt) * (np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx])
if Ginth > clip_pt:
hcd[indx] = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]]) - cfa[indx]
if Gintv > clip_pt:
vcd[indx] = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]]) - cfa[indx]
vcdsq[indx] = SQR(vcd[indx])
hcdsq[indx] = SQR(hcd[indx])
cddiffsq[indx] = SQR(vcd[indx]-hcd[indx])
for rr in range(6, rr1-6):
for cc in range(6+(cfarray[rr, 2]&1), cc1-6, 2):
indx = rr * TS + cc
Dgrbvvaru = 4*(vcdsq[indx]+vcdsq[indx-v1]+vcdsq[indx-v2]+vcdsq[indx-v3])-SQR(vcd[indx]+vcd[indx-v1]+vcd[indx-v2]+vcd[indx-v3])
Dgrbvvard = 4*(vcdsq[indx]+vcdsq[indx+v1]+vcdsq[indx+v2]+vcdsq[indx+v3])-SQR(vcd[indx]+vcd[indx+v1]+vcd[indx+v2]+vcd[indx+v3])
Dgrbhvarl = 4*(hcdsq[indx]+hcdsq[indx-1]+hcdsq[indx-2]+hcdsq[indx-3])-SQR(hcd[indx]+hcd[indx-1]+hcd[indx-2]+hcd[indx-3])
Dgrbhvarr = 4*(hcdsq[indx]+hcdsq[indx+1]+hcdsq[indx+2]+hcdsq[indx+3])-SQR(hcd[indx]+hcd[indx+1]+hcd[indx+2]+hcd[indx+3])
hwt = dirwts[indx-1][1]/(dirwts[indx-1][1]+dirwts[indx+1][1])
vwt = dirwts[indx-v1][0]/(dirwts[indx+v1][0]+dirwts[indx-v1][0])
vcdvar = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
Dgrbvvaru = (dgintv[indx])+(dgintv[indx-v1])+(dgintv[indx-v2])
Dgrbvvard = (dgintv[indx])+(dgintv[indx+v1])+(dgintv[indx+v2])
Dgrbhvarl = (dginth[indx])+(dginth[indx-1])+(dginth[indx-2])
Dgrbhvarr = (dginth[indx])+(dginth[indx+1])+(dginth[indx+2])
vcdvar1 = epssq+vwt*Dgrbvvard+(1-vwt)*Dgrbvvaru
hcdvar1 = epssq+hwt*Dgrbhvarr+(1-hwt)*Dgrbhvarl
varwt=hcdvar/(vcdvar+hcdvar)
diffwt=hcdvar1/(vcdvar1+hcdvar1)
if ((0.5 - varwt) * (0.5 - diffwt) > 0) and (abs(0.5 - diffwt) < abs(0.5 - varwt)):
hvwt[indx] = varwt
else:
hvwt[indx] = diffwt
for rr in range(6, rr1-6):
for cc in range(6 + (cfarray[rr, 2]&1), cc1 - 6, 2):
indx = rr * TS + cc
nyqtest = (gaussodd[0]*cddiffsq[indx] + gaussodd[1]*(cddiffsq[indx-m1]+cddiffsq[indx+p1] + cddiffsq[indx-p1]+cddiffsq[indx+m1]) + gaussodd[2]*(cddiffsq[indx-v2]+cddiffsq[indx-2]+ cddiffsq[indx+2]+cddiffsq[indx+v2]) + gaussodd[3]*(cddiffsq[indx-m2]+cddiffsq[indx+p2] + cddiffsq[indx-p2]+cddiffsq[indx+m2]))
nyqtest -= nyqthresh*(gaussgrad[0]*(delhsq[indx]+delvsq[indx])+gaussgrad[1]*(delhsq[indx-v1]+delvsq[indx-v1]+delhsq[indx+1]+delvsq[indx+1] + delhsq[indx-1]+delvsq[indx-1]+delhsq[indx+v1]+delvsq[indx+v1])+ gaussgrad[2]*(delhsq[indx-m1]+delvsq[indx-m1]+delhsq[indx+p1]+delvsq[indx+p1]+ delhsq[indx-p1]+delvsq[indx-p1]+delhsq[indx+m1]+delvsq[indx+m1])+ gaussgrad[3]*(delhsq[indx-v2]+delvsq[indx-v2]+delhsq[indx-2]+delvsq[indx-2]+ delhsq[indx+2]+delvsq[indx+2]+delhsq[indx+v2]+delvsq[indx+v2])+ gaussgrad[4]*(delhsq[indx-2*TS-1]+delvsq[indx-2*TS-1]+delhsq[indx-2*TS+1]+delvsq[indx-2*TS+1]+ delhsq[indx-TS-2]+delvsq[indx-TS-2]+delhsq[indx-TS+2]+delvsq[indx-TS+2]+ delhsq[indx+TS-2]+delvsq[indx+TS-2]+delhsq[indx+TS+2]+delvsq[indx-TS+2]+ delhsq[indx+2*TS-1]+delvsq[indx+2*TS-1]+delhsq[indx+2*TS+1]+delvsq[indx+2*TS+1])+ gaussgrad[5]*(delhsq[indx-m2]+delvsq[indx-m2]+delhsq[indx+p2]+delvsq[indx+p2]+ delhsq[indx-p2]+delvsq[indx-p2]+delhsq[indx+m2]+delvsq[indx+m2]))
if nyqtest > 0:
nyquist[indx] = 1
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
areawt=(nyquist[indx-v2]+nyquist[indx-m1]+nyquist[indx+p1]+nyquist[indx-2]+nyquist[indx]+nyquist[indx+2]+nyquist[indx-p1]+nyquist[indx+m1]+nyquist[indx+v2])
nyquist[indx] = 1 if areawt > 4 else 0
for rr in range(8, rr1 - 8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
sumh = sumv = sumsqh = sumsqv = areawt = 0
for i in range(-6, 7, 2):
for j in range(-6, 7, 2):
indx1 = (rr + i) * TS + cc + j
if nyquist[indx1]:
sumh += cfa[indx1] - 0.5 * (cfa[indx1-1]+cfa[indx1+1])
sumv += cfa[indx1] - 0.5 * (cfa[indx1-v1]+cfa[indx1+v1])
sumsqh += 0.5 * (SQR(cfa[indx1]-cfa[indx1-1]) + SQR(cfa[indx1]-cfa[indx1+1]))
sumsqv += 0.5 * (SQR(cfa[indx1]-cfa[indx1-v1]) + SQR(cfa[indx1]-cfa[indx1+v1]))
areawt += 1
hcdvar = epssq + max(0, areawt*sumsqh-sumh*sumh)
vcdvar = epssq + max(0, areawt*sumsqv-sumv*sumv)
hvwt[indx] = hcdvar / (vcdvar + hcdvar)
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
hvwtalt = 0.25 * (hvwt[indx-m1] + hvwt[indx+p1] + hvwt[indx-p1] + hvwt[indx+m1])
vo = abs(0.5 - hvwt[indx])
ve = abs(0.5 - hvwtalt)
if vo < ve:
hvwt[indx>>1] = hvwtalt
Dgrb[indx][0] = (hcd[indx]*(1-hvwt[indx]) + vcd[indx]*hvwt[indx])
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
if nyquist[indx]:
Dgrbh2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-1][1]+rgb[indx+1][1]))
Dgrbv2[indx] = SQR(rgb[indx][1] - 0.5*(rgb[indx-v1][1]+rgb[indx+v1][1]))
else:
Dgrbh2[indx] = Dgrbv2[indx] = 0
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
if nyquist[indx]:
gvarh = epssq + (gquinc[0]*Dgrbh2[indx]+gquinc[1]*(Dgrbh2[indx-m1]+Dgrbh2[indx+p1]+Dgrbh2[indx-p1]+Dgrbh2[indx+m1])+gquinc[2]*(Dgrbh2[indx-v2]+Dgrbh2[indx-2]+Dgrbh2[indx+2]+Dgrbh2[indx+v2])+gquinc[3]*(Dgrbh2[indx-m2]+Dgrbh2[indx+p2]+Dgrbh2[indx-p2]+Dgrbh2[indx+m2]))
gvarv = epssq + (gquinc[0]*Dgrbv2[indx]+gquinc[1]*(Dgrbv2[indx-m1]+Dgrbv2[indx+p1]+Dgrbv2[indx-p1]+Dgrbv2[indx+m1])+gquinc[2]*(Dgrbv2[indx-v2]+Dgrbv2[indx-2]+Dgrbv2[indx+2]+Dgrbv2[indx+v2])+gquinc[3]*(Dgrbv2[indx-m2]+Dgrbv2[indx+p2]+Dgrbv2[indx-p2]+Dgrbv2[indx+m2]))
Dgrb[indx][0] = (hcd[indx]*gvarv + vcd[indx]*gvarh)/(gvarv+gvarh)
rgb[indx][1] = cfa[indx] + Dgrb[indx][0]
for rr in range(8, rr1-8):
for cc in range(8+(cfarray[rr,2]&1), cc1-8, 2):
indx = rr * TS + cc
rbvarp = epssq + (gausseven[0]*(Dgrbpsq1[indx-v1]+Dgrbpsq1[indx-1]+Dgrbpsq1[indx+1]+Dgrbpsq1[indx+v1]) + gausseven[1]*(Dgrbpsq1[indx-v2-1]+Dgrbpsq1[indx-v2+1]+Dgrbpsq1[indx-2-v1]+Dgrbpsq1[indx+2-v1]+ Dgrbpsq1[indx-2+v1]+Dgrbpsq1[indx+2+v1]+Dgrbpsq1[indx+v2-1]+Dgrbpsq1[indx+v2+1]))
rbvarm = epssq + (gausseven[0]*(Dgrbmsq1[indx-v1]+Dgrbmsq1[indx-1]+Dgrbmsq1[indx+1]+Dgrbmsq1[indx+v1]) + gausseven[1]*(Dgrbmsq1[indx-v2-1]+Dgrbmsq1[indx-v2+1]+Dgrbmsq1[indx-2-v1]+Dgrbmsq1[indx+2-v1]+ Dgrbmsq1[indx-2+v1]+Dgrbmsq1[indx+2+v1]+Dgrbmsq1[indx+v2-1]+Dgrbmsq1[indx+v2+1]))
crse=2*(cfa[indx+m1])/(eps+cfa[indx]+(cfa[indx+m2]))
crnw=2*(cfa[indx-m1])/(eps+cfa[indx]+(cfa[indx-m2]))
crne=2*(cfa[indx+p1])/(eps+cfa[indx]+(cfa[indx+p2]))
crsw=2*(cfa[indx-p1])/(eps+cfa[indx]+(cfa[indx-p2]))
if abs(1 - crse) < arthresh:
rbse = cfa[indx] * crse
else:
rbse = cfa[indx + m1] + 0.5 * (cfa[indx] - cfa[indx + m2])
if abs(1 - crnw) < arthresh:
rbnw = (cfa[indx - m1]) + 0.5 *(cfa[indx] - cfa[indx - m2])
if abs(1 - crne) < arthresh:
rbne = cfa[indx] * crne
else:
rbne = (cfa[indx + p1]) + 0.5 * cfa[indx] - cfa[indx + p2]
if abs(1 - crsw) < arthresh:
rbsw = cfa[indx] * crsw
else:
rbsw = (cfa[indx - p1]) + 0.5 * (cfa[indx] - cfa[indx - p2])
wtse= eps+delm[indx]+delm[indx+m1]+delm[indx+m2]
wtnw= eps+delm[indx]+delm[indx-m1]+delm[indx-m2]
wtne= eps+delp[indx]+delp[indx+p1]+delp[indx+p2]
wtsw= eps+delp[indx]+delp[indx-p1]+delp[indx-p2]
rbm[indx] = (wtse*rbnw+wtnw*rbse)/(wtse+wtnw)
rbp[indx] = (wtne*rbsw+wtsw*rbne)/(wtne+wtsw)
pmwt[indx] = rbvarm/(rbvarp+rbvarm)
if rbp[indx] < cfa[indx]:
if 2 * (rbp[indx]) < cfa[indx]:
rbp[indx] = np.median([rbp[indx] , cfa[indx - p1], cfa[indx + p1]])
else:
pwt = 2 * (cfa[indx] - rbp[indx]) / (eps + rbp[indx] + cfa[indx])
rbp[indx] = pwt * rbp[indx] + (1 - pwt) * np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] < cfa[indx]:
if 2 * (rbm[indx]) < cfa[indx]:
rbm[indx] = np.median([rbm[indx] , cfa[indx - m1], cfa[indx + m1]])
else:
mwt = 2 * (cfa[indx] - rbm[indx]) / (eps + rbm[indx] + cfa[indx])
rbm[indx] = mwt * rbm[indx] + (1 - mwt) * np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
if rbp[indx] > clip_pt:
rbp[indx] = np.median([rbp[indx], cfa[indx - p1], cfa[indx + p1]])
if rbm[indx] > clip_pt:
rbm[indx] = np.median([rbm[indx], cfa[indx - m1], cfa[indx + m1]])
for rr in range(10, rr1-10):
for cc in range(10 + (cfarray[rr, 2]&1), cc1-10, 2):
indx = rr * TS + cc
pmwtalt = 0.25*(pmwt[indx-m1]+pmwt[indx+p1]+pmwt[indx-p1]+pmwt[indx+m1])
vo = abs(0.5-pmwt[indx])
ve = abs(0.5-pmwtalt)
if vo < ve:
pmwt[indx] = pmwtalt
rbint[indx] = 0.5*(cfa[indx] + rbm[indx]*(1-pmwt[indx]) + rbp[indx]*pmwt[indx])
for rr in range(12, rr1 - 12):
for cc in range(12 + (cfarray[rr, 2]&1), cc1 - 12, 2):
indx = rr * TS + cc
if abs(0.5 - pmwt[indx]) < abs(0.5 - hvwt[indx]):
continue
cru = cfa[indx-v1]*2/(eps+rbint[indx]+rbint[indx-v2])
crd = cfa[indx+v1]*2/(eps+rbint[indx]+rbint[indx+v2])
crl = cfa[indx-1]*2/(eps+rbint[indx]+rbint[indx-2])
crr = cfa[indx+1]*2/(eps+rbint[indx]+rbint[indx+2])
if abs(1 - cru) < arthresh:
gu = rbint[indx] * cru
else:
gu = cfa[indx - v1] + 0.5 * (rbint[indx] - rbint[(indx - v1)])
if abs(1 - crd) < arthresh:
gd = rbint[indx] * crd
else:
gd = cfa[indx + v1] + 0.5 * (rbint[indx] - rbint[(indx + v1)])
if abs(1 - crl) < arthresh:
gl = rbint[indx] * crl
else:
gl = cfa[indx - 1] + 0.5 * (rbint[indx] - rbint[(indx - 1)])
if abs(1 - crr) < arthresh:
gr = rbint[indx] * crr
else:
gr = cfa[indx + 1] + 0.5 * (rbint[indx] - rbint[(indx + 1)])
Gintv = (dirwts[indx - v1][0] * gd + dirwts[indx + v1][0] * gu) / (dirwts[indx + v1][0] + dirwts[indx - v1][0])
Ginth = (dirwts[indx - 1][1] * gr + dirwts[indx + 1][1] * gl) / (dirwts[indx - 1][1] + dirwts[indx + 1][1])
if Gintv < rbint[indx]:
if (2 * Gintv < rbint[indx]):
Gintv = np.median([Gintv , cfa[indx - v1], cfa[indx + v1]])
else:
vwt = 2 * (rbint[indx] - Gintv) / (eps + Gintv + rbint[indx])
Gintv = vwt * Gintv + (1 - vwt) * np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
if Ginth < rbint[indx]:
if 2 * Ginth < rbint[indx]:
Ginth = np.median([Ginth , cfa[indx - 1], cfa[indx + 1]])
else:
hwt = 2 * (rbint[indx] - Ginth) / (eps + Ginth + rbint[indx])
Ginth = hwt * Ginth + (1 - hwt) * np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Ginth > clip_pt:
Ginth = np.median([Ginth, cfa[indx - 1], cfa[indx + 1]])
if Gintv > clip_pt:
Gintv = np.median([Gintv, cfa[indx - v1], cfa[indx + v1]])
rgb[indx][1] = Ginth*(1-hvwt[indx]) + Gintv*hvwt[indx]
Dgrb[indx][0] = rgb[indx][1]-cfa[indx]
for rr in range(13-ey, rr1-12, 2):
for cc in range(13-ex, cc1-12, 2):
indx = rr*TS+cc
Dgrb[indx][1]=Dgrb[indx][0]
Dgrb[indx][0]=0
for rr in range(12, rr1-12):
c = int(1- cfarray[rr, 12+(cfarray[rr,2]&1)]/2)
for cc in range(12+(cfarray[rr,2]&1), cc1-12, 2):
indx = rr * TS + cc
wtnw=1/(eps+abs(Dgrb[indx-m1][c]-Dgrb[indx+m1][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx-m3][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-m3][c]))
wtne=1/(eps+abs(Dgrb[indx+p1][c]-Dgrb[indx-p1][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx+p3][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+p3][c]))
wtsw=1/(eps+abs(Dgrb[indx-p1][c]-Dgrb[indx+p1][c])+abs(Dgrb[indx-p1][c]-Dgrb[indx+m3][c])+abs(Dgrb[indx+p1][c]-Dgrb[indx-p3][c]))
wtse=1/(eps+abs(Dgrb[indx+m1][c]-Dgrb[indx-m1][c])+abs(Dgrb[indx+m1][c]-Dgrb[indx-p3][c])+abs(Dgrb[indx-m1][c]-Dgrb[indx+m3][c]))
Dgrb[indx][c]=(wtnw*(1.325*Dgrb[indx-m1][c]-0.175*Dgrb[indx-m3][c]-0.075*Dgrb[indx-m1-2][c]-0.075*Dgrb[indx-m1-v2][c] )+ wtne*(1.325*Dgrb[indx+p1][c]-0.175*Dgrb[indx+p3][c]-0.075*Dgrb[indx+p1+2][c]-0.075*Dgrb[indx+p1+v2][c] )+ wtsw*(1.325*Dgrb[indx-p1][c]-0.175*Dgrb[indx-p3][c]-0.075*Dgrb[indx-p1-2][c]-0.075*Dgrb[indx-p1-v2][c] )+ wtse*(1.325*Dgrb[indx+m1][c]-0.175*Dgrb[indx+m3][c]-0.075*Dgrb[indx+m1+2][c]-0.075*Dgrb[indx+m1+v2][c] ))/(wtnw+wtne+wtsw+wtse)
for rr in range(12, rr1-12):
for cc in range(12+(cfarray[rr,1]&1), cc1-12, 2):
for c in range(2):
Dgrb[indx][c]=((hvwt[indx-v1])*Dgrb[indx-v1][c]+(1-hvwt[indx+1])*Dgrb[indx+1][c]+(1-hvwt[indx-1])*Dgrb[indx-1][c]+(hvwt[indx+v1])*Dgrb[indx+v1][c])/((hvwt[indx-v1])+(1-hvwt[indx+1])+(1-hvwt[indx-1])+(hvwt[indx+v1]))
for rr in range(12, rr1-12):
for cc in range(12, cc1-12):
indx = rr * TS + cc
rgb[indx][0]=(rgb[indx][1]-Dgrb[indx][0])
rgb[indx][2]=(rgb[indx][1]-Dgrb[indx][1])
for rr in range(16, rr1-16):
row = rr + top
for cc in range(16, cc1-16):
col = cc + left
for c in range(3):
image[row, col, c] = int(rgb[rr*TS+cc, c] * 65535 + 0.5)
return image
def fc(cfa, r, c):
return cfa[r&1, c&1]
def intp(a, b, c):
return a * (b - c) + c
def SQR(x):
return x ** 2 | true | true |
f714c47ee80904ec569254931c1f1328492e4608 | 6,440 | py | Python | utils.py | EdLeafe/elastic_irc | 38959e25c0b9b309b89a46c3f7ab0a3576429621 | [
"MIT"
] | null | null | null | utils.py | EdLeafe/elastic_irc | 38959e25c0b9b309b89a46c3f7ab0a3576429621 | [
"MIT"
] | 1 | 2020-07-03T14:36:10.000Z | 2020-07-03T14:36:10.000Z | utils.py | EdLeafe/elastic_irc | 38959e25c0b9b309b89a46c3f7ab0a3576429621 | [
"MIT"
] | null | null | null | import copy
from datetime import datetime
from functools import wraps, update_wrapper
from hashlib import blake2b
import logging
from math import log
import os
from subprocess import Popen, PIPE
import uuid
from dateutil import parser
import elasticsearch
import pymysql
from rich import box
from rich.console import Console
from rich.table import Table
main_cursor = None
HOST = "dodata"
conn = None
CURDIR = os.getcwd()
LOG = logging.getLogger(__name__)
ABBREV_MAP = {
"p": "profox",
"l": "prolinux",
"y": "propython",
"d": "dabo-dev",
"u": "dabo-users",
"c": "codebook",
}
NAME_COLOR = "bright_red"
IntegrityError = pymysql.err.IntegrityError
def runproc(cmd):
    """Run *cmd* through the shell and return ``(stdout, stderr)`` as bytes.

    NOTE: ``shell=True`` means *cmd* is interpreted by the shell -- only
    pass trusted command strings.
    """
    pipe = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
    out, err = pipe.communicate()
    return out, err
def _parse_creds():
fpath = os.path.expanduser("~/.dbcreds")
with open(fpath) as ff:
lines = ff.read().splitlines()
ret = {}
for ln in lines:
key, val = ln.split("=")
ret[key] = val
return ret
def connect():
    """Open a pymysql connection using credentials from ``~/.dbcreds``.

    Falls back to the ``webdata`` database when DB_NAME is absent or empty.
    Rows come back as dicts via ``DictCursor``.
    """
    creds = _parse_creds()
    return pymysql.connect(
        host=HOST,
        user=creds["DB_USERNAME"],
        passwd=creds["DB_PWD"],
        db=creds.get("DB_NAME") or "webdata",
        charset="utf8",
        cursorclass=pymysql.cursors.DictCursor,
    )
def gen_uuid():
    """Return a freshly generated random UUID4 as its canonical string form."""
    return "{}".format(uuid.uuid4())
def get_cursor():
    """Return a shared DictCursor, lazily (re)opening the module connection.

    Mutates the module-level ``conn`` and ``main_cursor`` globals; a
    dropped or never-opened connection invalidates any cached cursor.
    """
    global conn, main_cursor
    # Reconnect when there is no connection or the server closed it.
    if not (conn and conn.open):
        LOG.debug("No DB connection")
        main_cursor = None
        conn = connect()
    if not main_cursor:
        LOG.debug("No cursor")
        main_cursor = conn.cursor(pymysql.cursors.DictCursor)
    return main_cursor
def commit():
    """Commit the current transaction on the module-level connection."""
    conn.commit()
def logit(*args):
    """Append the stringified arguments, space-separated, as one line to ./LOGOUT."""
    line = " ".join(str(arg) for arg in args)
    with open("LOGOUT", "a") as fh:
        fh.write(line + "\n")
def debugout(*args):
    """Append a ``YO!`` marker plus the stringified arguments to /tmp/debugout.

    The original opened the file twice (once for the marker, once for the
    message); a single append produces the exact same on-disk output.
    """
    msg = " ".join(str(arg) for arg in args) + "\n"
    with open("/tmp/debugout", "a") as ff:
        ff.write("YO!")
        ff.write(msg)
def nocache(view):
    """Decorator adding anti-caching headers to a view's response.

    NOTE(review): ``make_response`` is neither defined nor imported in this
    module (it looks like the Flask helper) -- calling a wrapped view will
    raise ``NameError`` until the proper import is added; confirm intent.
    """
    @wraps(view)
    def no_cache(*args, **kwargs):
        response = make_response(view(*args, **kwargs))
        # Defeat browser/proxy caching: stamp freshness and forbid stores.
        response.headers["Last-Modified"] = datetime.now()
        response.headers["Cache-Control"] = (
            "no-store, no-cache, " "must-revalidate, post-check=0, pre-check=0, max-age=0"
        )
        response.headers["Pragma"] = "no-cache"
        response.headers["Expires"] = "-1"
        return response
    return update_wrapper(no_cache, view)
def human_fmt(num):
    """Render a byte count as a human-friendly size string.

    *num* is coerced with ``int()`` so numeric strings are accepted; an
    invalid value deliberately propagates the conversion error. Negative
    values fall through and return ``None`` (pre-existing behaviour,
    kept for compatibility).
    """
    num = int(num)
    if num == 0:
        return "0 bytes"
    if num == 1:
        return "1 byte"
    if num > 1:
        # Unit label paired with how many decimals to show for that unit.
        units = list(zip(["bytes", "K", "MB", "GB", "TB", "PB"], [0, 0, 1, 2, 2, 2]))
        exponent = min(int(log(num, 1024)), len(units) - 1)
        quotient = float(num) / 1024 ** exponent
        unit, num_decimals = units[exponent]
        return "{:.{prec}f} {}".format(quotient, unit, prec=num_decimals)
def format_number(num):
    """Return *num* rendered with comma thousands separators."""
    digits = str(num)
    chunks = []
    while digits:
        chunks.insert(0, digits[-3:])
        digits = digits[:-3]
    return ",".join(chunks)
def get_elastic_client():
    """Return an Elasticsearch client pointed at the module-level HOST."""
    return elasticsearch.Elasticsearch(host=HOST)
def _get_mapping():
    """Fetch the full index-to-mapping dict from the Elasticsearch server."""
    return get_elastic_client().indices.get_mapping()
def get_indices():
    """Return the names of all indices known to the server."""
    return [name for name in _get_mapping()]
def get_mapping(index):
    """Return the field definitions (properties) for *index*, or {} if absent."""
    mappings = _get_mapping().get(index, {})
    return mappings.get("mappings", {}).get("properties", {})
def get_fields(index):
    """Return just the field names for *index* (a dict keys view)."""
    mapping = get_mapping(index)
    return mapping.keys()
def gen_key(orig_rec, digest_size=8):
    """Generate a stable hash key from a record's values.

    The ``id`` field (if present) is excluded, the remaining values are
    stringified and sorted, so dict insertion order does not affect the
    key. Unlike the original, this filters instead of deep-copying the
    whole record just to pop ``id`` -- the input is left untouched either
    way, and non-deep-copyable values no longer break hashing.
    """
    txt_vals = sorted("%s" % val for key, val in orig_rec.items() if key != "id")
    hasher = blake2b(digest_size=digest_size)
    hasher.update("".join(txt_vals).encode("utf-8"))
    return hasher.hexdigest()
def extract_records(resp):
    """Pull the ``_source`` documents out of an Elasticsearch search response."""
    hits = resp["hits"]["hits"]
    return [hit["_source"] for hit in hits]
def massage_date(val):
    """Normalize a date string to ``YYYY-MM-DD HH:MM:SS`` form."""
    return parser.parse(val).strftime("%Y-%m-%d %H:%M:%S")
def massage_date_records(records, field_name):
    """Normalize *field_name* in-place on every record in *records*."""
    for record in records:
        record[field_name] = massage_date(record[field_name])
def print_messages(recs):
    """Render message records as a rich table on stdout, normalizing dates."""
    table = Table(show_header=True, header_style="bold blue_violet")
    table.add_column("MSG #", justify="right")
    table.add_column("List")
    table.add_column("Posted", justify="right")
    table.add_column("From")
    table.add_column("Subject")
    for rec in recs:
        row = (
            str(rec["msg_num"]),
            ABBREV_MAP.get(rec["list_name"]),
            massage_date(rec["posted"]),
            rec["from"],
            rec["subject"],
        )
        table.add_row(*row)
    Console().print(table)
def print_message_list(recs):
    """Render message records as a rich table, coloring sender names and Re:/Aw: prefixes."""
    table = Table(show_header=True, header_style="bold cyan", box=box.HEAVY)
    table.add_column("MSG #")
    table.add_column("List")
    table.add_column("Posted")
    table.add_column("From")
    table.add_column("Subject")
    for rec in recs:
        # Split "Name <addr>" so the display name can be highlighted.
        parts = rec["from"].split("<")
        name = parts[0]
        addr = f"<{parts[1]}" if len(parts) > 1 else ""
        sender = f"[bold {NAME_COLOR}]{name}[/bold {NAME_COLOR}]{addr}"
        subj = rec["subject"]
        if subj.lower().startswith(("re:", "aw:")):
            subj = f"[green]{subj[:3]}[/green]{subj[3:]}"
        table.add_row(
            str(rec["msg_num"]),
            ABBREV_MAP.get(rec["list_name"]),
            rec["posted"],
            sender,
            subj,
        )
    Console().print(table)
| 26.072874 | 90 | 0.612422 | import copy
from datetime import datetime
from functools import wraps, update_wrapper
from hashlib import blake2b
import logging
from math import log
import os
from subprocess import Popen, PIPE
import uuid
from dateutil import parser
import elasticsearch
import pymysql
from rich import box
from rich.console import Console
from rich.table import Table
main_cursor = None
HOST = "dodata"
conn = None
CURDIR = os.getcwd()
LOG = logging.getLogger(__name__)
ABBREV_MAP = {
"p": "profox",
"l": "prolinux",
"y": "propython",
"d": "dabo-dev",
"u": "dabo-users",
"c": "codebook",
}
NAME_COLOR = "bright_red"
IntegrityError = pymysql.err.IntegrityError
def runproc(cmd):
proc = Popen([cmd], shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
stdout_text, stderr_text = proc.communicate()
return stdout_text, stderr_text
def _parse_creds():
fpath = os.path.expanduser("~/.dbcreds")
with open(fpath) as ff:
lines = ff.read().splitlines()
ret = {}
for ln in lines:
key, val = ln.split("=")
ret[key] = val
return ret
def connect():
cls = pymysql.cursors.DictCursor
creds = _parse_creds()
db = creds.get("DB_NAME") or "webdata"
ret = pymysql.connect(
host=HOST,
user=creds["DB_USERNAME"],
passwd=creds["DB_PWD"],
db=db,
charset="utf8",
cursorclass=cls,
)
return ret
def gen_uuid():
return str(uuid.uuid4())
def get_cursor():
global conn, main_cursor
if not (conn and conn.open):
LOG.debug("No DB connection")
main_cursor = None
conn = connect()
if not main_cursor:
LOG.debug("No cursor")
main_cursor = conn.cursor(pymysql.cursors.DictCursor)
return main_cursor
def commit():
conn.commit()
def logit(*args):
argtxt = [str(arg) for arg in args]
msg = " ".join(argtxt) + "\n"
with open("LOGOUT", "a") as ff:
ff.write(msg)
def debugout(*args):
with open("/tmp/debugout", "a") as ff:
ff.write("YO!")
argtxt = [str(arg) for arg in args]
msg = " ".join(argtxt) + "\n"
with open("/tmp/debugout", "a") as ff:
ff.write(msg)
def nocache(view):
@wraps(view)
def no_cache(*args, **kwargs):
response = make_response(view(*args, **kwargs))
response.headers["Last-Modified"] = datetime.now()
response.headers["Cache-Control"] = (
"no-store, no-cache, " "must-revalidate, post-check=0, pre-check=0, max-age=0"
)
response.headers["Pragma"] = "no-cache"
response.headers["Expires"] = "-1"
return response
return update_wrapper(no_cache, view)
def human_fmt(num):
num = int(num)
units = list(zip(["bytes", "K", "MB", "GB", "TB", "PB"], [0, 0, 1, 2, 2, 2]))
if num > 1:
exponent = min(int(log(num, 1024)), len(units) - 1)
quotient = float(num) / 1024 ** exponent
unit, num_decimals = units[exponent]
format_string = "{:.%sf} {}" % (num_decimals)
return format_string.format(quotient, unit)
if num == 0:
return "0 bytes"
if num == 1:
return "1 byte"
def format_number(num):
snum = str(num)
parts = []
while snum:
snum, part = snum[:-3], snum[-3:]
parts.append(part)
parts.reverse()
return ",".join(parts)
def get_elastic_client():
return elasticsearch.Elasticsearch(host=HOST)
def _get_mapping():
es_client = get_elastic_client()
return es_client.indices.get_mapping()
def get_indices():
return list(_get_mapping().keys())
def get_mapping(index):
props = _get_mapping().get(index, {}).get("mappings", {}).get("properties", {})
return props
def get_fields(index):
return get_mapping(index).keys()
def gen_key(orig_rec, digest_size=8):
rec = copy.deepcopy(orig_rec)
# Remove the 'id' field, if present
rec.pop("id", None)
m = blake2b(digest_size=digest_size)
txt_vals = ["%s" % val for val in rec.values()]
txt_vals.sort()
txt = "".join(txt_vals)
m.update(txt.encode("utf-8"))
return m.hexdigest()
def extract_records(resp):
return [r["_source"] for r in resp["hits"]["hits"]]
def massage_date(val):
dt = parser.parse(val)
return dt.strftime("%Y-%m-%d %H:%M:%S")
def massage_date_records(records, field_name):
for rec in records:
rec[field_name] = massage_date(rec[field_name])
def print_messages(recs):
console = Console()
table = Table(show_header=True, header_style="bold blue_violet")
table.add_column("MSG #", justify="right")
table.add_column("List")
table.add_column("Posted", justify="right")
table.add_column("From")
table.add_column("Subject")
for rec in recs:
table.add_row(
str(rec["msg_num"]),
ABBREV_MAP.get(rec["list_name"]),
massage_date(rec["posted"]),
rec["from"],
rec["subject"],
)
console.print(table)
def print_message_list(recs):
    """Render message records as a rich table, highlighting sender names
    and the Re:/Aw: prefix of reply subjects with console markup."""
    table = Table(show_header=True, header_style="bold cyan", box=box.HEAVY)
    for heading in ("MSG #", "List", "Posted", "From", "Subject"):
        table.add_column(heading)
    for rec in recs:
        # Split "Name <addr>" so the display name can be bolded.
        pieces = rec["from"].split("<")
        addr = f"<{pieces[1]}" if len(pieces) > 1 else ""
        sender = f"[bold {NAME_COLOR}]{pieces[0]}[/bold {NAME_COLOR}]{addr}"
        subj = rec["subject"]
        if subj.lower().startswith(("re:", "aw:")):
            # Color just the 3-character reply prefix.
            subj = f"[green]{subj[:3]}[/green]{subj[3:]}"
        table.add_row(
            str(rec["msg_num"]),
            ABBREV_MAP.get(rec["list_name"]),
            rec["posted"],
            sender,
            subj,
        )
    Console().print(table)
| true | true |
f714c4b095d821e174e7a6aa0d767d8a4b0c41f5 | 11,122 | py | Python | glance_docker/glance/registry/client/v1/client.py | tobegit3hub/dockerized-software | 3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632 | [
"Apache-2.0"
] | null | null | null | glance_docker/glance/registry/client/v1/client.py | tobegit3hub/dockerized-software | 3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632 | [
"Apache-2.0"
] | null | null | null | glance_docker/glance/registry/client/v1/client.py | tobegit3hub/dockerized-software | 3781bc1145b6fbb8d5fa2e2eaeaa3aa138a69632 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple client class to speak with any RESTful service that implements
the Glance Registry API
"""
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from glance.common.client import BaseClient
from glance.common import crypt
from glance import i18n
from glance.registry.api.v1 import images
LOG = logging.getLogger(__name__)
_LE = i18n._LE
class RegistryClient(BaseClient):
    """A client for the Registry image metadata service."""
    # Default TCP port for the registry service when none is supplied.
    DEFAULT_PORT = 9191
    def __init__(self, host=None, port=None, metadata_encryption_key=None,
                 identity_headers=None, **kwargs):
        """
        :param metadata_encryption_key: Key used to encrypt 'location' metadata
        """
        self.metadata_encryption_key = metadata_encryption_key
        # NOTE (dprince): by default base client overwrites host and port
        # settings when using keystone. configure_via_auth=False disables
        # this behaviour to ensure we still send requests to the Registry API
        self.identity_headers = identity_headers
        # store available passed request id for do_request call
        self._passed_request_id = kwargs.pop('request_id', None)
        BaseClient.__init__(self, host, port, configure_via_auth=False,
                            **kwargs)
    def decrypt_metadata(self, image_metadata):
        """Decrypt 'location' and 'location_data' in place, if an
        encryption key is configured, and return the metadata dict."""
        if self.metadata_encryption_key:
            if image_metadata.get('location'):
                location = crypt.urlsafe_decrypt(self.metadata_encryption_key,
                                                 image_metadata['location'])
                image_metadata['location'] = location
            if image_metadata.get('location_data'):
                ld = []
                for loc in image_metadata['location_data']:
                    url = crypt.urlsafe_decrypt(self.metadata_encryption_key,
                                                loc['url'])
                    ld.append({'id': loc['id'], 'url': url,
                               'metadata': loc['metadata'],
                               'status': loc['status']})
                image_metadata['location_data'] = ld
        return image_metadata
    def encrypt_metadata(self, image_metadata):
        """Encrypt 'location' and 'location_data' in place, if an
        encryption key is configured, and return the metadata dict."""
        if self.metadata_encryption_key:
            location_url = image_metadata.get('location')
            if location_url:
                location = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                                 location_url,
                                                 64)
                image_metadata['location'] = location
            if image_metadata.get('location_data'):
                ld = []
                for loc in image_metadata['location_data']:
                    # Reuse the already-encrypted value for the primary location.
                    if loc['url'] == location_url:
                        url = location
                    else:
                        url = crypt.urlsafe_encrypt(
                            self.metadata_encryption_key, loc['url'], 64)
                    ld.append({'url': url, 'metadata': loc['metadata'],
                               'status': loc['status'],
                               # NOTE(zhiyan): New location has no ID field.
                               'id': loc.get('id')})
                image_metadata['location_data'] = ld
        return image_metadata
    def get_images(self, **kwargs):
        """
        Returns a list of image id/name mappings from Registry
        :param filters: dict of keys & expected values to filter results
        :param marker: image id after which to start page
        :param limit: max number of images to return
        :param sort_key: results will be ordered by this image attribute
        :param sort_dir: direction in which to order results (asc, desc)
        """
        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
        res = self.do_request("GET", "/images", params=params)
        image_list = jsonutils.loads(res.read())['images']
        # decrypt_metadata mutates each dict in place, so the list is updated.
        for image in image_list:
            image = self.decrypt_metadata(image)
        return image_list
    def do_request(self, method, action, **kwargs):
        """Issue a request to the registry, forwarding identity headers and
        any stored request id; logs the outcome and re-raises failures."""
        try:
            kwargs['headers'] = kwargs.get('headers', {})
            kwargs['headers'].update(self.identity_headers or {})
            if self._passed_request_id:
                kwargs['headers']['X-Openstack-Request-ID'] = (
                    self._passed_request_id)
            res = super(RegistryClient, self).do_request(method,
                                                         action,
                                                         **kwargs)
            status = res.status
            request_id = res.getheader('x-openstack-request-id')
            msg = ("Registry request %(method)s %(action)s HTTP %(status)s"
                   " request id %(request_id)s" %
                   {'method': method, 'action': action,
                    'status': status, 'request_id': request_id})
            LOG.debug(msg)
        except Exception as exc:
            # Log the failure with its exception class, then re-raise.
            with excutils.save_and_reraise_exception():
                exc_name = exc.__class__.__name__
                LOG.exception(_LE("Registry client request %(method)s "
                                  "%(action)s raised %(exc_name)s"),
                              {'method': method, 'action': action,
                               'exc_name': exc_name})
        return res
    def get_images_detailed(self, **kwargs):
        """
        Returns a list of detailed image data mappings from Registry
        :param filters: dict of keys & expected values to filter results
        :param marker: image id after which to start page
        :param limit: max number of images to return
        :param sort_key: results will be ordered by this image attribute
        :param sort_dir: direction in which to order results (asc, desc)
        """
        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
        res = self.do_request("GET", "/images/detail", params=params)
        image_list = jsonutils.loads(res.read())['images']
        # decrypt_metadata mutates each dict in place, so the list is updated.
        for image in image_list:
            image = self.decrypt_metadata(image)
        return image_list
    def get_image(self, image_id):
        """Returns a mapping of image metadata from Registry."""
        res = self.do_request("GET", "/images/%s" % image_id)
        data = jsonutils.loads(res.read())['image']
        return self.decrypt_metadata(data)
    def add_image(self, image_metadata):
        """
        Tells registry about an image's metadata
        """
        headers = {
            'Content-Type': 'application/json',
        }
        if 'image' not in image_metadata:
            image_metadata = dict(image=image_metadata)
        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
        image_metadata['image'] = encrypted_metadata
        body = jsonutils.dumps(image_metadata)
        res = self.do_request("POST", "/images", body=body, headers=headers)
        # Registry returns a JSONified dict(image=image_info)
        data = jsonutils.loads(res.read())
        image = data['image']
        return self.decrypt_metadata(image)
    def update_image(self, image_id, image_metadata, purge_props=False,
                     from_state=None):
        """
        Updates Registry's information about an image
        """
        if 'image' not in image_metadata:
            image_metadata = dict(image=image_metadata)
        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
        image_metadata['image'] = encrypted_metadata
        image_metadata['from_state'] = from_state
        body = jsonutils.dumps(image_metadata)
        headers = {
            'Content-Type': 'application/json',
        }
        # Ask the registry to drop properties not present in this update.
        if purge_props:
            headers["X-Glance-Registry-Purge-Props"] = "true"
        res = self.do_request("PUT", "/images/%s" % image_id, body=body,
                              headers=headers)
        data = jsonutils.loads(res.read())
        image = data['image']
        return self.decrypt_metadata(image)
    def delete_image(self, image_id):
        """
        Deletes Registry's information about an image
        """
        res = self.do_request("DELETE", "/images/%s" % image_id)
        data = jsonutils.loads(res.read())
        image = data['image']
        return image
    def get_image_members(self, image_id):
        """Return a list of membership associations from Registry."""
        res = self.do_request("GET", "/images/%s/members" % image_id)
        data = jsonutils.loads(res.read())['members']
        return data
    def get_member_images(self, member_id):
        """Return a list of membership associations from Registry."""
        res = self.do_request("GET", "/shared-images/%s" % member_id)
        data = jsonutils.loads(res.read())['shared_images']
        return data
    def replace_members(self, image_id, member_data):
        """Replace registry's information about image membership."""
        # Normalize the payload to dict(memberships=[...]).
        if isinstance(member_data, (list, tuple)):
            member_data = dict(memberships=list(member_data))
        elif (isinstance(member_data, dict) and
              'memberships' not in member_data):
            member_data = dict(memberships=[member_data])
        body = jsonutils.dumps(member_data)
        headers = {'Content-Type': 'application/json', }
        res = self.do_request("PUT", "/images/%s/members" % image_id,
                              body=body, headers=headers)
        return self.get_status_code(res) == 204
    def add_member(self, image_id, member_id, can_share=None):
        """Add to registry's information about image membership."""
        body = None
        headers = {}
        # Build up a body if can_share is specified
        if can_share is not None:
            body = jsonutils.dumps(dict(member=dict(can_share=can_share)))
            headers['Content-Type'] = 'application/json'
        url = "/images/%s/members/%s" % (image_id, member_id)
        res = self.do_request("PUT", url, body=body,
                              headers=headers)
        return self.get_status_code(res) == 204
    def delete_member(self, image_id, member_id):
        """Delete registry's information about image membership."""
        res = self.do_request("DELETE", "/images/%s/members/%s" %
                              (image_id, member_id))
        return self.get_status_code(res) == 204
| 41.969811 | 79 | 0.591261 |
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from glance.common.client import BaseClient
from glance.common import crypt
from glance import i18n
from glance.registry.api.v1 import images
LOG = logging.getLogger(__name__)
_LE = i18n._LE
class RegistryClient(BaseClient):
    """A client for the Registry image metadata service."""
    # Default TCP port for the registry service when none is supplied.
    DEFAULT_PORT = 9191
    def __init__(self, host=None, port=None, metadata_encryption_key=None,
                 identity_headers=None, **kwargs):
        """:param metadata_encryption_key: key used to encrypt 'location'."""
        self.metadata_encryption_key = metadata_encryption_key
        self.identity_headers = identity_headers
        # Request id (if any) is forwarded on every do_request call.
        self._passed_request_id = kwargs.pop('request_id', None)
        # configure_via_auth=False keeps host/port pointed at the registry.
        BaseClient.__init__(self, host, port, configure_via_auth=False,
                            **kwargs)
    def decrypt_metadata(self, image_metadata):
        """Decrypt 'location'/'location_data' in place; return the dict."""
        if self.metadata_encryption_key:
            if image_metadata.get('location'):
                location = crypt.urlsafe_decrypt(self.metadata_encryption_key,
                                                 image_metadata['location'])
                image_metadata['location'] = location
            if image_metadata.get('location_data'):
                ld = []
                for loc in image_metadata['location_data']:
                    url = crypt.urlsafe_decrypt(self.metadata_encryption_key,
                                                loc['url'])
                    ld.append({'id': loc['id'], 'url': url,
                               'metadata': loc['metadata'],
                               'status': loc['status']})
                image_metadata['location_data'] = ld
        return image_metadata
    def encrypt_metadata(self, image_metadata):
        """Encrypt 'location'/'location_data' in place; return the dict."""
        if self.metadata_encryption_key:
            location_url = image_metadata.get('location')
            if location_url:
                location = crypt.urlsafe_encrypt(self.metadata_encryption_key,
                                                 location_url,
                                                 64)
                image_metadata['location'] = location
            if image_metadata.get('location_data'):
                ld = []
                for loc in image_metadata['location_data']:
                    # Reuse the already-encrypted primary location value.
                    if loc['url'] == location_url:
                        url = location
                    else:
                        url = crypt.urlsafe_encrypt(
                            self.metadata_encryption_key, loc['url'], 64)
                    ld.append({'url': url, 'metadata': loc['metadata'],
                               'status': loc['status'],
                               # New locations may have no ID yet.
                               'id': loc.get('id')})
                image_metadata['location_data'] = ld
        return image_metadata
    def get_images(self, **kwargs):
        """Return a list of image mappings (supports filter/page kwargs)."""
        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
        res = self.do_request("GET", "/images", params=params)
        image_list = jsonutils.loads(res.read())['images']
        # decrypt_metadata mutates each dict in place.
        for image in image_list:
            image = self.decrypt_metadata(image)
        return image_list
    def do_request(self, method, action, **kwargs):
        """Send a registry request with identity headers and request id;
        logs the result and re-raises any failure."""
        try:
            kwargs['headers'] = kwargs.get('headers', {})
            kwargs['headers'].update(self.identity_headers or {})
            if self._passed_request_id:
                kwargs['headers']['X-Openstack-Request-ID'] = (
                    self._passed_request_id)
            res = super(RegistryClient, self).do_request(method,
                                                         action,
                                                         **kwargs)
            status = res.status
            request_id = res.getheader('x-openstack-request-id')
            msg = ("Registry request %(method)s %(action)s HTTP %(status)s"
                   " request id %(request_id)s" %
                   {'method': method, 'action': action,
                    'status': status, 'request_id': request_id})
            LOG.debug(msg)
        except Exception as exc:
            # Log with exception class name, then re-raise unchanged.
            with excutils.save_and_reraise_exception():
                exc_name = exc.__class__.__name__
                LOG.exception(_LE("Registry client request %(method)s "
                                  "%(action)s raised %(exc_name)s"),
                              {'method': method, 'action': action,
                               'exc_name': exc_name})
        return res
    def get_images_detailed(self, **kwargs):
        """Return a list of detailed image mappings (filter/page kwargs)."""
        params = self._extract_params(kwargs, images.SUPPORTED_PARAMS)
        res = self.do_request("GET", "/images/detail", params=params)
        image_list = jsonutils.loads(res.read())['images']
        # decrypt_metadata mutates each dict in place.
        for image in image_list:
            image = self.decrypt_metadata(image)
        return image_list
    def get_image(self, image_id):
        """Return decrypted metadata for a single image."""
        res = self.do_request("GET", "/images/%s" % image_id)
        data = jsonutils.loads(res.read())['image']
        return self.decrypt_metadata(data)
    def add_image(self, image_metadata):
        """Register a new image's metadata; return the decrypted record."""
        headers = {
            'Content-Type': 'application/json',
        }
        if 'image' not in image_metadata:
            image_metadata = dict(image=image_metadata)
        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
        image_metadata['image'] = encrypted_metadata
        body = jsonutils.dumps(image_metadata)
        res = self.do_request("POST", "/images", body=body, headers=headers)
        # Registry returns a JSONified dict(image=image_info).
        data = jsonutils.loads(res.read())
        image = data['image']
        return self.decrypt_metadata(image)
    def update_image(self, image_id, image_metadata, purge_props=False,
                     from_state=None):
        """Update an image's metadata; return the decrypted record."""
        if 'image' not in image_metadata:
            image_metadata = dict(image=image_metadata)
        encrypted_metadata = self.encrypt_metadata(image_metadata['image'])
        image_metadata['image'] = encrypted_metadata
        image_metadata['from_state'] = from_state
        body = jsonutils.dumps(image_metadata)
        headers = {
            'Content-Type': 'application/json',
        }
        # Ask the registry to drop properties absent from this update.
        if purge_props:
            headers["X-Glance-Registry-Purge-Props"] = "true"
        res = self.do_request("PUT", "/images/%s" % image_id, body=body,
                              headers=headers)
        data = jsonutils.loads(res.read())
        image = data['image']
        return self.decrypt_metadata(image)
    def delete_image(self, image_id):
        """Delete the registry's record for an image; return its metadata."""
        res = self.do_request("DELETE", "/images/%s" % image_id)
        data = jsonutils.loads(res.read())
        image = data['image']
        return image
    def get_image_members(self, image_id):
        """Return the membership list for an image."""
        res = self.do_request("GET", "/images/%s/members" % image_id)
        data = jsonutils.loads(res.read())['members']
        return data
    def get_member_images(self, member_id):
        """Return images shared with the given member."""
        res = self.do_request("GET", "/shared-images/%s" % member_id)
        data = jsonutils.loads(res.read())['shared_images']
        return data
    def replace_members(self, image_id, member_data):
        """Replace an image's membership list; True on HTTP 204."""
        # Normalize the payload to dict(memberships=[...]).
        if isinstance(member_data, (list, tuple)):
            member_data = dict(memberships=list(member_data))
        elif (isinstance(member_data, dict) and
              'memberships' not in member_data):
            member_data = dict(memberships=[member_data])
        body = jsonutils.dumps(member_data)
        headers = {'Content-Type': 'application/json', }
        res = self.do_request("PUT", "/images/%s/members" % image_id,
                              body=body, headers=headers)
        return self.get_status_code(res) == 204
    def add_member(self, image_id, member_id, can_share=None):
        """Add a member to an image; True on HTTP 204."""
        body = None
        headers = {}
        # Only send a body when can_share is explicitly specified.
        if can_share is not None:
            body = jsonutils.dumps(dict(member=dict(can_share=can_share)))
            headers['Content-Type'] = 'application/json'
        url = "/images/%s/members/%s" % (image_id, member_id)
        res = self.do_request("PUT", url, body=body,
                              headers=headers)
        return self.get_status_code(res) == 204
    def delete_member(self, image_id, member_id):
        """Remove a member from an image; True on HTTP 204."""
        res = self.do_request("DELETE", "/images/%s/members/%s" %
                              (image_id, member_id))
        return self.get_status_code(res) == 204
| true | true |
f714c4cb6fce4108162f924f6f32dbbefd36b682 | 4,339 | py | Python | backend/tests/test_mollufy.py | somnisomni/mollufier | 7bc42ac51615f164bd3a479ed5e05cdea5b186d5 | [
"MIT"
] | 14 | 2021-11-13T14:59:34.000Z | 2022-02-14T06:21:49.000Z | backend/tests/test_mollufy.py | somnisomni/mollufier | 7bc42ac51615f164bd3a479ed5e05cdea5b186d5 | [
"MIT"
] | 2 | 2021-11-23T13:54:47.000Z | 2021-11-26T15:35:40.000Z | backend/tests/test_mollufy.py | somnisomni/mollufier | 7bc42ac51615f164bd3a479ed5e05cdea5b186d5 | [
"MIT"
] | 3 | 2021-11-20T16:55:41.000Z | 2021-11-26T15:27:10.000Z | import unittest
from mollufy import mollufy
class MollufyTestSimple(unittest.TestCase):
    """Single-word behaviour of mollufy.mollufy."""
    def test_mollufy_word_2chars(self):
        # TEST 1: a '?' is inserted before the last syllable of 2-char nouns
        cases = [("블루", "블?루"), ("하루", "하?루"), ("감정", "감?정")]
        for word, expected in cases:
            self.assertEqual(mollufy.mollufy(word), expected)
    def test_mollufy_word_manychars_without_param(self):
        # TEST 2: 3+ character nouns stay untouched without the opt-in flag
        for word in ("마술사", "모니터", "아이스크림"):
            self.assertEqual(mollufy.mollufy(word), word)
    def test_mollufy_word_manychars(self):
        # TEST 3: with the flag set, 3+ character nouns are mollufied too
        cases = [("슬리퍼", "슬리?퍼"), ("이구동성", "이구동?성"), ("아메리카노", "아메리카?노")]
        for word, expected in cases:
            self.assertEqual(mollufy.mollufy(word, True), expected)
    def test_mollufy_non_noun_word(self):
        # TEST 4: non-noun words are never mollufied
        self.assertEqual(mollufy.mollufy("좋아"), "좋아")
        for word in ("그만해", "냠냠쩝쩝"):
            self.assertEqual(mollufy.mollufy(word, True), word)
class MollufyTestSentence(unittest.TestCase):
    """Whole-sentence behaviour of mollufy.mollufy."""
    def test_mollufy_sentence_with_one_2chars_word(self):
        # TEST 5: sentence containing one 2-character noun
        cases = [
            ("안녕하세요", "안?녕하세요"),
            ("바다에 갑시다", "바?다에 갑시다"),
            ("재미있는 게임인데", "재미있는 게?임인데"),
        ]
        for sentence, expected in cases:
            self.assertEqual(mollufy.mollufy(sentence), expected)
    def test_mollufy_sentence_with_one_manychar_word(self):
        # TEST 6: sentence containing one 3+ character noun (flag enabled)
        cases = [
            ("참관인이세요?", "참관?인이세요?"),
            ("보드카 너무 써", "보드?카 너무 써"),
            ("필라멘트가 타버렸네", "필라멘?트가 타버렸네"),
        ]
        for sentence, expected in cases:
            self.assertEqual(mollufy.mollufy(sentence, True), expected)
    def test_mollufy_sentence_with_many_2chars_words(self):
        # TEST 7: sentence containing many 2-character nouns
        cases = [
            ("내가 재미있는 게임을 하나 알아냈는데, 나중에 검색해봐", "내가 재미있는 게?임을 하나 알아냈는데, 나?중에 검?색해봐"),
            ("그야말로 연애재판 너는 나에게 얼마만큼의 죄를 물을 거니?", "그야말로 연?애재?판 너는 나에게 얼?마만큼의 죄를 물을 거니?"),
            ("두 글자 명사가 다수 존재하는 문장을 생각하기는 곤란하다", "두 글?자 명?사가 다?수 존?재하는 문?장을 생?각하기는 곤?란하다"),
        ]
        for sentence, expected in cases:
            self.assertEqual(mollufy.mollufy(sentence), expected)
    def test_mollufy_sentence_with_many_words(self):
        # TEST 8: many nouns of any length (flag enabled)
        cases = [
            ("대한민국의 영토는 한반도와 그 부속도서로 한다.", "대한민?국의 영?토는 한반?도와 그 부?속도?서로 한다."),
            (
                "대한민국은 통일을 지향하며, 자유민주적 기본질서에 입각한 평화적 통일 정책을 수립하고 이를 추진한다.",
                "대한민?국은 통?일을 지?향하며, 자?유민?주적 기?본질?서에 입?각한 평?화적 통?일 정?책을 수?립하고 이를 추?진한다.",
            ),
            ("블루 아카이브 정말 건전하고 건강하고 밝은 게임인데...", "블?루 아카이?브 정말 건?전하고 건?강하고 밝은 게?임인데..."),
        ]
        for sentence, expected in cases:
            self.assertEqual(mollufy.mollufy(sentence, True), expected)
    def test_mollufy_sentence_with_many_words_without_param(self):
        # TEST 9: only 2-character nouns change when the flag is off
        self.assertEqual(mollufy.mollufy("그래픽 디자인은 특정 메시지 (혹은 콘텐츠)와 이를 전달하려는 대상자에게 걸맞은 매체 (인쇄물, 웹사이트, 동영상 등)를 선택하여 표현 또는 제작하는 창의적인 과정이다."),
                         "그래픽 디자인은 특?정 메시지 (혹은 콘텐츠)와 이를 전?달하려는 대상자에게 걸맞은 매?체 (인쇄물, 웹사이트, 동영상 등)를 선?택하여 표?현 또는 제?작하는 창?의적인 과?정이다.")
class MollufyTestMeme(unittest.TestCase):
    """Meme-specific words and sentences."""
    def test_mollufy_meme_words(self):
        # TEST 10: meme words
        cases = [("몰루", "몰?루"), ("코하루", "코하?루"), ("아루", "아?루"), ("네루", "네?루")]
        for word, expected in cases:
            self.assertEqual(mollufy.mollufy(word), expected)
    def test_mollufy_meme_sentences(self):
        # TEST 11: meme sentences
        cases = [("몰루는건가...", "몰?루는건가..."), ("내가 몰루가 될께...", "내가 몰?루가 될께...")]
        for sentence, expected in cases:
            self.assertEqual(mollufy.mollufy(sentence), expected)
class MollufyTestAltmark(unittest.TestCase):
    """Alternative '!' mark instead of the default '?'."""
    def test_mollufy_altmark(self):
        # TEST 12: mollufy with alternative mark: [!]
        for word, expected in (("바람", "바!람"), ("아루", "아!루")):
            self.assertEqual(mollufy.mollufy(word, alternativeMark=True), expected)
        cases = [("스튜디오", "스튜디!오"), ("각설탕을 커피에 타먹으면 달게요 안 달게요~", "각설!탕을 커!피에 타먹으면 달게요 안 달게요~")]
        for text, expected in cases:
            self.assertEqual(mollufy.mollufy(text, True, True), expected)
# Allow running this test module directly: `python test_mollufy.py`.
if __name__ == "__main__":
    unittest.main()
| 52.914634 | 176 | 0.704771 | import unittest
from mollufy import mollufy
class MollufyTestSimple(unittest.TestCase):
    """Single-word behaviour of mollufy.mollufy."""
    def test_mollufy_word_2chars(self):
        # A '?' is inserted before the last syllable of 2-character nouns.
        self.assertEqual(mollufy.mollufy("블루"), "블?루")
        self.assertEqual(mollufy.mollufy("하루"), "하?루")
        self.assertEqual(mollufy.mollufy("감정"), "감?정")
    def test_mollufy_word_manychars_without_param(self):
        # 3+ character nouns stay untouched without the opt-in flag.
        self.assertEqual(mollufy.mollufy("마술사"), "마술사")
        self.assertEqual(mollufy.mollufy("모니터"), "모니터")
        self.assertEqual(mollufy.mollufy("아이스크림"), "아이스크림")
    def test_mollufy_word_manychars(self):
        # With the flag set, 3+ character nouns are mollufied too.
        self.assertEqual(mollufy.mollufy("슬리퍼", True), "슬리?퍼")
        self.assertEqual(mollufy.mollufy("이구동성", True), "이구동?성")
        self.assertEqual(mollufy.mollufy("아메리카노", True), "아메리카?노")
    def test_mollufy_non_noun_word(self):
        # Non-noun words are never mollufied.
        self.assertEqual(mollufy.mollufy("좋아"), "좋아")
        self.assertEqual(mollufy.mollufy("그만해", True), "그만해")
        self.assertEqual(mollufy.mollufy("냠냠쩝쩝", True), "냠냠쩝쩝")
class MollufyTestSentence(unittest.TestCase):
    """Whole-sentence behaviour of mollufy.mollufy."""
    def test_mollufy_sentence_with_one_2chars_word(self):
        # Sentence containing one 2-character noun.
        self.assertEqual(mollufy.mollufy("안녕하세요"), "안?녕하세요")
        self.assertEqual(mollufy.mollufy("바다에 갑시다"), "바?다에 갑시다")
        self.assertEqual(mollufy.mollufy("재미있는 게임인데"), "재미있는 게?임인데")
    def test_mollufy_sentence_with_one_manychar_word(self):
        # Sentence containing one 3+ character noun (flag enabled).
        self.assertEqual(mollufy.mollufy("참관인이세요?", True), "참관?인이세요?")
        self.assertEqual(mollufy.mollufy("보드카 너무 써", True), "보드?카 너무 써")
        self.assertEqual(mollufy.mollufy("필라멘트가 타버렸네", True), "필라멘?트가 타버렸네")
    def test_mollufy_sentence_with_many_2chars_words(self):
        # Sentence containing many 2-character nouns.
        self.assertEqual(mollufy.mollufy("내가 재미있는 게임을 하나 알아냈는데, 나중에 검색해봐"), "내가 재미있는 게?임을 하나 알아냈는데, 나?중에 검?색해봐")
        self.assertEqual(mollufy.mollufy("그야말로 연애재판 너는 나에게 얼마만큼의 죄를 물을 거니?"), "그야말로 연?애재?판 너는 나에게 얼?마만큼의 죄를 물을 거니?")
        self.assertEqual(mollufy.mollufy("두 글자 명사가 다수 존재하는 문장을 생각하기는 곤란하다"), "두 글?자 명?사가 다?수 존?재하는 문?장을 생?각하기는 곤?란하다")
    def test_mollufy_sentence_with_many_words(self):
        # Many nouns of any length (flag enabled).
        self.assertEqual(mollufy.mollufy("대한민국의 영토는 한반도와 그 부속도서로 한다.", True), "대한민?국의 영?토는 한반?도와 그 부?속도?서로 한다.")
        self.assertEqual(mollufy.mollufy("대한민국은 통일을 지향하며, 자유민주적 기본질서에 입각한 평화적 통일 정책을 수립하고 이를 추진한다.", True), "대한민?국은 통?일을 지?향하며, 자?유민?주적 기?본질?서에 입?각한 평?화적 통?일 정?책을 수?립하고 이를 추?진한다.")
        self.assertEqual(mollufy.mollufy("블루 아카이브 정말 건전하고 건강하고 밝은 게임인데...", True), "블?루 아카이?브 정말 건?전하고 건?강하고 밝은 게?임인데...")
    def test_mollufy_sentence_with_many_words_without_param(self):
        # Only 2-character nouns change when the flag is off.
        self.assertEqual(mollufy.mollufy("그래픽 디자인은 특정 메시지 (혹은 콘텐츠)와 이를 전달하려는 대상자에게 걸맞은 매체 (인쇄물, 웹사이트, 동영상 등)를 선택하여 표현 또는 제작하는 창의적인 과정이다."),
                         "그래픽 디자인은 특?정 메시지 (혹은 콘텐츠)와 이를 전?달하려는 대상자에게 걸맞은 매?체 (인쇄물, 웹사이트, 동영상 등)를 선?택하여 표?현 또는 제?작하는 창?의적인 과?정이다.")
class MollufyTestMeme(unittest.TestCase):
    """Meme-specific words and sentences."""
    def test_mollufy_meme_words(self):
        # Meme words.
        self.assertEqual(mollufy.mollufy("몰루"), "몰?루")
        self.assertEqual(mollufy.mollufy("코하루"), "코하?루")
        self.assertEqual(mollufy.mollufy("아루"), "아?루")
        self.assertEqual(mollufy.mollufy("네루"), "네?루")
    def test_mollufy_meme_sentences(self):
        # Meme sentences.
        self.assertEqual(mollufy.mollufy("몰루는건가..."), "몰?루는건가...")
        self.assertEqual(mollufy.mollufy("내가 몰루가 될께..."), "내가 몰?루가 될께...")
class MollufyTestAltmark(unittest.TestCase):
    """Alternative '!' mark instead of the default '?'."""
    def test_mollufy_altmark(self):
        # Mollufy with the alternative mark: [!].
        self.assertEqual(mollufy.mollufy("바람", alternativeMark=True), "바!람")
        self.assertEqual(mollufy.mollufy("아루", alternativeMark=True), "아!루")
        self.assertEqual(mollufy.mollufy("스튜디오", True, True), "스튜디!오")
        self.assertEqual(mollufy.mollufy("각설탕을 커피에 타먹으면 달게요 안 달게요~", True, True), "각설!탕을 커!피에 타먹으면 달게요 안 달게요~")
# Allow running this test module directly: `python test_mollufy.py`.
if __name__ == "__main__":
    unittest.main()
| true | true |
f714c57c24f77188279a44953fa4e55425f6c2d6 | 7,804 | py | Python | stai/util/ssl_check.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 10 | 2021-10-02T18:33:56.000Z | 2021-11-14T17:10:48.000Z | stai/util/ssl_check.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 14 | 2021-10-07T22:10:15.000Z | 2021-12-21T09:13:49.000Z | stai/util/ssl_check.py | STATION-I/staicoin-blockchain | b8686c75dd5fe7883115d9613858c9c8cadfc4a7 | [
"Apache-2.0"
] | 6 | 2021-10-29T19:36:59.000Z | 2021-12-19T19:52:57.000Z | import os
import stat
import sys
from stai.util.config import load_config, traverse_dict
from stai.util.permissions import octal_mode_string, verify_file_permissions
from logging import Logger
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
# Modes applied when fixing files whose permissions are too open.
DEFAULT_PERMISSIONS_CERT_FILE: int = 0o644
DEFAULT_PERMISSIONS_KEY_FILE: int = 0o600
# Masks containing permission bits we don't allow
RESTRICT_MASK_CERT_FILE: int = stat.S_IWGRP | stat.S_IXGRP | stat.S_IWOTH | stat.S_IXOTH  # 0o033
RESTRICT_MASK_KEY_FILE: int = (
    stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
)  # 0o077
# Colon-separated key paths into config.yaml that locate certificate files.
CERT_CONFIG_KEY_PATHS = [
    "stai_ssl_ca:crt",
    "daemon_ssl:private_crt",
    "farmer:ssl:private_crt",
    "farmer:ssl:public_crt",
    "full_node:ssl:private_crt",
    "full_node:ssl:public_crt",
    "harvester:stai_ssl_ca:crt",
    "harvester:private_ssl_ca:crt",
    "harvester:ssl:private_crt",
    "introducer:ssl:public_crt",
    "private_ssl_ca:crt",
    "timelord:ssl:private_crt",
    "timelord:ssl:public_crt",
    "ui:daemon_ssl:private_crt",
    "wallet:ssl:private_crt",
    "wallet:ssl:public_crt",
]
# Colon-separated key paths into config.yaml that locate private key files.
KEY_CONFIG_KEY_PATHS = [
    "stai_ssl_ca:key",
    "daemon_ssl:private_key",
    "farmer:ssl:private_key",
    "farmer:ssl:public_key",
    "full_node:ssl:private_key",
    "full_node:ssl:public_key",
    "harvester:stai_ssl_ca:key",
    "harvester:private_ssl_ca:key",
    "harvester:ssl:private_key",
    "introducer:ssl:public_key",
    "private_ssl_ca:key",
    "timelord:ssl:private_key",
    "timelord:ssl:public_key",
    "ui:daemon_ssl:private_key",
    "wallet:ssl:private_key",
    "wallet:ssl:public_key",
]
# Set to keep track of which files we've already warned about
warned_ssl_files: Set[Path] = set()
def get_all_ssl_file_paths(root_path: Path) -> Tuple[List[Path], List[Path]]:
    """Collect every SSL cert and key path referenced by config.yaml.

    Returns (cert_paths, key_paths); a missing or unreadable config
    yields empty lists.  The bundled Mozilla root CA cert is always
    appended to the cert list when the config loads.
    """
    from stai.ssl.create_ssl import get_mozilla_ca_crt

    certs: List[Path] = []
    keys: List[Path] = []
    try:
        config: Dict = load_config(root_path, "config.yaml", exit_on_error=False)
        groups = ((CERT_CONFIG_KEY_PATHS, certs), (KEY_CONFIG_KEY_PATHS, keys))
        for config_keys, bucket in groups:
            for config_key in config_keys:
                try:
                    bucket.append(root_path / Path(traverse_dict(config, config_key)))
                except Exception as e:
                    print(
                        f"Failed to lookup config value for {config_key}: {e}"
                    )  # lgtm [py/clear-text-logging-sensitive-data]
        # Check the Mozilla Root CAs as well
        certs.append(Path(get_mozilla_ca_crt()))
    except (FileNotFoundError, ValueError):
        pass
    return certs, keys
def get_ssl_perm_warning(path: Path, actual_mode: int, expected_mode: int) -> str:
    """Build the too-open-permissions warning line for *path*."""
    actual = octal_mode_string(actual_mode)
    expected = octal_mode_string(expected_mode)
    # lgtm [py/clear-text-logging-sensitive-data]
    return f"Permissions {actual} for '{path}' are too open. Expected {expected}"
def verify_ssl_certs_and_keys(
    cert_paths: List[Path], key_paths: List[Path], log: Optional[Logger] = None
) -> List[Tuple[Path, int, int]]:
    """Check permissions on the given cert/key files.

    Returns a list of (path, actual_mode, expected_mode) tuples for
    every file whose mode violates its restrict mask; violations are
    also logged (when *log* is given) and remembered in warned_ssl_files.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return []
    checks = (
        (cert_paths, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE),
        (key_paths, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE),
    )
    violations: List[Tuple[Path, int, int]] = []
    for paths, restrict_mask, expected_permissions in checks:
        for path in paths:
            try:
                # Check that the file permissions are not too permissive
                is_valid, actual_permissions = verify_file_permissions(path, restrict_mask)
                if not is_valid:
                    if log is not None:
                        log.error(get_ssl_perm_warning(path, actual_permissions, expected_permissions))
                    warned_ssl_files.add(path)
                    violations.append((path, actual_permissions, expected_permissions))
            except Exception as e:
                print(f"Unable to check permissions for {path}: {e}")  # lgtm [py/clear-text-logging-sensitive-data]
    return violations
def check_ssl(root_path: Path) -> None:
    """Sanity-check SSL file permissions under *root_path*.

    Prints a prominent warning listing every cert/key whose permissions
    are too open; no-op on Windows (no POSIX mode bits).
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return None
    certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
    invalid_files = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
    if not invalid_files:
        return None
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    print("@             WARNING: UNPROTECTED SSL FILE!              @")
    print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
    for path, actual_permissions, expected_permissions in invalid_files:
        print(
            get_ssl_perm_warning(path, actual_permissions, expected_permissions)
        )  # lgtm [py/clear-text-logging-sensitive-data]
    print("One or more SSL files were found with permission issues.")
    print("Run `stai init --fix-ssl-permissions` to fix issues.")
def check_and_fix_permissions_for_ssl_file(file: Path, mask: int, updated_mode: int) -> Tuple[bool, bool]:
    """Chmod *file* to *updated_mode* when its mode violates *mask*.

    Returns (was_valid, was_updated): (True, False) when already fine,
    (False, True) after a successful fix, (False, False) on failure.
    On Windows the check is skipped entirely.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return True, False
    try:
        good_perms, mode = verify_file_permissions(file, mask)
        if good_perms:
            return True, False
        print(
            f"Attempting to set permissions {octal_mode_string(updated_mode)} on "
            f"{file}"  # lgtm [py/clear-text-logging-sensitive-data]
        )
        os.chmod(str(file), updated_mode)
        return False, True
    except Exception as e:
        print(f"Failed to change permissions on {file}: {e}")  # lgtm [py/clear-text-logging-sensitive-data]
        return False, False
def fix_ssl(root_path: Path) -> None:
    """Find SSL cert/key files with too-open permissions and chmod them.

    Prints a one-line summary: errors encountered, files updated, or
    everything already correct.  No-op on Windows.
    """
    if sys.platform == "win32" or sys.platform == "cygwin":
        # TODO: ACLs for SSL certs/keys on Windows
        return None
    certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
    any_updated = False
    any_error = False
    for file, mask, updated_mode in verify_ssl_certs_and_keys(certs_to_check, keys_to_check):
        # Check that permissions are correct, and if not, attempt to fix
        valid, fixed = check_and_fix_permissions_for_ssl_file(file, mask, updated_mode)
        any_updated = any_updated or fixed
        any_error = any_error or (not valid and not fixed)
    if any_error:
        print("One or more errors were encountered while updating SSL file permissions...")
    elif any_updated:
        print("Finished updating SSL file permissions")
    else:
        print("SSL file permissions are correct")
import stat
import sys
from stai.util.config import load_config, traverse_dict
from stai.util.permissions import octal_mode_string, verify_file_permissions
from logging import Logger
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
DEFAULT_PERMISSIONS_CERT_FILE: int = 0o644
DEFAULT_PERMISSIONS_KEY_FILE: int = 0o600
RESTRICT_MASK_CERT_FILE: int = stat.S_IWGRP | stat.S_IXGRP | stat.S_IWOTH | stat.S_IXOTH # 0o033
RESTRICT_MASK_KEY_FILE: int = (
stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
) # 0o077
CERT_CONFIG_KEY_PATHS = [
"stai_ssl_ca:crt",
"daemon_ssl:private_crt",
"farmer:ssl:private_crt",
"farmer:ssl:public_crt",
"full_node:ssl:private_crt",
"full_node:ssl:public_crt",
"harvester:stai_ssl_ca:crt",
"harvester:private_ssl_ca:crt",
"harvester:ssl:private_crt",
"introducer:ssl:public_crt",
"private_ssl_ca:crt",
"timelord:ssl:private_crt",
"timelord:ssl:public_crt",
"ui:daemon_ssl:private_crt",
"wallet:ssl:private_crt",
"wallet:ssl:public_crt",
]
KEY_CONFIG_KEY_PATHS = [
"stai_ssl_ca:key",
"daemon_ssl:private_key",
"farmer:ssl:private_key",
"farmer:ssl:public_key",
"full_node:ssl:private_key",
"full_node:ssl:public_key",
"harvester:stai_ssl_ca:key",
"harvester:private_ssl_ca:key",
"harvester:ssl:private_key",
"introducer:ssl:public_key",
"private_ssl_ca:key",
"timelord:ssl:private_key",
"timelord:ssl:public_key",
"ui:daemon_ssl:private_key",
"wallet:ssl:private_key",
"wallet:ssl:public_key",
]
# Set to keep track of which files we've already warned about
warned_ssl_files: Set[Path] = set()
def get_all_ssl_file_paths(root_path: Path) -> Tuple[List[Path], List[Path]]:
from stai.ssl.create_ssl import get_mozilla_ca_crt
all_certs: List[Path] = []
all_keys: List[Path] = []
try:
config: Dict = load_config(root_path, "config.yaml", exit_on_error=False)
for paths, parsed_list in [(CERT_CONFIG_KEY_PATHS, all_certs), (KEY_CONFIG_KEY_PATHS, all_keys)]:
for path in paths:
try:
file = root_path / Path(traverse_dict(config, path))
parsed_list.append(file)
except Exception as e:
print(
f"Failed to lookup config value for {path}: {e}"
)
all_certs.append(Path(get_mozilla_ca_crt()))
except (FileNotFoundError, ValueError):
pass
return all_certs, all_keys
def get_ssl_perm_warning(path: Path, actual_mode: int, expected_mode: int) -> str:
return (
f"Permissions {octal_mode_string(actual_mode)} for "
f"'{path}' are too open. "
f"Expected {octal_mode_string(expected_mode)}"
)
def verify_ssl_certs_and_keys(
cert_paths: List[Path], key_paths: List[Path], log: Optional[Logger] = None
) -> List[Tuple[Path, int, int]]:
if sys.platform == "win32" or sys.platform == "cygwin":
return []
invalid_files_and_modes: List[Tuple[Path, int, int]] = []
def verify_paths(paths: List[Path], restrict_mask: int, expected_permissions: int):
nonlocal invalid_files_and_modes
for path in paths:
try:
is_valid, actual_permissions = verify_file_permissions(path, restrict_mask)
if not is_valid:
if log is not None:
log.error(get_ssl_perm_warning(path, actual_permissions, expected_permissions))
warned_ssl_files.add(path)
invalid_files_and_modes.append((path, actual_permissions, expected_permissions))
except Exception as e:
print(f"Unable to check permissions for {path}: {e}")
verify_paths(cert_paths, RESTRICT_MASK_CERT_FILE, DEFAULT_PERMISSIONS_CERT_FILE)
verify_paths(key_paths, RESTRICT_MASK_KEY_FILE, DEFAULT_PERMISSIONS_KEY_FILE)
return invalid_files_and_modes
def check_ssl(root_path: Path) -> None:
if sys.platform == "win32" or sys.platform == "cygwin":
return None
certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
invalid_files = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
if len(invalid_files):
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
print("@ WARNING: UNPROTECTED SSL FILE! @")
print("@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@")
for path, actual_permissions, expected_permissions in invalid_files:
print(
get_ssl_perm_warning(path, actual_permissions, expected_permissions)
)
print("One or more SSL files were found with permission issues.")
print("Run `stai init --fix-ssl-permissions` to fix issues.")
def check_and_fix_permissions_for_ssl_file(file: Path, mask: int, updated_mode: int) -> Tuple[bool, bool]:
if sys.platform == "win32" or sys.platform == "cygwin":
return True, False
valid: bool = True
updated: bool = False
try:
(good_perms, mode) = verify_file_permissions(file, mask)
if not good_perms:
valid = False
print(
f"Attempting to set permissions {octal_mode_string(updated_mode)} on "
f"{file}"
)
os.chmod(str(file), updated_mode)
updated = True
except Exception as e:
print(f"Failed to change permissions on {file}: {e}")
valid = False
return valid, updated
def fix_ssl(root_path: Path) -> None:
if sys.platform == "win32" or sys.platform == "cygwin":
return None
updated: bool = False
encountered_error: bool = False
certs_to_check, keys_to_check = get_all_ssl_file_paths(root_path)
files_to_fix = verify_ssl_certs_and_keys(certs_to_check, keys_to_check)
for (file, mask, updated_mode) in files_to_fix:
(valid, fixed) = check_and_fix_permissions_for_ssl_file(file, mask, updated_mode)
if fixed:
updated = True
if not valid and not fixed:
encountered_error = True
if encountered_error:
print("One or more errors were encountered while updating SSL file permissions...")
elif updated:
print("Finished updating SSL file permissions")
else:
print("SSL file permissions are correct")
| true | true |
f714c6177d1854e7a182e8a0ce20becbd5ea9017 | 1,320 | py | Python | webdev/urls.py | h-zanetti/jewelry-manager | 74166b89f492303b8ebf5ff8af058f394eb2a28b | [
"MIT"
] | null | null | null | webdev/urls.py | h-zanetti/jewelry-manager | 74166b89f492303b8ebf5ff8af058f394eb2a28b | [
"MIT"
] | 103 | 2021-04-25T21:28:11.000Z | 2022-03-15T01:36:31.000Z | webdev/urls.py | h-zanetti/jewelry-manager | 74166b89f492303b8ebf5ff8af058f394eb2a28b | [
"MIT"
] | null | null | null | """webdev URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webdev.produtos.urls')),
path('users/', include('webdev.users.urls')),
path('fornecedores/', include('webdev.fornecedores.urls')),
path('materiais/', include('webdev.materiais.urls')),
path('financeiro/', include('webdev.financeiro.urls')),
path('vendas/', include('webdev.vendas.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | 41.25 | 78 | 0.720455 | from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('webdev.produtos.urls')),
path('users/', include('webdev.users.urls')),
path('fornecedores/', include('webdev.fornecedores.urls')),
path('materiais/', include('webdev.materiais.urls')),
path('financeiro/', include('webdev.financeiro.urls')),
path('vendas/', include('webdev.vendas.urls')),
]
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | true | true |
f714c61bd4e0f62ce5ce0a72df8b8cb21cc1d5f1 | 6,726 | py | Python | tests/test_other.py | dtzWill/httplib2 | 7ee25dbcc24fbe42d2f7b2839327d58ecf3c8e71 | [
"MIT"
] | null | null | null | tests/test_other.py | dtzWill/httplib2 | 7ee25dbcc24fbe42d2f7b2839327d58ecf3c8e71 | [
"MIT"
] | null | null | null | tests/test_other.py | dtzWill/httplib2 | 7ee25dbcc24fbe42d2f7b2839327d58ecf3c8e71 | [
"MIT"
] | null | null | null | import httplib2
import mock
import os
import pickle
import pytest
import socket
import sys
import tests
import time
from six.moves import urllib
@pytest.mark.skipif(
sys.version_info <= (3,),
reason=(
"TODO: httplib2._convert_byte_str was defined only in python3 code " "version"
),
)
def test_convert_byte_str():
with tests.assert_raises(TypeError):
httplib2._convert_byte_str(4)
assert httplib2._convert_byte_str(b"Hello") == "Hello"
assert httplib2._convert_byte_str("World") == "World"
def test_reflect():
http = httplib2.Http()
with tests.server_reflect() as uri:
response, content = http.request(uri + "?query", "METHOD")
assert response.status == 200
host = urllib.parse.urlparse(uri).netloc
assert content.startswith(
"""\
METHOD /?query HTTP/1.1\r\n\
Host: {host}\r\n""".format(
host=host
).encode()
), content
def test_pickle_http():
http = httplib2.Http(cache=tests.get_cache_path())
new_http = pickle.loads(pickle.dumps(http))
assert tuple(sorted(new_http.__dict__)) == tuple(sorted(http.__dict__))
assert new_http.credentials.credentials == http.credentials.credentials
assert new_http.certificates.credentials == http.certificates.credentials
assert new_http.cache.cache == http.cache.cache
for key in new_http.__dict__:
if key not in ("cache", "certificates", "credentials"):
assert getattr(new_http, key) == getattr(http, key)
def test_pickle_http_with_connection():
http = httplib2.Http()
http.request("http://random-domain:81/", connection_type=tests.MockHTTPConnection)
new_http = pickle.loads(pickle.dumps(http))
assert tuple(http.connections) == ("http:random-domain:81",)
assert new_http.connections == {}
def test_pickle_custom_request_http():
http = httplib2.Http()
http.request = lambda: None
http.request.dummy_attr = "dummy_value"
new_http = pickle.loads(pickle.dumps(http))
assert getattr(new_http.request, "dummy_attr", None) is None
@pytest.mark.xfail(
sys.version_info >= (3,),
reason=(
"FIXME: for unknown reason global timeout test fails in Python3 "
"with response 200"
),
)
def test_timeout_global():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
try:
socket.setdefaulttimeout(0.1)
except Exception:
pytest.skip("cannot set global socket timeout")
try:
http = httplib2.Http()
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
finally:
socket.setdefaulttimeout(None)
def test_timeout_individual():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
http = httplib2.Http(timeout=0.1)
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
def test_timeout_https():
c = httplib2.HTTPSConnectionWithTimeout("localhost", 80, timeout=47)
assert 47 == c.timeout
# @pytest.mark.xfail(
# sys.version_info >= (3,),
# reason='[py3] last request should open new connection, but client does not realize socket was closed by server',
# )
def test_connection_close():
http = httplib2.Http()
g = []
def handler(request):
g.append(request.number)
return tests.http_response_bytes(proto="HTTP/1.1")
with tests.server_request(handler, request_count=3) as uri:
http.request(uri, "GET") # conn1 req1
for c in http.connections.values():
assert c.sock is not None
http.request(uri, "GET", headers={"connection": "close"})
time.sleep(0.7)
http.request(uri, "GET") # conn2 req1
assert g == [1, 2, 1]
def test_get_end2end_headers():
# one end to end header
response = {"content-type": "application/atom+xml", "te": "deflate"}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" in end2end
assert "te" not in end2end
assert "connection" not in end2end
# one end to end header that gets eliminated
response = {
"connection": "content-type",
"content-type": "application/atom+xml",
"te": "deflate",
}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" not in end2end
assert "te" not in end2end
assert "connection" not in end2end
# Degenerate case of no headers
response = {}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
# Degenerate case of connection referrring to a header not passed in
response = {"connection": "content-type"}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
@pytest.mark.xfail(
os.environ.get("TRAVIS_PYTHON_VERSION") in ("2.7", "pypy"),
reason="FIXME: fail on Travis py27 and pypy, works elsewhere",
)
@pytest.mark.parametrize("scheme", ("http", "https"))
def test_ipv6(scheme):
# Even if IPv6 isn't installed on a machine it should just raise socket.error
uri = "{scheme}://[::1]:1/".format(scheme=scheme)
try:
httplib2.Http(timeout=0.1).request(uri)
except socket.gaierror:
assert False, "should get the address family right for IPv6"
except socket.error:
pass
@pytest.mark.parametrize(
"conn_type",
(httplib2.HTTPConnectionWithTimeout, httplib2.HTTPSConnectionWithTimeout),
)
def test_connection_proxy_info_attribute_error(conn_type):
# HTTPConnectionWithTimeout did not initialize its .proxy_info attribute
# https://github.com/httplib2/httplib2/pull/97
# Thanks to Joseph Ryan https://github.com/germanjoey
conn = conn_type("no-such-hostname.", 80)
# TODO: replace mock with dummy local server
with tests.assert_raises(socket.gaierror):
with mock.patch("socket.socket.connect", side_effect=socket.gaierror):
conn.request("GET", "/")
def test_http_443_forced_https():
http = httplib2.Http()
http.force_exception_to_status_code = True
uri = "http://localhost:443/"
# sorry, using internal structure of Http to check chosen scheme
with mock.patch("httplib2.Http._request") as m:
http.request(uri)
assert len(m.call_args) > 0, "expected Http._request() call"
conn = m.call_args[0][0]
assert isinstance(conn, httplib2.HTTPConnectionWithTimeout)
| 32.181818 | 118 | 0.678115 | import httplib2
import mock
import os
import pickle
import pytest
import socket
import sys
import tests
import time
from six.moves import urllib
@pytest.mark.skipif(
sys.version_info <= (3,),
reason=(
"TODO: httplib2._convert_byte_str was defined only in python3 code " "version"
),
)
def test_convert_byte_str():
with tests.assert_raises(TypeError):
httplib2._convert_byte_str(4)
assert httplib2._convert_byte_str(b"Hello") == "Hello"
assert httplib2._convert_byte_str("World") == "World"
def test_reflect():
http = httplib2.Http()
with tests.server_reflect() as uri:
response, content = http.request(uri + "?query", "METHOD")
assert response.status == 200
host = urllib.parse.urlparse(uri).netloc
assert content.startswith(
"""\
METHOD /?query HTTP/1.1\r\n\
Host: {host}\r\n""".format(
host=host
).encode()
), content
def test_pickle_http():
http = httplib2.Http(cache=tests.get_cache_path())
new_http = pickle.loads(pickle.dumps(http))
assert tuple(sorted(new_http.__dict__)) == tuple(sorted(http.__dict__))
assert new_http.credentials.credentials == http.credentials.credentials
assert new_http.certificates.credentials == http.certificates.credentials
assert new_http.cache.cache == http.cache.cache
for key in new_http.__dict__:
if key not in ("cache", "certificates", "credentials"):
assert getattr(new_http, key) == getattr(http, key)
def test_pickle_http_with_connection():
http = httplib2.Http()
http.request("http://random-domain:81/", connection_type=tests.MockHTTPConnection)
new_http = pickle.loads(pickle.dumps(http))
assert tuple(http.connections) == ("http:random-domain:81",)
assert new_http.connections == {}
def test_pickle_custom_request_http():
http = httplib2.Http()
http.request = lambda: None
http.request.dummy_attr = "dummy_value"
new_http = pickle.loads(pickle.dumps(http))
assert getattr(new_http.request, "dummy_attr", None) is None
@pytest.mark.xfail(
sys.version_info >= (3,),
reason=(
"FIXME: for unknown reason global timeout test fails in Python3 "
"with response 200"
),
)
def test_timeout_global():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
try:
socket.setdefaulttimeout(0.1)
except Exception:
pytest.skip("cannot set global socket timeout")
try:
http = httplib2.Http()
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
finally:
socket.setdefaulttimeout(None)
def test_timeout_individual():
def handler(request):
time.sleep(0.5)
return tests.http_response_bytes()
http = httplib2.Http(timeout=0.1)
http.force_exception_to_status_code = True
with tests.server_request(handler) as uri:
response, content = http.request(uri)
assert response.status == 408
assert response.reason.startswith("Request Timeout")
def test_timeout_https():
c = httplib2.HTTPSConnectionWithTimeout("localhost", 80, timeout=47)
assert 47 == c.timeout
def test_connection_close():
http = httplib2.Http()
g = []
def handler(request):
g.append(request.number)
return tests.http_response_bytes(proto="HTTP/1.1")
with tests.server_request(handler, request_count=3) as uri:
http.request(uri, "GET")
for c in http.connections.values():
assert c.sock is not None
http.request(uri, "GET", headers={"connection": "close"})
time.sleep(0.7)
http.request(uri, "GET")
assert g == [1, 2, 1]
def test_get_end2end_headers():
response = {"content-type": "application/atom+xml", "te": "deflate"}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" in end2end
assert "te" not in end2end
assert "connection" not in end2end
response = {
"connection": "content-type",
"content-type": "application/atom+xml",
"te": "deflate",
}
end2end = httplib2._get_end2end_headers(response)
assert "content-type" not in end2end
assert "te" not in end2end
assert "connection" not in end2end
response = {}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
response = {"connection": "content-type"}
end2end = httplib2._get_end2end_headers(response)
assert len(end2end) == 0
@pytest.mark.xfail(
os.environ.get("TRAVIS_PYTHON_VERSION") in ("2.7", "pypy"),
reason="FIXME: fail on Travis py27 and pypy, works elsewhere",
)
@pytest.mark.parametrize("scheme", ("http", "https"))
def test_ipv6(scheme):
uri = "{scheme}://[::1]:1/".format(scheme=scheme)
try:
httplib2.Http(timeout=0.1).request(uri)
except socket.gaierror:
assert False, "should get the address family right for IPv6"
except socket.error:
pass
@pytest.mark.parametrize(
"conn_type",
(httplib2.HTTPConnectionWithTimeout, httplib2.HTTPSConnectionWithTimeout),
)
def test_connection_proxy_info_attribute_error(conn_type):
# HTTPConnectionWithTimeout did not initialize its .proxy_info attribute
# https://github.com/httplib2/httplib2/pull/97
# Thanks to Joseph Ryan https://github.com/germanjoey
conn = conn_type("no-such-hostname.", 80)
# TODO: replace mock with dummy local server
with tests.assert_raises(socket.gaierror):
with mock.patch("socket.socket.connect", side_effect=socket.gaierror):
conn.request("GET", "/")
def test_http_443_forced_https():
http = httplib2.Http()
http.force_exception_to_status_code = True
uri = "http://localhost:443/"
# sorry, using internal structure of Http to check chosen scheme
with mock.patch("httplib2.Http._request") as m:
http.request(uri)
assert len(m.call_args) > 0, "expected Http._request() call"
conn = m.call_args[0][0]
assert isinstance(conn, httplib2.HTTPConnectionWithTimeout)
| true | true |
f714c6db7d33f46788a289e98db5502368671a9e | 1,863 | py | Python | data/transforms/transforms.py | nodiz/reid-strong-baseline | d3c1bc948843d0ad6e52dafa79a74ab94d5d484d | [
"MIT"
] | 1 | 2020-05-30T13:44:16.000Z | 2020-05-30T13:44:16.000Z | data/transforms/transforms.py | nodiz/reid-strong-baseline | d3c1bc948843d0ad6e52dafa79a74ab94d5d484d | [
"MIT"
] | null | null | null | data/transforms/transforms.py | nodiz/reid-strong-baseline | d3c1bc948843d0ad6e52dafa79a74ab94d5d484d | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
@author: liaoxingyu
@contact: liaoxingyu2@jd.com
"""
import math
import random
class RandomErasing(object):
""" Randomly selects a rectangle region in an image and erases its pixels.
'Random Erasing Data Augmentation' by Zhong et al.
See https://arxiv.org/pdf/1708.04896.pdf
Args:
probability: The probability that the Random Erasing operation will be performed.
sl: Minimum proportion of erased area against input image.
sh: Maximum proportion of erased area against input image.
r1: Minimum aspect ratio of erased area.
mean: Erasing value.
"""
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img | 33.872727 | 96 | 0.547504 |
import math
import random
class RandomErasing(object):
def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4914, 0.4822, 0.4465)):
self.probability = probability
self.mean = mean
self.sl = sl
self.sh = sh
self.r1 = r1
def __call__(self, img):
if random.uniform(0, 1) >= self.probability:
return img
for attempt in range(100):
area = img.size()[1] * img.size()[2]
target_area = random.uniform(self.sl, self.sh) * area
aspect_ratio = random.uniform(self.r1, 1 / self.r1)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < img.size()[2] and h < img.size()[1]:
x1 = random.randint(0, img.size()[1] - h)
y1 = random.randint(0, img.size()[2] - w)
if img.size()[0] == 3:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
img[1, x1:x1 + h, y1:y1 + w] = self.mean[1]
img[2, x1:x1 + h, y1:y1 + w] = self.mean[2]
else:
img[0, x1:x1 + h, y1:y1 + w] = self.mean[0]
return img
return img | true | true |
f714c8da841a220b302fd12f3f8bb3b9dedd0598 | 5,003 | py | Python | test/test_ssl.py | nobodyinperson/urllib3 | 79e81f918efe5ae85a276bd3ad8a1939dfa206dd | [
"MIT"
] | null | null | null | test/test_ssl.py | nobodyinperson/urllib3 | 79e81f918efe5ae85a276bd3ad8a1939dfa206dd | [
"MIT"
] | null | null | null | test/test_ssl.py | nobodyinperson/urllib3 | 79e81f918efe5ae85a276bd3ad8a1939dfa206dd | [
"MIT"
] | null | null | null | import platform
import sys
import mock
import pytest
from urllib3.util import ssl_
from urllib3.exceptions import SNIMissingWarning
@pytest.mark.parametrize(
"addr",
[
# IPv6
"::1",
"::",
"FE80::8939:7684:D84b:a5A4%251",
# IPv4
"127.0.0.1",
"8.8.8.8",
b"127.0.0.1",
# IPv6 w/ Zone IDs
"FE80::8939:7684:D84b:a5A4%251",
b"FE80::8939:7684:D84b:a5A4%251",
"FE80::8939:7684:D84b:a5A4%19",
b"FE80::8939:7684:D84b:a5A4%19",
],
)
def test_is_ipaddress_true(addr):
assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
"addr",
[
"www.python.org",
b"www.python.org",
"v2.sg.media-imdb.com",
b"v2.sg.media-imdb.com",
],
)
def test_is_ipaddress_false(addr):
assert not ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "uses_sni"],
[
(True, "127.0.0.1", False),
(False, "www.python.org", False),
(False, "0.0.0.0", False),
(True, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_context_sni_with_ip_address(monkeypatch, has_sni, server_hostname, uses_sni):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if uses_sni:
context.wrap_socket.assert_called_with(sock, server_hostname=server_hostname)
else:
context.wrap_socket.assert_called_with(sock)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "should_warn"],
[
(True, "www.google.com", False),
(True, "127.0.0.1", False),
(False, "127.0.0.1", False),
(False, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_sni_missing_warning_with_ip_addresses(
monkeypatch, has_sni, server_hostname, should_warn
):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
with mock.patch("warnings.warn") as warn:
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if should_warn:
assert warn.call_count >= 1
warnings = [call[0][1] for call in warn.call_args_list]
assert SNIMissingWarning in warnings
else:
assert warn.call_count == 0
@pytest.mark.parametrize(
["ciphers", "expected_ciphers"],
[
(None, ssl_.DEFAULT_CIPHERS),
("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20"),
],
)
def test_create_urllib3_context_set_ciphers(monkeypatch, ciphers, expected_ciphers):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context(ciphers=ciphers) is context
assert context.set_ciphers.call_count == 1
assert context.set_ciphers.call_args == mock.call(expected_ciphers)
def test_wrap_socket_given_context_no_load_default_certs():
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ssl_context=context)
context.load_default_certs.assert_not_called()
def test_wrap_socket_given_ca_certs_no_load_default_certs(monkeypatch):
if platform.python_implementation() == "PyPy" and sys.version_info[0] == 2:
# https://github.com/testing-cabal/mock/issues/438
pytest.xfail("fails with PyPy for Python 2 dues to funcsigs bug")
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")
context.load_default_certs.assert_not_called()
context.load_verify_locations.assert_called_with("/tmp/fake-file", None)
def test_wrap_socket_default_loads_default_certs(monkeypatch):
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock)
context.load_default_certs.assert_called_with()
@pytest.mark.parametrize(
["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(monkeypatch, pha, expected_pha):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
context.post_handshake_auth = pha
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context() is context
assert context.post_handshake_auth == expected_pha
| 28.919075 | 88 | 0.677593 | import platform
import sys
import mock
import pytest
from urllib3.util import ssl_
from urllib3.exceptions import SNIMissingWarning
@pytest.mark.parametrize(
"addr",
[
"::1",
"::",
"FE80::8939:7684:D84b:a5A4%251",
"127.0.0.1",
"8.8.8.8",
b"127.0.0.1",
"FE80::8939:7684:D84b:a5A4%251",
b"FE80::8939:7684:D84b:a5A4%251",
"FE80::8939:7684:D84b:a5A4%19",
b"FE80::8939:7684:D84b:a5A4%19",
],
)
def test_is_ipaddress_true(addr):
assert ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
"addr",
[
"www.python.org",
b"www.python.org",
"v2.sg.media-imdb.com",
b"v2.sg.media-imdb.com",
],
)
def test_is_ipaddress_false(addr):
assert not ssl_.is_ipaddress(addr)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "uses_sni"],
[
(True, "127.0.0.1", False),
(False, "www.python.org", False),
(False, "0.0.0.0", False),
(True, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_context_sni_with_ip_address(monkeypatch, has_sni, server_hostname, uses_sni):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if uses_sni:
context.wrap_socket.assert_called_with(sock, server_hostname=server_hostname)
else:
context.wrap_socket.assert_called_with(sock)
@pytest.mark.parametrize(
["has_sni", "server_hostname", "should_warn"],
[
(True, "www.google.com", False),
(True, "127.0.0.1", False),
(False, "127.0.0.1", False),
(False, "www.google.com", True),
(True, None, False),
(False, None, False),
],
)
def test_sni_missing_warning_with_ip_addresses(
monkeypatch, has_sni, server_hostname, should_warn
):
monkeypatch.setattr(ssl_, "HAS_SNI", has_sni)
sock = mock.Mock()
context = mock.create_autospec(ssl_.SSLContext)
with mock.patch("warnings.warn") as warn:
ssl_.ssl_wrap_socket(sock, server_hostname=server_hostname, ssl_context=context)
if should_warn:
assert warn.call_count >= 1
warnings = [call[0][1] for call in warn.call_args_list]
assert SNIMissingWarning in warnings
else:
assert warn.call_count == 0
@pytest.mark.parametrize(
["ciphers", "expected_ciphers"],
[
(None, ssl_.DEFAULT_CIPHERS),
("ECDH+AESGCM:ECDH+CHACHA20", "ECDH+AESGCM:ECDH+CHACHA20"),
],
)
def test_create_urllib3_context_set_ciphers(monkeypatch, ciphers, expected_ciphers):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context(ciphers=ciphers) is context
assert context.set_ciphers.call_count == 1
assert context.set_ciphers.call_args == mock.call(expected_ciphers)
def test_wrap_socket_given_context_no_load_default_certs():
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ssl_context=context)
context.load_default_certs.assert_not_called()
def test_wrap_socket_given_ca_certs_no_load_default_certs(monkeypatch):
if platform.python_implementation() == "PyPy" and sys.version_info[0] == 2:
pytest.xfail("fails with PyPy for Python 2 dues to funcsigs bug")
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock, ca_certs="/tmp/fake-file")
context.load_default_certs.assert_not_called()
context.load_verify_locations.assert_called_with("/tmp/fake-file", None)
def test_wrap_socket_default_loads_default_certs(monkeypatch):
context = mock.create_autospec(ssl_.SSLContext)
context.load_default_certs = mock.Mock()
context.options = 0
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
sock = mock.Mock()
ssl_.ssl_wrap_socket(sock)
context.load_default_certs.assert_called_with()
@pytest.mark.parametrize(
["pha", "expected_pha"], [(None, None), (False, True), (True, True)]
)
def test_create_urllib3_context_pha(monkeypatch, pha, expected_pha):
context = mock.create_autospec(ssl_.SSLContext)
context.set_ciphers = mock.Mock()
context.options = 0
context.post_handshake_auth = pha
monkeypatch.setattr(ssl_, "SSLContext", lambda *_, **__: context)
assert ssl_.create_urllib3_context() is context
assert context.post_handshake_auth == expected_pha
| true | true |
f714c9384e398c4912bca404e456801ec236b034 | 3,792 | py | Python | setup.py | vincent101/setup.py | 147b387e82b5702dea35868b708142f150f00a1f | [
"MIT"
] | null | null | null | setup.py | vincent101/setup.py | 147b387e82b5702dea35868b708142f150f00a1f | [
"MIT"
] | null | null | null | setup.py | vincent101/setup.py | 147b387e82b5702dea35868b708142f150f00a1f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Note: To use the 'upload' functionality of this file, you must:
# $ pipenv install twine --dev
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data. Edit these constants to describe your project.
NAME = 'mypackage'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/vincent101/myproject'
EMAIL = 'vincent.wangworks@gmail.com'
AUTHOR = 'Vicnet Wang'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# What packages are required for this module to be executed?
REQUIRED = [
    # 'requests', 'maya', 'records',
]
# What packages are optional?
EXTRAS = {
    # 'fancy feature': ['django'],
}
# The rest you shouldn't have to touch too much :)
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
# If VERSION is empty above, the version is read from
# <package>/__version__.py by exec'ing it into `about`.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: build, publish to PyPI, tag."""

    description = 'Build and publish the package.'
    user_options = []

    @staticmethod
    def status(s):
        """Print *s* in bold via ANSI escape codes."""
        bold = '\033[1m{0}\033[0m'
        print(bold.format(s))

    def initialize_options(self):
        # No options to set up; required by the Command interface.
        pass

    def finalize_options(self):
        # No options to validate; required by the Command interface.
        pass

    def run(self):
        """Remove stale builds, build sdist+wheel, upload, and push a git tag."""
        # A missing dist/ directory is fine — just start clean.
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')
        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')
        sys.exit()
# Where the magic happens: register metadata, packages, and the custom
# `upload` command with setuptools.
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    # If your package is a single module, use this instead of 'packages':
    # py_modules=['mypackage'],
    # entry_points={
    #     'console_scripts': ['mycli=mymodule:cli'],
    # },
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='MIT',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # $ setup.py publish support.
    cmdclass={
        'upload': UploadCommand,
    },
)
| 28.727273 | 86 | 0.640032 |
# Comment-stripped duplicate of the setup.py above (dataset `content_no_comment`
# column). Documentation restored; code is unchanged.
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command
# Package meta-data constants.
NAME = 'mypackage'
DESCRIPTION = 'My short description for my project.'
URL = 'https://github.com/vincent101/myproject'
EMAIL = 'vincent.wangworks@gmail.com'
AUTHOR = 'Vicnet Wang'
REQUIRES_PYTHON = '>=3.6.0'
VERSION = '0.1.0'
# Required runtime dependencies (none declared).
REQUIRED = [
]
# Optional extras (none declared).
EXTRAS = {
}
# ------------------------------------------------
# Except, perhaps the License and Trove Classifiers!
# If you do change the License, remember to change the Trove Classifier for that!
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
try:
    with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
        long_description = '\n' + f.read()
except FileNotFoundError:
    long_description = DESCRIPTION
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
    project_slug = NAME.lower().replace("-", "_").replace(" ", "_")
    with open(os.path.join(here, project_slug, '__version__.py')) as f:
        exec(f.read(), about)
else:
    about['__version__'] = VERSION
class UploadCommand(Command):
    """Custom ``setup.py upload`` command: build, publish, and git-tag."""
    description = 'Build and publish the package.'
    user_options = []
    @staticmethod
    def status(s):
        """Print *s* in bold (ANSI escapes)."""
        print('\033[1m{0}\033[0m'.format(s))
    def initialize_options(self):
        # Required by the Command interface; nothing to do.
        pass
    def finalize_options(self):
        # Required by the Command interface; nothing to do.
        pass
    def run(self):
        """Clean dist/, build sdist+wheel, upload via twine, push a git tag."""
        try:
            self.status('Removing previous builds…')
            rmtree(os.path.join(here, 'dist'))
        except OSError:
            # No previous dist/ directory — nothing to clean.
            pass
        self.status('Building Source and Wheel (universal) distribution…')
        os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
        self.status('Uploading the package to PyPI via Twine…')
        os.system('twine upload dist/*')
        self.status('Pushing git tags…')
        os.system('git tag v{0}'.format(about['__version__']))
        os.system('git push --tags')
        sys.exit()
# Register everything with setuptools.
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type='text/markdown',
    author=AUTHOR,
    author_email=EMAIL,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    packages=find_packages(exclude=["tests", "*.tests", "*.tests.*", "tests.*"]),
    install_requires=REQUIRED,
    extras_require=EXTRAS,
    include_package_data=True,
    license='MIT',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    # Expose the custom upload command as `setup.py upload`.
    cmdclass={
        'upload': UploadCommand,
    },
)
| true | true |
f714c9ace3c7e9c796c9b28a1e4cced97ecb105d | 872 | py | Python | setup.py | yuwenhou/ProxyPool-master1 | 21b6c0b7788bb24e728ec75c0b44b6e4b6583595 | [
"Apache-2.0"
] | 803 | 2017-02-23T15:43:28.000Z | 2022-03-25T15:28:19.000Z | setup.py | yuwenhou/ProxyPool-master1 | 21b6c0b7788bb24e728ec75c0b44b6e4b6583595 | [
"Apache-2.0"
] | 31 | 2017-07-30T08:47:10.000Z | 2021-04-24T20:30:54.000Z | ThirdParty/ProxyPool/setup.py | XiMuYouZi/PythonDemo | 476d4d814338f37148bbf1504c0dd94a68f55a05 | [
"MIT"
] | 483 | 2017-04-01T04:08:50.000Z | 2022-03-30T11:40:24.000Z | from setuptools import setup
# Package metadata for the proxy-pool project.
setup(
    name='proxy-pool',
    version='1.0.0',
    description='High performance proxy pool',
    long_description='A proxy pool project modified from WiseDoge/ProxyPool',
    # Fix: setuptools expects `author` to be a single string; the original
    # passed a list, which is emitted verbatim into the metadata.
    author='Germey, WiseDoge',
    author_email='cqc@cuiqingcai.com',
    url='https://github.com/Germey/ProxyPool',
    # NOTE(review): 'proxy-pool' is not a valid importable package name
    # (hyphen); confirm the real package directory before relying on this.
    packages=[
        'proxy-pool'
    ],
    py_modules=['run'],
    include_package_data=True,
    platforms='any',
    install_requires=[
        'aiohttp',
        'requests',
        'flask',
        'redis',
        'pyquery'
    ],
    # `proxy_pool_run` console script dispatches to run:cli.
    entry_points={
        'console_scripts': ['proxy_pool_run=run:cli']
    },
    license='apache 2.0',
    zip_safe=False,
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython'
    ]
)
| 24.914286 | 77 | 0.597477 | from setuptools import setup
# Duplicate (comment-stripped) copy of the proxy-pool setup() call above.
setup(
    name='proxy-pool',
    version='1.0.0',
    description='High performance proxy pool',
    long_description='A proxy pool project modified from WiseDoge/ProxyPool',
    # NOTE(review): setuptools expects `author` to be a string, not a list.
    author=['Germey', 'WiseDoge'],
    author_email='cqc@cuiqingcai.com',
    url='https://github.com/Germey/ProxyPool',
    # NOTE(review): hyphenated package names are not importable — confirm.
    packages=[
        'proxy-pool'
    ],
    py_modules=['run'],
    include_package_data=True,
    platforms='any',
    install_requires=[
        'aiohttp',
        'requests',
        'flask',
        'redis',
        'pyquery'
    ],
    entry_points={
        'console_scripts': ['proxy_pool_run=run:cli']
    },
    license='apache 2.0',
    zip_safe=False,
    classifiers=[
        'Environment :: Console',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython'
    ]
)
f714ca99c5ac8b2d50a2135185af88bb05dfb468 | 1,304 | py | Python | src/nasa_sbm/configuration.py | ReeceHumphreys/NASA-breakup-model-python | 3cca5d603c846b62b31b5ac7652e040b2a31193c | [
"MIT"
] | 3 | 2022-03-07T14:42:14.000Z | 2022-03-08T19:41:04.000Z | src/nasa_sbm/configuration.py | ReeceHumphreys/NASA-breakup-model-python | 3cca5d603c846b62b31b5ac7652e040b2a31193c | [
"MIT"
] | 1 | 2022-03-30T00:44:10.000Z | 2022-03-30T00:44:10.000Z | src/nasa_sbm/configuration.py | ReeceHumphreys/python-sbm | 3cca5d603c846b62b31b5ac7652e040b2a31193c | [
"MIT"
] | null | null | null | import yaml
from enum import Enum
class SimulationType(Enum):
    """Kind of breakup event; values match the YAML config, upper-cased."""
    explosion = "EXPLOSION"
    collision = "COLLISION"
class SatType(Enum):
    """Category of the parent object; values match the YAML config,
    upper-cased. Presumably RB=rocket body, SC=spacecraft, DEB=debris —
    TODO confirm SOC's meaning against the NASA SBM documentation."""
    rb = "RB"
    sat = "SC"
    soc = "SOC"
    deb = "DEB"
class SimulationConfiguration:
    """Simulation settings parsed from a YAML configuration file.

    Expects a mapping with the keys 'minimalCharacteristicLength',
    'simulationType', 'satType' and 'massConservation'.
    """

    def __init__(self, filePath: str):
        """Load and validate the configuration at *filePath*.

        On any read/parse/validation failure the exception is printed and
        swallowed (original best-effort behavior preserved); the private
        attributes then remain unset and the properties below will raise
        AttributeError when accessed.
        """
        try:
            # The `with` block closes the stream automatically; the
            # original's explicit stream.close() was redundant and removed.
            with open(filePath, 'r') as stream:
                data_loaded = yaml.safe_load(stream)
                self._minimalCharacteristicLength = float(
                    data_loaded['minimalCharacteristicLength'])
                self._simulationType = SimulationType(data_loaded['simulationType'].upper())
                self._sat_type = SatType(data_loaded['satType'].upper())
                self._mass_conservation = bool(data_loaded['massConservation'])
        except Exception as e:
            print(f"Exception: {e}")

    @property
    def minimalCharacteristicLength(self) -> float:
        """Smallest characteristic length to simulate (units not specified
        here — presumably meters, TODO confirm)."""
        return self._minimalCharacteristicLength

    @property
    def simulationType(self) -> SimulationType:
        """Whether the event is an explosion or a collision."""
        return self._simulationType

    @property
    def sat_type(self) -> SatType:
        """Category of the parent object (rocket body, spacecraft, ...)."""
        return self._sat_type

    @property
    def mass_conservation(self) -> bool:
        """Whether mass conservation is enforced in the simulation."""
        return self._mass_conservation
| 27.744681 | 92 | 0.631135 | import yaml
from enum import Enum
# Comment-stripped duplicate of the configuration module above; code unchanged.
class SimulationType(Enum):
    """Kind of breakup event; values match the YAML config, upper-cased."""
    explosion = "EXPLOSION"
    collision = "COLLISION"
class SatType(Enum):
    """Category of the parent object (meanings presumed — confirm)."""
    rb = "RB"
    sat = "SC"
    soc = "SOC"
    deb = "DEB"
class SimulationConfiguration:
    """Simulation settings parsed from a YAML configuration file."""
    def __init__(self, filePath: str):
        # Best-effort load: on failure the exception is printed and the
        # attributes stay unset (properties then raise AttributeError).
        try:
            with open(filePath, 'r') as stream:
                data_loaded = yaml.safe_load(stream)
                self._minimalCharacteristicLength = float(
                    data_loaded['minimalCharacteristicLength'])
                self._simulationType = SimulationType(data_loaded['simulationType'].upper())
                self._sat_type = SatType(data_loaded['satType'].upper())
                self._mass_conservation = bool(data_loaded['massConservation'])
                # NOTE(review): redundant — `with` already closes the stream.
                stream.close()
        except Exception as e:
            print(f"Exception: {e}")
    @property
    def minimalCharacteristicLength(self) -> float:
        """Smallest characteristic length to simulate."""
        return self._minimalCharacteristicLength
    @property
    def simulationType(self) -> SimulationType:
        """Explosion or collision."""
        return self._simulationType
    @property
    def sat_type(self) -> SatType:
        """Category of the parent object."""
        return self._sat_type
    @property
    def mass_conservation(self) -> bool:
        """Whether mass conservation is enforced."""
        return self._mass_conservation
| true | true |
f714caabd13b3013f599b17b11b7fce0affc6412 | 1,675 | py | Python | hellodjango/urls.py | Alonski/HelloDjango | d6de2ec2532799e54c893fbc615433681d49bbd9 | [
"MIT"
] | null | null | null | hellodjango/urls.py | Alonski/HelloDjango | d6de2ec2532799e54c893fbc615433681d49bbd9 | [
"MIT"
] | null | null | null | hellodjango/urls.py | Alonski/HelloDjango | d6de2ec2532799e54c893fbc615433681d49bbd9 | [
"MIT"
] | null | null | null | """hellodjango URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse, JsonResponse
def home_page(request):
    """Serve the site root with a static HTML greeting.

    Default content_type is text/html, so the <b> markup is rendered.
    (Dead commented-out debug/JsonResponse experiments removed.)
    """
    return HttpResponse("Hello <b>World!</b>")
def age(request, name, value):
    """View: greet the captured *name* (title-cased) with *value* as the age."""
    message = "{}, you are {} years old".format(name.title(), value)
    return HttpResponse(message)
def mult(request, first, second):
    """View: multiply the two captured numbers and render the equation."""
    product = int(first) * int(second)
    return HttpResponse("{} X {} = {}".format(first, second, product))
def throw_404(request):
    """Catch-all view returning a plain 404 response."""
    return HttpResponse("404 Error", status=404)
# def go(request):
# assert False, request.GET
# Route table: regexes capture named groups that are passed to the views.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^age/(?P<name>\w+)/(?P<value>\d+)/$', age),
    url(r'^mult/(?P<first>\d+)/(?P<second>\d+)/$', mult),
    url(r'^$', home_page),
    # NOTE(review): unanchored r'$' matches any path, acting as a
    # catch-all 404 — confirm this ordering/intent.
    url(r'$', throw_404),
    # url(r'age/(\w+)/$', age),
]
| 31.018519 | 89 | 0.64597 | from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponse, JsonResponse
# Comment-stripped duplicate of the URLconf above; code unchanged.
def home_page(request):
    """Serve the site root with a static HTML greeting."""
    return HttpResponse("Hello <b>World!</b>")
def age(request, name, value):
    """View: greet *name* (title-cased) with *value* as the age."""
    return HttpResponse("{}, you are {} years old".format(name.title(), value))
def mult(request, first, second):
    """View: multiply the two captured numbers and render the equation."""
    return HttpResponse("{} X {} = {}".format(first, second, (int(first) * int(second))))
def throw_404(request):
    """Catch-all view returning a plain 404 response."""
    return HttpResponse("404 Error", status=404)
# Route table; r'$' acts as an unanchored catch-all mapped to the 404 view.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^age/(?P<name>\w+)/(?P<value>\d+)/$', age),
    url(r'^mult/(?P<first>\d+)/(?P<second>\d+)/$', mult),
    url(r'^$', home_page),
    url(r'$', throw_404),
]
| true | true |
f714cb4507e46b6adfd3ffaaf754a90b0df72870 | 10,504 | py | Python | tests/test_plot.py | daxpryce/graspologic | b076f58ca03a41eb2e1462d20a61ff09abfd6045 | [
"MIT"
] | 148 | 2020-09-15T21:45:51.000Z | 2022-03-24T17:33:01.000Z | tests/test_plot.py | daxpryce/graspologic | b076f58ca03a41eb2e1462d20a61ff09abfd6045 | [
"MIT"
] | 533 | 2020-09-15T18:49:00.000Z | 2022-03-25T12:16:58.000Z | tests/test_plot.py | daxpryce/graspologic | b076f58ca03a41eb2e1462d20a61ff09abfd6045 | [
"MIT"
] | 74 | 2020-09-16T02:24:23.000Z | 2022-03-20T20:09:38.000Z | # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import unittest
import numpy as np
from sklearn.mixture import GaussianMixture
from graspologic.plot.plot import (
_sort_inds,
gridplot,
heatmap,
pairplot,
pairplot_with_gmm,
)
from graspologic.simulations.simulations import er_np, sbm
def _test_pairplot_with_gmm_inputs(caller: unittest.TestCase, **kws):
    """Assert pairplot_with_gmm rejects invalid X/labels/gmm arguments.

    ``kws`` is forwarded to GaussianMixture (e.g. covariance_type).
    The unused ``labels`` local of the original was removed.
    """
    X = np.random.rand(15, 3)
    gmm = GaussianMixture(n_components=3, **kws).fit(X)
    # X must be array-like, not a string.
    with caller.assertRaises(ValueError):
        pairplot_with_gmm(X="test", gmm=gmm)
    # labels length must match the number of samples in X.
    with caller.assertRaises(ValueError):
        pairplot_with_gmm(X=X, gmm=gmm, labels=["A"])
    # A fitted GMM is required.
    with caller.assertRaises(NameError):
        pairplot_with_gmm(X, gmm=None)
def _test_pairplot_with_gmm_outputs(**kws):
    """Smoke-test pairplot_with_gmm: the calls must complete without error.

    ``kws`` is forwarded to GaussianMixture (e.g. covariance_type).
    """
    X = np.random.rand(15, 3)
    gmm = GaussianMixture(n_components=3, **kws).fit(X)
    labels = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
    cluster_palette = {0: "red", 1: "blue", 2: "green"}
    label_palette = {"A": "red", "B": "blue", "C": "green"}
    # Minimal call, then a fully-parameterized call with labels/palettes.
    fig = pairplot_with_gmm(X, gmm)
    fig = pairplot_with_gmm(
        X,
        gmm,
        labels=labels,
        cluster_palette=cluster_palette,
        label_palette=label_palette,
    )
class TestPlot(unittest.TestCase):
    """Input-validation and smoke tests for graspologic's plotting API
    (heatmap, gridplot, pairplot, pairplot_with_gmm, _sort_inds)."""

    def test_common_inputs(self):
        """Shared kwargs (figsize/height/title/context/...) must be validated."""
        X = er_np(100, 0.5)
        grid_labels = ["Test1"]
        # test figsize
        with self.assertRaises(TypeError):
            figsize = "bad figsize"
            heatmap(X, figsize=figsize)
        # test height
        height = "1"
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, height=height)
        with self.assertRaises(TypeError):
            pairplot(X, height=height)
        # test title
        title = 1
        with self.assertRaises(TypeError):
            heatmap(X, title=title)
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, title=title)
        with self.assertRaises(TypeError):
            pairplot(X, title=title)
        # test context
        context = 123
        with self.assertRaises(TypeError):
            heatmap(X, context=context)
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, context=context)
        with self.assertRaises(TypeError):
            pairplot(X, context=context)
        context = "journal"
        with self.assertRaises(ValueError):
            heatmap(X, context=context)
        with self.assertRaises(ValueError):
            gridplot([X], grid_labels, context=context)
        with self.assertRaises(ValueError):
            pairplot(X, context=context)
        # test font scales
        font_scales = ["1", []]
        for font_scale in font_scales:
            with self.assertRaises(TypeError):
                heatmap(X, font_scale=font_scale)
            with self.assertRaises(TypeError):
                gridplot([X], grid_labels, font_scale=font_scale)
            # NOTE(review): 'cont_scale' looks like a typo for 'font_scale';
            # the TypeError asserted here may come from the unexpected
            # keyword argument rather than validation — confirm.
            with self.assertRaises(TypeError):
                pairplot(X, cont_scale=font_scale)
        # ticklabels
        with self.assertRaises(TypeError):
            xticklabels = "labels"
            yticklabels = "labels"
            heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
        with self.assertRaises(ValueError):
            xticklabels = ["{}".format(i) for i in range(5)]
            yticklabels = ["{}".format(i) for i in range(5)]
            heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
        with self.assertRaises(TypeError):
            heatmap(X, title_pad="f")
        with self.assertRaises(TypeError):
            gridplot([X], title_pad="f")
        with self.assertRaises(TypeError):
            heatmap(X, hier_label_fontsize="f")
        with self.assertRaises(TypeError):
            gridplot([X], hier_label_fontsize="f")
    def test_heatmap_inputs(self):
        """
        test parameter checks
        """
        X = np.random.rand(10, 10)
        with self.assertRaises(TypeError):
            heatmap(X="input")
        # transform
        with self.assertRaises(ValueError):
            transform = "bad transform"
            heatmap(X, transform=transform)
        # cmap
        with self.assertRaises(TypeError):
            cmap = 123
            heatmap(X, cmap=cmap)
        # center
        with self.assertRaises(TypeError):
            center = "center"
            heatmap(X, center=center)
        # cbar
        with self.assertRaises(TypeError):
            cbar = 1
            heatmap(X, cbar=cbar)
    def test_heatmap_output(self):
        """
        simple function to see if plot is made without errors
        """
        X = er_np(10, 0.5)
        xticklabels = ["Dimension {}".format(i) for i in range(10)]
        yticklabels = ["Dimension {}".format(i) for i in range(10)]
        fig = heatmap(
            X, transform="log", xticklabels=xticklabels, yticklabels=yticklabels
        )
        fig = heatmap(X, transform="zero-boost")
        fig = heatmap(X, transform="simple-all")
        fig = heatmap(X, transform="simple-nonzero")
        fig = heatmap(X, transform="binarize")
        fig = heatmap(X, cmap="gist_rainbow")
    def test_gridplot_inputs(self):
        """gridplot must validate X, labels length, and transform."""
        X = [er_np(10, 0.5)]
        labels = ["ER(10, 0.5)"]
        with self.assertRaises(TypeError):
            gridplot(X="input", labels=labels)
        with self.assertRaises(ValueError):
            gridplot(X, labels=["a", "b"])
        # transform
        with self.assertRaises(ValueError):
            transform = "bad transform"
            gridplot(X, labels=labels, transform=transform)
    def test_gridplot_outputs(self):
        """
        simple function to see if plot is made without errors
        """
        X = [er_np(10, 0.5) for _ in range(2)]
        labels = ["Random A", "Random B"]
        fig = gridplot(X, labels)
        fig = gridplot(X, labels, transform="zero-boost")
        fig = gridplot(X, labels, "simple-all", title="Test", font_scale=0.9)
    def test_pairplot_inputs(self):
        """pairplot must validate X, labels, col_names, and variables."""
        X = np.random.rand(15, 3)
        Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
        # test data
        with self.assertRaises(TypeError):
            pairplot(X="test")
        with self.assertRaises(ValueError):
            pairplot(X=X, labels=["A"])
        with self.assertRaises(TypeError):
            pairplot(X, col_names="A")
        with self.assertRaises(ValueError):
            pairplot(X, col_names=["1", "2"])
        with self.assertRaises(ValueError):
            pairplot(X, col_names=["1", "2", "3"], variables=[1, 2, 3, 4])
        with self.assertRaises(KeyError):
            pairplot(X, col_names=["1", "2", "3"], variables=["A", "B"])
    def test_pairplot_outputs(self):
        """Smoke test: pairplot calls must complete without error."""
        X = np.random.rand(15, 3)
        Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
        col_names = ["Feature1", "Feature2", "Feature3"]
        fig = pairplot(X)
        fig = pairplot(X, Y)
        fig = pairplot(X, Y, col_names)
        fig = pairplot(
            X,
            Y,
            col_names,
            title="Test",
            height=1.5,
            variables=["Feature1", "Feature2"],
        )
    # One input/output check per GaussianMixture covariance type.
    def test_pairplot_with_gmm_inputs_type_full(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="full")
    def test_pairplot_with_gmm_inputs_type_diag(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="diag")
    def test_pairplot_with_gmm_inputs_type_tied(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="tied")
    def test_pairplot_with_gmm_inputs_type_spherical(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="spherical")
    def test_pairplot_with_gmm_outputs_type_full(self):
        _test_pairplot_with_gmm_outputs(covariance_type="full")
    def test_pairplot_with_gmm_outputs_type_diag(self):
        _test_pairplot_with_gmm_outputs(covariance_type="diag")
    def test_pairplot_with_gmm_outputs_type_tied(self):
        _test_pairplot_with_gmm_outputs(covariance_type="tied")
    def test_pairplot_with_gmm_outputs_type_spherical(self):
        _test_pairplot_with_gmm_outputs(covariance_type="spherical")
    def test_sort_inds(self):
        """_sort_inds must order by outer block, then inner block, then
        descending degree within a block."""
        B = np.array(
            [
                [0, 0.2, 0.1, 0.1, 0.1],
                [0.2, 0.8, 0.1, 0.3, 0.1],
                [0.15, 0.1, 0, 0.05, 0.1],
                [0.1, 0.1, 0.2, 1, 0.1],
                [0.1, 0.2, 0.1, 0.1, 0.8],
            ]
        )
        g = sbm([10, 30, 50, 25, 25], B, directed=True)
        degrees = g.sum(axis=0) + g.sum(axis=1)
        # NOTE(review): unused local — candidate for removal.
        degree_sort_inds = np.argsort(degrees)
        labels2 = 40 * ["0"] + 100 * ["1"]
        labels1 = 10 * ["d"] + 30 * ["c"] + 50 * ["d"] + 25 * ["e"] + 25 * ["c"]
        labels1 = np.array(labels1)
        labels2 = np.array(labels2)
        sorted_inds = _sort_inds(g, labels1, labels2, True)
        # sort outer blocks first if given, sort by num verts in the block
        # for inner hier, sort by num verts for that category across the entire graph
        # ie if there are multiple inner hier across different outer blocks, sort
        # by prevalence in the entire graph, not within block
        # this is to make the ordering within outer block consistent
        # within a block, sort by degree
        # outer block order should thus be: 1, 0
        # inner block order should thus be: d, c, e
        # show that outer blocks are sorted correctly
        labels2 = labels2[sorted_inds]
        self.assertTrue(np.all(labels2[:100] == "1"))
        self.assertTrue(np.all(labels2[100:] == "0"))
        # show that inner blocks are sorted correctly
        labels1 = labels1[sorted_inds]
        self.assertTrue(np.all(labels1[:50] == "d"))
        self.assertTrue(np.all(labels1[50:75] == "c"))
        self.assertTrue(np.all(labels1[75:100] == "e"))
        self.assertTrue(np.all(labels1[100:110] == "d"))
        self.assertTrue(np.all(labels1[110:] == "c"))
        # show that within block, everything is in descending degree order
        degrees = degrees[sorted_inds]
        self.assertTrue(np.all(np.diff(degrees[:50]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[50:75]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[75:100]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[100:110]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[110:]) <= 0))
| 33.883871 | 85 | 0.590251 |
import unittest
import numpy as np
from sklearn.mixture import GaussianMixture
from graspologic.plot.plot import (
_sort_inds,
gridplot,
heatmap,
pairplot,
pairplot_with_gmm,
)
from graspologic.simulations.simulations import er_np, sbm
def _test_pairplot_with_gmm_inputs(caller: unittest.TestCase, **kws):
    """Duplicate (comment-stripped) copy: assert pairplot_with_gmm rejects
    invalid X/labels/gmm arguments. ``kws`` goes to GaussianMixture."""
    X = np.random.rand(15, 3)
    gmm = GaussianMixture(n_components=3, **kws).fit(X)
    # NOTE(review): `labels` is unused — candidate for removal.
    labels = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
    with caller.assertRaises(ValueError):
        pairplot_with_gmm(X="test", gmm=gmm)
    with caller.assertRaises(ValueError):
        pairplot_with_gmm(X=X, gmm=gmm, labels=["A"])
    with caller.assertRaises(NameError):
        pairplot_with_gmm(X, gmm=None)
def _test_pairplot_with_gmm_outputs(**kws):
    """Duplicate (comment-stripped) copy: smoke-test pairplot_with_gmm —
    the calls must complete without error."""
    X = np.random.rand(15, 3)
    gmm = GaussianMixture(n_components=3, **kws).fit(X)
    labels = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
    cluster_palette = {0: "red", 1: "blue", 2: "green"}
    label_palette = {"A": "red", "B": "blue", "C": "green"}
    fig = pairplot_with_gmm(X, gmm)
    fig = pairplot_with_gmm(
        X,
        gmm,
        labels=labels,
        cluster_palette=cluster_palette,
        label_palette=label_palette,
    )
class TestPlot(unittest.TestCase):
    """Duplicate (comment-stripped) copy of the TestPlot suite above:
    input-validation and smoke tests for graspologic's plotting API."""
    def test_common_inputs(self):
        X = er_np(100, 0.5)
        grid_labels = ["Test1"]
        with self.assertRaises(TypeError):
            figsize = "bad figsize"
            heatmap(X, figsize=figsize)
        height = "1"
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, height=height)
        with self.assertRaises(TypeError):
            pairplot(X, height=height)
        title = 1
        with self.assertRaises(TypeError):
            heatmap(X, title=title)
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, title=title)
        with self.assertRaises(TypeError):
            pairplot(X, title=title)
        context = 123
        with self.assertRaises(TypeError):
            heatmap(X, context=context)
        with self.assertRaises(TypeError):
            gridplot([X], grid_labels, context=context)
        with self.assertRaises(TypeError):
            pairplot(X, context=context)
        context = "journal"
        with self.assertRaises(ValueError):
            heatmap(X, context=context)
        with self.assertRaises(ValueError):
            gridplot([X], grid_labels, context=context)
        with self.assertRaises(ValueError):
            pairplot(X, context=context)
        font_scales = ["1", []]
        for font_scale in font_scales:
            with self.assertRaises(TypeError):
                heatmap(X, font_scale=font_scale)
            with self.assertRaises(TypeError):
                gridplot([X], grid_labels, font_scale=font_scale)
            # NOTE(review): 'cont_scale' looks like a typo for 'font_scale'.
            with self.assertRaises(TypeError):
                pairplot(X, cont_scale=font_scale)
        with self.assertRaises(TypeError):
            xticklabels = "labels"
            yticklabels = "labels"
            heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
        with self.assertRaises(ValueError):
            xticklabels = ["{}".format(i) for i in range(5)]
            yticklabels = ["{}".format(i) for i in range(5)]
            heatmap(X, xticklabels=xticklabels, yticklabels=yticklabels)
        with self.assertRaises(TypeError):
            heatmap(X, title_pad="f")
        with self.assertRaises(TypeError):
            gridplot([X], title_pad="f")
        with self.assertRaises(TypeError):
            heatmap(X, hier_label_fontsize="f")
        with self.assertRaises(TypeError):
            gridplot([X], hier_label_fontsize="f")
    def test_heatmap_inputs(self):
        X = np.random.rand(10, 10)
        with self.assertRaises(TypeError):
            heatmap(X="input")
        with self.assertRaises(ValueError):
            transform = "bad transform"
            heatmap(X, transform=transform)
        with self.assertRaises(TypeError):
            cmap = 123
            heatmap(X, cmap=cmap)
        with self.assertRaises(TypeError):
            center = "center"
            heatmap(X, center=center)
        with self.assertRaises(TypeError):
            cbar = 1
            heatmap(X, cbar=cbar)
    def test_heatmap_output(self):
        X = er_np(10, 0.5)
        xticklabels = ["Dimension {}".format(i) for i in range(10)]
        yticklabels = ["Dimension {}".format(i) for i in range(10)]
        fig = heatmap(
            X, transform="log", xticklabels=xticklabels, yticklabels=yticklabels
        )
        fig = heatmap(X, transform="zero-boost")
        fig = heatmap(X, transform="simple-all")
        fig = heatmap(X, transform="simple-nonzero")
        fig = heatmap(X, transform="binarize")
        fig = heatmap(X, cmap="gist_rainbow")
    def test_gridplot_inputs(self):
        X = [er_np(10, 0.5)]
        labels = ["ER(10, 0.5)"]
        with self.assertRaises(TypeError):
            gridplot(X="input", labels=labels)
        with self.assertRaises(ValueError):
            gridplot(X, labels=["a", "b"])
        with self.assertRaises(ValueError):
            transform = "bad transform"
            gridplot(X, labels=labels, transform=transform)
    def test_gridplot_outputs(self):
        X = [er_np(10, 0.5) for _ in range(2)]
        labels = ["Random A", "Random B"]
        fig = gridplot(X, labels)
        fig = gridplot(X, labels, transform="zero-boost")
        fig = gridplot(X, labels, "simple-all", title="Test", font_scale=0.9)
    def test_pairplot_inputs(self):
        X = np.random.rand(15, 3)
        Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
        with self.assertRaises(TypeError):
            pairplot(X="test")
        with self.assertRaises(ValueError):
            pairplot(X=X, labels=["A"])
        with self.assertRaises(TypeError):
            pairplot(X, col_names="A")
        with self.assertRaises(ValueError):
            pairplot(X, col_names=["1", "2"])
        with self.assertRaises(ValueError):
            pairplot(X, col_names=["1", "2", "3"], variables=[1, 2, 3, 4])
        with self.assertRaises(KeyError):
            pairplot(X, col_names=["1", "2", "3"], variables=["A", "B"])
    def test_pairplot_outputs(self):
        X = np.random.rand(15, 3)
        Y = ["A"] * 5 + ["B"] * 5 + ["C"] * 5
        col_names = ["Feature1", "Feature2", "Feature3"]
        fig = pairplot(X)
        fig = pairplot(X, Y)
        fig = pairplot(X, Y, col_names)
        fig = pairplot(
            X,
            Y,
            col_names,
            title="Test",
            height=1.5,
            variables=["Feature1", "Feature2"],
        )
    def test_pairplot_with_gmm_inputs_type_full(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="full")
    def test_pairplot_with_gmm_inputs_type_diag(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="diag")
    def test_pairplot_with_gmm_inputs_type_tied(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="tied")
    def test_pairplot_with_gmm_inputs_type_spherical(self):
        _test_pairplot_with_gmm_inputs(self, covariance_type="spherical")
    def test_pairplot_with_gmm_outputs_type_full(self):
        _test_pairplot_with_gmm_outputs(covariance_type="full")
    def test_pairplot_with_gmm_outputs_type_diag(self):
        _test_pairplot_with_gmm_outputs(covariance_type="diag")
    def test_pairplot_with_gmm_outputs_type_tied(self):
        _test_pairplot_with_gmm_outputs(covariance_type="tied")
    def test_pairplot_with_gmm_outputs_type_spherical(self):
        _test_pairplot_with_gmm_outputs(covariance_type="spherical")
    def test_sort_inds(self):
        B = np.array(
            [
                [0, 0.2, 0.1, 0.1, 0.1],
                [0.2, 0.8, 0.1, 0.3, 0.1],
                [0.15, 0.1, 0, 0.05, 0.1],
                [0.1, 0.1, 0.2, 1, 0.1],
                [0.1, 0.2, 0.1, 0.1, 0.8],
            ]
        )
        g = sbm([10, 30, 50, 25, 25], B, directed=True)
        degrees = g.sum(axis=0) + g.sum(axis=1)
        # NOTE(review): unused local — candidate for removal.
        degree_sort_inds = np.argsort(degrees)
        labels2 = 40 * ["0"] + 100 * ["1"]
        labels1 = 10 * ["d"] + 30 * ["c"] + 50 * ["d"] + 25 * ["e"] + 25 * ["c"]
        labels1 = np.array(labels1)
        labels2 = np.array(labels2)
        sorted_inds = _sort_inds(g, labels1, labels2, True)
        # Expected order: outer block 1 then 0; inner d, c, e;
        # descending degree within each block.
        labels2 = labels2[sorted_inds]
        self.assertTrue(np.all(labels2[:100] == "1"))
        self.assertTrue(np.all(labels2[100:] == "0"))
        labels1 = labels1[sorted_inds]
        self.assertTrue(np.all(labels1[:50] == "d"))
        self.assertTrue(np.all(labels1[50:75] == "c"))
        self.assertTrue(np.all(labels1[75:100] == "e"))
        self.assertTrue(np.all(labels1[100:110] == "d"))
        self.assertTrue(np.all(labels1[110:] == "c"))
        degrees = degrees[sorted_inds]
        self.assertTrue(np.all(np.diff(degrees[:50]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[50:75]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[75:100]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[100:110]) <= 0))
        self.assertTrue(np.all(np.diff(degrees[110:]) <= 0))
| true | true |
f714cb67aece3a563782c8da368a0232d2341c5a | 760 | py | Python | samples/py/obj_traversal.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | 27 | 2015-11-05T22:19:34.000Z | 2021-08-21T02:03:52.000Z | samples/py/obj_traversal.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | null | null | null | samples/py/obj_traversal.py | alexbudmsft/dbgscript | 76dc77109bbeb8f09a893e9dd56012ff8a4b601f | [
"PSF-2.0"
] | 2 | 2015-11-06T04:32:31.000Z | 2016-08-22T18:24:20.000Z | from dbgscript import *
# Sample DbgScript session: walk the current thread's frame, dump locals,
# and traverse fields of a local object.
thd = Process.current_thread
print(thd)
frame = thd.current_frame
# NOTE(review): `locals` shadows the builtin of the same name.
locals = frame.get_locals()
print(locals)
# Print the name, then name+type, of every local in the frame.
for l in locals: print(l.name)
for l in locals: print(l.name, l.type)
# Drill into the first local and its field 'f'.
car1 = locals[0]
print(car1.name)
car1_f = car1['f']
print(car1_f)
print(car1_f.name, car1_f.type)
print(car1_f.name, car1_f.type, car1_f.size)
# Nested field access: f.c with full metadata (name/type/size/address/value).
foo_c = car1_f['c']
print(foo_c)
print(foo_c.name)
print(foo_c.name, foo_c.type)
print(foo_c.name, foo_c.type, foo_c.size, hex(foo_c.address), foo_c.value)
# car1_f['xyz'] # no such field
# Array field: whole array, first element's value, and its length.
print(car1_f['arr'])
print(car1_f['arr'][0].value)
print(len(car1_f['arr']))
#some_foo_ptr = Process.read_ptr(0x000007f913ef9c0)
#print(hex(some_foo_ptr))
print (hex(car1_f.address), hex(car1_f.value)) | 29.230769 | 75 | 0.717105 | from dbgscript import *
thd = Process.current_thread
print(thd)
frame = thd.current_frame
locals = frame.get_locals()
print(locals)
for l in locals: print(l.name)
for l in locals: print(l.name, l.type)
car1 = locals[0]
print(car1.name)
car1_f = car1['f']
print(car1_f)
print(car1_f.name, car1_f.type)
print(car1_f.name, car1_f.type, car1_f.size)
foo_c = car1_f['c']
print(foo_c)
print(foo_c.name)
print(foo_c.name, foo_c.type)
print(foo_c.name, foo_c.type, foo_c.size, hex(foo_c.address), foo_c.value)
rr'])
print(car1_f['arr'][0].value)
print(len(car1_f['arr']))
print (hex(car1_f.address), hex(car1_f.value)) | true | true |
f714cc193af3bb00e3db9b505c65fc30ea731e09 | 4,804 | py | Python | tests/ouimeaux_device/api/unit/test_service.py | KnicKnic/pywemo | 5e094b47057549a9d7c539a7e2592dcbecd50deb | [
"MIT"
] | null | null | null | tests/ouimeaux_device/api/unit/test_service.py | KnicKnic/pywemo | 5e094b47057549a9d7c539a7e2592dcbecd50deb | [
"MIT"
] | 60 | 2021-01-19T07:13:42.000Z | 2022-03-25T12:06:46.000Z | tests/ouimeaux_device/api/unit/test_service.py | KnicKnic/pywemo | 5e094b47057549a9d7c539a7e2592dcbecd50deb | [
"MIT"
] | null | null | null | """Tests for pywemo.ouimeaux_device.api.service."""
import unittest.mock as mock
from xml.etree import ElementTree
from xml.etree import cElementTree as cet
import pytest
import requests
import pywemo.ouimeaux_device.api.service as svc
# Key of the ``headers`` keyword argument passed to ``requests.post``.
HEADERS_KWARG_KEY = "headers"
# HTTP header names inspected by the tests below.
CONTENT_TYPE_KEY = "Content-Type"
SOAPACTION_KEY = "SOAPACTION"
# Indices into a ``mock.call_args`` pair: (positional args, keyword args).
MOCK_ARGS_ORDERED = 0
MOCK_ARGS_KWARGS = 1

# Silence the module's logger for the duration of the tests.
svc.LOG = mock.Mock()

# Canned SOAP envelope used as a fake WeMo device response.
MOCK_RESPONSE = (
    b'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
    b' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
    b'<s:Body>\n<u:GetInsightParamsResponse xmlns:u="urn:Belkin:service:metainfo:1">'
    b"\r\n<InsightParams>0|1604849509|85|1315|27628|1209600|772|0|21689183|386799026.000000|8000"
    b"</InsightParams>\r\n</u:GetInsightParamsResponse>\r\n</s:Body> </s:Envelope>"
)
class TestAction:
    """Unit tests for ``svc.Action`` (SOAP action invocation).

    NOTE(review): these tests reassign ``requests.post`` and
    ``cet.fromstring`` at module level and never restore them, so state can
    leak between tests; consider ``mock.patch`` as a decorator/context
    manager instead.
    """

    @staticmethod
    def get_mock_action(name="", service_type="", url=""):
        # Build an ``svc.Action`` wired to fully mocked device/service objects.
        device = mock.Mock()
        service = mock.Mock()
        service.serviceType = service_type
        service.controlURL = url
        action_config = mock.MagicMock()
        action_config.get_name = lambda: name
        return svc.Action(device, service, action_config)

    @staticmethod
    def get_et_mock():
        # Parser stub that always yields the canned MOCK_RESPONSE envelope.
        resp = cet.fromstring(MOCK_RESPONSE)
        return mock.MagicMock(return_value=resp)

    def test_call_post_request_is_made_exactly_once_when_successful(self):
        action = self.get_mock_action()
        requests.post = post_mock = mock.Mock()
        cet.fromstring = self.get_et_mock()
        action()
        assert post_mock.call_count == 1

    def test_call_request_has_well_formed_xml_body(self):
        action = self.get_mock_action(name="cool_name", service_type="service")
        requests.post = post_mock = mock.Mock()
        cet.fromstring = self.get_et_mock()
        action()
        body = post_mock.call_args[MOCK_ARGS_ORDERED][1]
        ElementTree.fromstring(body)  # will raise error if xml is malformed

    def test_call_request_has_correct_header_keys(self):
        action = self.get_mock_action()
        requests.post = post_mock = mock.Mock()
        action()
        headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
        for header in [CONTENT_TYPE_KEY, SOAPACTION_KEY]:
            assert header in headers

    def test_call_headers_has_correct_content_type(self):
        action = self.get_mock_action()
        requests.post = post_mock = mock.Mock()
        action()
        headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
        content_type_header = headers[CONTENT_TYPE_KEY]
        assert content_type_header == "text/xml"

    def test_call_headers_has_correct_soapaction(self):
        service_type = "some_service"
        name = "cool_name"
        action = self.get_mock_action(name, service_type)
        requests.post = post_mock = mock.Mock()
        action()
        headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
        soapaction_header = headers[SOAPACTION_KEY]
        assert soapaction_header == '"%s#%s"' % (service_type, name)

    def test_call_headers_has_correct_url(self):
        url = "http://www.github.com/"
        action = self.get_mock_action(url=url)
        requests.post = post_mock = mock.Mock()
        action()
        actual_url = post_mock.call_args[MOCK_ARGS_ORDERED][0]
        assert actual_url == url

    def test_call_request_is_tried_up_to_max_on_communication_error(self):
        action = self.get_mock_action()
        requests.post = post_mock = mock.Mock(
            side_effect=requests.exceptions.RequestException
        )
        try:
            action()
        except svc.ActionException:
            pass  # expected once every retry has failed
        assert post_mock.call_count == svc.MAX_RETRIES

    def test_call_throws_when_final_retry_fails(self):
        action = self.get_mock_action()
        requests.post = mock.Mock(
            side_effect=requests.exceptions.RequestException
        )
        with pytest.raises(svc.ActionException):
            action()

    def test_call_returns_correct_dictionary_with_response_contents(self):
        action = self.get_mock_action()
        requests.post = mock.Mock()
        # Hand-build a fake SOAP envelope/body/response tree so the action's
        # parsed result can be compared against a known dict.
        envelope = cet.Element("soapEnvelope")
        body = cet.SubElement(envelope, "soapBody")
        response = cet.SubElement(body, "soapResponse")
        response_content = {
            "key1": "value1",
            "key2": "value2",
            "key3": "value3",
        }
        for key, value in response_content.items():
            element = cet.SubElement(response, key)
            element.text = value
        cet.fromstring = mock.MagicMock(return_value=envelope)
        actual_responses = action()
        assert actual_responses == response_content
| 30.598726 | 97 | 0.669858 |
import unittest.mock as mock
from xml.etree import ElementTree
from xml.etree import cElementTree as cet
import pytest
import requests
import pywemo.ouimeaux_device.api.service as svc
HEADERS_KWARG_KEY = "headers"
CONTENT_TYPE_KEY = "Content-Type"
SOAPACTION_KEY = "SOAPACTION"
MOCK_ARGS_ORDERED = 0
MOCK_ARGS_KWARGS = 1
svc.LOG = mock.Mock()
MOCK_RESPONSE = (
b'<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"'
b' s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">'
b'<s:Body>\n<u:GetInsightParamsResponse xmlns:u="urn:Belkin:service:metainfo:1">'
b"\r\n<InsightParams>0|1604849509|85|1315|27628|1209600|772|0|21689183|386799026.000000|8000"
b"</InsightParams>\r\n</u:GetInsightParamsResponse>\r\n</s:Body> </s:Envelope>"
)
class TestAction:
@staticmethod
def get_mock_action(name="", service_type="", url=""):
device = mock.Mock()
service = mock.Mock()
service.serviceType = service_type
service.controlURL = url
action_config = mock.MagicMock()
action_config.get_name = lambda: name
return svc.Action(device, service, action_config)
@staticmethod
def get_et_mock():
resp = cet.fromstring(MOCK_RESPONSE)
return mock.MagicMock(return_value=resp)
def test_call_post_request_is_made_exactly_once_when_successful(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
assert post_mock.call_count == 1
def test_call_request_has_well_formed_xml_body(self):
action = self.get_mock_action(name="cool_name", service_type="service")
requests.post = post_mock = mock.Mock()
cet.fromstring = self.get_et_mock()
action()
body = post_mock.call_args[MOCK_ARGS_ORDERED][1]
ElementTree.fromstring(body)
def test_call_request_has_correct_header_keys(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
for header in [CONTENT_TYPE_KEY, SOAPACTION_KEY]:
assert header in headers
def test_call_headers_has_correct_content_type(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
content_type_header = headers[CONTENT_TYPE_KEY]
assert content_type_header == "text/xml"
def test_call_headers_has_correct_soapaction(self):
service_type = "some_service"
name = "cool_name"
action = self.get_mock_action(name, service_type)
requests.post = post_mock = mock.Mock()
action()
headers = post_mock.call_args[MOCK_ARGS_KWARGS][HEADERS_KWARG_KEY]
soapaction_header = headers[SOAPACTION_KEY]
assert soapaction_header == '"%s#%s"' % (service_type, name)
def test_call_headers_has_correct_url(self):
url = "http://www.github.com/"
action = self.get_mock_action(url=url)
requests.post = post_mock = mock.Mock()
action()
actual_url = post_mock.call_args[MOCK_ARGS_ORDERED][0]
assert actual_url == url
def test_call_request_is_tried_up_to_max_on_communication_error(self):
action = self.get_mock_action()
requests.post = post_mock = mock.Mock(
side_effect=requests.exceptions.RequestException
)
try:
action()
except svc.ActionException:
pass
assert post_mock.call_count == svc.MAX_RETRIES
def test_call_throws_when_final_retry_fails(self):
action = self.get_mock_action()
requests.post = mock.Mock(
side_effect=requests.exceptions.RequestException
)
with pytest.raises(svc.ActionException):
action()
def test_call_returns_correct_dictionary_with_response_contents(self):
action = self.get_mock_action()
requests.post = mock.Mock()
envelope = cet.Element("soapEnvelope")
body = cet.SubElement(envelope, "soapBody")
response = cet.SubElement(body, "soapResponse")
response_content = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
for key, value in response_content.items():
element = cet.SubElement(response, key)
element.text = value
cet.fromstring = mock.MagicMock(return_value=envelope)
actual_responses = action()
assert actual_responses == response_content
| true | true |
f714cc8f8cc3d2beac32698e45c11ada4b6f4fde | 590 | py | Python | core/config.py | imhsz/fastapi-vue-admin | 6af8876b3d62df1de776fcf23ffcfb2bbf6082d6 | [
"MIT"
] | 1 | 2022-03-20T02:03:07.000Z | 2022-03-20T02:03:07.000Z | core/config.py | imhsz/fastapi-vue-admin | 6af8876b3d62df1de776fcf23ffcfb2bbf6082d6 | [
"MIT"
] | null | null | null | core/config.py | imhsz/fastapi-vue-admin | 6af8876b3d62df1de776fcf23ffcfb2bbf6082d6 | [
"MIT"
] | null | null | null | from pydantic import AnyHttpUrl
from typing import List
import os
ENV = os.environ.get("fast_env", "DEV")  # environment for this run (defaults to DEV)


class Settings:
    # Application configuration (flat class-attribute style).
    APP_NAME = "fastapi-vue-admin"
    # API route prefix
    API_PREFIX = "/api"
    # JWT secret key; a randomly generated value is recommended.
    # NOTE(review): a secret committed to source control should be rotated
    # and loaded from the environment instead.
    SECRET_KEY = "ShsUP9qIP2Xui2GpXRY6y74v2JSVS0Q2YOXJ22VjwkI"
    # token lifetime in minutes (24 hours)
    ACCESS_TOKEN_EXPIRE_MINUTES = 24 * 60
    # CORS origin whitelist
    BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = ["http://localhost:9528"]
    # database connection URL
    DB_URL = "mysql+pymysql://root:Aa123456@127.0.0.1:3306/fast"
    # port the server listens on
    PORT = 8999
    # hot-reload toggle (development)
    RELOAD = True


settings = Settings()  # module-level singleton imported by the rest of the app
| 21.851852 | 70 | 0.672881 | from pydantic import AnyHttpUrl
from typing import List
import os
ENV = os.environ.get("fast_env", "DEV")
class Settings:
APP_NAME = "fastapi-vue-admin"
API_PREFIX = "/api"
SECRET_KEY = "ShsUP9qIP2Xui2GpXRY6y74v2JSVS0Q2YOXJ22VjwkI"
ACCESS_TOKEN_EXPIRE_MINUTES = 24 * 60
BACKEND_CORS_ORIGINS: List[AnyHttpUrl] = ["http://localhost:9528"]
DB_URL = "mysql+pymysql://root:Aa123456@127.0.0.1:3306/fast"
PORT = 8999
RELOAD = True
settings = Settings()
| true | true |
f714cf1852de7db2ee015f8b20b6a0e72c652f7f | 9,850 | py | Python | nova/tests/unit/db/api/test_migrations.py | crowdy/nova | 7b063e4d0518af3e57872bc0288a94edcd33c19d | [
"Apache-2.0"
] | null | null | null | nova/tests/unit/db/api/test_migrations.py | crowdy/nova | 7b063e4d0518af3e57872bc0288a94edcd33c19d | [
"Apache-2.0"
] | 3 | 2019-05-17T15:49:12.000Z | 2019-11-21T10:49:54.000Z | nova/tests/unit/db/api/test_migrations.py | crowdy/nova | 7b063e4d0518af3e57872bc0288a94edcd33c19d | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for database migrations for the API database.
These are "opportunistic" tests which allow testing against all three databases
(sqlite in memory, mysql, pg) in a properly configured unit test environment.
For the opportunistic testing you need to set up DBs named 'openstack_citest'
with user 'openstack_citest' and password 'openstack_citest' on localhost. The
test will then use that DB and username/password combo to run the tests. Refer
to the 'tools/test-setup.sh' for an example of how to configure this.
"""
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log as logging
import testtools
from nova.db.api import models
from nova.db import migration
from nova import test
LOG = logging.getLogger(__name__)
class NovaModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
    """Test that the models match the database after migrations are run."""

    def setUp(self):
        super().setUp()
        # Engine supplied by the opportunistic DB fixture for this backend.
        self.engine = enginefacade.writer.get_engine()

    def db_sync(self, engine):
        """Apply all API-database migrations against *engine*."""
        with mock.patch.object(migration, '_get_engine', return_value=engine):
            migration.db_sync(database='api')

    def get_engine(self):
        """Return the engine the schema comparison runs against."""
        return self.engine

    def get_metadata(self):
        """Return the model metadata the live schema is compared to."""
        return models.BASE.metadata

    def include_object(self, object_, name, type_, reflected, compare_to):
        """Decide which schema objects participate in the comparison."""
        if type_ == 'table':
            # migrate_version is a sqlalchemy-migrate control table and
            # isn't included in the model.
            if name == 'migrate_version':
                return False

        return True

    def filter_metadata_diff(self, diff):
        """Drop known, intentional model/DB differences from *diff*."""
        # Filter out diffs that shouldn't cause a sync failure.
        new_diff = []

        # Define a whitelist of ForeignKeys that exist on the model but not in
        # the database. They will be removed from the model at a later time.
        fkey_whitelist = {'build_requests': ['request_spec_id']}

        # Define a whitelist of columns that will be removed from the
        # DB at a later release and aren't on a model anymore.
        column_whitelist = {
            'build_requests': [
                'vm_state', 'instance_metadata',
                'display_name', 'access_ip_v6', 'access_ip_v4', 'key_name',
                'locked_by', 'image_ref', 'progress', 'request_spec_id',
                'info_cache', 'user_id', 'task_state', 'security_groups',
                'config_drive',
            ],
            'resource_providers': ['can_host'],
        }

        for element in diff:
            if isinstance(element, list):
                # modify_nullable is a list
                new_diff.append(element)
            else:
                # tuple with action as first element. Different actions have
                # different tuple structures.
                if element[0] == 'add_fk':
                    fkey = element[1]
                    tablename = fkey.table.name
                    column_keys = fkey.column_keys
                    if (
                        tablename in fkey_whitelist and
                        column_keys == fkey_whitelist[tablename]
                    ):
                        continue
                elif element[0] == 'remove_column':
                    tablename = element[2]
                    column = element[3]
                    if (
                        tablename in column_whitelist and
                        column.name in column_whitelist[tablename]
                    ):
                        continue

                new_diff.append(element)

        return new_diff
# Backend-specific variants of the model/schema sync check. The SQLite
# variant uses the mixin's default in-memory fixture; the MySQL/PostgreSQL
# variants set FIXTURE accordingly (presumably only run when the
# opportunistic DB is reachable -- see oslo.db's test_fixtures docs).
class TestModelsSyncSQLite(
    NovaModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    pass


class TestModelsSyncMySQL(
    NovaModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class TestModelsSyncPostgreSQL(
    NovaModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
    """Test that the models match the database after old migrations are run.

    "Old" here means the legacy sqlalchemy-migrate flow rather than alembic.
    """

    def db_sync(self, engine):
        # the 'nova.db.migration.db_sync' method will not use the legacy
        # sqlalchemy-migrate-based migration flow unless the database is
        # already controlled with sqlalchemy-migrate, so we need to manually
        # enable version controlling with this tool to test this code path
        repository = migration._find_migrate_repo(database='api')
        migrate_api.version_control(
            engine, repository, migration.MIGRATE_INIT_VERSION['api'])

        # now we can apply migrations as expected and the legacy path will be
        # followed
        super().db_sync(engine)
# Backend-specific variants of the legacy (sqlalchemy-migrate) sync check;
# same fixture pattern as the TestModelsSync* classes above.
class TestModelsLegacySyncSQLite(
    NovaModelsMigrationsLegacySync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    pass


class TestModelsLegacySyncMySQL(
    NovaModelsMigrationsLegacySync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class TestModelsLegacySyncPostgreSQL(
    NovaModelsMigrationsLegacySync,
    test_fixtures.OpportunisticDBTestMixin,
    testtools.TestCase,
):
    FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaMigrationsWalk(
    test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
    """Walk every alembic migration of the API DB, one revision at a time."""

    def setUp(self):
        super().setUp()
        self.engine = enginefacade.writer.get_engine()
        self.config = migration._find_alembic_conf('api')
        self.init_version = migration.ALEMBIC_INIT_VERSION['api']

    def _migrate_up(self, connection, revision):
        """Upgrade to *revision*, running its optional pre/post checks."""
        if revision == self.init_version:  # no tests for the initial revision
            alembic_api.upgrade(self.config, revision)
            return

        # Every non-initial migration must ship a ``_check_<revision>`` hook.
        self.assertIsNotNone(
            getattr(self, '_check_%s' % revision, None),
            (
                'API DB Migration %s does not have a test; you must add one'
            ) % revision,
        )

        pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
        if pre_upgrade:
            pre_upgrade(connection)

        alembic_api.upgrade(self.config, revision)

        post_upgrade = getattr(self, '_check_%s' % revision, None)
        if post_upgrade:
            post_upgrade(connection)

    def test_single_base_revision(self):
        """Ensure we only have a single base revision.

        There's no good reason for us to have diverging history, so validate
        that only one base revision exists. This will prevent simple errors
        where people forget to specify the base revision. If this fail for your
        change, look for migrations that do not have a 'revises' line in them.
        """
        script = alembic_script.ScriptDirectory.from_config(self.config)
        self.assertEqual(1, len(script.get_bases()))

    def test_single_head_revision(self):
        """Ensure we only have a single head revision.

        There's no good reason for us to have diverging history, so validate
        that only one head revision exists. This will prevent merge conflicts
        adding additional head revision points. If this fail for your change,
        look for migrations with the same 'revises' line in them.
        """
        script = alembic_script.ScriptDirectory.from_config(self.config)
        self.assertEqual(1, len(script.get_heads()))

    def test_walk_versions(self):
        """Apply every revision in order, running the per-revision checks."""
        with self.engine.begin() as connection:
            self.config.attributes['connection'] = connection
            script = alembic_script.ScriptDirectory.from_config(self.config)
            revisions = [x.revision for x in script.walk_revisions()]

            # for some reason, 'walk_revisions' gives us the revisions in
            # reverse chronological order so we have to invert this
            revisions.reverse()
            self.assertEqual(revisions[0], self.init_version)

            for revision in revisions:
                LOG.info('Testing revision %s', revision)
                self._migrate_up(connection, revision)

    def test_db_version_alembic(self):
        """Check db_version() reports the alembic head after a full sync."""
        migration.db_sync(database='api')

        script = alembic_script.ScriptDirectory.from_config(self.config)
        head = script.get_current_head()
        self.assertEqual(head, migration.db_version(database='api'))
# Backend-specific variants of the alembic migration walk; same fixture
# pattern as the sync variants above.
class TestMigrationsWalkSQLite(
    NovaMigrationsWalk,
    test_fixtures.OpportunisticDBTestMixin,
    test.NoDBTestCase,
):
    pass


class TestMigrationsWalkMySQL(
    NovaMigrationsWalk,
    test_fixtures.OpportunisticDBTestMixin,
    test.NoDBTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class TestMigrationsWalkPostgreSQL(
    NovaMigrationsWalk,
    test_fixtures.OpportunisticDBTestMixin,
    test.NoDBTestCase,
):
    FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
| 34.805654 | 79 | 0.67198 |
from alembic import command as alembic_api
from alembic import script as alembic_script
from migrate.versioning import api as migrate_api
import mock
from oslo_db.sqlalchemy import enginefacade
from oslo_db.sqlalchemy import test_fixtures
from oslo_db.sqlalchemy import test_migrations
from oslo_log import log as logging
import testtools
from nova.db.api import models
from nova.db import migration
from nova import test
LOG = logging.getLogger(__name__)
class NovaModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
def db_sync(self, engine):
with mock.patch.object(migration, '_get_engine', return_value=engine):
migration.db_sync(database='api')
def get_engine(self):
return self.engine
def get_metadata(self):
return models.BASE.metadata
def include_object(self, object_, name, type_, reflected, compare_to):
if type_ == 'table':
if name == 'migrate_version':
return False
return True
def filter_metadata_diff(self, diff):
# Filter out diffs that shouldn't cause a sync failure.
new_diff = []
fkey_whitelist = {'build_requests': ['request_spec_id']}
column_whitelist = {
'build_requests': [
'vm_state', 'instance_metadata',
'display_name', 'access_ip_v6', 'access_ip_v4', 'key_name',
'locked_by', 'image_ref', 'progress', 'request_spec_id',
'info_cache', 'user_id', 'task_state', 'security_groups',
'config_drive',
],
'resource_providers': ['can_host'],
}
for element in diff:
if isinstance(element, list):
# modify_nullable is a list
new_diff.append(element)
else:
# tuple with action as first element. Different actions have
# different tuple structures.
if element[0] == 'add_fk':
fkey = element[1]
tablename = fkey.table.name
column_keys = fkey.column_keys
if (
tablename in fkey_whitelist and
column_keys == fkey_whitelist[tablename]
):
continue
elif element[0] == 'remove_column':
tablename = element[2]
column = element[3]
if (
tablename in column_whitelist and
column.name in column_whitelist[tablename]
):
continue
new_diff.append(element)
return new_diff
class TestModelsSyncSQLite(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsSyncMySQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsSyncPostgreSQL(
NovaModelsMigrationsSync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaModelsMigrationsLegacySync(NovaModelsMigrationsSync):
def db_sync(self, engine):
# the 'nova.db.migration.db_sync' method will not use the legacy
# sqlalchemy-migrate-based migration flow unless the database is
# already controlled with sqlalchemy-migrate, so we need to manually
# enable version controlling with this tool to test this code path
repository = migration._find_migrate_repo(database='api')
migrate_api.version_control(
engine, repository, migration.MIGRATE_INIT_VERSION['api'])
# now we can apply migrations as expected and the legacy path will be
# followed
super().db_sync(engine)
class TestModelsLegacySyncSQLite(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
pass
class TestModelsLegacySyncMySQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestModelsLegacySyncPostgreSQL(
NovaModelsMigrationsLegacySync,
test_fixtures.OpportunisticDBTestMixin,
testtools.TestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
class NovaMigrationsWalk(
test_fixtures.OpportunisticDBTestMixin, test.NoDBTestCase,
):
def setUp(self):
super().setUp()
self.engine = enginefacade.writer.get_engine()
self.config = migration._find_alembic_conf('api')
self.init_version = migration.ALEMBIC_INIT_VERSION['api']
def _migrate_up(self, connection, revision):
if revision == self.init_version: # no tests for the initial revision
alembic_api.upgrade(self.config, revision)
return
self.assertIsNotNone(
getattr(self, '_check_%s' % revision, None),
(
'API DB Migration %s does not have a test; you must add one'
) % revision,
)
pre_upgrade = getattr(self, '_pre_upgrade_%s' % revision, None)
if pre_upgrade:
pre_upgrade(connection)
alembic_api.upgrade(self.config, revision)
post_upgrade = getattr(self, '_check_%s' % revision, None)
if post_upgrade:
post_upgrade(connection)
def test_single_base_revision(self):
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_bases()))
def test_single_head_revision(self):
script = alembic_script.ScriptDirectory.from_config(self.config)
self.assertEqual(1, len(script.get_heads()))
def test_walk_versions(self):
with self.engine.begin() as connection:
self.config.attributes['connection'] = connection
script = alembic_script.ScriptDirectory.from_config(self.config)
revisions = [x.revision for x in script.walk_revisions()]
# for some reason, 'walk_revisions' gives us the revisions in
# reverse chronological order so we have to invert this
revisions.reverse()
self.assertEqual(revisions[0], self.init_version)
for revision in revisions:
LOG.info('Testing revision %s', revision)
self._migrate_up(connection, revision)
def test_db_version_alembic(self):
migration.db_sync(database='api')
script = alembic_script.ScriptDirectory.from_config(self.config)
head = script.get_current_head()
self.assertEqual(head, migration.db_version(database='api'))
class TestMigrationsWalkSQLite(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
pass
class TestMigrationsWalkMySQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.MySQLOpportunisticFixture
class TestMigrationsWalkPostgreSQL(
NovaMigrationsWalk,
test_fixtures.OpportunisticDBTestMixin,
test.NoDBTestCase,
):
FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
| true | true |
f714cf7e6e60b68cd1d521d3864fc8dbec020e17 | 583 | py | Python | pipecheck/checks/icmp.py | mriedmann/pipecheck | 9919c13c96d1c9ec28e90ca9c4da5f5b33eb41e9 | [
"MIT"
] | null | null | null | pipecheck/checks/icmp.py | mriedmann/pipecheck | 9919c13c96d1c9ec28e90ca9c4da5f5b33eb41e9 | [
"MIT"
] | 5 | 2021-06-05T22:09:17.000Z | 2021-11-24T22:17:08.000Z | pipecheck/checks/icmp.py | mriedmann/pipecheck | 9919c13c96d1c9ec28e90ca9c4da5f5b33eb41e9 | [
"MIT"
] | null | null | null | import icmplib
from pipecheck.api import CheckResult, Err, Ok, Probe, Warn
class PingProbe(Probe):
    """ICMP ping check"""

    # Target hostname or IP address to ping.
    host: str = ""
    # Number of echo requests to send per check.
    ping_count: int = 1

    def __call__(self) -> CheckResult:
        """Ping ``self.host`` and map the outcome to a check result.

        Returns Ok when the host answers with no loss, Warn when it answers
        but some packets were lost, and Err when it is unreachable.
        """
        # privileged=False uses unprivileged (datagram) sockets so no root
        # is required.
        h = icmplib.ping(self.host, privileged=False, count=self.ping_count)
        if h.is_alive:
            if h.packet_loss > 0.0:
                return Warn(f"ICMP '{self.host}' ({h.address}) unreliable! packet loss {h.packet_loss*100}%")
            return Ok(f"ICMP '{self.host}' reachable ({h.avg_rtt}ms)")
        return Err(f"ICMP '{self.host}' unreachable")
| 30.684211 | 109 | 0.61578 | import icmplib
from pipecheck.api import CheckResult, Err, Ok, Probe, Warn
class PingProbe(Probe):
host: str = ""
ping_count: int = 1
def __call__(self) -> CheckResult:
h = icmplib.ping(self.host, privileged=False, count=self.ping_count)
if h.is_alive:
if h.packet_loss > 0.0:
return Warn(f"ICMP '{self.host}' ({h.address}) unreliable! packet loss {h.packet_loss*100}%")
return Ok(f"ICMP '{self.host}' reachable ({h.avg_rtt}ms)")
return Err(f"ICMP '{self.host}' unreachable")
| true | true |
f714d02380cfe366151bdfc4c476215576e9b055 | 2,195 | py | Python | sorts/recursive_mergesort_array.py | salvinanto7/Python | 78ce34637f4b22f7f530580cc2f0b687add1b94b | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | sorts/recursive_mergesort_array.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 279 | 2020-02-12T20:51:09.000Z | 2021-07-20T11:25:19.000Z | sorts/recursive_mergesort_array.py | Agha-Muqarib/Python | 04f156a8973d6156a4357e0717d9eb0aa264d086 | [
"MIT"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | """A merge sort which accepts an array as input and recursively
splits an array in half and sorts and combines them.
"""
"""https://en.wikipedia.org/wiki/Merge_sort """
def merge(arr: list[int]) -> list[int]:
    """Sort *arr* in place with a recursive merge sort and return it.

    >>> merge([10, 9, 8, 7, 6, 5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> merge([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
    [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    >>> merge([10, 22, 1, 2, 3, 9, 15, 23])
    [1, 2, 3, 9, 10, 15, 22, 23]
    >>> merge([100])
    [100]
    >>> merge([])
    []
    """
    if len(arr) <= 1:
        return arr  # empty or single element: already sorted

    mid = len(arr) // 2
    left = merge(arr[:mid])   # recursively sort a copy of the first half
    right = merge(arr[mid:])  # recursively sort a copy of the second half

    # Merge the two sorted halves back into arr, smallest element first.
    i = j = k = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            arr[k] = left[i]
            i += 1
        else:
            arr[k] = right[j]
            j += 1
        k += 1

    # Copy any leftovers; at most one of these loops actually runs.
    while i < len(left):
        arr[k] = left[i]
        i += 1
        k += 1
    while j < len(right):
        arr[k] = right[j]
        j += 1
        k += 1

    return arr
if __name__ == "__main__":
    # Run the doctest examples embedded in ``merge``'s docstring.
    import doctest

    doctest.testmod()
| 33.769231 | 74 | 0.555353 |
def merge(arr: list[int]) -> list[int]:
if len(arr) > 1:
middle_length = len(arr) // 2
left_array = arr[
:middle_length
]
right_array = arr[
middle_length:
]
left_size = len(left_array)
right_size = len(right_array)
merge(left_array)
merge(right_array)
left_index = 0
right_index = 0
index = 0
while (
left_index < left_size and right_index < right_size
):
if left_array[left_index] < right_array[right_index]:
arr[index] = left_array[left_index]
left_index = left_index + 1
else:
arr[index] = right_array[right_index]
right_index = right_index + 1
index = index + 1
while (
left_index < left_size
):
arr[index] = left_array[left_index]
left_index = left_index + 1
index = index + 1
while (
right_index < right_size
):
arr[index] = right_array[right_index]
right_index = right_index + 1
index = index + 1
return arr
if __name__ == "__main__":
import doctest
doctest.testmod()
| true | true |
f714d1d0ae914916a9010122b6cddb2897fbe2ce | 722 | py | Python | common/helpers/utils.py | AlcindoSchleder/flaskWeb | 1f9ba3a3ac8546c24126124d4c34335825b94df9 | [
"MIT"
] | null | null | null | common/helpers/utils.py | AlcindoSchleder/flaskWeb | 1f9ba3a3ac8546c24126124d4c34335825b94df9 | [
"MIT"
] | 1 | 2019-07-31T20:50:41.000Z | 2019-08-01T03:02:10.000Z | common/helpers/utils.py | AlcindoSchleder/flask_API | 00f91ec29ba93c9ec3f45e6cfd78625f0abadc96 | [
"MIT"
] | 1 | 2019-08-02T22:38:23.000Z | 2019-08-02T22:38:23.000Z | # -*- coding: utf-8 -*-
import hmac
import hashlib
import base64
"""
unit : utils
descritption: Collection of functions used in all projetcts
author : Alcindo Schleder
version : 1.0.0
package : i-City Identification Plataform
"""
def isnumber(value):
    """Return True if *value* can be converted to a float, else False.

    Accepts anything ``float()`` accepts: numeric strings ("3.14"), ints,
    floats. Non-numeric strings and non-convertible objects (None, lists,
    ...) return False.
    """
    try:
        float(value)
    except (TypeError, ValueError):
        # ValueError: non-numeric string; TypeError: non-convertible object
        # (e.g. None, list). The original caught only ValueError, so
        # isnumber(None) crashed instead of returning False.
        return False
    return True
def calcFileSignature(data: str, password: str = None):
    """Compute a signature for *data*.

    With *password*: base64-encoded HMAC-SHA256 of the data keyed by the
    password. Without: plain SHA-256 hex digest.

    Args:
        data: payload to sign; str is encoded as UTF-8, bytes used as-is.
        password: optional HMAC key (str or bytes).

    Returns:
        str: base64 HMAC digest, or hex SHA-256 digest when no password.
    """
    # hmac.new() and hashlib digests require bytes; the original passed str
    # straight through and raised TypeError for every str argument despite
    # the ``data: str`` annotation. Encode at the boundary instead.
    if isinstance(data, str):
        data = data.encode('utf-8')
    if password:
        if isinstance(password, str):
            password = password.encode('utf-8')
        digest = hmac.new(password, msg=data, digestmod=hashlib.sha256).digest()
        return base64.b64encode(digest).decode()
    return hashlib.sha256(data).hexdigest()
| 24.066667 | 81 | 0.627424 |
import hmac
import hashlib
import base64
def isnumber(value):
try:
float(value)
except ValueError:
return False
return True
def calcFileSignature(data: str, password: str = None):
if (password):
digest = hmac.new(password, msg=data, digestmod=hashlib.sha256).digest()
resHash = base64.b64encode(digest).decode()
else:
hasher = hashlib.sha256()
hasher.update(data)
resHash = hasher.hexdigest()
return resHash
| true | true |
f714d29455aa555e390ca247a0f73233eed544e4 | 1,956 | py | Python | app/app.py | Farmer-chong/todo-list | fd44fae6375cf7b1485582faf71efedaebbb57fc | [
"MIT"
] | null | null | null | app/app.py | Farmer-chong/todo-list | fd44fae6375cf7b1485582faf71efedaebbb57fc | [
"MIT"
] | null | null | null | app/app.py | Farmer-chong/todo-list | fd44fae6375cf7b1485582faf71efedaebbb57fc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:file: app.py
:author: -Farmer
:url: https://blog.farmer233.top
:date: 2021/09/21 12:44:37
'''
import os
import click
from apiflask import APIFlask, abort
from app.config import config
from app.models import TodoList
from app.extensions import db, cors
from app.api.todo import todo_bp
def create_app(config_name: str = None) -> APIFlask:
    """Application factory.

    Args:
        config_name (str, optional): name of the configuration to load;
            falls back to the ``FLASK_CONFIG`` environment variable and
            then to ``'development'``. Defaults to None.

    Returns:
        APIFlask: the configured Flask application instance.
    """
    if config_name is None:
        config_name = os.getenv('FLASK_CONFIG', 'development')
    app = APIFlask(__name__)
    app.config.from_object(config[config_name])

    # Wire up extensions, routes, error handlers and CLI commands.
    register_extensions(app)
    register_blueprints(app)
    register_errors(app)
    register_commands(app)
    return app
def register_extensions(app: APIFlask):
    """Initialise Flask extensions (SQLAlchemy and CORS).

    Args:
        app (APIFlask): the Flask application instance.
    """
    db.init_app(app)
    cors.init_app(app)
def register_blueprints(app: APIFlask):
    """Register the application's blueprints (currently the todo API at /)."""
    app.register_blueprint(todo_bp, url_prefix="/")
def register_errors(app: APIFlask):
    """Register custom error handlers (placeholder; none installed yet)."""
    pass

    # @app.errorhandler(Exception)
    # def internal_server_error(e):
    #     abort(500, message=str(e))
def register_commands(app: APIFlask):
    """Attach custom CLI commands (``flask initdb`` / ``flask fakedb``).

    Args:
        app (APIFlask): the Flask application instance to extend.
    """

    @app.cli.command()
    def initdb():
        """Drop and recreate all database tables."""
        db.drop_all()
        db.create_all()

    @app.cli.command()
    # Fix: the help text claimed "default is 20" while ``default=5``.
    @click.option('--count', default=5, help='Quantity of messages, default is 5.')
    def fakedb(count):
        """Recreate the schema and fill it with *count* fake todo items."""
        from faker import Faker
        from datetime import datetime

        print(datetime.now())
        db.drop_all()
        db.create_all()
        fake = Faker()
        click.echo('Working...')
        for _ in range(count):
            todo = TodoList(
                task=fake.sentence(),
                completed=fake.pybool()
            )
            db.session.add(todo)
        db.session.commit()
        click.echo('Created %d fake todo items.' % count)
| 22.744186 | 84 | 0.619632 |
import os
import click
from apiflask import APIFlask, abort
from app.config import config
from app.models import TodoList
from app.extensions import db, cors
from app.api.todo import todo_bp
def create_app(config_name: str = None) -> APIFlask:
    """Application factory: build and wire an APIFlask instance.

    Args:
        config_name (str, optional): config key; when None, read from the
            FLASK_CONFIG environment variable ('development' by default).

    Returns:
        APIFlask: the configured app.
    """
    if config_name is None:
        config_name = os.getenv('FLASK_CONFIG', 'development')
    app = APIFlask(__name__)
    app.config.from_object(config[config_name])
    register_extensions(app)
    register_blueprints(app)
    register_errors(app)
    register_commands(app)
    return app
def register_extensions(app: APIFlask):
    """Bind the `db` and `cors` extensions to the app."""
    db.init_app(app)
    cors.init_app(app)
def register_blueprints(app: APIFlask):
    """Register the todo blueprint (`todo_bp`) at the URL root."""
    app.register_blueprint(todo_bp, url_prefix="/")
def register_errors(app: APIFlask):
    """Register application-wide error handlers (currently a placeholder)."""
    pass
def register_commands(app: APIFlask):
    """Attach custom CLI commands to the app.

    Registers:
      * ``initdb`` -- drop and recreate all database tables.
      * ``fakedb`` -- recreate the schema and seed it with fake todo items.

    Args:
        app (APIFlask): the Flask app instance.
    """
    @app.cli.command()
    def initdb():
        """Drop and recreate all database tables."""
        db.drop_all()
        db.create_all()

    @app.cli.command()
    # Help text previously claimed "default is 20" while the default was 5.
    @click.option('--count', default=5, help='Quantity of todo items, default is 5.')
    def fakedb(count):
        """Recreate the schema and fill it with `count` fake todo items."""
        from faker import Faker

        db.drop_all()
        db.create_all()
        fake = Faker()
        click.echo('Working...')
        for _ in range(count):
            todo = TodoList(
                task=fake.sentence(),
                completed=fake.pybool()
            )
            db.session.add(todo)
        # Commit once after the loop instead of once per item.
        db.session.commit()
        click.echo('Created %d fake todo items.' % count)
| true | true |
f714d330ffa499a156b0b00841a0f51b4e605071 | 22,516 | py | Python | research/slim/nets/mobilenet_v1.py | TUDelftHao/models | faf0c2dc442ceaa8425aff73abd00f92f3137b7b | [
"Apache-2.0"
] | 549 | 2020-01-02T05:14:57.000Z | 2022-03-29T18:34:12.000Z | research/slim/nets/mobilenet_v1.py | TUDelftHao/models | faf0c2dc442ceaa8425aff73abd00f92f3137b7b | [
"Apache-2.0"
] | 98 | 2020-01-21T09:41:30.000Z | 2022-03-12T00:53:06.000Z | research/slim/nets/mobilenet_v1.py | TUDelftHao/models | faf0c2dc442ceaa8425aff73abd00f92f3137b7b | [
"Apache-2.0"
] | 233 | 2020-01-18T03:46:27.000Z | 2022-03-19T03:17:47.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# MOBILENETV1_CONV_DEFS specifies the MobileNet body.
# Five stride-2 layers (indices 0, 2, 4, 6, 12) give a total output stride
# of 32 relative to the input.
MOBILENETV1_CONV_DEFS = [
    Conv(kernel=[3, 3], stride=2, depth=32),
    DepthSepConv(kernel=[3, 3], stride=1, depth=64),
    DepthSepConv(kernel=[3, 3], stride=2, depth=128),
    DepthSepConv(kernel=[3, 3], stride=1, depth=128),
    DepthSepConv(kernel=[3, 3], stride=2, depth=256),
    DepthSepConv(kernel=[3, 3], stride=1, depth=256),
    DepthSepConv(kernel=[3, 3], stride=2, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
    DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.
  Pads the input such that if it was used in a convolution with 'VALID' padding,
  the output would have the same dimensions as if the unpadded input was used
  in a convolution with 'SAME' padding.
  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
    rate: An integer, rate for atrous convolution.
  Returns:
    output: A tensor of size [batch, height_out, width_out, channels] with the
      input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  # Bug fix: the width component previously reused kernel_size[0], which is
  # only correct for square kernels; use kernel_size[1] for the width axis.
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  padded_inputs = tf.pad(
      tensor=inputs,
      paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
                [0, 0]])
  return padded_inputs
def mobilenet_v1_base(inputs,
                      final_endpoint='Conv2d_13_pointwise',
                      min_depth=8,
                      depth_multiplier=1.0,
                      conv_defs=None,
                      output_stride=None,
                      use_explicit_padding=False,
                      scope=None):
  """Mobilenet v1.
  Constructs a Mobilenet v1 network from inputs to the given final endpoint.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise,
      'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
      'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
      'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    output_stride: An integer that specifies the requested ratio of input to
      output spatial resolution. If not None, then we invoke atrous convolution
      if necessary to prevent the network from reducing the spatial resolution
      of the activation maps. Allowed values are 8 (accurate fully convolutional
      mode), 16 (fast fully convolutional mode), 32 (classification mode).
    use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
      inputs so that the output dimensions are the same as if 'SAME' padding
      were used.
    scope: Optional variable_scope.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
                or depth_multiplier <= 0, or the target output_stride is not
                allowed.
  """
  # Used to find thinned depths for each layer.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  end_points = {}
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  if conv_defs is None:
    conv_defs = MOBILENETV1_CONV_DEFS
  if output_stride is not None and output_stride not in [8, 16, 32]:
    raise ValueError('Only allowed output_stride values are 8, 16, 32.')
  padding = 'SAME'
  if use_explicit_padding:
    padding = 'VALID'
  with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
      # The current_stride variable keeps track of the output stride of the
      # activations, i.e., the running product of convolution strides up to the
      # current network layer. This allows us to invoke atrous convolution
      # whenever applying the next convolution would result in the activations
      # having output stride larger than the target output_stride.
      current_stride = 1
      # The atrous convolution rate parameter.
      rate = 1
      net = inputs
      for i, conv_def in enumerate(conv_defs):
        end_point_base = 'Conv2d_%d' % i
        if output_stride is not None and current_stride == output_stride:
          # If we have reached the target output_stride, then we need to employ
          # atrous convolution with stride=1 and multiply the atrous rate by the
          # current unit's stride for use in subsequent layers.
          layer_stride = 1
          layer_rate = rate
          rate *= conv_def.stride
        else:
          layer_stride = conv_def.stride
          layer_rate = 1
          current_stride *= conv_def.stride
        if isinstance(conv_def, Conv):
          end_point = end_point_base
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel)
          net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
                            stride=conv_def.stride,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        elif isinstance(conv_def, DepthSepConv):
          end_point = end_point_base + '_depthwise'
          # By passing filters=None
          # separable_conv2d produces only a depthwise convolution layer
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel, layer_rate)
          net = slim.separable_conv2d(net, None, conv_def.kernel,
                                      depth_multiplier=1,
                                      stride=layer_stride,
                                      rate=layer_rate,
                                      scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
          end_point = end_point_base + '_pointwise'
          net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
                            stride=1,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        else:
          # Bug fix: Conv/DepthSepConv namedtuples have no `ltype` field, so
          # the old `conv_def.ltype` raised AttributeError instead of the
          # intended ValueError; report the namedtuple's type name instead.
          raise ValueError('Unknown convolution type %s for layer %d'
                           % (type(conv_def).__name__, i))
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def mobilenet_v1(inputs,
                 num_classes=1000,
                 dropout_keep_prob=0.999,
                 is_training=True,
                 min_depth=8,
                 depth_multiplier=1.0,
                 conv_defs=None,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='MobilenetV1',
                 global_pool=False):
  """Mobilenet v1 model for classification.
  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before dropout)
      are returned instead.
    dropout_keep_prob: the percentage of activation values that are retained.
    is_training: whether is training or not.
    min_depth: Minimum depth value (number of channels) for all convolution ops.
      Enforced when depth_multiplier < 1, and not an active constraint when
      depth_multiplier >= 1.
    depth_multiplier: Float multiplier for the depth (number of channels)
      for all convolution ops. The value must be greater than zero. Typical
      usage will be to set this value in (0, 1) to reduce the number of
      parameters or computation cost of the model.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape is [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    global_pool: Optional boolean flag to control the avgpooling before the
      logits layer. If false or unset, pooling is done with a fixed window
      that reduces default-sized inputs to 1x1, while larger inputs lead to
      larger outputs. If true, any input size is pooled down to 1x1.
  Returns:
    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
      is a non-zero integer, or the non-dropped-out input to the logits layer
      if num_classes is 0 or None.
    end_points: a dictionary from components of the network to the corresponding
      activation.
  Raises:
    ValueError: Input rank is invalid.
  """
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
                     len(input_shape))
  with tf.variable_scope(
      scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      # Build the convolutional backbone up to Conv2d_13_pointwise.
      net, end_points = mobilenet_v1_base(inputs, scope=scope,
                                          min_depth=min_depth,
                                          depth_multiplier=depth_multiplier,
                                          conv_defs=conv_defs)
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(
              input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
          kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a')
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # Headless mode: return pooled features without dropout/logits.
          return net, end_points
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      if prediction_fn:
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Default square input resolution associated with this model function.
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
  """Return functools.partial(func, *args, **kwargs) carrying func's metadata.

  update_wrapper copies __name__, __doc__ etc. onto the partial so the
  resulting callable introspects like the wrapped function.
  """
  wrapper = functools.update_wrapper(
      functools.partial(func, *args, **kwargs), func)
  return wrapper
# Width-multiplier presets: same architecture at 75%/50%/25% of the channels.
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
def mobilenet_v1_arg_scope(
    is_training=True,
    weight_decay=0.00004,
    stddev=0.09,
    regularize_depthwise=False,
    batch_norm_decay=0.9997,
    batch_norm_epsilon=0.001,
    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
    normalizer_fn=slim.batch_norm):
  """Defines the default MobilenetV1 arg scope.
  Args:
    is_training: Whether or not we're training the model. If this is set to
      None, the parameter is not added to the batch_norm arg_scope.
    weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the trunctated normal weight initializer.
    regularize_depthwise: Whether or not apply regularization on depthwise.
    batch_norm_decay: Decay for batch norm moving average.
    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
      in batch norm.
    batch_norm_updates_collections: Collection for the update ops for
      batch norm.
    normalizer_fn: Normalization function to apply after convolution.
  Returns:
    An `arg_scope` to use for the mobilenet v1 model.
  """
  batch_norm_params = {
      'center': True,
      'scale': True,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'updates_collections': batch_norm_updates_collections,
  }
  # Only pin is_training when explicitly provided, so an outer scope may
  # supply it instead.
  if is_training is not None:
    batch_norm_params['is_training'] = is_training
  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = slim.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc
| 46.908333 | 80 | 0.641322 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Conv defines a plain convolution layer; DepthSepConv a 3x3 depthwise
# convolution followed by a 1x1 pointwise convolution. `depth` is the number
# of output channels.
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# MOBILENETV1_CONV_DEFS specifies the MobileNet body; five stride-2 layers
# (indices 0, 2, 4, 6, 12) give a total output stride of 32.
MOBILENETV1_CONV_DEFS = [
    Conv(kernel=[3, 3], stride=2, depth=32),
    DepthSepConv(kernel=[3, 3], stride=1, depth=64),
    DepthSepConv(kernel=[3, 3], stride=2, depth=128),
    DepthSepConv(kernel=[3, 3], stride=1, depth=128),
    DepthSepConv(kernel=[3, 3], stride=2, depth=256),
    DepthSepConv(kernel=[3, 3], stride=1, depth=256),
    DepthSepConv(kernel=[3, 3], stride=2, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=1, depth=512),
    DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
    DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
  """Pads the input along the spatial dimensions independently of input size.

  Pads such that a 'VALID' convolution on the result has the same output
  dimensions as a 'SAME' convolution on the unpadded input.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
    rate: An integer, rate for atrous convolution.

  Returns:
    A tensor of size [batch, height_out, width_out, channels] with the input,
    either intact (if kernel_size == 1) or padded (if kernel_size > 1).
  """
  # Bug fix: the width component previously reused kernel_size[0], which is
  # only correct for square kernels; use kernel_size[1] for the width axis.
  kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
                           kernel_size[1] + (kernel_size[1] - 1) * (rate - 1)]
  pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
  pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
  pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
  padded_inputs = tf.pad(
      tensor=inputs,
      paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
                [0, 0]])
  return padded_inputs
def mobilenet_v1_base(inputs,
                      final_endpoint='Conv2d_13_pointwise',
                      min_depth=8,
                      depth_multiplier=1.0,
                      conv_defs=None,
                      output_stride=None,
                      use_explicit_padding=False,
                      scope=None):
  """Constructs a Mobilenet v1 network from inputs to `final_endpoint`.

  Args:
    inputs: a tensor of shape [batch_size, height, width, channels].
    final_endpoint: endpoint name ('Conv2d_0' ... 'Conv2d_13_pointwise') to
      construct the network up to.
    min_depth: Minimum depth (channels) for all convolution ops; active only
      when depth_multiplier < 1.
    depth_multiplier: Float multiplier (> 0) for the depth of all conv ops.
    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
    output_stride: Requested input/output spatial resolution ratio (8, 16 or
      32); atrous convolution is used to honor it when needed.
    use_explicit_padding: Use 'VALID' padding with explicit pre-padding so
      output dimensions match 'SAME' padding.
    scope: Optional variable_scope.

  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a dict of activations for external use.

  Raises:
    ValueError: for an unknown final_endpoint, depth_multiplier <= 0, or a
      disallowed output_stride.
  """
  # Used to find thinned depths for each layer.
  depth = lambda d: max(int(d * depth_multiplier), min_depth)
  end_points = {}
  if depth_multiplier <= 0:
    raise ValueError('depth_multiplier is not greater than zero.')
  if conv_defs is None:
    conv_defs = MOBILENETV1_CONV_DEFS
  if output_stride is not None and output_stride not in [8, 16, 32]:
    raise ValueError('Only allowed output_stride values are 8, 16, 32.')
  padding = 'SAME'
  if use_explicit_padding:
    padding = 'VALID'
  with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
      # current_stride is the running product of convolution strides; once it
      # reaches output_stride, subsequent layers switch to atrous convolution.
      current_stride = 1
      # The atrous convolution rate parameter.
      rate = 1
      net = inputs
      for i, conv_def in enumerate(conv_defs):
        end_point_base = 'Conv2d_%d' % i
        if output_stride is not None and current_stride == output_stride:
          # Target output_stride reached: use stride=1 and fold this unit's
          # stride into the atrous rate for subsequent layers.
          layer_stride = 1
          layer_rate = rate
          rate *= conv_def.stride
        else:
          layer_stride = conv_def.stride
          layer_rate = 1
          current_stride *= conv_def.stride
        if isinstance(conv_def, Conv):
          end_point = end_point_base
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel)
          net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
                            stride=conv_def.stride,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        elif isinstance(conv_def, DepthSepConv):
          end_point = end_point_base + '_depthwise'
          # By passing filters=None
          # separable_conv2d produces only a depthwise convolution layer
          if use_explicit_padding:
            net = _fixed_padding(net, conv_def.kernel, layer_rate)
          net = slim.separable_conv2d(net, None, conv_def.kernel,
                                      depth_multiplier=1,
                                      stride=layer_stride,
                                      rate=layer_rate,
                                      scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
          end_point = end_point_base + '_pointwise'
          net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
                            stride=1,
                            scope=end_point)
          end_points[end_point] = net
          if end_point == final_endpoint:
            return net, end_points
        else:
          # Bug fix: Conv/DepthSepConv namedtuples have no `ltype` field, so
          # the old `conv_def.ltype` raised AttributeError instead of the
          # intended ValueError; report the namedtuple's type name instead.
          raise ValueError('Unknown convolution type %s for layer %d'
                           % (type(conv_def).__name__, i))
  raise ValueError('Unknown final endpoint %s' % final_endpoint)
def mobilenet_v1(inputs,
                 num_classes=1000,
                 dropout_keep_prob=0.999,
                 is_training=True,
                 min_depth=8,
                 depth_multiplier=1.0,
                 conv_defs=None,
                 prediction_fn=slim.softmax,
                 spatial_squeeze=True,
                 reuse=None,
                 scope='MobilenetV1',
                 global_pool=False):
  """Mobilenet v1 model for classification.

  Builds the mobilenet_v1_base backbone, pools, applies dropout and a 1x1
  conv logits layer. If num_classes is 0/None the pooled features are
  returned instead of logits. Raises ValueError if `inputs` is not rank 4.

  Returns:
    (logits_or_features, end_points) where end_points maps network component
    names to their activations.
  """
  input_shape = inputs.get_shape().as_list()
  if len(input_shape) != 4:
    raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
                     len(input_shape))
  with tf.variable_scope(
      scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = mobilenet_v1_base(inputs, scope=scope,
                                          min_depth=min_depth,
                                          depth_multiplier=depth_multiplier,
                                          conv_defs=conv_defs)
      with tf.variable_scope('Logits'):
        if global_pool:
          # Global average pooling.
          net = tf.reduce_mean(
              input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
          end_points['global_pool'] = net
        else:
          # Pooling with a fixed kernel size.
          kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a')
          end_points['AvgPool_1a'] = net
        if not num_classes:
          # Headless mode: return pooled features without dropout/logits.
          return net, end_points
        # 1 x 1 x 1024
        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                             normalizer_fn=None, scope='Conv2d_1c_1x1')
        if spatial_squeeze:
          logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
      end_points['Logits'] = logits
      if prediction_fn:
        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
  return logits, end_points
# Default square input resolution associated with this model function.
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
  """Return a functools.partial of *func* that keeps func's metadata
  (__name__, __doc__, ...) via functools.update_wrapper."""
  partial_func = functools.partial(func, *args, **kwargs)
  functools.update_wrapper(partial_func, func)
  return partial_func
# Width-multiplier presets: same architecture at 75%/50%/25% of the channels.
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Clamp kernel_size to the input's static spatial dims, when known."""
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    # Spatial dims unknown at graph construction: assume input is large enough.
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [min(shape[1], kernel_size[0]),
                       min(shape[2], kernel_size[1])]
  return kernel_size_out
def mobilenet_v1_arg_scope(
    is_training=True,
    weight_decay=0.00004,
    stddev=0.09,
    regularize_depthwise=False,
    batch_norm_decay=0.9997,
    batch_norm_epsilon=0.001,
    batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
    normalizer_fn=slim.batch_norm):
  """Defines the default MobilenetV1 arg scope: truncated-normal weight init,
  relu6 activations, batch-norm normalization and L2 weight decay (applied to
  depthwise convolutions only when regularize_depthwise is True). Returns an
  `arg_scope` for the mobilenet v1 model."""
  batch_norm_params = {
      'center': True,
      'scale': True,
      'decay': batch_norm_decay,
      'epsilon': batch_norm_epsilon,
      'updates_collections': batch_norm_updates_collections,
  }
  # Only pin is_training when explicitly provided, so an outer scope may
  # supply it instead.
  if is_training is not None:
    batch_norm_params['is_training'] = is_training
  # Set weight_decay for weights in Conv and DepthSepConv layers.
  weights_init = tf.truncated_normal_initializer(stddev=stddev)
  regularizer = slim.l2_regularizer(weight_decay)
  if regularize_depthwise:
    depthwise_regularizer = regularizer
  else:
    depthwise_regularizer = None
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      weights_initializer=weights_init,
                      activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):
    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
        with slim.arg_scope([slim.separable_conv2d],
                            weights_regularizer=depthwise_regularizer) as sc:
          return sc
| true | true |
f714d444f86b7ebdebc4cfbcca8048b03e1c4ab5 | 494 | py | Python | src/model/shared/data/ooo_data.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | src/model/shared/data/ooo_data.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | src/model/shared/data/ooo_data.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from typing import List, Dict
from .items.data_items import DataItems
from .shared_data import BaseData
from .full_imports import FullImports
from .from_import import FromImport
class Data(BaseData):
from_imports: List[FromImport]
from_imports_typing: List[FromImport]
extends_map: Dict[str, str]
quote: List[str]
typings: List[str]
requires_typing: bool
full_imports: FullImports
imports: List[str]
extends: List[str]
items: DataItems
| 24.7 | 41 | 0.744939 |
from typing import List, Dict
from .items.data_items import DataItems
from .shared_data import BaseData
from .full_imports import FullImports
from .from_import import FromImport
class Data(BaseData):
from_imports: List[FromImport]
from_imports_typing: List[FromImport]
extends_map: Dict[str, str]
quote: List[str]
typings: List[str]
requires_typing: bool
full_imports: FullImports
imports: List[str]
extends: List[str]
items: DataItems
| true | true |
f714d5865cfb4cce0b782ba39d92e5596aa4c59d | 4,297 | py | Python | python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | python_modules/libraries/dagster-fivetran/dagster_fivetran/asset_defs.py | asamoal/dagster | 08fad28e4b608608ce090ce2e8a52c2cf9dd1b64 | [
"Apache-2.0"
] | null | null | null | from typing import List, Optional
from dagster_fivetran.resources import DEFAULT_POLL_INTERVAL
from dagster_fivetran.utils import generate_materializations
from dagster import AssetKey, AssetsDefinition, Out, Output
from dagster import _check as check
from dagster import multi_asset
from dagster.utils.backcompat import experimental
@experimental
def build_fivetran_assets(
    connector_id: str,
    destination_tables: List[str],
    poll_interval: float = DEFAULT_POLL_INTERVAL,
    poll_timeout: Optional[float] = None,
    io_manager_key: Optional[str] = None,
    asset_key_prefix: Optional[List[str]] = None,
) -> List[AssetsDefinition]:
    """
    Build a set of assets for a given Fivetran connector.
    Returns an AssetsDefinition which connects the specified ``destination_tables`` to the
    computation that
    will update them. Internally, executes a Fivetran sync for a given ``connector_id``, and
    polls until that sync completes, raising an error if it is unsuccessful. Requires the use of the
    :py:class:`~dagster_fivetran.fivetran_resource`, which allows it to communicate with the
    Fivetran API.
    Args:
        connector_id (str): The Fivetran Connector ID that this op will sync. You can retrieve this
            value from the "Setup" tab of a given connector in the Fivetran UI.
        destination_tables (List[str]): `schema_name.table_name` for each table that you want to be
            represented in the Dagster asset graph for this connection.
        poll_interval (float): The time (in seconds) that will be waited between successive polls.
        poll_timeout (Optional[float]): The maximum time that will waited before this operation is
            timed out. By default, this will never time out.
        io_manager_key (Optional[str]): The io_manager to be used to handle each of these assets.
        asset_key_prefix (Optional[List[str]]): A prefix for the asset keys inside this asset.
            If left blank, assets will have a key of `AssetKey([schema_name, table_name])`.
    Examples:
    .. code-block:: python
        from dagster import AssetKey, build_assets_job
        from dagster_fivetran import fivetran_resource
        from dagster_fivetran.asset_defs import build_fivetran_assets
        my_fivetran_resource = fivetran_resource.configured(
            {
                "api_key": {"env": "FIVETRAN_API_KEY"},
                "api_secret": {"env": "FIVETRAN_API_SECRET"},
            }
        )
        fivetran_assets = build_fivetran_assets(
            connector_id="foobar",
            destination_tables=["schema1.table1", "schema2.table2"],
        )
        my_fivetran_job = build_assets_job(
            "my_fivetran_job",
            assets=fivetran_assets,
            resource_defs={"fivetran": my_fivetran_resource}
        )
    """

    asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)

    # One asset key per destination table: prefix + [schema_name, table_name].
    tracked_asset_keys = {
        AssetKey(asset_key_prefix + table.split(".")) for table in destination_tables
    }

    @multi_asset(
        name=f"fivetran_sync_{connector_id}",
        outs={
            "_".join(key.path): Out(io_manager_key=io_manager_key, asset_key=key)
            for key in tracked_asset_keys
        },
        required_resource_keys={"fivetran"},
        compute_kind="fivetran",
    )
    def _assets(context):
        # Kick off the sync and block (polling) until it finishes or times out.
        fivetran_output = context.resources.fivetran.sync_and_poll(
            connector_id=connector_id,
            poll_interval=poll_interval,
            poll_timeout=poll_timeout,
        )
        for materialization in generate_materializations(
            fivetran_output, asset_key_prefix=asset_key_prefix
        ):
            # scan through all tables actually created, if it was expected then emit an Output.
            # otherwise, emit a runtime AssetMaterialization
            if materialization.asset_key in tracked_asset_keys:
                yield Output(
                    value=None,
                    output_name="_".join(materialization.asset_key.path),
                    metadata={
                        entry.label: entry.entry_data for entry in materialization.metadata_entries
                    },
                )
            else:
                yield materialization

    return [_assets]
| 39.063636 | 100 | 0.667442 | from typing import List, Optional
from dagster_fivetran.resources import DEFAULT_POLL_INTERVAL
from dagster_fivetran.utils import generate_materializations
from dagster import AssetKey, AssetsDefinition, Out, Output
from dagster import _check as check
from dagster import multi_asset
from dagster.utils.backcompat import experimental
@experimental
def build_fivetran_assets(
connector_id: str,
destination_tables: List[str],
poll_interval: float = DEFAULT_POLL_INTERVAL,
poll_timeout: Optional[float] = None,
io_manager_key: Optional[str] = None,
asset_key_prefix: Optional[List[str]] = None,
) -> List[AssetsDefinition]:
asset_key_prefix = check.opt_list_param(asset_key_prefix, "asset_key_prefix", of_type=str)
tracked_asset_keys = {
AssetKey(asset_key_prefix + table.split(".")) for table in destination_tables
}
@multi_asset(
name=f"fivetran_sync_{connector_id}",
outs={
"_".join(key.path): Out(io_manager_key=io_manager_key, asset_key=key)
for key in tracked_asset_keys
},
required_resource_keys={"fivetran"},
compute_kind="fivetran",
)
def _assets(context):
fivetran_output = context.resources.fivetran.sync_and_poll(
connector_id=connector_id,
poll_interval=poll_interval,
poll_timeout=poll_timeout,
)
for materialization in generate_materializations(
fivetran_output, asset_key_prefix=asset_key_prefix
):
if materialization.asset_key in tracked_asset_keys:
yield Output(
value=None,
output_name="_".join(materialization.asset_key.path),
metadata={
entry.label: entry.entry_data for entry in materialization.metadata_entries
},
)
else:
yield materialization
return [_assets]
| true | true |
f714d5a168b4a464f6eba8acff23787cdd077327 | 4,848 | py | Python | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | [
"BSD-3-Clause"
] | null | null | null | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | [
"BSD-3-Clause"
] | null | null | null | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import os
import numpy as np
import random
import math
from skimage import io
import torch
import torch.utils.data as data
import torchfile
# from utils.utils import *
from utils.imutils import *
from utils.transforms import *
class W300(data.Dataset):
    """300W-LP facial-landmark dataset (68 landmarks, '2D' or '3D' annotations).

    Expects ``args.data`` to contain per-subset image folders, a ``landmarks``
    folder of torch7 ``.t7`` files, and ``train.txt`` / ``test.txt`` sample
    lists.  # NOTE(review): layout inferred from the path joins below — confirm
    against the dataset-preparation script.
    """
    def __init__(self, args, split):
        # Fixed number of facial landmarks per sample.
        self.nParts = 68
        # '2D' selects annotation row 0; any other value selects row 1 (see generateSampleFace).
        self.pointType = args.pointType
        self.img_folder = args.data
        self.split = split
        self.is_train = True if self.split == 'train' else False
        self.anno = self._getDataFaces(self.is_train)
        self.total = len(self.anno)
        # Augmentation ranges used only in training mode.
        self.scale_factor = args.scale_factor
        self.rot_factor = args.rot_factor
        # Per-channel normalization statistics (computed once and cached on disk).
        self.mean, self.std = self._comput_mean()
    def _getDataFaces(self, is_train):
        """Read the sample list for this split from train.txt or test.txt."""
        base_dir = self.img_folder
        dirs = os.listdir(base_dir)  # NOTE(review): unused — kept for byte-compatibility
        lines = []
        vallines = []
        if is_train:
            fid = open(os.path.join(base_dir, 'train.txt'), 'r')
            for line in fid.readlines():
                lines.append(line.strip())
            fid.close()
        else:
            fid = open(os.path.join(base_dir, 'test.txt'), 'r')
            for line in fid.readlines():
                vallines.append(line.strip())
            fid.close()
        if is_train:
            print('=> loaded train set, {} images were found'.format(len(lines)))
            return lines
        else:
            print('=> loaded validation set, {} images were found'.format(len(vallines)))
            return vallines
    def __len__(self):
        return self.total
    def __getitem__(self, index):
        # NOTE(review): stashing the last sample's pts/c/s on self is not safe with
        # multi-worker DataLoaders — each worker mutates its own copy.
        inp, out, pts, c, s = self.generateSampleFace(index)
        self.pts, self.c, self.s = pts, c, s
        if self.is_train:
            return inp, out
        else:
            # Validation additionally returns metadata needed to undo the crop transform.
            meta = {'index': index, 'center': c, 'scale': s, 'pts': pts,}
            return inp, out, meta
    def generateSampleFace(self, idx):
        """Load image + landmarks for sample *idx*, augment (train only), and
        return (input crop, heatmap target, raw points, center, scale)."""
        sf = self.scale_factor
        rf = self.rot_factor
        # Landmarks stored as torch7 tensors; filename derived from the annotation entry.
        main_pts = torchfile.load(
            os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],
                         self.anno[idx][:-4] + '.t7'))
        pts = main_pts[0] if self.pointType == '2D' else main_pts[1]
        # Fixed crop center/scale; assumes 450x450 source images — TODO confirm.
        c = torch.Tensor((450 / 2, 450 / 2 + 50))
        s = 1.8
        img = load_image(
            os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +
                         '.jpg'))
        r = 0
        if self.is_train:
            # Random scale jitter, rotation (60% of the time), and horizontal flip (50%).
            s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()
                pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
                c[0] = img.size(2) - c[0]
            # Independent per-channel brightness jitter in [0.7, 1.3], clamped to [0, 1].
            img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
            img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
        inp = crop(img, c, s, [256, 256], rot=r)
        inp = color_normalize(inp, self.mean, self.std)
        tpts = pts.clone()
        # One 64x64 Gaussian heatmap per landmark; landmarks with x <= 0 are treated as invisible.
        out = torch.zeros(self.nParts, 64, 64)
        for i in range(self.nParts):
            if tpts[i, 0] > 0:
                tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
                out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)
        return inp, out, pts, c, s
    def _comput_mean(self):
        """Load (or compute and cache) per-channel mean/std over the training set."""
        meanstd_file = './data/300W_LP/mean.pth.tar'
        if os.path.isfile(meanstd_file):
            ms = torch.load(meanstd_file)
        else:
            print("\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...")
            mean = torch.zeros(3)
            std = torch.zeros(3)
            # NOTE(review): if the cache file is missing and this is a validation split,
            # zero mean/std are saved and reused — confirm the train split is built first.
            if self.is_train:
                for i in range(self.total):
                    a = self.anno[i]
                    img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],
                                            self.anno[i][:-8] + '.jpg')
                    img = load_image(img_path)
                    mean += img.view(img.size(0), -1).mean(1)
                    std += img.view(img.size(0), -1).std(1)
                mean /= self.total
                std /= self.total
            ms = {
                'mean': mean,
                'std': std,
            }
            torch.save(ms, meanstd_file)
        if self.is_train:
            print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
            print('\tStd: %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
        return ms['mean'], ms['std']
| 35.130435 | 111 | 0.514233 | from __future__ import print_function
import os
import numpy as np
import random
import math
from skimage import io
import torch
import torch.utils.data as data
import torchfile
from utils.imutils import *
from utils.transforms import *
class W300(data.Dataset):
def __init__(self, args, split):
self.nParts = 68
self.pointType = args.pointType
self.img_folder = args.data
self.split = split
self.is_train = True if self.split == 'train' else False
self.anno = self._getDataFaces(self.is_train)
self.total = len(self.anno)
self.scale_factor = args.scale_factor
self.rot_factor = args.rot_factor
self.mean, self.std = self._comput_mean()
def _getDataFaces(self, is_train):
base_dir = self.img_folder
dirs = os.listdir(base_dir)
lines = []
vallines = []
if is_train:
fid = open(os.path.join(base_dir, 'train.txt'), 'r')
for line in fid.readlines():
lines.append(line.strip())
fid.close()
else:
fid = open(os.path.join(base_dir, 'test.txt'), 'r')
for line in fid.readlines():
vallines.append(line.strip())
fid.close()
if is_train:
print('=> loaded train set, {} images were found'.format(len(lines)))
return lines
else:
print('=> loaded validation set, {} images were found'.format(len(vallines)))
return vallines
def __len__(self):
return self.total
def __getitem__(self, index):
inp, out, pts, c, s = self.generateSampleFace(index)
self.pts, self.c, self.s = pts, c, s
if self.is_train:
return inp, out
else:
meta = {'index': index, 'center': c, 'scale': s, 'pts': pts,}
return inp, out, meta
def generateSampleFace(self, idx):
sf = self.scale_factor
rf = self.rot_factor
main_pts = torchfile.load(
os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],
self.anno[idx][:-4] + '.t7'))
pts = main_pts[0] if self.pointType == '2D' else main_pts[1]
c = torch.Tensor((450 / 2, 450 / 2 + 50))
s = 1.8
img = load_image(
os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +
'.jpg'))
r = 0
if self.is_train:
s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
if random.random() <= 0.5:
img = torch.from_numpy(fliplr(img.numpy())).float()
pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
c[0] = img.size(2) - c[0]
img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
inp = crop(img, c, s, [256, 256], rot=r)
inp = color_normalize(inp, self.mean, self.std)
tpts = pts.clone()
out = torch.zeros(self.nParts, 64, 64)
for i in range(self.nParts):
if tpts[i, 0] > 0:
tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)
return inp, out, pts, c, s
def _comput_mean(self):
meanstd_file = './data/300W_LP/mean.pth.tar'
if os.path.isfile(meanstd_file):
ms = torch.load(meanstd_file)
else:
print("\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...")
mean = torch.zeros(3)
std = torch.zeros(3)
if self.is_train:
for i in range(self.total):
a = self.anno[i]
img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],
self.anno[i][:-8] + '.jpg')
img = load_image(img_path)
mean += img.view(img.size(0), -1).mean(1)
std += img.view(img.size(0), -1).std(1)
mean /= self.total
std /= self.total
ms = {
'mean': mean,
'std': std,
}
torch.save(ms, meanstd_file)
if self.is_train:
print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
print('\tStd: %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
return ms['mean'], ms['std']
| true | true |
f714d6667d827ed794b7897b3c342b7996ae0f37 | 12,279 | py | Python | tests/test_datetime_parse.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 6 | 2021-08-11T11:37:59.000Z | 2021-11-12T01:33:11.000Z | tests/test_datetime_parse.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 189 | 2020-07-12T08:13:29.000Z | 2022-03-28T01:16:29.000Z | tests/test_datetime_parse.py | jasujm/pydantic | cc1cb4826c74ac5b651ef2d80c3478428a9950ca | [
"MIT"
] | 2 | 2021-11-23T16:28:21.000Z | 2021-11-23T16:28:33.000Z | """
Stolen from https://github.com/django/django/blob/master/tests/utils_tests/test_dateparse.py at
9718fa2e8abe430c3526a9278dd976443d4ae3c6
Changed to:
* use standard pytest layout
* parametrize tests
"""
from datetime import date, datetime, time, timedelta, timezone
import pytest
from pydantic import BaseModel, ValidationError, errors
from pydantic.datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
def create_tz(minutes):
    """Return a fixed-offset tzinfo *minutes* ahead of UTC (negative = behind)."""
    offset = timedelta(minutes=minutes)
    return timezone(offset)
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('1494012444.883309', date(2017, 5, 5)),
(b'1494012444.883309', date(2017, 5, 5)),
(1_494_012_444.883_309, date(2017, 5, 5)),
('1494012444', date(2017, 5, 5)),
(1_494_012_444, date(2017, 5, 5)),
(0, date(1970, 1, 1)),
('2012-04-23', date(2012, 4, 23)),
(b'2012-04-23', date(2012, 4, 23)),
('2012-4-9', date(2012, 4, 9)),
(date(2012, 4, 9), date(2012, 4, 9)),
(datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
# Invalid inputs
('x20120423', errors.DateError),
('2012-04-56', errors.DateError),
(19_999_999_999, date(2603, 10, 11)), # just before watershed
(20_000_000_001, date(1970, 8, 20)), # just after watershed
(1_549_316_052, date(2019, 2, 4)), # nowish in s
(1_549_316_052_104, date(2019, 2, 4)), # nowish in ms
(1_549_316_052_104_324, date(2019, 2, 4)), # nowish in μs
(1_549_316_052_104_324_096, date(2019, 2, 4)), # nowish in ns
('infinity', date(9999, 12, 31)),
('inf', date(9999, 12, 31)),
(float('inf'), date(9999, 12, 31)),
('infinity ', date(9999, 12, 31)),
(int('1' + '0' * 100), date(9999, 12, 31)),
(1e1000, date(9999, 12, 31)),
('-infinity', date(1, 1, 1)),
('-inf', date(1, 1, 1)),
('nan', ValueError),
],
)
def test_date_parsing(value, result):
    """Check ``parse_date`` against one parametrized case.

    ``result`` is either the expected ``date`` or an exception class that
    ``parse_date`` must raise for this ``value``.
    """
    # isinstance(result, type) is the idiomatic class check and, unlike
    # ``type(result) == type``, also works for classes with a custom metaclass.
    if isinstance(result, type) and issubclass(result, Exception):
        with pytest.raises(result):
            parse_date(value)
    else:
        assert parse_date(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
('09:15:00', time(9, 15)),
('10:10', time(10, 10)),
('10:20:30.400', time(10, 20, 30, 400_000)),
(b'10:20:30.400', time(10, 20, 30, 400_000)),
('4:8:16', time(4, 8, 16)),
(time(4, 8, 16), time(4, 8, 16)),
(3610, time(1, 0, 10)),
(3600.5, time(1, 0, 0, 500000)),
(86400 - 1, time(23, 59, 59)),
('11:05:00-05:30', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00-0530', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00Z', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05:00+00', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05-06', time(11, 5, 0, tzinfo=create_tz(-360))),
('11:05+06', time(11, 5, 0, tzinfo=create_tz(360))),
# Invalid inputs
(86400, errors.TimeError),
('xxx', errors.TimeError),
('091500', errors.TimeError),
(b'091500', errors.TimeError),
('09:15:90', errors.TimeError),
('11:05:00Y', errors.TimeError),
('11:05:00-25:00', errors.TimeError),
],
)
def test_time_parsing(value, result):
    """Check ``parse_time`` against one parametrized case.

    ``result`` is either the expected ``time`` or an exception class that
    ``parse_time`` must raise for this ``value``.
    """
    # Use the parametrized class instead of hard-coding errors.TimeError, so a
    # future case expecting a different exception is not silently mis-checked;
    # this also matches the sibling date/datetime tests.
    if isinstance(result, type) and issubclass(result, Exception):
        with pytest.raises(result):
            parse_time(value)
    else:
        assert parse_time(value) == result
@pytest.mark.parametrize(
'value,result',
[
# Valid inputs
# values in seconds
('1494012444.883309', datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
(1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
('1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(b'1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
# values in ms
('1494012444000.883309', datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
('-1494012444000.883309', datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
(1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
('2012-04-23T09:15:00Z', datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
('2012-4-9 4:8:16-0320', datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
('2012-04-23T10:20:30.400+02:30', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
('2012-04-23T10:20:30.400+02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
('2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(b'2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(datetime(2017, 5, 5), datetime(2017, 5, 5)),
(0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
# Invalid inputs
('x20120423091500', errors.DateTimeError),
('2012-04-56T09:15:90', errors.DateTimeError),
('2012-04-23T11:05:00-25:00', errors.DateTimeError),
(19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)), # just before watershed
(20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)), # just after watershed
(1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)), # nowish in s
(1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)), # nowish in ms
(1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in μs
(1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)), # nowish in ns
('infinity', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf ', datetime(9999, 12, 31, 23, 59, 59, 999999)),
(1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
(float('inf'), datetime(9999, 12, 31, 23, 59, 59, 999999)),
('-infinity', datetime(1, 1, 1, 0, 0)),
('-inf', datetime(1, 1, 1, 0, 0)),
('nan', ValueError),
],
)
def test_datetime_parsing(value, result):
    """Check ``parse_datetime`` against one parametrized case.

    ``result`` is either the expected ``datetime`` or an exception class that
    ``parse_datetime`` must raise for this ``value``.
    """
    # isinstance(result, type) is the idiomatic class check and, unlike
    # ``type(result) == type``, also works for classes with a custom metaclass.
    if isinstance(result, type) and issubclass(result, Exception):
        with pytest.raises(result):
            parse_datetime(value)
    else:
        assert parse_datetime(value) == result
@pytest.mark.parametrize(
'delta',
[
timedelta(days=4, minutes=15, seconds=30, milliseconds=100), # fractions of seconds
timedelta(hours=10, minutes=15, seconds=30), # hours, minutes, seconds
timedelta(days=4, minutes=15, seconds=30), # multiple days
timedelta(days=1, minutes=00, seconds=00), # single day
timedelta(days=-4, minutes=15, seconds=30), # negative durations
timedelta(minutes=15, seconds=30), # minute & seconds
timedelta(seconds=30), # seconds
],
)
def test_parse_python_format(delta):
    """Round-trip: both the timedelta itself and its ``str()`` form parse back to it."""
    for candidate in (delta, str(delta)):
        assert parse_duration(candidate) == delta
@pytest.mark.parametrize(
'value,result',
[
# seconds
(timedelta(seconds=30), timedelta(seconds=30)),
('30', timedelta(seconds=30)),
(30, timedelta(seconds=30)),
(30.1, timedelta(seconds=30, milliseconds=100)),
# minutes seconds
('15:30', timedelta(minutes=15, seconds=30)),
('5:30', timedelta(minutes=5, seconds=30)),
# hours minutes seconds
('10:15:30', timedelta(hours=10, minutes=15, seconds=30)),
('1:15:30', timedelta(hours=1, minutes=15, seconds=30)),
('100:200:300', timedelta(hours=100, minutes=200, seconds=300)),
# days
('4 15:30', timedelta(days=4, minutes=15, seconds=30)),
('4 10:15:30', timedelta(days=4, hours=10, minutes=15, seconds=30)),
# fractions of seconds
('15:30.1', timedelta(minutes=15, seconds=30, milliseconds=100)),
('15:30.01', timedelta(minutes=15, seconds=30, milliseconds=10)),
('15:30.001', timedelta(minutes=15, seconds=30, milliseconds=1)),
('15:30.0001', timedelta(minutes=15, seconds=30, microseconds=100)),
('15:30.00001', timedelta(minutes=15, seconds=30, microseconds=10)),
('15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
(b'15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
# negative
('-4 15:30', timedelta(days=-4, minutes=15, seconds=30)),
('-172800', timedelta(days=-2)),
('-15:30', timedelta(minutes=-15, seconds=30)),
('-1:15:30', timedelta(hours=-1, minutes=15, seconds=30)),
('-30.1', timedelta(seconds=-30, milliseconds=-100)),
# iso_8601
('P4Y', errors.DurationError),
('P4M', errors.DurationError),
('P4W', errors.DurationError),
('P4D', timedelta(days=4)),
('P0.5D', timedelta(hours=12)),
('PT5H', timedelta(hours=5)),
('PT5M', timedelta(minutes=5)),
('PT5S', timedelta(seconds=5)),
('PT0.000005S', timedelta(microseconds=5)),
(b'PT0.000005S', timedelta(microseconds=5)),
],
)
def test_parse_durations(value, result):
    """Check ``parse_duration`` against one parametrized case.

    ``result`` is either the expected ``timedelta`` or an exception class that
    ``parse_duration`` must raise for this ``value``.
    """
    # Use the parametrized class instead of hard-coding errors.DurationError, so a
    # future case expecting a different exception is not silently mis-checked;
    # this also matches the sibling date/time/datetime tests.
    if isinstance(result, type) and issubclass(result, Exception):
        with pytest.raises(result):
            parse_duration(value)
    else:
        assert parse_duration(value) == result
@pytest.mark.parametrize(
'field, value, error_message',
[
('dt', [], 'invalid type; expected datetime, string, bytes, int or float'),
('dt', {}, 'invalid type; expected datetime, string, bytes, int or float'),
('dt', object, 'invalid type; expected datetime, string, bytes, int or float'),
('d', [], 'invalid type; expected date, string, bytes, int or float'),
('d', {}, 'invalid type; expected date, string, bytes, int or float'),
('d', object, 'invalid type; expected date, string, bytes, int or float'),
('t', [], 'invalid type; expected time, string, bytes, int or float'),
('t', {}, 'invalid type; expected time, string, bytes, int or float'),
('t', object, 'invalid type; expected time, string, bytes, int or float'),
('td', [], 'invalid type; expected timedelta, string, bytes, int or float'),
('td', {}, 'invalid type; expected timedelta, string, bytes, int or float'),
('td', object, 'invalid type; expected timedelta, string, bytes, int or float'),
],
)
def test_model_type_errors(field, value, error_message):
    """Unparseable container/object inputs must produce exactly one type_error."""
    class Model(BaseModel):
        dt: datetime = None
        d: date = None
        t: time = None
        td: timedelta = None
    with pytest.raises(ValidationError) as exc:
        Model(**{field: value})
    reported = exc.value.errors()
    assert len(reported) == 1
    expected = {'loc': (field,), 'type': 'type_error', 'msg': error_message}
    assert reported[0] == expected
# Bug fix: the original list was ['dt', 'd', 't', 'dt'] — 'dt' duplicated and
# 'td' never exercised, although the model defines it and the sibling
# test_model_type_errors covers all four fields including 'td'.
@pytest.mark.parametrize('field', ['dt', 'd', 't', 'td'])
def test_unicode_decode_error(field):
    """Undecodable bytes input must surface as a value_error.unicodedecode."""
    class Model(BaseModel):
        dt: datetime = None
        d: date = None
        t: time = None
        td: timedelta = None
    with pytest.raises(ValidationError) as exc_info:
        Model(**{field: b'\x81'})  # 0x81 is not valid UTF-8
    assert len(exc_info.value.errors()) == 1
    error = exc_info.value.errors()[0]
    assert error == {
        'loc': (field,),
        'type': 'value_error.unicodedecode',
        'msg': "'utf-8' codec can't decode byte 0x81 in position 0: invalid start byte",
    }
def test_nan():
    """'nan' parses as a float but has no timestamp meaning: both fields must error."""
    class Model(BaseModel):
        dt: datetime
        d: date
    with pytest.raises(ValidationError) as exc:
        Model(dt='nan', d='nan')
    expected = [
        {'loc': (name,), 'msg': 'cannot convert float NaN to integer', 'type': 'value_error'}
        for name in ('dt', 'd')
    ]
    assert exc.value.errors() == expected
| 42.05137 | 116 | 0.58425 | from datetime import date, datetime, time, timedelta, timezone
import pytest
from pydantic import BaseModel, ValidationError, errors
from pydantic.datetime_parse import parse_date, parse_datetime, parse_duration, parse_time
def create_tz(minutes):
return timezone(timedelta(minutes=minutes))
@pytest.mark.parametrize(
'value,result',
[
('1494012444.883309', date(2017, 5, 5)),
(b'1494012444.883309', date(2017, 5, 5)),
(1_494_012_444.883_309, date(2017, 5, 5)),
('1494012444', date(2017, 5, 5)),
(1_494_012_444, date(2017, 5, 5)),
(0, date(1970, 1, 1)),
('2012-04-23', date(2012, 4, 23)),
(b'2012-04-23', date(2012, 4, 23)),
('2012-4-9', date(2012, 4, 9)),
(date(2012, 4, 9), date(2012, 4, 9)),
(datetime(2012, 4, 9, 12, 15), date(2012, 4, 9)),
('x20120423', errors.DateError),
('2012-04-56', errors.DateError),
(19_999_999_999, date(2603, 10, 11)),
(20_000_000_001, date(1970, 8, 20)),
(1_549_316_052, date(2019, 2, 4)),
(1_549_316_052_104, date(2019, 2, 4)),
(1_549_316_052_104_324, date(2019, 2, 4)),
(1_549_316_052_104_324_096, date(2019, 2, 4)),
('infinity', date(9999, 12, 31)),
('inf', date(9999, 12, 31)),
(float('inf'), date(9999, 12, 31)),
('infinity ', date(9999, 12, 31)),
(int('1' + '0' * 100), date(9999, 12, 31)),
(1e1000, date(9999, 12, 31)),
('-infinity', date(1, 1, 1)),
('-inf', date(1, 1, 1)),
('nan', ValueError),
],
)
def test_date_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_date(value)
else:
assert parse_date(value) == result
@pytest.mark.parametrize(
'value,result',
[
('09:15:00', time(9, 15)),
('10:10', time(10, 10)),
('10:20:30.400', time(10, 20, 30, 400_000)),
(b'10:20:30.400', time(10, 20, 30, 400_000)),
('4:8:16', time(4, 8, 16)),
(time(4, 8, 16), time(4, 8, 16)),
(3610, time(1, 0, 10)),
(3600.5, time(1, 0, 0, 500000)),
(86400 - 1, time(23, 59, 59)),
('11:05:00-05:30', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00-0530', time(11, 5, 0, tzinfo=create_tz(-330))),
('11:05:00Z', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05:00+00', time(11, 5, 0, tzinfo=timezone.utc)),
('11:05-06', time(11, 5, 0, tzinfo=create_tz(-360))),
('11:05+06', time(11, 5, 0, tzinfo=create_tz(360))),
(86400, errors.TimeError),
('xxx', errors.TimeError),
('091500', errors.TimeError),
(b'091500', errors.TimeError),
('09:15:90', errors.TimeError),
('11:05:00Y', errors.TimeError),
('11:05:00-25:00', errors.TimeError),
],
)
def test_time_parsing(value, result):
if result == errors.TimeError:
with pytest.raises(errors.TimeError):
parse_time(value)
else:
assert parse_time(value) == result
@pytest.mark.parametrize(
'value,result',
[
('1494012444.883309', datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
(1_494_012_444.883_309, datetime(2017, 5, 5, 19, 27, 24, 883_309, tzinfo=timezone.utc)),
('1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(b'1494012444', datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
(1_494_012_444, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
('1494012444000.883309', datetime(2017, 5, 5, 19, 27, 24, 883, tzinfo=timezone.utc)),
('-1494012444000.883309', datetime(1922, 8, 29, 4, 32, 35, 999117, tzinfo=timezone.utc)),
(1_494_012_444_000, datetime(2017, 5, 5, 19, 27, 24, tzinfo=timezone.utc)),
('2012-04-23T09:15:00', datetime(2012, 4, 23, 9, 15)),
('2012-4-9 4:8:16', datetime(2012, 4, 9, 4, 8, 16)),
('2012-04-23T09:15:00Z', datetime(2012, 4, 23, 9, 15, 0, 0, timezone.utc)),
('2012-4-9 4:8:16-0320', datetime(2012, 4, 9, 4, 8, 16, 0, create_tz(-200))),
('2012-04-23T10:20:30.400+02:30', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(150))),
('2012-04-23T10:20:30.400+02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(120))),
('2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(b'2012-04-23T10:20:30.400-02', datetime(2012, 4, 23, 10, 20, 30, 400_000, create_tz(-120))),
(datetime(2017, 5, 5), datetime(2017, 5, 5)),
(0, datetime(1970, 1, 1, 0, 0, 0, tzinfo=timezone.utc)),
('x20120423091500', errors.DateTimeError),
('2012-04-56T09:15:90', errors.DateTimeError),
('2012-04-23T11:05:00-25:00', errors.DateTimeError),
(19_999_999_999, datetime(2603, 10, 11, 11, 33, 19, tzinfo=timezone.utc)),
(20_000_000_001, datetime(1970, 8, 20, 11, 33, 20, 1000, tzinfo=timezone.utc)),
(1_549_316_052, datetime(2019, 2, 4, 21, 34, 12, 0, tzinfo=timezone.utc)),
(1_549_316_052_104, datetime(2019, 2, 4, 21, 34, 12, 104_000, tzinfo=timezone.utc)),
(1_549_316_052_104_324, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)),
(1_549_316_052_104_324_096, datetime(2019, 2, 4, 21, 34, 12, 104_324, tzinfo=timezone.utc)),
('infinity', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf', datetime(9999, 12, 31, 23, 59, 59, 999999)),
('inf ', datetime(9999, 12, 31, 23, 59, 59, 999999)),
(1e50, datetime(9999, 12, 31, 23, 59, 59, 999999)),
(float('inf'), datetime(9999, 12, 31, 23, 59, 59, 999999)),
('-infinity', datetime(1, 1, 1, 0, 0)),
('-inf', datetime(1, 1, 1, 0, 0)),
('nan', ValueError),
],
)
def test_datetime_parsing(value, result):
if type(result) == type and issubclass(result, Exception):
with pytest.raises(result):
parse_datetime(value)
else:
assert parse_datetime(value) == result
@pytest.mark.parametrize(
'delta',
[
timedelta(days=4, minutes=15, seconds=30, milliseconds=100),
timedelta(hours=10, minutes=15, seconds=30),
timedelta(days=4, minutes=15, seconds=30),
timedelta(days=1, minutes=00, seconds=00),
timedelta(days=-4, minutes=15, seconds=30),
timedelta(minutes=15, seconds=30),
timedelta(seconds=30),
],
)
def test_parse_python_format(delta):
assert parse_duration(delta) == delta
assert parse_duration(str(delta)) == delta
@pytest.mark.parametrize(
'value,result',
[
(timedelta(seconds=30), timedelta(seconds=30)),
('30', timedelta(seconds=30)),
(30, timedelta(seconds=30)),
(30.1, timedelta(seconds=30, milliseconds=100)),
('15:30', timedelta(minutes=15, seconds=30)),
('5:30', timedelta(minutes=5, seconds=30)),
('10:15:30', timedelta(hours=10, minutes=15, seconds=30)),
('1:15:30', timedelta(hours=1, minutes=15, seconds=30)),
('100:200:300', timedelta(hours=100, minutes=200, seconds=300)),
('4 15:30', timedelta(days=4, minutes=15, seconds=30)),
('4 10:15:30', timedelta(days=4, hours=10, minutes=15, seconds=30)),
('15:30.1', timedelta(minutes=15, seconds=30, milliseconds=100)),
('15:30.01', timedelta(minutes=15, seconds=30, milliseconds=10)),
('15:30.001', timedelta(minutes=15, seconds=30, milliseconds=1)),
('15:30.0001', timedelta(minutes=15, seconds=30, microseconds=100)),
('15:30.00001', timedelta(minutes=15, seconds=30, microseconds=10)),
('15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
(b'15:30.000001', timedelta(minutes=15, seconds=30, microseconds=1)),
('-4 15:30', timedelta(days=-4, minutes=15, seconds=30)),
('-172800', timedelta(days=-2)),
('-15:30', timedelta(minutes=-15, seconds=30)),
('-1:15:30', timedelta(hours=-1, minutes=15, seconds=30)),
('-30.1', timedelta(seconds=-30, milliseconds=-100)),
('P4Y', errors.DurationError),
('P4M', errors.DurationError),
('P4W', errors.DurationError),
('P4D', timedelta(days=4)),
('P0.5D', timedelta(hours=12)),
('PT5H', timedelta(hours=5)),
('PT5M', timedelta(minutes=5)),
('PT5S', timedelta(seconds=5)),
('PT0.000005S', timedelta(microseconds=5)),
(b'PT0.000005S', timedelta(microseconds=5)),
],
)
def test_parse_durations(value, result):
if result == errors.DurationError:
with pytest.raises(errors.DurationError):
parse_duration(value)
else:
assert parse_duration(value) == result
@pytest.mark.parametrize(
'field, value, error_message',
[
('dt', [], 'invalid type; expected datetime, string, bytes, int or float'),
('dt', {}, 'invalid type; expected datetime, string, bytes, int or float'),
('dt', object, 'invalid type; expected datetime, string, bytes, int or float'),
('d', [], 'invalid type; expected date, string, bytes, int or float'),
('d', {}, 'invalid type; expected date, string, bytes, int or float'),
('d', object, 'invalid type; expected date, string, bytes, int or float'),
('t', [], 'invalid type; expected time, string, bytes, int or float'),
('t', {}, 'invalid type; expected time, string, bytes, int or float'),
('t', object, 'invalid type; expected time, string, bytes, int or float'),
('td', [], 'invalid type; expected timedelta, string, bytes, int or float'),
('td', {}, 'invalid type; expected timedelta, string, bytes, int or float'),
('td', object, 'invalid type; expected timedelta, string, bytes, int or float'),
],
)
def test_model_type_errors(field, value, error_message):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: value})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {'loc': (field,), 'type': 'type_error', 'msg': error_message}
@pytest.mark.parametrize('field', ['dt', 'd', 't', 'dt'])
def test_unicode_decode_error(field):
class Model(BaseModel):
dt: datetime = None
d: date = None
t: time = None
td: timedelta = None
with pytest.raises(ValidationError) as exc_info:
Model(**{field: b'\x81'})
assert len(exc_info.value.errors()) == 1
error = exc_info.value.errors()[0]
assert error == {
'loc': (field,),
'type': 'value_error.unicodedecode',
'msg': "'utf-8' codec can't decode byte 0x81 in position 0: invalid start byte",
}
def test_nan():
class Model(BaseModel):
dt: datetime
d: date
with pytest.raises(ValidationError) as exc_info:
Model(dt='nan', d='nan')
assert exc_info.value.errors() == [
{
'loc': ('dt',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
{
'loc': ('d',),
'msg': 'cannot convert float NaN to integer',
'type': 'value_error',
},
]
| true | true |
f714d75855fb1f1011c915bfe8ff92d1d28c700e | 1,471 | py | Python | examples/hello-world.py | Shoe-Pi/gfx-hat | ac7cd4ac8873fdff692823b4bf4a804eaa2d98f8 | [
"MIT"
] | 24 | 2018-09-04T20:56:23.000Z | 2021-11-07T06:22:23.000Z | examples/hello-world.py | Shoe-Pi/gfx-hat | ac7cd4ac8873fdff692823b4bf4a804eaa2d98f8 | [
"MIT"
] | 10 | 2018-09-01T16:32:44.000Z | 2022-03-29T13:28:19.000Z | examples/hello-world.py | Shoe-Pi/gfx-hat | ac7cd4ac8873fdff692823b4bf4a804eaa2d98f8 | [
"MIT"
] | 12 | 2018-08-27T21:32:36.000Z | 2022-01-06T10:09:31.000Z | #!/usr/bin/env python
# GFX HAT "Hello World" demo: renders centred text on the 128x64 LCD and
# wires the six capacitive buttons so a press toggles the button LED and
# recolours its backlight zone.
import time
import signal
from gfxhat import touch, lcd, backlight, fonts
from PIL import Image, ImageFont, ImageDraw
print("""hello-world.py
This basic example prints the text "Hello World" in the middle of the LCD
Press any button to see its corresponding LED toggle on/off.
Press Ctrl+C to exit.
""")
# One on/off flag per capacitive button (the HAT has six).
led_states = [False for _ in range(6)]
width, height = lcd.dimensions()
# 'P' = 8-bit palette image; the LCD only needs 0/1 pixel values.
image = Image.new('P', (width, height))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(fonts.AmaticSCBold, 38)
text = "Hello World"
# Centre the text on the display.
w, h = font.getsize(text)
x = (width - w) // 2
y = (height - h) // 2
draw.text((x, y), text, 1, font)
def handler(ch, event):
    # Touch callback: on 'press', toggle the channel's LED and recolour its
    # backlight segment (cyan when on, green when off).
    if event == 'press':
        led_states[ch] = not led_states[ch]
        touch.set_led(ch, led_states[ch])
        if led_states[ch]:
            backlight.set_pixel(ch, 0, 255, 255)
        else:
            backlight.set_pixel(ch, 0, 255, 0)
        backlight.show()
# Start-up animation: flash each button LED in turn.
for x in range(6):
    touch.set_led(x, 1)
    time.sleep(0.1)
    touch.set_led(x, 0)
# Initial green backlight and handler registration for every button.
for x in range(6):
    backlight.set_pixel(x, 0, 255, 0)
    touch.on(x, handler)
backlight.show()
# Copy the rendered PIL image to the LCD one pixel at a time.
for x in range(128):
    for y in range(64):
        pixel = image.getpixel((x, y))
        lcd.set_pixel(x, y, pixel)
lcd.show()
try:
    # Sleep until a signal arrives; touch events fire via the callback.
    signal.pause()
except KeyboardInterrupt:
    # Ctrl+C: switch everything off and clear the screen before exiting.
    for x in range(6):
        backlight.set_pixel(x, 0, 0, 0)
        touch.set_led(x, 0)
    backlight.show()
    lcd.clear()
    lcd.show()
| 19.103896 | 73 | 0.626785 |
# Duplicate copy of the GFX HAT hello-world demo: draw centred text on the
# LCD; each of the six touch buttons toggles its own LED/backlight colour.
import time
import signal
from gfxhat import touch, lcd, backlight, fonts
from PIL import Image, ImageFont, ImageDraw
print("""hello-world.py
This basic example prints the text "Hello World" in the middle of the LCD
Press any button to see its corresponding LED toggle on/off.
Press Ctrl+C to exit.
""")
# Per-button LED on/off state.
led_states = [False for _ in range(6)]
width, height = lcd.dimensions()
image = Image.new('P', (width, height))
draw = ImageDraw.Draw(image)
font = ImageFont.truetype(fonts.AmaticSCBold, 38)
text = "Hello World"
# Centre the message on the panel.
w, h = font.getsize(text)
x = (width - w) // 2
y = (height - h) // 2
draw.text((x, y), text, 1, font)
def handler(ch, event):
    # Press toggles LED `ch`; backlight zone goes cyan when on, green when off.
    if event == 'press':
        led_states[ch] = not led_states[ch]
        touch.set_led(ch, led_states[ch])
        if led_states[ch]:
            backlight.set_pixel(ch, 0, 255, 255)
        else:
            backlight.set_pixel(ch, 0, 255, 0)
        backlight.show()
# Quick LED chase at startup.
for x in range(6):
    touch.set_led(x, 1)
    time.sleep(0.1)
    touch.set_led(x, 0)
# Green backlight everywhere; hook the touch handler on every button.
for x in range(6):
    backlight.set_pixel(x, 0, 255, 0)
    touch.on(x, handler)
backlight.show()
# Blit the PIL image to the LCD pixel by pixel.
for x in range(128):
    for y in range(64):
        pixel = image.getpixel((x, y))
        lcd.set_pixel(x, y, pixel)
lcd.show()
try:
    signal.pause()
except KeyboardInterrupt:
    # Clean shutdown on Ctrl+C: LEDs off, backlight off, screen cleared.
    for x in range(6):
        backlight.set_pixel(x, 0, 0, 0)
        touch.set_led(x, 0)
    backlight.show()
    lcd.clear()
    lcd.show()
| true | true |
f714d8d2b3754c5bbdedf8c1d58e3f9d656a5795 | 2,619 | py | Python | vmware_nsx/shell/admin/plugins/nsxp/resources/certificates.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/shell/admin/plugins/nsxp/resources/certificates.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | vmware_nsx/shell/admin/plugins/nsxp/resources/certificates.py | yebinama/vmware-nsx | 5f59ce8d4668c24e0f4f934898fb4b4e63f1c2f4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.common import v3_common_cert
from vmware_nsx.shell import resources as shell
from neutron_lib.callbacks import registry
from oslo_config import cfg
@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
    """Generate self signed client certificate and private key

    Admin-shell registry callback (``resource``/``event``/``trigger`` are
    supplied by the callback framework); delegates to the shared v3 helper
    with the NSX-Policy (``nsx_p``) config section.
    """
    return v3_common_cert.generate_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
    """Delete client certificate and private key

    Registry callback; delegates to the shared v3 certificate helper with
    the NSX-Policy (``nsx_p``) config section.
    """
    return v3_common_cert.delete_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
    """Show client certificate details

    Registry callback; delegates to the shared v3 certificate helper with
    the NSX-Policy (``nsx_p``) config section.
    """
    return v3_common_cert.show_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
    """Import client certificate that was generated externally

    Registry callback; delegates to the shared v3 certificate helper with
    the NSX-Policy (``nsx_p``) config section.
    """
    return v3_common_cert.import_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
    """Show client certificates associated with openstack identity in NSX

    Registry callback; delegates to the shared v3 certificate helper with
    the NSX-Policy (``nsx_p``) config section.
    """
    return v3_common_cert.show_nsx_certs(cfg.CONF.nsx_p, **kwargs)
# Wire each admin-shell CLI operation on the CERTIFICATE resource to its
# handler above (generate / show / clean / import / nsx-list).
registry.subscribe(generate_cert,
                   constants.CERTIFICATE,
                   shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
                   constants.CERTIFICATE,
                   shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
                   constants.CERTIFICATE,
                   shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
                   constants.CERTIFICATE,
                   shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
                   constants.CERTIFICATE,
                   shell.Operations.NSX_LIST.value)
| 35.391892 | 78 | 0.722413 |
from vmware_nsx.shell.admin.plugins.common import constants
from vmware_nsx.shell.admin.plugins.common import utils as admin_utils
from vmware_nsx.shell.admin.plugins.common import v3_common_cert
from vmware_nsx.shell import resources as shell
from neutron_lib.callbacks import registry
from oslo_config import cfg
@admin_utils.output_header
def generate_cert(resource, event, trigger, **kwargs):
    """Generate a self-signed client certificate and private key via the
    shared v3 helper, using the NSX-Policy (``nsx_p``) config section."""
    return v3_common_cert.generate_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def delete_cert(resource, event, trigger, **kwargs):
    """Delete the client certificate and private key via the shared v3
    helper, using the NSX-Policy (``nsx_p``) config section."""
    return v3_common_cert.delete_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_cert(resource, event, trigger, **kwargs):
    """Show client certificate details via the shared v3 helper, using the
    NSX-Policy (``nsx_p``) config section."""
    return v3_common_cert.show_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def import_cert(resource, event, trigger, **kwargs):
    """Import an externally generated client certificate via the shared v3
    helper, using the NSX-Policy (``nsx_p``) config section."""
    return v3_common_cert.import_cert(cfg.CONF.nsx_p, **kwargs)
@admin_utils.output_header
def show_nsx_certs(resource, event, trigger, **kwargs):
    """List client certificates tied to the OpenStack identity in NSX via
    the shared v3 helper, using the NSX-Policy (``nsx_p``) config section."""
    return v3_common_cert.show_nsx_certs(cfg.CONF.nsx_p, **kwargs)
# Register the CLI operations for the CERTIFICATE resource with the
# neutron callback registry.
registry.subscribe(generate_cert,
                   constants.CERTIFICATE,
                   shell.Operations.GENERATE.value)
registry.subscribe(show_cert,
                   constants.CERTIFICATE,
                   shell.Operations.SHOW.value)
registry.subscribe(delete_cert,
                   constants.CERTIFICATE,
                   shell.Operations.CLEAN.value)
registry.subscribe(import_cert,
                   constants.CERTIFICATE,
                   shell.Operations.IMPORT.value)
registry.subscribe(show_nsx_certs,
                   constants.CERTIFICATE,
                   shell.Operations.NSX_LIST.value)
| true | true |
f714d8d4dbefa94c8fbca307ba8490cc93a1e285 | 457 | py | Python | Module1/Getting_Started_with_Data_Analysis_Code/4/annotate.py | vijaysharmapc/Python-End-to-end-Data-Analysis | a00f2d5d1547993e000b2551ec6a1360240885ba | [
"MIT"
] | 38 | 2017-04-10T19:18:43.000Z | 2021-12-25T08:23:27.000Z | Module1/Getting_Started_with_Data_Analysis_Code/4/annotate.py | vijaysharmapc/Python-End-to-end-Data-Analysis | a00f2d5d1547993e000b2551ec6a1360240885ba | [
"MIT"
] | 1 | 2018-07-10T09:41:43.000Z | 2018-07-10T09:41:43.000Z | Module1/Getting_Started_with_Data_Analysis_Code/4/annotate.py | vijaysharmapc/Python-End-to-end-Data-Analysis | a00f2d5d1547993e000b2551ec6a1360240885ba | [
"MIT"
] | 37 | 2017-04-25T01:49:35.000Z | 2021-05-04T01:46:43.000Z | #!/usr/bin/env python
# Plot y = x^2 + 2x + 1 = (x + 1)^2 and annotate its minimum at (-1, 0).
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-2.4, 0.4, 20)
y = x * x + 2 * x + 1
plt.plot(x, y, 'c', linewidth=2.0)
# Formula label, placed above the curve.
plt.text(-1.5, 1.8, 'y=x^2 + 2*x + 1',
         fontsize=14, style='italic')
# Arrow from the annotation text down to the minimum point.
plt.annotate('minima point', xy=(-1, 0),
             xytext=(-1, 0.3), horizontalalignment='center',
             verticalalignment='top',
             arrowprops=dict(arrowstyle='->',
                             connectionstyle='arc3'))
# Fix: dataset table-cell residue (" | 28.5625 | 51 | ...") was fused onto
# this line, making it invalid Python; residue removed.
plt.savefig('annotate.png')
# Duplicate of the annotate example: plot (x + 1)^2 and mark its minimum.
import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(-2.4, 0.4, 20)
y = x * x + 2 * x + 1
plt.plot(x, y, 'c', linewidth=2.0)
plt.text(-1.5, 1.8, 'y=x^2 + 2*x + 1',
         fontsize=14, style='italic')
plt.annotate('minima point', xy=(-1, 0),
             xytext=(-1, 0.3), horizontalalignment='center',
             verticalalignment='top',
             arrowprops=dict(arrowstyle='->',
                             connectionstyle='arc3'))
# Fix: trailing dataset residue (" | true | true |") fused onto this line
# made it invalid Python; residue removed.
plt.savefig('annotate.png')
f714d97e1633490532b220709b522cf4d4c1414c | 1,361 | py | Python | maior_menor_lista.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
] | null | null | null | maior_menor_lista.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
] | null | null | null | maior_menor_lista.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
def retorno():
    """Ask whether to run the program again: restart via verificar() on
    's'/'S', otherwise print the completion message and stop.

    Fix: dataset metadata residue ("] | null | ... |") was fused in front of
    the ``def`` line, making it invalid Python; residue removed. Dead
    trailing ``pass`` also dropped.
    """
    resp = input('Deseja executar o programa novamente?[s/n] ')
    if resp in ('S', 's'):
        verificar()
    else:
        print('Processo finalizado com sucesso!')
def cabecalho(titulo):
    """Print a three-line header: a 30-dash rule, the title centred in a
    30-column field, and another 30-dash rule."""
    rule = '-' * 30
    print(rule)
    print('{:^30}'.format(titulo))
    print(rule)
def mensagem_erro():
    """Print the standard invalid-input message (Portuguese)."""
    print('Dados inseridos são invalidos!')
def verificar():
    """Read five integers, print the list, then report the largest and
    smallest values together with every index where each occurs; finally
    offer to run again via retorno().
    """
    try:
        cabecalho('Maior e Menor Valores Lista')
        valores = list()
        cont = 0
        for i in range(0, 5):
            cont += 1
            num = int(input('Digite o {}º valor: '.format(cont)))
            valores.append(num)
    except (ValueError, EOFError):
        # Non-numeric (or missing) input: report the error and offer a retry.
        # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and masked programming errors; narrowed to the input failures.
        mensagem_erro()
        retorno()
    else:
        maior = max(valores)
        menor = min(valores)
        print('Os valores da lista: {}'.format(valores))
        # Print every position of the maximum on one line.
        print('O maior valor é {} ele está nas posições: '.format(maior), end='')
        for i, v in enumerate(valores):
            if v == maior:
                print('{} '.format(i), end='')
        # Then every position of the minimum.
        print('\nO menor valor é {} ele está nas posições: '.format(menor), end='')
        for i, v in enumerate(valores):
            if v == menor:
                print('{} '.format(i), end='')
        print('\n')
        retorno()
verificar() | 15.465909 | 82 | 0.488611 | def retorno():
resp=input('Deseja executar o programa novamente?[s/n] ')
if(resp=='S' or resp=='s'):
verificar()
else:
print('Processo finalizado com sucesso!')
pass
def cabecalho(titulo):
    """Print a header: 30 dashes, the title centred to 30 columns, 30 dashes."""
    print('-'*30)
    print(f'{titulo:^30}')
    print('-'*30)
    pass
def mensagem_erro():
    """Print the standard invalid-input message (Portuguese)."""
    print('Dados inseridos são invalidos!')
    pass
def verificar():
    """Read five integers, then print the largest/smallest values and every
    index where each occurs; offer to run again via retorno()."""
    try:
        cabecalho('Maior e Menor Valores Lista')
        valores=list()
        cont=0
        # Prompt for five integer values, 1-based in the prompt text.
        for i in range(0,5):
            cont+=1
            num=int(input('Digite o {}º valor: '.format(cont)))
            valores.append(num)
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # presumably only int() parse failures are intended — confirm.
    except:
        mensagem_erro()
        retorno()
    else:
        cont=0
        maior=max(valores)
        menor=min(valores)
        print('Os valores da lista: {}'.format(valores))
        # Positions of the maximum, space-separated on one line.
        print('O maior valor é {} ele está nas posições: '.format(maior),end='')
        for i,v in enumerate(valores):
            if(v==maior):
                print('{} '.format(i),end='')
        # Positions of the minimum.
        print('\nO menor valor é {} ele está nas posições: '.format(menor),end='')
        for i,v in enumerate(valores):
            if(v==menor):
                print('{} '.format(i),end='')
        print('\n')
        retorno()
    pass
verificar() | true | true |
f714d9bc36813cd34431c4442b42ce62a95887ea | 3,783 | py | Python | Section 4/04.02_omniscient_agent_webapp.py | AYCHAIN/PracticalAI | 1657e31dfc60645f4f999475803f57c0ab9f1a2d | [
"MIT"
] | 7 | 2019-03-06T17:29:52.000Z | 2021-11-08T13:10:24.000Z | Section 4/04.02_omniscient_agent_webapp.py | AYCHAIN/PracticalAI | 1657e31dfc60645f4f999475803f57c0ab9f1a2d | [
"MIT"
] | null | null | null | Section 4/04.02_omniscient_agent_webapp.py | AYCHAIN/PracticalAI | 1657e31dfc60645f4f999475803f57c0ab9f1a2d | [
"MIT"
] | 5 | 2019-03-01T22:21:48.000Z | 2020-05-17T02:05:58.000Z | from flask import Flask, redirect, render_template, url_for
import numpy as np
app = Flask( __name__ )
@app.route( '/home' )
def index():
    """Home page of the two-armed-bandit experiment: report episode/trial
    progress, close out an episode or the full run when the counters
    overflow, otherwise let the omniscient agent pick the best arm and
    render the page for that arm."""
    # retrieve the agent
    agent = app.config['AGENT']
    print( 'Episode: {}/{}'.format( agent.get_episode(), agent.get_episodes() ) )
    print( 'Trial: {}/{}'.format( agent.get_trial(), agent.get_trials() ) )
    if agent.get_episode() > agent.get_episodes():
        # episodes are over
        # compute the final prob
        prob_reward_array = agent.get_prob_reward_array()
        prob_01 = 100*np.round( prob_reward_array[0] / agent.get_episodes(), 2 )
        prob_02 = 100*np.round( prob_reward_array[1] / agent.get_episodes(), 2 )
        # avg the accumulated reward
        avg_accumulated_reward = agent.get_avg_accumulated_reward_array()
        # print the final
        print( '\nProb Bandit 01:{}% - Prob Bandit 02:{}%'.format( prob_01, prob_02 ) )
        print( '\n Avg accumulated reward: {}\n'.format( np.mean( avg_accumulated_reward ) ) )
        # reset the episodes
        agent.reset_episode()
        # NOTE(review): this branch falls through without returning a
        # response, so Flask will raise — confirm whether a redirect to
        # 'index' is intended here.
    elif agent.get_trial() > agent.get_trials():
        # trials are over
        # increase the episode
        agent.set_episode()
        # compute the partial results
        agent.set_prob_reward_array()
        # append the accumulated reward
        agent.set_append_accumulated_reward()
        # append the avg accumulated reward
        agent.set_append_avg_accumulated_reward()
        # reset the trial and initial variables
        agent.set_trial( reset=1 )
        # get the partial results
        partial_result = agent.get_prob_reward_array()
        prob_01 = partial_result[0] / agent.get_episode()
        prob_02 = partial_result[1] / agent.get_episode()
        # print the partial results
        print( '\n Prob Bandit 01:{} - Prob Bandit 02:{}\n'.format( prob_01, prob_02 ) )
        return redirect( url_for( 'index' ) )
    else:
        # trials are not over
        # the omniscient agent always picks the highest-probability arm
        bandit_machine = np.argmax( agent.get_prob_list() )
        # set the current bandit machine
        agent.set_current_bandit( bandit_machine )
        # pick up the web page
        if bandit_machine == 0: # red Yes button
            return render_template( 'layout_red.html' )
        else:
            return render_template( 'layout_blue.html' )
@app.route( '/yes', methods=['POST'] )
def yes_event():
    """Handle a "yes" click: credit reward 1 to the arm that served the
    current page, bump its pull and trial counters, then return home."""
    agent = app.config['AGENT']
    arm = agent.get_current_bandit()
    reward = 1
    # Credit the arm: reward tally, pull count, running total, trial counter.
    agent.set_reward_array(arm, reward)
    agent.set_bandit_array(arm)
    agent.set_accumulated_reward(reward)
    agent.set_trial(reset=0)
    return redirect(url_for('index'))
@app.route( '/no', methods=['POST'] )
def no_event():
    """Handle a "no" click: record reward 0 for the arm that served the
    current page (still counts as a pull/trial), then return home."""
    agent = app.config['AGENT']
    arm = agent.get_current_bandit()
    reward = 0
    # Same bookkeeping as a "yes", but with a zero reward.
    agent.set_reward_array(arm, reward)
    agent.set_bandit_array(arm)
    agent.set_accumulated_reward(reward)
    agent.set_trial(reset=0)
    return redirect(url_for('index'))
if __name__ == "__main__":
    # Experiment parameters: 100 trials per episode, 20 episodes, and the
    # true reward probability of each of the two arms.
    trials = 100
    episodes = 20
    prob_list = [0.3, 0.8]
    # NOTE(review): OmniscientAgent is not defined or imported anywhere in
    # this file — presumably it lives in a sibling module; confirm the
    # missing import.
    agent = OmniscientAgent( prob_list, trials, episodes )
    app.config['AGENT'] = agent
    app.run()
| 29.554688 | 94 | 0.648427 | from flask import Flask, redirect, render_template, url_for
import numpy as np
app = Flask( __name__ )
@app.route( '/home' )
def index():
    """Serve the home page: print progress, finish an episode or the whole
    run when counters overflow, otherwise let the omniscient agent pick the
    best arm and render the matching page."""
    agent = app.config['AGENT']
    print( 'Episode: {}/{}'.format( agent.get_episode(), agent.get_episodes() ) )
    print( 'Trial: {}/{}'.format( agent.get_trial(), agent.get_trials() ) )
    if agent.get_episode() > agent.get_episodes():
        # All episodes finished: report final per-arm reward probabilities.
        prob_reward_array = agent.get_prob_reward_array()
        prob_01 = 100*np.round( prob_reward_array[0] / agent.get_episodes(), 2 )
        prob_02 = 100*np.round( prob_reward_array[1] / agent.get_episodes(), 2 )
        avg_accumulated_reward = agent.get_avg_accumulated_reward_array()
        print( '\nProb Bandit 01:{}% - Prob Bandit 02:{}%'.format( prob_01, prob_02 ) )
        print( '\n Avg accumulated reward: {}\n'.format( np.mean( avg_accumulated_reward ) ) )
        agent.reset_episode()
        # NOTE(review): no response is returned in this branch — Flask will
        # raise; confirm whether a redirect is intended.
    elif agent.get_trial() > agent.get_trials():
        # Episode boundary: roll partial results into the running aggregates.
        agent.set_episode()
        agent.set_prob_reward_array()
        agent.set_append_accumulated_reward()
        agent.set_append_avg_accumulated_reward()
        agent.set_trial( reset=1 )
        partial_result = agent.get_prob_reward_array()
        prob_01 = partial_result[0] / agent.get_episode()
        prob_02 = partial_result[1] / agent.get_episode()
        print( '\n Prob Bandit 01:{} - Prob Bandit 02:{}\n'.format( prob_01, prob_02 ) )
        return redirect( url_for( 'index' ) )
    else:
        # Mid-episode: the omniscient agent always plays the best arm.
        bandit_machine = np.argmax( agent.get_prob_list() )
        agent.set_current_bandit( bandit_machine )
        if bandit_machine == 0:
            return render_template( 'layout_red.html' )
        else:
            return render_template( 'layout_blue.html' )
@app.route( '/yes', methods=['POST'] )
def yes_event():
    """Record reward 1 for the arm that served the page, then redirect home."""
    agent = app.config['AGENT']
    reward = 1
    bandit_machine = agent.get_current_bandit()
    # Update reward tally, pull count, accumulated reward and trial counter.
    agent.set_reward_array( bandit_machine, reward )
    agent.set_bandit_array( bandit_machine )
    agent.set_accumulated_reward( reward )
    agent.set_trial( reset=0 )
    return redirect( url_for( 'index' ) )
@app.route( '/no', methods=['POST'] )
def no_event():
    """Record reward 0 for the arm that served the page, then redirect home."""
    agent = app.config['AGENT']
    reward = 0
    bandit_machine = agent.get_current_bandit()
    # Same bookkeeping as a "yes" click, but with a zero reward.
    agent.set_reward_array( bandit_machine, reward )
    agent.set_bandit_array( bandit_machine )
    agent.set_accumulated_reward( reward )
    agent.set_trial( reset=0 )
    return redirect( url_for( 'index' ) )
if __name__ == "__main__":
    # 100 trials per episode, 20 episodes, true reward probability per arm.
    trials = 100
    episodes = 20
    prob_list = [0.3, 0.8]
    # NOTE(review): OmniscientAgent is not defined or imported in this file —
    # presumably provided by a sibling module; confirm the missing import.
    agent = OmniscientAgent( prob_list, trials, episodes )
    app.config['AGENT'] = agent
    app.run()
| true | true |
f714d9ff51f72caaaa2af83ccd179ae251b0bb23 | 1,929 | py | Python | examples/conjunctive_graphs.py | tonyfast/rdflib | e4fe0fdbd4de7e1183418f302315b51a14602e03 | [
"BSD-3-Clause"
] | 2 | 2021-02-06T17:36:05.000Z | 2021-04-21T07:33:39.000Z | examples/conjunctive_graphs.py | pragya16067/rdflib | 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | [
"BSD-3-Clause"
] | 2 | 2020-05-09T15:03:57.000Z | 2020-05-30T10:51:40.000Z | examples/conjunctive_graphs.py | pragya16067/rdflib | 6b5bd37ccc67bdec62d2e36d174eb7933b5020b2 | [
"BSD-3-Clause"
] | 4 | 2020-05-08T08:36:19.000Z | 2020-05-28T07:23:23.000Z | """
An RDFLib ConjunctiveGraph is an (unnamed) aggregation of all the named graphs
within a Store. The :meth:`~rdflib.graph.ConjunctiveGraph.get_context`
method can be used to get a particular named graph for use such as to add
triples to, or the default graph can be used
This example shows how to create named graphs and work with the
conjunction (union) of all the graphs.
"""
from rdflib import Namespace, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory
if __name__ == "__main__":
ns = Namespace("http://love.com#")
mary = URIRef("http://love.com/lovers/mary")
john = URIRef("http://love.com/lovers/john")
cmary = URIRef("http://love.com/lovers/mary")
cjohn = URIRef("http://love.com/lovers/john")
store = IOMemory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
# add a graph for Mary's facts to the Conjunctive Graph
gmary = Graph(store=store, identifier=cmary)
# Mary's graph only contains the URI of the person she love, not his cute name
gmary.add((mary, ns["hasName"], Literal("Mary")))
gmary.add((mary, ns["loves"], john))
# add a graph for Mary's facts to the Conjunctive Graph
gjohn = Graph(store=store, identifier=cjohn)
# John's graph contains his cute name
gjohn.add((john, ns["hasCuteName"], Literal("Johnny Boy")))
# enumerate contexts
for c in g.contexts():
print("-- %s " % c)
# separate graphs
print(gjohn.serialize(format="n3").decode("utf-8"))
print("===================")
print(gmary.serialize(format="n3").decode("utf-8"))
print("===================")
# full graph
print(g.serialize(format="n3").decode("utf-8"))
# query the conjunction of all graphs
xx = None
for x in g[mary : ns.loves / ns.hasCuteName]:
xx = x
print("Q: Who does Mary love?")
print("A: Mary loves {}".format(xx))
| 32.15 | 82 | 0.65578 |
from rdflib import Namespace, Literal, URIRef
from rdflib.graph import Graph, ConjunctiveGraph
from rdflib.plugins.memory import IOMemory
if __name__ == "__main__":
ns = Namespace("http://love.com#")
mary = URIRef("http://love.com/lovers/mary")
john = URIRef("http://love.com/lovers/john")
cmary = URIRef("http://love.com/lovers/mary")
cjohn = URIRef("http://love.com/lovers/john")
store = IOMemory()
g = ConjunctiveGraph(store=store)
g.bind("love", ns)
gmary = Graph(store=store, identifier=cmary)
# Mary's graph only contains the URI of the person she love, not his cute name
gmary.add((mary, ns["hasName"], Literal("Mary")))
gmary.add((mary, ns["loves"], john))
gjohn = Graph(store=store, identifier=cjohn)
# John's graph contains his cute name
gjohn.add((john, ns["hasCuteName"], Literal("Johnny Boy")))
for c in g.contexts():
print("-- %s " % c)
print(gjohn.serialize(format="n3").decode("utf-8"))
print("===================")
print(gmary.serialize(format="n3").decode("utf-8"))
print("===================")
print(g.serialize(format="n3").decode("utf-8"))
xx = None
for x in g[mary : ns.loves / ns.hasCuteName]:
xx = x
print("Q: Who does Mary love?")
print("A: Mary loves {}".format(xx))
| true | true |
f714da0d0e3039473d2c96ef73abc9cc0aa2fb6a | 13,287 | py | Python | pandas/core/indexes/extension.py | andrei-assa/pandas | ded76dbbfdff3211cfff0ec7039611b50d531efb | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/extension.py | andrei-assa/pandas | ded76dbbfdff3211cfff0ec7039611b50d531efb | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/core/indexes/extension.py | andrei-assa/pandas | ded76dbbfdff3211cfff0ec7039611b50d531efb | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | """
Shared methods for Index subclasses backed by ExtensionArray.
"""
from typing import (
Hashable,
List,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import (
is_dtype_equal,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
_T = TypeVar("_T", bound="NDArrayBackedExtensionIndex")
def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
    """
    Make an alias for a method of the underlying ExtensionArray.
    Parameters
    ----------
    name : str
        Name of an attribute the class should inherit from its EA parent.
    delegate : class
    cache : bool, default False
        Whether to convert wrapped properties into cache_readonly
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    Returns
    -------
    attribute, method, property, or cache_readonly
    """
    attr = getattr(delegate, name)
    if isinstance(attr, property) or type(attr).__name__ == "getset_descriptor":
        # getset_descriptor i.e. property defined in cython class
        if cache:
            # Read-only, computed once per instance.
            def cached(self):
                return getattr(self._data, name)
            cached.__name__ = name
            cached.__doc__ = attr.__doc__
            method = cache_readonly(cached)
        else:
            # Read/write property forwarding to self._data; the getter
            # optionally boxes the result back into an Index (or sets the
            # index on a returned DataFrame).
            def fget(self):
                result = getattr(self._data, name)
                if wrap:
                    if isinstance(result, type(self._data)):
                        return type(self)._simple_new(result, name=self.name)
                    elif isinstance(result, ABCDataFrame):
                        return result.set_index(self)
                    return Index(result, name=self.name)
                return result
            def fset(self, value):
                setattr(self._data, name, value)
            fget.__name__ = name
            fget.__doc__ = attr.__doc__
            method = property(fget, fset)
    elif not callable(attr):
        # just a normal attribute, no wrapping
        method = attr
    else:
        # Plain method: forward to the EA, optionally wrapping the result
        # exactly as the property getter does above.
        def method(self, *args, **kwargs):
            result = attr(self._data, *args, **kwargs)
            if wrap:
                if isinstance(result, type(self._data)):
                    return type(self)._simple_new(result, name=self.name)
                elif isinstance(result, ABCDataFrame):
                    return result.set_index(self)
                return Index(result, name=self.name)
            return result
        method.__name__ = name
        method.__doc__ = attr.__doc__
    return method
def inherit_names(names: List[str], delegate, cache: bool = False, wrap: bool = False):
    """
    Class decorator that pins attributes from an ExtensionArray onto an
    Index subclass.
    Parameters
    ----------
    names : List[str]
        Attribute names to forward from ``delegate``.
    delegate : class
    cache : bool, default False
        Whether wrapped properties become cache_readonly.
    wrap : bool, default False
        Whether to wrap the inherited result in an Index.
    """
    def decorator(klass):
        for attr_name in names:
            setattr(
                klass,
                attr_name,
                inherit_from_data(attr_name, delegate, cache=cache, wrap=wrap),
            )
        return klass
    return decorator
def _make_wrapped_comparison_op(opname: str):
    """
    Build a comparison method (e.g. ``__eq__``) that unwraps the operand
    and dispatches to the same-named op on ``self._data``.
    """
    def cmp_method(self, other):
        # The arrays defer to Series for comparison ops but the indexes
        # don't, so a Series operand is unwrapped to its values first.
        if isinstance(other, ABCSeries):
            other = other._values
        operand = _maybe_unwrap_index(other)
        return getattr(self._data, opname)(operand)
    cmp_method.__name__ = opname
    return cmp_method
def make_wrapped_arith_op(opname: str):
    """Build an arithmetic dunder (e.g. ``__add__``) that defers to the
    same-named method on ``self._data`` and wraps the result in an Index."""
    def method(self, other):
        if (
            isinstance(other, Index)
            and is_object_dtype(other.dtype)
            and type(other) is not Index
        ):
            # We return NotImplemented for object-dtype index *subclasses* so they have
            # a chance to implement ops before we unwrap them.
            # See https://github.com/pandas-dev/pandas/issues/31109
            return NotImplemented
        meth = getattr(self._data, opname)
        result = meth(_maybe_unwrap_index(other))
        return _wrap_arithmetic_op(self, other, result)
    method.__name__ = opname
    return method
def _wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (
_wrap_arithmetic_op(self, other, result[0]),
_wrap_arithmetic_op(self, other, result[1]),
)
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = get_op_result_name(self, other)
result.name = res_name
return result
def _maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, Index):
return obj._data
return obj
class ExtensionIndex(Index):
    """
    Index subclass for indexes backed by ExtensionArray.
    """
    # The base class already passes through to _data:
    #  size, __len__, dtype
    _data: Union[IntervalArray, NDArrayBackedExtensionArray]
    # Comparison dunders all dispatch to the same-named op on self._data.
    __eq__ = _make_wrapped_comparison_op("__eq__")
    __ne__ = _make_wrapped_comparison_op("__ne__")
    __lt__ = _make_wrapped_comparison_op("__lt__")
    __gt__ = _make_wrapped_comparison_op("__gt__")
    __le__ = _make_wrapped_comparison_op("__le__")
    __ge__ = _make_wrapped_comparison_op("__ge__")
    @property
    def _has_complex_internals(self) -> bool:
        # used to avoid libreduction code paths, which raise or require conversion
        return True
    # ---------------------------------------------------------------------
    # NDarray-Like Methods
    def __getitem__(self, key):
        result = self._data[key]
        if isinstance(result, type(self._data)):
            if result.ndim == 1:
                return type(self)(result, name=self.name)
            # Unpack to ndarray for MPL compat
            result = result._ndarray
        # Includes cases where we get a 2D ndarray back for MPL compat
        deprecate_ndim_indexing(result)
        return result
    def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
        # overriding IndexOpsMixin improves performance GH#38083
        return self._data.searchsorted(value, side=side, sorter=sorter)
    # ---------------------------------------------------------------------
    def _get_engine_target(self) -> np.ndarray:
        # Materialize the EA as an ndarray for the hash-engine.
        return np.asarray(self._data)
    def delete(self, loc):
        """
        Make new Index with passed location(-s) deleted
        Returns
        -------
        new_index : Index
        """
        arr = self._data.delete(loc)
        return type(self)._simple_new(arr, name=self.name)
    def repeat(self, repeats, axis=None):
        # `axis` is accepted only for numpy compatibility; it must be None.
        nv.validate_repeat((), {"axis": axis})
        result = self._data.repeat(repeats, axis=axis)
        return type(self)._simple_new(result, name=self.name)
    def insert(self, loc: int, item):
        # ExtensionIndex subclasses must override Index.insert
        raise AbstractMethodError(self)
    def _validate_fill_value(self, value):
        """
        Convert value to be insertable to underlying array.
        """
        return self._data._validate_setitem_value(value)
    def _get_unique_index(self):
        if self.is_unique:
            return self
        result = self._data.unique()
        return self._shallow_copy(result)
    @doc(Index.map)
    def map(self, mapper, na_action=None):
        # Try to run function on index first, and then on elements of index
        # Especially important for group-by functionality
        try:
            result = mapper(self)
            # Try to use this result if we can
            if isinstance(result, np.ndarray):
                result = Index(result)
            if not isinstance(result, Index):
                raise TypeError("The map function must return an Index object")
            return result
        except Exception:
            # Fall back to element-wise mapping via object dtype.
            return self.astype(object).map(mapper)
    @doc(Index.astype)
    def astype(self, dtype, copy=True):
        dtype = pandas_dtype(dtype)
        if is_dtype_equal(self.dtype, dtype):
            if not copy:
                # Ensure that self.astype(self.dtype) is self
                return self
            return self.copy()
        if isinstance(dtype, np.dtype) and dtype.kind == "M" and dtype != "M8[ns]":
            # For now Datetime supports this by unwrapping ndarray, but DTI doesn't
            raise TypeError(f"Cannot cast {type(self._data).__name__} to dtype")
        new_values = self._data.astype(dtype, copy=copy)
        # pass copy=False because any copying will be done in the
        # _data.astype call above
        return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)
    @cache_readonly
    def _isnan(self) -> np.ndarray:
        # error: Incompatible return value type (got "ExtensionArray", expected
        # "ndarray")
        return self._data.isna()  # type: ignore[return-value]
    @doc(Index.equals)
    def equals(self, other) -> bool:
        # Dispatch to the ExtensionArray's .equals method.
        if self.is_(other):
            return True
        if not isinstance(other, type(self)):
            return False
        return self._data.equals(other._data)
class NDArrayBackedExtensionIndex(ExtensionIndex):
    """
    Index subclass for indexes backed by NDArrayBackedExtensionArray.
    """
    _data: NDArrayBackedExtensionArray
    # Concrete subclasses set this to the EA class their _data must be.
    _data_cls: Union[
        Type[Categorical],
        Type[DatetimeArray],
        Type[TimedeltaArray],
        Type[PeriodArray],
    ]
    @classmethod
    def _simple_new(
        cls,
        values: NDArrayBackedExtensionArray,
        name: Hashable = None,
    ):
        """Fastpath constructor: wrap an already-validated array without
        any of Index.__new__'s inference or copying."""
        assert isinstance(values, cls._data_cls), type(values)
        result = object.__new__(cls)
        result._data = values
        result._name = name
        result._cache = {}
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._ndarray
        result._reset_identity()
        return result
    def _get_engine_target(self) -> np.ndarray:
        # The backing ndarray is already engine-compatible; no conversion.
        return self._data._ndarray
    def insert(self: _T, loc: int, item) -> _T:
        """
        Make new Index inserting new item at location. Follows
        Python list.append semantics for negative values.
        Parameters
        ----------
        loc : int
        item : object
        Returns
        -------
        new_index : Index
        Raises
        ------
        ValueError if the item is not valid for this dtype.
        """
        arr = self._data
        try:
            code = arr._validate_scalar(item)
        except (ValueError, TypeError):
            # e.g. trying to insert an integer into a DatetimeIndex
            # We cannot keep the same dtype, so cast to the (often object)
            # minimal shared dtype before doing the insert.
            dtype, _ = infer_dtype_from(item, pandas_dtype=True)
            dtype = find_common_type([self.dtype, dtype])
            return self.astype(dtype).insert(loc, item)
        else:
            # Splice the validated code into the backing ndarray.
            new_vals = np.concatenate(
                (
                    arr._ndarray[:loc],
                    np.asarray([code], dtype=arr._ndarray.dtype),
                    arr._ndarray[loc:],
                )
            )
            new_arr = arr._from_backing_data(new_vals)
            return type(self)._simple_new(new_arr, name=self.name)
    def putmask(self, mask, value) -> Index:
        # Work on a copy so self is never mutated; fall back to object
        # dtype when the value cannot be held by this dtype.
        res_values = self._data.copy()
        try:
            res_values.putmask(mask, value)
        except (TypeError, ValueError):
            return self.astype(object).putmask(mask, value)
        return type(self)._simple_new(res_values, name=self.name)
    def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T:
        # Rewrap join results (raw backing values) and derive the name.
        name = get_op_result_name(self, other)
        arr = self._data._from_backing_data(joined)
        return type(self)._simple_new(arr, name=name)
| 29.79148 | 87 | 0.611274 | from typing import (
Hashable,
List,
Type,
TypeVar,
Union,
)
import numpy as np
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from,
)
from pandas.core.dtypes.common import (
is_dtype_equal,
is_object_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCSeries,
)
from pandas.core.arrays import (
Categorical,
DatetimeArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.indexers import deprecate_ndim_indexing
from pandas.core.indexes.base import Index
from pandas.core.ops import get_op_result_name
_T = TypeVar("_T", bound="NDArrayBackedExtensionIndex")
def inherit_from_data(name: str, delegate, cache: bool = False, wrap: bool = False):
    """
    Build an attribute/method for an Index class that passes straight
    through to ``self._data``.

    Parameters
    ----------
    name : str
        Name of the attribute to look up on ``delegate``.
    delegate : class
        Class whose attribute of the same name is mirrored.
    cache : bool, default False
        Whether to cache the result (descriptor attributes only).
    wrap : bool, default False
        Whether to re-wrap the result in an Index / set_index a DataFrame.

    Returns
    -------
    attribute, method, property, or cache_readonly
    """
    attr = getattr(delegate, name)
    is_descriptor = (
        isinstance(attr, property) or type(attr).__name__ == "getset_descriptor"
    )

    def _wrap(self, result):
        # Optionally box a raw result back into an index-like object.
        if wrap:
            if isinstance(result, type(self._data)):
                return type(self)._simple_new(result, name=self.name)
            if isinstance(result, ABCDataFrame):
                return result.set_index(self)
            return Index(result, name=self.name)
        return result

    if is_descriptor:
        if cache:
            def cached(self):
                return getattr(self._data, name)

            cached.__name__ = name
            cached.__doc__ = attr.__doc__
            return cache_readonly(cached)

        def fget(self):
            return _wrap(self, getattr(self._data, name))

        def fset(self, value):
            setattr(self._data, name, value)

        fget.__name__ = name
        fget.__doc__ = attr.__doc__
        return property(fget, fset)

    if not callable(attr):
        # Plain (non-descriptor, non-callable) attribute: mirror as-is.
        return attr

    def method(self, *args, **kwargs):
        return _wrap(self, attr(self._data, *args, **kwargs))

    method.__name__ = name
    method.__doc__ = attr.__doc__
    return method
def inherit_names(names: List[str], delegate, cache: bool = False, wrap: bool = False):
    """
    Class decorator that attaches pass-through accessors for each of
    ``names``, built with :func:`inherit_from_data`.

    Parameters
    ----------
    names : List[str]
    delegate : class
    cache : bool, default False
    wrap : bool, default False
        Whether to re-wrap results in an Index / set_index a DataFrame.
    """

    def decorator(klass):
        for attr_name in names:
            accessor = inherit_from_data(attr_name, delegate, cache=cache, wrap=wrap)
            setattr(klass, attr_name, accessor)
        return klass

    return decorator
def _make_wrapped_comparison_op(opname: str):
    """
    Create a comparison method that dispatches to ``self._data``.
    """

    def wrapper(self, other):
        if isinstance(other, ABCSeries):
            # the arrays defer to Series for comparison ops but the indexes
            #  don't, so we have to unwrap here.
            other = other._values
        unwrapped = _maybe_unwrap_index(other)
        op = getattr(self._data, opname)
        return op(unwrapped)

    wrapper.__name__ = opname
    return wrapper
def make_wrapped_arith_op(opname: str):
    """
    Create an arithmetic method that dispatches to ``self._data``.
    """

    def method(self, other):
        is_object_subclass_index = (
            isinstance(other, Index)
            and is_object_dtype(other.dtype)
            and type(other) is not Index
        )
        if is_object_subclass_index:
            # We return NotImplemented for object-dtype index *subclasses* so they have
            # a chance to implement ops before we unwrap them.
            # See https://github.com/pandas-dev/pandas/issues/31109
            return NotImplemented
        result = getattr(self._data, opname)(_maybe_unwrap_index(other))
        return _wrap_arithmetic_op(self, other, result)

    method.__name__ = opname
    return method
def _wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (
_wrap_arithmetic_op(self, other, result[0]),
_wrap_arithmetic_op(self, other, result[1]),
)
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = get_op_result_name(self, other)
result.name = res_name
return result
def _maybe_unwrap_index(obj):
if isinstance(obj, Index):
return obj._data
return obj
class ExtensionIndex(Index):
    """
    Index subclass for indexes backed by an ExtensionArray.
    """

    # The base class already passes through to _data:
    #  size, __len__, dtype

    _data: Union[IntervalArray, NDArrayBackedExtensionArray]

    __eq__ = _make_wrapped_comparison_op("__eq__")
    __ne__ = _make_wrapped_comparison_op("__ne__")
    __lt__ = _make_wrapped_comparison_op("__lt__")
    __gt__ = _make_wrapped_comparison_op("__gt__")
    __le__ = _make_wrapped_comparison_op("__le__")
    __ge__ = _make_wrapped_comparison_op("__ge__")

    @property
    def _has_complex_internals(self) -> bool:
        # used to avoid libreduction code paths, which raise or require conversion
        return True

    # ---------------------------------------------------------------------
    # NDarray-Like Methods

    def __getitem__(self, key):
        result = self._data[key]
        if isinstance(result, type(self._data)):
            if result.ndim == 1:
                return type(self)(result, name=self.name)
            # Unpack to ndarray for MPL compat
            result = result._ndarray

        # Includes cases where we get a 2D ndarray back for MPL compat
        deprecate_ndim_indexing(result)
        return result

    def searchsorted(self, value, side="left", sorter=None) -> np.ndarray:
        # overriding IndexOpsMixin improves performance GH#38083
        return self._data.searchsorted(value, side=side, sorter=sorter)

    # ---------------------------------------------------------------------

    def _get_engine_target(self) -> np.ndarray:
        return np.asarray(self._data)

    def delete(self, loc):
        """
        Make new Index with passed location(-s) deleted.
        """
        arr = self._data.delete(loc)
        return type(self)._simple_new(arr, name=self.name)

    def repeat(self, repeats, axis=None):
        nv.validate_repeat((), {"axis": axis})
        result = self._data.repeat(repeats, axis=axis)
        return type(self)._simple_new(result, name=self.name)

    def insert(self, loc: int, item):
        # ExtensionIndex subclasses must override Index.insert
        raise AbstractMethodError(self)

    def _validate_fill_value(self, value):
        """
        Convert value to be insertable into the underlying array.
        """
        return self._data._validate_setitem_value(value)

    def _get_unique_index(self):
        if self.is_unique:
            return self

        result = self._data.unique()
        return self._shallow_copy(result)

    @doc(Index.map)
    def map(self, mapper, na_action=None):
        # Try to run function on index first, and then on elements of index
        # Especially important for group-by functionality
        try:
            result = mapper(self)

            # Try to use this result if we can
            if isinstance(result, np.ndarray):
                result = Index(result)

            if not isinstance(result, Index):
                raise TypeError("The map function must return an Index object")
            return result
        except Exception:
            return self.astype(object).map(mapper)

    @doc(Index.astype)
    def astype(self, dtype, copy=True):
        dtype = pandas_dtype(dtype)
        if is_dtype_equal(self.dtype, dtype):
            if not copy:
                # Ensure that self.astype(self.dtype) is self
                return self
            return self.copy()
        if isinstance(dtype, np.dtype) and dtype.kind == "M" and dtype != "M8[ns]":
            # For now Datetime supports this by unwrapping ndarray, but DTI doesn't
            raise TypeError(f"Cannot cast {type(self._data).__name__} to dtype")

        new_values = self._data.astype(dtype, copy=copy)

        # pass copy=False since any copying was done in self._data.astype above
        return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)

    @cache_readonly
    def _isnan(self) -> np.ndarray:
        # delegate NA detection to the backing array
        return self._data.isna()

    @doc(Index.equals)
    def equals(self, other) -> bool:
        # Dispatch to the backing array's .equals method.
        if self.is_(other):
            return True
        if not isinstance(other, type(self)):
            return False

        return self._data.equals(other._data)
class NDArrayBackedExtensionIndex(ExtensionIndex):
    """
    Index subclass for indexes backed by NDArrayBackedExtensionArray.
    """

    _data: NDArrayBackedExtensionArray

    # Concrete backing-array class; subclasses set this so _simple_new can
    # assert it received the right array type.
    _data_cls: Union[
        Type[Categorical],
        Type[DatetimeArray],
        Type[TimedeltaArray],
        Type[PeriodArray],
    ]

    @classmethod
    def _simple_new(
        cls,
        values: NDArrayBackedExtensionArray,
        name: Hashable = None,
    ):
        # Fastpath constructor: wrap ``values`` directly, bypassing __init__
        # and any validation/copying.
        assert isinstance(values, cls._data_cls), type(values)
        result = object.__new__(cls)
        result._data = values
        result._name = name
        result._cache = {}
        # For groupby perf. See note in indexes/base about _index_data
        result._index_data = values._ndarray
        result._reset_identity()
        return result

    def _get_engine_target(self) -> np.ndarray:
        # Hand the engine the raw ndarray backing the extension array.
        return self._data._ndarray

    def insert(self: _T, loc: int, item) -> _T:
        """
        Make new Index inserting new item at location. Follows
        Python list.append semantics for negative values.

        Parameters
        ----------
        loc : int
        item : object

        Returns
        -------
        new_index : Index

        Raises
        ------
        ValueError if the item is not valid for this dtype.
        """
        arr = self._data
        try:
            code = arr._validate_scalar(item)
        except (ValueError, TypeError):
            # e.g. trying to insert an integer into a DatetimeIndex
            # We cannot keep the same dtype, so cast to the (often object)
            # minimal shared dtype before doing the insert.
            dtype, _ = infer_dtype_from(item, pandas_dtype=True)
            dtype = find_common_type([self.dtype, dtype])
            return self.astype(dtype).insert(loc, item)
        else:
            # Splice the validated scalar into the backing ndarray.
            new_vals = np.concatenate(
                (
                    arr._ndarray[:loc],
                    np.asarray([code], dtype=arr._ndarray.dtype),
                    arr._ndarray[loc:],
                )
            )
            new_arr = arr._from_backing_data(new_vals)
            return type(self)._simple_new(new_arr, name=self.name)

    def putmask(self, mask, value) -> Index:
        res_values = self._data.copy()
        try:
            res_values.putmask(mask, value)
        except (TypeError, ValueError):
            # value does not fit this dtype; fall back to object dtype
            return self.astype(object).putmask(mask, value)
        return type(self)._simple_new(res_values, name=self.name)

    def _wrap_joined_index(self: _T, joined: np.ndarray, other: _T) -> _T:
        name = get_op_result_name(self, other)
        arr = self._data._from_backing_data(joined)
        return type(self)._simple_new(arr, name=name)
| true | true |
f714da179a33d1d5ea2c52e3e23e127d722d3088 | 110 | py | Python | Data Structures/Array/FindSingleNumber.py | prabhupant/daily-coding-problem | b3775dd0ad823823e60100624ccf14235c446098 | [
"MIT"
] | null | null | null | Data Structures/Array/FindSingleNumber.py | prabhupant/daily-coding-problem | b3775dd0ad823823e60100624ccf14235c446098 | [
"MIT"
] | null | null | null | Data Structures/Array/FindSingleNumber.py | prabhupant/daily-coding-problem | b3775dd0ad823823e60100624ccf14235c446098 | [
"MIT"
def find_single(arr, n):
    """XOR-fold the first ``n`` elements of ``arr``.

    When every value appears an even number of times except one, the
    pairs cancel (x ^ x == 0) and the lone value is returned.
    """
    acc = arr[0]
    i = 1
    while i < n:
        acc ^= arr[i]
        i += 1
    return acc
def find_single(arr, n=None):
    """Find the value without a partner via XOR cancellation.

    Every value that appears an even number of times among the first
    ``n`` elements cancels itself out (x ^ x == 0), leaving the single
    unpaired value.

    Parameters
    ----------
    arr : non-empty sequence of int
    n : int, optional
        How many leading elements to consider. Defaults to ``len(arr)``,
        so callers no longer have to pass the length explicitly;
        existing callers that pass ``n`` are unaffected.

    Returns
    -------
    int
        ``arr[0] ^ arr[1] ^ ... ^ arr[n - 1]``.
    """
    if n is None:
        n = len(arr)  # backward-compatible generalization
    res = arr[0]
    for i in range(1, n):
        res ^= arr[i]
    return res
| true | true |
f714dae6a19b474807991a76b68862fa4ed2b7a5 | 42,485 | py | Python | controllers/results.py | admed/molgears | 385c5bf1a00d54961042e75f345626f890f43bde | [
"BSD-3-Clause"
] | 5 | 2017-01-18T07:29:02.000Z | 2018-09-26T08:44:10.000Z | controllers/results.py | admed/molgears | 385c5bf1a00d54961042e75f345626f890f43bde | [
"BSD-3-Clause"
] | null | null | null | controllers/results.py | admed/molgears | 385c5bf1a00d54961042e75f345626f890f43bde | [
"BSD-3-Clause"
] | 4 | 2016-02-07T02:14:48.000Z | 2021-04-03T17:49:15.000Z | # -*- coding: utf-8 -*-
"""Sample controller with all its actions protected."""
from tg import expose, flash, redirect, request
from tg.i18n import lazy_ugettext as l_
from molgears.model import DBSession, Tags, LCompound, LPurity, Names
from molgears.model import Compound, User, Projects
from molgears.model.auth import UserLists
from molgears.lib.base import BaseController
import os
from sqlalchemy import desc
from rdkit import Chem
from molgears.widgets.structure import checksmi
from datetime import datetime
#from tg.decorators import paginate
from webhelpers import paginate
from molgears.widgets.rgbTuple import htmlRgb, htmlRgb100, Num2Rgb
from molgears.controllers.ctoxicity import CytotoxicityController
__all__ = ['ResultsController']
class ResultsController(BaseController):
ctoxicity=CytotoxicityController()
@expose('molgears.templates.users.results.index')
def index(self, page=1, *args, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
project = DBSession.query(Projects).filter_by(name=pname).first()
page_url = paginate.PageURL_WebOb(request)
import pickle
try:
cells = pickle.loads([test.cell_line for test in project.tests if test.name == 'CT'][0])
except:
cells = None
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.showme==True)
dsc = True
order = LCompound.id
tmpl = ''
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
selection = None
similarity = None
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
ulist = None
ulists = set([l for l in user.lists if l.table == 'Results'] + [l for l in user.tg_user_lists if l.table == 'Results'])
items = user.items_per_page
try:
if kw['search'] != u'':
search_clicked = kw['search']
else:
search_clicked = None
except Exception:
search_clicked = None
if kw:
if kw.has_key('mylist'):
try:
ulist_id = int(kw['mylist'])
ulist = DBSession.query(UserLists).get(ulist_id)
except Exception:
flash(l_(u'List error'), 'error')
redirect(request.headers['Referer'])
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
if ulist.table == 'Results':
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.id.in_(elements))
else:
flash(l_(u'Table error'), 'error')
redirect(request.headers['Referer'])
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
for k, v in kw.iteritems():
if str(k) == 'desc' and str(v) != '1':
dsc = None
elif str(k) == 'order_by':
if v in ('gid', 'create_date', 'box', 'form', 'state', 'entry', 'source', 'MDM2', 'MDM4', 'lcode'):
if v=='lcode':
order = LCompound.lcode
else:
order = LCompound.__getattribute__(LCompound, v)
else:
if v=='last_point':
lcompound=lcompound.join(LCompound.solubility)
order = v
elif hasattr(LCompound, v):
order = LCompound.__getattribute__(LCompound, v)
elif 'CTOX_' in v:
v = v.replace('CTOX_', '')
all_lcompounds = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).all()
for l in all_lcompounds:
l.avg_ct = v.replace('pp', '+')
order = '_avg_ct'
else:
order = v
if str(k) != 'select' and str(k) != 'remove' and str(v) != u'':
tmpl += str(k) + '=' + str(v) + '&'
elif str(k) == 'select':
try:
if isinstance(kw['select'], basestring):
selection = [kw['select']]
else:
selection = [id for id in kw['select']]
except Exception:
selection = None
if search_clicked:
try:
smiles = str(kw['smiles'])
if 'pp' in smiles:
smiles = smiles.replace('pp', '+')
method = str(kw['method'])
except Exception:
smiles = None
method = None
if smiles:
if checksmi(smiles):
from razi.functions import functions
from razi.expression import TxtMoleculeElement
if method == 'similarity':
# from razi.postgresql_rdkit import tanimoto_threshold
query_bfp = functions.morgan_b(TxtMoleculeElement(smiles), 2)
constraint = Compound.morgan.tanimoto_similar(query_bfp)
tanimoto_sml = Compound.morgan.tanimoto_similarity(query_bfp).label('tanimoto')
search = DBSession.query(LCompound, tanimoto_sml).join(LCompound.mol).join(LCompound.purity).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
if order != LCompound.id:
if order == 'purity':
order = LPurity.value
if dsc:
search = search.order_by(desc(order).nullslast())
else:
search = search.order_by(order)
else:
search = search.order_by(desc(tanimoto_sml)).all()
lcompound = ()
similarity = ()
for row in search:
lcompound += (row[0], )
similarity += (row[1], )
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
elif method == 'substructure':
constraint = Compound.structure.contains(smiles)
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
elif method == 'identity':
lcompound = DBSession.query(LCompound).filter(Compound.project.any(Projects.name==pname)).join(LCompound.mol).filter(Compound.structure.equals(smiles))
else:
if method == 'smarts':
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
search = lcompound.all()
sub_lcompounds = ()
patt = Chem.MolFromSmarts(smiles)
if not patt:
flash(l_(u'SMARTS error'), 'warning')
redirect(request.headers['Referer'])
for row in search:
m = Chem.MolFromSmiles(str(row.mol.structure))
mol = Chem.AddHs(m)
if mol.HasSubstructMatch(patt):
sub_lcompounds += (row, )
currentPage = paginate.Page(sub_lcompounds, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
else:
flash(l_(u'SMILES error'), 'warning')
redirect(request.headers['Referer'])
if kw.has_key('text_GID') and kw['text_GID'] !=u'':
try:
gid = int(kw['text_GID'])
lcompound = lcompound.filter(LCompound.gid == gid)
except Exception as msg:
flash(l_(u'GID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_ID') and kw['text_ID'] !=u'':
try:
id = int(kw['text_ID'])
lcompound = lcompound.filter(LCompound.id == id)
except Exception as msg:
flash(l_(u'ID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_name') and kw['text_name'] !=u'':
lcompound = lcompound.filter(Compound.names.any(Names.name.like(kw['text_name'].strip().replace('*', '%'))))
if kw.has_key('text_notes') and kw['text_notes'] !=u'':
lcompound = lcompound.filter(LCompound.notes.like(kw['text_notes'].replace('*', '%')))
if kw.has_key('text_lso') and kw['text_lso'] !=u'':
lcompound = lcompound.filter(LCompound.lso.like(kw['text_lso'].replace('*', '%')))
if kw.has_key('text_entry') and kw['text_entry'] !=u'':
lcompound = lcompound.filter(LCompound.entry.like(kw['text_entry'].replace('*', '%')))
if kw.has_key('text_box') and kw['text_box'] !=u'':
lcompound = lcompound.filter(LCompound.box.like(kw['text_box'].replace('*', '%')))
if kw.has_key('date_from') and kw['date_from'] !=u'':
date_from = datetime.strptime(str(kw['date_from']), '%Y-%m-%d')
lcompound = lcompound.filter(LCompound.create_date > date_from)
else:
date_from = None
if kw.has_key('date_to') and kw['date_to'] !=u'':
date_to = datetime.strptime(str(kw['date_to']), '%Y-%m-%d')
if date_from:
if date_to>date_from:
lcompound = lcompound.filter(LCompound.create_date < date_to)
else:
flash(l_(u'The End date must be later than the initial'), 'error')
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.create_date < date_to)
if kw.has_key('text_mdm2_hill_from') and kw['text_mdm2_hill_from'] !=u'':
text_mdm2_hill_from = float(kw['text_mdm2_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 >= text_mdm2_hill_from)
else:
text_mdm2_hill_from = None
if kw.has_key('text_mdm2_hill_to') and kw['text_mdm2_hill_to'] !=u'':
text_mdm2_hill_to = float(kw['text_mdm2_hill_to'])
if text_mdm2_hill_from:
if text_mdm2_hill_to>=text_mdm2_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
if kw.has_key('text_mdm2_fluor_from') and kw['text_mdm2_fluor_from'] !=u'':
text_mdm2_fluor_from = float(kw['text_mdm2_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 >= text_mdm2_fluor_from)
else:
text_mdm2_fluor_from = None
if kw.has_key('text_mdm2_fluor_to') and kw['text_mdm2_fluor_to'] !=u'':
text_mdm2_fluor_to = float(kw['text_mdm2_fluor_to'])
if text_mdm2_fluor_from:
if text_mdm2_fluor_to>=text_mdm2_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
if kw.has_key('text_mdm2_ki_from') and kw['text_mdm2_ki_from'] !=u'':
text_mdm2_ki_from = float(kw['text_mdm2_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 >= text_mdm2_ki_from)
else:
text_mdm2_ki_from = None
if kw.has_key('text_mdm2_ki_to') and kw['text_mdm2_ki_to'] !=u'':
text_mdm2_ki_to = float(kw['text_mdm2_ki_to'])
if text_mdm2_ki_from:
if text_mdm2_ki_to>=text_mdm2_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
if kw.has_key('text_mdm4_hill_from') and kw['text_mdm4_hill_from'] !=u'':
text_mdm4_hill_from = float(kw['text_mdm4_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 >= text_mdm4_hill_from)
else:
text_mdm4_hill_from = None
if kw.has_key('text_mdm4_hill_to') and kw['text_mdm4_hill_to'] !=u'':
text_mdm4_hill_to = float(kw['text_mdm4_hill_to'])
if text_mdm4_hill_from:
if text_mdm4_hill_to>=text_mdm4_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
if kw.has_key('text_mdm4_fluor_from') and kw['text_mdm4_fluor_from'] !=u'':
text_mdm4_fluor_from = float(kw['text_mdm4_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 >= text_mdm4_fluor_from)
else:
text_mdm4_fluor_from = None
if kw.has_key('text_mdm4_fluor_to') and kw['text_mdm4_fluor_to'] !=u'':
text_mdm4_fluor_to = float(kw['text_mdm4_fluor_to'])
if text_mdm4_fluor_from:
if text_mdm4_fluor_to>=text_mdm4_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
if kw.has_key('text_mdm4_ki_from') and kw['text_mdm4_ki_from'] !=u'':
text_mdm4_ki_from = float(kw['text_mdm4_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 >= text_mdm4_ki_from)
else:
text_mdm4_ki_from = None
if kw.has_key('text_mdm4_ki_to') and kw['text_mdm4_ki_to'] !=u'':
text_mdm4_ki_to = float(kw['text_mdm4_ki_to'])
if text_mdm4_ki_from:
if text_mdm4_ki_to>=text_mdm4_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
try:
tags = kw['text_tags']
except Exception:
tags = None
pass
if tags:
if isinstance(tags, basestring):
tagi = eval(tags)
if type(tagi) != type([]):
tagi = [int(tags)]
else:
tagi = [int(tid) for tid in tags]
lcompound = lcompound.filter(Compound.tags.any(Tags.id.in_(tagi)))
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
if search_clicked and kw['search'] == "Download":
if kw['file_type'] and kw['file_type'] != u'' and kw['sell_type'] and kw['sell_type'] != u'':
if kw['sell_type'] == u'all':
lcompounds = lcompound.all()
elif kw['sell_type'] == u'selected':
if selection:
lcompounds = ()
for el in selection:
lcompounds += (DBSession.query(LCompound).get(el), )
else:
flash(l_(u'Lack of selected structures for download'), 'error')
redirect(request.headers['Referer'])
elif kw['sell_type'] == u'range':
lcompounds = lcompound.all()
if kw.has_key('select_from') and kw['select_from'] != u'':
try:
select_from = int(kw['select_from']) -1
if select_from<1 or select_from>len(lcompounds):
select_from = 0
except Exception:
select_from = 0
else:
select_from = 0
if kw.has_key('select_to') and kw['select_to'] != u'':
try:
select_to = int(kw['select_to'])
if select_to<2 or select_to>len(lcompounds):
select_to = len(lcompounds)
except Exception:
select_to = len(lcompounds)
else:
select_to = len(lcompounds)
lcompounds_new = ()
for el in range(select_from, select_to):
lcompounds_new += (lcompounds[el], )
lcompounds = lcompounds_new
else:
flash(l_(u'Lack of items to download'), 'error')
redirect(request.headers['Referer'])
try:
if isinstance(kw['options'], basestring):
options = [kw['options']]
else:
options = kw['options']
except Exception:
flash(l_('Choose download options'), 'error')
redirect(request.headers['Referer'])
if 'getsize' in kw:
size = int(kw['getsize']), int(kw['getsize'])
else:
size = 100, 100
if kw['file_type'] == 'pdf':
filename = userid + '_selected.pdf'
from xhtml2pdf.pisa import CreatePDF
from tg.render import render as render_template
import cStringIO
html = render_template({"length":len(lcompounds), "lcompound":lcompounds, "cells":cells, "options":options, "size":size}, "genshi", "molgears.templates.users.results.print2", doctype=None)
dest = './molgears/files/pdf/' + filename
result = file(dest, "wb")
CreatePDF(cStringIO.StringIO(html.encode("UTF-8")), result, encoding="utf-8")
result.close()
import paste.fileapp
f = paste.fileapp.FileApp('./molgears/files/pdf/'+ filename)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'xls':
filename = userid + '_selected.xls'
filepath = os.path.join('./molgears/files/download/', filename)
from PIL import Image
import xlwt
wbk = xlwt.Workbook()
sheet = wbk.add_sheet('sheet1')
j=0
if 'nr' in options:
sheet.write(0,j,u'Nr.')
j+=1
if 'gid' in options:
sheet.write(0,j,u'GID')
j+=1
if 'id' in options:
sheet.write(0,j,u'ID')
j+=1
if 'name' in options:
sheet.write(0,j,u'Name')
j+=1
if 'names' in options:
sheet.write(0,j,u'Names')
j+=1
if 'image' in options:
sheet.write(0,j,u'Image')
j+=1
if 'smiles' in options:
sheet.write(0,j,u'SMILES')
j+=1
if 'inchi' in options:
sheet.write(0,j,u'InChi')
j+=1
if 'lso' in options:
sheet.write(0,j,u'LSO')
j+=1
if 'num_atoms' in options:
sheet.write(0,j,u'Atoms')
j+=1
if 'mw' in options:
sheet.write(0,j,u'MW')
j+=1
if 'hba' in options:
sheet.write(0,j,u'hba')
j+=1
if 'hbd' in options:
sheet.write(0,j,u'hbd')
j+=1
if 'tpsa' in options:
sheet.write(0,j,u'tpsa')
j+=1
if 'logp' in options:
sheet.write(0,j,u'logP')
j+=1
if 'purity' in options:
sheet.write(0,j, u'Purity')
j+=1
if 'create_date' in options:
sheet.write(0,j,u'Date')
j+=1
if 'box' in options:
sheet.write(0,j,u'Box')
j+=1
if 'entry' in options:
sheet.write(0,j,u'Entry')
j+=1
if 'source' in options:
sheet.write(0,j,u'Source')
j+=1
if 'content' in options:
sheet.write(0,j,u'Content')
j+=1
if 'tags' in options:
sheet.write(0,j,u'Tags')
j+=1
if 'notes' in options:
sheet.write(0,j,u'Notes')
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
sheet.write(0,j,u'CT %s' % cell_line)
j+=1
i = 1
for row in lcompounds:
j=0
if 'nr' in options:
sheet.write(i,j, str(i))
j+=1
if 'gid' in options:
sheet.write(i,j, row.gid)
j+=1
if 'id' in options:
sheet.write(i,j, row.id)
j+=1
if 'name' in options:
sheet.write(i,j, row.mol.name)
j+=1
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
sheet.write(i,j, names)
j+=1
if 'image' in options:
file_in = './molgears/public/img/%s.png' % row.gid
img = Image.open(file_in)
file_out = './molgears/public/img/bitmap/thumb%s.bmp' %row.gid
img.thumbnail(size, Image.ANTIALIAS)
img.save(file_out)
sheet.insert_bitmap(file_out , i,j, 5, 5)
j+=1
if 'smiles' in options:
sheet.write(i,j, str(row.mol.structure))
j+=1
if 'inchi' in options:
sheet.write(i,j, str(row.mol.inchi))
j+=1
if 'lso' in options:
sheet.write(i,j, row.lso)
j+=1
if 'num_atoms' in options:
sheet.write(i,j,str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
j+=1
if 'mw' in options:
sheet.write(i,j, str(row.mol.mw))
j+=1
if 'hba' in options:
sheet.write(i,j, str(row.mol.hba))
j+=1
if 'hbd' in options:
sheet.write(i,j, str(row.mol.hbd))
j+=1
if 'tpsa' in options:
sheet.write(i,j, str(row.mol.tpsa))
j+=1
if 'logp' in options:
sheet.write(i,j, str(row.mol.logp))
j+=1
if 'state' in options:
sheet.write(i,j, str(row.state))
j+=1
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
sheet.write(i,j, pur)
j+=1
if 'create_date' in options:
sheet.write(i,j, str(row.create_date))
j+=1
if 'owner' in options:
sheet.write(i,j, row.owner)
j+=1
if 'box' in options:
sheet.write(i,j, row.box)
j+=1
if 'entry' in options:
sheet.write(i,j, row.entry)
j+=1
if 'source' in options:
sheet.write(i,j, row.source)
j+=1
if 'content' in options:
if row.content:
sheet.write(i,j, str(row.content.value))
else:
sheet.write(i,j, 'None')
j+=1
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
sheet.write(i,j,tagsy)
j+=1
if 'notes' in options:
sheet.write(i,j, row.notes)
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
sheet.write(i,j, str(round(sum(res)/len(res), 3)))
else:
sheet.write(i,j, '')
j+=1
i += 1
wbk.save(filepath)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'sdf':
filepath = './molgears/files/download/out.sdf'
ww = Chem.SDWriter(filepath)
from rdkit.Chem import AllChem
for row in lcompounds:
m2 = Chem.MolFromSmiles(str(row.mol.structure))
AllChem.Compute2DCoords(m2)
AllChem.EmbedMolecule(m2)
AllChem.UFFOptimizeMolecule(m2)
if 'smiles' in options:
m2.SetProp("smiles", str(row.mol.structure))
if 'name' in options:
m2.SetProp("_Name", str(row.mol.name.encode('ascii', 'ignore')))
if 'nr' in options:
m2.SetProp("Nr", str(lcompounds.index(row)+1))
if 'gid' in options:
m2.SetProp("GID", str(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + ', '
m2.SetProp("names", str(names.encode('ascii', 'ignore')))
if 'inchi' in options:
m2.SetProp("InChi", str(row.mol.inchi))
if 'lso' in options:
m2.SetProp("LSO", str(row.lso))
if 'num_atoms' in options:
m2.SetProp("atoms", str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
if 'mw' in options:
m2.SetProp("mw", str(row.mol.mw))
if 'hba' in options:
m2.SetProp("hba", str(row.mol.hba))
if 'hbd' in options:
m2.SetProp("hbd", str(row.mol.hbd))
if 'tpsa' in options:
m2.SetProp("TPSA", str(row.mol.tpsa))
if 'logp' in options:
m2.SetProp("logP", str(row.mol.tpsa))
if 'create_date' in options:
m2.SetProp("create_date", str(row.create_date))
if 'owner' in options:
m2.SetProp("owner", str(row.owner))
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
m2.SetProp("tagi", str(tagsy.encode('ascii', 'ignore')))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s \n' % (p.value, p.type)
m2.SetProp("purity", str(pur.encode('ascii', 'ignore')))
if 'content' in options:
if row.content:
m2.SetProp("content", str(row.content.value))
else:
m2.SetProp("content", "None")
j+=1
if 'box' in options:
m2.SetProp("box", str(row.box))
if 'entry' in options:
m2.SetProp("entry", str(row.entry))
if 'notes' in options:
if row.notes:
m2.SetProp("notes", str(row.notes.encode('ascii', 'ignore')))
else:
m2.SetProp("notes", " ")
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
m2.SetProp('CT_%s' % cell_line, str(round(sum(res)/len(res), 3)))
else:
m2.SetProp('CT_%s' % cell_line, ' ')
ww.write(m2)
ww.close()
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'csv' or 'txt':
filename = userid + '_selected.' + kw['file_type']
filepath = os.path.join('./molgears/files/download/', filename)
from molgears.widgets.unicodeCSV import UnicodeWriter
import csv
if kw['file_type'] == u'csv':
delimiter = ';'
else:
delimiter = ' '
with open(filepath, 'wb') as csvfile:
spamwriter = UnicodeWriter(csvfile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in lcompounds:
line =[]
if 'smiles' in options:
line.append(str(row.mol.structure))
if 'name' in options:
line.append(row.mol.name)
if 'nr' in options:
line.append(unicode(lcompounds.index(row)+1))
if 'gid' in options:
line.append(unicode(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
line.append(names)
if 'inchi' in options:
line.append(row.mol.inchi)
if 'lso' in options:
line.append(row.lso)
if 'num_atoms' in options:
line.append(unicode(row.mol.num_hvy_atoms)+'/'+unicode(row.mol.num_atoms))
if 'mw' in options:
line.append(unicode(row.mol.mw))
if 'hba' in options:
line.append(unicode(row.mol.hba))
if 'hbd' in options:
line.append(unicode(row.mol.hbd))
if 'tpsa' in options:
line.append(unicode(row.mol.tpsa))
if 'logp' in options:
line.append(unicode(row.mol.logp))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
line.append(pur)
if 'create_date' in options:
line.append(unicode(row.create_date))
if 'owner' in options:
line.append(row.owner)
if 'box' in options:
line.append(row.box)
if 'entry' in options:
line.append(row.entry)
if 'source' in options:
line.append(row.source)
if 'content' in options:
if row.content:
line.append(unicode(row.content.value))
else:
line.append(u'None')
if 'tags' in options:
tagsy= ''
for tag in row.mol.tags:
tagsy += tag.name + ', '
line.append(tagsy)
if 'notes' in options:
line.append(row.notes)
spamwriter.writerow(line)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
if selection and not search_clicked:
argv =''
gids = ''
for arg in selection:
argv += '/' + arg
tmp_result = DBSession.query(LCompound).get(arg)
gids += '/' + str(tmp_result.gid)
if kw['akcja'] == u'edit':
redirect('/%s/molecules/multiedit/index%s' % (pname, gids))
elif kw['akcja'] == u'results':
if len(selection) == 1:
redirect('/%s/results/new_result%s' % (pname, argv))
else:
redirect('/%s/results/multiresults/index%s' % (pname, argv))
elif kw['akcja'] == u'htrf':
if len(selection) == 1:
redirect('/%s/results/htrf/add_result2%s' % (pname, argv))
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, pname=pname, alltags=alltags, similarity=similarity, cells=cells, ulists=ulists, ulist=ulist)
@expose()
def deletefromlist(self, ulist_id, *args):
"""
Delete compound from User List.
"""
ulist = DBSession.query(UserLists).get(ulist_id)
# pname = request.environ['PATH_INFO'].split('/')[1]
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
# ulists = [l for l in user.lists if l.table == 'Results']
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
for arg in args:
if int(arg) in elements:
elements.remove(int(arg))
ulist.elements = pickle.dumps(elements)
flash(l_(u'Task completed successfully'))
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
| 53.507557 | 240 | 0.427775 |
from tg import expose, flash, redirect, request
from tg.i18n import lazy_ugettext as l_
from molgears.model import DBSession, Tags, LCompound, LPurity, Names
from molgears.model import Compound, User, Projects
from molgears.model.auth import UserLists
from molgears.lib.base import BaseController
import os
from sqlalchemy import desc
from rdkit import Chem
from molgears.widgets.structure import checksmi
from datetime import datetime
from webhelpers import paginate
from molgears.widgets.rgbTuple import htmlRgb, htmlRgb100, Num2Rgb
from molgears.controllers.ctoxicity import CytotoxicityController
__all__ = ['ResultsController']
class ResultsController(BaseController):
ctoxicity=CytotoxicityController()
@expose('molgears.templates.users.results.index')
def index(self, page=1, *args, **kw):
pname = request.environ['PATH_INFO'].split('/')[1]
project = DBSession.query(Projects).filter_by(name=pname).first()
page_url = paginate.PageURL_WebOb(request)
import pickle
try:
cells = pickle.loads([test.cell_line for test in project.tests if test.name == 'CT'][0])
except:
cells = None
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.showme==True)
dsc = True
order = LCompound.id
tmpl = ''
alltags =[tag for tag in DBSession.query(Tags).order_by('name').all() ]
selection = None
similarity = None
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
ulist = None
ulists = set([l for l in user.lists if l.table == 'Results'] + [l for l in user.tg_user_lists if l.table == 'Results'])
items = user.items_per_page
try:
if kw['search'] != u'':
search_clicked = kw['search']
else:
search_clicked = None
except Exception:
search_clicked = None
if kw:
if kw.has_key('mylist'):
try:
ulist_id = int(kw['mylist'])
ulist = DBSession.query(UserLists).get(ulist_id)
except Exception:
flash(l_(u'List error'), 'error')
redirect(request.headers['Referer'])
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
if ulist.table == 'Results':
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(LCompound.id.in_(elements))
else:
flash(l_(u'Table error'), 'error')
redirect(request.headers['Referer'])
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
for k, v in kw.iteritems():
if str(k) == 'desc' and str(v) != '1':
dsc = None
elif str(k) == 'order_by':
if v in ('gid', 'create_date', 'box', 'form', 'state', 'entry', 'source', 'MDM2', 'MDM4', 'lcode'):
if v=='lcode':
order = LCompound.lcode
else:
order = LCompound.__getattribute__(LCompound, v)
else:
if v=='last_point':
lcompound=lcompound.join(LCompound.solubility)
order = v
elif hasattr(LCompound, v):
order = LCompound.__getattribute__(LCompound, v)
elif 'CTOX_' in v:
v = v.replace('CTOX_', '')
all_lcompounds = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).all()
for l in all_lcompounds:
l.avg_ct = v.replace('pp', '+')
order = '_avg_ct'
else:
order = v
if str(k) != 'select' and str(k) != 'remove' and str(v) != u'':
tmpl += str(k) + '=' + str(v) + '&'
elif str(k) == 'select':
try:
if isinstance(kw['select'], basestring):
selection = [kw['select']]
else:
selection = [id for id in kw['select']]
except Exception:
selection = None
if search_clicked:
try:
smiles = str(kw['smiles'])
if 'pp' in smiles:
smiles = smiles.replace('pp', '+')
method = str(kw['method'])
except Exception:
smiles = None
method = None
if smiles:
if checksmi(smiles):
from razi.functions import functions
from razi.expression import TxtMoleculeElement
if method == 'similarity':
query_bfp = functions.morgan_b(TxtMoleculeElement(smiles), 2)
constraint = Compound.morgan.tanimoto_similar(query_bfp)
tanimoto_sml = Compound.morgan.tanimoto_similarity(query_bfp).label('tanimoto')
search = DBSession.query(LCompound, tanimoto_sml).join(LCompound.mol).join(LCompound.purity).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
if order != LCompound.id:
if order == 'purity':
order = LPurity.value
if dsc:
search = search.order_by(desc(order).nullslast())
else:
search = search.order_by(order)
else:
search = search.order_by(desc(tanimoto_sml)).all()
lcompound = ()
similarity = ()
for row in search:
lcompound += (row[0], )
similarity += (row[1], )
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
elif method == 'substructure':
constraint = Compound.structure.contains(smiles)
lcompound = DBSession.query(LCompound).join(LCompound.mol).filter(Compound.project.any(Projects.name==pname)).filter(constraint)
elif method == 'identity':
lcompound = DBSession.query(LCompound).filter(Compound.project.any(Projects.name==pname)).join(LCompound.mol).filter(Compound.structure.equals(smiles))
else:
if method == 'smarts':
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
search = lcompound.all()
sub_lcompounds = ()
patt = Chem.MolFromSmarts(smiles)
if not patt:
flash(l_(u'SMARTS error'), 'warning')
redirect(request.headers['Referer'])
for row in search:
m = Chem.MolFromSmiles(str(row.mol.structure))
mol = Chem.AddHs(m)
if mol.HasSubstructMatch(patt):
sub_lcompounds += (row, )
currentPage = paginate.Page(sub_lcompounds, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', pname=pname, alltags=alltags, similarity=similarity,htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, cells=cells, ulists=ulists, ulist=ulist)
else:
flash(l_(u'SMILES error'), 'warning')
redirect(request.headers['Referer'])
if kw.has_key('text_GID') and kw['text_GID'] !=u'':
try:
gid = int(kw['text_GID'])
lcompound = lcompound.filter(LCompound.gid == gid)
except Exception as msg:
flash(l_(u'GID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_ID') and kw['text_ID'] !=u'':
try:
id = int(kw['text_ID'])
lcompound = lcompound.filter(LCompound.id == id)
except Exception as msg:
flash(l_(u'ID should be a number: %s' % msg), 'error')
redirect(request.headers['Referer'])
if kw.has_key('text_name') and kw['text_name'] !=u'':
lcompound = lcompound.filter(Compound.names.any(Names.name.like(kw['text_name'].strip().replace('*', '%'))))
if kw.has_key('text_notes') and kw['text_notes'] !=u'':
lcompound = lcompound.filter(LCompound.notes.like(kw['text_notes'].replace('*', '%')))
if kw.has_key('text_lso') and kw['text_lso'] !=u'':
lcompound = lcompound.filter(LCompound.lso.like(kw['text_lso'].replace('*', '%')))
if kw.has_key('text_entry') and kw['text_entry'] !=u'':
lcompound = lcompound.filter(LCompound.entry.like(kw['text_entry'].replace('*', '%')))
if kw.has_key('text_box') and kw['text_box'] !=u'':
lcompound = lcompound.filter(LCompound.box.like(kw['text_box'].replace('*', '%')))
if kw.has_key('date_from') and kw['date_from'] !=u'':
date_from = datetime.strptime(str(kw['date_from']), '%Y-%m-%d')
lcompound = lcompound.filter(LCompound.create_date > date_from)
else:
date_from = None
if kw.has_key('date_to') and kw['date_to'] !=u'':
date_to = datetime.strptime(str(kw['date_to']), '%Y-%m-%d')
if date_from:
if date_to>date_from:
lcompound = lcompound.filter(LCompound.create_date < date_to)
else:
flash(l_(u'The End date must be later than the initial'), 'error')
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.create_date < date_to)
if kw.has_key('text_mdm2_hill_from') and kw['text_mdm2_hill_from'] !=u'':
text_mdm2_hill_from = float(kw['text_mdm2_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 >= text_mdm2_hill_from)
else:
text_mdm2_hill_from = None
if kw.has_key('text_mdm2_hill_to') and kw['text_mdm2_hill_to'] !=u'':
text_mdm2_hill_to = float(kw['text_mdm2_hill_to'])
if text_mdm2_hill_from:
if text_mdm2_hill_to>=text_mdm2_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm2 <= text_mdm2_hill_to)
if kw.has_key('text_mdm2_fluor_from') and kw['text_mdm2_fluor_from'] !=u'':
text_mdm2_fluor_from = float(kw['text_mdm2_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 >= text_mdm2_fluor_from)
else:
text_mdm2_fluor_from = None
if kw.has_key('text_mdm2_fluor_to') and kw['text_mdm2_fluor_to'] !=u'':
text_mdm2_fluor_to = float(kw['text_mdm2_fluor_to'])
if text_mdm2_fluor_from:
if text_mdm2_fluor_to>=text_mdm2_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm2 <= text_mdm2_fluor_to)
if kw.has_key('text_mdm2_ki_from') and kw['text_mdm2_ki_from'] !=u'':
text_mdm2_ki_from = float(kw['text_mdm2_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 >= text_mdm2_ki_from)
else:
text_mdm2_ki_from = None
if kw.has_key('text_mdm2_ki_to') and kw['text_mdm2_ki_to'] !=u'':
text_mdm2_ki_to = float(kw['text_mdm2_ki_to'])
if text_mdm2_ki_from:
if text_mdm2_ki_to>=text_mdm2_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm2 <= text_mdm2_ki_to)
if kw.has_key('text_mdm4_hill_from') and kw['text_mdm4_hill_from'] !=u'':
text_mdm4_hill_from = float(kw['text_mdm4_hill_from'])
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 >= text_mdm4_hill_from)
else:
text_mdm4_hill_from = None
if kw.has_key('text_mdm4_hill_to') and kw['text_mdm4_hill_to'] !=u'':
text_mdm4_hill_to = float(kw['text_mdm4_hill_to'])
if text_mdm4_hill_from:
if text_mdm4_hill_to>=text_mdm4_hill_from:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_hillslope_mdm4 <= text_mdm4_hill_to)
if kw.has_key('text_mdm4_fluor_from') and kw['text_mdm4_fluor_from'] !=u'':
text_mdm4_fluor_from = float(kw['text_mdm4_fluor_from'])
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 >= text_mdm4_fluor_from)
else:
text_mdm4_fluor_from = None
if kw.has_key('text_mdm4_fluor_to') and kw['text_mdm4_fluor_to'] !=u'':
text_mdm4_fluor_to = float(kw['text_mdm4_fluor_to'])
if text_mdm4_fluor_from:
if text_mdm4_fluor_to>=text_mdm4_fluor_from:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_fluorescence_mdm4 <= text_mdm4_fluor_to)
if kw.has_key('text_mdm4_ki_from') and kw['text_mdm4_ki_from'] !=u'':
text_mdm4_ki_from = float(kw['text_mdm4_ki_from'])
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 >= text_mdm4_ki_from)
else:
text_mdm4_ki_from = None
if kw.has_key('text_mdm4_ki_to') and kw['text_mdm4_ki_to'] !=u'':
text_mdm4_ki_to = float(kw['text_mdm4_ki_to'])
if text_mdm4_ki_from:
if text_mdm4_ki_to>=text_mdm4_ki_from:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
else:
flash(l_(u'The final value must be greater than the initial'))
redirect(request.headers['Referer'])
else:
lcompound = lcompound.filter(LCompound.avg_ki_mdm4 <= text_mdm4_ki_to)
try:
tags = kw['text_tags']
except Exception:
tags = None
pass
if tags:
if isinstance(tags, basestring):
tagi = eval(tags)
if type(tagi) != type([]):
tagi = [int(tags)]
else:
tagi = [int(tid) for tid in tags]
lcompound = lcompound.filter(Compound.tags.any(Tags.id.in_(tagi)))
if dsc:
lcompound = lcompound.order_by(desc(order).nullslast())
else:
lcompound = lcompound.order_by(order)
if search_clicked and kw['search'] == "Download":
if kw['file_type'] and kw['file_type'] != u'' and kw['sell_type'] and kw['sell_type'] != u'':
if kw['sell_type'] == u'all':
lcompounds = lcompound.all()
elif kw['sell_type'] == u'selected':
if selection:
lcompounds = ()
for el in selection:
lcompounds += (DBSession.query(LCompound).get(el), )
else:
flash(l_(u'Lack of selected structures for download'), 'error')
redirect(request.headers['Referer'])
elif kw['sell_type'] == u'range':
lcompounds = lcompound.all()
if kw.has_key('select_from') and kw['select_from'] != u'':
try:
select_from = int(kw['select_from']) -1
if select_from<1 or select_from>len(lcompounds):
select_from = 0
except Exception:
select_from = 0
else:
select_from = 0
if kw.has_key('select_to') and kw['select_to'] != u'':
try:
select_to = int(kw['select_to'])
if select_to<2 or select_to>len(lcompounds):
select_to = len(lcompounds)
except Exception:
select_to = len(lcompounds)
else:
select_to = len(lcompounds)
lcompounds_new = ()
for el in range(select_from, select_to):
lcompounds_new += (lcompounds[el], )
lcompounds = lcompounds_new
else:
flash(l_(u'Lack of items to download'), 'error')
redirect(request.headers['Referer'])
try:
if isinstance(kw['options'], basestring):
options = [kw['options']]
else:
options = kw['options']
except Exception:
flash(l_('Choose download options'), 'error')
redirect(request.headers['Referer'])
if 'getsize' in kw:
size = int(kw['getsize']), int(kw['getsize'])
else:
size = 100, 100
if kw['file_type'] == 'pdf':
filename = userid + '_selected.pdf'
from xhtml2pdf.pisa import CreatePDF
from tg.render import render as render_template
import cStringIO
html = render_template({"length":len(lcompounds), "lcompound":lcompounds, "cells":cells, "options":options, "size":size}, "genshi", "molgears.templates.users.results.print2", doctype=None)
dest = './molgears/files/pdf/' + filename
result = file(dest, "wb")
CreatePDF(cStringIO.StringIO(html.encode("UTF-8")), result, encoding="utf-8")
result.close()
import paste.fileapp
f = paste.fileapp.FileApp('./molgears/files/pdf/'+ filename)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'xls':
filename = userid + '_selected.xls'
filepath = os.path.join('./molgears/files/download/', filename)
from PIL import Image
import xlwt
wbk = xlwt.Workbook()
sheet = wbk.add_sheet('sheet1')
j=0
if 'nr' in options:
sheet.write(0,j,u'Nr.')
j+=1
if 'gid' in options:
sheet.write(0,j,u'GID')
j+=1
if 'id' in options:
sheet.write(0,j,u'ID')
j+=1
if 'name' in options:
sheet.write(0,j,u'Name')
j+=1
if 'names' in options:
sheet.write(0,j,u'Names')
j+=1
if 'image' in options:
sheet.write(0,j,u'Image')
j+=1
if 'smiles' in options:
sheet.write(0,j,u'SMILES')
j+=1
if 'inchi' in options:
sheet.write(0,j,u'InChi')
j+=1
if 'lso' in options:
sheet.write(0,j,u'LSO')
j+=1
if 'num_atoms' in options:
sheet.write(0,j,u'Atoms')
j+=1
if 'mw' in options:
sheet.write(0,j,u'MW')
j+=1
if 'hba' in options:
sheet.write(0,j,u'hba')
j+=1
if 'hbd' in options:
sheet.write(0,j,u'hbd')
j+=1
if 'tpsa' in options:
sheet.write(0,j,u'tpsa')
j+=1
if 'logp' in options:
sheet.write(0,j,u'logP')
j+=1
if 'purity' in options:
sheet.write(0,j, u'Purity')
j+=1
if 'create_date' in options:
sheet.write(0,j,u'Date')
j+=1
if 'box' in options:
sheet.write(0,j,u'Box')
j+=1
if 'entry' in options:
sheet.write(0,j,u'Entry')
j+=1
if 'source' in options:
sheet.write(0,j,u'Source')
j+=1
if 'content' in options:
sheet.write(0,j,u'Content')
j+=1
if 'tags' in options:
sheet.write(0,j,u'Tags')
j+=1
if 'notes' in options:
sheet.write(0,j,u'Notes')
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
sheet.write(0,j,u'CT %s' % cell_line)
j+=1
i = 1
for row in lcompounds:
j=0
if 'nr' in options:
sheet.write(i,j, str(i))
j+=1
if 'gid' in options:
sheet.write(i,j, row.gid)
j+=1
if 'id' in options:
sheet.write(i,j, row.id)
j+=1
if 'name' in options:
sheet.write(i,j, row.mol.name)
j+=1
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
sheet.write(i,j, names)
j+=1
if 'image' in options:
file_in = './molgears/public/img/%s.png' % row.gid
img = Image.open(file_in)
file_out = './molgears/public/img/bitmap/thumb%s.bmp' %row.gid
img.thumbnail(size, Image.ANTIALIAS)
img.save(file_out)
sheet.insert_bitmap(file_out , i,j, 5, 5)
j+=1
if 'smiles' in options:
sheet.write(i,j, str(row.mol.structure))
j+=1
if 'inchi' in options:
sheet.write(i,j, str(row.mol.inchi))
j+=1
if 'lso' in options:
sheet.write(i,j, row.lso)
j+=1
if 'num_atoms' in options:
sheet.write(i,j,str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
j+=1
if 'mw' in options:
sheet.write(i,j, str(row.mol.mw))
j+=1
if 'hba' in options:
sheet.write(i,j, str(row.mol.hba))
j+=1
if 'hbd' in options:
sheet.write(i,j, str(row.mol.hbd))
j+=1
if 'tpsa' in options:
sheet.write(i,j, str(row.mol.tpsa))
j+=1
if 'logp' in options:
sheet.write(i,j, str(row.mol.logp))
j+=1
if 'state' in options:
sheet.write(i,j, str(row.state))
j+=1
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
sheet.write(i,j, pur)
j+=1
if 'create_date' in options:
sheet.write(i,j, str(row.create_date))
j+=1
if 'owner' in options:
sheet.write(i,j, row.owner)
j+=1
if 'box' in options:
sheet.write(i,j, row.box)
j+=1
if 'entry' in options:
sheet.write(i,j, row.entry)
j+=1
if 'source' in options:
sheet.write(i,j, row.source)
j+=1
if 'content' in options:
if row.content:
sheet.write(i,j, str(row.content.value))
else:
sheet.write(i,j, 'None')
j+=1
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
sheet.write(i,j,tagsy)
j+=1
if 'notes' in options:
sheet.write(i,j, row.notes)
j+=1
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
sheet.write(i,j, str(round(sum(res)/len(res), 3)))
else:
sheet.write(i,j, '')
j+=1
i += 1
wbk.save(filepath)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'sdf':
filepath = './molgears/files/download/out.sdf'
ww = Chem.SDWriter(filepath)
from rdkit.Chem import AllChem
for row in lcompounds:
m2 = Chem.MolFromSmiles(str(row.mol.structure))
AllChem.Compute2DCoords(m2)
AllChem.EmbedMolecule(m2)
AllChem.UFFOptimizeMolecule(m2)
if 'smiles' in options:
m2.SetProp("smiles", str(row.mol.structure))
if 'name' in options:
m2.SetProp("_Name", str(row.mol.name.encode('ascii', 'ignore')))
if 'nr' in options:
m2.SetProp("Nr", str(lcompounds.index(row)+1))
if 'gid' in options:
m2.SetProp("GID", str(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + ', '
m2.SetProp("names", str(names.encode('ascii', 'ignore')))
if 'inchi' in options:
m2.SetProp("InChi", str(row.mol.inchi))
if 'lso' in options:
m2.SetProp("LSO", str(row.lso))
if 'num_atoms' in options:
m2.SetProp("atoms", str(row.mol.num_hvy_atoms)+'/'+str(row.mol.num_atoms))
if 'mw' in options:
m2.SetProp("mw", str(row.mol.mw))
if 'hba' in options:
m2.SetProp("hba", str(row.mol.hba))
if 'hbd' in options:
m2.SetProp("hbd", str(row.mol.hbd))
if 'tpsa' in options:
m2.SetProp("TPSA", str(row.mol.tpsa))
if 'logp' in options:
m2.SetProp("logP", str(row.mol.tpsa))
if 'create_date' in options:
m2.SetProp("create_date", str(row.create_date))
if 'owner' in options:
m2.SetProp("owner", str(row.owner))
if 'tags' in options:
tagsy=u''
for tag in row.mol.tags:
tagsy += tag.name + u', '
m2.SetProp("tagi", str(tagsy.encode('ascii', 'ignore')))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s \n' % (p.value, p.type)
m2.SetProp("purity", str(pur.encode('ascii', 'ignore')))
if 'content' in options:
if row.content:
m2.SetProp("content", str(row.content.value))
else:
m2.SetProp("content", "None")
j+=1
if 'box' in options:
m2.SetProp("box", str(row.box))
if 'entry' in options:
m2.SetProp("entry", str(row.entry))
if 'notes' in options:
if row.notes:
m2.SetProp("notes", str(row.notes.encode('ascii', 'ignore')))
else:
m2.SetProp("notes", " ")
for cell_line in cells:
if '_CT_%s' % cell_line in options:
res = []
if row.ctoxicity:
for ct in sorted(row.ctoxicity, key=lambda ct: ct.id):
if ct.cell_line==cell_line:
res.append(ct.ic50)
if len(res)>0:
m2.SetProp('CT_%s' % cell_line, str(round(sum(res)/len(res), 3)))
else:
m2.SetProp('CT_%s' % cell_line, ' ')
ww.write(m2)
ww.close()
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
elif kw['file_type'] == 'csv' or 'txt':
filename = userid + '_selected.' + kw['file_type']
filepath = os.path.join('./molgears/files/download/', filename)
from molgears.widgets.unicodeCSV import UnicodeWriter
import csv
if kw['file_type'] == u'csv':
delimiter = ';'
else:
delimiter = ' '
with open(filepath, 'wb') as csvfile:
spamwriter = UnicodeWriter(csvfile, delimiter=delimiter,
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in lcompounds:
line =[]
if 'smiles' in options:
line.append(str(row.mol.structure))
if 'name' in options:
line.append(row.mol.name)
if 'nr' in options:
line.append(unicode(lcompounds.index(row)+1))
if 'gid' in options:
line.append(unicode(row.gid))
if 'names' in options:
names = u''
for n in row.mol.names:
names += n.name + u', '
line.append(names)
if 'inchi' in options:
line.append(row.mol.inchi)
if 'lso' in options:
line.append(row.lso)
if 'num_atoms' in options:
line.append(unicode(row.mol.num_hvy_atoms)+'/'+unicode(row.mol.num_atoms))
if 'mw' in options:
line.append(unicode(row.mol.mw))
if 'hba' in options:
line.append(unicode(row.mol.hba))
if 'hbd' in options:
line.append(unicode(row.mol.hbd))
if 'tpsa' in options:
line.append(unicode(row.mol.tpsa))
if 'logp' in options:
line.append(unicode(row.mol.logp))
if 'purity' in options:
pur = u''
for p in sorted(row.purity, key=lambda p: p.value, reverse=True):
pur += u'%s : %s\n' % (p.value, p.type)
line.append(pur)
if 'create_date' in options:
line.append(unicode(row.create_date))
if 'owner' in options:
line.append(row.owner)
if 'box' in options:
line.append(row.box)
if 'entry' in options:
line.append(row.entry)
if 'source' in options:
line.append(row.source)
if 'content' in options:
if row.content:
line.append(unicode(row.content.value))
else:
line.append(u'None')
if 'tags' in options:
tagsy= ''
for tag in row.mol.tags:
tagsy += tag.name + ', '
line.append(tagsy)
if 'notes' in options:
line.append(row.notes)
spamwriter.writerow(line)
import paste.fileapp
f = paste.fileapp.FileApp(filepath)
from tg import use_wsgi_app
return use_wsgi_app(f)
if selection and not search_clicked:
argv =''
gids = ''
for arg in selection:
argv += '/' + arg
tmp_result = DBSession.query(LCompound).get(arg)
gids += '/' + str(tmp_result.gid)
if kw['akcja'] == u'edit':
redirect('/%s/molecules/multiedit/index%s' % (pname, gids))
elif kw['akcja'] == u'results':
if len(selection) == 1:
redirect('/%s/results/new_result%s' % (pname, argv))
else:
redirect('/%s/results/multiresults/index%s' % (pname, argv))
elif kw['akcja'] == u'htrf':
if len(selection) == 1:
redirect('/%s/results/htrf/add_result2%s' % (pname, argv))
currentPage = paginate.Page(lcompound, page, url=page_url, items_per_page=items)
return dict(currentPage=currentPage,tmpl=tmpl, page='results', htmlRgb=htmlRgb, htmlRgb100=htmlRgb100, Num2Rgb=Num2Rgb, pname=pname, alltags=alltags, similarity=similarity, cells=cells, ulists=ulists, ulist=ulist)
@expose()
def deletefromlist(self, ulist_id, *args):
ulist = DBSession.query(UserLists).get(ulist_id)
userid = request.identity['repoze.who.userid']
user = DBSession.query(User).filter_by(user_name=userid).first()
if (ulist in user.lists) or (user in ulist.permitusers):
if ulist.elements:
import pickle
elements = [int(el) for el in pickle.loads(ulist.elements)]
for arg in args:
if int(arg) in elements:
elements.remove(int(arg))
ulist.elements = pickle.dumps(elements)
flash(l_(u'Task completed successfully'))
else:
flash(l_(u'Permission denied'), 'error')
redirect(request.headers['Referer'])
| true | true |
f714dbac06e6467ae8dac56a6f4797e46e75a4c1 | 30,324 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_ddos_protection_plans_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-03-24T06:26:11.000Z | 2021-04-18T15:55:59.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_ddos_protection_plans_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 4 | 2019-04-17T17:57:49.000Z | 2020-04-24T21:11:22.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_07_01/operations/_ddos_protection_plans_operations.py | beltr0n/azure-sdk-for-python | 2f7fb8bee881b0fc0386a0ad5385755ceedd0453 | [
"MIT"
] | 2 | 2021-05-23T16:46:31.000Z | 2021-05-26T23:51:09.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
"""DdosProtectionPlansOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified DDoS protection plan.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial DELETE.  The lambda cls returns the
            # raw PipelineResponse so the poller can drive the operation itself.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Strip per-request kwargs so they are not replayed on every poll request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # DELETE returns no body; only invoke the user's callback, if any.
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # This operation reports its final state via the 'location' header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
def get(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
"""Gets information about the specified DDoS protection plan.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_protection_plan_name: The name of the DDoS protection plan.
:type ddos_protection_plan_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosProtectionPlan, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.DdosProtectionPlan"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        parameters,  # type: "_models.DdosProtectionPlan"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.DdosProtectionPlan"]
        """Creates or updates a DDoS protection plan.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the create or update operation.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT.  The lambda cls hands back the
            # raw PipelineResponse so the poller can drive the operation itself.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Strip per-request kwargs so they are not replayed on every poll request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the model (or the user's cls).
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # This operation reports its final state via the Azure-AsyncOperation header.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
def _update_tags_initial(
self,
resource_group_name, # type: str
ddos_protection_plan_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.DdosProtectionPlan"
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlan"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'} # type: ignore
    def begin_update_tags(
        self,
        resource_group_name,  # type: str
        ddos_protection_plan_name,  # type: str
        parameters,  # type: "_models.TagsObject"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.DdosProtectionPlan"]
        """Update a DDoS protection plan tags.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param ddos_protection_plan_name: The name of the DDoS protection plan.
        :type ddos_protection_plan_name: str
        :param parameters: Parameters supplied to the update DDoS protection plan resource tags.
        :type parameters: ~azure.mgmt.network.v2019_07_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either DdosProtectionPlan or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlan]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlan"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PATCH.  The lambda cls hands back the
            # raw PipelineResponse so the poller can drive the operation itself.
            raw_result = self._update_tags_initial(
                resource_group_name=resource_group_name,
                ddos_protection_plan_name=ddos_protection_plan_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # Strip per-request kwargs so they are not replayed on every poll request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the model (or the user's cls).
            deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # No lro_options here: ARMPolling applies its default final-state resolution.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}  # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
"""Gets all DDoS protection plans in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlanListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DdosProtectionPlanListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.DdosProtectionPlanListResult"]
        """Gets all the DDoS protection plans in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DdosProtectionPlanListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_07_01.models.DdosProtectionPlanListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DdosProtectionPlanListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-07-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the first-page request from the URL template, or a follow-up
            # request that hits the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand (next_link, items) to ItemPaged.
            deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Execute one page request and fail fast on any non-200 status.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'}  # type: ignore
| 49.711475 | 207 | 0.667491 |
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DdosProtectionPlansOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name,
ddos_protection_plan_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
url = self._delete_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def begin_delete(
self,
resource_group_name,
ddos_protection_plan_name,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def get(
self,
resource_group_name,
ddos_protection_plan_name,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
accept = "application/json"
url = self.get.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def _create_or_update_initial(
self,
resource_group_name,
ddos_protection_plan_name,
parameters,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
url = self._create_or_update_initial.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
header_parameters = {}
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {}
body_content = self._serialize.body(parameters, 'DdosProtectionPlan')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def begin_create_or_update(
self,
resource_group_name,
ddos_protection_plan_name,
parameters,
**kwargs
):
polling = kwargs.pop('polling', True)
cls = kwargs.pop('cls', None)
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None)
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_protection_plan_name=ddos_protection_plan_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def _update_tags_initial(
    self,
    resource_group_name,
    ddos_protection_plan_name,
    parameters,
    **kwargs
):
    """Send the initial PATCH that updates a DDoS protection plan's tags.

    ``parameters`` is serialized as a 'TagsObject' body; a 200 response is
    deserialized as 'DdosProtectionPlan', anything else raises
    ``HttpResponseError``.
    """
    response_cls = kwargs.pop('cls', None)
    # Default HTTP-status -> exception mapping; callers may extend it.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Build the request URL from the method's metadata template.
    url = self._update_tags_initial.metadata['url']
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)
    query_parameters = {
        'api-version': self._serialize.query("api_version", api_version, 'str'),
    }
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }
    body_content = self._serialize.body(parameters, 'TagsObject')

    request = self._client.patch(url, query_parameters, header_parameters, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)
    deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
    if response_cls:
        return response_cls(pipeline_response, deserialized, {})
    return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def begin_update_tags(
    self,
    resource_group_name,
    ddos_protection_plan_name,
    parameters,
    **kwargs
):
    """Update a DDoS protection plan's tags (long-running operation).

    Delegates the initial PATCH to ``_update_tags_initial`` and returns an
    ``LROPoller`` whose final value is a deserialized 'DdosProtectionPlan'.

    Recognized keyword arguments:
        polling: ``True`` for default ARM polling, ``False`` for no polling,
            or a custom polling-method object.
        polling_interval: seconds between polls (defaults to client config).
        continuation_token: token to resume a previously started operation.
        cls: optional callable applied to (response, deserialized, headers).
    """
    polling_opt = kwargs.pop('polling', True)
    response_cls = kwargs.pop('cls', None)
    poll_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    token = kwargs.pop('continuation_token', None)
    if token is None:
        # No continuation token: start the operation now.
        raw_result = self._update_tags_initial(
            resource_group_name=resource_group_name,
            ddos_protection_plan_name=ddos_protection_plan_name,
            parameters=parameters,
            cls=lambda x, y, z: x,
            **kwargs
        )
    # These were consumed by the initial call; drop them before polling.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # Deserialize the terminal response, honoring a caller-supplied hook.
        deserialized = self._deserialize('DdosProtectionPlan', pipeline_response)
        if response_cls:
            return response_cls(pipeline_response, deserialized, {})
        return deserialized

    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'ddosProtectionPlanName': self._serialize.url("ddos_protection_plan_name", ddos_protection_plan_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    if polling_opt is True:
        polling_method = ARMPolling(poll_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling_opt is False:
        polling_method = NoPolling()
    else:
        # Caller supplied their own polling-method object.
        polling_method = polling_opt
    if token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans/{ddosProtectionPlanName}'}
def list(
    self,
    **kwargs
):
    """Return an ``ItemPaged`` over all DDoS protection plans in the subscription.

    Each page is deserialized as 'DdosProtectionPlanListResult'; iteration
    follows ``next_link`` until exhausted. (The name shadows the builtin
    ``list`` — that is the SDK's operation-naming convention.)
    """
    response_cls = kwargs.pop('cls', None)
    # Default HTTP-status -> exception mapping; callers may extend it.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {
            'Accept': self._serialize.header("accept", accept, 'str'),
        }
        if not next_link:
            # First page: format the templated URL and add the api-version.
            url = self.list.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {
                'api-version': self._serialize.query("api_version", api_version, 'str'),
            }
        else:
            # Subsequent pages: the service-provided link is already complete.
            url = next_link
            query_parameters = {}
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
        elements = deserialized.value
        if response_cls:
            elements = response_cls(elements)
        return deserialized.next_link or None, iter(elements)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ddosProtectionPlans'}
def list_by_resource_group(
    self,
    resource_group_name,
    **kwargs
):
    """Return an ``ItemPaged`` over the DDoS protection plans in one resource group.

    Each page is deserialized as 'DdosProtectionPlanListResult'; iteration
    follows ``next_link`` until exhausted.
    """
    response_cls = kwargs.pop('cls', None)
    # Default HTTP-status -> exception mapping; callers may extend it.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-07-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        header_parameters = {
            'Accept': self._serialize.header("accept", accept, 'str'),
        }
        if not next_link:
            # First page: format the templated URL and add the api-version.
            url = self.list_by_resource_group.metadata['url']
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            query_parameters = {
                'api-version': self._serialize.query("api_version", api_version, 'str'),
            }
        else:
            # Subsequent pages: the service-provided link is already complete.
            url = next_link
            query_parameters = {}
        return self._client.get(url, query_parameters, header_parameters)

    def extract_data(pipeline_response):
        deserialized = self._deserialize('DdosProtectionPlanListResult', pipeline_response)
        elements = deserialized.value
        if response_cls:
            elements = response_cls(elements)
        return deserialized.next_link or None, iter(elements)

    def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code != 200:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosProtectionPlans'}
| true | true |
f714dbc004258f17a45bcd057be6ef72a01f16ec | 414 | py | Python | setup.py | SabevAtGitHub/qspreadsheet | 29127dd6f38573c7ede7680cf8f4852368fb2c38 | [
"MIT"
] | null | null | null | setup.py | SabevAtGitHub/qspreadsheet | 29127dd6f38573c7ede7680cf8f4852368fb2c38 | [
"MIT"
] | null | null | null | setup.py | SabevAtGitHub/qspreadsheet | 29127dd6f38573c7ede7680cf8f4852368fb2c38 | [
"MIT"
] | null | null | null | import setuptools
setuptools.setup(
name='qspreadsheet',
version='0.1.0',
author='TT-at-GitHub',
author_email='tt3d@start.bg',
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'numpy>=1.19.0',
'pandas>=1.0.5',
'PySide2>=5.13.0'
],
description='Package used to show and edit pandas DataFrame in GUI',
python_requires='>=3.7.5'
) | 24.352941 | 72 | 0.615942 | import setuptools
setuptools.setup(
name='qspreadsheet',
version='0.1.0',
author='TT-at-GitHub',
author_email='tt3d@start.bg',
license='MIT',
packages=setuptools.find_packages(),
install_requires=[
'numpy>=1.19.0',
'pandas>=1.0.5',
'PySide2>=5.13.0'
],
description='Package used to show and edit pandas DataFrame in GUI',
python_requires='>=3.7.5'
) | true | true |
f714dbe7af81229b495cab12b96c587e22021104 | 913 | py | Python | var/spack/repos/builtin/packages/libgcrypt/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2018-08-20T06:55:11.000Z | 2018-08-20T06:55:11.000Z | var/spack/repos/builtin/packages/libgcrypt/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-04-29T22:36:27.000Z | 2019-04-30T12:51:38.000Z | var/spack/repos/builtin/packages/libgcrypt/package.py | nkianggiss/spack | 3477d3375142a30f5714bb5966a6d8bb22c33c06 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-03-12T19:27:17.000Z | 2020-03-12T19:27:17.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libgcrypt(AutotoolsPackage):
    """Spack recipe for libgcrypt, the general-purpose cryptographic
    library derived from GnuPG. It supplies the standard cryptographic
    building blocks: symmetric ciphers, hash algorithms, MACs, public-key
    algorithms, large-integer arithmetic, random numbers, and assorted
    supporting utilities.
    """

    homepage = "http://www.gnu.org/software/libgcrypt/"
    url = "https://gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-1.8.1.tar.bz2"

    # Known releases with their md5 checksums, newest first.
    version('1.8.1', 'b21817f9d850064d2177285f1073ec55')
    version('1.7.6', '54e180679a7ae4d090f8689ca32b654c')
    version('1.6.2', 'b54395a93cb1e57619943c082da09d5f')

    # libgcrypt builds on GnuPG's common error-reporting library.
    depends_on("libgpg-error")
| 39.695652 | 74 | 0.736035 |
from spack import *
class Libgcrypt(AutotoolsPackage):
homepage = "http://www.gnu.org/software/libgcrypt/"
url = "https://gnupg.org/ftp/gcrypt/libgcrypt/libgcrypt-1.8.1.tar.bz2"
version('1.8.1', 'b21817f9d850064d2177285f1073ec55')
version('1.7.6', '54e180679a7ae4d090f8689ca32b654c')
version('1.6.2', 'b54395a93cb1e57619943c082da09d5f')
depends_on("libgpg-error")
| true | true |
f714dc8856bcaebac5a0892eb1e21befd216579a | 10,791 | py | Python | examples/Ball_Drop/GenDataBallDrop1.py | lanl/SEPIA | 0a1e606e1d1072f49e4f3f358962bd8918a5d3a3 | [
"BSD-3-Clause"
] | 19 | 2020-06-22T16:37:07.000Z | 2022-02-18T22:50:59.000Z | examples/Ball_Drop/GenDataBallDrop1.py | lanl/SEPIA | 0a1e606e1d1072f49e4f3f358962bd8918a5d3a3 | [
"BSD-3-Clause"
] | 41 | 2020-07-07T22:52:33.000Z | 2021-11-04T14:05:03.000Z | examples/Ball_Drop/GenDataBallDrop1.py | lanl/SEPIA | 0a1e606e1d1072f49e4f3f358962bd8918a5d3a3 | [
"BSD-3-Clause"
] | 6 | 2020-08-14T18:58:45.000Z | 2022-03-01T21:00:14.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 24 07:52:25 2020
Generate, Plot, and write all data needed for ball drop example 1
@author: granthutchings
"""
#%% Imports
import numpy as np
#import pyDOE # Latin Hypercube
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from invertH import invertHsim, invertHtrue
#%% notes
# x = R
# theta = C
# y = {h, t}, i.e., pairs of h and t that form a trace when plotted
# imagine the field experiments involve say 4 platforms --> 4 values of h.
# Then for each R, one experiment gives output of 4 h-t pairs (a curve).
# Likewise for the simulator, we have a dense grid of say 100 heights h.
# Then for each setting of {x, theta} = {R, C} we get output of 100 h-t
# pairs.
# I'll make python files to:
# 1. generate the h-t pairs and write them into files. (this file and invertH.py)
# 2. a "runmcmc"-type file that first calls...
# 3. ...a file that reads in the data and packages it appropriately
# generate "field" data and "simulator" data, where the simulator model is
# systematically off from reality.
# true: d2h/dt2 = g - C (dh/dt)^2 / R
# sim: d2h/dt2 = g - C (dh/dt) / R
# inputs for field experiments: x = R
# inputs for simulator: x = R, theta = C
# We want to calibrate theta in the simulator to match the field data.
#%% Compute data
def gen_data(et, plot_design=False, R_new=None, R_design=None, C_design=None):
    """Generate synthetic field and simulator data for the ball-drop example.

    "Field" data come from the true model (drag ~ velocity squared) via
    ``invertHtrue``; "simulator" data come from a systematically wrong model
    (drag ~ velocity) via ``invertHsim``, so the two disagree by design.

    Args:
        et: measurement-error term passed through to ``invertHtrue``.
        plot_design: if True, scatter-plot the (R, C) simulator design.
        R_new: optional replacement for the default field radii [.1, .2, .4].
        R_design, C_design: optional overrides for the simulator design columns.

    Returns:
        dict with keys 'R', 'sim_design', 'n', 'm', 'C_true', 'h_field',
        'h_sim', 'h_dense', 'y_field', 'y_field_dense', 'y_sim', 'y_sim_dense'.
    """
    n, m = 3, 25                       # number of field experiments / sim runs
    g = 9.8                            # gravity (m/s^2)
    # Drag coefficient 0.1 for a smooth sphere, divided by 4*pi/3 to absorb a
    # volume-related constant (excluding R).
    C_true = .1 / (4 * np.pi / 3); print('generating data with C = ', C_true)

    n_field_heights = 4
    h_field = np.linspace(5, 20, n_field_heights)  # field platform heights
    h_sim = np.arange(1.5, 25, 1.5)                # height grid for the simulator
    # Denser grid (fine near the ground) used only for drawing smooth curves.
    h_dense = np.concatenate((np.arange(0, 2, .01), np.arange(2, 25, .5)))

    # Radii of the balls dropped in the field experiments (meters).
    R = np.array([.1, .2, .4]) if R_new is None else R_new

    # Fixed 25x2 Latin-hypercube-style design on [0,1]^2 (Kary's design,
    # hard-coded for reproducible testing instead of drawing a fresh LHS).
    sim_design = np.array([
        [0.1239, 0.8024],
        [0.8738, 0.6473],
        [0.6140, 0.3337],
        [0.8833, 0.4783],
        [0.9946, 0.0548],
        [0.1178, 0.9382],
        [0.1805, 0.2411],
        [0.6638, 0.2861],
        [0.2939, 0.1208],
        [0.2451, 0.2397],
        [0.4577, 0.5696],
        [0.4377, 0.8874],
        [0.0737, 0.7384],
        [0.6931, 0.8683],
        [0.4901, 0.7070],
        [0.5953, 0.9828],
        [0.7506, 0.1009],
        [0.7783, 0.4225],
        [0.8333, 0.5318],
        [0.3987, 0.6312],
        [0.2021, 0.4990],
        [0.3495, 0.3680],
        [0.9411, 0.7935],
        [0.0198, 0.0218],
        [0.5440, 0.1925]])
    # Rescale column 0 to [0.05, 0.45] (covers the field radii) and column 1
    # to [0.05, 0.25] (covers C_true).
    sim_design[:, 0] = sim_design[:, 0] * .4 + .05
    sim_design[:, 1] = sim_design[:, 1] * .2 + .05
    R_sim = sim_design[:, 0] if R_design is None else R_design
    C_sim = sim_design[:, 1] if C_design is None else C_design

    if plot_design:
        plt.scatter(R_sim, C_sim)
        plt.xlabel("R design points"); plt.ylabel("C design points")
        plt.title("Simulator Design")
        plt.show()

    # Field observations from the true model, plus a dense curve for plots.
    y_field = invertHtrue(h_field, g, C_true, R, et)
    y_field_dense = invertHtrue(h_dense, g, C_true, R, et)
    # Simulator output for each (C, R) design point, plus a dense curve.
    y_sim = invertHsim(h_sim, g, C_sim, R_sim)
    y_sim_dense = invertHsim(h_dense, g, C_sim, R_sim)

    return dict([('R', R), ('sim_design', np.column_stack((R_sim, C_sim))),
                 ('n', n), ('m', m), ('C_true', C_true),
                 ('h_field', h_field), ('h_sim', h_sim), ('h_dense', h_dense),
                 ('y_field', y_field), ('y_field_dense', y_field_dense),
                 ('y_sim', y_sim), ('y_sim_dense', y_sim_dense)])
#%% #===================== Plots ===============================#
def plot_data(data_dict, inset=True, near_sim=True):
    """Plot field observations against simulator runs, one panel per radius.

    Args:
        data_dict: dictionary as produced by ``gen_data``.
        inset: if True, embed a small (R, C) design scatter in each panel.
        near_sim: if True, highlight the 3 sim runs nearest each field radius.

    Side effects: saves the figure to 'data/plotAll.png' and shows it.
    """
    # Unpack everything up front (some entries are looked up twice, mirroring
    # the original flow; dict access is cheap and side-effect free).
    n = data_dict['n']
    m = data_dict['m']
    y_sim = data_dict['y_sim']
    y_field = data_dict['y_field']
    R = data_dict['R']
    R_sim = data_dict['sim_design'][:, 0]
    C_sim = data_dict['sim_design'][:, 1]
    h_field = data_dict['h_field']
    h_sim = data_dict['h_sim']
    h_dense = data_dict['h_dense']
    y_field = data_dict['y_field']
    y_field_dense = data_dict['y_field_dense']
    y_sim = data_dict['y_sim']
    y_sim_dense = data_dict['y_sim_dense']

    # y_field may be a ragged list (experiments with differing height counts).
    ragged = isinstance(y_field, list)
    if ragged:
        y_max = max(max(np.array([np.max(k) for k in y_field])), max(y_sim.max(1)))
    else:
        y_max = max(max(y_field.max(1)), max(y_sim.max(1)))  # global y-axis limit

    # For each field radius, indices of the n_neighbors closest design radii.
    n_neighbors = 3
    R_nearest_sim_design = np.zeros(shape=(n_neighbors, len(R)), dtype=int)
    for i in range(len(R)):
        by_distance = np.argsort(np.abs(R_sim - R[i]))
        R_nearest_sim_design[:, i] = by_distance[0:n_neighbors]

    colors = ('r', 'g', 'b')
    fig = plt.figure(figsize=[12, 12], constrained_layout=True)
    gs = GridSpec(2, 2, figure=fig)
    # Three panels in a 2x2 grid (assumes len(R) == 3, matching gen_data).
    axs = np.array([fig.add_subplot(gs[0, 0]),
                    fig.add_subplot(gs[0, 1]),
                    fig.add_subplot(gs[1, 0])])

    for i in range(len(R)):
        ax = axs[i]
        ax.set_xlim([0, 25])
        ax.set_ylim([0, y_max + .5])
        ax.xaxis.set_ticks(np.arange(0, 30, 5))
        ax.yaxis.set_ticks(np.arange(0, y_max + .5, 1))
        ax.set_title("Ball Radius {} m".format(R[i]), fontweight="bold")
        ax.set_xlabel("Distance (m)")
        ax.set_ylabel("Time (s)")
        # All simulator traces, in a light color (one legend entry only).
        for j in range(m):
            ax.plot(h_dense, np.transpose(y_sim_dense)[:, j], color='lightgreen',
                    label="Simulation runs" if j == 0 else "")
        if near_sim:
            # Emphasize the simulator runs closest to this radius.
            for j in range(n_neighbors):
                ax.plot(h_dense, np.transpose(y_sim_dense)[:, R_nearest_sim_design[j, i]],
                        linestyle="--",
                        color=colors[j], label="Nearest Sim {}".format(j + 1))
        # True curve and observed field points.
        ax.plot(h_dense, y_field_dense[i, :], 'k', label="Reality")
        if ragged:
            ax.plot(h_field[i], y_field[i], 'ks', label='Reality')
        else:
            ax.plot(h_field, y_field[i], 'ks', label="Field data")
        ax.legend(loc="lower right")
        if inset:
            # Small embedded scatter of the (R, C) design, with this panel's
            # radius marked and its nearest design points colored.
            inset_ax = inset_axes(ax, width="30%", height="30%", loc="upper left",
                                  borderpad=2.5)
            inset_ax.set_xlabel("R sim_design values", fontsize=7, labelpad=1)
            inset_ax.set_ylabel("C sim_design values", fontsize=7)
            inset_ax.xaxis.set_ticks(R)
            inset_ax.yaxis.set_ticks(np.arange(0, .251, .05))
            inset_ax.tick_params(axis='both', which='major', labelsize=7, pad=-5)
            inset_ax.scatter(R_sim, C_sim, s=15, facecolors='none', edgecolors='grey')
            inset_ax.scatter(R_sim[R_nearest_sim_design[:, i]], C_sim[R_nearest_sim_design[:, i]], s=15,
                             color=colors)
            inset_ax.axvline(x=R[i], ymin=0, ymax=1, color='k', linewidth=.5)

    plt.savefig('data/plotAll.png', dpi=300)
    plt.show()
#%% #==================== Write data ===========================#
# write the h-t pairs into files
def write_data(data_dict, datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
# datadir == directory where data files should be written to or read from
# sim.dat, should be length(hsim) x length(Csim)
y_sim = data_dict['y_sim']
with open(datadir+'sim.dat',"w+") as f:
for line in np.array(np.transpose(y_sim)):
np.savetxt(f, line)
# sim.height, a file with just the heights (same for all sim runs)
h_sim = data_dict['h_sim']
with open(datadir+'sim.height',"w+") as f:
for line in np.array(np.transpose(h_sim)):
np.savetxt(f, line)
# sim.sim_designign, length(Csim) x (num X's + num thetas)
R_sim = data_dict['R_sim']; C_sim = data_dict['C_sim']
sim_design = np.transpose(np.array([R_sim, C_sim]))
with open(datadir+'sim.design',"w+") as f:
for line in sim_design:
np.savetxt(f, line)
# field.dat, one row per experiment (radius)
y_field = data_dict['y_field']
with open(datadir+'field.dat',"w+") as f:
for line in np.array(y_field):
np.savetxt(f, line)
# field.height
h_field = data_dict['h_field']
with open(datadir+'field.height',"w+") as f:
for line in np.array(h_field):
np.savetxt(f, line)
# field radii
R = data_dict['R']
with open(datadir+'field.radii',"w+") as f:
for line in np.array(R):
np.savetxt(f, line)
#%%
def read_data(datadir='/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
    """Read the ball-drop data files written by ``write_data``.

    Args:
        datadir: path prefix the file names are appended to (plain string
            concatenation, mirroring ``write_data``).

    Returns:
        dict with keys 'R', 'sim_design', 'h_field', 'h_sim', 'y_field',
        'y_sim', each a numpy array loaded via ``np.loadtxt``.
    """
    def _load(fname):
        with open(datadir + fname, 'r') as fh:
            return np.loadtxt(fh)

    return {
        'R': _load('field.radii'),
        'sim_design': _load('sim.design'),
        'h_field': _load('field.height'),
        'h_sim': _load('sim.height'),
        'y_field': _load('field.dat'),
        'y_sim': _load('sim.dat'),
    }
| 38.130742 | 124 | 0.587712 |
import numpy as np
b.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from invertH import invertHsim, invertHtrue
# 1. generate the h-t pairs and write them into files. (this file and invertH.py)
# 2. a "runmcmc"-type file that first calls...
# 3. ...a file that reads in the data and packages it appropriately
# generate "field" data and "simulator" data, where the simulator model is
# systematically off from reality.
# true: d2h/dt2 = g - C (dh/dt)^2 / R
# sim: d2h/dt2 = g - C (dh/dt) / R
# inputs for field experiments: x = R
# inputs for simulator: x = R, theta = C
# We want to calibrate theta in the simulator to match the field data.
#%% Compute data
def gen_data(et,plot_design=False,R_new=None,R_design=None,C_design=None):
n = 3; m = 25
g = 9.8 # gravity
C_true = .1 / (4 * np.pi / 3); print('generating data with C = ',C_true)
n_field_heights = 4
h_field = np.linspace(5,20,n_field_heights) # platform heights for the field experiments
h_sim = np.arange(1.5,25,1.5) # grid of heights fed to the simulator
h_dense = np.concatenate((np.arange(0,2,.01),np.arange(2,25,.5))) # a denser grid for drawing the curves
# the coefficient of drag for a smooth sphere is 0.1, and we're
if R_new is None: R = np.array([.1, .2, .4])
else: R = R_new
sim_design = np.array([
[0.1239, 0.8024],
[0.8738, 0.6473],
[0.6140, 0.3337],
[0.8833, 0.4783],
[0.9946, 0.0548],
[0.1178, 0.9382],
[0.1805, 0.2411],
[0.6638, 0.2861],
[0.2939, 0.1208],
[0.2451, 0.2397],
[0.4577, 0.5696],
[0.4377, 0.8874],
[0.0737, 0.7384],
[0.6931, 0.8683],
[0.4901, 0.7070],
[0.5953, 0.9828],
[0.7506, 0.1009],
[0.7783, 0.4225],
[0.8333, 0.5318],
[0.3987, 0.6312],
[0.2021, 0.4990],
[0.3495, 0.3680],
[0.9411, 0.7935],
[0.0198, 0.0218],
[0.5440, 0.1925]])
# scale the first column to [0,.5] and call it R_sim
# (this inclusim_design our field values, i.e., R \in [0,.5])
# scale the second column to [0.05,.25] and call it Csim
# (likewise, Ctrue \in [0.05, .25])
sim_design[:,0] = sim_design[:,0] * .4 + .05
sim_design[:,1] = sim_design[:,1] * .2 + .05
if R_design is not None: R_sim = R_design
else: R_sim = sim_design[:,0]
if C_design is not None: C_sim = C_design
else: C_sim = sim_design[:,1]
if plot_design:
plt.scatter(R_sim,C_sim)
plt.xlabel("R design points");plt.ylabel("C design points")
plt.title("Simulator Design")
plt.show()
# Generate field data for each R
y_field = invertHtrue(h_field, g, C_true, R, et) # observed times
y_field_dense = invertHtrue(h_dense, g, C_true, R, et) # dense grid for plots
# imagine that the biggest ball is too big to get to the highest
# platform, so we don't observe data there
y_sim = invertHsim(h_sim, g, C_sim, R_sim)
y_sim_dense = invertHsim(h_dense, g, C_sim, R_sim)
data_dict = dict([('R',R),('sim_design',np.column_stack((R_sim,C_sim))),\
('n',n),('m',m),('C_true',C_true),\
('h_field',h_field),('h_sim',h_sim),('h_dense',h_dense),\
('y_field',y_field),('y_field_dense',y_field_dense),\
('y_sim',y_sim),('y_sim_dense',y_sim_dense)])
return(data_dict)
ta_dict['n']
m = data_dict['m']
y_sim = data_dict['y_sim']
y_field = data_dict['y_field']
R = data_dict['R']
R_sim = data_dict['sim_design'][:,0]
C_sim = data_dict['sim_design'][:,1]
h_field = data_dict['h_field']
h_sim = data_dict['h_sim']
h_dense = data_dict['h_dense']
y_field = data_dict['y_field']
y_field_dense = data_dict['y_field_dense']
y_sim = data_dict['y_sim']
y_sim_dense = data_dict['y_sim_dense']
if isinstance(y_field, list): ragged = True
else: ragged = False
if ragged:
y_max = max(max(np.array([np.max(k) for k in y_field])),max(y_sim.max(1)))
else:
y_max = max(max(y_field.max(1)),max(y_sim.max(1)))
n_neighbors = 3
R_nearest_sim_design = np.zeros(shape=(n_neighbors,len(R)),dtype=int)
for i in range(len(R)):
dist = np.argsort(np.abs(R_sim-R[i]))
R_nearest_sim_design[:,i] = dist[0:n_neighbors]
colors = ('r', 'g', 'b')
fig = plt.figure(figsize=[12,12],constrained_layout=True)
gs = GridSpec(2,2,figure=fig)
axs = np.array([fig.add_subplot(gs[0,0]),\
fig.add_subplot(gs[0,1]),\
fig.add_subplot(gs[1,0])])
for i in range(len(R)):
axs[i].set_xlim([0, 25])
axs[i].set_ylim([0, y_max+.5])
axs[i].xaxis.set_ticks(np.arange(0,30,5))
axs[i].yaxis.set_ticks(np.arange(0,y_max+.5,1))
axs[i].set_title("Ball Radius {} m".format(R[i]),fontweight="bold")
axs[i].set_xlabel("Distance (m)")
axs[i].set_ylabel("Time (s)")
for j in range(m):
axs[i].plot(h_dense, np.transpose(y_sim_dense)[:,j],color='lightgreen',\
label="Simulation runs" if j==0 else "")
if near_sim:
for j in range(n_neighbors):
axs[i].plot(h_dense,np.transpose(y_sim_dense)[:,R_nearest_sim_design[j,i]],\
linestyle="--",\
color=colors[j],label="Nearest Sim {}".format(j+1))
axs[i].plot(h_dense, y_field_dense[i,:],'k',label="Reality")
if ragged:
axs[i].plot(h_field[i],y_field[i],'ks',label='Reality')
else:
axs[i].plot(h_field, y_field[i,],'ks',label="Field data")
axs[i].legend(loc="lower right")
if inset:
inset_ax = inset_axes(axs[i],width="30%",height="30%",loc="upper left",\
borderpad=2.5)
inset_ax.set_xlabel("R sim_design values",fontsize=7,labelpad=1)
inset_ax.set_ylabel("C sim_design values",fontsize=7)
inset_ax.xaxis.set_ticks(R)
inset_ax.yaxis.set_ticks(np.arange(0,.251,.05))
inset_ax.tick_params(axis='both', which='major', labelsize=7, pad = -5)
inset_ax.scatter(R_sim,C_sim,s=15, facecolors='none', edgecolors='grey')
inset_ax.scatter(R_sim[R_nearest_sim_design[:,i]],C_sim[R_nearest_sim_design[:,i]],s=15,\
color=colors)
inset_ax.axvline(x=R[i], ymin=0, ymax=1,color='k',linewidth=.5)
plt.savefig('data/plotAll.png', dpi=300)
plt.show()
ocuments/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
y_sim = data_dict['y_sim']
with open(datadir+'sim.dat',"w+") as f:
for line in np.array(np.transpose(y_sim)):
np.savetxt(f, line)
h_sim = data_dict['h_sim']
with open(datadir+'sim.height',"w+") as f:
for line in np.array(np.transpose(h_sim)):
np.savetxt(f, line)
R_sim = data_dict['R_sim']; C_sim = data_dict['C_sim']
sim_design = np.transpose(np.array([R_sim, C_sim]))
with open(datadir+'sim.design',"w+") as f:
for line in sim_design:
np.savetxt(f, line)
# field.dat, one row per experiment (radius)
y_field = data_dict['y_field']
with open(datadir+'field.dat',"w+") as f:
for line in np.array(y_field):
np.savetxt(f, line)
# field.height
h_field = data_dict['h_field']
with open(datadir+'field.height',"w+") as f:
for line in np.array(h_field):
np.savetxt(f, line)
# field radii
R = data_dict['R']
with open(datadir+'field.radii',"w+") as f:
for line in np.array(R):
np.savetxt(f, line)
#%%
def read_data(datadir = '/Users/granthutchings/Documents/LANL/SEPIA/sepia/Examples/Ball_Drop/data/ball_drop_1'):
with open(datadir+'sim.dat','r') as f:
y_sim = np.loadtxt(f)
with open(datadir+'sim.height',"r") as f:
h_sim = np.loadtxt(f)
with open(datadir+'sim.design','r') as f:
sim_design = np.loadtxt(f)
with open(datadir+'field.dat','r') as f:
y_field = np.loadtxt(f)
with open(datadir+'field.height','r') as f:
h_field = np.loadtxt(f)
with open(datadir+'field.radii','r') as f:
R = np.loadtxt(f)
data_dict = dict([('R',R),('sim_design',sim_design),\
('h_field',h_field),('h_sim',h_sim),\
('y_field',y_field),('y_sim',y_sim)])
return(data_dict)
| true | true |
f714dd2831864994b43276d37c673f59de37c3f8 | 13,979 | py | Python | monai/transforms/io/dictionary.py | tatuanb/monai_V1 | 41e492b61c78bb3c303f38b03fe9fdc74a3c2e96 | [
"Apache-2.0"
] | 2,971 | 2019-10-16T23:53:16.000Z | 2022-03-31T20:58:24.000Z | monai/transforms/io/dictionary.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 2,851 | 2020-01-10T16:23:44.000Z | 2022-03-31T22:14:53.000Z | monai/transforms/io/dictionary.py | catherine1996cn/MONAI | ff9bbfa82763de46cbac75553e340633e3d84ecb | [
"Apache-2.0"
] | 614 | 2020-01-14T19:18:01.000Z | 2022-03-31T14:06:14.000Z | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A collection of dictionary-based wrappers around the "vanilla" transforms for IO functions
defined in :py:class:`monai.transforms.io.array`.
Class names are ended with 'd' to denote dictionary-based transforms.
"""
from pathlib import Path
from typing import Optional, Union
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.data.image_reader import ImageReader
from monai.transforms.io.array import LoadImage, SaveImage
from monai.transforms.transform import MapTransform
from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, ensure_tuple, ensure_tuple_rep
__all__ = ["LoadImaged", "LoadImageD", "LoadImageDict", "SaveImaged", "SaveImageD", "SaveImageDict"]
class LoadImaged(MapTransform):
    """Dictionary-based wrapper of :py:class:`monai.transforms.LoadImage`.

    Loads image data (and, unless ``image_only``, its metadata) for every
    key listed in ``keys``. When a key maps to a list of files the loaded
    arrays are stacked along a new first dimension, with the first image's
    metadata representing the stack (all stacked images must share an
    affine). Metadata is stored under ``meta_keys`` when given, otherwise
    under ``f"{key}_{meta_key_postfix}"``.

    Reader resolution order: a reader passed at call time, then one given to
    the constructor, then the registered readers from last to first
    (defaults: nii/nii.gz -> NibabelReader, png/jpg/bmp -> PILReader,
    npz/npy -> NumpyReader, others -> ITKReader). If a specific ``reader``
    is supplied, set the most appropriate one last to avoid the overhead of
    trying incompatible loaders first.
    """

    def __init__(
        self,
        keys: KeysCollection,
        reader: Optional[Union[ImageReader, str]] = None,
        dtype: DtypeLike = np.float32,
        meta_keys: Optional[KeysCollection] = None,
        meta_key_postfix: str = "meta_dict",
        overwriting: bool = False,
        image_only: bool = False,
        allow_missing_keys: bool = False,
        *args,
        **kwargs,
    ) -> None:
        """
        Args:
            keys: keys of the items to load (see ``MapTransform``).
            reader: reader instance, or a reader class name ("NibabelReader",
                "PILReader", "ITKReader", "NumpyReader") constructed with
                ``*args``/``**kwargs``; None keeps the default readers.
            dtype: target dtype for the loaded array, if not None.
            meta_keys: explicit metadata keys, one per entry in ``keys``;
                None derives them as ``f"{key}_{meta_key_postfix}"``.
            meta_key_postfix: postfix used when ``meta_keys`` is None.
            overwriting: allow overwriting an existing metadata key.
            image_only: if True, store only the image array (no metadata).
            allow_missing_keys: don't raise if a key is missing from data.
            args, kwargs: forwarded to the reader constructor.
        """
        super().__init__(keys, allow_missing_keys)
        self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs)
        if not isinstance(meta_key_postfix, str):
            raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
        if meta_keys is None:
            # One placeholder per key; resolved lazily in __call__.
            self.meta_keys = ensure_tuple_rep(None, len(self.keys))
        else:
            self.meta_keys = ensure_tuple(meta_keys)
        if len(self.keys) != len(self.meta_keys):
            raise ValueError("meta_keys should have the same length as keys.")
        self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
        self.overwriting = overwriting

    def register(self, reader: ImageReader):
        # Forward reader registration to the wrapped LoadImage instance.
        self._loader.register(reader)

    def __call__(self, data, reader: Optional[ImageReader] = None):
        """Load images for each key; optionally store their metadata.

        Raises:
            KeyError: when not ``self.overwriting`` and the metadata key
                already exists in ``data``.
        """
        d = dict(data)
        for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
            loaded = self._loader(d[key], reader)
            if self._loader.image_only:
                if not isinstance(loaded, np.ndarray):
                    raise ValueError("loader must return a numpy array (because image_only=True was used).")
                d[key] = loaded
            else:
                # Expect (image, metadata) from the loader.
                if not isinstance(loaded, (tuple, list)):
                    raise ValueError("loader must return a tuple or list (because image_only=False was used).")
                d[key] = loaded[0]
                if not isinstance(loaded[1], dict):
                    raise ValueError("metadata must be a dict.")
                meta_key = meta_key or f"{key}_{meta_key_postfix}"
                if meta_key in d and not self.overwriting:
                    raise KeyError(f"Meta data with key {meta_key} already exists and overwriting=False.")
                d[meta_key] = loaded[1]
        return d
class SaveImaged(MapTransform):
"""
Dictionary-based wrapper of :py:class:`monai.transforms.SaveImage`.
Note:
Image should be channel-first shape: [C,H,W,[D]].
If the data is a patch of big image, will append the patch index to filename.
Args:
keys: keys of the corresponding items to be transformed.
See also: :py:class:`monai.transforms.compose.MapTransform`
meta_keys: explicitly indicate the key of the corresponding meta data dictionary.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, original_shape, etc.
it can be a sequence of string, map to the `keys`.
if None, will try to construct meta_keys by `key_{meta_key_postfix}`.
meta_key_postfix: if meta_keys is None and `key_{postfix}` was used to store the metadata in `LoadImaged`.
need the key to extract metadata to save images, default is `meta_dict`.
for example, for data with key `image`, the metadata by default is in `image_meta_dict`.
the meta data is a dictionary object which contains: filename, affine, original_shape, etc.
if no corresponding metadata, set to `None`.
output_dir: output image directory.
output_postfix: a string appended to all output file names, default to `trans`.
output_ext: output file extension name, available extensions: `.nii.gz`, `.nii`, `.png`.
resample: whether to resample before saving the data array.
if saving PNG format image, based on the `spatial_shape` from metadata.
if saving NIfTI format image, based on the `original_affine` from metadata.
mode: This option is used when ``resample = True``. Defaults to ``"nearest"``.
- NIfTI files {``"bilinear"``, ``"nearest"``}
Interpolation mode to calculate output values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
The interpolation mode.
See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
padding_mode: This option is used when ``resample = True``. Defaults to ``"border"``.
- NIfTI files {``"zeros"``, ``"border"``, ``"reflection"``}
Padding mode for outside grid values.
See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
- PNG files
This option is ignored.
scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling
[0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.
it's used for PNG format only.
dtype: data type during resampling computation. Defaults to ``np.float64`` for best precision.
if None, use the data type of input data. To be compatible with other modules,
the output data type is always ``np.float32``.
it's used for NIfTI format only.
output_dtype: data type for saving data. Defaults to ``np.float32``.
it's used for NIfTI format only.
allow_missing_keys: don't raise exception if key is missing.
squeeze_end_dims: if True, any trailing singleton dimensions will be removed (after the channel
has been moved to the end). So if input is (C,H,W,D), this will be altered to (H,W,D,C), and
then if C==1, it will be saved as (H,W,D). If D also ==1, it will be saved as (H,W). If false,
image will always be saved as (H,W,D,C).
it's used for NIfTI format only.
data_root_dir: if not empty, it specifies the beginning parts of the input file's
absolute path. it's used to compute `input_file_rel_path`, the relative path to the file from
`data_root_dir` to preserve folder structure when saving in case there are files in different
folders with the same file names. for example:
input_file_name: /foo/bar/test1/image.nii,
output_postfix: seg
output_ext: nii.gz
output_dir: /output,
data_root_dir: /foo/bar,
output will be: /output/test1/image/image_seg.nii.gz
separate_folder: whether to save every file in a separate folder, for example: if input filename is
`image.nii`, postfix is `seg` and folder_path is `output`, if `True`, save as:
`output/image/image_seg.nii`, if `False`, save as `output/image_seg.nii`. default to `True`.
print_log: whether to print log about the saved file path, etc. default to `True`.
"""
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
output_dir: Union[Path, str] = "./",
output_postfix: str = "trans",
output_ext: str = ".nii.gz",
resample: bool = True,
mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
scale: Optional[int] = None,
dtype: DtypeLike = np.float64,
output_dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
squeeze_end_dims: bool = True,
data_root_dir: str = "",
separate_folder: bool = True,
print_log: bool = True,
) -> None:
super().__init__(keys, allow_missing_keys)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self._saver = SaveImage(
output_dir=output_dir,
output_postfix=output_postfix,
output_ext=output_ext,
resample=resample,
mode=mode,
padding_mode=padding_mode,
scale=scale,
dtype=dtype,
output_dtype=output_dtype,
squeeze_end_dims=squeeze_end_dims,
data_root_dir=data_root_dir,
separate_folder=separate_folder,
print_log=print_log,
)
def __call__(self, data):
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
if meta_key is None and meta_key_postfix is not None:
meta_key = f"{key}_{meta_key_postfix}"
meta_data = d[meta_key] if meta_key is not None else None
self._saver(img=d[key], meta_data=meta_data)
return d
LoadImageD = LoadImageDict = LoadImaged
SaveImageD = SaveImageDict = SaveImaged
| 52.552632 | 115 | 0.654625 |
from pathlib import Path
from typing import Optional, Union
import numpy as np
from monai.config import DtypeLike, KeysCollection
from monai.data.image_reader import ImageReader
from monai.transforms.io.array import LoadImage, SaveImage
from monai.transforms.transform import MapTransform
from monai.utils import GridSampleMode, GridSamplePadMode, InterpolateMode, ensure_tuple, ensure_tuple_rep
__all__ = ["LoadImaged", "LoadImageD", "LoadImageDict", "SaveImaged", "SaveImageD", "SaveImageDict"]
class LoadImaged(MapTransform):
def __init__(
self,
keys: KeysCollection,
reader: Optional[Union[ImageReader, str]] = None,
dtype: DtypeLike = np.float32,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
overwriting: bool = False,
image_only: bool = False,
allow_missing_keys: bool = False,
*args,
**kwargs,
) -> None:
super().__init__(keys, allow_missing_keys)
self._loader = LoadImage(reader, image_only, dtype, *args, **kwargs)
if not isinstance(meta_key_postfix, str):
raise TypeError(f"meta_key_postfix must be a str but is {type(meta_key_postfix).__name__}.")
self.meta_keys = ensure_tuple_rep(None, len(self.keys)) if meta_keys is None else ensure_tuple(meta_keys)
if len(self.keys) != len(self.meta_keys):
raise ValueError("meta_keys should have the same length as keys.")
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self.overwriting = overwriting
def register(self, reader: ImageReader):
self._loader.register(reader)
def __call__(self, data, reader: Optional[ImageReader] = None):
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
data = self._loader(d[key], reader)
if self._loader.image_only:
if not isinstance(data, np.ndarray):
raise ValueError("loader must return a numpy array (because image_only=True was used).")
d[key] = data
else:
if not isinstance(data, (tuple, list)):
raise ValueError("loader must return a tuple or list (because image_only=False was used).")
d[key] = data[0]
if not isinstance(data[1], dict):
raise ValueError("metadata must be a dict.")
meta_key = meta_key or f"{key}_{meta_key_postfix}"
if meta_key in d and not self.overwriting:
raise KeyError(f"Meta data with key {meta_key} already exists and overwriting=False.")
d[meta_key] = data[1]
return d
class SaveImaged(MapTransform):
def __init__(
self,
keys: KeysCollection,
meta_keys: Optional[KeysCollection] = None,
meta_key_postfix: str = "meta_dict",
output_dir: Union[Path, str] = "./",
output_postfix: str = "trans",
output_ext: str = ".nii.gz",
resample: bool = True,
mode: Union[GridSampleMode, InterpolateMode, str] = "nearest",
padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
scale: Optional[int] = None,
dtype: DtypeLike = np.float64,
output_dtype: DtypeLike = np.float32,
allow_missing_keys: bool = False,
squeeze_end_dims: bool = True,
data_root_dir: str = "",
separate_folder: bool = True,
print_log: bool = True,
) -> None:
super().__init__(keys, allow_missing_keys)
self.meta_keys = ensure_tuple_rep(meta_keys, len(self.keys))
self.meta_key_postfix = ensure_tuple_rep(meta_key_postfix, len(self.keys))
self._saver = SaveImage(
output_dir=output_dir,
output_postfix=output_postfix,
output_ext=output_ext,
resample=resample,
mode=mode,
padding_mode=padding_mode,
scale=scale,
dtype=dtype,
output_dtype=output_dtype,
squeeze_end_dims=squeeze_end_dims,
data_root_dir=data_root_dir,
separate_folder=separate_folder,
print_log=print_log,
)
def __call__(self, data):
d = dict(data)
for key, meta_key, meta_key_postfix in self.key_iterator(d, self.meta_keys, self.meta_key_postfix):
if meta_key is None and meta_key_postfix is not None:
meta_key = f"{key}_{meta_key_postfix}"
meta_data = d[meta_key] if meta_key is not None else None
self._saver(img=d[key], meta_data=meta_data)
return d
LoadImageD = LoadImageDict = LoadImaged
SaveImageD = SaveImageDict = SaveImaged
| true | true |
f714dd425e33726e34a19c85ff11b50408c2213b | 3,088 | py | Python | overhave/cli/s3.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 33 | 2021-02-01T15:49:37.000Z | 2021-12-20T00:44:43.000Z | overhave/cli/s3.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 46 | 2021-02-03T12:56:52.000Z | 2021-12-19T18:50:27.000Z | overhave/cli/s3.py | TinkoffCreditSystems/overhave | b0ab705ef5c5c5a65fa0b14b173b64fd7310e187 | [
"Apache-2.0"
] | 1 | 2021-12-07T09:02:44.000Z | 2021-12-07T09:02:44.000Z | from datetime import timedelta
from pathlib import Path
import click
from overhave.base_settings import LoggingSettings
from overhave.cli.group import overhave
from overhave.transport import OverhaveS3Bucket, OverhaveS3ManagerSettings, S3Manager
from overhave.utils import get_current_time
@overhave.group(short_help="Run s3 cloud interaction commands")
def s3() -> None:
pass
@s3.group(short_help="S3 cloud bucket's interaction commands")
def bucket() -> None:
pass
def _check_bucket_registered(name: str) -> None:
if name in (item.value for item in list(OverhaveS3Bucket)):
return
click.secho(f"Note: specified s3 bucket name '{name}' not presented in OverhaveS3Bucket enum!", fg="yellow")
def _get_s3_manager() -> S3Manager:
LoggingSettings().setup_logging()
manager = S3Manager(OverhaveS3ManagerSettings(autocreate_buckets=False))
manager.initialize()
return manager
@bucket.command(short_help="Create s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
def create(name: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().create_bucket(name)
@bucket.command(short_help="Delete s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--force", is_flag=True, help="Delete all files in bucket, then delete bucket",
)
def delete(name: str, force: bool) -> None:
""" Delete s3 bucket. """
_check_bucket_registered(name)
_get_s3_manager().delete_bucket(name, force=force)
@bucket.command(short_help="Remove old s3 cloud bucket files")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-d", "--days", type=int, help="Remove all files in bucket older then specified days value",
)
def remove_files(name: str, days: int) -> None:
""" Remove s3 bucket files older . """
_check_bucket_registered(name)
manager = _get_s3_manager()
target_date = get_current_time() - timedelta(days=days)
objects = manager.get_bucket_objects(name)
objects_to_delete = []
for obj in objects:
if not obj.modified_at < target_date:
continue
objects_to_delete.append(obj)
if not objects_to_delete:
click.secho(f"No one object older than {days} days.")
return
click.secho(f"Objects older then {days} days: {[x.name for x in objects_to_delete]}")
manager.delete_bucket_objects(bucket=bucket, objects=objects_to_delete)
@s3.command(short_help="Download file from s3 bucket")
@click.option(
"-b", "--bucket", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--filename", type=str, help="Filename for downloading",
)
@click.option("-d", "--dir-to-save", type=str, help="Directory for saving file", default=".")
def download_file(bucket: str, filename: str, dir_to_save: str) -> None:
""" Create s3 bucket. """
_check_bucket_registered(bucket)
_get_s3_manager().download_file(filename=filename, bucket=bucket, dir_to_save=Path(dir_to_save))
| 32.166667 | 112 | 0.70693 | from datetime import timedelta
from pathlib import Path
import click
from overhave.base_settings import LoggingSettings
from overhave.cli.group import overhave
from overhave.transport import OverhaveS3Bucket, OverhaveS3ManagerSettings, S3Manager
from overhave.utils import get_current_time
@overhave.group(short_help="Run s3 cloud interaction commands")
def s3() -> None:
pass
@s3.group(short_help="S3 cloud bucket's interaction commands")
def bucket() -> None:
pass
def _check_bucket_registered(name: str) -> None:
if name in (item.value for item in list(OverhaveS3Bucket)):
return
click.secho(f"Note: specified s3 bucket name '{name}' not presented in OverhaveS3Bucket enum!", fg="yellow")
def _get_s3_manager() -> S3Manager:
LoggingSettings().setup_logging()
manager = S3Manager(OverhaveS3ManagerSettings(autocreate_buckets=False))
manager.initialize()
return manager
@bucket.command(short_help="Create s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
def create(name: str) -> None:
_check_bucket_registered(name)
_get_s3_manager().create_bucket(name)
@bucket.command(short_help="Delete s3 cloud bucket")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--force", is_flag=True, help="Delete all files in bucket, then delete bucket",
)
def delete(name: str, force: bool) -> None:
_check_bucket_registered(name)
_get_s3_manager().delete_bucket(name, force=force)
@bucket.command(short_help="Remove old s3 cloud bucket files")
@click.option(
"-n", "--name", type=str, help="Declared s3 bucket",
)
@click.option(
"-d", "--days", type=int, help="Remove all files in bucket older then specified days value",
)
def remove_files(name: str, days: int) -> None:
_check_bucket_registered(name)
manager = _get_s3_manager()
target_date = get_current_time() - timedelta(days=days)
objects = manager.get_bucket_objects(name)
objects_to_delete = []
for obj in objects:
if not obj.modified_at < target_date:
continue
objects_to_delete.append(obj)
if not objects_to_delete:
click.secho(f"No one object older than {days} days.")
return
click.secho(f"Objects older then {days} days: {[x.name for x in objects_to_delete]}")
manager.delete_bucket_objects(bucket=bucket, objects=objects_to_delete)
@s3.command(short_help="Download file from s3 bucket")
@click.option(
"-b", "--bucket", type=str, help="Declared s3 bucket",
)
@click.option(
"-f", "--filename", type=str, help="Filename for downloading",
)
@click.option("-d", "--dir-to-save", type=str, help="Directory for saving file", default=".")
def download_file(bucket: str, filename: str, dir_to_save: str) -> None:
_check_bucket_registered(bucket)
_get_s3_manager().download_file(filename=filename, bucket=bucket, dir_to_save=Path(dir_to_save))
| true | true |
f714ddbe1fa4334438b63d1817170ad4c1db0595 | 2,250 | py | Python | coffea/lookup_tools/dense_lookup.py | kmohrman/coffea | 1963fc9371552b348a15084f5bde9390be1e6e1c | [
"BSD-3-Clause"
] | 1 | 2020-11-19T21:50:34.000Z | 2020-11-19T21:50:34.000Z | coffea/lookup_tools/dense_lookup.py | kondratyevd/coffea | 2baae94028c38b59f0eb52127d8fb92840dbf23d | [
"BSD-3-Clause"
] | null | null | null | coffea/lookup_tools/dense_lookup.py | kondratyevd/coffea | 2baae94028c38b59f0eb52127d8fb92840dbf23d | [
"BSD-3-Clause"
] | null | null | null | from coffea.lookup_tools.lookup_base import lookup_base
import numpy
from copy import deepcopy
class dense_lookup(lookup_base):
def __init__(self, values, dims, feval_dim=None):
super(dense_lookup, self).__init__()
self._dimension = 0
whattype = type(dims)
if whattype == numpy.ndarray:
self._dimension = 1
else:
self._dimension = len(dims)
if self._dimension == 0:
raise Exception("Could not define dimension for {}".format(whattype))
self._axes = deepcopy(dims)
self._feval_dim = None
vals_are_strings = (
"string" in values.dtype.name
or "str" in values.dtype.name
or "unicode" in values.dtype.name
or "bytes" in values.dtype.name
) # ....
if not isinstance(values, numpy.ndarray):
raise TypeError("values is not a numpy array, but %r" % type(values))
if vals_are_strings:
raise Exception("dense_lookup cannot handle string values!")
self._values = deepcopy(values)
def _evaluate(self, *args):
indices = []
if self._dimension == 1:
indices.append(
numpy.clip(
numpy.searchsorted(self._axes, args[0], side="right") - 1,
0,
self._values.shape[0] - 1,
)
)
else:
for dim in range(self._dimension):
indices.append(
numpy.clip(
numpy.searchsorted(self._axes[dim], args[dim], side="right")
- 1,
0,
self._values.shape[dim] - 1,
)
)
return self._values[tuple(indices)]
def __repr__(self):
myrepr = "{} dimensional histogram with axes:\n".format(self._dimension)
temp = ""
if self._dimension == 1:
temp = "\t1: {}\n".format(self._axes)
else:
temp = "\t1: {}\n".format(self._axes[0])
for idim in range(1, self._dimension):
temp += "\t{}: {}\n".format(idim + 1, self._axes[idim])
myrepr += temp
return myrepr
| 34.615385 | 84 | 0.517333 | from coffea.lookup_tools.lookup_base import lookup_base
import numpy
from copy import deepcopy
class dense_lookup(lookup_base):
def __init__(self, values, dims, feval_dim=None):
super(dense_lookup, self).__init__()
self._dimension = 0
whattype = type(dims)
if whattype == numpy.ndarray:
self._dimension = 1
else:
self._dimension = len(dims)
if self._dimension == 0:
raise Exception("Could not define dimension for {}".format(whattype))
self._axes = deepcopy(dims)
self._feval_dim = None
vals_are_strings = (
"string" in values.dtype.name
or "str" in values.dtype.name
or "unicode" in values.dtype.name
or "bytes" in values.dtype.name
)
if not isinstance(values, numpy.ndarray):
raise TypeError("values is not a numpy array, but %r" % type(values))
if vals_are_strings:
raise Exception("dense_lookup cannot handle string values!")
self._values = deepcopy(values)
def _evaluate(self, *args):
indices = []
if self._dimension == 1:
indices.append(
numpy.clip(
numpy.searchsorted(self._axes, args[0], side="right") - 1,
0,
self._values.shape[0] - 1,
)
)
else:
for dim in range(self._dimension):
indices.append(
numpy.clip(
numpy.searchsorted(self._axes[dim], args[dim], side="right")
- 1,
0,
self._values.shape[dim] - 1,
)
)
return self._values[tuple(indices)]
def __repr__(self):
myrepr = "{} dimensional histogram with axes:\n".format(self._dimension)
temp = ""
if self._dimension == 1:
temp = "\t1: {}\n".format(self._axes)
else:
temp = "\t1: {}\n".format(self._axes[0])
for idim in range(1, self._dimension):
temp += "\t{}: {}\n".format(idim + 1, self._axes[idim])
myrepr += temp
return myrepr
| true | true |
f714ddd185814029f4f28b3c294fecf5acc1b57a | 834 | py | Python | components/py_engine/adapter/esp32/m5stackcore2/boot.py | yong171966/AliOS-Things | df29e6886cec68885db9975d5b9f51f057c2ba04 | [
"Apache-2.0"
] | null | null | null | components/py_engine/adapter/esp32/m5stackcore2/boot.py | yong171966/AliOS-Things | df29e6886cec68885db9975d5b9f51f057c2ba04 | [
"Apache-2.0"
] | null | null | null | components/py_engine/adapter/esp32/m5stackcore2/boot.py | yong171966/AliOS-Things | df29e6886cec68885db9975d5b9f51f057c2ba04 | [
"Apache-2.0"
] | null | null | null | import axp192
import kv
try:
# for m5stack-core2 only
axp = axp192.Axp192()
axp.powerAll()
axp.setLCDBrightness(80) # 设置背光亮度 0~100
except OSError:
print("make sure axp192.py is in libs folder")
def _on_get_url(url):
kv.set('_amp_pyapp_url', url)
execfile('/lib/appOta.py')
def _connect_wifi(ssid, passwd):
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
sta_if.active(True)
sta_if.scan()
sta_if.connect(ssid, passwd)
channel = kv.get('app_upgrade_channel')
if channel == "disable":
pass
else:
ssid = kv.get('_amp_wifi_ssid')
passwd = kv.get('_amp_wifi_passwd')
if isinstance(ssid, str) and isinstance(passwd, str):
_connect_wifi(ssid, passwd)
import online_upgrade
online_upgrade.on(_on_get_url)
| 22.540541 | 57 | 0.672662 | import axp192
import kv
try:
axp = axp192.Axp192()
axp.powerAll()
axp.setLCDBrightness(80)
except OSError:
print("make sure axp192.py is in libs folder")
def _on_get_url(url):
kv.set('_amp_pyapp_url', url)
execfile('/lib/appOta.py')
def _connect_wifi(ssid, passwd):
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
sta_if.active(True)
sta_if.scan()
sta_if.connect(ssid, passwd)
channel = kv.get('app_upgrade_channel')
if channel == "disable":
pass
else:
ssid = kv.get('_amp_wifi_ssid')
passwd = kv.get('_amp_wifi_passwd')
if isinstance(ssid, str) and isinstance(passwd, str):
_connect_wifi(ssid, passwd)
import online_upgrade
online_upgrade.on(_on_get_url)
| true | true |
f714dedfb2fc55a060893bd7f928aefbca6e4e47 | 3,001 | py | Python | sphinx/ext/duration.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 4,973 | 2015-01-03T15:44:00.000Z | 2022-03-31T03:11:51.000Z | sphinx/ext/duration.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 7,850 | 2015-01-02T08:09:25.000Z | 2022-03-31T18:57:40.000Z | sphinx/ext/duration.py | samdoran/sphinx | 4c91c038b220d07bbdfe0c1680af42fe897f342c | [
"BSD-2-Clause"
] | 2,179 | 2015-01-03T15:26:53.000Z | 2022-03-31T12:22:44.000Z | """
sphinx.ext.duration
~~~~~~~~~~~~~~~~~~~
Measure durations of Sphinx processing.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from datetime import datetime, timedelta
from itertools import islice
from operator import itemgetter
from typing import Any, Dict, List, cast
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.locale import __
from sphinx.util import logging
logger = logging.getLogger(__name__)
class DurationDomain(Domain):
"""A domain for durations of Sphinx processing."""
name = 'duration'
@property
def reading_durations(self) -> Dict[str, timedelta]:
return self.data.setdefault('reading_durations', {})
def note_reading_duration(self, duration: timedelta) -> None:
self.reading_durations[self.env.docname] = duration
def clear(self) -> None:
self.reading_durations.clear()
def clear_doc(self, docname: str) -> None:
self.reading_durations.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict[str, timedelta]) -> None:
for docname, duration in otherdata.items():
if docname in docnames:
self.reading_durations[docname] = duration
def on_builder_inited(app: Sphinx) -> None:
"""Initialize DurationDomain on bootstrap.
This clears results of last build.
"""
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.clear()
def on_source_read(app: Sphinx, docname: str, content: List[str]) -> None:
"""Start to measure reading duration."""
app.env.temp_data['started_at'] = datetime.now()
def on_doctree_read(app: Sphinx, doctree: nodes.document) -> None:
"""Record a reading duration."""
started_at = app.env.temp_data.get('started_at')
duration = datetime.now() - started_at
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.note_reading_duration(duration)
def on_build_finished(app: Sphinx, error: Exception) -> None:
"""Display duration ranking on current build."""
domain = cast(DurationDomain, app.env.get_domain('duration'))
durations = sorted(domain.reading_durations.items(), key=itemgetter(1), reverse=True)
if not durations:
return
logger.info('')
logger.info(__('====================== slowest reading durations ======================='))
for docname, d in islice(durations, 5):
logger.info('%d.%03d %s', d.seconds, d.microseconds / 1000, docname)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(DurationDomain)
app.connect('builder-inited', on_builder_inited)
app.connect('source-read', on_source_read)
app.connect('doctree-read', on_doctree_read)
app.connect('build-finished', on_build_finished)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| 31.260417 | 95 | 0.680107 |
from datetime import datetime, timedelta
from itertools import islice
from operator import itemgetter
from typing import Any, Dict, List, cast
from docutils import nodes
from sphinx.application import Sphinx
from sphinx.domains import Domain
from sphinx.locale import __
from sphinx.util import logging
logger = logging.getLogger(__name__)
class DurationDomain(Domain):
name = 'duration'
@property
def reading_durations(self) -> Dict[str, timedelta]:
return self.data.setdefault('reading_durations', {})
def note_reading_duration(self, duration: timedelta) -> None:
self.reading_durations[self.env.docname] = duration
def clear(self) -> None:
self.reading_durations.clear()
def clear_doc(self, docname: str) -> None:
self.reading_durations.pop(docname, None)
def merge_domaindata(self, docnames: List[str], otherdata: Dict[str, timedelta]) -> None:
for docname, duration in otherdata.items():
if docname in docnames:
self.reading_durations[docname] = duration
def on_builder_inited(app: Sphinx) -> None:
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.clear()
def on_source_read(app: Sphinx, docname: str, content: List[str]) -> None:
app.env.temp_data['started_at'] = datetime.now()
def on_doctree_read(app: Sphinx, doctree: nodes.document) -> None:
started_at = app.env.temp_data.get('started_at')
duration = datetime.now() - started_at
domain = cast(DurationDomain, app.env.get_domain('duration'))
domain.note_reading_duration(duration)
def on_build_finished(app: Sphinx, error: Exception) -> None:
domain = cast(DurationDomain, app.env.get_domain('duration'))
durations = sorted(domain.reading_durations.items(), key=itemgetter(1), reverse=True)
if not durations:
return
logger.info('')
logger.info(__('====================== slowest reading durations ======================='))
for docname, d in islice(durations, 5):
logger.info('%d.%03d %s', d.seconds, d.microseconds / 1000, docname)
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(DurationDomain)
app.connect('builder-inited', on_builder_inited)
app.connect('source-read', on_source_read)
app.connect('doctree-read', on_doctree_read)
app.connect('build-finished', on_build_finished)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
| true | true |
f714df06f4b8553ddceb7ea6424b300d9bce8373 | 1,678 | py | Python | saleor/store/migrations/0002_auto_20210513_1002.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | [
"CC-BY-4.0"
] | 1 | 2022-02-19T13:27:40.000Z | 2022-02-19T13:27:40.000Z | saleor/store/migrations/0002_auto_20210513_1002.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | [
"CC-BY-4.0"
] | null | null | null | saleor/store/migrations/0002_auto_20210513_1002.py | autobotasia/saleor | e03e9f6ab1bddac308a6609d6b576a87e90ae655 | [
"CC-BY-4.0"
] | 2 | 2021-12-03T16:59:37.000Z | 2022-02-19T13:05:42.000Z | # Generated by Django 3.1.7 on 2021-05-13 03:02
from django.db import migrations, models
import django.utils.timezone
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='store',
name='city',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='city_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='company_name',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='country',
field=django_countries.fields.CountryField(default="VN", max_length=2),
preserve_default=False,
),
migrations.AddField(
model_name='store',
name='country_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='postal_code',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='store',
name='street_address_1',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='street_address_2',
field=models.CharField(blank=True, max_length=256),
),
]
| 29.438596 | 83 | 0.565554 |
from django.db import migrations, models
import django.utils.timezone
import django_countries.fields
class Migration(migrations.Migration):
dependencies = [
('store', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='store',
name='city',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='city_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='company_name',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='country',
field=django_countries.fields.CountryField(default="VN", max_length=2),
preserve_default=False,
),
migrations.AddField(
model_name='store',
name='country_area',
field=models.CharField(blank=True, max_length=128),
),
migrations.AddField(
model_name='store',
name='postal_code',
field=models.CharField(blank=True, max_length=20),
),
migrations.AddField(
model_name='store',
name='street_address_1',
field=models.CharField(blank=True, max_length=256),
),
migrations.AddField(
model_name='store',
name='street_address_2',
field=models.CharField(blank=True, max_length=256),
),
]
| true | true |
f714df9ac1fd6d0bf4721aeb747d23287a74cfba | 15,684 | py | Python | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 2 | 2021-03-12T10:48:20.000Z | 2021-04-23T09:37:18.000Z | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 27 | 2020-07-22T15:49:25.000Z | 2021-06-18T09:40:48.000Z | tests/python/unit/dku_timeseries/resampling/test_resampler_helpers.py | dataiku/dss-plugin-timeseries-preparation | bdb662c909a0ad6d7845325a70e3dac2bdcc6b28 | [
"Apache-2.0"
] | 1 | 2021-06-01T12:49:53.000Z | 2021-06-01T12:49:53.000Z | import numpy as np
import pandas as pd
import pytest
from dku_timeseries.timeseries_helpers import generate_date_range, get_date_offset
from recipe_config_loading import get_resampling_params
@pytest.fixture
def config():
config = {u'clip_end': 0, u'constant_value': 0, u'extrapolation_method': u'none', u'shift': 0, u'time_unit_end_of_week': u'SUN',
u'datetime_column': u'Date', u'advanced_activated': False, u'time_unit': u'quarters', u'clip_start': 0, u'time_step': 2,
u'interpolation_method': u'linear'}
return config
class TestResamplerHelpers:
def test_date_offset(self):
time_unit = "business_days"
offset_value = 0
sunday = pd.Timestamp('2021-01-31 10:00:00')
offset = get_date_offset(time_unit, offset_value)
assert sunday + offset == sunday
sunday = pd.Timestamp('2021-01-31 00:00:00')
offset = get_date_offset(time_unit, 1)
assert sunday + offset == pd.Timestamp('2021-02-01 00:00:00')
assert sunday - offset == pd.Timestamp('2021-01-29 00:00:00')
assert sunday + offset + offset == pd.Timestamp('2021-02-02 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 1)
assert friday + offset == pd.Timestamp('2021-02-01 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 2)
assert friday + offset == pd.Timestamp('2021-02-02 00:00:00')
saturday = pd.Timestamp('2021-01-30 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-01 00:00:00')
saturday = pd.Timestamp('2021-02-04 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-05 00:00:00')
def test_generate_date_range_month(self, config):
config["time_unit"] = "months"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
end_time = pd.Timestamp('2021-06-20 00:00:00')
start_time = pd.Timestamp('2021-01-31 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-23 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00').tz_localize("CET")
end_time = pd.Timestamp('2021-06-20 00:00:00').tz_localize("CET")
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(
['2021-01-31 00:00:00+01:00', '2021-03-31 00:00:00+02:00', '2021-05-31 00:00:00+02:00', '2021-07-31 00:00:00+02:00']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
end_time = pd.Timestamp('2021-06-20 00:00:00')
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-03-31', '2021-05-31', '2021-07-31']))
def test_generate_date_range_week(self, config):
config["time_unit"] = "weeks"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
start_time = pd.Timestamp('2020-12-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24']))
end_time = pd.Timestamp('2021-01-24 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24', '2021-02-07']))
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-10', '2021-01-24', '2021-02-07']))
config["time_unit"] = "weeks"
config["time_unit_end_of_week"] = "WED"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-23', '2021-01-6', '2021-01-20', '2021-02-03']))
def test_generate_date_range_quarters(self, config):
config["time_step"] = 1
config["time_unit"] = "quarters"
start_time = pd.Timestamp('2020-01-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-04-30', '2020-07-31', '2020-10-31', '2021-01-31']))
def test_generate_date_range_half_year(self, config):
config["time_step"] = 1
config["time_unit"] = "semi_annual"
start_time = pd.Timestamp('2020-01-01 00:00:00')
end_time = pd.Timestamp('2021-06-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-07-31', '2021-01-31', '2021-07-31']))
def test_generate_date_range_b_days(self, config):
config["time_unit"] = "business_days"
config["time_step"] = 1
start_time = pd.Timestamp('2021-01-02 00:00:00')
end_time = pd.Timestamp('2021-01-10 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 1
clip_end = 1
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 2
clip_end = 2
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']))
def test_generate_date_range_days(self, config):
config["time_unit"] = "days"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190214 01:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-02-07 00:00:00+01:00', '2019-02-08 00:00:00+01:00',
'2019-02-09 00:00:00+01:00', '2019-02-10 00:00:00+01:00',
'2019-02-11 00:00:00+01:00', '2019-02-12 00:00:00+01:00',
'2019-02-13 00:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_hours(self, config):
config["time_unit"] = "hours"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 11:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 09:00:00+01:00', '2019-01-31 10:00:00+01:00',
'2019-01-31 11:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_minutes(self, config):
config["time_unit"] = "minutes"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 02:15:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 02:06:00+01:00', '2019-01-31 02:07:00+01:00',
'2019-01-31 02:08:00+01:00', '2019-01-31 02:09:00+01:00',
'2019-01-31 02:10:00+01:00', '2019-01-31 02:11:00+01:00',
'2019-01-31 02:12:00+01:00', '2019-01-31 02:13:00+01:00',
'2019-01-31 02:14:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_seconds(self, config):
config["time_unit"] = "seconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 01:59:12').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:07+01:00', '2019-01-31 01:59:08+01:00',
'2019-01-31 01:59:09+01:00', '2019-01-31 01:59:10+01:00',
'2019-01-31 01:59:11+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_milliseconds(self, config):
config["time_unit"] = "milliseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.015000').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.007000+01:00',
'2019-01-31 01:59:00.008000+01:00',
'2019-01-31 01:59:00.009000+01:00',
'2019-01-31 01:59:00.010000+01:00',
'2019-01-31 01:59:00.011000+01:00',
'2019-01-31 01:59:00.012000+01:00',
'2019-01-31 01:59:00.013000+01:00',
'2019-01-31 01:59:00.014000+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_microseconds(self, config):
config["time_unit"] = "microseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.000016').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.000007+01:00',
'2019-01-31 01:59:00.000008+01:00',
'2019-01-31 01:59:00.000009+01:00',
'2019-01-31 01:59:00.000010+01:00',
'2019-01-31 01:59:00.000011+01:00',
'2019-01-31 01:59:00.000012+01:00',
'2019-01-31 01:59:00.000013+01:00',
'2019-01-31 01:59:00.000014+01:00',
'2019-01-31 01:59:00.000015+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_nanoseconds(self, config):
config["time_unit"] = "nanoseconds"
config["time_step"] = 1
start_time = pd.Timestamp('2019-01-31T00:59:00.000000000')
end_time = pd.Timestamp('2019-01-31T00:59:00.000000009')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2019-01-31 00:59:00.000000007',
'2019-01-31 00:59:00.000000008']))
| 49.166144 | 153 | 0.615851 | import numpy as np
import pandas as pd
import pytest
from dku_timeseries.timeseries_helpers import generate_date_range, get_date_offset
from recipe_config_loading import get_resampling_params
@pytest.fixture
def config():
config = {u'clip_end': 0, u'constant_value': 0, u'extrapolation_method': u'none', u'shift': 0, u'time_unit_end_of_week': u'SUN',
u'datetime_column': u'Date', u'advanced_activated': False, u'time_unit': u'quarters', u'clip_start': 0, u'time_step': 2,
u'interpolation_method': u'linear'}
return config
class TestResamplerHelpers:
def test_date_offset(self):
time_unit = "business_days"
offset_value = 0
sunday = pd.Timestamp('2021-01-31 10:00:00')
offset = get_date_offset(time_unit, offset_value)
assert sunday + offset == sunday
sunday = pd.Timestamp('2021-01-31 00:00:00')
offset = get_date_offset(time_unit, 1)
assert sunday + offset == pd.Timestamp('2021-02-01 00:00:00')
assert sunday - offset == pd.Timestamp('2021-01-29 00:00:00')
assert sunday + offset + offset == pd.Timestamp('2021-02-02 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 1)
assert friday + offset == pd.Timestamp('2021-02-01 00:00:00')
friday = pd.Timestamp('2021-01-29 00:00:00')
offset = get_date_offset(time_unit, 2)
assert friday + offset == pd.Timestamp('2021-02-02 00:00:00')
saturday = pd.Timestamp('2021-01-30 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-01 00:00:00')
saturday = pd.Timestamp('2021-02-04 00:00:00')
offset = get_date_offset(time_unit, 1)
assert saturday + offset == pd.Timestamp('2021-02-05 00:00:00')
def test_generate_date_range_month(self, config):
config["time_unit"] = "months"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
end_time = pd.Timestamp('2021-06-20 00:00:00')
start_time = pd.Timestamp('2021-01-31 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-23 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-31', '2021-03-31', '2021-05-31', '2021-07-31']))
start_time = pd.Timestamp('2021-01-31 10:00:00').tz_localize("CET")
end_time = pd.Timestamp('2021-06-20 00:00:00').tz_localize("CET")
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(
['2021-01-31 00:00:00+01:00', '2021-03-31 00:00:00+02:00', '2021-05-31 00:00:00+02:00', '2021-07-31 00:00:00+02:00']))
start_time = pd.Timestamp('2021-01-31 10:00:00')
end_time = pd.Timestamp('2021-06-20 00:00:00')
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-03-31', '2021-05-31', '2021-07-31']))
def test_generate_date_range_week(self, config):
config["time_unit"] = "weeks"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
start_time = pd.Timestamp('2020-12-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24']))
end_time = pd.Timestamp('2021-01-24 00:00:00')
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-27', '2021-01-10', '2021-01-24', '2021-02-07']))
date_range = generate_date_range(start_time, end_time, 1, 0, 1, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-10', '2021-01-24', '2021-02-07']))
config["time_unit"] = "weeks"
config["time_unit_end_of_week"] = "WED"
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-12-23', '2021-01-6', '2021-01-20', '2021-02-03']))
def test_generate_date_range_quarters(self, config):
config["time_step"] = 1
config["time_unit"] = "quarters"
start_time = pd.Timestamp('2020-01-23 00:00:00')
end_time = pd.Timestamp('2021-01-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-04-30', '2020-07-31', '2020-10-31', '2021-01-31']))
def test_generate_date_range_half_year(self, config):
config["time_step"] = 1
config["time_unit"] = "semi_annual"
start_time = pd.Timestamp('2020-01-01 00:00:00')
end_time = pd.Timestamp('2021-06-18 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2020-01-31', '2020-07-31', '2021-01-31', '2021-07-31']))
def test_generate_date_range_b_days(self, config):
config["time_unit"] = "business_days"
config["time_step"] = 1
start_time = pd.Timestamp('2021-01-02 00:00:00')
end_time = pd.Timestamp('2021-01-10 00:00:00')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
date_range = generate_date_range(start_time, end_time, 0, 0, 0, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 1
clip_end = 1
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-04', '2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08', '2021-01-11']))
clip_start = 2
clip_end = 2
shift = 0
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2021-01-05', '2021-01-06', '2021-01-07', '2021-01-08']))
def test_generate_date_range_days(self, config):
config["time_unit"] = "days"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190214 01:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-02-07 00:00:00+01:00', '2019-02-08 00:00:00+01:00',
'2019-02-09 00:00:00+01:00', '2019-02-10 00:00:00+01:00',
'2019-02-11 00:00:00+01:00', '2019-02-12 00:00:00+01:00',
'2019-02-13 00:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_hours(self, config):
config["time_unit"] = "hours"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 11:59:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 09:00:00+01:00', '2019-01-31 10:00:00+01:00',
'2019-01-31 11:00:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_minutes(self, config):
config["time_unit"] = "minutes"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 02:15:00').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 02:06:00+01:00', '2019-01-31 02:07:00+01:00',
'2019-01-31 02:08:00+01:00', '2019-01-31 02:09:00+01:00',
'2019-01-31 02:10:00+01:00', '2019-01-31 02:11:00+01:00',
'2019-01-31 02:12:00+01:00', '2019-01-31 02:13:00+01:00',
'2019-01-31 02:14:00+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_seconds(self, config):
config["time_unit"] = "seconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('20190131 01:59:12').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:07+01:00', '2019-01-31 01:59:08+01:00',
'2019-01-31 01:59:09+01:00', '2019-01-31 01:59:10+01:00',
'2019-01-31 01:59:11+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_milliseconds(self, config):
config["time_unit"] = "milliseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.015000').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.007000+01:00',
'2019-01-31 01:59:00.008000+01:00',
'2019-01-31 01:59:00.009000+01:00',
'2019-01-31 01:59:00.010000+01:00',
'2019-01-31 01:59:00.011000+01:00',
'2019-01-31 01:59:00.012000+01:00',
'2019-01-31 01:59:00.013000+01:00',
'2019-01-31 01:59:00.014000+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_microseconds(self, config):
config["time_unit"] = "microseconds"
config["time_step"] = 1
start_time = pd.Timestamp('20190131 01:59:00').tz_localize('CET')
end_time = pd.Timestamp('2019-01-31 01:59:00.000016').tz_localize('CET')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
expected_range = pd.DatetimeIndex(['2019-01-31 01:59:00.000007+01:00',
'2019-01-31 01:59:00.000008+01:00',
'2019-01-31 01:59:00.000009+01:00',
'2019-01-31 01:59:00.000010+01:00',
'2019-01-31 01:59:00.000011+01:00',
'2019-01-31 01:59:00.000012+01:00',
'2019-01-31 01:59:00.000013+01:00',
'2019-01-31 01:59:00.000014+01:00',
'2019-01-31 01:59:00.000015+01:00'])
np.testing.assert_array_equal(date_range, expected_range)
def test_generate_date_range_nanoseconds(self, config):
config["time_unit"] = "nanoseconds"
config["time_step"] = 1
start_time = pd.Timestamp('2019-01-31T00:59:00.000000000')
end_time = pd.Timestamp('2019-01-31T00:59:00.000000009')
params = get_resampling_params(config)
frequency = params.resampling_step
time_unit = params.time_unit
time_step = params.time_step
clip_start = 5
shift = 2
clip_end = 3
date_range = generate_date_range(start_time, end_time, clip_start, clip_end, shift, frequency, time_step, time_unit)
np.testing.assert_array_equal(date_range, pd.DatetimeIndex(['2019-01-31 00:59:00.000000007',
'2019-01-31 00:59:00.000000008']))
| true | true |
f714e02bb7e930b170e446ab67321d019d589be2 | 1,958 | py | Python | fltk/nets/fashion_mnist_ls_gan.py | nata1y/fltk-testbed-group-3 | e23b59fa2a5e638d3804a39fe5012983e2988ca6 | [
"BSD-2-Clause"
] | null | null | null | fltk/nets/fashion_mnist_ls_gan.py | nata1y/fltk-testbed-group-3 | e23b59fa2a5e638d3804a39fe5012983e2988ca6 | [
"BSD-2-Clause"
] | null | null | null | fltk/nets/fashion_mnist_ls_gan.py | nata1y/fltk-testbed-group-3 | e23b59fa2a5e638d3804a39fe5012983e2988ca6 | [
"BSD-2-Clause"
] | 2 | 2021-05-03T17:40:18.000Z | 2021-05-11T09:34:30.000Z | import torch.nn as nn
class Generator(nn.Module):
def __init__(self, img_size=32):
super(Generator, self).__init__()
# TODO: update to proper image size
self.init_size = img_size // 4
self.l1 = nn.Sequential(nn.Linear(10, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 1, 3, stride=1, padding=1), #3
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self, img_size=32):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = nn.Sequential(
*discriminator_block(1, 16, bn=False), #3
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
# The height and width of downsampled image
# TODO: update to proper image size
ds_size = img_size // 2 ** 4
self.adv_layer = nn.Linear(128 * ds_size ** 2, 1)
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
| 32.633333 | 118 | 0.569969 | import torch.nn as nn
class Generator(nn.Module):
def __init__(self, img_size=32):
super(Generator, self).__init__()
self.init_size = img_size // 4
self.l1 = nn.Sequential(nn.Linear(10, 128 * self.init_size ** 2))
self.conv_blocks = nn.Sequential(
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 128, 3, stride=1, padding=1),
nn.BatchNorm2d(128, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Upsample(scale_factor=2),
nn.Conv2d(128, 64, 3, stride=1, padding=1),
nn.BatchNorm2d(64, 0.8),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(64, 1, 3, stride=1, padding=1),
nn.Tanh(),
)
def forward(self, z):
out = self.l1(z)
out = out.view(out.shape[0], 128, self.init_size, self.init_size)
img = self.conv_blocks(out)
return img
class Discriminator(nn.Module):
def __init__(self, img_size=32):
super(Discriminator, self).__init__()
def discriminator_block(in_filters, out_filters, bn=True):
block = [nn.Conv2d(in_filters, out_filters, 3, 2, 1), nn.LeakyReLU(0.2, inplace=True), nn.Dropout2d(0.25)]
if bn:
block.append(nn.BatchNorm2d(out_filters, 0.8))
return block
self.model = nn.Sequential(
*discriminator_block(1, 16, bn=False),
*discriminator_block(16, 32),
*discriminator_block(32, 64),
*discriminator_block(64, 128),
)
ds_size = img_size // 2 ** 4
self.adv_layer = nn.Linear(128 * ds_size ** 2, 1)
def forward(self, img):
out = self.model(img)
out = out.view(out.shape[0], -1)
validity = self.adv_layer(out)
return validity
| true | true |
f714e05a9950ae7e34d7aeff00d5514217f9ba13 | 1,811 | py | Python | vimsetting/bundle/VOom/autoload/voom/voom_mode_org.py | thuleqaid/boost_study | 59469af4e7b569c87c0a1de53644a39e7f9ae766 | [
"MIT"
] | 1 | 2016-03-02T16:44:59.000Z | 2016-03-02T16:44:59.000Z | vimsetting/bundle/VOom/autoload/voom/voom_mode_org.py | thuleqaid/boost_study | 59469af4e7b569c87c0a1de53644a39e7f9ae766 | [
"MIT"
] | null | null | null | vimsetting/bundle/VOom/autoload/voom/voom_mode_org.py | thuleqaid/boost_study | 59469af4e7b569c87c0a1de53644a39e7f9ae766 | [
"MIT"
] | null | null | null | # voom_mode_org.py
# Last Modified: 2013-10-31
# VOoM -- Vim two-pane outliner, plugin for Python-enabled Vim 7.x
# Website: http://www.vim.org/scripts/script.php?script_id=2657
# Author: Vlad Irnov (vlad DOT irnov AT gmail DOT com)
# License: CC0, see http://creativecommons.org/publicdomain/zero/1.0/
"""
VOoM markup mode for Emacs Org-mode headline format.
See |voom-mode-org|, ../../doc/voom.txt#*voom-mode-org*
"""
import re
headline_match = re.compile(r'^(\*+)\s').match
def hook_makeOutline(VO, blines):
"""Return (tlines, bnodes, levels) for Body lines blines.
blines is either Vim buffer object (Body) or list of buffer lines.
"""
Z = len(blines)
tlines, bnodes, levels = [], [], []
tlines_add, bnodes_add, levels_add = tlines.append, bnodes.append, levels.append
for i in xrange(Z):
if not blines[i].startswith('*'):
continue
bline = blines[i]
m = headline_match(bline)
if not m:
continue
lev = len(m.group(1))
head = bline[lev:].strip()
tline = ' %s|%s' %('. '*(lev-1), head)
tlines_add(tline)
bnodes_add(i+1)
levels_add(lev)
return (tlines, bnodes, levels)
def hook_newHeadline(VO, level, blnum, tlnum):
"""Return (tree_head, bodyLines).
tree_head is new headline string in Tree buffer (text after |).
bodyLines is list of lines to insert in Body buffer.
"""
tree_head = 'NewHeadline'
bodyLines = ['%s %s' %('*'*level, tree_head), '']
return (tree_head, bodyLines)
def hook_changeLevBodyHead(VO, h, levDelta):
"""Increase of decrease level number of Body headline by levDelta."""
if levDelta==0: return h
m = headline_match(h)
level = len(m.group(1))
return '%s%s' %('*'*(level+levDelta), h[m.end(1):])
| 31.224138 | 84 | 0.629486 |
import re
headline_match = re.compile(r'^(\*+)\s').match
def hook_makeOutline(VO, blines):
Z = len(blines)
tlines, bnodes, levels = [], [], []
tlines_add, bnodes_add, levels_add = tlines.append, bnodes.append, levels.append
for i in xrange(Z):
if not blines[i].startswith('*'):
continue
bline = blines[i]
m = headline_match(bline)
if not m:
continue
lev = len(m.group(1))
head = bline[lev:].strip()
tline = ' %s|%s' %('. '*(lev-1), head)
tlines_add(tline)
bnodes_add(i+1)
levels_add(lev)
return (tlines, bnodes, levels)
def hook_newHeadline(VO, level, blnum, tlnum):
tree_head = 'NewHeadline'
bodyLines = ['%s %s' %('*'*level, tree_head), '']
return (tree_head, bodyLines)
def hook_changeLevBodyHead(VO, h, levDelta):
if levDelta==0: return h
m = headline_match(h)
level = len(m.group(1))
return '%s%s' %('*'*(level+levDelta), h[m.end(1):])
| true | true |
f714e1119b8f7e34f516de3746a674c249a5f780 | 969 | py | Python | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 151 | 2015-01-09T19:25:05.000Z | 2022-01-05T02:05:52.000Z | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 1 | 2016-08-04T13:12:51.000Z | 2016-08-04T13:12:51.000Z | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 59 | 2015-02-04T19:13:58.000Z | 2021-07-28T23:36:09.000Z | Experiment(description='SE extrapolation experiment',
data_dir='../data/tsdlr_9010/',
max_depth=1,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=1000,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-01-28-extrap-SE/',
iters=250,
base_kernels='SE',
random_seed=1,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'})])
| 33.413793 | 83 | 0.495356 | Experiment(description='SE extrapolation experiment',
data_dir='../data/tsdlr_9010/',
max_depth=1,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=1000,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-01-28-extrap-SE/',
iters=250,
base_kernels='SE',
random_seed=1,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
mean='ff.MeanZero()',
kernel='ff.NoiseKernel()',
lik='ff.LikGauss(sf=-np.Inf)',
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'})])
| true | true |
f714e120e53da70f11f7c6bb03b17c5cd3ea28fe | 1,756 | py | Python | python/draw_height_time.py | ntu-as-cooklab/Weather-Balloon-Radiosonde-Tracker | 85e85a869439798475ad6711c280dae630c03c46 | [
"MIT"
] | 5 | 2018-04-24T19:43:20.000Z | 2022-01-24T19:31:48.000Z | python/draw_height_time.py | ntu-as-cooklab/Weather-Balloon-Radiosonde-Tracker | 85e85a869439798475ad6711c280dae630c03c46 | [
"MIT"
] | 3 | 2017-12-28T15:30:49.000Z | 2018-03-07T15:01:25.000Z | python/draw_height_time.py | ntu-as-cooklab/Weather-Balloon-Radiosonde-Tracker | 85e85a869439798475ad6711c280dae630c03c46 | [
"MIT"
] | 1 | 2018-03-07T12:59:27.000Z | 2018-03-07T12:59:27.000Z | from os import listdir, getcwd
from os.path import isfile, join
from math import sin, cos
from setting_utils import timeLimit, heightLimit, input_stream
# Collect the uploaded radiosonde ascii exports (*.txt) from uploads/.
uploads_dir = join(getcwd(), 'uploads')
files = [f for f in listdir(uploads_dir) if isfile(join(uploads_dir, f))]
files = [f for f in files if f.endswith(".txt")]
# Build a JavaScript data file: one [minutes, meters] series per sounding.
czml = (
    'var height_time_data = {\n'
    'data: [\n'
)
for fname in files:  # `fname`: avoid shadowing the `file` builtin
    czml += ('[')
    data = []
    # `fin` instead of `input_stream`: the old name shadowed the
    # `input_stream` imported from setting_utils, and the explicit
    # close() inside the with-block was redundant.
    with open(join(uploads_dir, str(fname)), 'r') as fin:
        lines = fin.readlines()
    for i in range(4, len(lines)):  # skip the 4 header lines
        words = [x for x in lines[i].split(' ') if len(x) > 0]
        # ---Setting---
        minutes = float(words[0]) + float(words[1]) / 60
        height = float(words[3])
        if minutes > timeLimit:
            break
        if height > heightLimit:
            break
        # -------------
        if len(words) > 15:  # avoid truncated/crash data records
            data.append([minutes, height])
    for minutes_value, height_value in data:
        czml += ('[ %f, %f], ' % (minutes_value, height_value))
    czml += ('], \n')
czml += (
    '],\n'
    'filename: ['
)
for fname in files:
    czml += ('"%s",' % (fname))
czml += (
    '],\n'
    'xAxisName: "minute(s)",\n'
    "yAxisName: 'meter(s)',\n"
    'xMax: 0,\n'
    'yMax: 0,\n'
    'xMin: 1000,\n'
    'yMin: 1000,\n'
    'target: "height_time",\n'
    'W: 800,\n'
    'H: 400\n'
    '}\n'
)
# Emit as a JS file so the web front-end can load it via a <script> tag.
with open(join(getcwd(), 'balloon', 'data', 'height_time_data.js'), 'w') as fout:
    fout.write(czml)
| 22.512821 | 95 | 0.514237 | from os import listdir, getcwd
from os.path import isfile, join
from math import sin, cos
from setting_utils import timeLimit, heightLimit, input_stream
files = [f for f in listdir(join(getcwd(), 'uploads')) if isfile(join(getcwd(), 'uploads', f))]
files = [f for f in files if f.endswith(".txt")]
czml =(
'var height_time_data = {\n'
'data: [\n'
)
fileIndex = 0
for file in files:
czml += ('[');
FILE_PATH = join(getcwd(), 'uploads', str(file))
data = []
with open(FILE_PATH, 'r') as input_stream :
lines = input_stream.readlines()
for i in range( 4, len(lines)) :
words = lines[i].split(' ')
words = [x for x in words if len(x) > 0]
minutes = float(words[0]) + float(words[1])/60
height = float(words[3])
if(minutes > timeLimit):
break
if(height > heightLimit):
break
if (len(words)>15) :
minutes = float(words[0]) + float(words[1])/60
data.append([ minutes, float(words[3])])
input_stream.close()
for j in range(0, len(data)) :
czml += ('[ %f, %f], ' %(data[j][0],data[j][1]))
fileIndex += 1
czml += ('], \n')
czml += (
'],\n'
'filename: ['
)
for file in files:
czml += ('"%s",' %(file))
czml += (
'],\n'
'xAxisName: "minute(s)",\n'
"yAxisName: 'meter(s)',\n"
'xMax: 0,\n'
'yMax: 0,\n'
'xMin: 1000,\n'
'yMin: 1000,\n'
'target: "height_time",\n'
'W: 800,\n'
'H: 400\n'
'}\n'
)
fout = open(join(getcwd(), 'balloon', 'data', 'height_time_data.js'), 'w')
fout.write(czml)
fout.close()
| true | true |
f714e1362842774b2075236c5764ecaf5ce5ef8c | 24,230 | py | Python | bagit_profile.py | tdilauro/bagit-profiles-validator | e73b66223fc05bc2498cb7f6dd5814940e8852e7 | [
"Unlicense"
] | 3 | 2018-05-18T16:07:57.000Z | 2020-05-01T16:08:26.000Z | bagit_profile.py | tdilauro/bagit-profiles-validator | e73b66223fc05bc2498cb7f6dd5814940e8852e7 | [
"Unlicense"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/bagit_profile.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 3 | 2018-11-06T17:04:45.000Z | 2021-07-21T08:08:03.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A simple Python module for validating BagIt profiles. See
https://github.com/bagit-profiles/bagit-profiles
for more information.
This module is intended for use with https://github.com/edsu/bagit but does not extend it.
Usage:
import bagit
import bagit_profile
# Instantiate an existing Bag using https://github.com/edsu/bagit.
bag = bagit.Bag('mydir')
# Instantiate a profile, supplying its URI.
my_profile = bagit_profile.Profile('http://example.com/bagitprofile.json')
# Validate 'Serialization' and 'Accept-Serialization'. This must be done
# before .validate(bag) is called. 'mydir' is the path to the Bag.
if my_profile.validate_serialization('mydir'):
print "Serialization validates"
else:
print "Serialization does not validate"
# Validate the rest of the profile.
if my_profile.validate(bag):
print "Validates"
else:
print "Does not validate"
"""
import json
import logging
import mimetypes
import sys
from fnmatch import fnmatch
from os import listdir, walk
from os.path import basename, exists, isdir, isfile, join, relpath, split
if sys.version_info > (3,):
basestring = str
    from urllib.request import urlopen  # pylint: disable=no-name-in-module
else:
basestring = basestring
from urllib import urlopen # pylint: disable=no-name-in-module
# Define an exception class for use within this module.
class ProfileValidationError(Exception):
    """Raised when a bag (or the profile itself) violates a profile constraint."""
    def __init__(self, value):
        # Keep the offending value around for callers that inspect it.
        self.value = value
        super(ProfileValidationError, self).__init__(value)
    def __str__(self):
        # repr() so string messages render quoted, matching historic output.
        return repr(self.value)
class ProfileValidationReport(object):  # pylint: disable=useless-object-inheritance
    """Collects the ProfileValidationError instances from one validate() run."""
    def __init__(self):
        # Errors accumulated by Profile.validate(); empty means the bag passed.
        self.errors = []
    @property
    def is_valid(self):
        """True when no validation errors were recorded."""
        return len(self.errors) == 0
    def __str__(self):
        if self.is_valid:
            return "VALID"
        return "INVALID: %s" % "\n ".join(["%s" % e for e in self.errors])
# Define the Profile class.
class Profile(object):  # pylint: disable=useless-object-inheritance
    """A BagIt profile, able to validate bagit.Bag objects against itself.

    Construct with the profile's URI (optionally supplying the profile
    content as a dict or JSON string); then call
    validate_serialization(path) followed by validate(bag).
    """
    # bag-info.txt tag that must point back at this profile's URI.
    _baginfo_profile_id_tag = "BagIt-Profile-Identifier"
    def __init__(self, url, profile=None, ignore_baginfo_tag_case=False):
        """Load (or accept) the profile JSON and sanity-check it.

        url -- the profile's URI; fetched over HTTP when profile is None.
        profile -- optional dict or JSON string with the profile content.
        ignore_baginfo_tag_case -- compare bag-info.txt tag names
            case-insensitively when True.
        """
        self.url = url
        if profile is None:
            profile = self.get_profile()
        else:
            if isinstance(profile, dict):
                profile = profile
            else:
                profile = json.loads(profile)
        self.validate_bagit_profile(profile)
        # Report of the errors in the last run of validate
        self.report = None
        self.profile = profile
        self.ignore_baginfo_tag_case = ignore_baginfo_tag_case
    def _fail(self, msg):
        # Log and raise: validate() collects these per-check failures.
        logging.error(msg)
        raise ProfileValidationError(msg)
    def _warn(self, msg):
        # Non-fatal problem; logged (at ERROR level) but not raised.
        logging.error(msg)
    def get_profile(self):
        """Fetch and parse the profile JSON from self.url.

        Exits the whole process on any retrieval or parsing failure.
        """
        try:
            f = urlopen(self.url)
            profile = f.read()
            if sys.version_info > (3,):
                profile = profile.decode("utf-8")
            profile = json.loads(profile)
        except Exception as e:  # pylint: disable=broad-except
            # NOTE(review): print() receives the format string and args
            # separately here, so the message is not interpolated -- only
            # the logging call below formats it. Confirm intended.
            print("Cannot retrieve profile from %s: %s", self.url, e)
            logging.error("Cannot retrieve profile from %s: %s", self.url, e)
            # This is a fatal error.
            sys.exit(1)
        return profile
    # Call all the validate functions other than validate_bagit_profile(),
    # which we've already called. 'Serialization' and 'Accept-Serialization'
    # are validated in validate_serialization().
    def validate(self, bag):
        """Run every applicable check against *bag*; return True when valid.

        Individual failures are collected in self.report rather than
        propagated to the caller.
        """
        self.report = ProfileValidationReport()
        # Each entry: (check callable, summary message (currently unused),
        # minimum profile version that introduced the check, or None).
        for (fn, msg, min_version) in [
            (self.validate_bag_info, "Error in bag-info.txt", None),
            (self.validate_manifests_required, "Required manifests not found", None),
            (
                self.validate_tag_manifests_required,
                "Required tag manifests not found",
                None,
            ),
            (self.validate_payload_manifests_allowed, "Disallowed payload manifests present", (1, 3, 0)),
            (self.validate_tag_manifests_allowed, "Disallowed tag manifests present", (1, 3, 0)),
            (self.validate_tag_files_required, "Required tag files not found", None),
            (
                self.validate_allow_fetch,
                "fetch.txt is present but is not allowed",
                None,
            ),
            (
                self.validate_accept_bagit_version,
                "Required BagIt version not found",
                None,
            ),
            (self.validate_tag_files_allowed, "Tag files not allowed", (1, 2, 0)),
        ]:
            try:
                # Skip checks newer than the profile's declared version.
                if min_version and self.profile_version_info < min_version:
                    logging.info(
                        "Skipping %s introduced in version %s (version validated: %s)",
                        fn,
                        min_version,
                        self.profile_version_info,
                    )
                    continue
                fn(bag)
            except ProfileValidationError as e:
                # self._warn("%s: %s" % (msg, e))
                self.report.errors.append(e)
        return self.report.is_valid
    def validate_bagit_profile(self, profile):
        """
        Set default values for unspecified tags and validate the profile itself.
        """
        # Note: mutates *profile* in place to fill in the defaults.
        if "Serialization" not in profile:
            profile["Serialization"] = "optional"
        if "Allow-Fetch.txt" not in profile:
            profile["Allow-Fetch.txt"] = True
        if (
            "BagIt-Profile-Info" in profile
            and "BagIt-Profile-Version" in profile["BagIt-Profile-Info"]
        ):
            profile_version = profile["BagIt-Profile-Info"]["BagIt-Profile-Version"]
        else:
            profile_version = "1.1.0"
        # e.g. "1.3.0" -> (1, 3, 0); used for version-gated checks.
        self.profile_version_info = tuple(int(i) for i in profile_version.split("."))
        self.validate_bagit_profile_info(profile)
        self.validate_bagit_profile_accept_bagit_versions(profile)
        self.validate_bagit_profile_bag_info(profile)
    # Check self.profile['bag-profile-info'] to see if "Source-Organization",
    # "External-Description", "Version" and "BagIt-Profile-Identifier" are present.
    def validate_bagit_profile_info(self, profile):
        """Validate the required tags inside the profile's 'BagIt-Profile-Info'."""
        if "BagIt-Profile-Info" not in profile:
            self._fail("%s: Required 'BagIt-Profile-Info' dict is missing." % profile)
        if "Source-Organization" not in profile["BagIt-Profile-Info"]:
            self._fail(
                "%s: Required 'Source-Organization' tag is not in 'BagIt-Profile-Info'."
                % profile
            )
        if "Version" not in profile["BagIt-Profile-Info"]:
            self._warn(
                "%s: Required 'Version' tag is not in 'BagIt-Profile-Info'." % profile
            )
            # NOTE(review): returning here skips the
            # 'BagIt-Profile-Identifier' check below -- confirm intended.
            return False
        if "BagIt-Profile-Identifier" not in profile["BagIt-Profile-Info"]:
            self._fail(
                "%s: Required 'BagIt-Profile-Identifier' tag is not in 'BagIt-Profile-Info'."
                % profile
            )
        return True
    def validate_bagit_profile_accept_bagit_versions(self, profile):
        """
        Ensure all versions in 'Accept-BagIt-Version' are strings
        """
        if "Accept-BagIt-Version" in profile:
            for version_number in profile["Accept-BagIt-Version"]:
                # pylint: disable=undefined-variable
                if not isinstance(version_number, basestring):
                    raise ProfileValidationError(
                        'Version number "%s" in "Accept-BagIt-Version" is not a string!'
                        % version_number
                    )
        return True
    def validate_bagit_profile_bag_info(self, profile):
        """Validate the profile's own 'Bag-Info' section (not the bag's)."""
        if 'Bag-Info' in profile:
            for tag in profile['Bag-Info']:
                config = profile['Bag-Info'][tag]
                # 'description' was added in profile spec 1.3.0.
                if self.profile_version_info >= (1, 3, 0) and \
                        'description' in config and not isinstance(config['description'], basestring):
                    self._fail("%s: Profile Bag-Info '%s' tag 'description' property, when present, must be a string." %
                               (profile, tag))
        return True
    # Validate tags in self.profile['Bag-Info'].
    def validate_bag_info(self, bag):
        """Validate the bag's bag-info.txt against the profile's 'Bag-Info'."""
        # First, check to see if bag-info.txt exists.
        path_to_baginfotxt = join(bag.path, "bag-info.txt")
        if not exists(path_to_baginfotxt):
            self._fail("%s: bag-info.txt is not present." % bag)
        # Then check for the required 'BagIt-Profile-Identifier' tag and ensure it has the same value
        # as self.url.
        if self.ignore_baginfo_tag_case:
            bag_info = {self.normalize_tag(k): v for k, v in bag.info.items()}
            ignore_tag_case_help = ""
        else:
            bag_info = bag.info
            ignore_tag_case_help = " Set 'ignore_baginfo_tag_case' to True if you wish to ignore tag case."
        profile_id_tag = self.normalize_tag(self._baginfo_profile_id_tag)
        if profile_id_tag not in bag_info:
            self._fail(
                ("%s: Required '%s' tag is not in bag-info.txt." + ignore_tag_case_help)
                % (bag, self._baginfo_profile_id_tag)
            )
        else:
            if bag_info[profile_id_tag] != self.url:
                self._fail(
                    "%s: '%s' tag does not contain this profile's URI: <%s> != <%s>"
                    % (bag, profile_id_tag, bag_info[profile_id_tag], self.url)
                )
        # Then, iterate through self.profile['Bag-Info'] and if a key has a dict containing a 'required' key that is
        # True, check to see if that key exists in bag.info.
        for tag in self.profile["Bag-Info"]:
            normalized_tag = self.normalize_tag(tag)
            config = self.profile["Bag-Info"][tag]
            if "required" in config and config["required"] is True:
                if normalized_tag not in bag_info:
                    self._fail(
                        ("%s: Required tag '%s' is not present in bag-info.txt." + ignore_tag_case_help)
                        % (bag, tag)
                    )
            # If the tag is in bag-info.txt, check to see if the value is constrained.
            if "values" in config and normalized_tag in bag_info:
                if bag_info[normalized_tag] not in config["values"]:
                    self._fail(
                        "%s: Required tag '%s' is present in bag-info.txt but does not have an allowed value ('%s')."
                        % (bag, tag, bag_info[normalized_tag])
                    )
            # If the tag is nonrepeatable, make sure it only exists once. We do this by checking to see if the value for the key is a list.
            if "repeatable" in config and config["repeatable"] is False:
                value = bag_info.get(normalized_tag)
                if isinstance(value, list):
                    self._fail(
                        "%s: Nonrepeatable tag '%s' occurs %s times in bag-info.txt."
                        % (bag, tag, len(value))
                    )
        return True
    # Normalize to canonical lowercase, if profile is ignoring bag-info.txt tag case.
    def normalize_tag(self, tag):
        return tag if not self.ignore_baginfo_tag_case else tag.lower()
    # For each member of self.profile['manifests_required'], throw an exception if
    # the manifest file is not present.
    def validate_manifests_required(self, bag):
        """Check that every manifest listed in 'Manifests-Required' exists."""
        for manifest_type in self.profile["Manifests-Required"]:
            path_to_manifest = join(bag.path, "manifest-" + manifest_type + ".txt")
            if not exists(path_to_manifest):
                self._fail(
                    "%s: Required manifest type '%s' is not present in Bag."
                    % (bag, manifest_type)
                )
        return True
    # For each member of self.profile['tag_manifests_required'], throw an exception if
    # the tag manifest file is not present.
    def validate_tag_manifests_required(self, bag):
        """Check that every tag manifest in 'Tag-Manifests-Required' exists."""
        # Tag manifests are optional, so we return True if none are defined in the profile.
        if "Tag-Manifests-Required" not in self.profile:
            return True
        for tag_manifest_type in self.profile["Tag-Manifests-Required"]:
            path_to_tag_manifest = join(
                bag.path, "tagmanifest-" + tag_manifest_type + ".txt"
            )
            if not exists(path_to_tag_manifest):
                self._fail(
                    "%s: Required tag manifest type '%s' is not present in Bag."
                    % (bag, tag_manifest_type)
                )
        return True
    @staticmethod
    def manifest_algorithms(manifest_files):
        """Yield the checksum algorithm name encoded in each manifest filename.

        e.g. 'tagmanifest-sha256.txt' -> 'sha256', 'manifest-md5.txt' -> 'md5'.
        """
        for filepath in manifest_files:
            filename = basename(filepath)
            if filename.startswith("tagmanifest-"):
                prefix = "tagmanifest-"
            else:
                prefix = "manifest-"
            algorithm = filename.replace(prefix, "").replace(".txt", "")
            yield algorithm
    def validate_tag_manifests_allowed(self, bag):
        """Check the bag's tag manifests against 'Tag-Manifests-Allowed'."""
        return self._validate_allowed_manifests(bag, manifest_type="tag",
                                                manifests_present=self.manifest_algorithms(bag.tagmanifest_files()),
                                                allowed_attribute="Tag-Manifests-Allowed",
                                                required_attribute="Tag-Manifests-Required")
    def validate_payload_manifests_allowed(self, bag):
        """Check the bag's payload manifests against 'Manifests-Allowed'."""
        return self._validate_allowed_manifests(bag, manifest_type="payload",
                                                manifests_present=self.manifest_algorithms(bag.manifest_files()),
                                                allowed_attribute="Manifests-Allowed",
                                                required_attribute="Manifests-Required")
    def _validate_allowed_manifests(self, bag, manifest_type=None, manifests_present=None,
                                    allowed_attribute=None, required_attribute=None):
        """Shared check: required algorithms must be allowed, and no manifest
        present in the bag may use an algorithm outside the allowed list."""
        # The 'Allowed' list itself is optional.
        if allowed_attribute not in self.profile:
            return True
        allowed = self.profile[allowed_attribute]
        required = self.profile[required_attribute] if required_attribute in self.profile else []
        required_but_not_allowed = [alg for alg in required if alg not in allowed]
        if required_but_not_allowed:
            self._fail("%s: Required %s manifest type(s) %s not allowed by %s" %
                       (bag, manifest_type, [str(a) for a in required_but_not_allowed], allowed_attribute))
        present_but_not_allowed = [alg for alg in manifests_present if alg not in allowed]
        if present_but_not_allowed:
            self._fail("%s: Unexpected %s manifest type(s) '%s' present, but not allowed by %s" %
                       (bag, manifest_type, [str(a) for a in present_but_not_allowed], allowed_attribute))
        return True
    def validate_tag_files_allowed(self, bag):
        """
        Validate the ``Tag-Files-Allowed`` tag.

        NOTE(review): unlike the sibling checks, this returns None
        (not True) on success -- callers only rely on the exception.
        """
        allowed = (
            self.profile["Tag-Files-Allowed"]
            if "Tag-Files-Allowed" in self.profile
            else ["*"]
        )
        required = (
            self.profile["Tag-Files-Required"]
            if "Tag-Files-Required" in self.profile
            else []
        )
        # For each member of 'Tag-Files-Required' ensure it is also in 'Tag-Files-Allowed'.
        required_but_not_allowed = [f for f in required if not fnmatch_any(f, allowed)]
        if required_but_not_allowed:
            self._fail(
                "%s: Required tag files '%s' not listed in Tag-Files-Allowed"
                % (bag, required_but_not_allowed)
            )
        # For each tag file in the bag base directory, ensure it is also in 'Tag-Files-Allowed'.
        for tag_file in find_tag_files(bag.path):
            tag_file = relpath(tag_file, bag.path)
            if not fnmatch_any(tag_file, allowed):
                self._fail(
                    "%s: Existing tag file '%s' is not listed in Tag-Files-Allowed."
                    % (bag, tag_file)
                )
    # For each member of self.profile['Tag-Files-Required'], throw an exception if
    # the path does not exist.
    def validate_tag_files_required(self, bag):
        """Check that every file listed in 'Tag-Files-Required' exists."""
        # Tag files are optional, so we return True if none are defined in the profile.
        if "Tag-Files-Required" not in self.profile:
            return True
        for tag_file in self.profile["Tag-Files-Required"]:
            path_to_tag_file = join(bag.path, tag_file)
            if not exists(path_to_tag_file):
                self._fail(
                    "%s: Required tag file '%s' is not present in Bag."
                    % (bag, path_to_tag_file)
                )
        return True
    # Check to see if this constraint is False, and if it is, then check to see
    # if the fetch.txt file exists. If it does, throw an exception.
    def validate_allow_fetch(self, bag):
        """Enforce the 'Allow-Fetch.txt' constraint."""
        if self.profile["Allow-Fetch.txt"] is False:
            path_to_fetchtxt = join(bag.path, "fetch.txt")
            if exists(path_to_fetchtxt):
                self._fail("%s: Fetch.txt is present but is not allowed." % bag)
        return True
    # Check the Bag's version, and if it's not in the list of allowed versions,
    # throw an exception.
    def validate_accept_bagit_version(self, bag):
        """Check the bag's 'BagIt-Version' against 'Accept-BagIt-Version'."""
        actual = bag.tags["BagIt-Version"]
        allowed = self.profile["Accept-BagIt-Version"]
        if actual not in allowed:
            self._fail(
                "%s: Bag version '%s' is not in list of allowed values: %s"
                % (bag, actual, allowed)
            )
        return True
    # Perform tests on 'Serialization' and 'Accept-Serialization', in one function.
    # Since https://github.com/edsu/bagit can't tell us if a Bag is serialized or
    # not, we need to pass this function the path to the Bag, not the object. Also,
    # this method needs to be called before .validate().
    def validate_serialization(self, path_to_bag):
        """Validate 'Serialization'/'Accept-Serialization' for the bag at *path_to_bag*."""
        # First, perform the two negative tests.
        if not exists(path_to_bag):
            raise IOError("Can't find file %s" % path_to_bag)
        if self.profile["Serialization"] == "required" and isdir(path_to_bag):
            self._fail(
                "%s: Bag serialization is required but Bag is a directory."
                % path_to_bag
            )
        if self.profile["Serialization"] == "forbidden" and isfile(path_to_bag):
            self._fail(
                "%s: Bag serialization is forbidden but Bag appears is a file."
                % path_to_bag
            )
        # Then test to see whether the Bag is serialized (is a file) and whether the mimetype is one
        # of the allowed types.
        if (
            self.profile["Serialization"] == "required"
            or self.profile["Serialization"] == "optional"
            and isfile(path_to_bag)
        ):
            _, bag_file = split(path_to_bag)
            mtype = mimetypes.guess_type(bag_file)
            if mtype[0] not in self.profile["Accept-Serialization"]:
                # NOTE(review): this message looks copy-pasted from the
                # 'forbidden' branch above; the real failure here is an
                # unaccepted serialization format.
                self._fail(
                    "%s: Bag serialization is forbidden but Bag appears is a file."
                    % path_to_bag
                )
        # If we have passed the serialization tests, return True.
        return True
# Return true if any of the pattern fnmatches a file path
def fnmatch_any(f, pats):
    """Return True if path *f* matches at least one glob pattern in *pats*."""
    return any(fnmatch(f, pat) for pat in pats)
# Find tag files
def find_tag_files(bag_dir):
    """Yield the paths of all tag files under *bag_dir*.

    A tag file is any regular file that is not part of the payload
    (everything under ``data``) and is not one of the reserved BagIt
    files in the bag's base directory (manifests, tag manifests,
    bagit.txt, bag-info.txt, fetch.txt).
    """
    # Reserved file name patterns in the bag's base directory.
    reserved = (
        "manifest-*.txt",
        "bag-info.txt",
        "tagmanifest-*.txt",
        "bagit.txt",
        "fetch.txt",
    )
    for root, _, filenames in walk(bag_dir):
        reldir = relpath(root, bag_dir)
        # `filename` (not `basename`) avoids shadowing os.path.basename,
        # which this module imports at the top of the file.
        for filename in filenames:
            if fnmatch(reldir, "data*"):
                continue  # payload files are never tag files
            if reldir == "." and any(fnmatch(filename, p) for p in reserved):
                continue  # reserved BagIt files in the base directory
            fpath = join(root, filename)
            if isfile(fpath):
                yield fpath
def _configure_logging(args):
import time
log_format = "%(asctime)s - %(levelname)s - %(message)s"
if args.quiet:
args.loglevel = "ERROR"
level = logging.getLevelName(args.loglevel)
if args.no_logfile:
logging.basicConfig(level=level, format=log_format)
else:
if args.logdir:
filename = join(
args.log + "/logs", "BagitProfile_" + time.strftime("%y_%m_%d") + ".log"
)
else:
filename = "BagitProfile%s.log" % time.strftime("%y_%m_%d")
logging.basicConfig(filename=filename, level=level, format=log_format)
def _main():
    """Command-line entry point: parse arguments, load the profile, and
    validate the given bag.

    Exit codes: 1 = serialization check failed, 2 = profile validation
    failed (bagit and pkg_resources are imported lazily so the module can
    be used as a library without them).
    """
    # Command-line version.
    import bagit
    from argparse import ArgumentParser
    from pkg_resources import get_distribution
    parser = ArgumentParser(description="Validate BagIt bags against BagIt profiles")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s, v" + get_distribution("bagit_profile").version,
    )
    parser.add_argument(
        "--quiet",
        action="store_true",
        help="Suppress all output except errors. Default: %(default)s",
    )
    parser.add_argument(
        "-i", "--ignore-baginfo-tag-case",
        dest="ignore_baginfo_tag_case",
        action="store_true",
        help="Ignore capitalization for Bag-Info tag names. Default: %(default)s",
    )
    parser.add_argument(
        "--log", dest="logdir", help="Log directory. Default: %(default)s"
    )
    parser.add_argument(
        "--no-logfile",
        action="store_true",
        help="Do not log to a log file. Default: %(default)s",
    )
    parser.add_argument(
        "--loglevel",
        default="INFO",
        choices=("DEBUG", "INFO", "ERROR"),
        help="Log level. Default: %(default)s",
    )
    parser.add_argument(
        "--file", help="Load profile from FILE, not by URL. Default: %(default)s."
    )
    parser.add_argument(
        "--report",
        action="store_true",
        help="Print validation report. Default: %(default)s",
    )
    parser.add_argument(
        "--skip",
        action="append",
        default=[],
        help="Skip validation steps. Default: %(default)s",
        choices=("serialization", "profile"),
    )
    parser.add_argument("profile_url", nargs=1)
    parser.add_argument("bagit_path", nargs=1)
    args = parser.parse_args()
    profile_url = args.profile_url[0]
    bagit_path = args.bagit_path[0]
    _configure_logging(args)
    # Instantiate a profile, supplying its URI.
    if args.file:
        with open(args.file, "r") as local_file:
            profile = Profile(profile_url, profile=local_file.read(),
                              ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
    else:
        profile = Profile(profile_url, ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
    # Instantiate an existing Bag.
    bag = bagit.Bag(bagit_path)  # pylint: disable=no-member
    # Validate 'Serialization' and 'Accept-Serialization', then perform general validation.
    if "serialization" not in args.skip:
        if profile.validate_serialization(bagit_path):
            print(u"✓ Serialization validates")
        else:
            print(u"✗ Serialization does not validate")
            sys.exit(1)
    # Validate the rest of the profile.
    if "profile" not in args.skip:
        if profile.validate(bag):
            print(u"✓ Validates against %s" % profile_url)
        else:
            print(u"✗ Does not validate against %s" % profile_url)
            if args.report:
                print(profile.report)
            sys.exit(2)
if __name__ == "__main__":
    _main()
| 39.786535 | 139 | 0.586587 |
import json
import logging
import mimetypes
import sys
from fnmatch import fnmatch
from os import listdir, walk
from os.path import basename, exists, isdir, isfile, join, relpath, split
if sys.version_info > (3,):
basestring = str
from urllib.request import urlopen
else:
basestring = basestring
from urllib import urlopen
class ProfileValidationError(Exception):
def __init__(self, value):
super(ProfileValidationError, self).__init__(value)
self.value = value
def __str__(self):
return repr(self.value)
class ProfileValidationReport(object):
def __init__(self):
self.errors = []
@property
def is_valid(self):
return not self.errors
def __str__(self):
if self.is_valid:
return "VALID"
return "INVALID: %s" % "\n ".join(["%s" % e for e in self.errors])
class Profile(object):
_baginfo_profile_id_tag = "BagIt-Profile-Identifier"
def __init__(self, url, profile=None, ignore_baginfo_tag_case=False):
self.url = url
if profile is None:
profile = self.get_profile()
else:
if isinstance(profile, dict):
profile = profile
else:
profile = json.loads(profile)
self.validate_bagit_profile(profile)
self.report = None
self.profile = profile
self.ignore_baginfo_tag_case = ignore_baginfo_tag_case
def _fail(self, msg):
logging.error(msg)
raise ProfileValidationError(msg)
def _warn(self, msg):
logging.error(msg)
def get_profile(self):
try:
f = urlopen(self.url)
profile = f.read()
if sys.version_info > (3,):
profile = profile.decode("utf-8")
profile = json.loads(profile)
except Exception as e:
print("Cannot retrieve profile from %s: %s", self.url, e)
logging.error("Cannot retrieve profile from %s: %s", self.url, e)
sys.exit(1)
return profile
# are validated in validate_serialization().
def validate(self, bag):
self.report = ProfileValidationReport()
for (fn, msg, min_version) in [
(self.validate_bag_info, "Error in bag-info.txt", None),
(self.validate_manifests_required, "Required manifests not found", None),
(
self.validate_tag_manifests_required,
"Required tag manifests not found",
None,
),
(self.validate_payload_manifests_allowed, "Disallowed payload manifests present", (1, 3, 0)),
(self.validate_tag_manifests_allowed, "Disallowed tag manifests present", (1, 3, 0)),
(self.validate_tag_files_required, "Required tag files not found", None),
(
self.validate_allow_fetch,
"fetch.txt is present but is not allowed",
None,
),
(
self.validate_accept_bagit_version,
"Required BagIt version not found",
None,
),
(self.validate_tag_files_allowed, "Tag files not allowed", (1, 2, 0)),
]:
try:
if min_version and self.profile_version_info < min_version:
logging.info(
"Skipping %s introduced in version %s (version validated: %s)",
fn,
min_version,
self.profile_version_info,
)
continue
fn(bag)
except ProfileValidationError as e:
# self._warn("%s: %s" % (msg, e))
self.report.errors.append(e)
return self.report.is_valid
def validate_bagit_profile(self, profile):
if "Serialization" not in profile:
profile["Serialization"] = "optional"
if "Allow-Fetch.txt" not in profile:
profile["Allow-Fetch.txt"] = True
if (
"BagIt-Profile-Info" in profile
and "BagIt-Profile-Version" in profile["BagIt-Profile-Info"]
):
profile_version = profile["BagIt-Profile-Info"]["BagIt-Profile-Version"]
else:
profile_version = "1.1.0"
self.profile_version_info = tuple(int(i) for i in profile_version.split("."))
self.validate_bagit_profile_info(profile)
self.validate_bagit_profile_accept_bagit_versions(profile)
self.validate_bagit_profile_bag_info(profile)
# Check self.profile['bag-profile-info'] to see if "Source-Organization",
# "External-Description", "Version" and "BagIt-Profile-Identifier" are present.
def validate_bagit_profile_info(self, profile):
if "BagIt-Profile-Info" not in profile:
self._fail("%s: Required 'BagIt-Profile-Info' dict is missing." % profile)
if "Source-Organization" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'Source-Organization' tag is not in 'BagIt-Profile-Info'."
% profile
)
if "Version" not in profile["BagIt-Profile-Info"]:
self._warn(
"%s: Required 'Version' tag is not in 'BagIt-Profile-Info'." % profile
)
return False
if "BagIt-Profile-Identifier" not in profile["BagIt-Profile-Info"]:
self._fail(
"%s: Required 'BagIt-Profile-Identifier' tag is not in 'BagIt-Profile-Info'."
% profile
)
return True
def validate_bagit_profile_accept_bagit_versions(self, profile):
if "Accept-BagIt-Version" in profile:
for version_number in profile["Accept-BagIt-Version"]:
# pylint: disable=undefined-variable
if not isinstance(version_number, basestring):
raise ProfileValidationError(
'Version number "%s" in "Accept-BagIt-Version" is not a string!'
% version_number
)
return True
def validate_bagit_profile_bag_info(self, profile):
if 'Bag-Info' in profile:
for tag in profile['Bag-Info']:
config = profile['Bag-Info'][tag]
if self.profile_version_info >= (1, 3, 0) and \
'description' in config and not isinstance(config['description'], basestring):
self._fail("%s: Profile Bag-Info '%s' tag 'description' property, when present, must be a string." %
(profile, tag))
return True
# Validate tags in self.profile['Bag-Info'].
def validate_bag_info(self, bag):
# First, check to see if bag-info.txt exists.
path_to_baginfotxt = join(bag.path, "bag-info.txt")
if not exists(path_to_baginfotxt):
self._fail("%s: bag-info.txt is not present." % bag)
# Then check for the required 'BagIt-Profile-Identifier' tag and ensure it has the same value
# as self.url.
if self.ignore_baginfo_tag_case:
bag_info = {self.normalize_tag(k): v for k, v in bag.info.items()}
ignore_tag_case_help = ""
else:
bag_info = bag.info
ignore_tag_case_help = " Set 'ignore_baginfo_tag_case' to True if you wish to ignore tag case."
profile_id_tag = self.normalize_tag(self._baginfo_profile_id_tag)
if profile_id_tag not in bag_info:
self._fail(
("%s: Required '%s' tag is not in bag-info.txt." + ignore_tag_case_help)
% (bag, self._baginfo_profile_id_tag)
)
else:
if bag_info[profile_id_tag] != self.url:
self._fail(
"%s: '%s' tag does not contain this profile's URI: <%s> != <%s>"
% (bag, profile_id_tag, bag_info[profile_id_tag], self.url)
)
for tag in self.profile["Bag-Info"]:
normalized_tag = self.normalize_tag(tag)
config = self.profile["Bag-Info"][tag]
if "required" in config and config["required"] is True:
if normalized_tag not in bag_info:
self._fail(
("%s: Required tag '%s' is not present in bag-info.txt." + ignore_tag_case_help)
% (bag, tag)
)
if "values" in config and normalized_tag in bag_info:
if bag_info[normalized_tag] not in config["values"]:
self._fail(
"%s: Required tag '%s' is present in bag-info.txt but does not have an allowed value ('%s')."
% (bag, tag, bag_info[normalized_tag])
)
if "repeatable" in config and config["repeatable"] is False:
value = bag_info.get(normalized_tag)
if isinstance(value, list):
self._fail(
"%s: Nonrepeatable tag '%s' occurs %s times in bag-info.txt."
% (bag, tag, len(value))
)
return True
def normalize_tag(self, tag):
return tag if not self.ignore_baginfo_tag_case else tag.lower()
def validate_manifests_required(self, bag):
for manifest_type in self.profile["Manifests-Required"]:
path_to_manifest = join(bag.path, "manifest-" + manifest_type + ".txt")
if not exists(path_to_manifest):
self._fail(
"%s: Required manifest type '%s' is not present in Bag."
% (bag, manifest_type)
)
return True
def validate_tag_manifests_required(self, bag):
if "Tag-Manifests-Required" not in self.profile:
return True
for tag_manifest_type in self.profile["Tag-Manifests-Required"]:
path_to_tag_manifest = join(
bag.path, "tagmanifest-" + tag_manifest_type + ".txt"
)
if not exists(path_to_tag_manifest):
self._fail(
"%s: Required tag manifest type '%s' is not present in Bag."
% (bag, tag_manifest_type)
)
return True
@staticmethod
def manifest_algorithms(manifest_files):
for filepath in manifest_files:
filename = basename(filepath)
if filename.startswith("tagmanifest-"):
prefix = "tagmanifest-"
else:
prefix = "manifest-"
algorithm = filename.replace(prefix, "").replace(".txt", "")
yield algorithm
def validate_tag_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="tag",
manifests_present=self.manifest_algorithms(bag.tagmanifest_files()),
allowed_attribute="Tag-Manifests-Allowed",
required_attribute="Tag-Manifests-Required")
def validate_payload_manifests_allowed(self, bag):
return self._validate_allowed_manifests(bag, manifest_type="payload",
manifests_present=self.manifest_algorithms(bag.manifest_files()),
allowed_attribute="Manifests-Allowed",
required_attribute="Manifests-Required")
def _validate_allowed_manifests(self, bag, manifest_type=None, manifests_present=None,
allowed_attribute=None, required_attribute=None):
if allowed_attribute not in self.profile:
return True
allowed = self.profile[allowed_attribute]
required = self.profile[required_attribute] if required_attribute in self.profile else []
required_but_not_allowed = [alg for alg in required if alg not in allowed]
if required_but_not_allowed:
self._fail("%s: Required %s manifest type(s) %s not allowed by %s" %
(bag, manifest_type, [str(a) for a in required_but_not_allowed], allowed_attribute))
present_but_not_allowed = [alg for alg in manifests_present if alg not in allowed]
if present_but_not_allowed:
self._fail("%s: Unexpected %s manifest type(s) '%s' present, but not allowed by %s" %
(bag, manifest_type, [str(a) for a in present_but_not_allowed], allowed_attribute))
return True
def validate_tag_files_allowed(self, bag):
allowed = (
self.profile["Tag-Files-Allowed"]
if "Tag-Files-Allowed" in self.profile
else ["*"]
)
required = (
self.profile["Tag-Files-Required"]
if "Tag-Files-Required" in self.profile
else []
)
required_but_not_allowed = [f for f in required if not fnmatch_any(f, allowed)]
if required_but_not_allowed:
self._fail(
"%s: Required tag files '%s' not listed in Tag-Files-Allowed"
% (bag, required_but_not_allowed)
)
for tag_file in find_tag_files(bag.path):
tag_file = relpath(tag_file, bag.path)
if not fnmatch_any(tag_file, allowed):
self._fail(
"%s: Existing tag file '%s' is not listed in Tag-Files-Allowed."
% (bag, tag_file)
)
def validate_tag_files_required(self, bag):
if "Tag-Files-Required" not in self.profile:
return True
for tag_file in self.profile["Tag-Files-Required"]:
path_to_tag_file = join(bag.path, tag_file)
if not exists(path_to_tag_file):
self._fail(
"%s: Required tag file '%s' is not present in Bag."
% (bag, path_to_tag_file)
)
return True
def validate_allow_fetch(self, bag):
if self.profile["Allow-Fetch.txt"] is False:
path_to_fetchtxt = join(bag.path, "fetch.txt")
if exists(path_to_fetchtxt):
self._fail("%s: Fetch.txt is present but is not allowed." % bag)
return True
def validate_accept_bagit_version(self, bag):
actual = bag.tags["BagIt-Version"]
allowed = self.profile["Accept-BagIt-Version"]
if actual not in allowed:
self._fail(
"%s: Bag version '%s' is not in list of allowed values: %s"
% (bag, actual, allowed)
)
return True
# not, we need to pass this function the path to the Bag, not the object. Also,
# this method needs to be called before .validate().
def validate_serialization(self, path_to_bag):
# First, perform the two negative tests.
if not exists(path_to_bag):
raise IOError("Can't find file %s" % path_to_bag)
if self.profile["Serialization"] == "required" and isdir(path_to_bag):
self._fail(
"%s: Bag serialization is required but Bag is a directory."
% path_to_bag
)
if self.profile["Serialization"] == "forbidden" and isfile(path_to_bag):
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
if (
self.profile["Serialization"] == "required"
or self.profile["Serialization"] == "optional"
and isfile(path_to_bag)
):
_, bag_file = split(path_to_bag)
mtype = mimetypes.guess_type(bag_file)
if mtype[0] not in self.profile["Accept-Serialization"]:
self._fail(
"%s: Bag serialization is forbidden but Bag appears is a file."
% path_to_bag
)
return True
def fnmatch_any(f, pats):
    """Return True if *f* matches at least one fnmatch pattern in *pats*."""
    return any(fnmatch(f, pat) for pat in pats)
def find_tag_files(bag_dir):
    """Yield the paths of tag files found under *bag_dir*.

    Skips everything under the payload ("data*") directory and the reserved
    top-level BagIt files (manifests, tag manifests, bagit.txt, bag-info.txt,
    fetch.txt).
    """
    reserved_names = [
        "manifest-*.txt",
        "bag-info.txt",
        "tagmanifest-*.txt",
        "bagit.txt",
        "fetch.txt",
    ]
    for root, _, names in walk(bag_dir):
        reldir = relpath(root, bag_dir)
        # The payload check depends only on the directory, so compute it once.
        in_payload = fnmatch(reldir, "data*")
        for name in names:
            if in_payload or (reldir == "." and fnmatch_any(name, reserved_names)):
                continue
            candidate = join(root, name)
            if isfile(candidate):
                yield candidate
def _configure_logging(args):
import time
log_format = "%(asctime)s - %(levelname)s - %(message)s"
if args.quiet:
args.loglevel = "ERROR"
level = logging.getLevelName(args.loglevel)
if args.no_logfile:
logging.basicConfig(level=level, format=log_format)
else:
if args.logdir:
filename = join(
args.log + "/logs", "BagitProfile_" + time.strftime("%y_%m_%d") + ".log"
)
else:
filename = "BagitProfile%s.log" % time.strftime("%y_%m_%d")
logging.basicConfig(filename=filename, level=level, format=log_format)
def _main():
    """Command-line entry point: validate a bag on disk against a BagIt profile."""
    import bagit
    from argparse import ArgumentParser
    from pkg_resources import get_distribution
    parser = ArgumentParser(description="Validate BagIt bags against BagIt profiles")
    parser.add_argument(
        "--version",
        action="version",
        version="%(prog)s, v" + get_distribution("bagit_profile").version,
    )
    parser.add_argument(
        "--quiet",
        action="store_true",
        help="Suppress all output except errors. Default: %(default)s",
    )
    parser.add_argument(
        "-i", "--ignore-baginfo-tag-case",
        dest="ignore_baginfo_tag_case",
        action="store_true",
        help="Ignore capitalization for Bag-Info tag names. Default: %(default)s",
    )
    parser.add_argument(
        "--log", dest="logdir", help="Log directory. Default: %(default)s"
    )
    parser.add_argument(
        "--no-logfile",
        action="store_true",
        help="Do not log to a log file. Default: %(default)s",
    )
    parser.add_argument(
        "--loglevel",
        default="INFO",
        choices=("DEBUG", "INFO", "ERROR"),
        help="Log level. Default: %(default)s",
    )
    parser.add_argument(
        "--file", help="Load profile from FILE, not by URL. Default: %(default)s."
    )
    parser.add_argument(
        "--report",
        action="store_true",
        help="Print validation report. Default: %(default)s",
    )
    parser.add_argument(
        "--skip",
        action="append",
        default=[],
        help="Skip validation steps. Default: %(default)s",
        choices=("serialization", "profile"),
    )
    parser.add_argument("profile_url", nargs=1)
    parser.add_argument("bagit_path", nargs=1)
    args = parser.parse_args()
    # nargs=1 wraps the positionals in single-element lists.
    profile_url = args.profile_url[0]
    bagit_path = args.bagit_path[0]
    _configure_logging(args)
    # With --file the profile JSON is read locally; profile_url is still used
    # as the profile's identifier.
    if args.file:
        with open(args.file, "r") as local_file:
            profile = Profile(profile_url, profile=local_file.read(),
                              ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
    else:
        profile = Profile(profile_url, ignore_baginfo_tag_case=args.ignore_baginfo_tag_case)
    bag = bagit.Bag(bagit_path)
    # Serialization must be validated against the on-disk path, before the
    # profile validation proper. Exit code 1: serialization failure.
    if "serialization" not in args.skip:
        if profile.validate_serialization(bagit_path):
            print(u"✓ Serialization validates")
        else:
            print(u"✗ Serialization does not validate")
            sys.exit(1)
    # Exit code 2: profile validation failure.
    if "profile" not in args.skip:
        if profile.validate(bag):
            print(u"✓ Validates against %s" % profile_url)
        else:
            print(u"✗ Does not validate against %s" % profile_url)
            if args.report:
                print(profile.report)
            sys.exit(2)
if __name__ == "__main__":
_main()
| true | true |
f714e2196b368d98e5bacb6be6c5d3f861d519e1 | 1,340 | py | Python | tests/unit/Sentry.py | jayvdb/platform-engine | 31fb8f329dc12d75e35d85c138718f68568b893a | [
"Apache-2.0"
] | null | null | null | tests/unit/Sentry.py | jayvdb/platform-engine | 31fb8f329dc12d75e35d85c138718f68568b893a | [
"Apache-2.0"
] | null | null | null | tests/unit/Sentry.py | jayvdb/platform-engine | 31fb8f329dc12d75e35d85c138718f68568b893a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from asyncy.Exceptions import StoryscriptError
from asyncy.Sentry import Sentry
from raven import Client
def test_init(patch):
    """Sentry.init is a no-op without a DSN and builds a raven Client with one."""
    # Calling with no DSN must not blow up (no-op path).
    Sentry.init(None, None)
    patch.init(Client)
    Sentry.init('sentry_dsn', 'release_ver')
    expected_kwargs = dict(
        dsn='sentry_dsn',
        enable_breadcrumbs=False,
        install_logging_hook=False,
        hook_libraries=[],
        release='release_ver',
    )
    Client.__init__.assert_called_with(**expected_kwargs)
    assert Sentry._sentry_client is not None
def test_capture_exc(patch, magic):
    """capture_exc forwards user context and extra data to the raven client."""
    patch.many(Client, ['captureException', 'user_context'])
    Sentry.init('https://foo:foo@sentry.io/123', 'release_ver')
    story = magic()
    story.app.app_id = 'app_id'
    story.app.version = 'app_version'
    story.name = 'story_name'
    line = magic()
    line['ln'] = '28'
    # Raise for real so the exception carries a traceback when captured.
    try:
        raise StoryscriptError(message='foo', story=story, line=line)
    except StoryscriptError as err:
        Sentry.capture_exc(err, story, line, {'foo': 'bar'})
    expected_context = {
        'app_uuid': 'app_id',
        'app_version': 'app_version',
    }
    Client.user_context.assert_called_with(expected_context)
    expected_extra = {
        'story_line': line['ln'],
        'story_name': 'story_name',
        'foo': 'bar',
    }
    Client.captureException.assert_called_with(extra=expected_extra)
| 27.916667 | 69 | 0.653731 |
from asyncy.Exceptions import StoryscriptError
from asyncy.Sentry import Sentry
from raven import Client
def test_init(patch):
Sentry.init(None, None)
patch.init(Client)
Sentry.init('sentry_dsn', 'release_ver')
Client.__init__.assert_called_with(
dsn='sentry_dsn',
enable_breadcrumbs=False,
install_logging_hook=False,
hook_libraries=[],
release='release_ver')
assert Sentry._sentry_client is not None
def test_capture_exc(patch, magic):
patch.many(Client, ['captureException', 'user_context'])
Sentry.init('https://foo:foo@sentry.io/123', 'release_ver')
story = magic()
story.app.app_id = 'app_id'
story.app.version = 'app_version'
story.name = 'story_name'
line = magic()
line['ln'] = '28'
try:
raise StoryscriptError(message='foo', story=story, line=line)
except StoryscriptError as e:
Sentry.capture_exc(e, story, line, {'foo': 'bar'})
Client.user_context.assert_called_with({
'app_uuid': 'app_id',
'app_version': 'app_version'
})
Client.captureException.assert_called_with(extra={
'story_line': line['ln'],
'story_name': 'story_name',
'foo': 'bar'
})
| true | true |
f714e2c0711678f8f014bdff84f94e2145a726a0 | 1,389 | py | Python | bot.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null | bot.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null | bot.py | phy1um/tmtc-discord-bot | 7d01cd4c1a78dc0b8aa2bb703c8970ff7bb27f92 | [
"MIT"
] | null | null | null | from constants import *
from gateway_protocol import Gateway
from api import DiscordAPI
import bot_config as config
import logging as log
log.basicConfig(encoding='utf-8', level=log.DEBUG)
class Bot(object):
    """Thin wrapper tying the Discord gateway connection to the REST API client."""
    def __init__(self, token):
        # self.g: websocket gateway connection (receives events).
        # self.api: REST API client (performs actions).
        self.g = Gateway(token)
        self.api = DiscordAPI(token)
    def run_gateway(self):
        # Blocking: runs the gateway event loop.
        self.g.run()
    def event(self, f):
        # Decorator: delegates handler registration to the Gateway.
        # Presumably dispatched by the handler's function name — confirm in
        # Gateway.event.
        return self.g.event(f)
if __name__ == "__main__":
    print("=== bot startup ===")
    # Load bot token, watched message id and emoji->event mapping from disk.
    cfg = config.from_file("config.json")
    # NOTE(review): log_level is computed but never applied; logging stays at
    # the DEBUG level set by log.basicConfig above — confirm intent.
    log_level = log.getLevelName(cfg.log_level)
    bot = Bot(cfg.token)
    @bot.event
    async def ready(x):
        # Fired once the gateway handshake completes.
        log.info("gateway connection ready")
    @bot.event
    async def message_reaction_add(msg):
        # Only reactions on the single configured message are of interest.
        emoji = msg.data.emoji["name"]
        if msg.data.message_id != cfg.message_id:
            # wrong message, do nothing
            log.debug(f"wrong message id, skipping")
            return
        if emoji not in cfg.emoji:
            # unknown emoji, do nothing
            log.debug(f"unknown emoji, skipping")
            return
        event_type = cfg.emoji[emoji]
        if event_type == "announcement":
            # Grant the announcement role to whoever reacted.
            user_id = msg.data.user_id
            log.info(f"adding announce role to {user_id}")
            bot.api.run(f"/guilds/{GUILD_ID}/members/{user_id}/roles/{ANNOUNCEMENT_ROLE}", "PUT")
    # Blocks forever servicing gateway events.
    bot.run_gateway()
| 23.948276 | 97 | 0.614111 | from constants import *
from gateway_protocol import Gateway
from api import DiscordAPI
import bot_config as config
import logging as log
log.basicConfig(encoding='utf-8', level=log.DEBUG)
class Bot(object):
def __init__(self, token):
self.g = Gateway(token)
self.api = DiscordAPI(token)
def run_gateway(self):
self.g.run()
def event(self, f):
return self.g.event(f)
if __name__ == "__main__":
print("=== bot startup ===")
cfg = config.from_file("config.json")
log_level = log.getLevelName(cfg.log_level)
bot = Bot(cfg.token)
@bot.event
async def ready(x):
log.info("gateway connection ready")
@bot.event
async def message_reaction_add(msg):
emoji = msg.data.emoji["name"]
if msg.data.message_id != cfg.message_id:
log.debug(f"wrong message id, skipping")
return
if emoji not in cfg.emoji:
log.debug(f"unknown emoji, skipping")
return
event_type = cfg.emoji[emoji]
if event_type == "announcement":
user_id = msg.data.user_id
log.info(f"adding announce role to {user_id}")
bot.api.run(f"/guilds/{GUILD_ID}/members/{user_id}/roles/{ANNOUNCEMENT_ROLE}", "PUT")
bot.run_gateway()
| true | true |
f714e36b0fce1ae5deb107d8990396cd61bd0910 | 3,150 | py | Python | database/zenodo.py | MRCIEU/ewascatalog | a37dfeb207537831b4c5e313e0edecbad8a7c1a2 | [
"MIT"
] | 1 | 2021-08-05T09:39:48.000Z | 2021-08-05T09:39:48.000Z | database/zenodo.py | MRCIEU/ewascatalog | a37dfeb207537831b4c5e313e0edecbad8a7c1a2 | [
"MIT"
] | null | null | null | database/zenodo.py | MRCIEU/ewascatalog | a37dfeb207537831b4c5e313e0edecbad8a7c1a2 | [
"MIT"
] | null | null | null | # script to upload a file to zenodo sandbox via api
# seperate sandbox- and real-zenodo accounts and ACCESS_TOKENs each need to be created
# to adapt this script to real-zenodo (from sandbox implementation):
# update urls to zenodo.org from sandbox.zenodo.org
# update SANDBOX_TOKEN to a ACCESS_TOKEN from real-zenodo
import sys, json, requests
import pandas as pd
# Usage: zenodo.py <studyid> <file_dir> <access_token>
studyid = sys.argv[1]
file_dir = sys.argv[2]
access_token = sys.argv[3]
# Study submissions live under <file_dir>/ewas-sum-stats/to-add/<studyid>.
data_dir = file_dir + '/ewas-sum-stats/to-add/' + studyid
zfile = data_dir + '/zenodo.csv'
try:
    zdata = pd.read_csv(zfile)
except FileNotFoundError:
    print("Can't find the file " + zfile)
    sys.exit()
print('Starting Zenodo upload process')
# The token is generated per Zenodo account (sandbox and production differ).
ACCESS_TOKEN = access_token
# Step 1: create an empty deposition to obtain a deposition id.
headers = {"Content-Type": "application/json"}
r = requests.post('https://zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
deposition_id = r.json()['id']
# Step 2: attach the results file to the deposition.
# Bug fix: open the upload in binary mode and close the handle afterwards —
# the old code opened in text mode and leaked the file object.
data = {'name': 'results.csv'}
with open(data_dir + '/results.csv', 'rb') as results_file:
    r = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files={'file': results_file})
# Step 3: attach the deposition metadata taken from zenodo.csv.
title = zdata.loc[0, 'title']
authors = zdata.loc[0, 'authors']
desc = zdata.loc[0, 'desc']
desc = desc + '\n\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'
data = {'metadata':
        {'title': title,
         'upload_type': 'dataset',
         'description': desc,
         'creators': [{'name': authors}]}}
r = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
# Step 4: publish the deposition; Zenodo answers 202 Accepted on success.
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN})
status_code = r.status_code
if status_code != 202:
    raise ValueError("Status code was" + str(status_code) + " and it should be 202. Check zenodo")
else:
    print("Status code is 202. Happy days!")
| 40.384615 | 273 | 0.729524 |
import sys, json, requests
import pandas as pd
studyid = sys.argv[1]
file_dir = sys.argv[2]
access_token = sys.argv[3]
data_dir = file_dir+'/ewas-sum-stats/to-add/'+studyid
zfile=data_dir+'/zenodo.csv'
try:
zdata = pd.read_csv(zfile)
except FileNotFoundError:
print("Can't find the file "+zfile)
sys.exit()
print('Starting Zenodo upload process')
# specify ACCESS_TOKEN
# this needs to be generated for each sanbox/real account
ACCESS_TOKEN = access_token
# create empty upload
headers = {"Content-Type": "application/json"}
r = requests.post('https://zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions', params={'access_token': ACCESS_TOKEN}, json={}, headers=headers)
r.status_code
r.json()
# Get the deposition id from the previous response
# Upload the file to be deposited to Zenodo
deposition_id = r.json()['id']
data = {'name': 'results.csv'}
files = {'file': open(data_dir+'/results.csv')}
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/files' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=data, files=files)
r.status_code
r.json()
# specify and attach the metadata for the upload
title = zdata.loc[0, 'title']
authors = zdata.loc[0, 'authors']
desc = zdata.loc[0, 'desc']
desc = desc + '\n\n' + 'Upload of this dataset was completed by The EWAS Catalog team. The data can be queried along with hundreds of other EWAS at ewascatalog.org. To upload your EWAS summary statistics and have a zenodo DOI generated for you go to ewascatalog.org/upload'
data = {'metadata':
{'title': title,
'upload_type': 'dataset',
'description': desc,
'creators': [{'name': authors}]}}
r = requests.put('https://zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
# r = requests.put('https://sandbox.zenodo.org/api/deposit/depositions/%s' % deposition_id, params={'access_token': ACCESS_TOKEN}, data=json.dumps(data), headers=headers)
r.status_code
r.json()
# publish
r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
# r = requests.post('https://sandbox.zenodo.org/api/deposit/depositions/%s/actions/publish' % deposition_id, params={'access_token': ACCESS_TOKEN} )
status_code = r.status_code
if status_code != 202:
raise ValueError("Status code was" + str(status_code) + " and it should be 202. Check zenodo")
else:
print("Status code is 202. Happy days!")
# should be: 202
| true | true |
f714e3a075ce1c505d60b891128c7925fcf59c0c | 4,476 | py | Python | mars/worker/tests/test_dispatcher.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | [
"Apache-2.0"
] | null | null | null | mars/worker/tests/test_dispatcher.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | [
"Apache-2.0"
] | null | null | null | mars/worker/tests/test_dispatcher.py | ChenQuan/mars | 46fc9747e99210cebfabfc2d85bcc8272440d1a3 | [
"Apache-2.0"
] | null | null | null | # Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from functools import partial
import gevent
from mars.tests.core import patch_method
from mars.utils import get_next_port
from mars.actors import create_actor_pool
from mars.promise import PromiseActor
from mars.worker import *
from mars.worker.tests.base import WorkerCase
class TaskActor(PromiseActor):
    """Test actor that occupies a dispatch slot for a fixed delay, recording start times."""
    def __init__(self, queue_name, call_records):
        super(TaskActor, self).__init__()
        # Dispatch queue this actor serves (e.g. 'g1', 'g2').
        self._queue_name = queue_name
        # Shared dict, key -> wall-clock time at which queued_call started.
        self._call_records = call_records
        self._dispatch_ref = None
    def post_create(self):
        # Register this actor as a free slot on its queue once it is alive.
        self._dispatch_ref = self.promise_ref(DispatchActor.default_name())
        self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
    def queued_call(self, key, delay):
        """Record the call time under *key*, sleep *delay* seconds, then free the slot."""
        try:
            self._call_records[key] = time.time()
            gevent.sleep(delay)
        finally:
            # Always hand the slot back, even if the sleep is interrupted.
            self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
class Test(WorkerCase):
    """Integration test for DispatchActor slot registration and dispatching."""
    @patch_method(DispatchActor._init_chunk_store)
    def testDispatch(self, *_):
        call_records = dict()
        group_size = 4
        mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
        with create_actor_pool(n_process=1, backend='gevent',
                               address=mock_scheduler_addr) as pool:
            dispatch_ref = pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
            # actors of g1
            [pool.create_actor(TaskActor, 'g1', call_records) for _ in range(group_size)]
            [pool.create_actor(TaskActor, 'g2', call_records) for _ in range(group_size)]
            # Each group should expose exactly group_size slots; an
            # unregistered group exposes none.
            self.assertEqual(len(dispatch_ref.get_slots('g1')), group_size)
            self.assertEqual(len(dispatch_ref.get_slots('g2')), group_size)
            self.assertEqual(len(dispatch_ref.get_slots('g3')), 0)
            # Hash-based slot selection must be deterministic for a fixed key.
            self.assertEqual(dispatch_ref.get_hash_slot('g1', 'hash_str'),
                             dispatch_ref.get_hash_slot('g1', 'hash_str'))
            # A callback aimed at a non-existent actor must not consume a slot.
            dispatch_ref.get_free_slot('g1', callback=(('NonExist', mock_scheduler_addr), '_non_exist', {}))
            self.assertEqual(dispatch_ref.get_free_slots_num().get('g1'), group_size)
            # tasks within [0, group_size - 1] will run almost simultaneously,
            # while the last one will be delayed due to lack of slots
            with self.run_actor_test(pool) as test_actor:
                from mars.promise import Promise
                p = Promise(done=True)
                _dispatch_ref = test_actor.promise_ref(DispatchActor.default_name())
                def _call_on_dispatched(uid, key=None):
                    # uid is None when dispatch found no slot (unknown group).
                    if uid is None:
                        call_records[key] = 'NoneUID'
                    else:
                        test_actor.promise_ref(uid).queued_call(key, 2, _tell=True)
                # Chain group_size + 1 rounds of g1/g2 dispatches; partial()
                # binds idx eagerly so each round keeps its own key.
                for idx in range(group_size + 1):
                    p = p.then(lambda *_: _dispatch_ref.get_free_slot('g1', _promise=True)) \
                        .then(partial(_call_on_dispatched, key='%d_1' % idx)) \
                        .then(lambda *_: _dispatch_ref.get_free_slot('g2', _promise=True)) \
                        .then(partial(_call_on_dispatched, key='%d_2' % idx))
                p.then(lambda *_: _dispatch_ref.get_free_slot('g3', _promise=True)) \
                    .then(partial(_call_on_dispatched, key='N_1')) \
                    .then(lambda *_: test_actor.set_result(None))
                self.get_result(20)
            self.assertEqual(call_records['N_1'], 'NoneUID')
            # First group_size calls start (nearly) together; the extra call
            # had to wait for a slot, i.e. 1-3 seconds later.
            self.assertLess(sum(abs(call_records['%d_1' % idx] - call_records['0_1'])
                                for idx in range(group_size)), 1)
            self.assertGreater(call_records['%d_1' % group_size] - call_records['0_1'], 1)
            self.assertLess(call_records['%d_1' % group_size] - call_records['0_1'], 3)
            dispatch_ref.destroy()
import time
from functools import partial
import gevent
from mars.tests.core import patch_method
from mars.utils import get_next_port
from mars.actors import create_actor_pool
from mars.promise import PromiseActor
from mars.worker import *
from mars.worker.tests.base import WorkerCase
class TaskActor(PromiseActor):
def __init__(self, queue_name, call_records):
super(TaskActor, self).__init__()
self._queue_name = queue_name
self._call_records = call_records
self._dispatch_ref = None
def post_create(self):
self._dispatch_ref = self.promise_ref(DispatchActor.default_name())
self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
def queued_call(self, key, delay):
try:
self._call_records[key] = time.time()
gevent.sleep(delay)
finally:
self._dispatch_ref.register_free_slot(self.uid, self._queue_name)
class Test(WorkerCase):
@patch_method(DispatchActor._init_chunk_store)
def testDispatch(self, *_):
call_records = dict()
group_size = 4
mock_scheduler_addr = '127.0.0.1:%d' % get_next_port()
with create_actor_pool(n_process=1, backend='gevent',
address=mock_scheduler_addr) as pool:
dispatch_ref = pool.create_actor(DispatchActor, uid=DispatchActor.default_name())
[pool.create_actor(TaskActor, 'g1', call_records) for _ in range(group_size)]
[pool.create_actor(TaskActor, 'g2', call_records) for _ in range(group_size)]
self.assertEqual(len(dispatch_ref.get_slots('g1')), group_size)
self.assertEqual(len(dispatch_ref.get_slots('g2')), group_size)
self.assertEqual(len(dispatch_ref.get_slots('g3')), 0)
self.assertEqual(dispatch_ref.get_hash_slot('g1', 'hash_str'),
dispatch_ref.get_hash_slot('g1', 'hash_str'))
dispatch_ref.get_free_slot('g1', callback=(('NonExist', mock_scheduler_addr), '_non_exist', {}))
self.assertEqual(dispatch_ref.get_free_slots_num().get('g1'), group_size)
with self.run_actor_test(pool) as test_actor:
from mars.promise import Promise
p = Promise(done=True)
_dispatch_ref = test_actor.promise_ref(DispatchActor.default_name())
def _call_on_dispatched(uid, key=None):
if uid is None:
call_records[key] = 'NoneUID'
else:
test_actor.promise_ref(uid).queued_call(key, 2, _tell=True)
for idx in range(group_size + 1):
p = p.then(lambda *_: _dispatch_ref.get_free_slot('g1', _promise=True)) \
.then(partial(_call_on_dispatched, key='%d_1' % idx)) \
.then(lambda *_: _dispatch_ref.get_free_slot('g2', _promise=True)) \
.then(partial(_call_on_dispatched, key='%d_2' % idx))
p.then(lambda *_: _dispatch_ref.get_free_slot('g3', _promise=True)) \
.then(partial(_call_on_dispatched, key='N_1')) \
.then(lambda *_: test_actor.set_result(None))
self.get_result(20)
self.assertEqual(call_records['N_1'], 'NoneUID')
self.assertLess(sum(abs(call_records['%d_1' % idx] - call_records['0_1'])
for idx in range(group_size)), 1)
self.assertGreater(call_records['%d_1' % group_size] - call_records['0_1'], 1)
self.assertLess(call_records['%d_1' % group_size] - call_records['0_1'], 3)
dispatch_ref.destroy()
| true | true |
f714e47b106eac676e74b6b6d55a7dccf1215a4c | 8,482 | py | Python | datasets/wikitext/wikitext.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 10,608 | 2020-09-10T15:47:50.000Z | 2022-03-31T22:51:47.000Z | datasets/wikitext/wikitext.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 2,396 | 2020-09-10T14:55:31.000Z | 2022-03-31T19:41:04.000Z | datasets/wikitext/wikitext.py | WojciechKusa/datasets | 1406a04c3e911cec2680d8bc513653e0cafcaaa4 | [
"Apache-2.0"
] | 1,530 | 2020-09-10T21:43:10.000Z | 2022-03-31T01:59:12.000Z | """TODO(wikitext): Add a description here."""
import os
import datasets
_CITATION = """\
@misc{merity2016pointer,
title={Pointer Sentinel Mixture Models},
author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
year={2016},
eprint={1609.07843},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
License.
"""
_HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
class WikitextConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikitext."""
    def __init__(self, data_url, **kwargs):
        """BuilderConfig for Wikitext.

        Args:
          data_url: `string`, url to the dataset archive (word or raw level)
          **kwargs: keyword arguments forwarded to super.
        """
        super(WikitextConfig, self).__init__(
            version=datasets.Version(
                "1.0.0",
            ),
            **kwargs,
        )
        self.data_url = data_url
class Wikitext(datasets.GeneratorBasedBuilder):
    """WikiText language modeling dataset (word-level and raw character-level variants)."""

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        WikitextConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
    ]

    # Maps each config name to (extracted sub-directory, file extension).
    # Word-level variants ship '.tokens' files, raw variants ship '.raw' files.
    _CONFIG_LAYOUT = {
        "wikitext-103-v1": ("wikitext-103", "tokens"),
        "wikitext-2-v1": ("wikitext-2", "tokens"),
        "wikitext-103-raw-v1": ("wikitext-103-raw", "raw"),
        "wikitext-2-raw-v1": ("wikitext-2-raw", "raw"),
    }

    def _info(self):
        """Return the dataset metadata: a single string feature named 'text'."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string")
                }
            ),
            # There is no (input, target) pair for language modeling data.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and emit the test/train/validation splits.

        Refactored: the previous version duplicated the same three
        SplitGenerators once per config (nested else/if) and silently returned
        None for an unknown config name.
        """
        try:
            subdir, ext = self._CONFIG_LAYOUT[self.config.name]
        except KeyError:
            raise ValueError("Unknown config name: %s" % self.config.name)
        data_file = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(data_file, subdir)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.test." + ext), "split": "test"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.train." + ext), "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_file": os.path.join(data_dir, "wiki.valid." + ext), "split": "valid"},
            ),
        ]

    def _generate_examples(self, data_file, split):
        """Yield (index, example) pairs, one per line of the split file.

        Whitespace-only lines are emitted with an empty 'text' value so that
        example indices stay aligned with file line numbers.
        """
        with open(data_file, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                if row.strip():
                    yield idx, {"text": row}
                else:
                    yield idx, {"text": ""}
| 43.948187 | 119 | 0.524051 |
import os
import datasets
# BibTeX entry for the WikiText paper (Merity et al., 2016).
_CITATION = """\
@misc{merity2016pointer,
title={Pointer Sentinel Mixture Models},
author={Stephen Merity and Caiming Xiong and James Bradbury and Richard Socher},
year={2016},
eprint={1609.07843},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
# Human-readable description shown on the dataset card.
_DESCRIPTION = """\
The WikiText language modeling dataset is a collection of over 100 million tokens extracted from the set of verified
Good and Featured articles on Wikipedia. The dataset is available under the Creative Commons Attribution-ShareAlike
License.
"""
# Dataset homepage and license string surfaced through DatasetInfo.
_HOMEPAGE = "https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/"
_LICENSE = "Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)"
# Base URL from which each config's zip archive is downloaded.
_DATA_URL = "https://s3.amazonaws.com/research.metamind.io/wikitext"
class WikitextConfig(datasets.BuilderConfig):
    """BuilderConfig for a single WikiText release.

    Attributes:
        data_url: URL of the zip archive containing this configuration's data.
    """

    def __init__(self, data_url, **kwargs):
        """
        Args:
            data_url: URL of the zip archive for this configuration.
            **kwargs: Forwarded to ``datasets.BuilderConfig`` (name,
                description, ...).
        """
        # Python 3 zero-argument super(); all WikiText configs share the
        # same dataset version.
        super().__init__(
            version=datasets.Version(
                "1.0.0",
            ),
            **kwargs,
        )
        self.data_url = data_url
class Wikitext(datasets.GeneratorBasedBuilder):
    """Loader for the WikiText language-modeling dataset.

    Each configuration corresponds to one of the four official releases
    (wikitext-2 / wikitext-103, word-level ``.tokens`` or raw ``.raw``).
    """

    VERSION = datasets.Version("0.1.0")
    BUILDER_CONFIGS = [
        WikitextConfig(
            name="wikitext-103-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-2-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-v1.zip",
            description="Word level dataset. No processing is needed other than replacing newlines with <eos> tokens.",
        ),
        WikitextConfig(
            name="wikitext-103-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-103-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
        WikitextConfig(
            name="wikitext-2-raw-v1",
            data_url=_DATA_URL + "/" + "wikitext-2-raw-v1.zip",
            description="Raw level dataset: the raw tokens before the addition of <unk> tokens. "
            "They should only be used for character level work or for creating newly derived datasets.",
        ),
    ]

    # Maps each config name to (extracted sub-directory, data-file extension).
    # The archive for a config extracts to <root>/<sub-dir>/wiki.<split>.<ext>.
    _CONFIG_LAYOUT = {
        "wikitext-103-v1": ("wikitext-103", "tokens"),
        "wikitext-103-raw-v1": ("wikitext-103-raw", "raw"),
        "wikitext-2-raw-v1": ("wikitext-2-raw", "raw"),
        "wikitext-2-v1": ("wikitext-2", "tokens"),
    }

    def _info(self):
        """Describe the dataset: a single string feature named "text"."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string")
                }
            ),
            # Language modeling has no (input, label) pairs, so no
            # supervised keys are declared.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive and build test/train/validation splits.

        All four configurations share the same archive layout, so the
        four near-identical branches of the original implementation are
        collapsed into a single table-driven path.

        Raises:
            KeyError: if the active config name is not a known WikiText
                config (previously an unknown name silently returned
                ``None``; every shipped config is in ``_CONFIG_LAYOUT``).
        """
        subdir, extension = self._CONFIG_LAYOUT[self.config.name]
        data_file = dl_manager.download_and_extract(self.config.data_url)
        data_dir = os.path.join(data_file, subdir)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "data_file": os.path.join(data_dir, "wiki.{}.{}".format(prefix, extension)),
                    "split": prefix,
                },
            )
            for split_name, prefix in (
                (datasets.Split.TEST, "test"),
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "valid"),
            )
        ]

    def _generate_examples(self, data_file, split):
        """Yield ``(line_number, {"text": line})`` examples from *data_file*.

        Whitespace-only lines are emitted as empty strings so indices stay
        aligned with the raw file.
        """
        with open(data_file, encoding="utf-8") as f:
            for idx, row in enumerate(f):
                if row.strip():
                    yield idx, {"text": row}
                else:
                    yield idx, {"text": ""}
| true | true |
f714e52d70d6ddff64b9a0a585c2e4068c9397b7 | 48,390 | py | Python | wagtail/api/v2/tests/test_pages.py | sir-sigurd/wagtail | 18dd01a4cc7f7c51680400d7f39f80d661c4b1d5 | [
"BSD-3-Clause"
] | 1 | 2021-08-14T13:47:33.000Z | 2021-08-14T13:47:33.000Z | wagtail/api/v2/tests/test_pages.py | denza/wagtail | 3939397850f2c73d3f960cea5cc9c2cfae2d005d | [
"BSD-3-Clause"
] | 2 | 2021-03-10T14:04:08.000Z | 2021-05-08T21:24:46.000Z | wagtail/api/v2/tests/test_pages.py | denza/wagtail | 3939397850f2c73d3f960cea5cc9c2cfae2d005d | [
"BSD-3-Clause"
] | null | null | null | import collections
import json
import mock
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.api.v2 import signal_handlers
from wagtail.core.models import Page, Site
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import StreamPage
def get_total_page_count():
    """Count the pages that are visible over the API.

    The root page is never exposed by the API, so it is excluded from the
    live/public count.
    """
    visible_pages = Page.objects.live().public()
    return visible_pages.count() - 1
class TestPageListing(TestCase):
fixtures = ['demosite.json']
    def get_response(self, **params):
        """GET the pages listing endpoint, passing *params* as query parameters."""
        return self.client.get(reverse('wagtailapi_v2:pages:listing'), params)

    def get_page_id_list(self, content):
        """Return the ids of the listed pages, in response order."""
        return [page['id'] for page in content['items']]
    # BASIC TESTS

    def test_basic(self):
        """An unfiltered listing returns JSON with the expected meta/items shape."""
        response = self.get_response()

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')

        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))

        # Check that the meta section is there
        self.assertIn('meta', content)
        self.assertIsInstance(content['meta'], dict)

        # Check that the total count is there and correct
        self.assertIn('total_count', content['meta'])
        self.assertIsInstance(content['meta']['total_count'], int)
        self.assertEqual(content['meta']['total_count'], get_total_page_count())

        # Check that the items section is there
        self.assertIn('items', content)
        self.assertIsInstance(content['items'], list)

        # Check that each page has a meta section with type, detail_url, html_url, slug and first_published_at attributes
        for page in content['items']:
            self.assertIn('meta', page)
            self.assertIsInstance(page['meta'], dict)
            self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})

    def test_unpublished_pages_dont_appear_in_list(self):
        """Unpublishing a page removes it from the listing's total count."""
        total_count = get_total_page_count()

        page = models.BlogEntryPage.objects.get(id=16)
        page.unpublish()

        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(content['meta']['total_count'], total_count - 1)

    def test_private_pages_dont_appear_in_list(self):
        """Adding a password view restriction hides the page (and descendants) from the listing."""
        total_count = get_total_page_count()

        page = models.BlogIndexPage.objects.get(id=5)
        page.view_restrictions.create(password='test')

        new_total_count = get_total_page_count()
        self.assertNotEqual(total_count, new_total_count)

        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(content['meta']['total_count'], new_total_count)
    # TYPE FILTER
    # The "type" query parameter restricts the listing to given page models
    # (comma-separated "app.Model" labels).

    def test_type_filter_items_are_all_blog_entries(self):
        response = self.get_response(type='demosite.BlogEntryPage')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')

            # No specific fields available by default
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})

    def test_type_filter_total_count(self):
        response = self.get_response(type='demosite.BlogEntryPage')
        content = json.loads(response.content.decode('UTF-8'))

        # Total count must be reduced as this filters the results
        self.assertEqual(content['meta']['total_count'], 3)

    def test_type_filter_multiple(self):
        """Filtering on two types returns pages of both, with only generic fields."""
        response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
        content = json.loads(response.content.decode('UTF-8'))

        blog_page_seen = False
        event_page_seen = False

        for page in content['items']:
            self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])

            if page['meta']['type'] == 'demosite.BlogEntryPage':
                blog_page_seen = True
            elif page['meta']['type'] == 'demosite.EventPage':
                event_page_seen = True

            # Only generic fields available
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})

        self.assertTrue(blog_page_seen, "No blog pages were found in the items")
        self.assertTrue(event_page_seen, "No event pages were found in the items")

    def test_non_existant_type_gives_error(self):
        response = self.get_response(type='demosite.IDontExist')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "type doesn't exist"})

    def test_non_page_type_gives_error(self):
        # Models that are not Page subclasses are rejected the same way
        response = self.get_response(type='auth.User')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "type doesn't exist"})
    # FIELDS
    # The "fields" query parameter selects which serialised fields appear:
    # "a,b" adds fields, "-a" removes, "*" means all, "_" means none, and
    # "rel(x,y)" selects nested fields on a relation.

    def test_fields_default(self):
        response = self.get_response(type='demosite.BlogEntryPage')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
            self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})

    def test_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'})

    def test_remove_fields(self):
        response = self.get_response(fields='-title')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta'})

    def test_remove_meta_fields(self):
        response = self.get_response(fields='-html_url')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
            self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at'})

    def test_remove_all_meta_fields(self):
        response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'title'})

    def test_remove_id_field(self):
        response = self.get_response(fields='-id')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'meta', 'title'})

    def test_all_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='*')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
            self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'html_url', 'search_description'})

    def test_all_fields_then_remove_something(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-date,-seo_title')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
            self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'html_url', 'search_description'})

    def test_remove_all_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='_,id,type')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta'})
            self.assertEqual(set(page['meta'].keys()), {'type'})

    def test_nested_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(width,height)')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_remove_nested_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(-title)')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta'})

    def test_all_nested_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_remove_all_nested_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(_,id)')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page['feed_image'].keys()), {'id'})

    def test_nested_nested_fields(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='carousel_items(image(width,height))')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            for carousel_item in page['carousel_items']:
                # Note: inline objects default to displaying all fields
                self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
                self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_fields_child_relation(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'})
            self.assertIsInstance(page['related_links'], list)

    def test_fields_foreign_key(self):
        """Foreign keys serialise as a nested object with id and meta."""
        response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            feed_image = page['feed_image']

            if feed_image is not None:
                self.assertIsInstance(feed_image, dict)
                self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
                self.assertIsInstance(feed_image['id'], int)
                self.assertIsInstance(feed_image['meta'], dict)
                self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
                self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
                self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])

    def test_fields_tags(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
        content = json.loads(response.content.decode('UTF-8'))

        for page in content['items']:
            self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title'})
            self.assertIsInstance(page['tags'], list)

    def test_fields_ordering(self):
        """Serialised fields keep a stable, documented order."""
        response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')

        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))

        # Test field order
        content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))

        field_order = [
            'id',
            'meta',
            'title',
            'date',
            'feed_image',
            'related_links',
        ]
        self.assertEqual(list(content['items'][0].keys()), field_order)

    def test_star_in_wrong_position_gives_error(self):
        response = self.get_response(fields='title,*')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})

    def test_unknown_nested_fields_give_error(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(123,title,abc)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_parent_field_gives_error(self):
        # parent field isn't allowed in listings
        response = self.get_response(fields='parent')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: parent"})

    def test_fields_without_type_gives_error(self):
        # Type-specific fields require the "type" parameter to be set
        response = self.get_response(fields='title,related_links')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: related_links"})

    def test_fields_which_are_not_in_api_fields_gives_error(self):
        response = self.get_response(fields='path')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: path"})

    def test_fields_unknown_field_gives_error(self):
        response = self.get_response(fields='123,title,abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_fields_remove_unknown_field_gives_error(self):
        response = self.get_response(fields='-123,-title,-abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_nested_fields_on_non_relational_field_gives_error(self):
        response = self.get_response(type='demosite.BlogEntryPage', fields='title(foo,bar)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "'title' does not support nested fields"})
    # FILTERING
    # Any serialised field can be used as a query parameter for exact-match
    # filtering; the expected page ids come from the demosite fixture.

    def test_filtering_exact_filter(self):
        response = self.get_response(title='Home page')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [2])

    def test_filtering_exact_filter_on_specific_field(self):
        response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16])

    def test_filtering_on_id(self):
        response = self.get_response(id=16)
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16])

    def test_filtering_on_boolean(self):
        response = self.get_response(show_in_menus='false')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 17])

    def test_filtering_doesnt_work_on_specific_fields_without_type(self):
        response = self.get_response(date='2013-12-02')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"})

    def test_filtering_tags(self):
        response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18])

    def test_filtering_multiple_tags(self):
        # Multiple tags are ANDed together
        response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16])

    def test_filtering_unknown_field_gives_error(self):
        response = self.get_response(not_a_field='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})

    def test_filtering_int_validation(self):
        response = self.get_response(id='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for id (invalid literal for int() with base 10: 'abc')"})

    def test_filtering_boolean_validation(self):
        response = self.get_response(show_in_menus='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for show_in_menus (expected 'true' or 'false', got 'abc')"})
    # CHILD OF FILTER
    # "child_of" limits results to direct children of the given page id
    # (or of the current site's homepage when set to "root").

    def test_child_of_filter(self):
        response = self.get_response(child_of=5)
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])

    def test_child_of_root(self):
        # "root" gets children of the homepage of the current site
        response = self.get_response(child_of='root')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [4, 5, 6, 20, 12])

    def test_child_of_with_type(self):
        response = self.get_response(type='demosite.EventPage', child_of=5)
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [])

    def test_child_of_unknown_page_gives_error(self):
        response = self.get_response(child_of=1000)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "parent page doesn't exist"})

    def test_child_of_not_integer_gives_error(self):
        response = self.get_response(child_of='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "child_of must be a positive integer"})

    def test_child_of_page_thats_not_in_same_site_gives_error(self):
        # Root page is not in any site, so pretend it doesn't exist
        response = self.get_response(child_of=1)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "parent page doesn't exist"})
    # DESCENDANT OF FILTER
    # "descendant_of" limits results to all descendants (any depth) of the
    # given page id; it cannot be combined with "child_of".

    def test_descendant_of_filter(self):
        response = self.get_response(descendant_of=6)
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])

    def test_descendant_of_root(self):
        # "root" gets descendants of the homepage of the current site
        # Basically returns every page except the homepage
        response = self.get_response(descendant_of='root')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])

    def test_descendant_of_with_type(self):
        response = self.get_response(type='tests.EventPage', descendant_of=6)
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [])

    def test_descendant_of_unknown_page_gives_error(self):
        response = self.get_response(descendant_of=1000)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "ancestor page doesn't exist"})

    def test_descendant_of_not_integer_gives_error(self):
        response = self.get_response(descendant_of='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "descendant_of must be a positive integer"})

    def test_descendant_of_page_thats_not_in_same_site_gives_error(self):
        # Root page is not in any site, so pretend it doesn't exist
        response = self.get_response(descendant_of=1)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "ancestor page doesn't exist"})

    def test_descendant_of_when_filtering_by_child_of_gives_error(self):
        response = self.get_response(descendant_of=6, child_of=5)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"})
    # ORDERING
    # The "order" query parameter sorts results by a field name, "-field"
    # for descending, or "random".

    def test_ordering_default(self):
        # Default order is tree (path) order
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])

    def test_ordering_by_title(self):
        response = self.get_response(order='title')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15])

    def test_ordering_by_title_backwards(self):
        response = self.get_response(order='-title')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21])

    def test_ordering_by_random(self):
        # NOTE: has a tiny chance of flaking if two random shuffles happen
        # to produce the same order.
        response_1 = self.get_response(order='random')
        content_1 = json.loads(response_1.content.decode('UTF-8'))
        page_id_list_1 = self.get_page_id_list(content_1)

        response_2 = self.get_response(order='random')
        content_2 = json.loads(response_2.content.decode('UTF-8'))
        page_id_list_2 = self.get_page_id_list(content_2)

        self.assertNotEqual(page_id_list_1, page_id_list_2)

    def test_ordering_by_random_backwards_gives_error(self):
        response = self.get_response(order='-random')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})

    def test_ordering_by_random_with_offset_gives_error(self):
        response = self.get_response(order='random', offset=10)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "random ordering with offset is not supported"})

    def test_ordering_default_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])

    def test_ordering_by_title_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage', order='title')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [19, 16, 18])

    def test_ordering_by_specific_field_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage', order='date')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])

    def test_ordering_by_unknown_field_gives_error(self):
        response = self.get_response(order='not_a_field')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
    # LIMIT
    # "limit" caps the number of returned items; the maximum is controlled
    # by the WAGTAILAPI_LIMIT_MAX setting (default 20).

    def test_limit_only_two_items_returned(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(len(content['items']), 2)

    def test_limit_total_count(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))

        # The total count must not be affected by "limit"
        self.assertEqual(content['meta']['total_count'], get_total_page_count())

    def test_limit_not_integer_gives_error(self):
        response = self.get_response(limit='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit must be a positive integer"})

    def test_limit_too_high_gives_error(self):
        response = self.get_response(limit=1000)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 20"})

    @override_settings(WAGTAILAPI_LIMIT_MAX=None)
    def test_limit_max_none_gives_no_errors(self):
        response = self.get_response(limit=1000000)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(content['items']), get_total_page_count())

    @override_settings(WAGTAILAPI_LIMIT_MAX=10)
    def test_limit_maximum_can_be_changed(self):
        response = self.get_response(limit=20)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 10"})

    @override_settings(WAGTAILAPI_LIMIT_MAX=2)
    def test_limit_default_changes_with_max(self):
        # The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
        # the default should change accordingly.
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(len(content['items']), 2)
    # OFFSET
    # "offset" skips the given number of items from the start of the listing.

    def test_offset_5_usually_appears_5th_in_list(self):
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list.index(5), 4)

    def test_offset_5_moves_after_offset(self):
        response = self.get_response(offset=4)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list.index(5), 0)

    def test_offset_total_count(self):
        response = self.get_response(offset=10)
        content = json.loads(response.content.decode('UTF-8'))

        # The total count must not be affected by "offset"
        self.assertEqual(content['meta']['total_count'], get_total_page_count())

    def test_offset_not_integer_gives_error(self):
        response = self.get_response(offset='abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "offset must be a positive integer"})
    # SEARCH
    # "search" performs a full-text query; it composes with "type", field
    # filters and "order", but only on indexed fields, and is gated by the
    # WAGTAILAPI_SEARCH_ENABLED setting.

    def test_search_for_blog(self):
        response = self.get_response(search='blog')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)

        # Check that the items are the blog index and three blog pages
        self.assertEqual(set(page_id_list), set([5, 16, 18, 19]))

    def test_search_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)

        self.assertEqual(set(page_id_list), set([16, 18, 19]))

    def test_search_with_filter(self):
        response = self.get_response(title="Another blog post", search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)

        self.assertEqual(page_id_list, [19])

    def test_search_with_filter_on_non_filterable_field(self):
        response = self.get_response(type='demosite.BlogEntryPage', body="foo", search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {
            'message': "cannot filter by 'body' while searching (field is not indexed)"
        })

    def test_search_with_order(self):
        response = self.get_response(search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))

        page_id_list = self.get_page_id_list(content)

        self.assertEqual(page_id_list, [19, 5, 16, 18])

    def test_search_with_order_on_non_filterable_field(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog', order='body')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {
            'message': "cannot order by 'body' while searching (field is not indexed)"
        })

    @override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
    def test_search_when_disabled_gives_error(self):
        response = self.get_response(search='blog')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "search is disabled"})

    def test_search_when_filtering_by_tag_gives_error(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
def test_search_operator_and(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='and')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([18]))
def test_search_operator_or(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='or')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
def test_empty_searches_work(self):
response = self.get_response(search='')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
self.assertEqual(content['meta']['total_count'], 0)
# REGRESSION TESTS
def test_issue_3967(self):
# The API crashed whenever the listing view was called without a site configured
Site.objects.all().delete()
response = self.get_response()
self.assertEqual(response.status_code, 200)
class TestPageDetail(TestCase):
    """Tests for the pages detail endpoint (``/api/v2beta/pages/<id>/``).

    Covers the response structure (id/meta/parent/custom fields), the
    ``fields`` query parameter (add/remove/nested/star syntax) and the
    error responses for invalid field expressions.
    """
    fixtures = ['demosite.json']

    def get_response(self, page_id, **params):
        """GET the detail view for ``page_id`` with ``params`` as the query string."""
        return self.client.get(reverse('wagtailapi_v2:pages:detail', args=(page_id, )), params)

    def test_basic(self):
        """A plain detail request returns the full, correctly-shaped document."""
        response = self.get_response(16)

        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')

        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))

        # Check the id field
        self.assertIn('id', content)
        self.assertEqual(content['id'], 16)

        # Check that the meta section is there
        self.assertIn('meta', content)
        self.assertIsInstance(content['meta'], dict)

        # Check the meta type
        self.assertIn('type', content['meta'])
        self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')

        # Check the meta detail_url
        self.assertIn('detail_url', content['meta'])
        self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v2beta/pages/16/')

        # Check the meta html_url
        self.assertIn('html_url', content['meta'])
        self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')

        # Check the parent field
        self.assertIn('parent', content['meta'])
        self.assertIsInstance(content['meta']['parent'], dict)
        self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'})
        self.assertEqual(content['meta']['parent']['id'], 5)
        self.assertIsInstance(content['meta']['parent']['meta'], dict)
        self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
        self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
        self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/api/v2beta/pages/5/')
        self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')

        # Check that the custom fields are included
        self.assertIn('date', content)
        self.assertIn('body', content)
        self.assertIn('tags', content)
        self.assertIn('feed_image', content)
        self.assertIn('related_links', content)
        self.assertIn('carousel_items', content)

        # Check that the date was serialised properly
        self.assertEqual(content['date'], '2013-12-02')

        # Check that the tags were serialised properly
        self.assertEqual(content['tags'], ['bird', 'wagtail'])

        # Check that the feed image was serialised properly
        self.assertIsInstance(content['feed_image'], dict)
        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'})
        self.assertEqual(content['feed_image']['id'], 7)
        self.assertIsInstance(content['feed_image']['meta'], dict)
        self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'})
        self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
        self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v2beta/images/7/')

        # Check that the feed images' thumbnail was serialised properly
        # (the fixture has no real image file on disk, so rendition generation fails)
        self.assertEqual(content['feed_image_thumbnail'], {
            # This is OK because it tells us it used ImageRenditionField to generate the output
            'error': 'SourceImageIOError'
        })

        # Check that the child relations were serialised properly
        self.assertEqual(content['related_links'], [])
        for carousel_item in content['carousel_items']:
            self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
            self.assertEqual(set(carousel_item['meta'].keys()), {'type'})

    def test_meta_parent_id_doesnt_show_root_page(self):
        """The root page is outside the site, so the homepage reports no parent."""
        response = self.get_response(2)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIsNone(content['meta']['parent'])

    def test_field_ordering(self):
        """Fields in the detail response appear in a stable, documented order."""
        response = self.get_response(16)

        # Decode preserving key order so the order can be asserted.
        # Will crash if the JSON is invalid.
        content = json.loads(response.content.decode('UTF-8'),
                             object_pairs_hook=collections.OrderedDict)

        # Test field order
        field_order = [
            'id',
            'meta',
            'title',
            'body',
            'tags',
            'date',
            'feed_image',
            'feed_image_thumbnail',
            'carousel_items',
            'related_links',
        ]
        self.assertEqual(list(content.keys()), field_order)

    def test_null_foreign_key(self):
        """A null ForeignKey serialises as JSON null, not an error."""
        models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None)

        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))

        # Sanity-check the rest of the document still renders.
        self.assertIn('related_links', content)
        self.assertEqual(content['feed_image'], None)

    # FIELDS

    def test_remove_fields(self):
        """``fields=-title`` removes a default field from the response."""
        response = self.get_response(16, fields='-title')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIn('id', set(content.keys()))
        self.assertNotIn('title', set(content.keys()))

    def test_remove_meta_fields(self):
        """Meta fields can be removed individually with the ``-`` prefix."""
        response = self.get_response(16, fields='-html_url')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIn('detail_url', set(content['meta'].keys()))
        self.assertNotIn('html_url', set(content['meta'].keys()))

    def test_remove_all_meta_fields(self):
        """Removing every meta field drops the ``meta`` section entirely."""
        response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-search_description,-show_in_menus,-parent,-seo_title')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIn('id', set(content.keys()))
        self.assertNotIn('meta', set(content.keys()))

    def test_remove_id_field(self):
        """Even the ``id`` field can be removed from the response."""
        response = self.get_response(16, fields='-id')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIn('title', set(content.keys()))
        self.assertNotIn('id', set(content.keys()))

    def test_remove_all_fields(self):
        """``fields=_,...`` resets the field list, keeping only those named."""
        response = self.get_response(16, fields='_,id,type')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(set(content.keys()), {'id', 'meta'})
        self.assertEqual(set(content['meta'].keys()), {'type'})

    def test_nested_fields(self):
        """Parenthesised sub-fields extend a related object's serialisation."""
        response = self.get_response(16, fields='feed_image(width,height)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_remove_nested_fields(self):
        """The ``-`` prefix also works inside a nested field expression."""
        response = self.get_response(16, fields='feed_image(-title)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'})

    def test_all_nested_fields(self):
        """``(*)`` expands a related object to all of its available fields."""
        response = self.get_response(16, fields='feed_image(*)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_remove_all_nested_fields(self):
        """``(_,...)`` resets a related object's field list to those named."""
        response = self.get_response(16, fields='feed_image(_,id)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(set(content['feed_image'].keys()), {'id'})

    def test_nested_nested_fields(self):
        """Nested field expressions can themselves contain nested expressions."""
        response = self.get_response(16, fields='carousel_items(image(width,height))')
        content = json.loads(response.content.decode('UTF-8'))

        for carousel_item in content['carousel_items']:
            # Note: inline objects default to displaying all fields
            self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
            self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})

    def test_fields_child_relation_is_list(self):
        """Child relations serialise as JSON lists."""
        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))

        self.assertIsInstance(content['related_links'], list)

    def test_fields_foreign_key(self):
        """ForeignKeys serialise as a dict of id, meta and title."""
        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))

        feed_image = content['feed_image']

        self.assertIsInstance(feed_image, dict)
        self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
        self.assertIsInstance(feed_image['id'], int)
        self.assertIsInstance(feed_image['meta'], dict)
        self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
        self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
        self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])

    def test_star_in_wrong_position_gives_error(self):
        """``*`` is only valid as the first item of a fields expression."""
        response = self.get_response(16, fields='title,*')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})

    def test_unknown_nested_fields_give_error(self):
        """Unknown names inside a nested expression are reported with HTTP 400."""
        response = self.get_response(16, fields='feed_image(123,title,abc)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_fields_which_are_not_in_api_fields_gives_error(self):
        """Model fields not exposed through api_fields are treated as unknown."""
        response = self.get_response(16, fields='path')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: path"})

    def test_fields_unknown_field_gives_error(self):
        """Unknown field names are collected and reported together."""
        response = self.get_response(16, fields='123,title,abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_fields_remove_unknown_field_gives_error(self):
        """Removing unknown fields is also an error, reported the same way."""
        response = self.get_response(16, fields='-123,-title,-abc')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})

    def test_nested_fields_on_non_relational_field_gives_error(self):
        """Nested expressions on plain (non-relational) fields are rejected."""
        response = self.get_response(16, fields='title(foo,bar)')
        content = json.loads(response.content.decode('UTF-8'))

        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "'title' does not support nested fields"})
class TestPageDetailWithStreamField(TestCase):
    """Detail-endpoint tests for a page type whose body is a StreamField."""

    fixtures = ['test.json']

    def setUp(self):
        self.homepage = Page.objects.get(url_path='/home/')

    def make_stream_page(self, body):
        """Create and return a StreamPage under the homepage with the given raw body."""
        page = StreamPage(
            title='stream page',
            slug='stream-page',
            body=body
        )
        return self.homepage.add_child(instance=page)

    def test_can_fetch_streamfield_content(self):
        """StreamField blocks serialise as a list of {type, value, id} dicts."""
        page = self.make_stream_page('[{"type": "text", "value": "foo"}]')

        url = reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        resp = self.client.get(url)

        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp['content-type'], 'application/json')

        doc = json.loads(resp.content.decode('utf-8'))

        self.assertIn('id', doc)
        self.assertEqual(doc['id'], page.id)
        self.assertIn('body', doc)
        self.assertEqual(len(doc['body']), 1)

        block = doc['body'][0]
        self.assertEqual(block['type'], 'text')
        self.assertEqual(block['value'], 'foo')
        self.assertTrue(block['id'])

    def test_image_block(self):
        """An image block serialises its value as the raw image id."""
        page = self.make_stream_page('[{"type": "image", "value": 1}]')

        url = reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        doc = json.loads(self.client.get(url).content.decode('utf-8'))

        # ForeignKeys in a StreamField shouldn't be translated into dictionary representation
        block = doc['body'][0]
        self.assertEqual(block['type'], 'image')
        self.assertEqual(block['value'], 1)

    def test_image_block_with_custom_get_api_representation(self):
        """With ?extended=1, the block's custom get_api_representation is used."""
        page = self.make_stream_page('[{"type": "image", "value": 1}]')

        url = '{}?extended=1'.format(
            reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        )
        doc = json.loads(self.client.get(url).content.decode('utf-8'))

        # the custom get_api_representation returns a dict of id and title for the image
        block = doc['body'][0]
        self.assertEqual(block['type'], 'image')
        self.assertEqual(block['value'], {'id': 1, 'title': 'A missing image'})
@override_settings(
    WAGTAILFRONTENDCACHE={
        'varnish': {
            'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
            'LOCATION': 'http://localhost:8000',
        },
    },
    WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.frontend_cache.backends.HTTPBackend.purge')
class TestPageCacheInvalidation(TestCase):
    """Check that publish/unpublish/delete purge a page's API URL from the
    frontend cache, while draft saves do not.

    ``HTTPBackend.purge`` is mocked class-wide, so each test receives the
    mock as its ``purge`` argument.
    """
    fixtures = ['demosite.json']

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Cache-purging signal handlers are not connected by default in tests.
        signal_handlers.register_signal_handlers()

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        signal_handlers.unregister_signal_handlers()

    def test_republish_page_purges(self, purge):
        """Publishing a new revision purges the page's detail URL."""
        Page.objects.get(id=2).save_revision().publish()

        purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')

    def test_unpublish_page_purges(self, purge):
        """Unpublishing a page purges its detail URL."""
        Page.objects.get(id=2).unpublish()

        purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')

    def test_delete_page_purges(self, purge):
        """Deleting a page purges its detail URL."""
        Page.objects.get(id=16).delete()

        purge.assert_any_call('http://api.example.com/api/v2beta/pages/16/')

    def test_save_draft_doesnt_purge(self, purge):
        """Saving a draft revision without publishing must not trigger a purge."""
        Page.objects.get(id=2).save_revision()

        purge.assert_not_called()
| 42.410167 | 172 | 0.666171 | import collections
import json
import mock
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from wagtail.api.v2 import signal_handlers
from wagtail.core.models import Page, Site
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import StreamPage
def get_total_page_count():
return Page.objects.live().public().count() - 1
class TestPageListing(TestCase):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v2:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['items']]
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
self.assertIn('items', content)
self.assertIsInstance(content['items'], list)
for page in content['items']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_unpublished_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count - 1)
def test_private_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
new_total_count = get_total_page_count()
self.assertNotEqual(total_count, new_total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
def test_type_filter_items_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
def test_type_filter_total_count(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], 3)
def test_type_filter_multiple(self):
response = self.get_response(type='demosite.BlogEntryPage,demosite.EventPage')
content = json.loads(response.content.decode('UTF-8'))
blog_page_seen = False
event_page_seen = False
for page in content['items']:
self.assertIn(page['meta']['type'], ['demosite.BlogEntryPage', 'demosite.EventPage'])
if page['meta']['type'] == 'demosite.BlogEntryPage':
blog_page_seen = True
elif page['meta']['type'] == 'demosite.EventPage':
event_page_seen = True
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertTrue(blog_page_seen, "No blog pages were found in the items")
self.assertTrue(event_page_seen, "No event pages were found in the items")
def test_non_existant_type_gives_error(self):
response = self.get_response(type='demosite.IDontExist')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
def test_non_page_type_gives_error(self):
response = self.get_response(type='auth.User')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
def test_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'html_url', 'slug', 'first_published_at'})
def test_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'})
def test_remove_fields(self):
response = self.get_response(fields='-title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
def test_remove_meta_fields(self):
response = self.get_response(fields='-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'slug', 'first_published_at'})
def test_remove_all_meta_fields(self):
response = self.get_response(fields='-type,-detail_url,-slug,-first_published_at,-html_url')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'title'})
def test_remove_id_field(self):
response = self.get_response(fields='-id')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'meta', 'title'})
def test_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'seo_title', 'slug', 'html_url', 'search_description'})
def test_all_fields_then_remove_something(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='*,-title,-date,-seo_title')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'related_links', 'tags', 'carousel_items', 'body', 'feed_image', 'feed_image_thumbnail'})
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url', 'show_in_menus', 'first_published_at', 'slug', 'html_url', 'search_description'})
def test_remove_all_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='_,id,type')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta'})
self.assertEqual(set(page['meta'].keys()), {'type'})
def test_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(width,height)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(-title)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta'})
def test_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(*)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_remove_all_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(_,id)')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page['feed_image'].keys()), {'id'})
def test_nested_nested_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='carousel_items(image(width,height))')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
for carousel_item in page['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
def test_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_fields_foreign_key(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])
def test_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['items']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags', 'title'})
self.assertIsInstance(page['tags'], list)
def test_fields_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
content = json.loads(response.content.decode('UTF-8'))
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['items'][0].keys()), field_order)
def test_star_in_wrong_position_gives_error(self):
response = self.get_response(fields='title,*')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})
def test_unknown_nested_fields_give_error(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='feed_image(123,title,abc)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_parent_field_gives_error(self):
response = self.get_response(fields='parent')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: parent"})
def test_fields_without_type_gives_error(self):
response = self.get_response(fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: related_links"})
def test_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='path')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: path"})
def test_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_fields_remove_unknown_field_gives_error(self):
response = self.get_response(fields='-123,-title,-abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
def test_nested_fields_on_non_relational_field_gives_error(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title(foo,bar)')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "'title' does not support nested fields"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='Home page')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_filtering_exact_filter_on_specific_field(self):
response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_id(self):
response = self.get_response(id=16)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_boolean(self):
response = self.get_response(show_in_menus='false')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [8, 9, 16, 18, 19, 17])
def test_filtering_doesnt_work_on_specific_fields_without_type(self):
response = self.get_response(date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"})
def test_filtering_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18])
def test_filtering_multiple_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
def test_filtering_int_validation(self):
response = self.get_response(id='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for id (invalid literal for int() with base 10: 'abc')"})
def test_filtering_boolean_validation(self):
response = self.get_response(show_in_menus='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "field filter error. 'abc' is not a valid value for show_in_menus (expected 'true' or 'false', got 'abc')"})
    # CHILD OF FILTER
    def test_child_of_filter(self):
        # child_of returns only the direct children of the given page id.
        response = self.get_response(child_of=5)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])
    def test_child_of_root(self):
        # "root" gets children of the homepage of the current site
        response = self.get_response(child_of='root')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [4, 5, 6, 20, 12])
    def test_child_of_with_type(self):
        # child_of combines with a type filter; page 5 has no EventPage children.
        response = self.get_response(type='demosite.EventPage', child_of=5)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [])
    def test_child_of_unknown_page_gives_error(self):
        response = self.get_response(child_of=1000)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "parent page doesn't exist"})
    def test_child_of_not_integer_gives_error(self):
        response = self.get_response(child_of='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "child_of must be a positive integer"})
    def test_child_of_page_thats_not_in_same_site_gives_error(self):
        # Page 1 exists but is outside the current site, so the API reports
        # it as nonexistent rather than leaking its presence.
        response = self.get_response(child_of=1)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "parent page doesn't exist"})
    # DESCENDANT OF FILTER
    def test_descendant_of_filter(self):
        # descendant_of returns all pages below the given id, at any depth.
        response = self.get_response(descendant_of=6)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
    def test_descendant_of_root(self):
        response = self.get_response(descendant_of='root')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
    def test_descendant_of_with_type(self):
        # NOTE(review): other tests in this class use the 'demosite.' app
        # label — confirm 'tests.EventPage' is intentional here.
        response = self.get_response(type='tests.EventPage', descendant_of=6)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [])
    def test_descendant_of_unknown_page_gives_error(self):
        response = self.get_response(descendant_of=1000)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "ancestor page doesn't exist"})
    def test_descendant_of_not_integer_gives_error(self):
        response = self.get_response(descendant_of='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "descendant_of must be a positive integer"})
    def test_descendant_of_page_thats_not_in_same_site_gives_error(self):
        # Root page is not in any site, so pretend it doesn't exist
        response = self.get_response(descendant_of=1)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "ancestor page doesn't exist"})
    def test_descendant_of_when_filtering_by_child_of_gives_error(self):
        # The two tree filters are mutually exclusive.
        response = self.get_response(descendant_of=6, child_of=5)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"})
    # ORDERING
    def test_ordering_default(self):
        # With no "order" parameter the pages come back in tree order.
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
    def test_ordering_by_title(self):
        response = self.get_response(order='title')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15])
    def test_ordering_by_title_backwards(self):
        # A leading '-' reverses the ordering.
        response = self.get_response(order='-title')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21])
    def test_ordering_by_random(self):
        # NOTE(review): two random shuffles can, rarely, produce the same
        # order, which would make this test flaky.
        response_1 = self.get_response(order='random')
        content_1 = json.loads(response_1.content.decode('UTF-8'))
        page_id_list_1 = self.get_page_id_list(content_1)
        response_2 = self.get_response(order='random')
        content_2 = json.loads(response_2.content.decode('UTF-8'))
        page_id_list_2 = self.get_page_id_list(content_2)
        self.assertNotEqual(page_id_list_1, page_id_list_2)
    def test_ordering_by_random_backwards_gives_error(self):
        # '-random' is not recognised as a field at all.
        response = self.get_response(order='-random')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
    def test_ordering_by_random_with_offset_gives_error(self):
        response = self.get_response(order='random', offset=10)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "random ordering with offset is not supported"})
    def test_ordering_default_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])
    def test_ordering_by_title_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage', order='title')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [19, 16, 18])
    def test_ordering_by_specific_field_with_type(self):
        # Ordering by a field that only exists on the filtered page type.
        response = self.get_response(type='demosite.BlogEntryPage', order='date')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [16, 18, 19])
    def test_ordering_by_unknown_field_gives_error(self):
        response = self.get_response(order='not_a_field')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
    # LIMIT
    def test_limit_only_two_items_returned(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(len(content['items']), 2)
    def test_limit_total_count(self):
        response = self.get_response(limit=2)
        content = json.loads(response.content.decode('UTF-8'))
        # The total count must not be affected by "limit"
        self.assertEqual(content['meta']['total_count'], get_total_page_count())
    def test_limit_not_integer_gives_error(self):
        response = self.get_response(limit='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit must be a positive integer"})
    def test_limit_too_high_gives_error(self):
        # The default maximum limit is 20 (see WAGTAILAPI_LIMIT_MAX tests below).
        response = self.get_response(limit=1000)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 20"})
    @override_settings(WAGTAILAPI_LIMIT_MAX=None)
    def test_limit_max_none_gives_no_errors(self):
        # A None maximum disables the cap entirely.
        response = self.get_response(limit=1000000)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(content['items']), get_total_page_count())
    @override_settings(WAGTAILAPI_LIMIT_MAX=10)
    def test_limit_maximum_can_be_changed(self):
        response = self.get_response(limit=20)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "limit cannot be higher than 10"})
    @override_settings(WAGTAILAPI_LIMIT_MAX=2)
    def test_limit_default_changes_with_max(self):
        # The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
        # the default should change accordingly.
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(len(content['items']), 2)
    # OFFSET
    def test_offset_5_usually_appears_5th_in_list(self):
        # Baseline for the next test: page 5 is at index 4 without an offset.
        response = self.get_response()
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list.index(5), 4)
    def test_offset_5_moves_after_offset(self):
        # With offset=4 the same page becomes the first item.
        response = self.get_response(offset=4)
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list.index(5), 0)
    def test_offset_total_count(self):
        response = self.get_response(offset=10)
        content = json.loads(response.content.decode('UTF-8'))
        # The total count must not be affected by "offset"
        self.assertEqual(content['meta']['total_count'], get_total_page_count())
    def test_offset_not_integer_gives_error(self):
        response = self.get_response(offset='abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "offset must be a positive integer"})
    # SEARCH
    def test_search_for_blog(self):
        response = self.get_response(search='blog')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        # Check that the items are the blog index and three blog pages
        self.assertEqual(set(page_id_list), set([5, 16, 18, 19]))
    def test_search_with_type(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(set(page_id_list), set([16, 18, 19]))
    def test_search_with_filter(self):
        # Field filters and ordering can be combined with a search query
        # as long as the fields are indexed.
        response = self.get_response(title="Another blog post", search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [19])
    def test_search_with_filter_on_non_filterable_field(self):
        response = self.get_response(type='demosite.BlogEntryPage', body="foo", search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {
            'message': "cannot filter by 'body' while searching (field is not indexed)"
        })
    def test_search_with_order(self):
        response = self.get_response(search='blog', order='title')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(page_id_list, [19, 5, 16, 18])
    def test_search_with_order_on_non_filterable_field(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog', order='body')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {
            'message': "cannot order by 'body' while searching (field is not indexed)"
        })
    @override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
    def test_search_when_disabled_gives_error(self):
        response = self.get_response(search='blog')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "search is disabled"})
    def test_search_when_filtering_by_tag_gives_error(self):
        response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
    def test_search_operator_and(self):
        # 'and' requires every term to match, narrowing the result set.
        response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='and')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(set(page_id_list), set([18]))
    def test_search_operator_or(self):
        # 'or' matches any term, widening the result set.
        response = self.get_response(type='demosite.BlogEntryPage', search='blog again', search_operator='or')
        content = json.loads(response.content.decode('UTF-8'))
        page_id_list = self.get_page_id_list(content)
        self.assertEqual(set(page_id_list), set([16, 18, 19]))
    def test_empty_searches_work(self):
        # An empty query is valid and returns an empty result set, not an error.
        response = self.get_response(search='')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')
        self.assertEqual(content['meta']['total_count'], 0)
    # REGRESSION TESTS
    def test_issue_3967(self):
        # The API crashed whenever the listing view was called without a site configured
        # Reproduce by deleting every Site and expecting a normal 200 response.
        Site.objects.all().delete()
        response = self.get_response()
        self.assertEqual(response.status_code, 200)
class TestPageDetail(TestCase):
    """Tests for the page detail endpoint (wagtailapi_v2:pages:detail)."""
    fixtures = ['demosite.json']
    def get_response(self, page_id, **params):
        # Resolve the detail URL for the given page id and GET it with the
        # supplied query parameters.
        return self.client.get(reverse('wagtailapi_v2:pages:detail', args=(page_id, )), params)
    def test_basic(self):
        # End-to-end check of the full serialised payload for page 16.
        response = self.get_response(16)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['Content-type'], 'application/json')
        # Will crash if the JSON is invalid
        content = json.loads(response.content.decode('UTF-8'))
        # Check the id field
        self.assertIn('id', content)
        self.assertEqual(content['id'], 16)
        # Check that the meta section is there
        self.assertIn('meta', content)
        self.assertIsInstance(content['meta'], dict)
        # Check the meta type
        self.assertIn('type', content['meta'])
        self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
        # Check the meta detail_url
        self.assertIn('detail_url', content['meta'])
        self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v2beta/pages/16/')
        # Check the meta html_url
        self.assertIn('html_url', content['meta'])
        self.assertEqual(content['meta']['html_url'], 'http://localhost/blog-index/blog-post/')
        # Check the parent field
        self.assertIn('parent', content['meta'])
        self.assertIsInstance(content['meta']['parent'], dict)
        self.assertEqual(set(content['meta']['parent'].keys()), {'id', 'meta', 'title'})
        self.assertEqual(content['meta']['parent']['id'], 5)
        self.assertIsInstance(content['meta']['parent']['meta'], dict)
        self.assertEqual(set(content['meta']['parent']['meta'].keys()), {'type', 'detail_url', 'html_url'})
        self.assertEqual(content['meta']['parent']['meta']['type'], 'demosite.BlogIndexPage')
        self.assertEqual(content['meta']['parent']['meta']['detail_url'], 'http://localhost/api/v2beta/pages/5/')
        self.assertEqual(content['meta']['parent']['meta']['html_url'], 'http://localhost/blog-index/')
        # Check that the custom fields are included
        self.assertIn('date', content)
        self.assertIn('body', content)
        self.assertIn('tags', content)
        self.assertIn('feed_image', content)
        self.assertIn('related_links', content)
        self.assertIn('carousel_items', content)
        # Check that the date was serialised properly
        self.assertEqual(content['date'], '2013-12-02')
        # Check that the tags were serialised properly
        self.assertEqual(content['tags'], ['bird', 'wagtail'])
        # Check that the feed image was serialised properly
        self.assertIsInstance(content['feed_image'], dict)
        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title'})
        self.assertEqual(content['feed_image']['id'], 7)
        self.assertIsInstance(content['feed_image']['meta'], dict)
        self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'})
        self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
        self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v2beta/images/7/')
        # Check that the feed images' thumbnail was serialised properly
        # (the fixture has no real image file, hence the error payload)
        self.assertEqual(content['feed_image_thumbnail'], {
            'error': 'SourceImageIOError'
        })
        self.assertEqual(content['related_links'], [])
        for carousel_item in content['carousel_items']:
            self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'embed_url', 'link', 'caption', 'image'})
            self.assertEqual(set(carousel_item['meta'].keys()), {'type'})
    def test_meta_parent_id_doesnt_show_root_page(self):
        # Page 2's parent is the tree root, which the API must not expose.
        response = self.get_response(2)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIsNone(content['meta']['parent'])
def test_field_ordering(self):
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'body',
'tags',
'date',
'feed_image',
'feed_image_thumbnail',
'carousel_items',
'related_links',
]
self.assertEqual(list(content.keys()), field_order)
    def test_null_foreign_key(self):
        # Null out the FK directly in the database, then check it serialises
        # as JSON null instead of erroring.
        models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None)
        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('related_links', content)
        self.assertEqual(content['feed_image'], None)
    # FIELDS PARAMETER
    def test_remove_fields(self):
        # A '-' prefix removes a field from the payload.
        response = self.get_response(16, fields='-title')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('id', set(content.keys()))
        self.assertNotIn('title', set(content.keys()))
    def test_remove_meta_fields(self):
        response = self.get_response(16, fields='-html_url')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('detail_url', set(content['meta'].keys()))
        self.assertNotIn('html_url', set(content['meta'].keys()))
    def test_remove_all_meta_fields(self):
        # Removing every meta field removes the 'meta' section entirely.
        response = self.get_response(16, fields='-type,-detail_url,-slug,-first_published_at,-html_url,-search_description,-show_in_menus,-parent,-seo_title')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('id', set(content.keys()))
        self.assertNotIn('meta', set(content.keys()))
    def test_remove_id_field(self):
        response = self.get_response(16, fields='-id')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIn('title', set(content.keys()))
        self.assertNotIn('id', set(content.keys()))
    def test_remove_all_fields(self):
        # '_' resets the field list; only the explicitly named fields remain.
        response = self.get_response(16, fields='_,id,type')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(set(content.keys()), {'id', 'meta'})
        self.assertEqual(set(content['meta'].keys()), {'type'})
    def test_nested_fields(self):
        # Parenthesised sub-fields expand a related object inline.
        response = self.get_response(16, fields='feed_image(width,height)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
    def test_remove_nested_fields(self):
        response = self.get_response(16, fields='feed_image(-title)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'})
    def test_all_nested_fields(self):
        # '*' selects every available sub-field.
        response = self.get_response(16, fields='feed_image(*)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
    def test_remove_all_nested_fields(self):
        response = self.get_response(16, fields='feed_image(_,id)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(set(content['feed_image'].keys()), {'id'})
    def test_nested_nested_fields(self):
        # Nesting can recurse through child relations into their own relations.
        response = self.get_response(16, fields='carousel_items(image(width,height))')
        content = json.loads(response.content.decode('UTF-8'))
        for carousel_item in content['carousel_items']:
            self.assertEqual(set(carousel_item.keys()), {'id', 'meta', 'image', 'embed_url', 'caption', 'link'})
            self.assertEqual(set(carousel_item['image'].keys()), {'id', 'meta', 'title', 'width', 'height'})
    def test_fields_child_relation_is_list(self):
        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))
        self.assertIsInstance(content['related_links'], list)
    def test_fields_foreign_key(self):
        # Foreign keys serialise as a summary dict with id/meta/title.
        response = self.get_response(16)
        content = json.loads(response.content.decode('UTF-8'))
        feed_image = content['feed_image']
        self.assertIsInstance(feed_image, dict)
        self.assertEqual(set(feed_image.keys()), {'id', 'meta', 'title'})
        self.assertIsInstance(feed_image['id'], int)
        self.assertIsInstance(feed_image['meta'], dict)
        self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
        self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
        self.assertEqual(feed_image['meta']['detail_url'], 'http://localhost/api/v2beta/images/%d/' % feed_image['id'])
    # FIELDS ERROR HANDLING
    def test_star_in_wrong_position_gives_error(self):
        response = self.get_response(16, fields='title,*')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "fields error: '*' must be in the first position"})
    def test_unknown_nested_fields_give_error(self):
        response = self.get_response(16, fields='feed_image(123,title,abc)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})
    def test_fields_which_are_not_in_api_fields_gives_error(self):
        # 'path' exists on the model but is not exposed through the API.
        response = self.get_response(16, fields='path')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: path"})
    def test_fields_unknown_field_gives_error(self):
        response = self.get_response(16, fields='123,title,abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})
    def test_fields_remove_unknown_field_gives_error(self):
        response = self.get_response(16, fields='-123,-title,-abc')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "unknown fields: 123, abc"})
    def test_nested_fields_on_non_relational_field_gives_error(self):
        response = self.get_response(16, fields='title(foo,bar)')
        content = json.loads(response.content.decode('UTF-8'))
        self.assertEqual(response.status_code, 400)
        self.assertEqual(content, {'message': "'title' does not support nested fields"})
class TestPageDetailWithStreamField(TestCase):
    """Detail endpoint behaviour for pages whose body is a StreamField."""
    fixtures = ['test.json']
    def setUp(self):
        self.homepage = Page.objects.get(url_path='/home/')
    def make_stream_page(self, body):
        """Create a StreamPage with the given raw body JSON under the homepage."""
        page = StreamPage(title='stream page', slug='stream-page', body=body)
        return self.homepage.add_child(instance=page)
    def test_can_fetch_streamfield_content(self):
        # A single text block should round-trip through the API intact.
        page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
        url = reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-type'], 'application/json')
        content = json.loads(response.content.decode('utf-8'))
        self.assertIn('id', content)
        self.assertEqual(content['id'], page.id)
        self.assertIn('body', content)
        self.assertEqual(len(content['body']), 1)
        first_block = content['body'][0]
        self.assertEqual(first_block['type'], 'text')
        self.assertEqual(first_block['value'], 'foo')
        self.assertTrue(first_block['id'])
    def test_image_block(self):
        # By default an image block serialises as the bare image id.
        page = self.make_stream_page('[{"type": "image", "value": 1}]')
        url = reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        response = self.client.get(url)
        content = json.loads(response.content.decode('utf-8'))
        first_block = content['body'][0]
        self.assertEqual(first_block['type'], 'image')
        self.assertEqual(first_block['value'], 1)
    def test_image_block_with_custom_get_api_representation(self):
        page = self.make_stream_page('[{"type": "image", "value": 1}]')
        url = '{}?extended=1'.format(
            reverse('wagtailapi_v2:pages:detail', args=(page.id, ))
        )
        response = self.client.get(url)
        content = json.loads(response.content.decode('utf-8'))
        # the custom get_api_representation returns a dict of id and title for the image
        first_block = content['body'][0]
        self.assertEqual(first_block['type'], 'image')
        self.assertEqual(first_block['value'], {'id': 1, 'title': 'A missing image'})
@override_settings(
    WAGTAILFRONTENDCACHE={
        'varnish': {
            'BACKEND': 'wagtail.contrib.frontend_cache.backends.HTTPBackend',
            'LOCATION': 'http://localhost:8000',
        },
    },
    WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.frontend_cache.backends.HTTPBackend.purge')
class TestPageCacheInvalidation(TestCase):
    """Page lifecycle events must purge the page's API detail URL from the
    front-end cache.

    The class-level ``mock.patch`` injects the mocked ``purge`` callable as
    an extra argument into every test method.
    """
    fixtures = ['demosite.json']
    @classmethod
    def setUpClass(cls):
        # Register the purge signal handlers once for the whole class; they
        # are removed again in tearDownClass so other tests are unaffected.
        super(TestPageCacheInvalidation, cls).setUpClass()
        signal_handlers.register_signal_handlers()
    @classmethod
    def tearDownClass(cls):
        super(TestPageCacheInvalidation, cls).tearDownClass()
        signal_handlers.unregister_signal_handlers()
    def test_republish_page_purges(self, purge):
        Page.objects.get(id=2).save_revision().publish()
        purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
    def test_unpublish_page_purges(self, purge):
        Page.objects.get(id=2).unpublish()
        purge.assert_any_call('http://api.example.com/api/v2beta/pages/2/')
    def test_delete_page_purges(self, purge):
        Page.objects.get(id=16).delete()
        purge.assert_any_call('http://api.example.com/api/v2beta/pages/16/')
    def test_save_draft_doesnt_purge(self, purge):
        # Saving a revision without publishing must not trigger a purge.
        Page.objects.get(id=2).save_revision()
        purge.assert_not_called()
| true | true |
f714e5ccca4b369e0fbd09fb0a4e6218788b9ed7 | 3,513 | py | Python | google_or_tools/coloring_ip_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 279 | 2015-01-10T09:55:35.000Z | 2022-03-28T02:34:03.000Z | google_or_tools/coloring_ip_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 10 | 2017-10-05T15:48:50.000Z | 2021-09-20T12:06:52.000Z | google_or_tools/coloring_ip_sat.py | tias/hakank | 87b7f180c9393afce440864eb9e5fb119bdec1a4 | [
"MIT"
] | 83 | 2015-01-20T03:44:00.000Z | 2022-03-13T23:53:06.000Z | # Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple coloring problem (MIP approach) in OR-tools CP-SAT Solver.
Inspired by the GLPK:s model color.mod
'''
COLOR, Graph Coloring Problem
Written in GNU MathProg by Andrew Makhorin <mao@mai2.rcnet.ru>
Given an undirected loopless graph G = (V, E), where V is a set of
nodes, E <= V x V is a set of arcs, the Graph Coloring Problem is to
find a mapping (coloring) F: V -> C, where C = {1, 2, ... } is a set
of colors whose cardinality is as small as possible, such that
F(i) != F(j) for every arc (i,j) in E, that is adjacent nodes must
be assigned different colors.
'''
This is a port of my old OR-tools CP solver coloring_ip.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tols models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
def main():
  """Solve a small graph-colouring instance (myciel3) as a 0/1 integer model.

  Minimises the number of colours used while giving adjacent nodes
  different colours, then prints the assignment and solver statistics.
  """
  model = cp.CpModel()

  # Max number of colors [we know that 4 suffices for normal maps].
  nc = 5

  # Number of nodes and the node set.
  n = 11
  V = list(range(n))

  num_edges = 20

  # Edge list of the myciel3.col instance from
  # http://mat.gsia.cmu.edu/COLOR/instances.html
  # Note: 1-based; adjusted to 0-based where it is used below.
  E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
       [3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
       [8, 11], [9, 11], [10, 11]]

  # x[i, c] == 1 means that node i is assigned color c.
  x = {}
  for v in V:
    for c in range(nc):
      x[v, c] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, c))

  # u[c] == 1 means that color c is used, i.e. assigned to some node.
  u = [model.NewIntVar(0, 1, 'u[%i]' % c) for c in range(nc)]

  # Number of colors used — the quantity to minimize.
  num_colors = model.NewIntVar(0, nc, "num_colors")
  model.Add(num_colors == sum(u))

  # Each node must be assigned exactly one color.
  for i in V:
    model.Add(sum([x[i, c] for c in range(nc)]) == 1)

  # Adjacent nodes cannot share a color, and any assigned color marks u[c]
  # as used (edge endpoints adjusted to 0-based here).
  for i in range(num_edges):
    for c in range(nc):
      model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])

  model.Minimize(num_colors)

  solver = cp.CpSolver()
  status = solver.Solve(model)

  # Accept FEASIBLE as well as OPTIMAL so that a time-limited run still
  # reports its best solution instead of printing nothing.
  if status in (cp.OPTIMAL, cp.FEASIBLE):
    print()
    print('number of colors:', solver.Value(num_colors))
    print('colors used:', [solver.Value(u[c]) for c in range(nc)])
    print()

    for v in V:
      print('v%i' % v, ' color ', end=' ')
      for c in range(nc):
        if solver.Value(x[v, c]) == 1:
          print(c)

  print()
  print('NumConflicts:', solver.NumConflicts())
  print('NumBranches:', solver.NumBranches())
  print('WallTime:', solver.WallTime())


if __name__ == '__main__':
  main()
| 27.232558 | 78 | 0.63507 |
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
def main():
model = cp.CpModel()
nc = 5
n = 11
V = list(range(n))
num_edges = 20
E = [[1, 2], [1, 4], [1, 7], [1, 9], [2, 3], [2, 6], [2, 8], [3, 5], [3, 7],
[3, 10], [4, 5], [4, 6], [4, 10], [5, 8], [5, 9], [6, 11], [7, 11],
[8, 11], [9, 11], [10, 11]]
x = {}
for v in V:
for j in range(nc):
x[v, j] = model.NewIntVar(0, 1, 'v[%i,%i]' % (v, j))
u = [model.NewIntVar(0, 1, 'u[%i]' % i) for i in range(nc)]
num_colors = model.NewIntVar(0,nc, "num_colors")
model.Add(num_colors == sum(u))
for i in V:
model.Add(sum([x[i, c] for c in range(nc)]) == 1)
for i in range(num_edges):
for c in range(nc):
model.Add(x[E[i][0] - 1, c] + x[E[i][1] - 1, c] <= u[c])
model.Minimize(num_colors)
solver = cp.CpSolver()
status = solver.Solve(model)
if status == cp.OPTIMAL:
print()
print('number of colors:', solver.Value(num_colors))
print('colors used:', [solver.Value(u[i]) for i in range(nc)])
print()
for v in V:
print('v%i' % v, ' color ', end=' ')
for c in range(nc):
if solver.Value(x[v, c]) == 1:
print(c)
print()
print('NumConflicts:', solver.NumConflicts())
print('NumBranches:', solver.NumBranches())
print('WallTime:', solver.WallTime())
if __name__ == '__main__':
main()
| true | true |
f714e6ac55f4e95ed142d9f2bf5143a5d4edabf6 | 1,179 | py | Python | utils/summaries.py | lzhmarkk/pytorch-deeplab-xception | 63f699214e4095a4edda21173012cc29e53125b3 | [
"MIT"
] | 2,766 | 2018-06-15T11:30:06.000Z | 2022-03-30T08:22:29.000Z | utils/summaries.py | lzhmarkk/pytorch-deeplab-xception | 63f699214e4095a4edda21173012cc29e53125b3 | [
"MIT"
] | 211 | 2018-06-29T07:02:02.000Z | 2022-03-25T03:38:19.000Z | utils/summaries.py | lzhmarkk/pytorch-deeplab-xception | 63f699214e4095a4edda21173012cc29e53125b3 | [
"MIT"
] | 867 | 2018-07-03T10:09:34.000Z | 2022-03-31T09:52:40.000Z | import os
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence
class TensorboardSummary(object):
    """Creates a TensorboardX writer and logs segmentation visualisations."""

    def __init__(self, directory):
        # Directory under which event files will be written.
        self.directory = directory

    def create_summary(self):
        """Return a SummaryWriter logging into ``self.directory``."""
        return SummaryWriter(log_dir=os.path.join(self.directory))

    def visualize_image(self, writer, dataset, image, target, output, global_step):
        """Log input images, predicted masks and ground-truth masks as grids.

        At most the first three samples of the batch are shown per grid.
        NOTE(review): assumes ``output`` holds per-class scores in dim 1 and
        ``target`` has a singleton channel dim — confirm with the caller.
        """
        input_grid = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
        writer.add_image('Image', input_grid, global_step)
        pred_masks = decode_seg_map_sequence(
            torch.max(output[:3], 1)[1].detach().cpu().numpy(), dataset=dataset)
        pred_grid = make_grid(pred_masks, 3, normalize=False, range=(0, 255))
        writer.add_image('Predicted label', pred_grid, global_step)
        gt_masks = decode_seg_map_sequence(
            torch.squeeze(target[:3], 1).detach().cpu().numpy(), dataset=dataset)
        gt_grid = make_grid(gt_masks, 3, normalize=False, range=(0, 255))
        writer.add_image('Groundtruth label', gt_grid, global_step)
import torch
from torchvision.utils import make_grid
from tensorboardX import SummaryWriter
from dataloaders.utils import decode_seg_map_sequence
class TensorboardSummary(object):
def __init__(self, directory):
self.directory = directory
def create_summary(self):
writer = SummaryWriter(log_dir=os.path.join(self.directory))
return writer
def visualize_image(self, writer, dataset, image, target, output, global_step):
grid_image = make_grid(image[:3].clone().cpu().data, 3, normalize=True)
writer.add_image('Image', grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.max(output[:3], 1)[1].detach().cpu().numpy(),
dataset=dataset), 3, normalize=False, range=(0, 255))
writer.add_image('Predicted label', grid_image, global_step)
grid_image = make_grid(decode_seg_map_sequence(torch.squeeze(target[:3], 1).detach().cpu().numpy(),
dataset=dataset), 3, normalize=False, range=(0, 255))
writer.add_image('Groundtruth label', grid_image, global_step) | true | true |
f714e7fafd9de41aaacfbf8d84f6f21e60c66856 | 3,410 | py | Python | app/__init__.py | brandiqa/microblog-pytest | 652429fb440dc9e9f912b8376d3587641ab14348 | [
"MIT"
] | null | null | null | app/__init__.py | brandiqa/microblog-pytest | 652429fb440dc9e9f912b8376d3587641ab14348 | [
"MIT"
] | 1 | 2021-06-02T00:35:14.000Z | 2021-06-02T00:35:14.000Z | app/__init__.py | brandiqa/microblog-pytest | 652429fb440dc9e9f912b8376d3587641ab14348 | [
"MIT"
] | null | null | null | import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
# Flask extensions are created unbound (application-factory pattern); each is
# attached to a concrete app inside create_app() via init_app().
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
# Endpoint anonymous users are redirected to by @login_required.
login.login_view = 'auth.login'
# Lazy translation so the message is rendered in the request's locale.
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
def create_app(config_class=Config):
    """Application factory: build and configure a Flask app instance.

    config_class: configuration object whose attributes are loaded into
    ``app.config``.  Returns the fully wired ``Flask`` app.
    """
    app = Flask(__name__)
    app.config.from_object(config_class)
    # Bind the module-level extension singletons to this app.
    db.init_app(app)
    migrate.init_app(app, db)
    login.init_app(app)
    mail.init_app(app)
    bootstrap.init_app(app)
    moment.init_app(app)
    babel.init_app(app)
    # Optional full-text search backend; disabled when the URL is unset.
    app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
        if app.config['ELASTICSEARCH_URL'] else None
    # Redis-backed background task queue.
    app.redis = Redis.from_url(app.config['REDIS_URL'])
    app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)
    # Blueprints are imported here (not at module top) to avoid circular
    # imports with the extension singletons above.
    from app.errors import bp as errors_bp
    app.register_blueprint(errors_bp)
    from app.auth import bp as auth_bp
    app.register_blueprint(auth_bp, url_prefix='/auth')
    from app.main import bp as main_bp
    app.register_blueprint(main_bp)
    from app.api import bp as api_bp
    app.register_blueprint(api_bp, url_prefix='/api')
    @app.route("/hello")
    def hello():
        return "Hello, World!"
    # Production-only logging: email on errors, file or stdout for info.
    if not app.debug and not app.testing:
        if app.config['MAIL_SERVER']:
            auth = None
            if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
                auth = (app.config['MAIL_USERNAME'],
                        app.config['MAIL_PASSWORD'])
            secure = None
            if app.config['MAIL_USE_TLS']:
                # Empty tuple enables TLS with default parameters.
                secure = ()
            mail_handler = SMTPHandler(
                mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
                fromaddr='no-reply@' + app.config['MAIL_SERVER'],
                toaddrs=app.config['ADMINS'], subject='Microblog Failure',
                credentials=auth, secure=secure)
            mail_handler.setLevel(logging.ERROR)
            app.logger.addHandler(mail_handler)
        if app.config['LOG_TO_STDOUT']:
            stream_handler = logging.StreamHandler()
            stream_handler.setLevel(logging.INFO)
            app.logger.addHandler(stream_handler)
        else:
            if not os.path.exists('logs'):
                os.mkdir('logs')
            file_handler = RotatingFileHandler('logs/microblog.log',
                                               maxBytes=10240, backupCount=10)
            file_handler.setFormatter(logging.Formatter(
                '%(asctime)s %(levelname)s: %(message)s '
                '[in %(pathname)s:%(lineno)d]'))
            file_handler.setLevel(logging.INFO)
            app.logger.addHandler(file_handler)
            app.logger.setLevel(logging.INFO)
        app.logger.info('Microblog startup')
    return app
@babel.localeselector
def get_locale():
    """Select the best supported locale from the request's Accept-Language."""
    supported = current_app.config['LANGUAGES']
    return request.accept_languages.best_match(supported)
from app import models
| 32.788462 | 79 | 0.660411 | import logging
from logging.handlers import SMTPHandler, RotatingFileHandler
import os
from flask import Flask, request, current_app
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_login import LoginManager
from flask_mail import Mail
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_babel import Babel, lazy_gettext as _l
from elasticsearch import Elasticsearch
from redis import Redis
import rq
from config import Config
db = SQLAlchemy()
migrate = Migrate()
login = LoginManager()
login.login_view = 'auth.login'
login.login_message = _l('Please log in to access this page.')
mail = Mail()
bootstrap = Bootstrap()
moment = Moment()
babel = Babel()
def create_app(config_class=Config):
app = Flask(__name__)
app.config.from_object(config_class)
db.init_app(app)
migrate.init_app(app, db)
login.init_app(app)
mail.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
babel.init_app(app)
app.elasticsearch = Elasticsearch([app.config['ELASTICSEARCH_URL']]) \
if app.config['ELASTICSEARCH_URL'] else None
app.redis = Redis.from_url(app.config['REDIS_URL'])
app.task_queue = rq.Queue('microblog-tasks', connection=app.redis)
from app.errors import bp as errors_bp
app.register_blueprint(errors_bp)
from app.auth import bp as auth_bp
app.register_blueprint(auth_bp, url_prefix='/auth')
from app.main import bp as main_bp
app.register_blueprint(main_bp)
from app.api import bp as api_bp
app.register_blueprint(api_bp, url_prefix='/api')
@app.route("/hello")
def hello():
return "Hello, World!"
if not app.debug and not app.testing:
if app.config['MAIL_SERVER']:
auth = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
auth = (app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='Microblog Failure',
credentials=auth, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if app.config['LOG_TO_STDOUT']:
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
app.logger.addHandler(stream_handler)
else:
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler('logs/microblog.log',
maxBytes=10240, backupCount=10)
file_handler.setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('Microblog startup')
return app
@babel.localeselector
def get_locale():
return request.accept_languages.best_match(current_app.config['LANGUAGES'])
from app import models
| true | true |
f714e80b7cf0f0a4bbd27f451d6c99bb727e414c | 863 | py | Python | Ninja/Leetcode/88_Merge_Sorted_Array.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 61 | 2015-02-03T20:25:55.000Z | 2021-05-17T19:33:40.000Z | Ninja/Leetcode/88_Merge_Sorted_Array.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | null | null | null | Ninja/Leetcode/88_Merge_Sorted_Array.py | cyandterry/Python-Study | b40e6c4db10da417e72247f61146f7570621106a | [
"MIT"
] | 37 | 2015-02-04T07:12:52.000Z | 2020-05-16T18:47:16.000Z | """
Given two sorted integer arrays A and B, merge B into A as one sorted array.
Note:
You may assume that A has enough space (size that is greater or equal to m + n) to hold additional elements from B. The number of elements initialized in A and B are m and n respectively.
"""
class Solution:
    """LeetCode 88: merge sorted array B into sorted array A, in place."""

    # @param A a list of integers (length >= m + n; first m entries sorted)
    # @param m an integer, count of valid entries in A
    # @param B a list of integers, sorted
    # @param n an integer, length of B
    # @return nothing -- A is modified in place
    def merge(self, A, m, B, n):
        # Fill A from the back so no unconsumed entry is overwritten.
        write = m + n - 1
        a, b = m - 1, n - 1
        while b >= 0:
            if a >= 0 and A[a] > B[b]:
                A[write] = A[a]
                a -= 1
            else:
                A[write] = B[b]
                b -= 1
            write -= 1
        # Any leftover A entries are already in their final positions.
# Note: only leftover B entries need copying at the end; leftover A entries
# are already in place.
| 27.83871 | 187 | 0.479722 |
class Solution:
def merge(self, A, m, B, n):
i = m - 1
j = n - 1
x = m + n - 1
while i>=0 and j>=0:
if A[i] > B[j]:
A[x] = A[i]
i -= 1
else:
A[x] = B[j]
j -= 1
x -= 1
while j>=0:
A[x] = B[j]
x -= 1
j -= 1
| true | true |
f714e82ca1013c68e6fdf12798491074bf08099a | 13,720 | py | Python | jirafs/migrations.py | mcepl/jirafs | abe18222b8bbfb23877d176bab966809556a9637 | [
"MIT"
] | null | null | null | jirafs/migrations.py | mcepl/jirafs | abe18222b8bbfb23877d176bab966809556a9637 | [
"MIT"
] | null | null | null | jirafs/migrations.py | mcepl/jirafs | abe18222b8bbfb23877d176bab966809556a9637 | [
"MIT"
] | null | null | null | import json
import os
import shutil
import subprocess
from six.moves.urllib import parse
from . import utils
from .exceptions import GitCommandError
def set_repo_version(repo, version):
    """Record the folder's schema version on disk and commit the bump."""
    with open(repo.get_metadata_path('version'), 'w') as out:
        out.write(str(version))
    # -f: the version file may be matched by an ignore rule.
    repo.run_git_command(
        'add', '-f', repo.get_metadata_path('version'), failure_ok=True,
    )
    repo.run_git_command(
        'commit', '-m', 'Upgraded Repository to v%s' % version, failure_ok=True
    )
def migration_0002(repo, **kwargs):
    """ Creates shadow repository used for storing remote values """
    os.mkdir(
        repo.get_metadata_path('shadow')
    )
    # Clone from a relative path so the folder stays movable on disk.
    subprocess.check_call(
        (
            'git',
            'clone',
            '-q',
            '../git',
            '.'
        ),
        cwd=repo.get_metadata_path('shadow'),
        stdout=subprocess.PIPE,
    )
    # The 'jira' branch may already exist in the cloned history.
    try:
        repo.run_git_command('checkout', '-b', 'jira', shadow=True)
    except GitCommandError:
        repo.run_git_command('checkout', 'jira', shadow=True)
    repo.run_git_command(
        'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
    )
    repo.run_git_command('push', 'origin', 'jira', shadow=True)
    set_repo_version(repo, 2)
def migration_0003(repo, init=False, **kwargs):
    """ Creates a shadow copy of the issue.

    .. note::

       Early versions of this migration improperly created the shadow
       copy using an absolute path.

    """
    try:
        os.mkdir(repo.get_shadow_path('.jirafs'))
    except OSError:
        # Directory already exists.
        pass
    storable = {
        'options': repo.issue._options,
        'raw': repo.issue.raw
    }
    with open(repo.get_shadow_path('.jirafs/issue.json'), 'w') as out:
        out.write(json.dumps(storable))
    issue_pickle_path = repo.get_shadow_path('.jirafs/issue.json')
    repo.run_git_command('add', '-f', issue_pickle_path, shadow=True)
    repo.run_git_command(
        'commit', '-m', 'Completing migration_0003', shadow=True
    )
    repo.run_git_command('push', 'origin', 'jira', shadow=True)
    # Bring the shadow's 'jira' branch into the working branch.
    repo.run_git_command('merge', 'jira')
    set_repo_version(repo, 3)
def migration_0004(repo, **kwargs):
    """ Moves remote_files.json into version control. """
    local_remote_files_path = repo.get_metadata_path('remote_files.json')
    jira_remote_files_path = repo.get_shadow_path('.jirafs/remote_files.json')
    try:
        os.rename(local_remote_files_path, jira_remote_files_path)
    except (IOError, OSError):
        # No local copy to move; start with an empty JSON object.
        with open(jira_remote_files_path, 'w') as out:
            out.write('{}')
    repo.run_git_command('add', '-f', jira_remote_files_path, shadow=True)
    repo.run_git_command(
        'commit', '-m', 'Completing migration_0004', shadow=True
    )
    repo.run_git_command('push', 'origin', 'jira', shadow=True)
    repo.run_git_command('merge', 'jira')
    set_repo_version(repo, 4)
def migration_0005(repo, init=False, **kwargs):
    """ Dummy migration for RST->Jira format change.

    Note: TicketFolders older than version 5 cannot be upgraded past
    version 5; although I had written a migration for this originally,
    there were a few hard-to-work-around bugs that I decided were
    not quite important enough.

    """
    if init:
        # Fresh folders need no conversion.
        set_repo_version(repo, 5)
        return
    repo_path = repo.path
    # Re-clone into a sibling '<name>.tmp' folder.
    temp_path = os.path.normpath(
        os.path.join(
            repo_path,
            '../',
            repo.path.split('/')[-1] + '.tmp'
        )
    )
    repo.clone(
        repo.issue_url,
        repo.get_jira,
        temp_path,
    )
    temp_dir = os.listdir(temp_path)
    # Carry over user files, but not the old-format *.jira.rst bodies.
    for filename in os.listdir(repo_path):
        if filename not in temp_dir and not filename.endswith('.jira.rst'):
            shutil.copyfile(
                os.path.join(repo_path, filename),
                os.path.join(temp_path, filename),
            )
    # Swap the fresh clone into place.
    shutil.rmtree(repo_path)
    os.rename(temp_path, repo_path)
    set_repo_version(repo, 5)
def migration_0006(repo, init=False, **kwargs):
    """ Fix a glitch preventing folders from being completely portable.

    Early versions of Jirafs would write an absolute path to the ignore
    file to the local git configuration, but that's not very desirable
    because if you move the folder, the @stash_local_changes decorator
    would then wipe out the git repository itself (among other things)
    after stashing.  Whoops; that's embarrassing.

    """
    if init:
        set_repo_version(repo, 6)
        return
    # Rewrite core.excludesfile as a repo-relative path.
    repo.run_git_command(
        'config',
        '--file=%s' % repo.get_metadata_path(
            'git',
            'config',
        ),
        'core.excludesfile',
        '.jirafs/gitignore',
    )
    set_repo_version(repo, 6)
def migration_0007(repo, init=False, **kwargs):
    """ Create the plugin metadata directory."""
    try:
        os.mkdir(
            repo.get_metadata_path(
                'plugin_meta',
            )
        )
    except OSError:
        # Directory already exists.
        pass
    # '.empty' placeholder lets git track the otherwise-empty directory.
    with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out:
        out.write('')
    repo.run_git_command(
        'add',
        '-f',
        repo.get_metadata_path('plugin_meta', '.empty',)
    )
    repo.run_git_command(
        'commit',
        '-m',
        'Completing migration_0007',
        failure_ok=True
    )
    set_repo_version(repo, 7)
def migration_0008(repo, init=False, **kwargs):
    """ Commit most of .jirafs folder to git so we can back up. """
    if init:
        set_repo_version(repo, 8)
        return
    # Keep the nested repos and the volatile log out of version control.
    with open(repo.get_metadata_path('gitignore'), 'w') as out:
        out.write(
            '\n'.join(
                [
                    '.jirafs/git',
                    '.jirafs/shadow',
                    '.jirafs/operation.log'
                ]
            )
        )
    repo.run_git_command(
        'add',
        '.jirafs/gitignore',
    )
    repo.run_git_command(
        'commit',
        '-m',
        'Updating gitignore',
        failure_ok=True
    )
    # Metadata files that should be tracked from now on.
    files_to_add = [
        'config',
        'gitignore',
        'issue_url',
        'plugin_meta',
        'version',
    ]
    for filename in files_to_add:
        repo.run_git_command(
            'add',
            repo.get_metadata_path(filename),
            failure_ok=True
        )
    set_repo_version(repo, 8)
def migration_0009(repo, init=False, **kwargs):
    """ Re-clone shadow copy so it does not reference an absolute path."""
    if init:
        set_repo_version(repo, 9)
        # Fresh folders have nothing to re-clone.  The `return` was
        # missing here: every sibling migration returns after the init
        # branch, and without it the shadow was destroyed/rebuilt on
        # init and the version committed twice.
        return
    shutil.rmtree(repo.get_metadata_path('shadow'))
    os.mkdir(
        repo.get_metadata_path('shadow')
    )
    # Relative clone URL keeps the folder movable.
    subprocess.check_call(
        (
            'git',
            'clone',
            '-q',
            '../git',
            '.'
        ),
        cwd=repo.get_metadata_path('shadow'),
        stdout=subprocess.PIPE,
    )
    # The 'jira' branch may already exist in the cloned history.
    try:
        repo.run_git_command('checkout', '-b', 'jira', shadow=True)
    except GitCommandError:
        repo.run_git_command('checkout', 'jira', shadow=True)
    repo.run_git_command(
        'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
    )
    repo.run_git_command('push', 'origin', 'jira', shadow=True)
    set_repo_version(repo, 9)
def migration_0010(repo, init=False, **kwargs):
    """ Make sure that the operation.log and plugin_meta are untracked/tracked.

    * ``operation.log`` *cannot* be tracked, since if we make a change,
      followed by a stash pop, operation.log may have encountered changes
      since then.
    * ``plugin_meta`` *must* be tracked, or when we pop stash,
      plugin state would presumably be lost (original sentence was
      truncated here -- TODO confirm intent).

    """
    if init:
        set_repo_version(repo, 10)
        return
    with open(repo.get_metadata_path('gitignore'), 'w') as out:
        out.write(
            '\n'.join(
                [
                    '.jirafs/git',
                    '.jirafs/shadow',
                    '.jirafs/operation.log'
                ]
            )
        )
    repo.run_git_command(
        'add',
        '-f',
        '.jirafs/gitignore',
    )
    try:
        os.mkdir(
            repo.get_metadata_path(
                'plugin_meta',
            )
        )
    except OSError:
        # Already exists
        pass
    # Placeholder so git can track the (possibly empty) directory.
    with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out:
        out.write('')
    repo.run_git_command(
        'add',
        '-f',
        repo.get_metadata_path(
            'plugin_meta',
            '.empty'
        )
    )
    # Stop tracking the volatile log without deleting it from disk.
    repo.run_git_command(
        'rm',
        '-f',
        '--cached',
        '.jirafs/operation.log',
        failure_ok=True,
    )
    repo.run_git_command(
        'commit',
        '-m',
        'Completing migration_0010',
        failure_ok=True
    )
    set_repo_version(repo, 10)
def migration_0011(repo, init=False, **kwargs):
    """ Re-clone shadow copy so it does not reference an absolute path.

    .. note::

       The amount of stumbling I've engaged in in managing this shadow
       copy has been terribly embarassing.  Who knew it was so complicated.

       The TLDR is that you *cannot* use `shared` if you ever want the folder
       to be portable, since it'll write an absolute path to the repository
       in your `.jirafs/shadow/.git/objects/info/alternates` file.

    """
    if init:
        set_repo_version(repo, 11)
        return
    shutil.rmtree(repo.get_metadata_path('shadow'))
    os.mkdir(
        repo.get_metadata_path('shadow')
    )
    subprocess.check_call(
        (
            'git',
            'clone',
            '-q',
            '../git',
            '.'
        ),
        cwd=repo.get_metadata_path('shadow'),
        stdout=subprocess.PIPE,
    )
    try:
        repo.run_git_command('checkout', '-b', 'jira', shadow=True)
    except GitCommandError:
        repo.run_git_command('checkout', 'jira', shadow=True)
    repo.run_git_command(
        'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
    )
    # Force-push: the re-created branch history diverges from origin's.
    repo.run_git_command('push', '-f', 'origin', 'jira', shadow=True)
    repo.run_git_command('merge', 'jira')
    set_repo_version(repo, 11)
def migration_0012(repo, init=False, **kwargs):
    """ Force the shadow repository to use a relative URL."""
    subprocess.check_call(
        (
            'git',
            'remote',
            'set-url',
            'origin',
            '../git'
        ),
        cwd=repo.get_metadata_path('shadow'),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    set_repo_version(repo, 12)
def migration_0013(repo, init=False, **kwargs):
    """ Ensure that folder URL is written to issue_url file."""
    if init:
        set_repo_version(repo, 13)
        return
    result = repo.get_ticket_url()
    if result is not None:
        # A URL is already recorded; nothing to backfill.
        set_repo_version(repo, 13)
        return
    jira_base = utils.get_default_jira_server()
    # The folder name doubles as the ticket number.
    ticket_number = repo.path.split('/')[-1:][0].upper()
    issue_url = parse.urljoin(
        jira_base,
        'browse/' + ticket_number + '/',
    )
    # Bug fix: the 'w' mode belongs to open(); it was previously passed
    # to get_metadata_path(), so the file was opened read-only and the
    # write failed.
    with open(repo.get_metadata_path('issue_url'), 'w') as out:
        out.write(issue_url)
    set_repo_version(repo, 13)
def migration_0014(repo, init=False, **kwargs):
    """Split the ignore configuration into .jirafs_ignore / .jirafs_local.

    Moves the old .jirafs/gitignore into a tracked .jirafs_ignore,
    preserves any pre-existing user ignore file as .jirafs_local, and
    untracks files that the new local ignore file now covers.
    """
    if init:
        set_repo_version(repo, 14)
        return
    # Repo-internal excludes for the nested repos and the volatile log.
    with open(repo.get_metadata_path('git/info/exclude'), 'w') as out:
        out.write(
            '\n'.join(
                [
                    '.jirafs/git',
                    '.jirafs/shadow',
                    '.jirafs/operation.log'
                ]
            )
        )
    # Preserve the user's existing ignore file under the new local name.
    if os.path.exists(repo.get_local_path('.jirafs_ignore')):
        shutil.copyfile(
            repo.get_local_path('.jirafs_ignore'),
            repo.get_local_path('.jirafs_local'),
        )
        repo.run_git_command(
            'add',
            '.jirafs_local',
        )
    # Promote the metadata gitignore to the tracked .jirafs_ignore.
    if os.path.exists(repo.get_metadata_path('gitignore')):
        shutil.copyfile(
            repo.get_metadata_path('gitignore'),
            repo.get_local_path('.jirafs_ignore')
        )
        repo.run_git_command(
            'add',
            '.jirafs_ignore',
        )
        repo.run_git_command(
            'rm',
            repo.get_metadata_path('gitignore')
        )
    repo.run_git_command(
        'config',
        '--file=%s' % repo.get_metadata_path(
            'git',
            'config',
        ),
        'core.excludesfile',
        '.jirafs/combined_ignore',
    )
    tracked_files = repo.run_git_command(
        'ls-files', '-c', failure_ok=True
    ).split('\n')
    filtered_files = repo.filter_ignored_files(
        tracked_files,
        '.jirafs_ignore'
    )
    # Files dropped by .jirafs_ignore that .jirafs_local still permits.
    ignored = repo.filter_ignored_files(
        set(tracked_files) - set(filtered_files),
        '.jirafs_local'
    )
    for filename in ignored:
        repo.run_git_command(
            'rm',
            '--cached',
            filename,
            failure_ok=True,
            shadow=True
        )
    repo.run_git_command(
        'commit',
        '-m',
        'Completing migration_0014',
        failure_ok=True,
        shadow=True
    )
    set_repo_version(repo, 14)
def migration_0015(repo, init=False, **kwargs):
    """ No-op; was previously something else."""
    # Kept so the migration version sequence stays contiguous.
    set_repo_version(repo, 15)
def migration_0016(repo, init=False, **kwargs):
    """ Add the 'macros_applied.patch' file to the repository."""
    macro_path = repo.get_metadata_path('macros_applied.patch')
    if not os.path.exists(macro_path):
        # Start with an empty patch file.
        with open(macro_path, 'w') as out:
            out.write('')
    repo.run_git_command('add', '-f', macro_path)
    # Commit message previously said 'migration_0015' (copy-paste slip).
    repo.run_git_command(
        'commit', '-m', 'Completing migration_0016', failure_ok=True
    )
    set_repo_version(repo, 16)
| 26.537718 | 79 | 0.571574 | import json
import os
import shutil
import subprocess
from six.moves.urllib import parse
from . import utils
from .exceptions import GitCommandError
def set_repo_version(repo, version):
with open(repo.get_metadata_path('version'), 'w') as out:
out.write(str(version))
repo.run_git_command(
'add', '-f', repo.get_metadata_path('version'), failure_ok=True,
)
repo.run_git_command(
'commit', '-m', 'Upgraded Repository to v%s' % version, failure_ok=True
)
def migration_0002(repo, **kwargs):
os.mkdir(
repo.get_metadata_path('shadow')
)
subprocess.check_call(
(
'git',
'clone',
'-q',
'../git',
'.'
),
cwd=repo.get_metadata_path('shadow'),
stdout=subprocess.PIPE,
)
try:
repo.run_git_command('checkout', '-b', 'jira', shadow=True)
except GitCommandError:
repo.run_git_command('checkout', 'jira', shadow=True)
repo.run_git_command(
'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
)
repo.run_git_command('push', 'origin', 'jira', shadow=True)
set_repo_version(repo, 2)
def migration_0003(repo, init=False, **kwargs):
try:
os.mkdir(repo.get_shadow_path('.jirafs'))
except OSError:
pass
storable = {
'options': repo.issue._options,
'raw': repo.issue.raw
}
with open(repo.get_shadow_path('.jirafs/issue.json'), 'w') as out:
out.write(json.dumps(storable))
issue_pickle_path = repo.get_shadow_path('.jirafs/issue.json')
repo.run_git_command('add', '-f', issue_pickle_path, shadow=True)
repo.run_git_command(
'commit', '-m', 'Completing migration_0003', shadow=True
)
repo.run_git_command('push', 'origin', 'jira', shadow=True)
repo.run_git_command('merge', 'jira')
set_repo_version(repo, 3)
def migration_0004(repo, **kwargs):
local_remote_files_path = repo.get_metadata_path('remote_files.json')
jira_remote_files_path = repo.get_shadow_path('.jirafs/remote_files.json')
try:
os.rename(local_remote_files_path, jira_remote_files_path)
except (IOError, OSError):
with open(jira_remote_files_path, 'w') as out:
out.write('{}')
repo.run_git_command('add', '-f', jira_remote_files_path, shadow=True)
repo.run_git_command(
'commit', '-m', 'Completing migration_0004', shadow=True
)
repo.run_git_command('push', 'origin', 'jira', shadow=True)
repo.run_git_command('merge', 'jira')
set_repo_version(repo, 4)
def migration_0005(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 5)
return
repo_path = repo.path
temp_path = os.path.normpath(
os.path.join(
repo_path,
'../',
repo.path.split('/')[-1] + '.tmp'
)
)
repo.clone(
repo.issue_url,
repo.get_jira,
temp_path,
)
temp_dir = os.listdir(temp_path)
for filename in os.listdir(repo_path):
if filename not in temp_dir and not filename.endswith('.jira.rst'):
shutil.copyfile(
os.path.join(repo_path, filename),
os.path.join(temp_path, filename),
)
shutil.rmtree(repo_path)
os.rename(temp_path, repo_path)
set_repo_version(repo, 5)
def migration_0006(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 6)
return
repo.run_git_command(
'config',
'--file=%s' % repo.get_metadata_path(
'git',
'config',
),
'core.excludesfile',
'.jirafs/gitignore',
)
set_repo_version(repo, 6)
def migration_0007(repo, init=False, **kwargs):
try:
os.mkdir(
repo.get_metadata_path(
'plugin_meta',
)
)
except OSError:
pass
with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out:
out.write('')
repo.run_git_command(
'add',
'-f',
repo.get_metadata_path('plugin_meta', '.empty',)
)
repo.run_git_command(
'commit',
'-m',
'Completing migration_0007',
failure_ok=True
)
set_repo_version(repo, 7)
def migration_0008(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 8)
return
with open(repo.get_metadata_path('gitignore'), 'w') as out:
out.write(
'\n'.join(
[
'.jirafs/git',
'.jirafs/shadow',
'.jirafs/operation.log'
]
)
)
repo.run_git_command(
'add',
'.jirafs/gitignore',
)
repo.run_git_command(
'commit',
'-m',
'Updating gitignore',
failure_ok=True
)
files_to_add = [
'config',
'gitignore',
'issue_url',
'plugin_meta',
'version',
]
for filename in files_to_add:
repo.run_git_command(
'add',
repo.get_metadata_path(filename),
failure_ok=True
)
set_repo_version(repo, 8)
def migration_0009(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 9)
shutil.rmtree(repo.get_metadata_path('shadow'))
os.mkdir(
repo.get_metadata_path('shadow')
)
subprocess.check_call(
(
'git',
'clone',
'-q',
'../git',
'.'
),
cwd=repo.get_metadata_path('shadow'),
stdout=subprocess.PIPE,
)
try:
repo.run_git_command('checkout', '-b', 'jira', shadow=True)
except GitCommandError:
repo.run_git_command('checkout', 'jira', shadow=True)
repo.run_git_command(
'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
)
repo.run_git_command('push', 'origin', 'jira', shadow=True)
set_repo_version(repo, 9)
def migration_0010(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 10)
return
with open(repo.get_metadata_path('gitignore'), 'w') as out:
out.write(
'\n'.join(
[
'.jirafs/git',
'.jirafs/shadow',
'.jirafs/operation.log'
]
)
)
repo.run_git_command(
'add',
'-f',
'.jirafs/gitignore',
)
try:
os.mkdir(
repo.get_metadata_path(
'plugin_meta',
)
)
except OSError:
pass
with open(repo.get_metadata_path('plugin_meta', '.empty'), 'w') as out:
out.write('')
repo.run_git_command(
'add',
'-f',
repo.get_metadata_path(
'plugin_meta',
'.empty'
)
)
repo.run_git_command(
'rm',
'-f',
'--cached',
'.jirafs/operation.log',
failure_ok=True,
)
repo.run_git_command(
'commit',
'-m',
'Completing migration_0010',
failure_ok=True
)
set_repo_version(repo, 10)
def migration_0011(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 11)
return
shutil.rmtree(repo.get_metadata_path('shadow'))
os.mkdir(
repo.get_metadata_path('shadow')
)
subprocess.check_call(
(
'git',
'clone',
'-q',
'../git',
'.'
),
cwd=repo.get_metadata_path('shadow'),
stdout=subprocess.PIPE,
)
try:
repo.run_git_command('checkout', '-b', 'jira', shadow=True)
except GitCommandError:
repo.run_git_command('checkout', 'jira', shadow=True)
repo.run_git_command(
'commit', '--allow-empty', '-m', 'Shadow Created', shadow=True
)
repo.run_git_command('push', '-f', 'origin', 'jira', shadow=True)
repo.run_git_command('merge', 'jira')
set_repo_version(repo, 11)
def migration_0012(repo, init=False, **kwargs):
subprocess.check_call(
(
'git',
'remote',
'set-url',
'origin',
'../git'
),
cwd=repo.get_metadata_path('shadow'),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
set_repo_version(repo, 12)
def migration_0013(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 13)
return
result = repo.get_ticket_url()
if result is not None:
set_repo_version(repo, 13)
return
jira_base = utils.get_default_jira_server()
ticket_number = repo.path.split('/')[-1:][0].upper()
issue_url = parse.urljoin(
jira_base,
'browse/' + ticket_number + '/',
)
with open(repo.get_metadata_path('issue_url', 'w')) as out:
out.write(issue_url)
set_repo_version(repo, 13)
def migration_0014(repo, init=False, **kwargs):
if init:
set_repo_version(repo, 14)
return
with open(repo.get_metadata_path('git/info/exclude'), 'w') as out:
out.write(
'\n'.join(
[
'.jirafs/git',
'.jirafs/shadow',
'.jirafs/operation.log'
]
)
)
if os.path.exists(repo.get_local_path('.jirafs_ignore')):
shutil.copyfile(
repo.get_local_path('.jirafs_ignore'),
repo.get_local_path('.jirafs_local'),
)
repo.run_git_command(
'add',
'.jirafs_local',
)
if os.path.exists(repo.get_metadata_path('gitignore')):
shutil.copyfile(
repo.get_metadata_path('gitignore'),
repo.get_local_path('.jirafs_ignore')
)
repo.run_git_command(
'add',
'.jirafs_ignore',
)
repo.run_git_command(
'rm',
repo.get_metadata_path('gitignore')
)
repo.run_git_command(
'config',
'--file=%s' % repo.get_metadata_path(
'git',
'config',
),
'core.excludesfile',
'.jirafs/combined_ignore',
)
tracked_files = repo.run_git_command(
'ls-files', '-c', failure_ok=True
).split('\n')
filtered_files = repo.filter_ignored_files(
tracked_files,
'.jirafs_ignore'
)
ignored = repo.filter_ignored_files(
set(tracked_files) - set(filtered_files),
'.jirafs_local'
)
for filename in ignored:
repo.run_git_command(
'rm',
'--cached',
filename,
failure_ok=True,
shadow=True
)
repo.run_git_command(
'commit',
'-m',
'Completing migration_0014',
failure_ok=True,
shadow=True
)
set_repo_version(repo, 14)
def migration_0015(repo, init=False, **kwargs):
set_repo_version(repo, 15)
def migration_0016(repo, init=False, **kwargs):
macro_path = repo.get_metadata_path('macros_applied.patch')
if not os.path.exists(macro_path):
with open(macro_path, 'w') as out:
out.write('')
repo.run_git_command('add', '-f', macro_path)
repo.run_git_command(
'commit', '-m', 'Completing migration_0015', failure_ok=True
)
set_repo_version(repo, 16)
| true | true |
f714e83d2f50d6b29bdbd9adf5eabbbb4ba0812e | 6,187 | py | Python | Compiler/ppc.py | fqliao/MP-SPDZ | 070fca5c52ee225fe681f16f150f5fb1a7b4b3ca | [
"BSD-2-Clause"
] | null | null | null | Compiler/ppc.py | fqliao/MP-SPDZ | 070fca5c52ee225fe681f16f150f5fb1a7b4b3ca | [
"BSD-2-Clause"
] | null | null | null | Compiler/ppc.py | fqliao/MP-SPDZ | 070fca5c52ee225fe681f16f150f5fb1a7b4b3ca | [
"BSD-2-Clause"
] | null | null | null | import util
import math
from Compiler.types import Array, sint, sfloat, sfix, MemValue, cint, Matrix, _int
# import operator
# import math
# from Compiler.instructions import *
from Compiler.library import for_range, print_str, for_range, print_float_prec
import ml
# Public aliases: the ppc API exposes the MP-SPDZ secret types under p* names.
pint = sint
pfloat = sfloat
pfix = sfix
# Default numeric type used by read_array/read_matrix.
pnum = pfloat
print_float_prec(4)
# Use to limit the tester workload
MAX_DATA_LENGTH = 500
MAX_ML_SIZE = 500
# Unchecked re-exports of the underlying ML layers (see pConv2d etc. for
# the size-checked wrappers).
ppcConv2d = ml.FixConv2d
ppcMaxPool = ml.MaxPool
ppcRelu = ml.Relu
ppcDense = ml.Dense
def set_display_field_names(name_list):
    """Emit the result header line listing the output field names."""
    header = ' '.join(name_list)
    println("result_fields = %s", header)
def display_data(field_values):
    """Emit the result value line, one formatted value per field.

    Values may be secret-shared types; formatting is delegated to
    print_str via the %s placeholders.
    """
    printfmt("result_values =")
    for value in field_values:
        printfmt(" %s", value)
    println()
def get_ml_size(shape_array):
    """Return the number of elements per sample for an ML tensor shape.

    The leading dimension (index 0) is deliberately excluded from the
    product; a length-0 or length-1 shape therefore yields 1.
    """
    # math.prod replaces the manual accumulation loop; shape_array[1:]
    # skips the leading dimension, matching the original semantics.
    return math.prod(shape_array[1:])
def pConv2d(input_shape, weight_shape, bias_shape, output_shape, stride,
            padding='SAME', tf_weight_format=False, inputs=None):
    """Size-checked wrapper around ml.FixConv2d.

    Validates that the per-sample sizes of the input, weight and bias
    shapes do not exceed MAX_ML_SIZE, then forwards every argument to
    ml.FixConv2d.

    Raises TypeError when a shape is too large.
    """
    # Check all three tensor shapes (the original omitted weight_shape).
    for name, shape in (('input_shape', input_shape),
                        ('weight_shape', weight_shape),
                        ('bias_shape', bias_shape)):
        if get_ml_size(shape) > MAX_ML_SIZE:
            # %-interpolate: TypeError('msg %s', x) never formats the message.
            raise TypeError('%s could not larger than %s' % (name, MAX_ML_SIZE))
    # Forward tf_weight_format/inputs instead of hard-coding their
    # defaults, which silently ignored the caller's values.
    return ml.FixConv2d(input_shape, weight_shape, bias_shape, output_shape,
                        stride, padding, tf_weight_format=tf_weight_format,
                        inputs=inputs)
def pMaxPool(shape, strides=(1, 2, 2, 1), ksize=(1, 2, 2, 1),
             padding='VALID'):
    """Size-checked wrapper around ml.MaxPool.

    Raises TypeError when shape, strides or ksize exceed MAX_ML_SIZE
    elements per sample.
    """
    for name, dims in (('shape', shape),
                       ('strides_size', strides),
                       ('ksize_size', ksize)):
        if get_ml_size(dims) > MAX_ML_SIZE:
            # %-interpolate the limit; the original passed it as a second
            # TypeError argument so the message was never formatted.
            raise TypeError('%s could not larger than %s' % (name, MAX_ML_SIZE))
    return ml.MaxPool(shape, strides, ksize,
                      padding)
def pRelu(shape, inputs=None):
    """Size-checked wrapper around ml.Relu.

    Raises TypeError when the per-sample shape exceeds MAX_ML_SIZE.
    """
    if get_ml_size(shape) > MAX_ML_SIZE:
        # %-interpolate the limit (was an unformatted TypeError argument).
        raise TypeError('shape could not larger than %s' % MAX_ML_SIZE)
    return ml.Relu(shape, inputs)
def pDense(N, d_in, d_out, d=1, activation='id', debug=False):
    """Size-checked wrapper around ml.Dense.

    Raises TypeError when the output dimension exceeds MAX_ML_SIZE.
    """
    if d_out > MAX_ML_SIZE:
        # %-interpolate the limit (was an unformatted TypeError argument).
        raise TypeError('d_out could not larger than %s' % MAX_ML_SIZE)
    return ml.Dense(N, d_in, d_out, d, activation, debug)
def read_array(party_id, source_record_count, value_type=pnum):
    """Privately read an array of `source_record_count` values from a party.

    Raises TypeError when the requested length exceeds MAX_DATA_LENGTH.
    """
    if source_record_count > MAX_DATA_LENGTH:
        # %-interpolate the limit (was an unformatted TypeError argument).
        raise TypeError(
            'Array length could not larger than %s' % MAX_DATA_LENGTH)
    array_value = Array(source_record_count, value_type)
    array_value.input_from(party_id)
    return array_value
def max_in_array(array):
    """Return (max_value, max_index) of a secret array via a linear scan.

    Updates are made with `condition`, i.e. oblivious selection, so no
    secret comparison outcome is revealed.
    """
    max_value = MemValue(array[0])
    max_index = MemValue(pint(0))
    @for_range(1, array.length)
    def _(i):
        # Keep either the running best or element i, obliviously.
        cond = array[i] > max_value
        max_index.write(condition(cond, pint(i), max_index.read()))
        max_value.write(condition(cond, array[i], max_value.read()))
    return max_value.read(), max_index.read()
def min_in_array(array):
    """Return (min_value, min_index) of a secret array via a linear scan.

    Mirrors max_in_array with the comparison reversed; selection is
    oblivious via `condition`.
    """
    value = MemValue(array[0])
    index = MemValue(pint(0))
    @for_range(1, array.length)
    def _(i):
        # Keep either the running minimum or element i, obliviously.
        cond = array[i] < value
        index.write(condition(cond, pint(i), index.read()))
        value.write(condition(cond, array[i], value.read()))
    return value.read(), index.read()
def combine_array(array1, array2):
    """Concatenate two secret arrays of the same value type.

    Raises TypeError when the value types differ.
    """
    if array1.value_type != array2.value_type:
        raise TypeError('Array type does not match')
    total_length = array1.length + array2.length
    combined = Array(total_length, array1.value_type)
    # First array at offset 0, second array immediately after it.
    combined.assign(array1)
    combined.assign(array2, array1.length)
    return combined
def print_array(array):
    """Reveal and print a secret array as "[ v0 v1 ... ]"."""
    printfmt("[ ")
    @for_range(array.length)
    def _(i):
        # reveal() opens the secret value for printing.
        printfmt("%s ", array[i].reveal())
    println("]")
def read_matrix(party_id, height, width, value_type=pnum):
    """Privately read a height x width matrix from the given party.

    Raises TypeError when height*width exceeds MAX_DATA_LENGTH.
    """
    if height*width > MAX_DATA_LENGTH:
        # %-interpolate the limit (was an unformatted TypeError argument).
        raise TypeError('Matrix size could not larger than %s'
                        % MAX_DATA_LENGTH)
    value = Matrix(height, width, value_type)
    value.input_from(party_id)
    return value
def print_matrix(matrix):
    """Reveal and print a secret matrix, one bracketed row per line."""
    println("[")
    @for_range(matrix.sizes[0])
    def _(i):
        printfmt("  [ ")
        @for_range(matrix.sizes[1])
        def _(j):
            printfmt("%s ", matrix[i][j].reveal())
        println("]")
    println("]")
def condition(cond, a, b):
    """Obliviously select: return a if cond else b (cond may be secret)."""
    return util.if_else(cond, a, b)
def println(s='', *args):
    """print_str with a trailing newline; args are interpolated by print_str."""
    print_str(s + '\n', *args)
def printfmt(s='', *args):
    """print_str without a trailing newline (format-and-continue)."""
    print_str(s, *args)
def to_pint(num):
    """Convert a secret number to pint (sint) by truncating the fraction.

    A pfloat is first converted to pfix; a pfix is truncated by shifting
    its raw representation right by its fractional bit count.
    """
    if isinstance(num, pint):
        return num
    if isinstance(num, pfloat):
        num = pfix(num)
    if isinstance(num, pfix):
        # Drop the f fractional bits of the fixed-point representation.
        return num.v >> pfix.f
    raise NotImplementedError('to_pint only implemented for pfloat and pfix.')
def pint_mod(self, other):
    """self % other for a secret integer and a public integer modulus.

    Power-of-two moduli use the cheap mod2m protocol; other integers
    fall back to a fixed-point division.
    """
    if isinstance(other, int):
        # Round before converting: math.log(x, 2) can come out as e.g.
        # 2.999...; the power-of-two guard rounds, so the bit count
        # passed to mod2m must round too (int() truncation could give
        # the wrong modulus).
        k = int(round(math.log(other, 2)))
        if 2**k == other:
            return self.mod2m(k)
        else:
            return self - to_pint(pfix(self) / other) * other
    if isinstance(other, _int):
        return self - to_pint(pfix(self) / other) * other
    raise NotImplementedError('Argument modulus should be an integer type.')
def pint_div(self, other):
    """Division helper for secret integers (shared by / and //).

    A public power-of-two divisor becomes a right shift; any other
    integer divisor goes through fixed-point (pfix) division.

    :raises ValueError: if a public int denominator is not positive
    :raises NotImplementedError: for non-integer denominator types
    """
    if isinstance(other, int):
        if other <= 0:
            # math.log in the old code raised ValueError here too.
            raise ValueError('Denominator must be a positive integer.')
        if other & (other - 1) == 0:
            # The shift amount must be an exact int: the old code shifted by
            # the *float* returned by math.log, and also left in a debug
            # println that revealed the secret operands -- both removed.
            return self >> (other.bit_length() - 1)
        return pfix(self) / other
    # pfloat sometime produces buggy results, has to use pfix here.
    if isinstance(other, _int):
        return pfix(self) / other
    raise NotImplementedError(
        'Argument denominator should be an integer type.')
def pint_truediv(self, other):
    """True division for pint: wrap the quotient in the secret numeric type pnum."""
    return pnum(pint_div(self, other))
def pint_floordiv(self, other):
    """Floor-style division for pint: truncate the quotient back to a secret int."""
    return to_pint(pint_div(self, other))
# Install the custom arithmetic protocols on the secret-int type so that
# % and // on pint values use the helpers above.  True division stays on
# the library default (the override below is intentionally disabled).
pint.__mod__ = pint_mod
#pint.__truediv__ = pint_truediv
pint.__floordiv__ = pint_floordiv
| 27.255507 | 84 | 0.659286 | import util
import math
from Compiler.types import Array, sint, sfloat, sfix, MemValue, cint, Matrix, _int
from Compiler.library import for_range, print_str, for_range, print_float_prec
import ml
pint = sint
pfloat = sfloat
pfix = sfix
pnum = pfloat
print_float_prec(4)
MAX_DATA_LENGTH = 500
MAX_ML_SIZE = 500
ppcConv2d = ml.FixConv2d
ppcMaxPool = ml.MaxPool
ppcRelu = ml.Relu
ppcDense = ml.Dense
def set_display_field_names(name_list):
println("result_fields = %s", ' '.join(name_list))
def display_data(field_values):
printfmt("result_values =")
for value in field_values:
printfmt(" %s", value)
println()
def get_ml_size(shape_array):
ml_size = 1
for i in range(1, len(shape_array)):
ml_size *= shape_array[i]
return ml_size
def pConv2d(input_shape, weight_shape, bias_shape, output_shape, stride,
padding='SAME', tf_weight_format=False, inputs=None):
input_shape_size = get_ml_size(input_shape)
if input_shape_size > MAX_ML_SIZE:
raise TypeError('input_shape could not larger than %s', MAX_ML_SIZE)
bias_shape_size = get_ml_size(bias_shape)
if bias_shape_size > MAX_ML_SIZE:
raise TypeError('bias_shape could not larger than %s', MAX_ML_SIZE)
return ml.FixConv2d(input_shape, weight_shape, bias_shape, output_shape, stride,
padding, tf_weight_format=False, inputs=None)
def pMaxPool(shape, strides=(1, 2, 2, 1), ksize=(1, 2, 2, 1),
padding='VALID'):
shape_size = get_ml_size(shape)
if shape_size > MAX_ML_SIZE:
raise TypeError('shape could not larger than %s', MAX_ML_SIZE)
strides_size = get_ml_size(strides)
if strides_size > MAX_ML_SIZE:
raise TypeError('strides_size could not larger than %s', MAX_ML_SIZE)
ksize_size = get_ml_size(ksize)
if ksize_size > MAX_ML_SIZE:
raise TypeError('ksize_size could not larger than %s', MAX_ML_SIZE)
return ml.MaxPool(shape, strides, ksize,
padding)
def pRelu(shape, inputs=None):
shape_size = get_ml_size(shape)
if shape_size > MAX_ML_SIZE:
raise TypeError('shape could not larger than %s', MAX_ML_SIZE)
return ml.Relu(shape, inputs)
def pDense(N, d_in, d_out, d=1, activation='id', debug=False):
if d_out > MAX_ML_SIZE:
raise TypeError('d_out could not larger than %s', MAX_ML_SIZE)
return ml.Dense(N, d_in, d_out, d, activation, debug)
def read_array(party_id, source_record_count, value_type=pnum):
if source_record_count > MAX_DATA_LENGTH:
raise TypeError(
'Array length could not larger than %s', MAX_DATA_LENGTH)
array_value = Array(source_record_count, value_type)
array_value.input_from(party_id)
return array_value
def max_in_array(array):
max_value = MemValue(array[0])
max_index = MemValue(pint(0))
@for_range(1, array.length)
def _(i):
cond = array[i] > max_value
max_index.write(condition(cond, pint(i), max_index.read()))
max_value.write(condition(cond, array[i], max_value.read()))
return max_value.read(), max_index.read()
def min_in_array(array):
value = MemValue(array[0])
index = MemValue(pint(0))
@for_range(1, array.length)
def _(i):
cond = array[i] < value
index.write(condition(cond, pint(i), index.read()))
value.write(condition(cond, array[i], value.read()))
return value.read(), index.read()
def combine_array(array1, array2):
if array1.value_type != array2.value_type:
raise TypeError('Array type does not match')
result_array = Array(array1.length+array2.length, array1.value_type)
result_array.assign(array1)
result_array.assign(array2, array1.length)
return result_array
def print_array(array):
printfmt("[ ")
@for_range(array.length)
def _(i):
printfmt("%s ", array[i].reveal())
println("]")
def read_matrix(party_id, height, width, value_type=pnum):
if height*width > MAX_DATA_LENGTH:
raise TypeError('Matrix size could not larger than %s',
MAX_DATA_LENGTH)
value = Matrix(height, width, value_type)
value.input_from(party_id)
return value
def print_matrix(matrix):
println("[")
@for_range(matrix.sizes[0])
def _(i):
printfmt(" [ ")
@for_range(matrix.sizes[1])
def _(j):
printfmt("%s ", matrix[i][j].reveal())
println("]")
println("]")
def condition(cond, a, b):
return util.if_else(cond, a, b)
def println(s='', *args):
print_str(s + '\n', *args)
def printfmt(s='', *args):
print_str(s, *args)
def to_pint(num):
if isinstance(num, pint):
return num
if isinstance(num, pfloat):
num = pfix(num)
if isinstance(num, pfix):
return num.v >> pfix.f
raise NotImplementedError('to_pint only implemented for pfloat and pfix.')
def pint_mod(self, other):
if isinstance(other, int):
l = math.log(other, 2)
if 2**int(round(l)) == other:
return self.mod2m(int(l))
else:
return self - to_pint(pfix(self) / other) * other
if isinstance(other, _int):
return self - to_pint(pfix(self) / other) * other
raise NotImplementedError('Argument modulus should be an integer type.')
def pint_div(self, other):
if isinstance(other, int):
l = math.log(other, 2)
if 2**int(round(l)) == other:
println("%s, %s, %s", (self >> l).reveal(), self.reveal(), l)
return self >> l
else:
return pfix(self) / other
if isinstance(other, _int):
return pfix(self) / other
raise NotImplementedError(
'Argument denominator should be an integer type.')
def pint_truediv(self, other):
return pnum(pint_div(self, other))
def pint_floordiv(self, other):
return to_pint(pint_div(self, other))
pint.__mod__ = pint_mod
pint.__floordiv__ = pint_floordiv
| true | true |
f714e8841d230fa94120f748f64ae122d1b782d6 | 17,326 | py | Python | dscript/commands/train.py | samsledje/D-SCRIPT | 3fa7ea685f7fcdc63468380267d1672f63bb8772 | [
"MIT"
] | 12 | 2020-11-15T11:36:27.000Z | 2022-03-14T13:30:35.000Z | dscript/commands/train.py | samsledje/D-SCRIPT | 3fa7ea685f7fcdc63468380267d1672f63bb8772 | [
"MIT"
] | 27 | 2020-12-01T02:38:55.000Z | 2022-02-25T19:08:18.000Z | dscript/commands/train.py | samsledje/D-SCRIPT | 3fa7ea685f7fcdc63468380267d1672f63bb8772 | [
"MIT"
] | 6 | 2021-07-05T23:16:56.000Z | 2022-03-30T03:29:12.000Z | """
Train a new model.
"""
import sys
import argparse
import h5py
import datetime
import subprocess as sp
import numpy as np
import pandas as pd
import gzip as gz
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import IterableDataset, DataLoader
from sklearn.metrics import average_precision_score as average_precision
import dscript
from dscript.utils import PairedDataset, collate_paired_sequences
from dscript.models.embedding import (
IdentityEmbed,
FullyConnectedEmbed,
)
from dscript.models.contact import ContactCNN
from dscript.models.interaction import ModelInteraction
def add_args(parser):
    """
    Create parser for command line utility.

    Registers all data, model-architecture, training, and output options
    on ``parser`` and returns it.

    :meta private:
    """
    data_grp = parser.add_argument_group("Data")
    proj_grp = parser.add_argument_group("Projection Module")
    contact_grp = parser.add_argument_group("Contact Module")
    inter_grp = parser.add_argument_group("Interaction Module")
    train_grp = parser.add_argument_group("Training")
    misc_grp = parser.add_argument_group("Output and Device")
    # Data
    data_grp.add_argument("--train", help="Training data", required=True)
    data_grp.add_argument("--val", help="Validation data", required=True)
    data_grp.add_argument("--embedding", help="h5 file with embedded sequences", required=True)
    data_grp.add_argument(
        "--no-augment",
        action="store_false",
        dest='augment',
        help="Set flag to not augment data by adding (B A) for all pairs (A B)",
    )
    # Embedding projection model
    proj_grp.add_argument(
        "--projection-dim",
        type=int,
        default=100,
        help="Dimension of embedding projection layer (default: 100)",
    )
    proj_grp.add_argument(
        "--dropout-p",
        type=float,
        default=0.5,
        help="Parameter p for embedding dropout layer (default: 0.5)",
    )
    # Contact model
    contact_grp.add_argument(
        "--hidden-dim",
        type=int,
        default=50,
        help="Number of hidden units for comparison layer in contact prediction (default: 50)",
    )
    contact_grp.add_argument(
        "--kernel-width",
        type=int,
        default=7,
        help="Width of convolutional filter for contact prediction (default: 7)",
    )
    # Interaction Model
    inter_grp.add_argument(
        "--no-w",
        action="store_false",
        dest='use_w',
        help="Don't use weight matrix in interaction prediction model",
    )
    inter_grp.add_argument(
        "--pool-width",
        type=int,
        default=9,
        help="Size of max-pool in interaction model (default: 9)",
    )
    # Training
    # NOTE(review): --negative-ratio is parsed but not referenced in main()
    # in this file -- confirm it is consumed elsewhere.
    train_grp.add_argument(
        "--negative-ratio",
        type=int,
        default=10,
        help="Number of negative training samples for each positive training sample (default: 10)",
    )
    train_grp.add_argument(
        "--epoch-scale",
        type=int,
        default=1,
        help="Report heldout performance every this many epochs (default: 1)",
    )
    train_grp.add_argument("--num-epochs", type=int, default=10, help="Number of epochs (default: 10)")
    train_grp.add_argument("--batch-size", type=int, default=25, help="Minibatch size (default: 25)")
    train_grp.add_argument("--weight-decay", type=float, default=0, help="L2 regularization (default: 0)")
    train_grp.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001)")
    train_grp.add_argument(
        "--lambda",
        dest="lambda_",
        type=float,
        default=0.35,
        help="Weight on the similarity objective (default: 0.35)",
    )
    # Output
    misc_grp.add_argument("-o", "--outfile", help="Output file path (default: stdout)")
    misc_grp.add_argument("--save-prefix", help="Path prefix for saving models")
    misc_grp.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
    misc_grp.add_argument("--checkpoint", help="Checkpoint model to start training from")
    return parser
def predict_interaction(model, n0, n1, tensors, use_cuda):
    """
    Predict the interaction probability for each protein pair (n0[i], n1[i]).

    :param model: Model to be trained
    :type model: dscript.models.interaction.ModelInteraction
    :param n0: First protein names
    :type n0: list[str]
    :param n1: Second protein names
    :type n1: list[str]
    :param tensors: Dictionary of protein names to embeddings
    :type tensors: dict[str, torch.Tensor]
    :param use_cuda: Whether to use GPU
    :type use_cuda: bool
    :return: Predictions stacked along dim 0, one per pair
    """
    predictions = []
    for pair_idx in range(len(n0)):
        emb0 = tensors[n0[pair_idx]]
        emb1 = tensors[n1[pair_idx]]
        if use_cuda:
            emb0, emb1 = emb0.cuda(), emb1.cuda()
        predictions.append(model.predict(emb0, emb1))
    return torch.stack(predictions, 0)
def predict_cmap_interaction(model, n0, n1, tensors, use_cuda):
    """
    Predict, for each protein pair (n0[i], n1[i]), the interaction
    probability and the mean magnitude of the predicted contact map.

    :param model: Model to be trained
    :type model: dscript.models.interaction.ModelInteraction
    :param n0: First protein names
    :type n0: list[str]
    :param n1: Second protein names
    :type n1: list[str]
    :param tensors: Dictionary of protein names to embeddings
    :type tensors: dict[str, torch.Tensor]
    :param use_cuda: Whether to use GPU
    :type use_cuda: bool
    :return: (stacked mean contact-map magnitudes, stacked predictions)
    """
    magnitudes = []
    predictions = []
    for pair_idx in range(len(n0)):
        emb0 = tensors[n0[pair_idx]]
        emb1 = tensors[n1[pair_idx]]
        if use_cuda:
            emb0, emb1 = emb0.cuda(), emb1.cuda()
        cmap, prob = model.map_predict(emb0, emb1)
        magnitudes.append(torch.mean(cmap))
        predictions.append(prob)
    return torch.stack(magnitudes, 0), torch.stack(predictions, 0)
def interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35):
    """
    Compute gradient and backpropagate loss for a batch.

    Gradients accumulate on the model parameters; the caller is expected
    to run ``optim.step()`` / ``optim.zero_grad()`` afterwards.

    :param model: Model to be trained
    :type model: dscript.models.interaction.ModelInteraction
    :param n0: First protein names
    :type n0: list[str]
    :param n1: Second protein names
    :type n1: list[str]
    :param y: Interaction labels
    :type y: torch.Tensor
    :param tensors: Dictionary of protein names to embeddings
    :type tensors: dict[str, torch.Tensor]
    :param use_cuda: Whether to use GPU
    :type use_cuda: bool
    :param weight: Weight on the BCE (interaction) objective; the contact
        map magnitude term is weighted :math:`1 - \\text{weight}`.
    :type weight: float
    :return: (Loss, number correct, mean square error, batch size)
    :rtype: (torch.Tensor, int, torch.Tensor, int)
    """
    c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda)
    if use_cuda:
        y = y.cuda()
    y = Variable(y)
    bce_loss = F.binary_cross_entropy(p_hat.float(), y.float())
    # Penalizing contact-map magnitude encourages sparse predicted maps.
    cmap_loss = torch.mean(c_map_mag)
    loss = (weight * bce_loss) + ((1 - weight) * cmap_loss)
    b = len(p_hat)
    # backprop loss
    loss.backward()
    if use_cuda:
        y = y.cpu()
        p_hat = p_hat.cpu()
    # Metrics are computed on detached CPU copies with a fixed 0.5 cutoff.
    with torch.no_grad():
        guess_cutoff = 0.5
        p_hat = p_hat.float()
        p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
        y = y.float()
        correct = torch.sum(p_guess == y).item()
        mse = torch.mean((y.float() - p_hat) ** 2).item()
    return loss, correct, mse, b
def interaction_eval(model, test_iterator, tensors, use_cuda):
    """
    Evaluate test data set performance.

    :param model: Model to be trained
    :type model: dscript.models.interaction.ModelInteraction
    :param test_iterator: Test data iterator
    :type test_iterator: torch.utils.data.DataLoader
    :param tensors: Dictionary of protein names to embeddings
    :type tensors: dict[str, torch.Tensor]
    :param use_cuda: Whether to use GPU
    :type use_cuda: bool
    :return: (Loss, number correct, mean square error, precision, recall, F1 Score, AUPR)
    :rtype: (float, int, float, float, float, float, float)
    """
    p_hat = []
    true_y = []
    for n0, n1, y in test_iterator:
        p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda))
        true_y.append(y)
    y = torch.cat(true_y, 0)
    p_hat = torch.cat(p_hat, 0)
    if use_cuda:
        # Actually move both tensors.  The original called y.cuda() without
        # assigning the result (a no-op) and rebuilt p_hat on the CPU via
        # torch.Tensor([...]), which fails for non-scalar entries.
        y = y.cuda()
        p_hat = p_hat.cuda()
    loss = F.binary_cross_entropy(p_hat.float(), y.float()).item()
    b = len(y)
    with torch.no_grad():
        guess_cutoff = torch.Tensor([0.5]).float()
        # Metrics below use CPU helpers (torch.ones); compute them on CPU.
        p_hat = p_hat.cpu().float()
        y = y.cpu().float()
        p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
        correct = torch.sum(p_guess == y).item()
        mse = torch.mean((y.float() - p_hat) ** 2).item()
        # "Soft" true positives: probabilities, not thresholded guesses.
        tp = torch.sum(y * p_hat).item()
        pr = tp / torch.sum(p_hat).item()
        re = tp / torch.sum(y).item()
        # Guard the degenerate case where precision and recall are both zero.
        f1 = 2 * pr * re / (pr + re) if (pr + re) > 0 else 0.0
    y = y.cpu().numpy()
    p_hat = p_hat.data.cpu().numpy()
    aupr = average_precision(y, p_hat)
    return loss, correct, mse, pr, re, f1, aupr
def main(args):
    """
    Run training from arguments.

    Loads pair lists and embeddings, builds (or restores) the model, and
    runs the training/evaluation loop, optionally checkpointing models.

    :meta private:
    """
    output = args.outfile
    if output is None:
        output = sys.stdout
    else:
        output = open(output, "w")
    print(f'# Called as: {" ".join(sys.argv)}', file=output)
    if output is not sys.stdout:
        print(f'Called as: {" ".join(sys.argv)}')
    # Set device
    device = args.device
    use_cuda = (device >= 0) and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(device)
        print(
            f"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}",
            file=output,
        )
    else:
        print("# Using CPU", file=output)
        device = "cpu"
    batch_size = args.batch_size
    train_fi = args.train
    test_fi = args.val
    augment = args.augment
    embedding_h5 = args.embedding
    h5fi = h5py.File(embedding_h5, "r")
    print(f"# Loading training pairs from {train_fi}...", file=output)
    output.flush()
    train_df = pd.read_csv(train_fi, sep="\t", header=None)
    if augment:
        # Add the mirrored pair (B, A) for every (A, B), with the same label.
        train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True)
        train_n1 = pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True)
        train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values)
    else:
        train_n0, train_n1 = train_df[0], train_df[1]
        train_y = torch.from_numpy(train_df[2].values)
    print(f"# Loading testing pairs from {test_fi}...", file=output)
    output.flush()
    test_df = pd.read_csv(test_fi, sep="\t", header=None)
    test_n0, test_n1 = test_df[0], test_df[1]
    test_y = torch.from_numpy(test_df[2].values)
    output.flush()
    train_pairs = PairedDataset(train_n0, train_n1, train_y)
    pairs_train_iterator = torch.utils.data.DataLoader(
        train_pairs,
        batch_size=batch_size,
        collate_fn=collate_paired_sequences,
        shuffle=True,
    )
    test_pairs = PairedDataset(test_n0, test_n1, test_y)
    pairs_test_iterator = torch.utils.data.DataLoader(
        test_pairs,
        batch_size=batch_size,
        collate_fn=collate_paired_sequences,
        shuffle=True,
    )
    output.flush()
    print(f"# Loading embeddings", file=output)
    tensors = {}
    all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1))
    for prot_name in tqdm(all_proteins):
        tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :])
    # All embeddings are now materialized in memory; release the HDF5 handle
    # (the original leaked it for the remainder of the run).
    h5fi.close()
    use_cuda = (args.device > -1) and torch.cuda.is_available()
    if args.checkpoint is None:
        projection_dim = args.projection_dim
        dropout_p = args.dropout_p
        embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p)
        print("# Initializing embedding model with:", file=output)
        print(f"\tprojection_dim: {projection_dim}", file=output)
        print(f"\tdropout_p: {dropout_p}", file=output)
        # Create contact model
        hidden_dim = args.hidden_dim
        kernel_width = args.kernel_width
        print("# Initializing contact model with:", file=output)
        print(f"\thidden_dim: {hidden_dim}", file=output)
        print(f"\tkernel_width: {kernel_width}", file=output)
        contact = ContactCNN(projection_dim, hidden_dim, kernel_width)
        # Create the full model
        use_W = args.use_w
        pool_width = args.pool_width
        print("# Initializing interaction model with:", file=output)
        print(f"\tpool_width: {pool_width}", file=output)
        print(f"\tuse_w: {use_W}", file=output)
        model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width)
        print(model, file=output)
    else:
        print("# Loading model from checkpoint {}".format(args.checkpoint), file=output)
        model = torch.load(args.checkpoint)
        model.use_cuda = use_cuda
    if use_cuda:
        model = model.cuda()
    # Train the model
    lr = args.lr
    wd = args.weight_decay
    num_epochs = args.num_epochs
    batch_size = args.batch_size
    report_steps = args.epoch_scale
    inter_weight = args.lambda_
    cmap_weight = 1 - inter_weight
    # Width for zero-padding epoch numbers in checkpoint file names.
    digits = int(np.floor(np.log10(num_epochs))) + 1
    save_prefix = args.save_prefix
    if save_prefix is None:
        save_prefix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
    params = [p for p in model.parameters() if p.requires_grad]
    optim = torch.optim.Adam(params, lr=lr, weight_decay=wd)
    print(f'# Using save prefix "{save_prefix}"', file=output)
    print(f"# Training with Adam: lr={lr}, weight_decay={wd}", file=output)
    print(f"\tnum_epochs: {num_epochs}", file=output)
    print(f"\tepoch_scale: {report_steps}", file=output)
    print(f"\tbatch_size: {batch_size}", file=output)
    print(f"\tinteraction weight: {inter_weight}", file=output)
    print(f"\tcontact map weight: {cmap_weight}", file=output)
    output.flush()
    batch_report_fmt = "# [{}/{}] training {:.1%}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}"
    epoch_report_fmt = "# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}"
    # NOTE: N over-counts when the last batch is short; progress may top out
    # slightly below 100%.
    N = len(pairs_train_iterator) * batch_size
    for epoch in range(num_epochs):
        model.train()
        n = 0
        loss_accum = 0
        acc_accum = 0
        mse_accum = 0
        # Train batches
        for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f"Epoch {epoch+1}/{num_epochs}",total=len(pairs_train_iterator)):
            loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight)
            n += b
            # Running (sample-weighted) averages of loss, accuracy and MSE.
            delta = b * (loss - loss_accum)
            loss_accum += delta / n
            delta = correct - b * acc_accum
            acc_accum += delta / n
            delta = b * (mse - mse_accum)
            mse_accum += delta / n
            # Report roughly every 100 samples.
            report = (n - b) // 100 < n // 100
            optim.step()
            optim.zero_grad()
            model.clip()
            if report:
                tokens = [
                    epoch + 1,
                    num_epochs,
                    n / N,
                    loss_accum,
                    acc_accum,
                    mse_accum,
                ]
                if output is not sys.stdout:
                    print(batch_report_fmt.format(*tokens), file=output)
                output.flush()
        if (epoch + 1) % report_steps == 0:
            model.eval()
            with torch.no_grad():
                (
                    inter_loss,
                    inter_correct,
                    inter_mse,
                    inter_pr,
                    inter_re,
                    inter_f1,
                    inter_aupr,
                ) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda)
                tokens = [
                    epoch + 1,
                    num_epochs,
                    inter_loss,
                    inter_correct / (len(pairs_test_iterator) * batch_size),
                    inter_mse,
                    inter_pr,
                    inter_re,
                    inter_f1,
                    inter_aupr,
                ]
                print(epoch_report_fmt.format(*tokens), file=output)
            output.flush()
            # Save the model
            if save_prefix is not None:
                save_path = save_prefix + "_epoch" + str(epoch + 1).zfill(digits) + ".sav"
                print(f"# Saving model to {save_path}", file=output)
                model.cpu()
                torch.save(model, save_path)
                if use_cuda:
                    model.cuda()
            output.flush()
    if save_prefix is not None:
        save_path = save_prefix + "_final.sav"
        print(f"# Saving final model to {save_path}", file=output)
        model.cpu()
        torch.save(model, save_path)
        if use_cuda:
            model.cuda()
    if output is not sys.stdout:
        # Only close files we opened ourselves; the original unconditionally
        # closed `output`, which closed the interpreter's stdout when no
        # outfile was given.
        output.close()
if __name__ == "__main__":
    # Script entry point: build the CLI parser and run training.
    parser = argparse.ArgumentParser(description=__doc__)
    add_args(parser)
    main(parser.parse_args())
| 31.387681 | 140 | 0.609546 |
import sys
import argparse
import h5py
import datetime
import subprocess as sp
import numpy as np
import pandas as pd
import gzip as gz
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import IterableDataset, DataLoader
from sklearn.metrics import average_precision_score as average_precision
import dscript
from dscript.utils import PairedDataset, collate_paired_sequences
from dscript.models.embedding import (
IdentityEmbed,
FullyConnectedEmbed,
)
from dscript.models.contact import ContactCNN
from dscript.models.interaction import ModelInteraction
def add_args(parser):
data_grp = parser.add_argument_group("Data")
proj_grp = parser.add_argument_group("Projection Module")
contact_grp = parser.add_argument_group("Contact Module")
inter_grp = parser.add_argument_group("Interaction Module")
train_grp = parser.add_argument_group("Training")
misc_grp = parser.add_argument_group("Output and Device")
data_grp.add_argument("--train", help="Training data", required=True)
data_grp.add_argument("--val", help="Validation data", required=True)
data_grp.add_argument("--embedding", help="h5 file with embedded sequences", required=True)
data_grp.add_argument(
"--no-augment",
action="store_false",
dest='augment',
help="Set flag to not augment data by adding (B A) for all pairs (A B)",
)
proj_grp.add_argument(
"--projection-dim",
type=int,
default=100,
help="Dimension of embedding projection layer (default: 100)",
)
proj_grp.add_argument(
"--dropout-p",
type=float,
default=0.5,
help="Parameter p for embedding dropout layer (default: 0.5)",
)
contact_grp.add_argument(
"--hidden-dim",
type=int,
default=50,
help="Number of hidden units for comparison layer in contact prediction (default: 50)",
)
contact_grp.add_argument(
"--kernel-width",
type=int,
default=7,
help="Width of convolutional filter for contact prediction (default: 7)",
)
inter_grp.add_argument(
"--no-w",
action="store_false",
dest='use_w',
help="Don't use weight matrix in interaction prediction model",
)
inter_grp.add_argument(
"--pool-width",
type=int,
default=9,
help="Size of max-pool in interaction model (default: 9)",
)
# Training
train_grp.add_argument(
"--negative-ratio",
type=int,
default=10,
help="Number of negative training samples for each positive training sample (default: 10)",
)
train_grp.add_argument(
"--epoch-scale",
type=int,
default=1,
help="Report heldout performance every this many epochs (default: 1)",
)
train_grp.add_argument("--num-epochs", type=int, default=10, help="Number of epochs (default: 10)")
train_grp.add_argument("--batch-size", type=int, default=25, help="Minibatch size (default: 25)")
train_grp.add_argument("--weight-decay", type=float, default=0, help="L2 regularization (default: 0)")
train_grp.add_argument("--lr", type=float, default=0.001, help="Learning rate (default: 0.001)")
train_grp.add_argument(
"--lambda",
dest="lambda_",
type=float,
default=0.35,
help="Weight on the similarity objective (default: 0.35)",
)
# Output
misc_grp.add_argument("-o", "--outfile", help="Output file path (default: stdout)")
misc_grp.add_argument("--save-prefix", help="Path prefix for saving models")
misc_grp.add_argument("-d", "--device", type=int, default=-1, help="Compute device to use")
misc_grp.add_argument("--checkpoint", help="Checkpoint model to start training from")
return parser
def predict_interaction(model, n0, n1, tensors, use_cuda):
b = len(n0)
p_hat = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
p_hat.append(model.predict(z_a, z_b))
p_hat = torch.stack(p_hat, 0)
return p_hat
def predict_cmap_interaction(model, n0, n1, tensors, use_cuda):
b = len(n0)
p_hat = []
c_map_mag = []
for i in range(b):
z_a = tensors[n0[i]]
z_b = tensors[n1[i]]
if use_cuda:
z_a = z_a.cuda()
z_b = z_b.cuda()
cm, ph = model.map_predict(z_a, z_b)
p_hat.append(ph)
c_map_mag.append(torch.mean(cm))
p_hat = torch.stack(p_hat, 0)
c_map_mag = torch.stack(c_map_mag, 0)
return c_map_mag, p_hat
def interaction_grad(model, n0, n1, y, tensors, use_cuda, weight=0.35):
c_map_mag, p_hat = predict_cmap_interaction(model, n0, n1, tensors, use_cuda)
if use_cuda:
y = y.cuda()
y = Variable(y)
bce_loss = F.binary_cross_entropy(p_hat.float(), y.float())
cmap_loss = torch.mean(c_map_mag)
loss = (weight * bce_loss) + ((1 - weight) * cmap_loss)
b = len(p_hat)
# backprop loss
loss.backward()
if use_cuda:
y = y.cpu()
p_hat = p_hat.cpu()
with torch.no_grad():
guess_cutoff = 0.5
p_hat = p_hat.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
y = y.float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
return loss, correct, mse, b
def interaction_eval(model, test_iterator, tensors, use_cuda):
p_hat = []
true_y = []
for n0, n1, y in test_iterator:
p_hat.append(predict_interaction(model, n0, n1, tensors, use_cuda))
true_y.append(y)
y = torch.cat(true_y, 0)
p_hat = torch.cat(p_hat, 0)
if use_cuda:
y.cuda()
p_hat = torch.Tensor([x.cuda() for x in p_hat])
p_hat.cuda()
loss = F.binary_cross_entropy(p_hat.float(), y.float()).item()
b = len(y)
with torch.no_grad():
guess_cutoff = torch.Tensor([0.5]).float()
p_hat = p_hat.float()
y = y.float()
p_guess = (guess_cutoff * torch.ones(b) < p_hat).float()
correct = torch.sum(p_guess == y).item()
mse = torch.mean((y.float() - p_hat) ** 2).item()
tp = torch.sum(y * p_hat).item()
pr = tp / torch.sum(p_hat).item()
re = tp / torch.sum(y).item()
f1 = 2 * pr * re / (pr + re)
y = y.cpu().numpy()
p_hat = p_hat.data.cpu().numpy()
aupr = average_precision(y, p_hat)
return loss, correct, mse, pr, re, f1, aupr
def main(args):
output = args.outfile
if output is None:
output = sys.stdout
else:
output = open(output, "w")
print(f'
if output is not sys.stdout:
print(f'Called as: {" ".join(sys.argv)}')
# Set device
device = args.device
use_cuda = (device >= 0) and torch.cuda.is_available()
if use_cuda:
torch.cuda.set_device(device)
print(
f"# Using CUDA device {device} - {torch.cuda.get_device_name(device)}",
file=output,
)
else:
print("# Using CPU", file=output)
device = "cpu"
batch_size = args.batch_size
train_fi = args.train
test_fi = args.val
augment = args.augment
embedding_h5 = args.embedding
h5fi = h5py.File(embedding_h5, "r")
print(f"# Loading training pairs from {train_fi}...", file=output)
output.flush()
train_df = pd.read_csv(train_fi, sep="\t", header=None)
if augment:
train_n0 = pd.concat((train_df[0], train_df[1]), axis=0).reset_index(drop=True)
train_n1 = pd.concat((train_df[1], train_df[0]), axis=0).reset_index(drop=True)
train_y = torch.from_numpy(pd.concat((train_df[2], train_df[2])).values)
else:
train_n0, train_n1 = train_df[0], train_df[1]
train_y = torch.from_numpy(train_df[2].values)
print(f"# Loading testing pairs from {test_fi}...", file=output)
output.flush()
test_df = pd.read_csv(test_fi, sep="\t", header=None)
test_n0, test_n1 = test_df[0], test_df[1]
test_y = torch.from_numpy(test_df[2].values)
output.flush()
train_pairs = PairedDataset(train_n0, train_n1, train_y)
pairs_train_iterator = torch.utils.data.DataLoader(
train_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
test_pairs = PairedDataset(test_n0, test_n1, test_y)
pairs_test_iterator = torch.utils.data.DataLoader(
test_pairs,
batch_size=batch_size,
collate_fn=collate_paired_sequences,
shuffle=True,
)
output.flush()
print(f"# Loading embeddings", file=output)
tensors = {}
all_proteins = set(train_n0).union(set(train_n1)).union(set(test_n0)).union(set(test_n1))
for prot_name in tqdm(all_proteins):
tensors[prot_name] = torch.from_numpy(h5fi[prot_name][:, :])
use_cuda = (args.device > -1) and torch.cuda.is_available()
if args.checkpoint is None:
projection_dim = args.projection_dim
dropout_p = args.dropout_p
embedding = FullyConnectedEmbed(6165, projection_dim, dropout=dropout_p)
print("# Initializing embedding model with:", file=output)
print(f"\tprojection_dim: {projection_dim}", file=output)
print(f"\tdropout_p: {dropout_p}", file=output)
# Create contact model
hidden_dim = args.hidden_dim
kernel_width = args.kernel_width
print("# Initializing contact model with:", file=output)
print(f"\thidden_dim: {hidden_dim}", file=output)
print(f"\tkernel_width: {kernel_width}", file=output)
contact = ContactCNN(projection_dim, hidden_dim, kernel_width)
# Create the full model
use_W = args.use_w
pool_width = args.pool_width
print("# Initializing interaction model with:", file=output)
print(f"\tpool_width: {pool_width}", file=output)
print(f"\tuse_w: {use_W}", file=output)
model = ModelInteraction(embedding, contact, use_W=use_W, pool_size=pool_width)
print(model, file=output)
else:
print("# Loading model from checkpoint {}".format(args.checkpoint), file=output)
model = torch.load(args.checkpoint)
model.use_cuda = use_cuda
if use_cuda:
model = model.cuda()
# Train the model
lr = args.lr
wd = args.weight_decay
num_epochs = args.num_epochs
batch_size = args.batch_size
report_steps = args.epoch_scale
inter_weight = args.lambda_
cmap_weight = 1 - inter_weight
digits = int(np.floor(np.log10(num_epochs))) + 1
save_prefix = args.save_prefix
if save_prefix is None:
save_prefix = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M")
params = [p for p in model.parameters() if p.requires_grad]
optim = torch.optim.Adam(params, lr=lr, weight_decay=wd)
print(f'
print(f"# Training with Adam: lr={lr}, weight_decay={wd}", file=output)
print(f"\tnum_epochs: {num_epochs}", file=output)
print(f"\tepoch_scale: {report_steps}", file=output)
print(f"\tbatch_size: {batch_size}", file=output)
print(f"\tinteraction weight: {inter_weight}", file=output)
print(f"\tcontact map weight: {cmap_weight}", file=output)
output.flush()
batch_report_fmt = "# [{}/{}] training {:.1%}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}"
epoch_report_fmt = "# Finished Epoch {}/{}: Loss={:.6}, Accuracy={:.3%}, MSE={:.6}, Precision={:.6}, Recall={:.6}, F1={:.6}, AUPR={:.6}"
N = len(pairs_train_iterator) * batch_size
for epoch in range(num_epochs):
model.train()
n = 0
loss_accum = 0
acc_accum = 0
mse_accum = 0
# Train batches
for (z0, z1, y) in tqdm(pairs_train_iterator, desc=f"Epoch {epoch+1}/{num_epochs}",total=len(pairs_train_iterator)):
loss, correct, mse, b = interaction_grad(model, z0, z1, y, tensors, use_cuda, weight=inter_weight)
n += b
delta = b * (loss - loss_accum)
loss_accum += delta / n
delta = correct - b * acc_accum
acc_accum += delta / n
delta = b * (mse - mse_accum)
mse_accum += delta / n
report = (n - b) // 100 < n // 100
optim.step()
optim.zero_grad()
model.clip()
if report:
tokens = [
epoch + 1,
num_epochs,
n / N,
loss_accum,
acc_accum,
mse_accum,
]
if output is not sys.stdout:
print(batch_report_fmt.format(*tokens), file=output)
output.flush()
if (epoch + 1) % report_steps == 0:
model.eval()
with torch.no_grad():
(
inter_loss,
inter_correct,
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
) = interaction_eval(model, pairs_test_iterator, tensors, use_cuda)
tokens = [
epoch + 1,
num_epochs,
inter_loss,
inter_correct / (len(pairs_test_iterator) * batch_size),
inter_mse,
inter_pr,
inter_re,
inter_f1,
inter_aupr,
]
print(epoch_report_fmt.format(*tokens), file=output)
output.flush()
# Save the model
if save_prefix is not None:
save_path = save_prefix + "_epoch" + str(epoch + 1).zfill(digits) + ".sav"
print(f"# Saving model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.flush()
if save_prefix is not None:
save_path = save_prefix + "_final.sav"
print(f"# Saving final model to {save_path}", file=output)
model.cpu()
torch.save(model, save_path)
if use_cuda:
model.cuda()
output.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
add_args(parser)
main(parser.parse_args())
| true | true |
f714e944300f9dc8d4448ae55e5b7c4d463b66f6 | 667 | py | Python | setup.py | ameya98/roc2pr | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | 1 | 2020-09-08T14:51:48.000Z | 2020-09-08T14:51:48.000Z | setup.py | ameya98/pr2roc | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | null | null | null | setup.py | ameya98/pr2roc | ab19d7552e2e9ae32ca00a1be4a17b29a3f915fa | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pr2roc",
version="0.0.1",
author="Ameya Daigavane",
author_email="ameya.d.98@gmail.com",
description="A package to resample precision-recall curves correctly.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ameya98/pr2roc",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.7',
) | 30.318182 | 75 | 0.667166 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pr2roc",
version="0.0.1",
author="Ameya Daigavane",
author_email="ameya.d.98@gmail.com",
description="A package to resample precision-recall curves correctly.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/ameya98/pr2roc",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=2.7',
) | true | true |
f714eaabcfc91716d629e476a3730ed8f6d6ff30 | 2,766 | py | Python | core/converter/coordinate_converter.py | tringuyenminh23/chronos | cf20e65ca81b7cd2f3000383e870902b421fe3b0 | [
"MIT"
] | null | null | null | core/converter/coordinate_converter.py | tringuyenminh23/chronos | cf20e65ca81b7cd2f3000383e870902b421fe3b0 | [
"MIT"
] | null | null | null | core/converter/coordinate_converter.py | tringuyenminh23/chronos | cf20e65ca81b7cd2f3000383e870902b421fe3b0 | [
"MIT"
] | null | null | null | import requests
from abc import ABC, abstractmethod
from typing import Tuple, List
import json
class CoordinateConverter(ABC):
    """Abstract interface for converting coordinates between spatial reference systems."""

    def __init__(self):
        super().__init__()

    @abstractmethod
    def convert_coordinate(self, coordinate: Tuple, base_system_code, target_system_code):
        """Convert a single (x, y) or (x, y, z) tuple from the base system to the target system."""

    @abstractmethod
    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        """Convert a batch of coordinate tuples from the base system to the target system."""
class EpsgCoordinateConverter(CoordinateConverter):
    """Coordinate converter backed by the epsg.io transformation web service.

    Builds ``http://epsg.io/trans?...`` requests and parses the JSON replies;
    service errors surface as ``requests`` exceptions via ``raise_for_status()``.
    """
    def __init__(self):
        super().__init__()
        # Base endpoint of the epsg.io coordinate transformation API.
        self.base_url = 'http://epsg.io/trans?'
    def convert_coordinate(self, coordinate: Tuple, base_system_code: str, target_system_code: str):
        """
        :param coordinate: tuple of 2 or 3 coordinate values (x, y[, z])
        :param base_system_code: source system code in epsg in string format (ESPG:3879 -> 3879)
        :param target_system_code: target system code
        :return: converted (x, y) pair -- note the service's z result, if any, is not returned
        :raises ValueError: if the tuple does not contain 2 or 3 values
        """
        if len(coordinate) < 2 or len(coordinate) > 3:
            raise ValueError('Coordinate must be a tuple contains (x, y) or (x, y, z) coordinates')
        if len(coordinate) == 2:
            query = f"x={coordinate[0]}&y={coordinate[1]}"
        else:
            query = f"x={coordinate[0]}&y={coordinate[1]}&z={coordinate[2]}"
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        return result_as_json['x'], result_as_json['y']
    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        """
        :param coordinates: list of tuples of 2 or 3 coordinate values
        :param base_system_code: source system code in epsg in string format (ESPG:3879 -> 3879)
        :param target_system_code: target system code
        :return: list of converted coordinate tuples (same arity as the input tuples)
        :raises ValueError: if the first tuple does not contain 2 or 3 values
        """
        if len(coordinates[0]) < 2 or len(coordinates[0]) > 3:
            raise ValueError('Coordinates must be a list of tuple contains (x, y) or (x, y, z) coordinates')
        # BUG FIX: the old loop appended ';' when idx != len(coor) - 1 -- the
        # TUPLE length, not the list length -- which produced missing and/or
        # trailing separators. Joining per-coordinate strings with ';' is
        # correct by construction ("data=x,y;x,y;...").
        query = 'data=' + ';'.join(','.join(str(c) for c in coor) for coor in coordinates)
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        if len(coordinates[0]) == 2:
            results = [(t['x'], t['y']) for t in result_as_json]
        else:
            results = [(t['x'], t['y'], t['z']) for t in result_as_json]
        return results
| 39.514286 | 108 | 0.630875 | import requests
from abc import ABC, abstractmethod
from typing import Tuple, List
import json
class CoordinateConverter(ABC):
    """Abstract interface for converting coordinates between spatial reference systems."""

    def __init__(self):
        super().__init__()

    @abstractmethod
    def convert_coordinate(self, coordinate: Tuple, base_system_code, target_system_code):
        """Convert a single (x, y) or (x, y, z) tuple from the base system to the target system."""

    @abstractmethod
    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        """Convert a batch of coordinate tuples from the base system to the target system."""
class EpsgCoordinateConverter(CoordinateConverter):
    """Coordinate converter backed by the epsg.io transformation web service.

    Builds ``http://epsg.io/trans?...`` requests and parses the JSON replies;
    service errors surface as ``requests`` exceptions via ``raise_for_status()``.
    """
    def __init__(self):
        super().__init__()
        # Base endpoint of the epsg.io coordinate transformation API.
        self.base_url = 'http://epsg.io/trans?'
    def convert_coordinate(self, coordinate: Tuple, base_system_code: str, target_system_code: str):
        """Convert one (x, y[, z]) tuple via the epsg.io service.

        :return: converted (x, y) pair -- note the service's z result, if any, is not returned
        :raises ValueError: if the tuple does not contain 2 or 3 values
        """
        if len(coordinate) < 2 or len(coordinate) > 3:
            raise ValueError('Coordinate must be a tuple contains (x, y) or (x, y, z) coordinates')
        if len(coordinate) == 2:
            query = f"x={coordinate[0]}&y={coordinate[1]}"
        else:
            query = f"x={coordinate[0]}&y={coordinate[1]}&z={coordinate[2]}"
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        return result_as_json['x'], result_as_json['y']
    def convert_multiple_coordinates(self, coordinates: List[Tuple], base_system_code, target_system_code):
        """Convert a batch of (x, y[, z]) tuples via the epsg.io service.

        :return: list of converted coordinate tuples (same arity as the input tuples)
        :raises ValueError: if the first tuple does not contain 2 or 3 values
        """
        if len(coordinates[0]) < 2 or len(coordinates[0]) > 3:
            raise ValueError('Coordinates must be a list of tuple contains (x, y) or (x, y, z) coordinates')
        # BUG FIX: the old loop appended ';' when idx != len(coor) - 1 -- the
        # TUPLE length, not the list length -- which produced missing and/or
        # trailing separators. Joining per-coordinate strings with ';' is
        # correct by construction ("data=x,y;x,y;...").
        query = 'data=' + ';'.join(','.join(str(c) for c in coor) for coor in coordinates)
        query += f"&s_srs={base_system_code}&t_srs={target_system_code}"
        r = requests.get(self.base_url + query)
        r.raise_for_status()
        result_as_json = json.loads(r.content.decode('latin1'))
        if len(coordinates[0]) == 2:
            results = [(t['x'], t['y']) for t in result_as_json]
        else:
            results = [(t['x'], t['y'], t['z']) for t in result_as_json]
        return results
| true | true |
f714ec32dd2c3ee61a6b4c3f6009a99ad349e191 | 314 | py | Python | day4/1.py | bujiie/adventofcode2015 | 40d04b078bf9ebd90a544e4259c65cb77de36928 | [
"MIT"
] | null | null | null | day4/1.py | bujiie/adventofcode2015 | 40d04b078bf9ebd90a544e4259c65cb77de36928 | [
"MIT"
] | null | null | null | day4/1.py | bujiie/adventofcode2015 | 40d04b078bf9ebd90a544e4259c65cb77de36928 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Advent of Code 2015, day 4 (part 1).

Read the secret key from stdin (or a file named on the command line) and
find the smallest non-negative integer suffix whose MD5 hex digest starts
with five zeros; print that suffix and the digest.
"""
import fileinput
import hashlib

# `secret` instead of the original `hash`, which shadowed the builtin.
with fileinput.input() as fp:
    secret = fp.readline().strip()

ZEROS = 5
prefix = '0' * ZEROS  # hoisted: the target prefix never changes per iteration
i = 0
while True:
    # MD5 is fine here: this is a puzzle checksum, not a security context.
    digest = hashlib.md5(f'{secret}{i}'.encode()).hexdigest()
    if digest.startswith(prefix):
        break
    i += 1

print(i)
print(digest)
| 12.56 | 30 | 0.636943 |
"""Advent of Code 2015, day 4 (part 1).

Read the secret key from stdin (or a file named on the command line) and
find the smallest non-negative integer suffix whose MD5 hex digest starts
with five zeros; print that suffix and the digest.
"""
import fileinput
import hashlib

# `secret` instead of the original `hash`, which shadowed the builtin.
with fileinput.input() as fp:
    secret = fp.readline().strip()

ZEROS = 5
prefix = '0' * ZEROS  # hoisted: the target prefix never changes per iteration
i = 0
while True:
    # MD5 is fine here: this is a puzzle checksum, not a security context.
    digest = hashlib.md5(f'{secret}{i}'.encode()).hexdigest()
    if digest.startswith(prefix):
        break
    i += 1

print(i)
print(digest)
| true | true |
f714edb5b8db1159d14893789256eff798138f9d | 17,348 | py | Python | thespian/test/test_deadLettering.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 210 | 2015-08-31T19:39:34.000Z | 2020-01-10T08:07:48.000Z | thespian/test/test_deadLettering.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 85 | 2017-04-08T19:28:42.000Z | 2022-03-23T15:25:49.000Z | thespian/test/test_deadLettering.py | dendron2000/Thespian | 0acbc5a0803f6d2be3421ea6eb08c6beecbf3802 | [
"MIT"
] | 47 | 2015-09-01T19:24:20.000Z | 2020-01-02T20:03:05.000Z | """Verify DeadLetter handling behavior.
Current behavior is that an Actor may register for DeadLetter
handling. If it is registered, any message sent to an Actor that is
no longer present will be redirected to the register DeadLetter actor
(in its original form).
On exit of the DeadLetter handling Actor, the system reverts to the
default where dead letters are discarded.
If another Actor registers for DeadLetter handling, the new
registration will supercede the old registration. The original
handler is not aware of this, and will no longer receive DeadLetters,
even if the new handler de-registers.
Dead letters are handled by the local ActorSystem. Even if the parent
of an Actor is located in a separate system, the DeadLetter handler is
in the local System.
"""
import time
from thespian.actors import *
from thespian.test import *
from datetime import timedelta
# Upper bound applied to every ask() in this module.
ASK_WAIT = timedelta(seconds=15)


# PEP 8 (E731): named functions instead of lambda assignments, so tracebacks
# and introspection show meaningful names. Behavior is unchanged.
def dead_routing_wait():
    """Pause long enough for a dead letter to be routed to its handler."""
    return inTestDelay(timedelta(milliseconds=125))


def actor_exit_wait():
    """Pause long enough for an exiting actor to be torn down."""
    return inTestDelay(timedelta(milliseconds=50))


def actor_create_wait():
    """Pause long enough for a (possibly remote) actor to be created."""
    return inTestDelay(timedelta(milliseconds=750))


def actor_do_stuff_wait():
    """Generous pause for actors to finish processing outstanding messages."""
    return inTestDelay(timedelta(milliseconds=500))
class DLHandler(Actor):
    """Actor that can (de)register itself as the system's dead-letter handler.

    Commands: 'Start' registers, 'Stop' deregisters, 'Count' replies with the
    number of dead letters seen so far. Any other non-system message is
    treated as a routed dead letter and tallied.
    """

    def receiveMessage(self, msg, sender):
        if isinstance(msg, ActorExitRequest):
            return  # nothing to clean up on shutdown
        if msg == 'Start':
            self.handleDeadLetters()
        elif msg == 'Stop':
            self.handleDeadLetters(False)
        elif msg == 'Count':
            self.send(sender, getattr(self, 'numDeadLetters', 0))
        else:
            # Anything else must be a routed dead letter: count it.
            self.numDeadLetters = getattr(self, 'numDeadLetters', 0) + 1
class DLParent(Actor):
    """Transparent proxy in front of a lazily created DLHandler child.

    Outside traffic is forwarded down to the child (the string 'exit please'
    becomes an ActorExitRequest); replies from the child are relayed up to
    whoever sent the most recent outside request.
    """

    def receiveMessage(self, msg, sender):
        if isinstance(msg, ActorSystemMessage):
            return  # ignore system traffic aimed at the proxy itself
        # Create the child on first real traffic.
        if not getattr(self, 'dlchild', None):
            self.dlchild = self.createActor(DLHandler)
        if self.dlchild == sender:
            # Child -> outside: relay to the last external requester.
            self.send(self.lastSender, msg)
            return
        # Outside -> child: remember the requester, then forward.
        self.lastSender = sender
        forwarded = ActorExitRequest() if msg == 'exit please' else msg
        self.send(self.dlchild, forwarded)
# UDP does not provide the ability to validate delivery of messages
# (outside of higher-level validation handshakes), so this system base
# cannot support Dead Lettering (as documented).
class TestFuncDeadLettering(object):
    """Functional tests for dead-letter registration, delivery, and hand-off.

    Tests numbered 0x drive a DLHandler directly; the matching 1x tests run
    the same scenario through a DLParent proxy in front of the handler.
    """
    def checkNewDLCount(self, asys, handlerAddress, oldCount):
        """Poll handlerAddress until its 'Count' exceeds oldCount; return the new count."""
        #asys = ActorSystem()
        cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
        # Dead-letter routing is asynchronous, so retry with short pauses.
        retries = 30
        while cnt <= oldCount and retries:
            retries -= 1
            dead_routing_wait()
            cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
        assert cnt > oldCount
        return cnt
    def test01_registerDeadLetter(self, asys, run_unstable_tests):
        """Registering and deregistering alone produces no dead letters."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLHandler)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Stop')
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    def test11_registerDeadLetterSubActor(self, asys, run_unstable_tests):
        """Same as test01, but through the DLParent proxy."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLParent)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Stop')
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    def test02_GetDeadLetter(self, asys, run_unstable_tests):
        """Messages to a dead actor reach the handler; none arrive after 'Stop'."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLHandler)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        # Kill a pawn actor so messages to it become dead letters.
        pawn = asys.createActor(DLHandler)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(handler, 'Stop')
        actor_exit_wait()
        # After deregistration the count must stay frozen.
        asys.tell(pawn, 'another')
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'and another')
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
    def test12_GetDeadLetterSubActor(self, asys, run_unstable_tests):
        """Same as test02, but through the DLParent proxy."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLParent)
        r = asys.ask(handler, 'Count', ASK_WAIT)
        assert 0 == r
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        # 'exit please' makes the proxy kill its internal child.
        pawn = asys.createActor(DLParent)
        asys.tell(pawn, 'exit please')
        actor_create_wait()
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(handler, 'Stop')
        actor_exit_wait()
        asys.tell(pawn, 'another')
        r = asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt == r
        asys.tell(pawn, 'and another')
        r = asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt == r
    def test03_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
        """A second 'Start' from the same handler is a no-op re-registration."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLHandler)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        # Create another actor and shut it down so we can capture its dead letters
        pawn = asys.createActor(DLHandler)
        asys.tell(pawn, ActorExitRequest())
        actor_do_stuff_wait()
        # Send a couple of messages and verify they are each passed to the dead letter handler
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        # Another start has no effect; remains the dead letter handler.
        asys.tell(handler, 'Start')
        actor_do_stuff_wait()
        # Send another couple of messages to the dead actor and verify dead letter receipt.
        asys.tell(pawn, 'another')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'and another')
        cnt = self.checkNewDLCount(asys, handler, cnt)
    def test13_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
        """Same as test03, but through the DLParent proxy."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLParent)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        # Create another actor and shut it down so we can capture its dead letters
        pawn = asys.createActor(DLParent)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        # Send a couple of messages and verify they are each passed to the dead letter handler
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        # Another start has no effect; remains the dead letter handler.
        asys.tell(handler, 'Start')
        actor_do_stuff_wait()
        # Send another couple of messages to the dead actor and verify dead letter receipt.
        asys.tell(pawn, 'another')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'and another')
        cnt = self.checkNewDLCount(asys, handler, cnt)
    def test04_DLMultipleHandlers(self, asys, run_unstable_tests):
        """A newer registrant supersedes the old handler; only a 'Start' can take it back."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLHandler)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        pawn = asys.createActor(DLHandler)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        # handler2 registers and becomes the sole dead-letter recipient.
        handler2 = asys.createActor(DLHandler)
        asys.tell(handler2, 'Start')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
        cnt2 = self.checkNewDLCount(asys, handler2, -1)
        asys.tell(pawn, 'another')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'and another')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Stop') # no effect
        actor_do_stuff_wait()
        asys.tell(pawn, 'more messages')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Once handler2 deregisters, dead letters are simply dropped.
        asys.tell(handler2, 'Stop')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages repeated')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again repeated')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        # The original handler re-registers and receives dead letters again.
        asys.tell(handler, 'Start')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages repeated reprised')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again repeated reprised')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
    def test14_DLMultipleHandlers(self, asys, run_unstable_tests):
        """Same as test04, but through DLParent proxies."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLParent)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        cnt = self.checkNewDLCount(asys, handler, -1)
        pawn = asys.createActor(DLParent)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        asys.tell(pawn, 'hello')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        asys.tell(pawn, 'hi')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        # handler2 registers and becomes the sole dead-letter recipient.
        handler2 = asys.createActor(DLParent)
        asys.tell(handler2, 'Start')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
        cnt2 = self.checkNewDLCount(asys, handler2, -1)
        asys.tell(pawn, 'another')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'and another')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Stop') # no effect
        actor_do_stuff_wait()
        asys.tell(pawn, 'more messages')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Once handler2 deregisters, dead letters are simply dropped.
        asys.tell(handler2, 'Stop')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages repeated')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again repeated')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        # The original handler re-registers and receives dead letters again.
        asys.tell(handler, 'Start')
        actor_do_stuff_wait()
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages repeated reprised')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
        asys.tell(pawn, 'more messages again repeated reprised')
        cnt = self.checkNewDLCount(asys, handler, cnt)
        assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
    def test05_DLAutoRemoval(self, asys, run_unstable_tests):
        """Killing the active handler reverts to discarding dead letters until re-registration."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLHandler)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        handler2 = asys.createActor(DLHandler)
        asys.tell(handler2, 'Start')
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
        # Create actor and kill it so messages to it will be dead-letter routed.
        pawn = asys.createActor(DLHandler)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        # Send a message and make sure the later dead-letter handler receives it
        cnt = 0
        cnt2 = 0
        asys.tell(pawn, 'hello')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Again, to ensure no round-robining is occurring
        asys.tell(pawn, 'hi')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Now remove dead letter handler; ensure dead letters are dropped
        asys.tell(handler2, ActorExitRequest())
        actor_exit_wait()
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'another')
        actor_do_stuff_wait()
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        # Tell first dead letter handler to re-register
        asys.tell(handler, 'Start')
        # n.b. tell or ask might create temporary actor, so can't assume startnum == 0
        cnt = asys.ask(handler, 'Count', ASK_WAIT)
        # Verify first dead letter handler is getting dead letters again
        asys.tell(pawn, 'another again')
        cnt = self.checkNewDLCount(asys, handler, cnt)
    def test15_DLAutoRemoval(self, asys, run_unstable_tests):
        """Same as test05, but through DLParent proxies."""
        unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
        handler = asys.createActor(DLParent)
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(handler, 'Start')
        handler2 = asys.createActor(DLParent)
        asys.tell(handler2, 'Start')
        actor_do_stuff_wait()
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
        # Create actor and kill it so messages to it will be dead-letter routed.
        pawn = asys.createActor(DLParent)
        asys.tell(pawn, ActorExitRequest())
        actor_exit_wait()
        # Send a message and make sure the later dead-letter handler receives it
        cnt = 0
        cnt2 = 0
        asys.tell(pawn, 'hello')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Again, to ensure no round-robining is occurring
        asys.tell(pawn, 'hi')
        cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
        assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
        # Now remove dead letter handler; ensure dead letters are dropped
        asys.tell(handler2, ActorExitRequest())
        actor_exit_wait()
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        asys.tell(pawn, 'another')
        actor_do_stuff_wait()
        assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
        # Tell first dead letter handler to re-register
        asys.tell(handler, 'Start')
        actor_do_stuff_wait()
        # n.b. tell or ask might create temporary actor, so can't assume startnum == 0
        cnt = asys.ask(handler, 'Count', ASK_WAIT)
        # Verify first dead letter handler is getting dead letters again
        asys.tell(pawn, 'another again')
        cnt = self.checkNewDLCount(asys, handler, cnt)
#KWQ: test multiple actor systems
| 37.227468 | 94 | 0.640016 |
import time
from thespian.actors import *
from thespian.test import *
from datetime import timedelta
# Upper bound applied to every ask() in this module.
ASK_WAIT = timedelta(seconds=15)


# PEP 8 (E731): named functions instead of lambda assignments, so tracebacks
# and introspection show meaningful names. Behavior is unchanged.
def dead_routing_wait():
    """Pause long enough for a dead letter to be routed to its handler."""
    return inTestDelay(timedelta(milliseconds=125))


def actor_exit_wait():
    """Pause long enough for an exiting actor to be torn down."""
    return inTestDelay(timedelta(milliseconds=50))


def actor_create_wait():
    """Pause long enough for a (possibly remote) actor to be created."""
    return inTestDelay(timedelta(milliseconds=750))


def actor_do_stuff_wait():
    """Generous pause for actors to finish processing outstanding messages."""
    return inTestDelay(timedelta(milliseconds=500))
class DLHandler(Actor):
    """Actor that can (de)register itself as the system's dead-letter handler.

    Commands: 'Start' registers, 'Stop' deregisters, 'Count' replies with the
    number of dead letters seen so far. Any other non-system message is
    treated as a routed dead letter and tallied.
    """

    def receiveMessage(self, msg, sender):
        if isinstance(msg, ActorExitRequest):
            return  # nothing to clean up on shutdown
        if msg == 'Start':
            self.handleDeadLetters()
        elif msg == 'Stop':
            self.handleDeadLetters(False)
        elif msg == 'Count':
            self.send(sender, getattr(self, 'numDeadLetters', 0))
        else:
            # Anything else must be a routed dead letter: count it.
            self.numDeadLetters = getattr(self, 'numDeadLetters', 0) + 1
class DLParent(Actor):
    """Transparent proxy in front of a lazily created DLHandler child.

    Outside traffic is forwarded down to the child (the string 'exit please'
    becomes an ActorExitRequest); replies from the child are relayed up to
    whoever sent the most recent outside request.
    """

    def receiveMessage(self, msg, sender):
        if isinstance(msg, ActorSystemMessage):
            return  # ignore system traffic aimed at the proxy itself
        # Create the child on first real traffic.
        if not getattr(self, 'dlchild', None):
            self.dlchild = self.createActor(DLHandler)
        if self.dlchild == sender:
            # Child -> outside: relay to the last external requester.
            self.send(self.lastSender, msg)
            return
        # Outside -> child: remember the requester, then forward.
        self.lastSender = sender
        forwarded = ActorExitRequest() if msg == 'exit please' else msg
        self.send(self.dlchild, forwarded)
class TestFuncDeadLettering(object):
def checkNewDLCount(self, asys, handlerAddress, oldCount):
cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
retries = 30
while cnt <= oldCount and retries:
retries -= 1
dead_routing_wait()
cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
assert cnt > oldCount
return cnt
def test01_registerDeadLetter(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
def test11_registerDeadLetterSubActor(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
def test02_GetDeadLetter(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Stop')
actor_exit_wait()
asys.tell(pawn, 'another')
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
def test12_GetDeadLetterSubActor(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
r = asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == r
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLParent)
asys.tell(pawn, 'exit please')
actor_create_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Stop')
actor_exit_wait()
asys.tell(pawn, 'another')
r = asys.ask(handler, 'Count', ASK_WAIT)
assert cnt == r
asys.tell(pawn, 'and another')
r = asys.ask(handler, 'Count', ASK_WAIT)
assert cnt == r
def test03_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_do_stuff_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
asys.tell(pawn, 'another')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'and another')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test13_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLParent)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
asys.tell(pawn, 'another')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'and another')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test04_DLMultipleHandlers(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
handler2 = asys.createActor(DLHandler)
asys.tell(handler2, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
cnt2 = self.checkNewDLCount(asys, handler2, -1)
asys.tell(pawn, 'another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
actor_do_stuff_wait()
asys.tell(pawn, 'more messages')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler2, 'Stop')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
def test14_DLMultipleHandlers(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLParent)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
handler2 = asys.createActor(DLParent)
asys.tell(handler2, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
cnt2 = self.checkNewDLCount(asys, handler2, -1)
asys.tell(pawn, 'another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
actor_do_stuff_wait()
asys.tell(pawn, 'more messages')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler2, 'Stop')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
def test05_DLAutoRemoval(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
handler2 = asys.createActor(DLHandler)
asys.tell(handler2, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
cnt = 0
cnt2 = 0
asys.tell(pawn, 'hello')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'hi')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler2, ActorExitRequest())
actor_exit_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'another')
actor_do_stuff_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = asys.ask(handler, 'Count', ASK_WAIT)
# Verify first dead letter handler is getting dead letters again
asys.tell(pawn, 'another again')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test15_DLAutoRemoval(self, asys, run_unstable_tests):
    """Same scenario as test05_DLAutoRemoval, but the dead letters are
    handled via a DLParent (which forwards to a child) instead of a plain
    DLHandler.  (Formatting reconstructed; the source chunk had lost its
    indentation.)
    """
    unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
    handler = asys.createActor(DLParent)
    assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    asys.tell(handler, 'Start')
    handler2 = asys.createActor(DLParent)
    asys.tell(handler2, 'Start')
    actor_do_stuff_wait()
    assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
    # Create actor and kill it so messages to it will be dead-letter routed.
    pawn = asys.createActor(DLParent)
    asys.tell(pawn, ActorExitRequest())
    actor_exit_wait()
    # Send a message and make sure the later dead-letter handler receives it
    cnt = 0
    cnt2 = 0
    asys.tell(pawn, 'hello')
    cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
    assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
    # Again, to ensure no round-robining is occurring
    asys.tell(pawn, 'hi')
    cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
    assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
    # Now remove dead letter handler; ensure dead letters are dropped
    asys.tell(handler2, ActorExitRequest())
    actor_exit_wait()
    assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    asys.tell(pawn, 'another')
    actor_do_stuff_wait()
    assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
    # Tell first dead letter handler to re-register
    asys.tell(handler, 'Start')
    actor_do_stuff_wait()
    # n.b. tell or ask might create temporary actor, so can't assume startnum == 0
    cnt = asys.ask(handler, 'Count', ASK_WAIT)
    asys.tell(pawn, 'another again')
    cnt = self.checkNewDLCount(asys, handler, cnt)
| true | true |
f714edba273ac98faf971ba9c109eee8aee8bd86 | 2,833 | py | Python | z2/part2/batch/jm/parser_errors_2/366414300.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 1 | 2020-04-16T12:13:47.000Z | 2020-04-16T12:13:47.000Z | z2/part2/batch/jm/parser_errors_2/366414300.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:50:15.000Z | 2020-05-19T14:58:30.000Z | z2/part2/batch/jm/parser_errors_2/366414300.py | kozakusek/ipp-2020-testy | 09aa008fa53d159672cc7cbf969a6b237e15a7b8 | [
"MIT"
] | 18 | 2020-03-06T17:45:13.000Z | 2020-06-09T19:18:31.000Z | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 366414300
"""
"""
random actions, total chaos
"""
# Generated random-action scenario: a 5x4 board, 4 players, each limited
# to at most 1 contiguous area.  Every expected return value below was
# recorded from a reference implementation; do not reorder these calls.
board = gamma_new(5, 4, 4, 1)
assert board is not None
# Opening moves: each player claims a first field; with the 1-area limit,
# any later move not adjacent to a player's own area must return 0.
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_free_fields(board, 1) == 3
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 4, 2) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 3, 4) == 0
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 4, 3) == 0
# Snapshot the board rendering and compare against the recorded layout
# (row 0 printed last; '.' marks a free field).
board162686102 = gamma_board(board)
assert board162686102 is not None
assert board162686102 == ("..4..\n"
"..2..\n"
".32..\n"
".31..\n")
del board162686102
board162686102 = None
# Second phase: more random moves and state queries.
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 0, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 4, 0) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 4, 3, 0) == 0
# Release all board resources.
gamma_delete(board)
| 30.462366 | 44 | 0.650547 | from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
board = gamma_new(5, 4, 4, 1)
assert board is not None
assert gamma_move(board, 1, 2, 0) == 1
assert gamma_free_fields(board, 1) == 3
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 2, 1) == 1
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 3, 1, 1) == 1
assert gamma_move(board, 4, 2, 3) == 1
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 0, 4) == 0
assert gamma_move(board, 4, 4, 2) == 0
assert gamma_move(board, 4, 1, 1) == 0
assert gamma_move(board, 1, 0, 4) == 0
assert gamma_busy_fields(board, 1) == 1
assert gamma_move(board, 2, 1, 1) == 0
assert gamma_move(board, 3, 1, 0) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 2, 2) == 1
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_move(board, 3, 4, 0) == 0
assert gamma_move(board, 4, 3, 4) == 0
assert gamma_move(board, 4, 0, 1) == 0
assert gamma_move(board, 1, 2, 4) == 0
assert gamma_move(board, 1, 4, 3) == 0
board162686102 = gamma_board(board)
assert board162686102 is not None
assert board162686102 == ("..4..\n"
"..2..\n"
".32..\n"
".31..\n")
del board162686102
board162686102 = None
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_free_fields(board, 2) == 3
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 1, 3, 4) == 0
assert gamma_move(board, 2, 1, 4) == 0
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_busy_fields(board, 2) == 2
assert gamma_move(board, 3, 2, 0) == 0
assert gamma_move(board, 3, 1, 1) == 0
assert gamma_move(board, 4, 0, 0) == 0
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_move(board, 1, 1, 0) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 4, 0) == 0
assert gamma_move(board, 3, 0, 0) == 1
assert gamma_move(board, 4, 2, 3) == 0
assert gamma_move(board, 2, 2, 0) == 0
assert gamma_move(board, 3, 1, 0) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 4, 0) == 0
assert gamma_move(board, 1, 0, 2) == 0
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 2, 3, 0) == 0
assert gamma_move(board, 3, 3, 3) == 0
assert gamma_move(board, 3, 4, 1) == 0
assert gamma_golden_possible(board, 3) == 1
assert gamma_move(board, 4, 3, 1) == 0
assert gamma_move(board, 4, 3, 2) == 0
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 3, 2, 1) == 0
assert gamma_move(board, 4, 3, 0) == 0
gamma_delete(board)
| true | true |
f714edba6f5e54b2903a01e66bac1da132698edc | 1,773 | py | Python | examples/part_c.py | Viasat/salabim_plus | f68b207a469648f75cafdb9a3a0e3f772ad9b08a | [
"MIT"
] | 3 | 2020-07-12T16:18:08.000Z | 2022-03-31T20:29:51.000Z | examples/part_c.py | JackNelson/salabim_plus | f68b207a469648f75cafdb9a3a0e3f772ad9b08a | [
"MIT"
] | null | null | null | examples/part_c.py | JackNelson/salabim_plus | f68b207a469648f75cafdb9a3a0e3f772ad9b08a | [
"MIT"
] | 1 | 2020-06-12T20:19:45.000Z | 2020-06-12T20:19:45.000Z | import misc_tools
import random
def create_routing(env, first_step='op1'):
    """Build the four-step process routing for a part-C assembly.

    The routing is: assembly (op1) -> machining (op2) -> finishing (op3)
    -> part-C storage, where op2 sends failed units to a rework task that
    scraps them after two failures.

    Args:
        env: mapping of named simulation objects (locations, workers,
            storages) referenced by the routing.
        first_step: key of the task the routing starts at.

    Returns:
        The step sequence produced by ``misc_tools.make_steps``.
    """
    # Tasks are sampled in the same order as the original dict literal so
    # the random stream is consumed identically under a fixed seed.
    assembly = misc_tools.make_assembly_step(
        env=env,
        run_time=random.gauss(mu=12, sigma=0.5),
        route_to='op2')

    machining = {
        'location': env['machine_3'],
        'worker': env['technician'],
        'manned': False,
        'setup_time': random.uniform(a=2, b=5),
        'run_time': random.gauss(mu=15, sigma=0.25),
        'teardown_time': 0,
        'transit_time': 1,
        'yield': 0.85,
        'route_to_pass': 'op3',
        'route_to_fail': 'rework',
    }

    finishing = {
        'location': env['common_process'],
        'worker': env['technician'],
        'manned': True,
        'setup_time': random.triangular(low=1, high=4, mode=2),
        'run_time': random.gauss(mu=2, sigma=0.5),
        'teardown_time': random.uniform(a=1, b=2),
        'transit_time': 1,
        'route_to': env['part_c_storage'],
    }

    rework = {
        'location': env['assembly_bench'],
        'worker': env['assembler'],
        'manned': True,
        'setup_time': 0,
        'run_time': random.expovariate(lambd=0.5) * 15,
        'teardown_time': 0,
        'transit_time': 1,
        'fail_count': 2,
        'route_to_pass': 'op2',
        'route_to_fail': env['scrap_storage'],
    }

    tasks = {
        'op1': assembly,
        'op2': machining,
        'op3': finishing,
        'rework': rework,
    }
    return misc_tools.make_steps(first_step=first_step, tasks=tasks)
def get_bom(env):
    """Return the bill of materials for one part-C assembly.

    Each entry maps a component name to the storage location it is pulled
    from and the quantity consumed per assembly.  (The closing line of
    this function was fused with dump metadata in the source; repaired.)

    Args:
        env: mapping of named simulation objects; must provide
            'part_a_kanban' and 'part_b_kanban'.

    Returns:
        dict: component name -> {'location': ..., 'qty': int}.
    """
    return {
        'part_a': {
            'location': env['part_a_kanban'],
            'qty': 1
        },
        'part_b': {
            'location': env['part_b_kanban'],
            'qty': 2
        }
    }
import random
def create_routing(env, first_step='op1'):
tasks = {
'op1': misc_tools.make_assembly_step(
env=env,
run_time=random.gauss(mu=12, sigma=0.5),
route_to='op2'),
'op2': {
'location': env['machine_3'],
'worker': env['technician'],
'manned': False,
'setup_time': random.uniform(a=2, b=5),
'run_time': random.gauss(mu=15, sigma=0.25),
'teardown_time': 0,
'transit_time': 1,
'yield': 0.85,
'route_to_pass': 'op3',
'route_to_fail': 'rework'
},
'op3': {
'location': env['common_process'],
'worker': env['technician'],
'manned': True,
'setup_time': random.triangular(low=1, high=4, mode=2),
'run_time': random.gauss(mu=2, sigma=0.5),
'teardown_time': random.uniform(a=1, b=2),
'transit_time': 1,
'route_to': env['part_c_storage']
},
'rework': {
'location': env['assembly_bench'],
'worker': env['assembler'],
'manned': True,
'setup_time': 0,
'run_time': random.expovariate(lambd=0.5)*15,
'teardown_time': 0,
'transit_time': 1,
'fail_count': 2,
'route_to_pass': 'op2',
'route_to_fail': env['scrap_storage']
}
}
return misc_tools.make_steps(first_step=first_step, tasks=tasks)
def get_bom(env):
return {
'part_a': {
'location': env['part_a_kanban'],
'qty': 1
},
'part_b': {
'location': env['part_b_kanban'],
'qty': 2
}
} | true | true |
f714edde1080126efd87ebb2e29ea0002cb76a78 | 122 | py | Python | irnl_rdt_correction/__main__.py | pylhc/irnl_rdt_correction | 7360728ffaa66b0c9f7b4825c241a3949df18962 | [
"MIT"
] | null | null | null | irnl_rdt_correction/__main__.py | pylhc/irnl_rdt_correction | 7360728ffaa66b0c9f7b4825c241a3949df18962 | [
"MIT"
] | null | null | null | irnl_rdt_correction/__main__.py | pylhc/irnl_rdt_correction | 7360728ffaa66b0c9f7b4825c241a3949df18962 | [
"MIT"
] | null | null | null | from irnl_rdt_correction.irnl_rdt_correction import main, log_setup
if __name__ == '__main__':
    # Configure logging before invoking the correction entry point.
    # (The final line of this guard was fused with dump metadata; repaired.)
    log_setup()
    main()
if __name__ == '__main__':
log_setup()
main() | true | true |
f714eea8b200ced2a6fd1482b2234ba9eb5303f0 | 27 | py | Python | reolink_baichuan/camera_api.py | xannor/reolink_baichuan | 390f469d19eb4308cd390ed2357705aa4fe7fb38 | [
"MIT"
] | 1 | 2021-08-13T16:14:32.000Z | 2021-08-13T16:14:32.000Z | reolink_baichuan/camera_api.py | xannor/reolink_baichuan | 390f469d19eb4308cd390ed2357705aa4fe7fb38 | [
"MIT"
] | null | null | null | reolink_baichuan/camera_api.py | xannor/reolink_baichuan | 390f469d19eb4308cd390ed2357705aa4fe7fb38 | [
"MIT"
] | 1 | 2021-05-15T12:51:34.000Z | 2021-05-15T12:51:34.000Z | """
Reolink Camera API
"""
| 6.75 | 18 | 0.592593 | true | true | |
f714ef557ca4ceb8492ccb8cd834a8c222a15a93 | 6,909 | py | Python | test.py | spk921/RTFNet | 4dad2a63e13e9c302da45ad5a3af4d85cf474694 | [
"MIT"
] | 1 | 2020-11-04T10:38:33.000Z | 2020-11-04T10:38:33.000Z | test.py | spk921/RTFNet | 4dad2a63e13e9c302da45ad5a3af4d85cf474694 | [
"MIT"
] | null | null | null | test.py | spk921/RTFNet | 4dad2a63e13e9c302da45ad5a3af4d85cf474694 | [
"MIT"
] | 1 | 2021-02-25T03:27:16.000Z | 2021-02-25T03:27:16.000Z | # coding:utf-8
# modified from: https://github.com/haqishen/MFNet-pytorch
# By Yuxiang Sun, Aug. 2, 2019
# Email: sun.yuxiang@outlook.com
import os
import argparse
import time
import datetime
import numpy as np
import sys
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from util.MF_dataset import MF_dataset
from model import RTFNet
from sklearn.metrics import confusion_matrix
n_class = 9
data_dir = './dataset/'
model_dir = './weights_backup/'
def main():
    """Evaluate the segmentation model on the chosen MF test split.

    Loads pretrained weights, runs inference frame by frame, accumulates a
    confusion matrix, then reports per-class recall (Acc) and IoU and the
    average inference time.  Uses the module-level globals configured in
    ``__main__``: ``args``, ``model_file``, ``batch_size``, ``data_dir``
    and ``n_class``.
    """
    conf_total = np.zeros((n_class, n_class))

    # NOTE(review): eval() on a CLI argument -- fine for a research script,
    # but never expose this to untrusted input.
    model = eval(args.model_name)(n_class=n_class)
    if args.gpu >= 0: model.cuda(args.gpu)

    print('| loading model file %s... ' % model_file)
    # Remap saved tensors onto the selected GPU, then copy every matching
    # parameter into the freshly constructed model; checkpoint entries with
    # no matching layer are skipped.
    pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu))
    own_state = model.state_dict()
    for name, param in pretrained_weight.items():
        if name not in own_state:
            continue
        own_state[name].copy_(param)
    print('done!')

    test_dataset = MF_dataset(data_dir, args.dataset_name, have_label=True, input_h=args.img_height, input_w=args.img_width)
    test_loader = DataLoader(
        dataset     = test_dataset,
        batch_size  = batch_size,
        shuffle     = False,
        num_workers = args.num_workers,
        pin_memory  = True,
        drop_last   = False
    )
    test_loader.n_iter = len(test_loader)

    ave_time_cost = 0.0

    model.eval()
    with torch.no_grad():
        for it, (images, labels, names) in enumerate(test_loader):
            images = Variable(images)
            labels = Variable(labels)
            if args.gpu >= 0:
                images = images.cuda(args.gpu)
                labels = labels.cuda(args.gpu)

            start_time = time.time()
            logits = model(images)  # logits.size(): mini_batch*num_class*480*640
            end_time = time.time()
            if it > 10:  # ignore the first 10 frames (GPU warm-up)
                ave_time_cost += (end_time - start_time)

            # convert tensor to numpy 1d array
            label = labels.cpu().numpy().squeeze().flatten()
            prediction = logits.argmax(1).cpu().numpy().squeeze().flatten()  # prediction and label are both 1-d array, size: minibatch*640*480

            # Generate the confusion matrix frame-by-frame (vertical axis:
            # groundtruth, horizontal axis: prediction).  The class list is
            # derived from n_class and passed as a keyword because
            # scikit-learn >= 1.0 no longer accepts 'labels' positionally.
            conf = confusion_matrix(label, prediction, labels=list(range(n_class)))
            conf_total += conf

            print("| frame %d/%d, time cost: %.2f ms" %(it+1, test_loader.n_iter, (end_time-start_time)*1000))

    # Calculate recall (Acc) and IoU for each class; classes with an empty
    # denominator are reported as NaN so they can be excluded from means.
    recall_per_class = np.zeros(n_class)
    iou_per_class = np.zeros(n_class)
    for cid in range(0, n_class):  # cid: class id
        if conf_total[cid, 0:].sum() == 0:
            recall_per_class[cid] = np.nan
        else:
            recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, 0:].sum())  # recall (Acc) = TP/TP+FN
        if (conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]) == 0:
            iou_per_class[cid] = np.nan
        else:
            iou_per_class[cid] = float(conf_total[cid, cid]) / float((conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]))  # IoU = TP/TP+FP+FN

    print('\n###########################################################################')
    print('\n| %s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu)))
    print('\n| * the tested dataset name: %s' % args.dataset_name)
    print('| * the tested image count: %d' % test_loader.n_iter)
    print('| * the tested image size: %d*%d' %(args.img_height, args.img_width))
    print("| * recall per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
          %(recall_per_class[0], recall_per_class[1], recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8]))
    print("| * iou per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
          %(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8]))
    print("\n| * average values (np.mean(x)): \n recall: %.6f, iou: %.6f" \
          %(recall_per_class.mean(), iou_per_class.mean()))
    print("| * average values (np.mean(np.nan_to_num(x))): \n recall: %.6f, iou: %.6f" \
          %(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class))))
    print('\n| * the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(test_loader.n_iter-11), 1.0/(ave_time_cost/(test_loader.n_iter-11))))  # ignore the first 10 frames
    #print('\n| * the total confusion matrix: ')
    #np.set_printoptions(precision=8, threshold=np.inf, linewidth=np.inf, suppress=True)
    #print(conf_total)
    print('\n###########################################################################')
if __name__ == '__main__':
    # Command-line interface: model/weight selection, dataset split,
    # input resolution, GPU id and data-loader worker count.
    parser = argparse.ArgumentParser(description='Test with pytorch')
    parser.add_argument('--model_name', '-M', type=str, default='RTFNet')
    parser.add_argument('--weight_name', '-W', type=str, default='RTFNet_152') # RTFNet_152, RTFNet_50, please change the number of layers in the network file
    parser.add_argument('--dataset_name', '-D', type=str, default='test') # test, test_day, test_night
    parser.add_argument('--img_height', '-IH', type=int, default=480)
    parser.add_argument('--img_width', '-IW', type=int, default=640)
    parser.add_argument('--gpu', '-G', type=int, default=0)
    parser.add_argument('--num_workers', '-j', type=int, default=8)
    args = parser.parse_args()
    # Per-frame timing in main() assumes single-image batches.
    batch_size = 1 # do not change this parameter!
    torch.cuda.set_device(args.gpu)
    print("\n| the gpu count:", torch.cuda.device_count())
    print("| the current used gpu:", torch.cuda.current_device(), '\n')
    # Resolve the checkpoint path and bail out early if it is missing.
    model_dir = os.path.join(model_dir, args.weight_name) # model_dir = './weights_backup/'
    if os.path.exists(model_dir) is False:
        print("| the %s does not exit." %(model_dir))
        sys.exit()
    model_file = os.path.join(model_dir, 'final.pth')
    if os.path.exists(model_file) is True:
        print('| use the final model file.')
    else:
        print('| no model file found.')
        sys.exit()
    print('| testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu))
    main()
| 49.35 | 253 | 0.627587 |
import os
import argparse
import time
import datetime
import numpy as np
import sys
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from util.MF_dataset import MF_dataset
from model import RTFNet
from sklearn.metrics import confusion_matrix
n_class = 9
data_dir = './dataset/'
model_dir = './weights_backup/'
def main():
conf_total = np.zeros((n_class,n_class))
model = eval(args.model_name)(n_class=n_class)
if args.gpu >= 0: model.cuda(args.gpu)
print('| loading model file %s... ' % model_file)
pretrained_weight = torch.load(model_file, map_location = lambda storage, loc: storage.cuda(args.gpu))
own_state = model.state_dict()
for name, param in pretrained_weight.items():
if name not in own_state:
continue
own_state[name].copy_(param)
print('done!')
test_dataset = MF_dataset(data_dir, args.dataset_name, have_label=True, input_h=args.img_height, input_w=args.img_width)
test_loader = DataLoader(
dataset = test_dataset,
batch_size = batch_size,
shuffle = False,
num_workers = args.num_workers,
pin_memory = True,
drop_last = False
)
test_loader.n_iter = len(test_loader)
ave_time_cost = 0.0
model.eval()
with torch.no_grad():
for it, (images, labels, names) in enumerate(test_loader):
images = Variable(images)
labels = Variable(labels)
if args.gpu >= 0:
images = images.cuda(args.gpu)
labels = labels.cuda(args.gpu)
start_time = time.time()
logits = model(images)
end_time = time.time()
if it>10: st += (end_time-start_time)
label = labels.cpu().numpy().squeeze().flatten()
prediction = logits.argmax(1).cpu().numpy().squeeze().flatten()
conf = confusion_matrix(label, prediction, [0,1,2,3,4,5,6,7,8])
conf_total += conf
print("| frame %d/%d, time cost: %.2f ms" %(it+1, test_loader.n_iter, (end_time-start_time)*1000))
recall_per_class = np.zeros(n_class)
iou_per_class = np.zeros(n_class)
for cid in range(0, n_class):
if conf_total[cid, 0:].sum() == 0:
recall_per_class[cid] = np.nan
else:
recall_per_class[cid] = float(conf_total[cid, cid]) / float(conf_total[cid, 0:].sum())
if (conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]) == 0:
iou_per_class[cid] = np.nan
else:
iou_per_class[cid] = float(conf_total[cid, cid]) / float((conf_total[cid, 0:].sum() + conf_total[0:, cid].sum() - conf_total[cid, cid]))
print('\n###########################################################################')
print('\n| %s: %s test results (with batch size %d) on %s using %s:' %(args.model_name, args.weight_name, batch_size, datetime.date.today(), torch.cuda.get_device_name(args.gpu)))
print('\n| * the tested dataset name: %s' % args.dataset_name)
print('| * the tested image count: %d' % test_loader.n_iter)
print('| * the tested image size: %d*%d' %(args.img_height, args.img_width))
print("| * recall per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
%(recall_per_class[0], recall_per_class[1], recall_per_class[2], recall_per_class[3], recall_per_class[4], recall_per_class[5], recall_per_class[6], recall_per_class[7], recall_per_class[8]))
print("| * iou per class: \n unlabeled: %.6f, car: %.6f, person: %.6f, bike: %.6f, curve: %.6f, car_stop: %.6f, guardrail: %.6f, color_cone: %.6f, bump: %.6f" \
%(iou_per_class[0], iou_per_class[1], iou_per_class[2], iou_per_class[3], iou_per_class[4], iou_per_class[5], iou_per_class[6], iou_per_class[7], iou_per_class[8]))
print("\n| * average values (np.mean(x)): \n recall: %.6f, iou: %.6f" \
%(recall_per_class.mean(), iou_per_class.mean()))
print("| * average values (np.mean(np.nan_to_num(x))): \n recall: %.6f, iou: %.6f" \
%(np.mean(np.nan_to_num(recall_per_class)), np.mean(np.nan_to_num(iou_per_class))))
print('\n| * the average time cost per frame (with batch size %d): %.2f ms, namely, the inference speed is %.2f fps' %(batch_size, ave_time_cost*1000/(test_loader.n_iter-11), 1.0/(ave_time_cost/(test_loader.n_iter-11))))
print('\n###########################################################################')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test with pytorch')
parser.add_argument('--model_name', '-M', type=str, default='RTFNet')
parser.add_argument('--weight_name', '-W', type=str, default='RTFNet_152')
parser.add_argument('--dataset_name', '-D', type=str, default='test')
parser.add_argument('--img_height', '-IH', type=int, default=480)
parser.add_argument('--img_width', '-IW', type=int, default=640)
parser.add_argument('--gpu', '-G', type=int, default=0)
parser.add_argument('--num_workers', '-j', type=int, default=8)
args = parser.parse_args()
batch_size = 1
torch.cuda.set_device(args.gpu)
print("\n| the gpu count:", torch.cuda.device_count())
print("| the current used gpu:", torch.cuda.current_device(), '\n')
model_dir = os.path.join(model_dir, args.weight_name)
if os.path.exists(model_dir) is False:
print("| the %s does not exit." %(model_dir))
sys.exit()
model_file = os.path.join(model_dir, 'final.pth')
if os.path.exists(model_file) is True:
print('| use the final model file.')
else:
print('| no model file found.')
sys.exit()
print('| testing %s: %s on GPU #%d with pytorch' % (args.model_name, args.weight_name, args.gpu))
main()
| true | true |
f714f0b9624cf9de0c997ff4a2f5217b29268d2c | 5,779 | py | Python | tests/unit/test_validator_cli.py | ajenie/sawtooth-validator | c21436b3abbac4d2ce7cf6a65d9c71ea79d78e98 | [
"Apache-2.0"
] | 4 | 2017-05-22T15:53:29.000Z | 2021-12-03T02:11:30.000Z | tests/unit/test_validator_cli.py | ajenie/sawtooth-validator | c21436b3abbac4d2ce7cf6a65d9c71ea79d78e98 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_validator_cli.py | ajenie/sawtooth-validator | c21436b3abbac4d2ce7cf6a65d9c71ea79d78e98 | [
"Apache-2.0"
] | 2 | 2017-10-16T02:36:34.000Z | 2021-12-03T02:11:19.000Z | # Copyright 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import os
import unittest
from txnmain.validator_cli import get_configuration
class TestValidatorCLI(unittest.TestCase):
    """Unit tests for txnmain.validator_cli.get_configuration().

    Every test clears os.environ first so results do not depend on the
    ambient environment.  Uses assertEqual throughout: the assertEquals
    alias is deprecated and was removed in Python 3.12.
    """

    def test_currency_home(self):
        """CURRENCYHOME drives the config/log/data directory layout."""
        os.environ.clear()
        os.environ["CURRENCYHOME"] = "/test_path"

        cfg = get_configuration(args=[], config_files_required=False)

        self.assertIn("CurrencyHome", cfg)
        self.assertEqual(cfg["CurrencyHome"], "/test_path")
        self.assertEqual(cfg["ConfigDirectory"], "/test_path/etc")
        self.assertEqual(cfg["LogDirectory"], "/test_path/logs")
        self.assertEqual(cfg["DataDirectory"], "/test_path/data")

    def test_default_config_posix(self):
        """Without CURRENCYHOME, POSIX system defaults are used."""
        os.environ.clear()

        cfg = get_configuration(args=[],
                                os_name='posix',
                                config_files_required=False)

        self.assertNotIn("CurrencyHome", cfg)
        self.assertEqual(cfg["ConfigDirectory"], "/etc/sawtooth-validator")
        self.assertEqual(cfg["LogDirectory"], "/var/log/sawtooth-validator")
        self.assertEqual(cfg["DataDirectory"], "/var/lib/sawtooth-validator")

    def test_default_config_nt(self):
        """Without CURRENCYHOME, Windows system defaults are used."""
        os.environ.clear()

        cfg = get_configuration(args=[],
                                os_name='nt',
                                config_files_required=False)

        self.assertNotIn("CurrencyHome", cfg)
        self.assertEqual(
            cfg["ConfigDirectory"],
            "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\conf")
        self.assertEqual(
            cfg["LogDirectory"],
            "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\logs")
        self.assertEqual(
            cfg["DataDirectory"],
            "C:\\Program Files (x86)\\Intel\\sawtooth-validator\\data")

    def test_logconfig_arg(self):
        """--log-config maps to LogConfigFile."""
        os.environ.clear()

        cfg = get_configuration(args=["--log-config=Logging.js"],
                                config_files_required=False)

        self.assertIn("LogConfigFile", cfg)
        self.assertEqual(cfg["LogConfigFile"], "Logging.js")

    def test_options_mapping_conf_dir(self):
        """--conf-dir maps to ConfigDirectory."""
        os.environ.clear()

        cfg = get_configuration(args=["--conf-dir=/test_path/etc"],
                                config_files_required=False)

        self.assertIn("ConfigDirectory", cfg)
        self.assertEqual(cfg["ConfigDirectory"], "/test_path/etc")

    def test_options_mapping_data_dir(self):
        """--data-dir maps to DataDirectory."""
        os.environ.clear()

        cfg = get_configuration(args=["--data-dir=/test_path/data"],
                                config_files_required=False)

        self.assertIn("DataDirectory", cfg)
        self.assertEqual(cfg["DataDirectory"], "/test_path/data")

    def test_options_mapping_type(self):
        """--type maps to LedgerType."""
        os.environ.clear()

        cfg = get_configuration(args=["--type=test"],
                                config_files_required=False)

        self.assertIn("LedgerType", cfg)
        self.assertEqual(cfg["LedgerType"], "test")

    def test_options_mapping_key_file(self):
        """--keyfile maps to KeyFile."""
        os.environ.clear()

        cfg = get_configuration(args=["--keyfile=/test_path/keys/key.wif"],
                                config_files_required=False)

        self.assertIn("KeyFile", cfg)
        self.assertEqual(cfg["KeyFile"], "/test_path/keys/key.wif")

    def test_options_mapping_node(self):
        """--node maps to NodeName."""
        os.environ.clear()

        cfg = get_configuration(args=["--node=test000"],
                                config_files_required=False)

        self.assertIn("NodeName", cfg)
        self.assertEqual(cfg["NodeName"], "test000")

    def test_options_mapping_listsn(self):
        """--listen values are collected into the Listen list."""
        os.environ.clear()

        cfg = get_configuration(args=['--listen="localhost:5500/UDP gossip"'],
                                config_files_required=False)

        self.assertIn("Listen", cfg)
        self.assertEqual(cfg["Listen"], ['"localhost:5500/UDP gossip"'])

    def test_options_mapping_restore(self):
        """--restore sets the Restore flag."""
        os.environ.clear()

        cfg = get_configuration(args=["--restore"],
                                config_files_required=False)

        self.assertEqual(cfg["Restore"], True)

    def test_options_mapping_peers(self):
        """--peers values are collected into the Peers list."""
        os.environ.clear()

        cfg = get_configuration(args=["--peers=testpeer1"],
                                config_files_required=False)

        self.assertIn("Peers", cfg)
        self.assertIn("testpeer1", cfg["Peers"])

    def test_options_mapping_url(self):
        """--url accepts comma-separated and repeated values for LedgerURL."""
        os.environ.clear()

        cfg = get_configuration(args=["--url",
                                      "http://testhost:8888,"
                                      "http://testhost:8889",
                                      "--url",
                                      "http://testhost:8890"],
                                config_files_required=False)

        self.assertIn("LedgerURL", cfg)
        self.assertIn("http://testhost:8888", cfg["LedgerURL"])
        self.assertIn("http://testhost:8889", cfg["LedgerURL"])
        self.assertIn("http://testhost:8890", cfg["LedgerURL"])
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 35.89441 | 80 | 0.59249 |
import os
import unittest
from txnmain.validator_cli import get_configuration
class TestValidatorCLI(unittest.TestCase):
def test_currency_home(self):
os.environ.clear()
os.environ["CURRENCYHOME"] = "/test_path"
cfg = get_configuration(args=[], config_files_required=False)
self.assertIn("CurrencyHome", cfg)
self.assertEquals(cfg["CurrencyHome"], "/test_path")
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
self.assertEquals(cfg["LogDirectory"], "/test_path/logs")
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_default_config_posix(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='posix',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/etc/sawtooth-validator")
self.assertEquals(cfg["LogDirectory"], "/var/log/sawtooth-validator")
self.assertEquals(cfg["DataDirectory"], "/var/lib/sawtooth-validator")
def test_default_config_nt(self):
os.environ.clear()
cfg = get_configuration(args=[],
os_name='nt',
config_files_required=False)
self.assertNotIn("CurrencyHome", cfg)
self.assertEquals(
cfg["ConfigDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\conf")
self.assertEquals(
cfg["LogDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\logs")
self.assertEquals(
cfg["DataDirectory"],
"C:\\Program Files (x86)\\Intel\\sawtooth-validator\\data")
def test_logconfig_arg(self):
os.environ.clear()
cfg = get_configuration(args=["--log-config=Logging.js"],
config_files_required=False)
self.assertIn("LogConfigFile", cfg)
self.assertEquals(cfg["LogConfigFile"], "Logging.js")
def test_options_mapping_conf_dir(self):
os.environ.clear()
cfg = get_configuration(args=["--conf-dir=/test_path/etc"],
config_files_required=False)
self.assertIn("ConfigDirectory", cfg)
self.assertEquals(cfg["ConfigDirectory"], "/test_path/etc")
def test_options_mapping_data_dir(self):
os.environ.clear()
cfg = get_configuration(args=["--data-dir=/test_path/data"],
config_files_required=False)
self.assertIn("DataDirectory", cfg)
self.assertEquals(cfg["DataDirectory"], "/test_path/data")
def test_options_mapping_type(self):
os.environ.clear()
cfg = get_configuration(args=["--type=test"],
config_files_required=False)
self.assertIn("LedgerType", cfg)
self.assertEquals(cfg["LedgerType"], "test")
def test_options_mapping_key_file(self):
os.environ.clear()
cfg = get_configuration(args=["--keyfile=/test_path/keys/key.wif"],
config_files_required=False)
self.assertIn("KeyFile", cfg)
self.assertEquals(cfg["KeyFile"], "/test_path/keys/key.wif")
def test_options_mapping_node(self):
os.environ.clear()
cfg = get_configuration(args=["--node=test000"],
config_files_required=False)
self.assertIn("NodeName", cfg)
self.assertEquals(cfg["NodeName"], "test000")
def test_options_mapping_listsn(self):
os.environ.clear()
cfg = get_configuration(args=['--listen="localhost:5500/UDP gossip"'],
config_files_required=False)
self.assertIn("Listen", cfg)
self.assertEquals(cfg["Listen"], ['"localhost:5500/UDP gossip"'])
def test_options_mapping_restore(self):
os.environ.clear()
cfg = get_configuration(args=["--restore"],
config_files_required=False)
self.assertEquals(cfg["Restore"], True)
    def test_options_mapping_peers(self):
        """--peers should collect the given peer names into the Peers list."""
        os.environ.clear()
        cfg = get_configuration(args=["--peers=testpeer1"],
                                config_files_required=False)
        self.assertIn("Peers", cfg)
        self.assertIn("testpeer1", cfg["Peers"])
    def test_options_mapping_url(self):
        """Repeated --url arguments should accumulate all URLs in LedgerURL.

        The first --url value is a comma-joined pair (adjacent string
        literals), so both comma-separated URLs and repeated flags must land
        in the LedgerURL collection.
        """
        os.environ.clear()
        cfg = get_configuration(args=["--url",
                                      "http://testhost:8888,"
                                      "http://testhost:8889",
                                      "--url",
                                      "http://testhost:8890"],
                                config_files_required=False)
        self.assertIn("LedgerURL", cfg)
        self.assertIn("http://testhost:8888", cfg["LedgerURL"])
        self.assertIn("http://testhost:8889", cfg["LedgerURL"])
        self.assertIn("http://testhost:8890", cfg["LedgerURL"])
# Allow running this test module directly (python <module>.py).
if __name__ == '__main__':
    unittest.main()
| true | true |
f714f3d1f909cc42bd23a2c7442b97bb0ce95b3a | 13,654 | py | Python | samples/client/petstore/python/petstore_api/model/child_lizard.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 1 | 2022-01-03T04:40:07.000Z | 2022-01-03T04:40:07.000Z | samples/client/petstore/python/petstore_api/model/child_lizard.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 28 | 2021-04-07T07:38:36.000Z | 2022-03-31T03:10:56.000Z | samples/client/petstore/python/petstore_api/model/child_lizard.py | JigarJoshi/openapi-generator | 785535b8d6881b358463994823abbda2b26ff42e | [
"Apache-2.0"
] | 2 | 2021-11-03T10:07:15.000Z | 2021-12-17T13:00:53.000Z | """
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from petstore_api.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.child_lizard_all_of import ChildLizardAllOf
from petstore_api.model.parent_pet import ParentPet
globals()['ChildLizardAllOf'] = ChildLizardAllOf
globals()['ParentPet'] = ParentPet
class ChildLizard(ModelComposed):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.

    Attributes:
      allowed_values (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          with a capitalized key describing the allowed value and an allowed
          value. These dicts store the allowed enum values.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
      discriminator_value_class_map (dict): A dict to go from the discriminator
          variable value to the discriminator class name.
      validations (dict): The key is the tuple path to the attribute
          and the for var_name this is (var_name,). The value is a dict
          that stores validations for max_length, min_length, max_items,
          min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
          inclusive_minimum, and regex.
      additional_properties_type (tuple): A tuple of classes accepted
          as additional properties values.
    """

    # No enum values are declared for this model in the spec.
    allowed_values = {
    }

    # No value validations (lengths, ranges, regexes) are declared.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    # NOTE(review): presumably indicates the schema does not allow null for
    # this model -- defined by the generator; confirm against ModelComposed.
    _nullable = False

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'pet_type': (str,),  # noqa: E501
            'loves_rocks': (bool,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        # No discriminator mapping is declared for this model, so the empty
        # dict makes this return None.
        val = {
        }
        if not val:
            return None
        return {'pet_type': val}

    # Maps pythonic attribute names to the JSON keys used on the wire.
    attribute_map = {
        'pet_type': 'pet_type',  # noqa: E501
        'loves_rocks': 'lovesRocks',  # noqa: E501
    }

    # Attributes that may only be set when deserializing server data via
    # _from_openapi_data; __init__ raises ApiAttributeError for them.
    read_only_vars = {
    }

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):  # noqa: E501
        """ChildLizard - a model defined in OpenAPI

        Keyword Args:
            pet_type (str):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            loves_rocks (bool): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes; presumably exempted from the model's
    # attribute handling -- defined/used by ModelComposed, confirm there.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):  # noqa: E501
        """ChildLizard - a model defined in OpenAPI

        Keyword Args:
            pet_type (str):
            _check_type (bool): if True, values for parameters in openapi_types
                                will be type checked and a TypeError will be
                                raised if the wrong type is input.
                                Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to
                                drill down to the model in received_data
                                when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input data
                                are serialized names, as specified in the OpenAPI document.
                                False if the variable names in the input data
                                are pythonic names, e.g. snake case (default)
            _configuration (Configuration): the instance to use when
                                deserializing a file_type parameter.
                                If passed, type conversion is attempted
                                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of
                                classes that we have traveled through so that
                                if we see that class again we will not use its
                                discriminator again.
                                When traveling through a discriminator, the
                                composed schema that is
                                is traveled through is added to this set.
                                For example if Animal has a discriminator
                                petType and we pass in "Dog", and the class Dog
                                allOf includes Animal, we move through Animal
                                once using the discriminator, and pick Dog.
                                Then in Dog, we will make an instance of the
                                Animal class but this time we won't travel
                                through its discriminator because we passed in
                                _visited_composed_classes = (Animal,)
            loves_rocks (bool): [optional]  # noqa: E501
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        composed_info = validate_get_composed_info(
            constant_args, kwargs, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        discarded_args = composed_info[3]
        for var_name, var_value in kwargs.items():
            if var_name in discarded_args and \
                        self._configuration is not None and \
                        self._configuration.discard_unknown_keys and \
                        self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                     f"class with read only attributes.")

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
          'anyOf': [
          ],
          'allOf': [
              ChildLizardAllOf,
              ParentPet,
          ],
          'oneOf': [
          ],
        }
| 42.403727 | 174 | 0.581075 |
import re
import sys
from petstore_api.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from petstore_api.exceptions import ApiAttributeError
def lazy_import():
from petstore_api.model.child_lizard_all_of import ChildLizardAllOf
from petstore_api.model.parent_pet import ParentPet
globals()['ChildLizardAllOf'] = ChildLizardAllOf
globals()['ParentPet'] = ParentPet
class ChildLizard(ModelComposed):
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,)
_nullable = False
@cached_property
def openapi_types():
lazy_import()
return {
'pet_type': (str,),
'loves_rocks': (bool,),
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'pet_type': val}
attribute_map = {
'pet_type': 'pet_type',
'loves_rocks': 'lovesRocks',
}
read_only_vars = {
}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
constant_args = {
'_check_type': _check_type,
'_path_to_item': _path_to_item,
'_spec_property_naming': _spec_property_naming,
'_configuration': _configuration,
'_visited_composed_classes': self._visited_composed_classes,
}
composed_info = validate_get_composed_info(
constant_args, kwargs, self)
self._composed_instances = composed_info[0]
self._var_name_to_model_instances = composed_info[1]
self._additional_properties_model_instances = composed_info[2]
discarded_args = composed_info[3]
for var_name, var_value in kwargs.items():
if var_name in discarded_args and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self._additional_properties_model_instances:
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
@cached_property
def _composed_schemas():
# loading
lazy_import()
return {
'anyOf': [
],
'allOf': [
ChildLizardAllOf,
ParentPet,
],
'oneOf': [
],
}
| true | true |
f714f53f337435de514cd32802ebf103c855cc8e | 319 | py | Python | backend/server/go-spider.py | thomas5566/new-django-react-app | 25a1f499de60a35d4cc40a7dca3696e04d92d5dc | [
"MIT"
] | null | null | null | backend/server/go-spider.py | thomas5566/new-django-react-app | 25a1f499de60a35d4cc40a7dca3696e04d92d5dc | [
"MIT"
] | null | null | null | backend/server/go-spider.py | thomas5566/new-django-react-app | 25a1f499de60a35d4cc40a7dca3696e04d92d5dc | [
"MIT"
] | null | null | null | from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from botmovies.spiders.ptt import PttMoviesSpider
from botmovies.spiders.yahoo import YahooSpider
process = CrawlerProcess(get_project_settings())
process.crawl(PttMoviesSpider)
process.crawl(YahooSpider)
process.start()
| 29 | 53 | 0.858934 | from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings
from botmovies.spiders.ptt import PttMoviesSpider
from botmovies.spiders.yahoo import YahooSpider
process = CrawlerProcess(get_project_settings())
process.crawl(PttMoviesSpider)
process.crawl(YahooSpider)
process.start()
| true | true |
f714f642a68008e196da074e26144251d4a5f260 | 611 | py | Python | python/network/Foundations-of-Python-Network-Programming/foundations-of-python-network-programming-14/source/chapter18/rpyc_server.py | bosserbosser/codetest | 987563900d912e891b53eeda8e2cf36f3c769430 | [
"Apache-2.0"
] | null | null | null | python/network/Foundations-of-Python-Network-Programming/foundations-of-python-network-programming-14/source/chapter18/rpyc_server.py | bosserbosser/codetest | 987563900d912e891b53eeda8e2cf36f3c769430 | [
"Apache-2.0"
] | null | null | null | python/network/Foundations-of-Python-Network-Programming/foundations-of-python-network-programming-14/source/chapter18/rpyc_server.py | bosserbosser/codetest | 987563900d912e891b53eeda8e2cf36f3c769430 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter18/rpyc_server.py
# RPyC server
import rpyc
def main():
    """Serve MyService over RPyC on TCP port 18861 until interrupted."""
    from rpyc.utils.server import ThreadedServer

    server = ThreadedServer(MyService, port=18861)
    server.start()
class MyService(rpyc.Service):
    """RPyC service exposing a remote line counter."""

    def exposed_line_counter(self, fileobj, function):
        """Invoke ``function`` on every line of ``fileobj``.

        Returns the number of lines processed. Fix: the original left
        ``linenum`` unbound for an empty file, raising UnboundLocalError;
        an empty file now correctly yields 0.
        """
        print('Client has invoked exposed_line_counter()')
        count = 0
        for count, line in enumerate(fileobj.readlines(), start=1):
            function(line)
        return count
# Start the RPyC server when executed as a script.
if __name__ == '__main__':
    main()
| 27.772727 | 77 | 0.702128 |
import rpyc
def main():
from rpyc.utils.server import ThreadedServer
t = ThreadedServer(MyService, port = 18861)
t.start()
class MyService(rpyc.Service):
def exposed_line_counter(self, fileobj, function):
print('Client has invoked exposed_line_counter()')
for linenum, line in enumerate(fileobj.readlines()):
function(line)
return linenum + 1
if __name__ == '__main__':
main()
| true | true |
f714f69a08b35e1b9d65ff1ce11b3bc8d056174d | 531 | py | Python | Task2B.py | henryseal/PartIA-Flood-Warning-System-main | 4110a22b4b4a1b6ac8778aa176ddb1a577d245b1 | [
"MIT"
] | null | null | null | Task2B.py | henryseal/PartIA-Flood-Warning-System-main | 4110a22b4b4a1b6ac8778aa176ddb1a577d245b1 | [
"MIT"
] | null | null | null | Task2B.py | henryseal/PartIA-Flood-Warning-System-main | 4110a22b4b4a1b6ac8778aa176ddb1a577d245b1 | [
"MIT"
] | null | null | null | # Copyright (C) 2018 Garth N. Wells
#
# SPDX-License-Identifier: MIT
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
    """Print each monitored station whose relative level exceeds 0.8."""
    stations = build_station_list()
    update_water_levels(stations)
    for station, relative_level in stations_level_over_threshold(stations, 0.8):
        print(station.name + " " + str(relative_level))
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
| 26.55 | 75 | 0.73258 |
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_level_over_threshold
def run():
stations = build_station_list()
update_water_levels(stations)
for station_tuple in stations_level_over_threshold(stations, 0.8):
print(station_tuple[0].name + " " + str(station_tuple[1]))
if __name__ == "__main__":
print("*** Task 2B: CUED Part IA Flood Warning System ***")
run()
| true | true |
f714f6e6db1898081eaba5c2d3937b62899fb8ac | 476 | py | Python | Desafios/Desafio101.py | Felix-xilef/Curso-de-Python | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | [
"MIT"
] | null | null | null | Desafios/Desafio101.py | Felix-xilef/Curso-de-Python | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | [
"MIT"
] | null | null | null | Desafios/Desafio101.py | Felix-xilef/Curso-de-Python | cdff7c7f3850e6326e274c8c1987b9e1a18ce910 | [
"MIT"
] | null | null | null | from auxiliar import receberInt
def voto(nasc, ano=None):
    """Classify voting status for someone born in year *nasc*.

    Args:
        nasc: birth year (int).
        ano: reference year for the age computation; defaults to the
            current year. Added (backward-compatibly) so the function is
            deterministic and testable.

    Returns:
        A Portuguese status string: NEGADO (< 16), OPCIONAL (16-17 or
        >= 60), or OBRIGATÓRIO (18-59).
    """
    from datetime import date
    if ano is None:
        ano = date.today().year
    idade = ano - nasc  # date.today().year is already an int; no int() needed
    if idade < 16:
        return f'Com {idade} anos, voto: NEGADO'
    elif idade < 18 or idade >= 60:
        return f'Com {idade} anos, voto: OPCIONAL'
    else:
        return f'Com {idade} anos, voto: OBRIGATÓRIO'
# main
# Read the birth year, classify the vote, and pause before exiting.
nascimento = receberInt('Digite o ano de nascimento: ')
print(voto(nascimento))
input('\n\nPressione <enter> para continuar')
| 25.052632 | 55 | 0.655462 | from auxiliar import receberInt
def voto(nasc):
from datetime import date
idade = int(date.today().year) - nasc
if idade < 16:
return f'Com {idade} anos, voto: NEGADO'
elif idade < 18 or idade >= 60:
return f'Com {idade} anos, voto: OPCIONAL'
else:
return f'Com {idade} anos, voto: OBRIGATÓRIO'
nascimento = receberInt('Digite o ano de nascimento: ')
print(voto(nascimento))
input('\n\nPressione <enter> para continuar')
| true | true |
f714f71252970ab103635098b3af05715486c851 | 675 | py | Python | examples/argument_group.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | 1 | 2021-04-26T19:46:33.000Z | 2021-04-26T19:46:33.000Z | examples/argument_group.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | null | null | null | examples/argument_group.py | gmerz/ArgTyper | 56e1d60ce2cc8f7d889fb8890ddbe922b85ab9f3 | [
"MIT"
] | null | null | null | import argtyper
# Group the full-name parameters under one help section.
@argtyper.ArgumentGroup(
    ["firstname", "lastname"],
    title="Name details",
    description="Give your full name here",
)
# Group the nickname parameters under a second help section
# (note "firstname" appears in both groups; presumably argtyper allows
# overlapping groups -- confirm against the argtyper docs).
@argtyper.ArgumentGroup(
    ["nickname", "firstname"],
    title="Nickname details",
    description="Give your Nickname here",
)
# Exposes "amount" on the CLI as "repetitions" with metavar "reps".
@argtyper.Argument(
    "amount", "repetitions", help="How often should we say hello?", metavar="reps"
)
# Exposes "lastname" as --name/--n with a default of "Yoda".
@argtyper.Argument(
    "lastname", "--name", "--n", help="Give me your name", default="Yoda"
)
def hello(nickname: str, firstname: str, lastname: str, amount: int = 2):
    """Print the greeting *amount* times, one per line."""
    print("\n".join([f"Hello {firstname} '{nickname.upper()}' {lastname}"] * amount))
# Wrap hello() in an ArgTyper CLI and dispatch on the command-line arguments.
at = argtyper.ArgTyper(hello)
at()
| 25.961538 | 85 | 0.662222 | import argtyper
@argtyper.ArgumentGroup(
["firstname", "lastname"],
title="Name details",
description="Give your full name here",
)
@argtyper.ArgumentGroup(
["nickname", "firstname"],
title="Nickname details",
description="Give your Nickname here",
)
@argtyper.Argument(
"amount", "repetitions", help="How often should we say hello?", metavar="reps"
)
@argtyper.Argument(
"lastname", "--name", "--n", help="Give me your name", default="Yoda"
)
def hello(nickname: str, firstname: str, lastname: str, amount: int = 2):
print("\n".join([f"Hello {firstname} '{nickname.upper()}' {lastname}"] * amount))
at = argtyper.ArgTyper(hello)
at()
| true | true |
f714f92a92fb4764cbd9b8709835322bbc54cf6b | 1,474 | py | Python | tensorflow_datasets/text/__init__.py | MyWhiteCastle/datasets | e75a54948bb8aaf9cf45933a538502d2f66c41a6 | [
"Apache-2.0"
] | 2 | 2019-11-23T18:41:58.000Z | 2020-08-12T21:00:39.000Z | tensorflow_datasets/text/__init__.py | MyWhiteCastle/datasets | e75a54948bb8aaf9cf45933a538502d2f66c41a6 | [
"Apache-2.0"
] | null | null | null | tensorflow_datasets/text/__init__.py | MyWhiteCastle/datasets | e75a54948bb8aaf9cf45933a538502d2f66c41a6 | [
"Apache-2.0"
] | 1 | 2019-12-14T00:32:08.000Z | 2019-12-14T00:32:08.000Z | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Text datasets."""
from tensorflow_datasets.text.definite_pronoun_resolution import DefinitePronounResolution
from tensorflow_datasets.text.gap import Gap
from tensorflow_datasets.text.glue import Glue
from tensorflow_datasets.text.imdb import IMDBReviews
from tensorflow_datasets.text.imdb import IMDBReviewsConfig
from tensorflow_datasets.text.lm1b import Lm1b
from tensorflow_datasets.text.lm1b import Lm1bConfig
from tensorflow_datasets.text.multi_nli import MultiNLI
from tensorflow_datasets.text.multi_nli_mismatch import MultiNLIMismatch
from tensorflow_datasets.text.snli import Snli
from tensorflow_datasets.text.squad import Squad
from tensorflow_datasets.text.super_glue import SuperGlue
from tensorflow_datasets.text.trivia_qa import TriviaQA
from tensorflow_datasets.text.wikipedia import Wikipedia
from tensorflow_datasets.text.xnli import Xnli
| 44.666667 | 90 | 0.833786 |
from tensorflow_datasets.text.definite_pronoun_resolution import DefinitePronounResolution
from tensorflow_datasets.text.gap import Gap
from tensorflow_datasets.text.glue import Glue
from tensorflow_datasets.text.imdb import IMDBReviews
from tensorflow_datasets.text.imdb import IMDBReviewsConfig
from tensorflow_datasets.text.lm1b import Lm1b
from tensorflow_datasets.text.lm1b import Lm1bConfig
from tensorflow_datasets.text.multi_nli import MultiNLI
from tensorflow_datasets.text.multi_nli_mismatch import MultiNLIMismatch
from tensorflow_datasets.text.snli import Snli
from tensorflow_datasets.text.squad import Squad
from tensorflow_datasets.text.super_glue import SuperGlue
from tensorflow_datasets.text.trivia_qa import TriviaQA
from tensorflow_datasets.text.wikipedia import Wikipedia
from tensorflow_datasets.text.xnli import Xnli
| true | true |
f714f98147bf1c6b56576249d2a0857054514332 | 5,166 | py | Python | tests/test_development_scripts.py | dmwcode/ntc-templates | 684f45b34e453c5d2a20df2a8769c66555017e22 | [
"Apache-2.0"
] | 817 | 2016-04-27T22:47:59.000Z | 2022-03-29T21:47:37.000Z | tests/test_development_scripts.py | dmwcode/ntc-templates | 684f45b34e453c5d2a20df2a8769c66555017e22 | [
"Apache-2.0"
] | 577 | 2016-05-13T12:41:12.000Z | 2022-03-31T02:42:14.000Z | tests/test_development_scripts.py | dmwcode/ntc-templates | 684f45b34e453c5d2a20df2a8769c66555017e22 | [
"Apache-2.0"
] | 677 | 2016-04-27T22:48:03.000Z | 2022-03-28T16:20:36.000Z | import os
import glob
from copy import deepcopy
import pytest
from ruamel.yaml.compat import StringIO
import development_scripts
@pytest.fixture(scope="module")
def yaml_comments_file():
with open("tests/mocks/load/yaml_comments.yml", encoding="utf-8") as fh:
return development_scripts.YAML_OBJECT.load(fh)
@pytest.fixture
def copy_yaml_comments(yaml_comments_file):
    """Deep copy of the module-scoped fixture so tests can mutate it freely."""
    return deepcopy(yaml_comments_file)
@pytest.fixture
def teardown_normalize_file():
    """Snapshot requested files and restore their contents after the test."""
    originals = {}

    def _teardown_normalize_file(filepath):
        # Record the file's pre-test contents for restoration.
        with open(filepath, encoding="utf-8") as handle:
            originals[filepath] = handle.read()

    yield _teardown_normalize_file

    # Put every snapshotted file back exactly as it was.
    for filepath, contents in originals.items():
        with open(filepath, "w", encoding="utf-8") as handle:
            handle.write(contents)
@pytest.fixture(scope="module")
def expected_file():
expected_path = "tests/mocks/expected/parsed_sample.yml"
with open(expected_path, encoding="utf-8") as fh:
return fh.read()
@pytest.fixture(scope="module")
def expected_mac_file():
expected_path = "tests/mocks/expected/show_mac.yml"
with open(expected_path, encoding="utf-8") as fh:
return fh.read()
@pytest.fixture
def teardown_delete_file():
    """Collect file paths during the test and delete them all afterwards."""
    registered = []

    def _teardown_delete_file(filepath):
        registered.append(filepath)

    yield _teardown_delete_file

    for filepath in registered:
        os.remove(filepath)
def test_ensure_spacing_for_multiline_comment():
    """Octothorpes gain a trailing space; the final newline is stripped."""
    raw = "comment 11\n# comment 12\n#comment 13\n"
    formatted = development_scripts.ensure_spacing_for_multiline_comment(raw)
    assert formatted == "comment 11\n# comment 12\n# comment 13"
def test_ensure_space_after_octothorpe(copy_yaml_comments):
    """A single comment token is normalized in place."""
    token = copy_yaml_comments.ca.items["b"][2]
    development_scripts.ensure_space_after_octothorpe(token)
    assert token.value == "# comment 2\n# comment 3\n"
def test_ensure_space_comments(copy_yaml_comments):
    """Every comment attached to the loaded document is normalized."""
    comment_map = copy_yaml_comments.ca.items
    development_scripts.ensure_space_comments(comment_map.values())
    assert comment_map["a"][2].value == "# comment 1\n"
    assert comment_map["b"][2].value == "# comment 2\n# comment 3\n"
    assert comment_map["d"][3][0].value == "# comment 7\n"
def test_update_yaml_comments(copy_yaml_comments):
    """Normalized YAML dumps byte-identical to the expected fixture."""
    development_scripts.update_yaml_comments(copy_yaml_comments)
    buffer = StringIO()
    development_scripts.YAML_OBJECT.dump(copy_yaml_comments, buffer)
    with open("tests/mocks/expected/yaml_comments.yml", encoding="utf-8") as handle:
        assert buffer.getvalue() == handle.read()
def test_transform_file(teardown_normalize_file, expected_file):
    """transform_file rewrites a single YAML file into canonical form."""
    target = "tests/mocks/load/parsed_sample.yml"
    teardown_normalize_file(target)
    development_scripts.transform_file(target)
    with open(target, encoding="utf-8") as handle:
        assert handle.read() == expected_file
def test_transform_glob(teardown_normalize_file, expected_file):
    """transform_glob rewrites every YAML file matched under the glob."""
    glob_dir = "tests/mocks/load/gl*"
    targets = glob.glob(f"{glob_dir}/*.yml")
    for target in targets:
        teardown_normalize_file(target)
    development_scripts.transform_glob(glob_dir)
    for target in targets:
        with open(target, encoding="utf-8") as handle:
            assert handle.read() == expected_file
def test_ensure_yaml_standards(teardown_normalize_file, expected_file):
    """ensure_yaml_standards normalizes an already-loaded YAML document."""
    target = "tests/mocks/load/parsed_sample.yml"
    teardown_normalize_file(target)
    with open(target, encoding="utf-8") as handle:
        document = development_scripts.YAML_OBJECT.load(handle)
    development_scripts.ensure_yaml_standards(document, target)
    with open(target, encoding="utf-8") as handle:
        assert handle.read() == expected_file
def test_parse_test_filepath():
    """A fixture path decomposes into platform, command, and filename."""
    path = "tests/cisco_ios/show_version/cisco_ios_show_version.raw"
    platform, command, filename = development_scripts.parse_test_filepath(path)
    assert (platform, command, filename) == (
        "cisco_ios",
        "show version",
        "cisco_ios_show_version",
    )
def test_build_parsed_data_from_output(teardown_delete_file, expected_mac_file):
    """Raw CLI output is parsed into a YAML file next to the .raw fixture."""
    raw_file = "tests/mocks/cisco_ios/show_mac-address-table/show_mac1.raw"
    yaml_file = f"{raw_file[:-3]}yml"
    teardown_delete_file(yaml_file)
    development_scripts.build_parsed_data_from_output(raw_file, test_dir="tests/mocks")
    with open(yaml_file, encoding="utf-8") as handle:
        assert handle.read() == expected_mac_file
def test_build_parsed_data_from_dir(teardown_delete_file, expected_mac_file):
    """Every .raw file under the glob gains a matching parsed YAML file."""
    glob_dir = "tests/mocks/cisco_ios/show_mac-*"
    yaml_files = [f"{raw[:-3]}yml" for raw in glob.iglob(f"{glob_dir}/*.raw")]
    for yaml_file in yaml_files:
        teardown_delete_file(yaml_file)
    development_scripts.build_parsed_data_from_dir(glob_dir, test_dir="tests/mocks")
    for yaml_file in yaml_files:
        with open(yaml_file, encoding="utf-8") as handle:
            assert handle.read() == expected_mac_file
| 33.115385 | 88 | 0.734611 | import os
import glob
from copy import deepcopy
import pytest
from ruamel.yaml.compat import StringIO
import development_scripts
@pytest.fixture(scope="module")
def yaml_comments_file():
with open("tests/mocks/load/yaml_comments.yml", encoding="utf-8") as fh:
return development_scripts.YAML_OBJECT.load(fh)
@pytest.fixture
def copy_yaml_comments(yaml_comments_file):
return deepcopy(yaml_comments_file)
@pytest.fixture
def teardown_normalize_file():
filepaths = {}
def _teardown_normalize_file(filepath):
with open(filepath, encoding="utf-8") as fh:
contents = fh.read()
filepaths[filepath] = contents
yield _teardown_normalize_file
for filepath, contents in filepaths.items():
with open(filepath, "w", encoding="utf-8") as fh:
fh.write(contents)
@pytest.fixture(scope="module")
def expected_file():
expected_path = "tests/mocks/expected/parsed_sample.yml"
with open(expected_path, encoding="utf-8") as fh:
return fh.read()
@pytest.fixture(scope="module")
def expected_mac_file():
expected_path = "tests/mocks/expected/show_mac.yml"
with open(expected_path, encoding="utf-8") as fh:
return fh.read()
@pytest.fixture
def teardown_delete_file():
filepaths = []
def _teardown_delete_file(filepath):
filepaths.append(filepath)
yield _teardown_delete_file
for file in filepaths:
os.remove(file)
def test_ensure_spacing_for_multiline_comment():
remark = "comment 11\n# comment 12\n#comment 13\n"
remark_formatted = development_scripts.ensure_spacing_for_multiline_comment(remark)
assert remark_formatted == "comment 11\n# comment 12\n# comment 13"
def test_ensure_space_after_octothorpe(copy_yaml_comments):
comment = copy_yaml_comments.ca.items["b"][2]
development_scripts.ensure_space_after_octothorpe(comment)
assert comment.value == "# comment 2\n# comment 3\n"
def test_ensure_space_comments(copy_yaml_comments):
comments = copy_yaml_comments.ca.items
comment_values = comments.values()
development_scripts.ensure_space_comments(comment_values)
assert comments["a"][2].value == "# comment 1\n"
assert comments["b"][2].value == "# comment 2\n# comment 3\n"
assert comments["d"][3][0].value == "# comment 7\n"
def test_update_yaml_comments(copy_yaml_comments):
development_scripts.update_yaml_comments(copy_yaml_comments)
string_yaml = StringIO()
development_scripts.YAML_OBJECT.dump(copy_yaml_comments, string_yaml)
actual = string_yaml.getvalue()
with open("tests/mocks/expected/yaml_comments.yml", encoding="utf-8") as fh:
expected = fh.read()
assert actual == expected
def test_transform_file(teardown_normalize_file, expected_file):
load_file = "tests/mocks/load/parsed_sample.yml"
teardown_normalize_file(load_file)
development_scripts.transform_file(load_file)
with open(load_file, encoding="utf-8") as actual:
assert actual.read() == expected_file
def test_transform_glob(teardown_normalize_file, expected_file):
glob_dir = "tests/mocks/load/gl*"
parsed_files = glob.glob(f"{glob_dir}/*.yml")
for file in parsed_files:
teardown_normalize_file(file)
development_scripts.transform_glob(glob_dir)
for file in parsed_files:
with open(file, encoding="utf-8") as actual:
assert actual.read() == expected_file
def test_ensure_yaml_standards(teardown_normalize_file, expected_file):
load_file = "tests/mocks/load/parsed_sample.yml"
teardown_normalize_file(load_file)
with open(load_file, encoding="utf-8") as fh:
load_yaml = development_scripts.YAML_OBJECT.load(fh)
development_scripts.ensure_yaml_standards(load_yaml, load_file)
with open(load_file, encoding="utf-8") as actual:
assert actual.read() == expected_file
def test_parse_test_filepath():
filepath = "tests/cisco_ios/show_version/cisco_ios_show_version.raw"
platform, command, filename = development_scripts.parse_test_filepath(filepath)
assert platform == "cisco_ios"
assert command == "show version"
assert filename == "cisco_ios_show_version"
def test_build_parsed_data_from_output(teardown_delete_file, expected_mac_file):
load_file = "tests/mocks/cisco_ios/show_mac-address-table/show_mac1.raw"
yaml_file = f"{load_file[:-3]}yml"
teardown_delete_file(yaml_file)
development_scripts.build_parsed_data_from_output(load_file, test_dir="tests/mocks")
with open(yaml_file, encoding="utf-8") as actual:
assert actual.read() == expected_mac_file
def test_build_parsed_data_from_dir(teardown_delete_file, expected_mac_file):
glob_dir = "tests/mocks/cisco_ios/show_mac-*"
command_files = glob.iglob(f"{glob_dir}/*.raw")
parsed_files = [f"{file[:-3]}yml" for file in command_files]
for file in parsed_files:
teardown_delete_file(file)
development_scripts.build_parsed_data_from_dir(glob_dir, test_dir="tests/mocks")
for file in parsed_files:
with open(file, encoding="utf-8") as actual:
assert actual.read() == expected_mac_file
| true | true |
f714f9af20d505dd8a6b78bf8ee9169697d1f5cd | 8,853 | py | Python | custom_components/xiaomi_miot/light.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | 1 | 2021-12-10T12:30:34.000Z | 2021-12-10T12:30:34.000Z | custom_components/xiaomi_miot/light.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | null | null | null | custom_components/xiaomi_miot/light.py | ss109/hass-xiaomi-miot | a69c8e0e44400b9aa0f94f1003d3c6f3de4996fd | [
"Apache-2.0"
] | null | null | null | """Support for Xiaomi lights."""
import logging
from functools import partial
from homeassistant.const import * # noqa: F401
from homeassistant.components.light import (
DOMAIN as ENTITY_DOMAIN,
LightEntity,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
SUPPORT_COLOR,
SUPPORT_EFFECT,
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_EFFECT,
)
from homeassistant.util import color
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA, # noqa: F401
MiotToggleEntity,
ToggleSubEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
from miio.utils import (
rgb_to_int,
int_to_rgb,
)
try:
# hass 2021.4.0b0+
from homeassistant.components.light import (
COLOR_MODE_ONOFF,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
)
except ImportError:
COLOR_MODE_ONOFF = 'onoff'
COLOR_MODE_BRIGHTNESS = 'brightness'
COLOR_MODE_COLOR_TEMP = 'color_temp'
COLOR_MODE_HS = 'hs'
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up light entities for a Home Assistant config entry by delegating
    to the shared config-entry helper with this platform's setup function."""
    await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create MIoT light entities for a configured device and register them."""
    hass.data.setdefault(DATA_KEY, {})
    hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
    model = str(config.get(CONF_MODEL) or '')
    entities = []
    # mrbond.airer models are handled elsewhere, so create nothing for them
    if 'mrbond.airer' not in model:
        miot = config.get('miot_type')
        if miot:
            spec = await MiotSpec.async_from_type(hass, miot)
            # only light services with an "on" property are usable
            entities = [
                MiotLightEntity(config, srv)
                for srv in spec.get_services(ENTITY_DOMAIN)
                if srv.get_property('on')
            ]
    for entity in entities:
        hass.data[DOMAIN]['entities'][entity.unique_id] = entity
    async_add_entities(entities, update_before_add=True)
    bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotLightEntity(MiotToggleEntity, LightEntity):
    """Light entity backed by a MIoT light service."""

    def __init__(self, config: dict, miot_service: MiotService, **kwargs):
        """Resolve the service's light properties and derive supported features.

        When the service itself has no ``color`` property, fall back to the
        ``ambient_light_custom`` sibling service (some devices expose color
        only there).
        """
        kwargs.setdefault('logger', _LOGGER)
        super().__init__(miot_service, config=config, **kwargs)
        self._prop_power = miot_service.get_property('on')
        self._prop_mode = miot_service.get_property('mode')
        self._prop_brightness = miot_service.get_property('brightness')
        self._prop_color_temp = miot_service.get_property('color_temperature')
        self._prop_color = miot_service.get_property('color')
        self._srv_ambient_custom = miot_service.spec.get_service('ambient_light_custom')
        if self._srv_ambient_custom:
            if not self._prop_color:
                self._prop_color = self._srv_ambient_custom.get_property('color')

        # advertise only the capabilities the device actually declares
        self._attr_supported_color_modes = set()
        if self._prop_power:
            self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)
        if self._prop_brightness:
            self._supported_features |= SUPPORT_BRIGHTNESS
            self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
        if self._prop_color_temp:
            self._supported_features |= SUPPORT_COLOR_TEMP
            self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
        if self._prop_color:
            self._supported_features |= SUPPORT_COLOR
            self._attr_supported_color_modes.add(COLOR_MODE_HS)
        if self._prop_mode:
            self._supported_features |= SUPPORT_EFFECT

    def turn_on(self, **kwargs):
        """Turn the light on, applying any brightness/color-temp/color/effect
        values supplied in ``kwargs``; returns the last set_property result."""
        ret = False
        if not self.is_on:
            ret = self.set_property(self._prop_power, True)
        if self._prop_brightness and ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS]
            per = brightness / 255
            # map HA's 0..255 brightness onto the device's declared range
            # (or a 0..100 percentage when no range is declared)
            val = per * 100
            if self._prop_brightness.value_range:
                val = per * self._prop_brightness.range_max()
            _LOGGER.debug('Setting light: %s brightness: %s %s%%', self.name, brightness, per * 100)
            ret = self.set_property(self._prop_brightness, round(val))
        if self._prop_color_temp and ATTR_COLOR_TEMP in kwargs:
            mired = kwargs[ATTR_COLOR_TEMP]
            color_temp = self.translate_mired(mired)
            _LOGGER.debug('Setting light: %s color temperature: %s mireds, %s ct', self.name, mired, color_temp)
            ret = self.set_property(self._prop_color_temp, color_temp)
        if self._prop_color and ATTR_HS_COLOR in kwargs:
            rgb = color.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
            num = rgb_to_int(rgb)
            _LOGGER.debug('Setting light: %s color: %s', self.name, rgb)
            ret = self.set_property(self._prop_color, num)
        if self._prop_mode and ATTR_EFFECT in kwargs:
            val = self._prop_mode.list_value(kwargs[ATTR_EFFECT])
            _LOGGER.debug('Setting light: %s effect: %s(%s)', self.name, kwargs[ATTR_EFFECT], val)
            ret = self.set_property(self._prop_mode, val)
        return ret

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        val = None
        if self._prop_brightness:
            val = self._prop_brightness.from_dict(self._state_attrs)
        if val is None:
            return None
        rmx = 100
        if self._prop_brightness.value_range:
            rmx = self._prop_brightness.range_max()
        return round(255 / rmx * int(val))

    @property
    def hs_color(self):
        """Return the hue and saturation color value [float, float]."""
        rgb = self.rgb_color
        if rgb is not None:
            return color.color_RGB_to_hs(*rgb)
        return None

    @property
    def rgb_color(self):
        """Return the rgb color value [int, int, int]."""
        if self._prop_color:
            num = round(self._prop_color.from_dict(self._state_attrs) or 0)
            return int_to_rgb(num)
        return None

    @property
    def color_temp(self):
        """Current color temperature in mireds (defaulting to 2700 K)."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.from_dict(self._state_attrs) or 2700)

    @property
    def min_mireds(self):
        """Coldest supported mireds, i.e. derived from the device's maximum kelvin."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.value_range[1] or 5700)

    @property
    def max_mireds(self):
        """Warmest supported mireds, i.e. derived from the device's minimum kelvin."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.value_range[0] or 2700)

    @staticmethod
    def translate_mired(num):
        """Convert kelvin to mireds (or back; the transform is its own inverse).

        Fix: previously only TypeError was caught, so a device reporting 0
        raised ZeroDivisionError and broke the state update; any unusable
        value now falls back to 2700 K.
        """
        try:
            return round(1000000 / num)
        except (TypeError, ZeroDivisionError):
            return round(1000000 / 2700)

    @property
    def effect_list(self):
        """Names of the selectable light modes, if the device has any."""
        if self._prop_mode:
            return self._prop_mode.list_descriptions()
        return None

    @property
    def effect(self):
        """Description of the currently active light mode, if known."""
        if self._prop_mode:
            val = self._prop_mode.from_dict(self._state_attrs)
            if val is not None:
                return self._prop_mode.list_description(val)
        return None
class MiotLightSubEntity(MiotLightEntity, ToggleSubEntity):
    """Light sub-entity for a light service that belongs to a parent MIoT device."""

    def __init__(self, parent, miot_service: MiotService):
        prop_power = miot_service.get_property('on')
        # initialise the toggle side first so the state-attribute keys exist
        # before the light side inspects the service
        ToggleSubEntity.__init__(self, parent, prop_power.full_name, {
            'keys': list((miot_service.mapping() or {}).keys()),
        })
        MiotLightEntity.__init__(self, {
            **parent.miot_config,
            'name': f'{parent.device_name}',
        }, miot_service, device=parent.miot_device)
        self.entity_id = miot_service.generate_entity_id(self)
        self._prop_power = prop_power

    def update(self, data=None):
        """Refresh from the parent; nothing extra to do when unavailable."""
        super().update(data)
        if not self._available:
            return

    async def async_update(self):
        # run the blocking update in the executor to keep the event loop free
        await self.hass.async_add_executor_job(partial(self.update))
class LightSubEntity(ToggleSubEntity, LightEntity):
    """Generic light sub-entity whose state is mirrored from its parent entity."""

    _brightness = None
    _color_temp = None

    def update(self, data=None):
        """Refresh cached brightness/color-temp from the parent's attributes."""
        super().update(data)
        if not self._available:
            return
        state = self._state_attrs
        self._brightness = state.get('brightness', 0)
        self._color_temp = state.get('color_temp', 0)

    def turn_on(self, **kwargs):
        """Delegate switching on to the parent entity."""
        self.call_parent(['turn_on_light', 'turn_on'], **kwargs)

    def turn_off(self, **kwargs):
        """Delegate switching off to the parent entity."""
        self.call_parent(['turn_off_light', 'turn_off'], **kwargs)

    @property
    def brightness(self):
        """Cached brightness from the last update."""
        return self._brightness

    @property
    def color_temp(self):
        """Cached color temperature (mireds) from the last update."""
        return self._color_temp
| 33.790076 | 112 | 0.656952 | import logging
from functools import partial
from homeassistant.const import *
from homeassistant.components.light import (
DOMAIN as ENTITY_DOMAIN,
LightEntity,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
SUPPORT_COLOR,
SUPPORT_EFFECT,
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_EFFECT,
)
from homeassistant.util import color
from . import (
DOMAIN,
CONF_MODEL,
XIAOMI_CONFIG_SCHEMA as PLATFORM_SCHEMA,
MiotToggleEntity,
ToggleSubEntity,
async_setup_config_entry,
bind_services_to_entries,
)
from .core.miot_spec import (
MiotSpec,
MiotService,
)
from miio.utils import (
rgb_to_int,
int_to_rgb,
)
try:
from homeassistant.components.light import (
COLOR_MODE_ONOFF,
COLOR_MODE_BRIGHTNESS,
COLOR_MODE_COLOR_TEMP,
COLOR_MODE_HS,
)
except ImportError:
COLOR_MODE_ONOFF = 'onoff'
COLOR_MODE_BRIGHTNESS = 'brightness'
COLOR_MODE_COLOR_TEMP = 'color_temp'
COLOR_MODE_HS = 'hs'
_LOGGER = logging.getLogger(__name__)
DATA_KEY = f'{ENTITY_DOMAIN}.{DOMAIN}'
SERVICE_TO_METHOD = {}
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up light entities for a Home Assistant config entry by delegating
    to the shared config-entry helper with this platform's setup function."""
    await async_setup_config_entry(hass, config_entry, async_setup_platform, async_add_entities, ENTITY_DOMAIN)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Create MIoT light entities for a configured device and register them."""
    hass.data.setdefault(DATA_KEY, {})
    hass.data[DOMAIN]['add_entities'][ENTITY_DOMAIN] = async_add_entities
    model = str(config.get(CONF_MODEL) or '')
    entities = []
    # mrbond.airer models are intentionally skipped (handled elsewhere)
    if model.find('mrbond.airer') >= 0:
        pass
    else:
        miot = config.get('miot_type')
        if miot:
            spec = await MiotSpec.async_from_type(hass, miot)
            # only light services with an "on" property are usable
            for srv in spec.get_services(ENTITY_DOMAIN):
                if not srv.get_property('on'):
                    continue
                entities.append(MiotLightEntity(config, srv))
    for entity in entities:
        hass.data[DOMAIN]['entities'][entity.unique_id] = entity
    async_add_entities(entities, update_before_add=True)
    bind_services_to_entries(hass, SERVICE_TO_METHOD)
class MiotLightEntity(MiotToggleEntity, LightEntity):
    """Light entity backed by a MIoT light service."""
    def __init__(self, config: dict, miot_service: MiotService, **kwargs):
        """Resolve the service's light properties and derive supported features."""
        kwargs.setdefault('logger', _LOGGER)
        super().__init__(miot_service, config=config, **kwargs)
        self._prop_power = miot_service.get_property('on')
        self._prop_mode = miot_service.get_property('mode')
        self._prop_brightness = miot_service.get_property('brightness')
        self._prop_color_temp = miot_service.get_property('color_temperature')
        self._prop_color = miot_service.get_property('color')
        self._srv_ambient_custom = miot_service.spec.get_service('ambient_light_custom')
        if self._srv_ambient_custom:
            # fall back to the ambient_light_custom service for color support
            if not self._prop_color:
                self._prop_color = self._srv_ambient_custom.get_property('color')
        # advertise only the capabilities the device actually declares
        self._attr_supported_color_modes = set()
        if self._prop_power:
            self._attr_supported_color_modes.add(COLOR_MODE_ONOFF)
        if self._prop_brightness:
            self._supported_features |= SUPPORT_BRIGHTNESS
            self._attr_supported_color_modes.add(COLOR_MODE_BRIGHTNESS)
        if self._prop_color_temp:
            self._supported_features |= SUPPORT_COLOR_TEMP
            self._attr_supported_color_modes.add(COLOR_MODE_COLOR_TEMP)
        if self._prop_color:
            self._supported_features |= SUPPORT_COLOR
            self._attr_supported_color_modes.add(COLOR_MODE_HS)
        if self._prop_mode:
            self._supported_features |= SUPPORT_EFFECT
    def turn_on(self, **kwargs):
        """Turn the light on, applying any brightness/color-temp/color/effect kwargs."""
        ret = False
        if not self.is_on:
            ret = self.set_property(self._prop_power, True)
        if self._prop_brightness and ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs[ATTR_BRIGHTNESS]
            per = brightness / 255
            # map HA's 0..255 brightness onto the device's declared range
            val = per * 100
            if self._prop_brightness.value_range:
                val = per * self._prop_brightness.range_max()
            _LOGGER.debug('Setting light: %s brightness: %s %s%%', self.name, brightness, per * 100)
            ret = self.set_property(self._prop_brightness, round(val))
        if self._prop_color_temp and ATTR_COLOR_TEMP in kwargs:
            mired = kwargs[ATTR_COLOR_TEMP]
            color_temp = self.translate_mired(mired)
            _LOGGER.debug('Setting light: %s color temperature: %s mireds, %s ct', self.name, mired, color_temp)
            ret = self.set_property(self._prop_color_temp, color_temp)
        if self._prop_color and ATTR_HS_COLOR in kwargs:
            rgb = color.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
            num = rgb_to_int(rgb)
            _LOGGER.debug('Setting light: %s color: %s', self.name, rgb)
            ret = self.set_property(self._prop_color, num)
        if self._prop_mode and ATTR_EFFECT in kwargs:
            val = self._prop_mode.list_value(kwargs[ATTR_EFFECT])
            _LOGGER.debug('Setting light: %s effect: %s(%s)', self.name, kwargs[ATTR_EFFECT], val)
            ret = self.set_property(self._prop_mode, val)
        return ret
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        val = None
        if self._prop_brightness:
            val = self._prop_brightness.from_dict(self._state_attrs)
        if val is None:
            return None
        rmx = 100
        if self._prop_brightness.value_range:
            rmx = self._prop_brightness.range_max()
        return round(255 / rmx * int(val))
    @property
    def hs_color(self):
        """Return the hue and saturation color value [float, float]."""
        rgb = self.rgb_color
        if rgb is not None:
            return color.color_RGB_to_hs(*rgb)
        return None
    @property
    def rgb_color(self):
        """Return the rgb color value [int, int, int]."""
        if self._prop_color:
            num = round(self._prop_color.from_dict(self._state_attrs) or 0)
            return int_to_rgb(num)
        return None
    @property
    def color_temp(self):
        """Current color temperature in mireds (defaulting to 2700 K)."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.from_dict(self._state_attrs) or 2700)
    @property
    def min_mireds(self):
        """Coldest supported mireds, derived from the device's maximum kelvin."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.value_range[1] or 5700)
    @property
    def max_mireds(self):
        """Warmest supported mireds, derived from the device's minimum kelvin."""
        if not self._prop_color_temp:
            return None
        return self.translate_mired(self._prop_color_temp.value_range[0] or 2700)
    @staticmethod
    def translate_mired(num):
        """Convert kelvin to mireds (or back; the transform is its own inverse)."""
        # NOTE(review): num == 0 raises ZeroDivisionError here — only
        # TypeError is caught; consider guarding zero as well.
        try:
            return round(1000000 / num)
        except TypeError:
            return round(1000000 / 2700)
    @property
    def effect_list(self):
        """Names of the selectable light modes, if the device has any."""
        if self._prop_mode:
            return self._prop_mode.list_descriptions()
        return None
    @property
    def effect(self):
        """Description of the currently active light mode, if known."""
        if self._prop_mode:
            val = self._prop_mode.from_dict(self._state_attrs)
            if val is not None:
                return self._prop_mode.list_description(val)
        return None
class MiotLightSubEntity(MiotLightEntity, ToggleSubEntity):
    """Light sub-entity for a light service that belongs to a parent MIoT device."""
    def __init__(self, parent, miot_service: MiotService):
        prop_power = miot_service.get_property('on')
        # initialise the toggle side first so the state-attribute keys exist
        # before the light side inspects the service
        ToggleSubEntity.__init__(self, parent, prop_power.full_name, {
            'keys': list((miot_service.mapping() or {}).keys()),
        })
        MiotLightEntity.__init__(self, {
            **parent.miot_config,
            'name': f'{parent.device_name}',
        }, miot_service, device=parent.miot_device)
        self.entity_id = miot_service.generate_entity_id(self)
        self._prop_power = prop_power
    def update(self, data=None):
        """Refresh from the parent; nothing extra to do when unavailable."""
        super().update(data)
        if not self._available:
            return
    async def async_update(self):
        # run the blocking update in the executor to keep the event loop free
        await self.hass.async_add_executor_job(partial(self.update))
class LightSubEntity(ToggleSubEntity, LightEntity):
    """Generic light sub-entity whose state is mirrored from its parent entity."""
    # cached values populated by update()
    _brightness = None
    _color_temp = None
    def update(self, data=None):
        """Refresh cached brightness/color-temp from the parent's attributes."""
        super().update(data)
        if self._available:
            attrs = self._state_attrs
            self._brightness = attrs.get('brightness', 0)
            self._color_temp = attrs.get('color_temp', 0)
    def turn_on(self, **kwargs):
        """Delegate switching on to the parent entity."""
        self.call_parent(['turn_on_light', 'turn_on'], **kwargs)
    def turn_off(self, **kwargs):
        """Delegate switching off to the parent entity."""
        self.call_parent(['turn_off_light', 'turn_off'], **kwargs)
    @property
    def brightness(self):
        """Cached brightness from the last update."""
        return self._brightness
    @property
    def color_temp(self):
        """Cached color temperature (mireds) from the last update."""
        return self._color_temp
f714f9e04cfc2c6e3e123f7aa5966dc910128689 | 10,457 | py | Python | tests/test_app/test_result.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | [
"BSD-3-Clause"
] | null | null | null | tests/test_app/test_result.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | [
"BSD-3-Clause"
] | null | null | null | tests/test_app/test_result.py | u6052029/cogent3 | ca0efcb7f60b715bcbfbecd924cdb98a53cefe20 | [
"BSD-3-Clause"
] | null | null | null | from unittest import TestCase, main
from cogent3 import make_aligned_seqs
from cogent3.app import evo as evo_app
from cogent3.app.result import (
generic_result,
model_collection_result,
model_result,
)
from cogent3.util.deserialise import deserialise_object
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Alpha"
class TestGenericResult(TestCase):
    """Tests for the generic_result container."""

    def test_deserialised_values(self):
        """correctly deserialises values"""
        from cogent3 import DNA

        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, DNA)
        # if we have a type value without "cogent3", leaves as is
        data = {"type": "core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, data)
        # or if no "type" entry, leaves as is
        data = {"moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, data)

    def test_repr_str(self):
        """repr() and str() produce strings"""
        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        # fix: previously nothing was asserted, so regressions in
        # __repr__/__str__ that returned non-strings would pass silently
        self.assertIsInstance(repr(result), str)
        self.assertIsInstance(str(result), str)

    def test_keys(self):
        """keys() lists the stored item names"""
        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        keys = result.keys()
        self.assertEqual(keys, ["key"])
class TestModelResult(TestCase):
    """Tests for model_result behaviour."""

    def test_model_result_alignment(self):
        """returns alignment from lf"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            show_progress=False,
            opt_args=dict(max_evaluations=5, limit_action="ignore"),
        )
        result = mod(aln)
        got = result.alignment
        self.assertEqual(got.to_dict(), _data)

    def test_model_result_alignment_split_pos_model(self):
        """returns alignment from lf with split codon positions"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=5, limit_action="ignore"),
        )
        result = mod(aln)
        # each codon position's alignment is every third column of the original
        for i in range(1, 4):
            got = result.alignment[i]
            expect = aln[i - 1 :: 3]
            self.assertEqual(got.to_dict(), expect.to_dict())

    def test_model_result_repr_split_pos_model(self):
        """repr works for model_result of split codon positions"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        s = repr(result)
        # fix: previously nothing was asserted
        self.assertIsInstance(s, str)

    def test_model_result_tree_split_pos_model(self):
        """returns tree from lf with split codon positions"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        # fix: was assertTrue(len(result.tree), 3), which always passed
        # because the second argument is only the failure message
        self.assertEqual(len(result.tree), 3)
        # check the trees are different by summing lengths
        lengths = {t.total_length() for t in result.tree.values()}
        self.assertTrue(len(lengths) > 1)

    def test_model_result_simulate_alignment(self):
        """simulated alignment has same length but different data"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        got = result.simulate_alignment()
        self.assertEqual(len(aln), len(got))
        self.assertNotEqual(aln.to_dict(), got.to_dict())

    def test_model_result_tree_discrete_time(self):
        """returns paralinear lengths"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        model1 = evo_app.model(
            "BH", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        result = model1(aln)
        got = result.tree
        # for discrete-time models, branch length is the paralinear distance
        self.assertEqual(
            got.children[0].params["length"], got.children[0].params["paralinear"]
        )

    def test_model_result_setitem(self):
        """TypeError if value a likelihood function, or a dict with correct type"""
        v = dict(type="arbitrary")
        r = model_result(name="one", source="two")
        with self.assertRaises(TypeError):
            r["name"] = v
        with self.assertRaises(TypeError):
            r["name"] = 4
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        with self.assertRaises(TypeError):
            r["name"] = aln
class TestModelCollectionResult(TestCase):
    """Tests for model_collection_result."""

    # model fits are expensive, so they are computed once in setUp and
    # cached on the class for all test methods
    _model_results = {}

    def setUp(self):
        """constructs _model_results if they don't already exist"""
        if self._model_results:
            return
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        model1 = evo_app.model(
            "F81", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        model2 = evo_app.model(
            "HKY85", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        mr1 = model1(aln)
        mr2 = model2(aln)
        self._model_results[mr1.name] = mr1
        self._model_results[mr2.name] = mr2

    def test_get_best_model(self):
        """should correctly identify the best model"""
        coll = model_collection_result(None)
        coll.update(self._model_results)
        got = coll.get_best_model()
        # we ensure a model_result instance is returned from the possible set
        self.assertIn(got, self._model_results.values())

    def test_select_model(self):
        """correctly select models"""
        # we ensure a series of model_result instances is returned
        coll = model_collection_result(None)
        coll.update(self._model_results)
        got = coll.select_models()
        self.assertTrue(len(got) > 0)
        possible = list(self._model_results.values())
        for m in got:
            self.assertIn(m, possible)

    def test_model_collection_result_repr(self):
        """constructed result can do the different repr"""
        # fix: repr was previously taken from a second, still-empty instance,
        # so the populated collection's repr was never exercised
        coll = model_collection_result(None)
        coll.update(self._model_results)
        got = coll.__repr__()
        self.assertIsInstance(got, str)
        got = coll._repr_html_()
        self.assertIsInstance(got, str)

    def test_json_roundtrip(self):
        """roundtrip from json correct"""
        coll = model_collection_result(name="blah", source="blah2")
        coll.update(self._model_results)
        self.assertEqual(coll.name, "blah")
        self.assertEqual(coll.source, "blah2")
        orig = coll.__repr__()
        got = deserialise_object(coll.to_json())
        self.assertEqual(got.__repr__(), orig)
        self.assertIsInstance(got, model_collection_result)
        self.assertEqual(got.name, coll.name)
        self.assertEqual(got.source, coll.source)
        # select_models() should not fail
        got = deserialise_object(coll.to_json())
        m = got.select_models()
        self.assertIsInstance(m[0], model_result)
class TestHypothesisResult(TestCase):
    """Tests for hypothesis_result."""

    def test_pvalue(self):
        """hypothesis test p-value property lies in [0, 1]"""
        seqs = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=seqs, moltype="dna")
        # identical optimiser settings for null and alternate models
        opt = dict(max_evaluations=25, limit_action="ignore")
        null = evo_app.model("F81", opt_args=opt)
        alt = evo_app.model("HKY85", opt_args=opt)
        hyp = evo_app.hypothesis(null, alt)
        result = hyp(aln)
        self.assertTrue(0 <= result.pvalue <= 1)
# run the unittest CLI when this module is executed as a script
if __name__ == "__main__":
    main()
| 35.568027 | 83 | 0.609353 | from unittest import TestCase, main
from cogent3 import make_aligned_seqs
from cogent3.app import evo as evo_app
from cogent3.app.result import (
generic_result,
model_collection_result,
model_result,
)
from cogent3.util.deserialise import deserialise_object
__author__ = "Gavin Huttley"
__copyright__ = "Copyright 2007-2020, The Cogent Project"
__credits__ = ["Gavin Huttley"]
__license__ = "BSD-3"
__version__ = "2020.7.2a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Alpha"
class TestGenericResult(TestCase):
    """Tests for the generic_result container."""
    def test_deserialised_values(self):
        """correctly deserialises values stored as cogent3 serialised dicts"""
        from cogent3 import DNA
        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, DNA)
        # a "type" value not under the cogent3 namespace is left untouched
        data = {"type": "core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, data)
        # likewise when there is no "type" entry at all
        data = {"moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        result.deserialised_values()
        got = result["key"]
        self.assertEqual(got, data)
    def test_repr_str(self):
        """repr()/str() do not raise"""
        # NOTE(review): nothing is asserted here; this only guards against
        # exceptions raised by __repr__/__str__
        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        r = repr(result)
        s = str(result)
    def test_keys(self):
        """keys() lists the stored item names"""
        data = {"type": "cogent3.core.moltype.MolType", "moltype": "dna"}
        result = generic_result(source="blah.json")
        result["key"] = data
        keys = result.keys()
        self.assertEqual(keys, ["key"])
class TestModelResult(TestCase):
    """Tests for model_result behaviour."""
    def test_model_result_alignment(self):
        """returns alignment from lf"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            show_progress=False,
            opt_args=dict(max_evaluations=5, limit_action="ignore"),
        )
        result = mod(aln)
        got = result.alignment
        self.assertEqual(got.to_dict(), _data)
    def test_model_result_alignment_split_pos_model(self):
        """returns alignment from lf with split codon positions"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=5, limit_action="ignore"),
        )
        result = mod(aln)
        # each codon position's alignment is every third column of the original
        for i in range(1, 4):
            got = result.alignment[i]
            expect = aln[i - 1 :: 3]
            self.assertEqual(got.to_dict(), expect.to_dict())
    def test_model_result_repr_split_pos_model(self):
        """repr works for model_result of split codon positions"""
        # NOTE(review): no assertion; only checks repr() does not raise
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        s = repr(result)
    def test_model_result_tree_split_pos_model(self):
        """returns tree from lf with split codon positions"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        # NOTE(review): assertTrue(len(result.tree), 3) always passes — the
        # second argument is the failure message; assertEqual was intended
        self.assertTrue(len(result.tree), 3)
        # check the trees are different by summing lengths
        lengths = set()
        for i, t in result.tree.items():
            lengths.add(t.total_length())
        self.assertTrue(len(lengths) > 1)
    def test_model_result_simulate_alignment(self):
        """simulated alignment has same length but different data"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        mod = evo_app.model(
            "F81",
            split_codons=True,
            show_progress=False,
            opt_args=dict(max_evaluations=55, limit_action="ignore"),
        )
        result = mod(aln)
        got = result.simulate_alignment()
        self.assertEqual(len(aln), len(got))
        self.assertNotEqual(aln.to_dict(), got.to_dict())
    def test_model_result_tree_discrete_time(self):
        """returns paralinear lengths"""
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        model1 = evo_app.model(
            "BH", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        result = model1(aln)
        got = result.tree
        # for discrete-time models, branch length is the paralinear distance
        self.assertEqual(
            got.children[0].params["length"], got.children[0].params["paralinear"]
        )
    def test_model_result_setitem(self):
        """TypeError if value a likelihood function, or a dict with correct type"""
        v = dict(type="arbitrary")
        r = model_result(name="one", source="two")
        with self.assertRaises(TypeError):
            r["name"] = v
        with self.assertRaises(TypeError):
            r["name"] = 4
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        with self.assertRaises(TypeError):
            r["name"] = aln
class TestModelCollectionResult(TestCase):
    """Tests for model_collection_result."""

    # class-level cache, populated once by setUp and shared across tests so
    # the (slow) model fitting only happens a single time
    _model_results = {}

    def setUp(self):
        if self._model_results:
            return
        _data = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=_data, moltype="dna")
        model1 = evo_app.model(
            "F81", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        model2 = evo_app.model(
            "HKY85", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        mr1 = model1(aln)
        mr2 = model2(aln)
        self._model_results[mr1.name] = mr1
        self._model_results[mr2.name] = mr2

    def test_get_best_model(self):
        """get_best_model returns one of the member results."""
        coll = model_collection_result(None)
        coll.update(self._model_results)
        got = coll.get_best_model()
        self.assertIn(got, self._model_results.values())

    def test_select_model(self):
        """select_models returns a non-empty subset of the member results."""
        coll = model_collection_result(None)
        coll.update(self._model_results)
        got = coll.select_models()
        self.assertTrue(len(got) > 0)
        possible = list(self._model_results.values())
        for m in got:
            self.assertIn(m, possible)

    def test_model_collection_result_repr(self):
        """repr/_repr_html_ return str for empty and populated collections."""
        empty = model_collection_result(None)
        populated = model_collection_result(None)
        populated.update(self._model_results)
        # bug fix: previously a populated collection was constructed but
        # never used -- only the empty one was repr'd; exercise both states
        for coll in (empty, populated):
            self.assertIsInstance(coll.__repr__(), str)
            self.assertIsInstance(coll._repr_html_(), str)

    def test_json_roundtrip(self):
        """to_json/deserialise_object round-trips name, source and members."""
        coll = model_collection_result(name="blah", source="blah2")
        coll.update(self._model_results)
        self.assertEqual(coll.name, "blah")
        self.assertEqual(coll.source, "blah2")
        orig = coll.__repr__()
        got = deserialise_object(coll.to_json())
        self.assertEqual(got.__repr__(), orig)
        self.assertIsInstance(got, model_collection_result)
        self.assertEqual(got.name, coll.name)
        self.assertEqual(got.source, coll.source)
        # members survive the round trip as model_result instances
        got = deserialise_object(coll.to_json())
        m = got.select_models()
        self.assertIsInstance(m[0], model_result)
class TestHypothesisResult(TestCase):
    """Tests for the result object produced by evo_app.hypothesis."""

    def test_pvalue(self):
        """The likelihood-ratio test p-value is a probability in [0, 1]."""
        seqs = {
            "Human": "ATGCGGCTCGCGGAGGCCGCGCTCGCGGAG",
            "Mouse": "ATGCCCGGCGCCAAGGCAGCGCTGGCGGAG",
            "Opossum": "ATGCCAGTGAAAGTGGCGGCGGTGGCTGAG",
        }
        aln = make_aligned_seqs(data=seqs, moltype="dna")
        null = evo_app.model(
            "F81", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        alt = evo_app.model(
            "HKY85", opt_args=dict(max_evaluations=25, limit_action="ignore")
        )
        hyp = evo_app.hypothesis(null, alt)
        result = hyp(aln)
        self.assertTrue(0 <= result.pvalue <= 1)
# run the unittest suite when this file is executed directly
if __name__ == "__main__":
    main()
| true | true |
f714fbc79b42edf40142a4ad4bbb7a90e3778f3f | 789 | py | Python | account/views.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | account/views.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | account/views.py | AhteshamSid/College_school_management_system | a8504708ea2f347d18d4ac59198f29d05c0374d2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import UserProfile
from .forms import ProfileForm
def profile(request, pk):
    """Render the profile page for the UserProfile with primary key ``pk``."""
    user_profile = UserProfile.objects.get(id=pk)
    return render(request, 'account/profile.html', {'profile': user_profile})
def update_profile(request, pk):
    """Display and process the edit form for the UserProfile with id ``pk``."""
    user_profile = UserProfile.objects.get(id=pk)
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES, instance=user_profile)
        if form.is_valid():
            form.save()
            return redirect('home')
    else:
        form = ProfileForm(instance=user_profile)
    # on an invalid POST the bound form (carrying errors) is re-rendered
    return render(request, 'account/update-profile.html', {'forms': form})
| 29.222222 | 75 | 0.653992 | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from .models import UserProfile
from .forms import ProfileForm
def profile(request, pk):
profile = UserProfile.objects.get(id=pk)
context = {
'profile': profile
}
return render(request, 'account/profile.html', context)
def update_profile(request, pk):
profile = UserProfile.objects.get(id=pk)
forms = ProfileForm(instance=profile)
if request.method == 'POST':
forms = ProfileForm(request.POST, request.FILES, instance=profile)
if forms.is_valid():
forms.save()
return redirect('home')
context = {
'forms': forms
}
return render(request, 'account/update-profile.html', context)
| true | true |
f714fbdb129a1c7ec713e34c3c33a04f1236e5c5 | 9,949 | py | Python | pyanalyze/test_annotations.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | pyanalyze/test_annotations.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | pyanalyze/test_annotations.py | sobolevn/pyanalyze | f3851db84e57e3ff7f8e2dd271c3b218e2d3bbcc | [
"Apache-2.0"
] | null | null | null | # static analysis: ignore
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .test_name_check_visitor import TestNameCheckVisitorBase
from .test_node_visitor import skip_before
from .error_code import ErrorCode
class TestAnnotations(TestNameCheckVisitorBase):
    """Checks that pyanalyze maps type annotations onto its Value objects."""
    # Union/Optional and parameterized containers resolve to MultiValuedValue
    # and GenericValue; Pattern/Match resolve onto the concrete re types.
    @skip_before((3, 5))
    def test_union(self):
        self.assert_passes(
            """
            import re
            from typing import Union, Optional, List, Set, Dict, Match, Pattern
            _Pattern = type(re.compile("a"))
            _Match = type(re.match("a", "a"))
            def capybara() -> Union[int, str]:
                return 0
            def kerodon() -> Optional[int]:
                return None
            def complex() -> Union[List[str], Set[int], Dict[float, List[str]], int]:
                return []
            def check() -> None:
                assert_is_value(capybara(), MultiValuedValue([TypedValue(int), TypedValue(str)]))
                assert_is_value(kerodon(), MultiValuedValue([TypedValue(int), KnownValue(None)]))
                assert_is_value(
                    complex(),
                    MultiValuedValue(
                        [
                            GenericValue(list, [TypedValue(str)]),
                            GenericValue(set, [TypedValue(int)]),
                            GenericValue(
                                dict, [TypedValue(float), GenericValue(list, [TypedValue(str)])]
                            ),
                            TypedValue(int),
                        ]
                    ),
                )
            def rgx(m: Match[str], p: Pattern[bytes]) -> None:
                assert_is_value(p, GenericValue(_Pattern, [TypedValue(bytes)]))
                assert_is_value(m, GenericValue(_Match, [TypedValue(str)]))
            """
        )
    # bare List vs List[int] vs an ABC (SupportsInt)
    @skip_before((3, 5))
    def test_generic(self):
        self.assert_passes(
            """
            from typing import List, SupportsInt
            def capybara(x: List[int], y: List, z: SupportsInt) -> None:
                assert_is_value(x, GenericValue(list, [TypedValue(int)]))
                assert_is_value(y, TypedValue(list))
                assert_is_value(z, TypedValue(SupportsInt))
            """
        )
    # an explicit self annotation wins over the implicit enclosing class
    @skip_before((3, 5))
    def test_self_type(self):
        self.assert_passes(
            """
            class Capybara:
                def f(self: int) -> None:
                    assert_is_value(self, TypedValue(int))
                def g(self) -> None:
                    assert_is_value(self, TypedValue(Capybara))
            """
        )
    # NewType parameters become NewTypeValue
    @skip_before((3, 5))
    def test_newtype(self):
        self.assert_passes(
            """
            from typing import NewType, Tuple
            X = NewType("X", int)
            Y = NewType("Y", Tuple[str, ...])
            def capybara(x: X, y: Y) -> None:
                assert_is_value(x, NewTypeValue(X))
                print(y) # just asserting that this doesn't cause errors
            """
        )
    # Literal[...] produces KnownValue(s)
    @skip_before((3, 5))
    def test_literal(self):
        self.assert_passes(
            """
            from typing_extensions import Literal
            def capybara(x: Literal[True], y: Literal[True, False]) -> None:
                assert_is_value(x, KnownValue(True))
                assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
            """
        )
    # calling a @contextmanager function must not be inferred as Iterator[int]
    @skip_before((3, 5))
    def test_contextmanager(self):
        self.assert_passes(
            """
            from contextlib import contextmanager
            from typing import Iterator
            @contextmanager
            def capybara() -> Iterator[int]:
                yield 3
            def kerodon():
                # Ideally should be ContextManager[int], but at least
                # it should not be Iterator[int], which is what pyanalyze
                # used to infer.
                assert_is_value(capybara(), UNRESOLVED_VALUE)
            """
        )
    # -> None on __init__ must not override the constructed-instance type
    @skip_before((3, 0))
    def test_none_annotations(self):
        self.assert_passes(
            """
            def mara() -> None:
                pass
            class Capybara:
                def __init__(self) -> None:
                    pass
            def check() -> None:
                # Make sure we don't infer None if __init__ is annotated
                # as returning None.
                assert_is_value(Capybara(), TypedValue(Capybara))
                assert_is_value(mara(), KnownValue(None))
            """
        )
    # annotation scoping (nested functions, class bodies) and argument checks
    @skip_before((3, 0))
    def test_annotations(self):
        self.assert_passes(
            """
            def caviidae() -> None:
                x = int
                # tests that annotations in a nested functions are not evaluated in a context where they don't exist
                def capybara(a: x, *b: x, c: x, d: x=3, **kwargs: x):
                    pass
                assert_is_value(capybara, KnownValue(capybara))
            """
        )
        self.assert_passes(
            """
            class Caviidae:
                class Capybara:
                    pass
                def eat(self, x: Capybara):
                    assert_is_value(self, TypedValue(Caviidae))
                @staticmethod
                def static(x: "Caviidae"):
                    assert_is_value(x, TypedValue(Caviidae))
            """
        )
        self.assert_fails(
            ErrorCode.incompatible_argument,
            """
            def capybara(x: int) -> None:
                pass
            def kerodon():
                capybara("not an int")
            """,
        )
    # return-annotation enforcement: implicit None, generators, abstract methods
    @skip_before((3, 0))
    def test_incompatible_return_value(self):
        self.assert_fails(
            ErrorCode.incompatible_return_value,
            """
            def capybara() -> int:
                return "not an int"
            """,
        )
        self.assert_fails(
            ErrorCode.incompatible_return_value,
            """
            def capybara(x: bool) -> int:
                if not x:
                    return
                return 42
            """,
        )
        self.assert_passes(
            """
            from typing import Generator
            def capybara(x: bool) -> Generator[int, None, None]:
                if not x:
                    return
                yield 42
            """
        )
        self.assert_fails(
            ErrorCode.incompatible_return_value,
            """
            def f() -> int:
                pass
            """,
        )
        self.assert_passes(
            """
            from abc import abstractmethod
            class X:
                @abstractmethod
                def f(self) -> int:
                    pass
            """,
        )
        self.assert_fails(
            ErrorCode.incompatible_return_value,
            """
            def f() -> None:
                assert_is_value(g(), UNRESOLVED_VALUE)
                return g()
            def g():
                pass
            """,
        )
    # a default value must be compatible with the parameter annotation
    @skip_before((3, 0))
    def test_incompatible_default(self):
        self.assert_fails(
            ErrorCode.incompatible_default,
            """
            def capybara(x: int = None) -> None:
                pass
            """,
        )
    # both decorator and property() call forms use the getter's return type
    @skip_before((3, 0))
    def test_property(self):
        self.assert_passes(
            """
            class Capybara:
                def __init__(self, x):
                    self.x = x
                @property
                def f(self) -> int:
                    return self.x
                def get_g(self) -> int:
                    return self.x * 2
                g = property(get_g)
            def user(c: Capybara) -> None:
                assert_is_value(c.f, TypedValue(int))
                assert_is_value(c.get_g(), TypedValue(int))
                assert_is_value(c.g, TypedValue(int))
            """
        )
    # an explicit Any return overrides inference; no annotation keeps it
    @skip_before((3, 0))
    def test_annotations_override_return(self):
        self.assert_passes(
            """
            from typing import Any
            def f() -> Any:
                return 0
            def g():
                return 0
            def capybara():
                assert_is_value(f(), UNRESOLVED_VALUE)
                assert_is_value(g(), KnownValue(0))
            """
        )
    @skip_before((3, 0))
    def test_cached_classmethod(self):
        # just test that this doesn't crash
        self.assert_passes(
            """
            from functools import lru_cache
            class Capybara:
                @classmethod
                @lru_cache()
                def f(cls) -> int:
                    return 3
            """
        )
    # annotated assignment narrows the target and is type-checked
    @skip_before((3, 6))
    def test_annassign(self):
        self.assert_passes(
            """
            def capybara(y):
                x: int = y
                assert_is_value(y, UNRESOLVED_VALUE)
                assert_is_value(x, TypedValue(int))
            """
        )
        self.assert_fails(
            ErrorCode.incompatible_assignment,
            """
            def capybara(y: str):
                x: int = y
            """,
        )
    # homogeneous Tuple[int, ...] vs fixed-length tuple annotations
    @skip_before((3, 5))
    def test_tuples(self):
        self.assert_passes(
            """
            from typing import Tuple, Union
            def capybara(x: Tuple[int, ...], y: Tuple[int], z: Tuple[str, int], omega: Union[Tuple[str, int], None]) -> None:
                assert_is_value(x, GenericValue(tuple, [TypedValue(int)]))
                assert_is_value(y, SequenceIncompleteValue(tuple, [TypedValue(int)]))
                assert_is_value(z, SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]))
                assert_is_value(omega, MultiValuedValue([
                    SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]),
                    KnownValue(None),
                ]))
            """
        )
    # a non-type expression used as an annotation is reported
    @skip_before((3, 0))
    def test_invalid_annotation(self):
        self.assert_fails(
            ErrorCode.invalid_annotation,
            """
            def f(x: 1):
                pass
            """,
        )
    # string annotations resolve, including forward references to later names
    @skip_before((3, 0))
    def test_forward_ref(self):
        self.assert_fails(
            ErrorCode.undefined_name,
            """
            def f(x: "NoSuchType"):
                pass
            """,
        )
        self.assert_passes(
            """
            import typing
            from typing import Optional
            def capybara(x: "X", y: "Optional[X]", z: "typing.Optional[X]"):
                assert_is_value(x, TypedValue(X))
                assert_is_value(y, MultiValuedValue([KnownValue(None), TypedValue(X)]))
                assert_is_value(z, MultiValuedValue([KnownValue(None), TypedValue(X)]))
            class X:
                pass
            """
        )
        self.assert_passes(
            """
            from typing import List
            def capybara(x: "List[int]") -> "List[str]":
                assert_is_value(x, GenericValue(list, [TypedValue(int)]))
                assert_is_value(capybara(x), GenericValue(list, [TypedValue(str)]))
                return []
            """
        )
        self.assert_fails(
            ErrorCode.incompatible_return_value,
            """
            def f() -> "int":
                return ""
            """,
        )
    # Pattern[str] resolves to the concrete compiled-pattern type
    @skip_before((3, 0))
    def test_pattern(self):
        self.assert_passes(
            """
            from typing import Pattern
            import re
            _Pattern = type(re.compile(""))
            def capybara(x: Pattern[str]):
                assert_is_value(x, GenericValue(_Pattern, [TypedValue(str)]))
            """
        )
    # Final-annotated names keep their KnownValue
    @skip_before((3, 6))
    def test_final(self):
        self.assert_passes(
            """
            from typing_extensions import Final
            x: Final = 3
            def capybara():
                y: Final = 4
                assert_is_value(x, KnownValue(3))
                assert_is_value(y, KnownValue(4))
            """
        )
    # Type[X] (including in string form) becomes SubclassValue
    @skip_before((3, 6))
    def test_type(self):
        self.assert_passes(
            """
            from typing import Type
            def capybara(x: Type[str], y: "Type[int]"):
                assert_is_value(x, SubclassValue(str))
                assert_is_value(y, SubclassValue(int))
            """
        )
| 22.976905 | 113 | 0.592924 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from .test_name_check_visitor import TestNameCheckVisitorBase
from .test_node_visitor import skip_before
from .error_code import ErrorCode
class TestAnnotations(TestNameCheckVisitorBase):
@skip_before((3, 5))
def test_union(self):
self.assert_passes(
"""
import re
from typing import Union, Optional, List, Set, Dict, Match, Pattern
_Pattern = type(re.compile("a"))
_Match = type(re.match("a", "a"))
def capybara() -> Union[int, str]:
return 0
def kerodon() -> Optional[int]:
return None
def complex() -> Union[List[str], Set[int], Dict[float, List[str]], int]:
return []
def check() -> None:
assert_is_value(capybara(), MultiValuedValue([TypedValue(int), TypedValue(str)]))
assert_is_value(kerodon(), MultiValuedValue([TypedValue(int), KnownValue(None)]))
assert_is_value(
complex(),
MultiValuedValue(
[
GenericValue(list, [TypedValue(str)]),
GenericValue(set, [TypedValue(int)]),
GenericValue(
dict, [TypedValue(float), GenericValue(list, [TypedValue(str)])]
),
TypedValue(int),
]
),
)
def rgx(m: Match[str], p: Pattern[bytes]) -> None:
assert_is_value(p, GenericValue(_Pattern, [TypedValue(bytes)]))
assert_is_value(m, GenericValue(_Match, [TypedValue(str)]))
"""
)
@skip_before((3, 5))
def test_generic(self):
self.assert_passes(
"""
from typing import List, SupportsInt
def capybara(x: List[int], y: List, z: SupportsInt) -> None:
assert_is_value(x, GenericValue(list, [TypedValue(int)]))
assert_is_value(y, TypedValue(list))
assert_is_value(z, TypedValue(SupportsInt))
"""
)
@skip_before((3, 5))
def test_self_type(self):
self.assert_passes(
"""
class Capybara:
def f(self: int) -> None:
assert_is_value(self, TypedValue(int))
def g(self) -> None:
assert_is_value(self, TypedValue(Capybara))
"""
)
@skip_before((3, 5))
def test_newtype(self):
self.assert_passes(
"""
from typing import NewType, Tuple
X = NewType("X", int)
Y = NewType("Y", Tuple[str, ...])
def capybara(x: X, y: Y) -> None:
assert_is_value(x, NewTypeValue(X))
print(y) # just asserting that this doesn't cause errors
"""
)
@skip_before((3, 5))
def test_literal(self):
self.assert_passes(
"""
from typing_extensions import Literal
def capybara(x: Literal[True], y: Literal[True, False]) -> None:
assert_is_value(x, KnownValue(True))
assert_is_value(y, MultiValuedValue([KnownValue(True), KnownValue(False)]))
"""
)
@skip_before((3, 5))
def test_contextmanager(self):
self.assert_passes(
"""
from contextlib import contextmanager
from typing import Iterator
@contextmanager
def capybara() -> Iterator[int]:
yield 3
def kerodon():
# Ideally should be ContextManager[int], but at least
# it should not be Iterator[int], which is what pyanalyze
# used to infer.
assert_is_value(capybara(), UNRESOLVED_VALUE)
"""
)
@skip_before((3, 0))
def test_none_annotations(self):
self.assert_passes(
"""
def mara() -> None:
pass
class Capybara:
def __init__(self) -> None:
pass
def check() -> None:
# Make sure we don't infer None if __init__ is annotated
# as returning None.
assert_is_value(Capybara(), TypedValue(Capybara))
assert_is_value(mara(), KnownValue(None))
"""
)
@skip_before((3, 0))
def test_annotations(self):
self.assert_passes(
"""
def caviidae() -> None:
x = int
# tests that annotations in a nested functions are not evaluated in a context where they don't exist
def capybara(a: x, *b: x, c: x, d: x=3, **kwargs: x):
pass
assert_is_value(capybara, KnownValue(capybara))
"""
)
self.assert_passes(
"""
class Caviidae:
class Capybara:
pass
def eat(self, x: Capybara):
assert_is_value(self, TypedValue(Caviidae))
@staticmethod
def static(x: "Caviidae"):
assert_is_value(x, TypedValue(Caviidae))
"""
)
self.assert_fails(
ErrorCode.incompatible_argument,
"""
def capybara(x: int) -> None:
pass
def kerodon():
capybara("not an int")
""",
)
@skip_before((3, 0))
def test_incompatible_return_value(self):
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def capybara() -> int:
return "not an int"
""",
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def capybara(x: bool) -> int:
if not x:
return
return 42
""",
)
self.assert_passes(
"""
from typing import Generator
def capybara(x: bool) -> Generator[int, None, None]:
if not x:
return
yield 42
"""
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> int:
pass
""",
)
self.assert_passes(
"""
from abc import abstractmethod
class X:
@abstractmethod
def f(self) -> int:
pass
""",
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> None:
assert_is_value(g(), UNRESOLVED_VALUE)
return g()
def g():
pass
""",
)
@skip_before((3, 0))
def test_incompatible_default(self):
self.assert_fails(
ErrorCode.incompatible_default,
"""
def capybara(x: int = None) -> None:
pass
""",
)
@skip_before((3, 0))
def test_property(self):
self.assert_passes(
"""
class Capybara:
def __init__(self, x):
self.x = x
@property
def f(self) -> int:
return self.x
def get_g(self) -> int:
return self.x * 2
g = property(get_g)
def user(c: Capybara) -> None:
assert_is_value(c.f, TypedValue(int))
assert_is_value(c.get_g(), TypedValue(int))
assert_is_value(c.g, TypedValue(int))
"""
)
@skip_before((3, 0))
def test_annotations_override_return(self):
self.assert_passes(
"""
from typing import Any
def f() -> Any:
return 0
def g():
return 0
def capybara():
assert_is_value(f(), UNRESOLVED_VALUE)
assert_is_value(g(), KnownValue(0))
"""
)
@skip_before((3, 0))
def test_cached_classmethod(self):
# just test that this doesn't crash
self.assert_passes(
"""
from functools import lru_cache
class Capybara:
@classmethod
@lru_cache()
def f(cls) -> int:
return 3
"""
)
@skip_before((3, 6))
def test_annassign(self):
self.assert_passes(
"""
def capybara(y):
x: int = y
assert_is_value(y, UNRESOLVED_VALUE)
assert_is_value(x, TypedValue(int))
"""
)
self.assert_fails(
ErrorCode.incompatible_assignment,
"""
def capybara(y: str):
x: int = y
""",
)
@skip_before((3, 5))
def test_tuples(self):
self.assert_passes(
"""
from typing import Tuple, Union
def capybara(x: Tuple[int, ...], y: Tuple[int], z: Tuple[str, int], omega: Union[Tuple[str, int], None]) -> None:
assert_is_value(x, GenericValue(tuple, [TypedValue(int)]))
assert_is_value(y, SequenceIncompleteValue(tuple, [TypedValue(int)]))
assert_is_value(z, SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]))
assert_is_value(omega, MultiValuedValue([
SequenceIncompleteValue(tuple, [TypedValue(str), TypedValue(int)]),
KnownValue(None),
]))
"""
)
@skip_before((3, 0))
def test_invalid_annotation(self):
self.assert_fails(
ErrorCode.invalid_annotation,
"""
def f(x: 1):
pass
""",
)
@skip_before((3, 0))
def test_forward_ref(self):
self.assert_fails(
ErrorCode.undefined_name,
"""
def f(x: "NoSuchType"):
pass
""",
)
self.assert_passes(
"""
import typing
from typing import Optional
def capybara(x: "X", y: "Optional[X]", z: "typing.Optional[X]"):
assert_is_value(x, TypedValue(X))
assert_is_value(y, MultiValuedValue([KnownValue(None), TypedValue(X)]))
assert_is_value(z, MultiValuedValue([KnownValue(None), TypedValue(X)]))
class X:
pass
"""
)
self.assert_passes(
"""
from typing import List
def capybara(x: "List[int]") -> "List[str]":
assert_is_value(x, GenericValue(list, [TypedValue(int)]))
assert_is_value(capybara(x), GenericValue(list, [TypedValue(str)]))
return []
"""
)
self.assert_fails(
ErrorCode.incompatible_return_value,
"""
def f() -> "int":
return ""
""",
)
@skip_before((3, 0))
def test_pattern(self):
self.assert_passes(
"""
from typing import Pattern
import re
_Pattern = type(re.compile(""))
def capybara(x: Pattern[str]):
assert_is_value(x, GenericValue(_Pattern, [TypedValue(str)]))
"""
)
@skip_before((3, 6))
def test_final(self):
self.assert_passes(
"""
from typing_extensions import Final
x: Final = 3
def capybara():
y: Final = 4
assert_is_value(x, KnownValue(3))
assert_is_value(y, KnownValue(4))
"""
)
@skip_before((3, 6))
def test_type(self):
self.assert_passes(
"""
from typing import Type
def capybara(x: Type[str], y: "Type[int]"):
assert_is_value(x, SubclassValue(str))
assert_is_value(y, SubclassValue(int))
"""
)
| true | true |
f714fc4571882f467493e6f5ded8f4fd81a3114e | 9,403 | py | Python | src/ebay_rest/api/buy_browse/models/payment_method.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | [
"MIT"
] | null | null | null | src/ebay_rest/api/buy_browse/models/payment_method.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | [
"MIT"
] | null | null | null | src/ebay_rest/api/buy_browse/models/payment_method.py | gbm001/ebay_rest | 077d3478423ccd80ff35e0361821d6a11180bc54 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Browse API
<p>The Browse API has the following resources:</p> <ul> <li><b> item_summary: </b> Lets shoppers search for specific items by keyword, GTIN, category, charity, product, or item aspects and refine the results by using filters, such as aspects, compatibility, and fields values.</li> <li><b> search_by_image: </b><a href=\"https://developer.ebay.com/api-docs/static/versioning.html#API\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> Lets shoppers search for specific items by image. You can refine the results by using URI parameters and filters.</li> <li><b> item: </b> <ul><li>Lets you retrieve the details of a specific item or all the items in an item group, which is an item with variations such as color and size and check if a product is compatible with the specified item, such as if a specific car is compatible with a specific part.</li> <li>Provides a bridge between the eBay legacy APIs, such as <b> Finding</b>, and the RESTful APIs, which use different formats for the item IDs.</li> </ul> </li> <li> <b> shopping_cart: </b> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#API\" target=\"_blank\"><img src=\"/cms/img/docs/experimental-icon.svg\" class=\"legend-icon experimental-icon\" alt=\"Experimental Release\" title=\"Experimental Release\" /> (Experimental)</a> <a href=\"https://developer.ebay.com/api-docs/static/versioning.html#Limited\" target=\"_blank\"> <img src=\"/cms/img/docs/partners-api.svg\" class=\"legend-icon partners-icon\" title=\"Limited Release\" alt=\"Limited Release\" />(Limited Release)</a> Provides the ability for eBay members to see the contents of their eBay cart, and add, remove, and change the quantity of items in their eBay cart. 
<b> Note: </b> This resource is not available in the eBay API Explorer.</li></ul> <p>The <b> item_summary</b>, <b> search_by_image</b>, and <b> item</b> resource calls require an <a href=\"/api-docs/static/oauth-client-credentials-grant.html\">Application access token</a>. The <b> shopping_cart</b> resource calls require a <a href=\"/api-docs/static/oauth-authorization-code-grant.html\">User access token</a>.</p> # noqa: E501
OpenAPI spec version: v1.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PaymentMethod(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # python attribute name -> declared Swagger type (drives to_dict)
    swagger_types = {
        'payment_instructions': 'list[str]',
        'payment_method_brands': 'list[PaymentMethodBrand]',
        'payment_method_type': 'str',
        'seller_instructions': 'list[str]'
    }
    # python attribute name -> JSON field name in the API payload
    attribute_map = {
        'payment_instructions': 'paymentInstructions',
        'payment_method_brands': 'paymentMethodBrands',
        'payment_method_type': 'paymentMethodType',
        'seller_instructions': 'sellerInstructions'
    }
    def __init__(self, payment_instructions=None, payment_method_brands=None, payment_method_type=None, seller_instructions=None): # noqa: E501
        """PaymentMethod - a model defined in Swagger""" # noqa: E501
        self._payment_instructions = None
        self._payment_method_brands = None
        self._payment_method_type = None
        self._seller_instructions = None
        # discriminator is unused (None) for this model
        self.discriminator = None
        # only assign attributes the caller actually supplied
        if payment_instructions is not None:
            self.payment_instructions = payment_instructions
        if payment_method_brands is not None:
            self.payment_method_brands = payment_method_brands
        if payment_method_type is not None:
            self.payment_method_type = payment_method_type
        if seller_instructions is not None:
            self.seller_instructions = seller_instructions
    @property
    def payment_instructions(self):
        """Gets the payment_instructions of this PaymentMethod. # noqa: E501
        The payment instructions for the buyer, such as cash in person or contact seller. # noqa: E501
        :return: The payment_instructions of this PaymentMethod. # noqa: E501
        :rtype: list[str]
        """
        return self._payment_instructions
    @payment_instructions.setter
    def payment_instructions(self, payment_instructions):
        """Sets the payment_instructions of this PaymentMethod.
        The payment instructions for the buyer, such as cash in person or contact seller. # noqa: E501
        :param payment_instructions: The payment_instructions of this PaymentMethod. # noqa: E501
        :type: list[str]
        """
        self._payment_instructions = payment_instructions
    @property
    def payment_method_brands(self):
        """Gets the payment_method_brands of this PaymentMethod. # noqa: E501
        The payment method brands, including the payment method brand type and logo image. # noqa: E501
        :return: The payment_method_brands of this PaymentMethod. # noqa: E501
        :rtype: list[PaymentMethodBrand]
        """
        return self._payment_method_brands
    @payment_method_brands.setter
    def payment_method_brands(self, payment_method_brands):
        """Sets the payment_method_brands of this PaymentMethod.
        The payment method brands, including the payment method brand type and logo image. # noqa: E501
        :param payment_method_brands: The payment_method_brands of this PaymentMethod. # noqa: E501
        :type: list[PaymentMethodBrand]
        """
        self._payment_method_brands = payment_method_brands
    @property
    def payment_method_type(self):
        """Gets the payment_method_type of this PaymentMethod. # noqa: E501
        The payment method type, such as credit card or cash. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/browse/types/gct:PaymentMethodTypeEnum'>eBay API documentation</a> # noqa: E501
        :return: The payment_method_type of this PaymentMethod. # noqa: E501
        :rtype: str
        """
        return self._payment_method_type
    @payment_method_type.setter
    def payment_method_type(self, payment_method_type):
        """Sets the payment_method_type of this PaymentMethod.
        The payment method type, such as credit card or cash. For implementation help, refer to <a href='https://developer.ebay.com/api-docs/buy/browse/types/gct:PaymentMethodTypeEnum'>eBay API documentation</a> # noqa: E501
        :param payment_method_type: The payment_method_type of this PaymentMethod. # noqa: E501
        :type: str
        """
        self._payment_method_type = payment_method_type
    @property
    def seller_instructions(self):
        """Gets the seller_instructions of this PaymentMethod. # noqa: E501
        The seller instructions to the buyer, such as accepts credit cards or see description. # noqa: E501
        :return: The seller_instructions of this PaymentMethod. # noqa: E501
        :rtype: list[str]
        """
        return self._seller_instructions
    @seller_instructions.setter
    def seller_instructions(self, seller_instructions):
        """Sets the seller_instructions of this PaymentMethod.
        The seller instructions to the buyer, such as accepts credit cards or see description. # noqa: E501
        :param seller_instructions: The seller_instructions of this PaymentMethod. # noqa: E501
        :type: list[str]
        """
        self._seller_instructions = seller_instructions
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # serialize each element, recursing into nested models
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # serialize dict values, recursing into nested models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # if this generated class is ever made a dict subclass, include
        # its mapping items as well
        if issubclass(PaymentMethod, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # NOTE: returns False (rather than NotImplemented) for other types
        if not isinstance(other, PaymentMethod):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 47.730964 | 2,314 | 0.668829 |
import pprint
import re
import six
class PaymentMethod(object):
swagger_types = {
'payment_instructions': 'list[str]',
'payment_method_brands': 'list[PaymentMethodBrand]',
'payment_method_type': 'str',
'seller_instructions': 'list[str]'
}
attribute_map = {
'payment_instructions': 'paymentInstructions',
'payment_method_brands': 'paymentMethodBrands',
'payment_method_type': 'paymentMethodType',
'seller_instructions': 'sellerInstructions'
}
def __init__(self, payment_instructions=None, payment_method_brands=None, payment_method_type=None, seller_instructions=None):
self._payment_instructions = None
self._payment_method_brands = None
self._payment_method_type = None
self._seller_instructions = None
self.discriminator = None
if payment_instructions is not None:
self.payment_instructions = payment_instructions
if payment_method_brands is not None:
self.payment_method_brands = payment_method_brands
if payment_method_type is not None:
self.payment_method_type = payment_method_type
if seller_instructions is not None:
self.seller_instructions = seller_instructions
@property
def payment_instructions(self):
return self._payment_instructions
@payment_instructions.setter
def payment_instructions(self, payment_instructions):
self._payment_instructions = payment_instructions
@property
def payment_method_brands(self):
return self._payment_method_brands
@payment_method_brands.setter
def payment_method_brands(self, payment_method_brands):
self._payment_method_brands = payment_method_brands
@property
def payment_method_type(self):
return self._payment_method_type
@payment_method_type.setter
def payment_method_type(self, payment_method_type):
self._payment_method_type = payment_method_type
@property
def seller_instructions(self):
return self._seller_instructions
@seller_instructions.setter
def seller_instructions(self, seller_instructions):
self._seller_instructions = seller_instructions
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PaymentMethod, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, PaymentMethod):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f714fc808fcfb6c6731b0e09e82f0d3179f49b65 | 2,971 | py | Python | v1/awsbuild/bao_signal_handler.py | badassops/ops-aws | 2e6b76e62e7b9edaa3ba43ff57df90b75c75aba7 | [
"BSD-3-Clause"
] | 2 | 2019-02-28T06:49:19.000Z | 2019-12-30T09:41:17.000Z | v1/awsbuild/bao_signal_handler.py | badassops/ops-aws | 2e6b76e62e7b9edaa3ba43ff57df90b75c75aba7 | [
"BSD-3-Clause"
] | null | null | null | v1/awsbuild/bao_signal_handler.py | badassops/ops-aws | 2e6b76e62e7b9edaa3ba43ff57df90b75c75aba7 | [
"BSD-3-Clause"
] | null | null | null | # vim:fileencoding=utf-8:noet
""" python method """
# Copyright (c) 2010 - 2019, © Badassops LLC / Luc Suryo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*
#* File : bao_signal_handler.py
#* Description : function to handle interrupts
#* Author : Luc Suryo <luc@badassops.com>
#* Version : 0.2
#* Date : Feb 21, 2019
#*
#* History :
#* Date: Author: Info:
#* Jun 1, 2010 LIS First Release
#* Feb 21, 2019 LIS refactored
import signal
import sys
def signal_handler(signum, frame):
""" signal/interrupts handler
@param signum {int} The interrupt ID according to signal.h.
@param frame {string} Memory frame where the interrupted was called.
"""
if signum is int(signal.SIGHUP):
print('Received -HUP, app does not support reload. {}'.format(frame))
elif signum is int(signal.SIGINT):
print('Received ctrl-c, aborted on your request. {}'.format(frame))
elif signum is int(signal.SIGTERM):
print('Received kill -TERM, terminating. {}'.format(frame))
else:
print('Received unknwon interrupt : {}'.format(signum))
sys.exit(128 + signum)
def install_int_handler():
""" Install signal/interrupts handler, we capture only SIGHUP, SIGINT and TERM
"""
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
| 43.691176 | 85 | 0.703467 |
import signal
import sys
def signal_handler(signum, frame):
if signum is int(signal.SIGHUP):
print('Received -HUP, app does not support reload. {}'.format(frame))
elif signum is int(signal.SIGINT):
print('Received ctrl-c, aborted on your request. {}'.format(frame))
elif signum is int(signal.SIGTERM):
print('Received kill -TERM, terminating. {}'.format(frame))
else:
print('Received unknwon interrupt : {}'.format(signum))
sys.exit(128 + signum)
def install_int_handler():
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
| true | true |
f714fd687acb9dcd38aefae007bf9b8459b33ed2 | 2,485 | py | Python | svc/lycanthropy/auth/client.py | kryptops/lycanthropy | 8b18a78e1586b9e5d4d433f307a3dd72d961f4fe | [
"BSD-3-Clause"
] | 11 | 2020-08-14T18:55:17.000Z | 2022-02-18T07:35:12.000Z | svc/lycanthropy/auth/client.py | kryptops/lycanthropy | 8b18a78e1586b9e5d4d433f307a3dd72d961f4fe | [
"BSD-3-Clause"
] | 9 | 2020-08-17T02:26:11.000Z | 2022-02-19T22:59:53.000Z | svc/lycanthropy/auth/client.py | kryptops/lycanthropy | 8b18a78e1586b9e5d4d433f307a3dd72d961f4fe | [
"BSD-3-Clause"
] | 2 | 2020-09-14T15:23:47.000Z | 2022-02-20T03:04:54.000Z | import hashlib
import random
import lycanthropy.sql.interface
import lycanthropy.crypto
import jwt
def decodeToken(token,config):
rawData = jwt.decode(
token,
config['secret'],
algorithms=['HS256']
)
return rawData
def monitoringToken(user,config,remote,identity):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
token = jwt.encode({
'user':user,
'_wolfmon':identity,
'campaigns':userData['campaigns'],
'roles':userData['roles'],
'_host':remote
},
config['secret'],
algorithm='HS256'
).decode('utf-8')
return token
def apiToken(user,config,remote):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
token = jwt.encode({
'user':user,
'campaigns':userData['campaigns'],
'roles':userData['roles'],
'_host':remote
},
config['secret'],
algorithm='HS256'
).decode('utf-8')
return token
def getCampaignAccess(user,config,token,remote,wolfmon):
decoded = decodeToken(token,config)
if decoded['user'] == user and decoded['_host'] == remote and wolfmon == decoded['_wolfmon']:
userData = lycanthropy.sql.interface.filterUser({'username': user})[0]
return userData['campaigns'].split(',')
else:
return 'error'
def verifyToken(user,config,token,remote):
decoded = decodeToken(token,config)
if decoded['user'] == user and decoded['_host'] == remote:
return True
else:
return False
def verifyAuth(user,password):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
print(userData)
if userData == []:
return False
else:
reconstruct = mkHash(password,userData['password'].split('.')[0])
print(reconstruct)
if reconstruct == userData['password']:
return True
else:
return False
def mkHash(password,salt):
passHmac = hashlib.pbkdf2_hmac('sha256',password.encode('utf-8'),salt.encode('utf-8'),100000)
return '{}.{}'.format(salt,passHmac.hex())
def mkSalt():
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
strOut = []
for i in range(32):
strOut.append(
alpha[random.randint(
0,
len(alpha)-1
)]
)
return "".join(strOut)
def mkUser(user,password):
pwdSalt = mkSalt()
passObj = mkHash(password,pwdSalt)
return passObj
| 26.157895 | 97 | 0.615292 | import hashlib
import random
import lycanthropy.sql.interface
import lycanthropy.crypto
import jwt
def decodeToken(token,config):
rawData = jwt.decode(
token,
config['secret'],
algorithms=['HS256']
)
return rawData
def monitoringToken(user,config,remote,identity):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
token = jwt.encode({
'user':user,
'_wolfmon':identity,
'campaigns':userData['campaigns'],
'roles':userData['roles'],
'_host':remote
},
config['secret'],
algorithm='HS256'
).decode('utf-8')
return token
def apiToken(user,config,remote):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
token = jwt.encode({
'user':user,
'campaigns':userData['campaigns'],
'roles':userData['roles'],
'_host':remote
},
config['secret'],
algorithm='HS256'
).decode('utf-8')
return token
def getCampaignAccess(user,config,token,remote,wolfmon):
decoded = decodeToken(token,config)
if decoded['user'] == user and decoded['_host'] == remote and wolfmon == decoded['_wolfmon']:
userData = lycanthropy.sql.interface.filterUser({'username': user})[0]
return userData['campaigns'].split(',')
else:
return 'error'
def verifyToken(user,config,token,remote):
decoded = decodeToken(token,config)
if decoded['user'] == user and decoded['_host'] == remote:
return True
else:
return False
def verifyAuth(user,password):
userData = lycanthropy.sql.interface.filterUser({'username':user})[0]
print(userData)
if userData == []:
return False
else:
reconstruct = mkHash(password,userData['password'].split('.')[0])
print(reconstruct)
if reconstruct == userData['password']:
return True
else:
return False
def mkHash(password,salt):
passHmac = hashlib.pbkdf2_hmac('sha256',password.encode('utf-8'),salt.encode('utf-8'),100000)
return '{}.{}'.format(salt,passHmac.hex())
def mkSalt():
alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
strOut = []
for i in range(32):
strOut.append(
alpha[random.randint(
0,
len(alpha)-1
)]
)
return "".join(strOut)
def mkUser(user,password):
pwdSalt = mkSalt()
passObj = mkHash(password,pwdSalt)
return passObj
| true | true |
f714fec78acf88635ae3a5489d89aaa3ac2fe45a | 1,162 | py | Python | app/view/index.py | InnopolisAero/uavcan.org | cef212cdb4fb2c3f672b04780445229607c93eaa | [
"MIT"
] | null | null | null | app/view/index.py | InnopolisAero/uavcan.org | cef212cdb4fb2c3f672b04780445229607c93eaa | [
"MIT"
] | null | null | null | app/view/index.py | InnopolisAero/uavcan.org | cef212cdb4fb2c3f672b04780445229607c93eaa | [
"MIT"
] | null | null | null | #
# Copyright (C) 2019 UAVCAN Development Team <info@zubax.com>.
# Author: Pavel Kirienko <pavel.kirienko@zubax.com>
#
from .. import app
from ..model import devel_feed, forum_feed, adopters
from flask import render_template
FEED_LENGTH = 15
TITLE = 'UAVCAN - a lightweight protocol designed for reliable communication ' \
'in aerospace and robotic applications over robust vehicular networks'
# noinspection PyBroadException
@app.route('/')
def _index():
try:
development_feed_entries = devel_feed.get(max_items=FEED_LENGTH)
except Exception:
development_feed_entries = None
app.logger.exception('Devel feed error')
try:
forum_feed_entries = forum_feed.get(max_items=FEED_LENGTH)
except Exception:
forum_feed_entries = None
app.logger.exception('Forum feed error')
adopter_list = adopters.get_list()
return render_template('index.html',
title=TITLE,
development_feed_entries=development_feed_entries,
forum_feed_entries=forum_feed_entries,
adopters=adopter_list)
| 29.05 | 80 | 0.674699 |
from .. import app
from ..model import devel_feed, forum_feed, adopters
from flask import render_template
FEED_LENGTH = 15
TITLE = 'UAVCAN - a lightweight protocol designed for reliable communication ' \
'in aerospace and robotic applications over robust vehicular networks'
@app.route('/')
def _index():
try:
development_feed_entries = devel_feed.get(max_items=FEED_LENGTH)
except Exception:
development_feed_entries = None
app.logger.exception('Devel feed error')
try:
forum_feed_entries = forum_feed.get(max_items=FEED_LENGTH)
except Exception:
forum_feed_entries = None
app.logger.exception('Forum feed error')
adopter_list = adopters.get_list()
return render_template('index.html',
title=TITLE,
development_feed_entries=development_feed_entries,
forum_feed_entries=forum_feed_entries,
adopters=adopter_list)
| true | true |
f714ffc25bab8da9a862bf45880ff26921b227b0 | 5,358 | py | Python | pynextcaller/tests/test_by_address.py | trezorg/nextcaller-python-api | 452ea9dbd945d8bf1bc2122ac1ffb886346d78cc | [
"MIT"
] | null | null | null | pynextcaller/tests/test_by_address.py | trezorg/nextcaller-python-api | 452ea9dbd945d8bf1bc2122ac1ffb886346d78cc | [
"MIT"
] | null | null | null | pynextcaller/tests/test_by_address.py | trezorg/nextcaller-python-api | 452ea9dbd945d8bf1bc2122ac1ffb886346d78cc | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
try:
from .base import BaseTestCase, BasePlatformTestCase
except (ValueError, ImportError):
from pynextcaller.tests.base import BaseTestCase, BasePlatformTestCase
ADDRESS_JSON_RESULT_EXAMPLE = '''
{
"records": [
{
"id": "97d949a413f4ea8b85e9586e1f2d9a",
"first_name": "Jerry",
"last_name": "Seinfeld",
"name": "Jerry Seinfeld",
"language": "English",
"fraud_threat": "low",
"spoof": "false",
"phone": [
{
"number": "2125558383",
"carrier": "Verizon Wireless",
"line_type": "LAN"
}
],
"address": [
{
"city": "New York",
"extended_zip": "",
"country": "USA",
"line2": "Apt 5a",
"line1": "129 West 81st Street",
"state": "NY",
"zip_code": "10024"
}
],
"email": "demo@nextcaller.com",
"social_links": [
{
"followers": 1,
"type": "twitter",
"url": "https://twitter.com/nextcaller"
},
{
"type": "facebook",
"url": "https://www.facebook.com/nextcaller"
},
{
"type": "linkedin",
"url": "https://www.linkedin.com/company/next-caller"
}
],
"age": "45-54",
"gender": "Male",
"household_income": "50k-75k",
"marital_status": "Single",
"presence_of_children": "No",
"home_owner_status": "Rent",
"market_value": "350k-500k",
"length_of_residence": "12 Years",
"high_net_worth": "No",
"occupation": "Entertainer",
"education": "Completed College",
"department": "not specified"
}
]
}
'''
WRONG_ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
}
WRONG_ADDRESS_ZIP_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '1002',
}
WRONG_ADDRESS_FIELDS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
'test_field': 'xx',
}
ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
}
class AddressTestCase(BaseTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_DATA)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_ZIP_DATA)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_FIELDS_DATA)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
class PlatformAddressTestCase(BasePlatformTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_DATA, self.platform_username)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_ZIP_DATA, self.platform_username)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_FIELDS_DATA, self.platform_username)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA, self.platform_username)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
if __name__ == '__main__':
unittest.main()
| 31.333333 | 83 | 0.573162 | from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
try:
from .base import BaseTestCase, BasePlatformTestCase
except (ValueError, ImportError):
from pynextcaller.tests.base import BaseTestCase, BasePlatformTestCase
ADDRESS_JSON_RESULT_EXAMPLE = '''
{
"records": [
{
"id": "97d949a413f4ea8b85e9586e1f2d9a",
"first_name": "Jerry",
"last_name": "Seinfeld",
"name": "Jerry Seinfeld",
"language": "English",
"fraud_threat": "low",
"spoof": "false",
"phone": [
{
"number": "2125558383",
"carrier": "Verizon Wireless",
"line_type": "LAN"
}
],
"address": [
{
"city": "New York",
"extended_zip": "",
"country": "USA",
"line2": "Apt 5a",
"line1": "129 West 81st Street",
"state": "NY",
"zip_code": "10024"
}
],
"email": "demo@nextcaller.com",
"social_links": [
{
"followers": 1,
"type": "twitter",
"url": "https://twitter.com/nextcaller"
},
{
"type": "facebook",
"url": "https://www.facebook.com/nextcaller"
},
{
"type": "linkedin",
"url": "https://www.linkedin.com/company/next-caller"
}
],
"age": "45-54",
"gender": "Male",
"household_income": "50k-75k",
"marital_status": "Single",
"presence_of_children": "No",
"home_owner_status": "Rent",
"market_value": "350k-500k",
"length_of_residence": "12 Years",
"high_net_worth": "No",
"occupation": "Entertainer",
"education": "Completed College",
"department": "not specified"
}
]
}
'''
WRONG_ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
}
WRONG_ADDRESS_ZIP_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '1002',
}
WRONG_ADDRESS_FIELDS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
'test_field': 'xx',
}
ADDRESS_DATA = {
'first_name': 'Jerry',
'last_name': 'Seinfeld',
'address': '129 West 81st Street',
'city': 'New York',
'state': 'NY',
'zip_code': '10024',
}
class AddressTestCase(BaseTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_DATA)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_ZIP_DATA)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name, WRONG_ADDRESS_FIELDS_DATA)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
class PlatformAddressTestCase(BasePlatformTestCase):
def test_address_by_not_full_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_DATA, self.platform_username)
def test_address_by_wrong_zip(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_ZIP_DATA, self.platform_username)
def test_address_by_wrong_fields(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
self.assertRaises(
ValueError, self.client.get_by_address_name,
WRONG_ADDRESS_FIELDS_DATA, self.platform_username)
def test_by_address(self):
self.patch_http_request(ADDRESS_JSON_RESULT_EXAMPLE)
res = self.client.get_by_address_name(ADDRESS_DATA, self.platform_username)
self.assertTrue(res['records'])
self.assertEqual(res['records'][0]['email'], 'demo@nextcaller.com')
self.assertEqual(res['records'][0]['first_name'], 'Jerry')
self.assertEqual(res['records'][0]['last_name'], 'Seinfeld')
if __name__ == '__main__':
unittest.main()
| true | true |
f71501f1216ea4346d3b0a6f63bb45fb0f07341f | 52,205 | py | Python | sympy/matrices/tests/test_commonmatrix.py | AugustinJose1221/sympy | 94731be8cc4ee7d2a63065732dd086fb272029ad | [
"BSD-3-Clause"
] | 2 | 2019-10-18T12:45:34.000Z | 2020-08-10T08:27:59.000Z | sympy/matrices/tests/test_commonmatrix.py | AugustinJose1221/sympy | 94731be8cc4ee7d2a63065732dd086fb272029ad | [
"BSD-3-Clause"
] | null | null | null | sympy/matrices/tests/test_commonmatrix.py | AugustinJose1221/sympy | 94731be8cc4ee7d2a63065732dd086fb272029ad | [
"BSD-3-Clause"
] | 1 | 2019-10-18T12:39:41.000Z | 2019-10-18T12:39:41.000Z | import collections
import random
from sympy.assumptions import Q
from sympy.core.add import Add
from sympy.core.compatibility import range
from sympy.core.function import (Function, diff)
from sympy.core.numbers import (E, Float, I, Integer, oo, pi)
from sympy.core.relational import (Eq, Lt)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt)
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (cos, sin, tan)
from sympy.logic.boolalg import (And, Or)
from sympy.matrices.common import (ShapeError, MatrixError, NonSquareMatrixError,
_MinimalMatrix, MatrixShaping, MatrixProperties, MatrixOperations, MatrixArithmetic,
MatrixSpecial)
from sympy.matrices.matrices import (MatrixDeterminant,
MatrixReductions, MatrixSubspaces, MatrixEigen, MatrixCalculus)
from sympy.matrices import (Matrix, diag, eye,
matrix_multiply_elementwise, ones, zeros, SparseMatrix)
from sympy.polys.polytools import Poly
from sympy.simplify.simplify import simplify
from sympy.simplify.trigsimp import trigsimp
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten
from sympy.utilities.pytest import (raises, XFAIL, slow, skip,
warns_deprecated_sympy)
from sympy.abc import a, b, c, d, x, y, z
# classes to test the basic matrix classes
class ShapingOnlyMatrix(_MinimalMatrix, MatrixShaping):
    """Minimal matrix exposing only the ``MatrixShaping`` mixin, so that
    shaping methods can be tested in isolation from the full ``Matrix``."""
    pass
def eye_Shaping(n):
    """Return an ``n x n`` identity ``ShapingOnlyMatrix``."""
    return ShapingOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Shaping(n):
    """Return an ``n x n`` zero ``ShapingOnlyMatrix``."""
    return ShapingOnlyMatrix(n, n, lambda i, j: 0)
class PropertiesOnlyMatrix(_MinimalMatrix, MatrixProperties):
    """Minimal matrix exposing only the ``MatrixProperties`` mixin for
    isolated testing of the ``is_*`` property queries."""
    pass
def eye_Properties(n):
    """Return an ``n x n`` identity ``PropertiesOnlyMatrix``."""
    return PropertiesOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Properties(n):
    """Return an ``n x n`` zero ``PropertiesOnlyMatrix``."""
    return PropertiesOnlyMatrix(n, n, lambda i, j: 0)
class OperationsOnlyMatrix(_MinimalMatrix, MatrixOperations):
    """Minimal matrix exposing only the ``MatrixOperations`` mixin."""
    pass
def eye_Operations(n):
    """Return an ``n x n`` identity ``OperationsOnlyMatrix``."""
    return OperationsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Operations(n):
    """Return an ``n x n`` zero ``OperationsOnlyMatrix``."""
    return OperationsOnlyMatrix(n, n, lambda i, j: 0)
class ArithmeticOnlyMatrix(_MinimalMatrix, MatrixArithmetic):
    """Minimal matrix exposing only the ``MatrixArithmetic`` mixin."""
    pass
def eye_Arithmetic(n):
    """Return an ``n x n`` identity ``ArithmeticOnlyMatrix``."""
    return ArithmeticOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Arithmetic(n):
    """Return an ``n x n`` zero ``ArithmeticOnlyMatrix``."""
    return ArithmeticOnlyMatrix(n, n, lambda i, j: 0)
class DeterminantOnlyMatrix(_MinimalMatrix, MatrixDeterminant):
    """Minimal matrix exposing only the ``MatrixDeterminant`` mixin."""
    pass
def eye_Determinant(n):
    """Return an ``n x n`` identity ``DeterminantOnlyMatrix``."""
    return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Determinant(n):
    """Return an ``n x n`` zero ``DeterminantOnlyMatrix``."""
    return DeterminantOnlyMatrix(n, n, lambda i, j: 0)
class ReductionsOnlyMatrix(_MinimalMatrix, MatrixReductions):
    """Minimal matrix exposing only the ``MatrixReductions`` mixin."""
    pass
def eye_Reductions(n):
    """Return an ``n x n`` identity ``ReductionsOnlyMatrix``."""
    return ReductionsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Reductions(n):
    """Return an ``n x n`` zero ``ReductionsOnlyMatrix``."""
    return ReductionsOnlyMatrix(n, n, lambda i, j: 0)
class SpecialOnlyMatrix(_MinimalMatrix, MatrixSpecial):
    """Minimal matrix exposing only the ``MatrixSpecial`` mixin."""
    pass
class SubspaceOnlyMatrix(_MinimalMatrix, MatrixSubspaces):
    """Minimal matrix exposing only the ``MatrixSubspaces`` mixin."""
    pass
class EigenOnlyMatrix(_MinimalMatrix, MatrixEigen):
    """Minimal matrix exposing only the ``MatrixEigen`` mixin."""
    pass
class CalculusOnlyMatrix(_MinimalMatrix, MatrixCalculus):
    """Minimal matrix exposing only the ``MatrixCalculus`` mixin."""
    pass
def test__MinimalMatrix():
    """Basic construction, indexing, slicing and equality of _MinimalMatrix."""
    x = _MinimalMatrix(2, 3, [1, 2, 3, 4, 5, 6])
    assert x.rows == 2
    assert x.cols == 3
    # Flat (row-major) indexing and 2-D indexing.
    assert x[2] == 3
    assert x[1, 1] == 5
    assert list(x) == [1, 2, 3, 4, 5, 6]
    # Row/column slices and the full slice return the expected entries.
    assert list(x[1, :]) == [4, 5, 6]
    assert list(x[:, 1]) == [2, 5]
    assert list(x[:, :]) == list(x)
    assert x[:, :] == x
    # Construction from another matrix and from nested lists/tuples
    # (in any list/tuple combination) gives an equal matrix.
    assert _MinimalMatrix(x) == x
    assert _MinimalMatrix([[1, 2, 3], [4, 5, 6]]) == x
    assert _MinimalMatrix(([1, 2, 3], [4, 5, 6])) == x
    assert _MinimalMatrix([(1, 2, 3), (4, 5, 6)]) == x
    assert _MinimalMatrix(((1, 2, 3), (4, 5, 6))) == x
    # A matrix with a different shape must not compare equal.
    assert not (_MinimalMatrix([[1, 2], [3, 4], [5, 6]]) == x)
# ShapingOnlyMatrix tests
def test_vec():
    """vec() stacks the columns of the matrix into a single column."""
    mat = ShapingOnlyMatrix(2, 2, [1, 3, 2, 4])
    vectorized = mat.vec()
    assert vectorized.cols == 1
    # Column-major stacking of [[1, 3], [2, 4]] yields 1, 2, 3, 4.
    for index, expected in enumerate([1, 2, 3, 4]):
        assert vectorized[index] == expected
def test_tolist():
    """tolist() converts a flat-initialized matrix back to nested row lists."""
    lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
    flat_lst = [S.One, S.Half, x*y, S.Zero, x, y, z, x**2, y, -S.One, z*x, 3]
    m = ShapingOnlyMatrix(3, 4, flat_lst)
    assert m.tolist() == lst
def test_row_col_del():
    """row_del/col_del remove a row/column; out-of-range indices raise."""
    e = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    raises(ValueError, lambda: e.row_del(5))
    raises(ValueError, lambda: e.row_del(-5))
    raises(ValueError, lambda: e.col_del(5))
    raises(ValueError, lambda: e.col_del(-5))

    # Positive and equivalent negative indices delete the same row/column.
    assert e.row_del(2) == e.row_del(-1) == Matrix([[1, 2, 3], [4, 5, 6]])
    assert e.col_del(2) == e.col_del(-1) == Matrix([[1, 2], [4, 5], [7, 8]])

    assert e.row_del(1) == e.row_del(-2) == Matrix([[1, 2, 3], [7, 8, 9]])
    assert e.col_del(1) == e.col_del(-2) == Matrix([[1, 3], [4, 6], [7, 9]])
def test_get_diag_blocks1():
    """A matrix with no block structure is its own single diagonal block."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    assert a.get_diag_blocks() == [a]
    assert b.get_diag_blocks() == [b]
    assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
    """get_diag_blocks recovers the blocks of a block-diagonal matrix."""
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    # Build block-diagonal matrices and rewrap them as ShapingOnlyMatrix
    # so the mixin method (not the full Matrix one) is exercised.
    A, B, C, D = diag(a, b, b), diag(a, b, c), diag(a, c, b), diag(c, c, b)
    A = ShapingOnlyMatrix(A.rows, A.cols, A)
    B = ShapingOnlyMatrix(B.rows, B.cols, B)
    C = ShapingOnlyMatrix(C.rows, C.cols, C)
    D = ShapingOnlyMatrix(D.rows, D.cols, D)

    assert A.get_diag_blocks() == [a, b, b]
    assert B.get_diag_blocks() == [a, b, c]
    assert C.get_diag_blocks() == [a, c, b]
    assert D.get_diag_blocks() == [c, c, b]
def test_shape():
    """shape returns the (rows, cols) pair.

    The previous version was missing the ``assert`` keyword, so the
    comparison result was discarded and the test could never fail.
    """
    m = ShapingOnlyMatrix(1, 2, [0, 0])
    assert m.shape == (1, 2)
def test_reshape():
    """reshape preserves the row-major entry order for new dimensions."""
    m0 = eye_Shaping(3)
    assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
    m1 = ShapingOnlyMatrix(3, 4, lambda i, j: i + j)
    assert m1.reshape(
        4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))
    assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))
def test_row_col():
    """row()/col() extract a single row as 1xn and a column as nx1."""
    mat = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    first_row = mat.row(0)
    first_col = mat.col(0)
    assert first_row == Matrix(1, 3, [1, 2, 3])
    assert first_col == Matrix(3, 1, [1, 4, 7])
def test_row_join():
    """row_join appends a column vector on the right of the identity."""
    joined = eye_Shaping(3).row_join(Matrix([7, 7, 7]))
    expected = Matrix([[1, 0, 0, 7],
                       [0, 1, 0, 7],
                       [0, 0, 1, 7]])
    assert joined == expected
def test_col_join():
    """col_join appends a row vector below the identity."""
    joined = eye_Shaping(3).col_join(Matrix([[7, 7, 7]]))
    expected = Matrix([[1, 0, 0],
                       [0, 1, 0],
                       [0, 0, 1],
                       [7, 7, 7]])
    assert joined == expected
def test_row_insert():
    """row_insert places the new row correctly for all valid indices,
    including negative ones, mirroring list.insert semantics."""
    new_row = Matrix([[4, 4, 4]])
    for pos in range(-4, 5):
        expected = [1, 0, 0]
        expected.insert(pos, 4)
        inserted = eye_Shaping(3).row_insert(pos, new_row)
        assert flatten(inserted.col(0).tolist()) == expected
def test_col_insert():
    """col_insert places the new column correctly for all valid indices,
    including negative ones, mirroring list.insert semantics."""
    c4 = Matrix([4, 4, 4])
    for i in range(-4, 5):
        l = [0, 0, 0]
        l.insert(i, 4)
        assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l
    # issue 13643
    # Inserting a multi-column block shifts the remaining identity columns.
    assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \
           Matrix([[1, 0, 0, 2, 2, 0, 0, 0],
                   [0, 1, 0, 2, 2, 0, 0, 0],
                   [0, 0, 1, 2, 2, 0, 0, 0],
                   [0, 0, 0, 2, 2, 1, 0, 0],
                   [0, 0, 0, 2, 2, 0, 1, 0],
                   [0, 0, 0, 2, 2, 0, 0, 1]])
def test_extract():
    """extract builds a submatrix from (possibly repeated) row/col indices;
    out-of-range indices raise IndexError."""
    m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
    assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])
    assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])
    # Extracting every row and column reproduces the matrix.
    assert m.extract(range(4), range(3)) == m
    raises(IndexError, lambda: m.extract([4], [0]))
    raises(IndexError, lambda: m.extract([0], [3]))
def test_hstack():
    """hstack concatenates matrices horizontally; shapes must be compatible."""
    m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
    m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
    # Stacking a single matrix is a no-op.
    assert m == m.hstack(m)
    assert m.hstack(m, m, m) == ShapingOnlyMatrix.hstack(m, m, m) == Matrix([
                [0,  1,  2, 0,  1,  2, 0,  1,  2],
                [3,  4,  5, 3,  4,  5, 3,  4,  5],
                [6,  7,  8, 6,  7,  8, 6,  7,  8],
                [9, 10, 11, 9, 10, 11, 9, 10, 11]])
    # Row counts differ -> cannot stack horizontally.
    raises(ShapeError, lambda: m.hstack(m, m2))
    assert Matrix.hstack() == Matrix()

    # test regression #12938
    # Stacking zero-row matrices must still accumulate the column count.
    M1 = Matrix.zeros(0, 0)
    M2 = Matrix.zeros(0, 1)
    M3 = Matrix.zeros(0, 2)
    M4 = Matrix.zeros(0, 3)
    m = ShapingOnlyMatrix.hstack(M1, M2, M3, M4)
    assert m.rows == 0 and m.cols == 6
def test_vstack():
    """vstack concatenates matrices vertically; shapes must be compatible."""
    m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
    m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
    # Stacking a single matrix is a no-op.
    assert m == m.vstack(m)
    assert m.vstack(m, m, m) == ShapingOnlyMatrix.vstack(m, m, m) == Matrix([
                                [0,  1,  2],
                                [3,  4,  5],
                                [6,  7,  8],
                                [9, 10, 11],
                                [0,  1,  2],
                                [3,  4,  5],
                                [6,  7,  8],
                                [9, 10, 11],
                                [0,  1,  2],
                                [3,  4,  5],
                                [6,  7,  8],
                                [9, 10, 11]])
    # Column counts differ -> cannot stack vertically.
    raises(ShapeError, lambda: m.vstack(m, m2))
    assert Matrix.vstack() == Matrix()
# PropertiesOnlyMatrix tests
def test_atoms():
    """atoms() collects leaf expressions; a type filter restricts the result."""
    m = PropertiesOnlyMatrix(2, 2, [1, 2, x, 1 - 1/x])
    assert m.atoms() == {S(1), S(2), S(-1), x}
    assert m.atoms(Symbol) == {x}
def test_free_symbols():
    """free_symbols collects the symbols appearing in the entries."""
    mat = PropertiesOnlyMatrix([[x], [0]])
    assert mat.free_symbols == {x}
def test_has():
    """has() detects specific symbols and symbol classes among entries."""
    mat = PropertiesOnlyMatrix(((x, y), (2, 3)))
    assert mat.has(x)
    assert not mat.has(z)
    # Passing a class matches any instance of it.
    assert mat.has(Symbol)

    mat = PropertiesOnlyMatrix(((2, y), (2, 3)))
    assert not mat.has(x)
def test_is_anti_symmetric():
    """is_anti_symmetric checks M.T == -M; the ``simplify`` flag controls
    whether entries are simplified before comparison."""
    x = symbols('x')
    assert PropertiesOnlyMatrix(2, 1, [1, 2]).is_anti_symmetric() is False
    m = PropertiesOnlyMatrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
    assert m.is_anti_symmetric() is True
    # Without simplification the structurally different forms
    # x**2 + 2*x + 1 and (x + 1)**2 do not cancel.
    assert m.is_anti_symmetric(simplify=False) is False
    assert m.is_anti_symmetric(simplify=lambda x: x) is False

    # After expanding all entries, no simplification is needed.
    m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in m])
    assert m.is_anti_symmetric(simplify=False) is True
    # A nonzero diagonal entry breaks antisymmetry.
    m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in [S.One] + list(m)[1:]])
    assert m.is_anti_symmetric() is False
def test_diagonal_symmetrical():
    """Interplay of is_diagonal and is_symmetric on various shapes."""
    m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
    assert not m.is_diagonal()
    assert m.is_symmetric()
    assert m.is_symmetric(simplify=False)

    m = PropertiesOnlyMatrix(2, 2, [1, 0, 0, 1])
    assert m.is_diagonal()

    m = PropertiesOnlyMatrix(3, 3, diag(1, 2, 3))
    assert m.is_diagonal()
    assert m.is_symmetric()

    m = PropertiesOnlyMatrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
    assert m == diag(1, 2, 3)

    # A zero rectangular matrix is diagonal but not symmetric (not square).
    m = PropertiesOnlyMatrix(2, 3, zeros(2, 3))
    assert not m.is_symmetric()
    assert m.is_diagonal()

    # Non-square matrices can still be diagonal.
    m = PropertiesOnlyMatrix(((5, 0), (0, 6), (0, 0)))
    assert m.is_diagonal()

    m = PropertiesOnlyMatrix(((5, 0, 0), (0, 6, 0)))
    assert m.is_diagonal()

    # Symmetry that only appears after simplification/expansion.
    m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
    assert m.is_symmetric()
    assert not m.is_symmetric(simplify=False)
    assert m.expand().is_symmetric(simplify=False)
def test_is_hermitian():
a = PropertiesOnlyMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a = PropertiesOnlyMatrix([[2*I, I], [-I, 1]])
assert a.is_hermitian is False
a = PropertiesOnlyMatrix([[x, I], [-I, 1]])
assert a.is_hermitian is None
a = PropertiesOnlyMatrix([[x, 1], [-I, 1]])
assert a.is_hermitian is False
def test_is_Identity():
assert eye_Properties(3).is_Identity
assert not PropertiesOnlyMatrix(zeros(3)).is_Identity
assert not PropertiesOnlyMatrix(ones(3)).is_Identity
# issue 6242
assert not PropertiesOnlyMatrix([[1, 0, 0]]).is_Identity
def test_is_symbolic():
a = PropertiesOnlyMatrix([[x, x], [x, x]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, 7, 8]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, x, 8]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, x, 3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1], [x], [3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_symbolic() is False
def test_is_upper():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_upper is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_upper is False
def test_is_lower():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_lower is False
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_lower is True
def test_is_square():
m = PropertiesOnlyMatrix([[1],[1]])
m2 = PropertiesOnlyMatrix([[2,2],[2,2]])
assert not m.is_square
assert m2.is_square
def test_is_symmetric():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert m.is_symmetric()
m = PropertiesOnlyMatrix(2, 2, [0, 1, 0, 1])
assert not m.is_symmetric()
def test_is_hessenberg():
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
assert A.is_upper_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, 0, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, -1, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg is False
assert A.is_upper_hessenberg is False
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
assert not A.is_upper_hessenberg
def test_is_zero():
    """``is_zero``: True for empty/zero matrices, None when a symbolic
    entry might be zero, False once a nonzero entry is certain.

    Fixed: compare against the ``None``/``False`` singletons with
    ``is`` instead of ``==`` (PEP 8 E711/E712); the asserted values are
    unchanged.
    """
    assert PropertiesOnlyMatrix(0, 0, []).is_zero
    assert PropertiesOnlyMatrix([[0, 0], [0, 0]]).is_zero
    assert PropertiesOnlyMatrix(zeros(3, 4)).is_zero
    assert not PropertiesOnlyMatrix(eye(3)).is_zero
    # an unconstrained symbol could be zero -> undecidable
    assert PropertiesOnlyMatrix([[x, 0], [0, 0]]).is_zero is None
    # a literal 1 makes the matrix definitely nonzero
    assert PropertiesOnlyMatrix([[x, 1], [0, 0]]).is_zero is False
    a = Symbol('a', nonzero=True)
    assert PropertiesOnlyMatrix([[a, 0], [0, 0]]).is_zero is False
def test_values():
    """``values`` yields only the nonzero entries of the matrix."""
    assert set(PropertiesOnlyMatrix(2, 2, [0, 1, 2, 3]).values()) == {1, 2, 3}
    xr = Symbol('x', real=True)
    assert set(PropertiesOnlyMatrix(2, 2, [xr, 0, 0, 1]).values()) == {xr, 1}
# OperationsOnlyMatrix tests
def test_applyfunc():
    """``applyfunc`` maps a callable over every entry."""
    m0 = OperationsOnlyMatrix(eye(3))
    assert m0.applyfunc(lambda x: 2*x) == eye(3)*2
    assert m0.applyfunc(lambda x: 0) == zeros(3)
    assert m0.applyfunc(lambda x: 1) == ones(3)
def test_adjoint():
    """``adjoint`` is the conjugate transpose."""
    dat = [[0, I], [1, 0]]
    ans = OperationsOnlyMatrix([[0, 1], [-I, 0]])
    assert ans.adjoint() == Matrix(dat)
def test_as_real_imag():
    """``as_real_imag`` splits a complex matrix into real/imaginary parts."""
    m1 = OperationsOnlyMatrix(2,2,[1,2,3,4])
    m3 = OperationsOnlyMatrix(2,2,[1+S.ImaginaryUnit,2+2*S.ImaginaryUnit,3+3*S.ImaginaryUnit,4+4*S.ImaginaryUnit])
    a,b = m3.as_real_imag()
    assert a == m1
    assert b == m1
def test_conjugate():
    """Transpose (T), conjugate (C) and conjugate transpose (H) interplay."""
    M = OperationsOnlyMatrix([[0, I, 5],
                              [1, 2, 0]])
    assert M.T == Matrix([[0, 1],
                          [I, 2],
                          [5, 0]])
    assert M.C == Matrix([[0, -I, 5],
                          [1, 2, 0]])
    assert M.C == M.conjugate()
    assert M.H == M.T.C
    assert M.H == Matrix([[ 0, 1],
                          [-I, 2],
                          [ 5, 0]])
def test_doit():
    """``doit`` evaluates held (unevaluated) entries."""
    a = OperationsOnlyMatrix([[Add(x,x, evaluate=False)]])
    assert a[0] != 2*x
    assert a.doit() == Matrix([[2*x]])
def test_evalf():
    """``evalf``/``n`` apply elementwise at the requested precision."""
    a = OperationsOnlyMatrix(2, 1, [sqrt(5), 6])
    assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
    assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
    assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_expand():
    """``expand`` distributes products in every entry."""
    m0 = OperationsOnlyMatrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
    # Test if expand() returns a matrix
    m1 = m0.expand()
    assert m1 == Matrix(
        [[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
    a = Symbol('a', real=True)
    assert OperationsOnlyMatrix(1, 1, [exp(I*a)]).expand(complex=True) == \
        Matrix([cos(a) + I*sin(a)])
def test_refine():
    """``refine`` simplifies entries under the given assumptions."""
    m0 = OperationsOnlyMatrix([[Abs(x)**2, sqrt(x**2)],
                 [sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])
    m1 = m0.refine(Q.real(x) & Q.real(y))
    assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])
    m1 = m0.refine(Q.positive(x) & Q.positive(y))
    assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])
    m1 = m0.refine(Q.negative(x) & Q.negative(y))
    assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])
def test_replace():
    """``replace`` substitutes one function head for another."""
    F, G = symbols('F, G', cls=Function)
    K = OperationsOnlyMatrix(2, 2, lambda i, j: G(i+j))
    M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
    N = M.replace(F, G)
    assert N == K
def test_replace_map():
    """``replace(..., map=True)`` also returns the substitution mapping."""
    F, G = symbols('F, G', cls=Function)
    K = OperationsOnlyMatrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1) \
    : G(1)}), (G(2), {F(2): G(2)})])
    M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
    N = M.replace(F, G, True)
    assert N == K
def test_simplify():
    """``simplify`` acts elementwise and forwards keyword options."""
    n = Symbol('n')
    f = Function('f')
    M = OperationsOnlyMatrix([[ 1/x + 1/y, (x + x*y) / x ],
                [ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
    assert M.simplify() == Matrix([[ (x + y)/(x * y), 1 + y ],
                       [ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
    eq = (1 + x)**2
    M = OperationsOnlyMatrix([[eq]])
    assert M.simplify() == Matrix([[eq]])
    # keyword arguments (ratio) must be passed through to simplify()
    assert M.simplify(ratio=oo) == Matrix([[eq.simplify(ratio=oo)]])
def test_subs():
    """``subs`` accepts a pair, a list of pairs, or a dict."""
    assert OperationsOnlyMatrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
    assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
        Matrix([[-1, 2], [-3, 4]])
    assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
        Matrix([[-1, 2], [-3, 4]])
    assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
        Matrix([[-1, 2], [-3, 4]])
    # simultaneous substitution avoids chaining x->y-1 into y->x-1
    assert OperationsOnlyMatrix([[x*y]]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
        Matrix([[(x - 1)*(y - 1)]])
def test_trace():
    """``trace`` sums the main diagonal."""
    M = OperationsOnlyMatrix([[1, 0, 0],
               [0, 5, 0],
               [0, 0, 8]])
    assert M.trace() == 14
def test_xreplace():
    """``xreplace`` performs exact structural replacement."""
    assert OperationsOnlyMatrix([[1, x], [x, 4]]).xreplace({x: 5}) == \
        Matrix([[1, 5], [5, 4]])
    assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \
        Matrix([[-1, 2], [-3, 4]])
def test_permute():
    """Row/column permutation by swap lists and Permutation objects."""
    a = OperationsOnlyMatrix(3, 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
    # out-of-range swap index
    raises(IndexError, lambda: a.permute([[0,5]]))
    b = a.permute_rows([[0, 2], [0, 1]])
    assert a.permute([[0, 2], [0, 1]]) == b == Matrix([
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [1, 2, 3, 4]])
    b = a.permute_cols([[0, 2], [0, 1]])
    assert a.permute([[0, 2], [0, 1]], orientation='cols') == b ==\
        Matrix([
        [ 2, 3, 1, 4],
        [ 6, 7, 5, 8],
        [10, 11, 9, 12]])
    b = a.permute_cols([[0, 2], [0, 1]], direction='backward')
    assert a.permute([[0, 2], [0, 1]], orientation='cols', direction='backward') == b ==\
        Matrix([
        [ 3, 1, 2, 4],
        [ 7, 5, 6, 8],
        [11, 9, 10, 12]])
    assert a.permute([1, 2, 0, 3]) == Matrix([
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [1, 2, 3, 4]])
    from sympy.combinatorics import Permutation
    assert a.permute(Permutation([1, 2, 0, 3])) == Matrix([
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [1, 2, 3, 4]])
# ArithmeticOnlyMatrix tests
def test_abs():
    """``abs(M)`` takes the absolute value of every entry."""
    m = ArithmeticOnlyMatrix([[1, -2], [x, y]])
    assert abs(m) == ArithmeticOnlyMatrix([[1, 2], [Abs(x), Abs(y)]])
def test_add():
    """Matrix addition; mismatched shapes raise ShapeError."""
    m = ArithmeticOnlyMatrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
    assert m + m == ArithmeticOnlyMatrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
    n = ArithmeticOnlyMatrix(1, 2, [1, 2])
    raises(ShapeError, lambda: m + n)
def test_multiplication():
    """Matrix products for ArithmeticOnlyMatrix: shape checks, ``*``,
    the ``@`` operator, elementwise and scalar multiplication.

    Fixed: the original wrapped the *statement* ``c = a @ b`` in
    ``eval``, but ``eval`` only accepts expressions, so it raised
    SyntaxError unconditionally and the ``@`` assertions never ran on
    any Python version.  Evaluating the bare expression exercises ``@``
    on Python 3.5+ while still skipping (SyntaxError) on older parsers.
    The scalar ``5 @ b`` case now expects TypeError, consistent with
    test_matmul below (scalar matmul returns NotImplemented).
    """
    a = ArithmeticOnlyMatrix((
        (1, 2),
        (3, 1),
        (0, 6),
    ))
    b = ArithmeticOnlyMatrix((
        (1, 2),
        (3, 0),
    ))
    # incompatible shapes and non-matrix operands are rejected
    raises(ShapeError, lambda: b*a)
    raises(TypeError, lambda: a*{})
    c = a*b
    assert c[0, 0] == 7
    assert c[0, 1] == 2
    assert c[1, 0] == 6
    assert c[1, 1] == 6
    assert c[2, 0] == 18
    assert c[2, 1] == 0
    # @ is only valid syntax on Python 3.5+, so compile it at run time
    try:
        c = eval('a @ b')
    except SyntaxError:
        pass
    else:
        assert c[0, 0] == 7
        assert c[0, 1] == 2
        assert c[1, 0] == 6
        assert c[1, 1] == 6
        assert c[2, 0] == 18
        assert c[2, 1] == 0
    h = a.multiply_elementwise(c)
    assert h == matrix_multiply_elementwise(a, c)
    assert h[0, 0] == 7
    assert h[0, 1] == 4
    assert h[1, 0] == 18
    assert h[1, 1] == 6
    assert h[2, 0] == 0
    assert h[2, 1] == 0
    raises(ShapeError, lambda: a.multiply_elementwise(b))
    # scalar multiplication from the right ...
    c = b * Symbol("x")
    assert isinstance(c, ArithmeticOnlyMatrix)
    assert c[0, 0] == x
    assert c[0, 1] == 2*x
    assert c[1, 0] == 3*x
    assert c[1, 1] == 0
    # ... and from the left
    c2 = x * b
    assert c == c2
    c = 5 * b
    assert isinstance(c, ArithmeticOnlyMatrix)
    assert c[0, 0] == 5
    assert c[0, 1] == 2*5
    assert c[1, 0] == 3*5
    assert c[1, 1] == 0
    # scalar @ matrix must be rejected: __rmatmul__ returns
    # NotImplemented for non-matrix operands (cf. test_matmul), which
    # the interpreter turns into a TypeError
    try:
        raises(TypeError, lambda: eval('5 @ b'))
    except SyntaxError:
        # pre-3.5 interpreters cannot even parse the @ operator
        pass
def test_matmul():
    """``@`` with a scalar operand returns NotImplemented on both sides."""
    a = Matrix([[1, 2], [3, 4]])
    assert a.__matmul__(2) == NotImplemented
    assert a.__rmatmul__(2) == NotImplemented
    #This is done this way because @ is only supported in Python 3.5+
    #To check 2@a case
    try:
        eval('2 @ a')
    except SyntaxError:
        pass
    except TypeError: #TypeError is raised in case of NotImplemented is returned
        pass
    #Check a@2 case
    try:
        eval('a @ 2')
    except SyntaxError:
        pass
    except TypeError: #TypeError is raised in case of NotImplemented is returned
        pass
def test_power():
    """Integer matrix powers, including the 0th and 1st power."""
    raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
    A = ArithmeticOnlyMatrix([[2, 3], [4, 5]])
    assert (A**5)[:] == (6140, 8097, 10796, 14237)
    A = ArithmeticOnlyMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
    assert (A**3)[:] == (290, 262, 251, 448, 440, 368, 702, 954, 433)
    assert A**0 == eye(3)
    assert A**1 == A
    assert (ArithmeticOnlyMatrix([[2]]) ** 100)[0, 0] == 2**100
    assert ArithmeticOnlyMatrix([[1, 2], [3, 4]])**Integer(2) == ArithmeticOnlyMatrix([[7, 10], [15, 22]])
def test_neg():
    """Unary negation negates every entry."""
    n = ArithmeticOnlyMatrix(1, 2, [1, 2])
    assert -n == ArithmeticOnlyMatrix(1, 2, [-1, -2])
def test_sub():
    """Matrix subtraction."""
    n = ArithmeticOnlyMatrix(1, 2, [1, 2])
    assert n - n == ArithmeticOnlyMatrix(1, 2, [0, 0])
def test_div():
    """Division by a scalar acts elementwise."""
    n = ArithmeticOnlyMatrix(1, 2, [1, 2])
    assert n/2 == ArithmeticOnlyMatrix(1, 2, [S(1)/2, S(2)/2])
# DeterminantOnlyMatrix tests
def test_det():
    """Determinants of 0x0..4x4 matrices and the ``method`` keyword."""
    a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
    raises(NonSquareMatrixError, lambda: a.det())
    z = zeros_Determinant(2)
    ey = eye_Determinant(2)
    assert z.det() == 0
    assert ey.det() == 1
    x = Symbol('x')
    a = DeterminantOnlyMatrix(0,0,[])
    b = DeterminantOnlyMatrix(1,1,[5])
    c = DeterminantOnlyMatrix(2,2,[1,2,3,4])
    d = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,8])
    e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
    # the method keyword for `det` doesn't kick in until 4x4 matrices,
    # so there is no need to test all methods on smaller ones
    assert a.det() == 1
    assert b.det() == 5
    assert c.det() == -2
    assert d.det() == 3
    assert e.det() == 4*x - 24
    assert e.det(method='bareiss') == 4*x - 24
    assert e.det(method='berkowitz') == 4*x - 24
    raises(ValueError, lambda: e.det(iszerofunc="test"))
def test_adjugate():
    """``adjugate`` (classical adjoint) with both determinant methods."""
    x = Symbol('x')
    e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
    adj = Matrix([
        [ 4, -8, 4, 0],
        [ 76, -14*x - 68, 14*x - 8, -4*x + 24],
        [-122, 17*x + 142, -21*x + 4, 8*x - 48],
        [ 48, -4*x - 72, 8*x, -4*x + 24]])
    assert e.adjugate() == adj
    assert e.adjugate(method='bareiss') == adj
    assert e.adjugate(method='berkowitz') == adj
    a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
    raises(NonSquareMatrixError, lambda: a.adjugate())
def test_cofactor_and_minors():
    """``minor``, ``cofactor``, ``minor_submatrix`` and ``cofactor_matrix``."""
    x = Symbol('x')
    e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
    m = Matrix([
        [ x, 1, 3],
        [ 2, 9, 11],
        [12, 13, 14]])
    cm = Matrix([
        [ 4, 76, -122, 48],
        [-8, -14*x - 68, 17*x + 142, -4*x - 72],
        [ 4, 14*x - 8, -21*x + 4, 8*x],
        [ 0, -4*x + 24, 8*x - 48, -4*x + 24]])
    sub = Matrix([
            [x, 1, 2],
            [4, 5, 6],
            [2, 9, 10]])
    assert e.minor_submatrix(1,2) == m
    # negative indices count from the end
    assert e.minor_submatrix(-1,-1) == sub
    assert e.minor(1,2) == -17*x - 142
    assert e.cofactor(1,2) == 17*x + 142
    assert e.cofactor_matrix() == cm
    assert e.cofactor_matrix(method="bareiss") == cm
    assert e.cofactor_matrix(method="berkowitz") == cm
    # out-of-range indices
    raises(ValueError, lambda: e.cofactor(4,5))
    raises(ValueError, lambda: e.minor(4,5))
    raises(ValueError, lambda: e.minor_submatrix(4,5))
    a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
    assert a.minor_submatrix(0,0) == Matrix([[5, 6]])
    raises(ValueError, lambda: DeterminantOnlyMatrix(0,0,[]).minor_submatrix(0,0))
    # cofactors/minors need a square matrix
    raises(NonSquareMatrixError, lambda: a.cofactor(0,0))
    raises(NonSquareMatrixError, lambda: a.minor(0,0))
    raises(NonSquareMatrixError, lambda: a.cofactor_matrix())
def test_charpoly():
    """Characteristic polynomial in a chosen variable."""
    x, y = Symbol('x'), Symbol('y')
    m = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,9])
    assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x)
    assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y)
    assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x)
    raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly())
# ReductionsOnlyMatrix tests
def test_row_op():
    """Elementary row operations: argument validation and all three ops."""
    e = eye_Reductions(3)
    # invalid operation names and out-of-range / conflicting row indices
    raises(ValueError, lambda: e.elementary_row_op("abc"))
    raises(ValueError, lambda: e.elementary_row_op())
    raises(ValueError, lambda: e.elementary_row_op('n->kn', row=5, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n->kn', row=-5, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=5))
    raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=5, row2=1))
    raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=-5, row2=1))
    raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=-5))
    raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=5, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=5, row2=1, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=-5, row2=1, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=-5, k=5))
    raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=1, k=5))
    # test various ways to set arguments
    assert e.elementary_row_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->kn", row=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->kn", row1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_row_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_row_op("n<->m", row1=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_row_op("n<->m", row=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->n+km", 0, 5, 1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->n+km", row=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
    assert e.elementary_row_op("n->n+km", row1=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
    # make sure the matrix doesn't change size
    a = ReductionsOnlyMatrix(2, 3, [0]*6)
    assert a.elementary_row_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
    assert a.elementary_row_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
    assert a.elementary_row_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_col_op():
    """Elementary column operations; mirrors test_row_op."""
    e = eye_Reductions(3)
    # invalid operation names and out-of-range / conflicting column indices
    raises(ValueError, lambda: e.elementary_col_op("abc"))
    raises(ValueError, lambda: e.elementary_col_op())
    raises(ValueError, lambda: e.elementary_col_op('n->kn', col=5, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n->kn', col=-5, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=5))
    raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=5, col2=1))
    raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=-5, col2=1))
    raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=-5))
    raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=5, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=5, col2=1, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=-5, col2=1, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=-5, k=5))
    raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=1, k=5))
    # test various ways to set arguments
    assert e.elementary_col_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->kn", col=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->kn", col1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
    assert e.elementary_col_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_col_op("n<->m", col1=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_col_op("n<->m", col=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->n+km", 0, 5, 1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->n+km", col=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
    assert e.elementary_col_op("n->n+km", col1=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
    # make sure the matrix doesn't change size
    a = ReductionsOnlyMatrix(2, 3, [0]*6)
    assert a.elementary_col_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
    assert a.elementary_col_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
    assert a.elementary_col_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_is_echelon():
    """``is_echelon`` on zero, identity, rectangular and symbolic matrices."""
    zro = zeros_Reductions(3)
    ident = eye_Reductions(3)
    assert zro.is_echelon
    assert ident.is_echelon
    a = ReductionsOnlyMatrix(0, 0, [])
    assert a.is_echelon
    a = ReductionsOnlyMatrix(2, 3, [3, 2, 1, 0, 0, 6])
    assert a.is_echelon
    a = ReductionsOnlyMatrix(2, 3, [0, 0, 6, 3, 2, 1])
    assert not a.is_echelon
    x = Symbol('x')
    a = ReductionsOnlyMatrix(3, 1, [x, 0, 0])
    assert a.is_echelon
    a = ReductionsOnlyMatrix(3, 1, [x, x, 0])
    assert not a.is_echelon
    a = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
    assert not a.is_echelon
def test_echelon_form():
    """``echelon_form`` results are in echelon form and row-equivalent
    to the input (verified via shared row/null spaces)."""
    # echelon form is not unique, but the result
    # must be row-equivalent to the original matrix
    # and it must be in echelon form.
    a = zeros_Reductions(3)
    e = eye_Reductions(3)
    # we can assume the zero matrix and the identity matrix shouldn't change
    assert a.echelon_form() == a
    assert e.echelon_form() == e
    a = ReductionsOnlyMatrix(0, 0, [])
    assert a.echelon_form() == a
    a = ReductionsOnlyMatrix(1, 1, [5])
    assert a.echelon_form() == a
    # now we get to the real tests
    def verify_row_null_space(mat, rows, nulls):
        # NOTE: reads ``a_echelon`` from the enclosing scope; callers must
        # rebind it before each call
        for v in nulls:
            assert all(t.is_zero for t in a_echelon*v)
        for v in rows:
            if not all(t.is_zero for t in v):
                assert not all(t.is_zero for t in a_echelon*v.transpose())
    a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    nulls = [Matrix([
             [ 1],
             [-2],
             [ 1]])]
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
    a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8])
    nulls = []
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
    a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 2, 1, 3])
    nulls = [Matrix([
             [-S(1)/2],
             [ 1],
             [ 0]]),
             Matrix([
             [-S(3)/2],
             [ 0],
             [ 1]])]
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
    # this one requires a row swap
    a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 1, 1, 3])
    nulls = [Matrix([
             [ 0],
             [ -3],
             [ 1]])]
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
    a = ReductionsOnlyMatrix(3, 3, [0, 3, 3, 0, 2, 2, 0, 1, 1])
    nulls = [Matrix([
             [1],
             [0],
             [0]]),
             Matrix([
             [ 0],
             [-1],
             [ 1]])]
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
    a = ReductionsOnlyMatrix(2, 3, [2, 2, 3, 3, 3, 0])
    nulls = [Matrix([
             [-1],
             [1],
             [0]])]
    rows = [a[i,:] for i in range(a.rows)]
    a_echelon = a.echelon_form()
    assert a_echelon.is_echelon
    verify_row_null_space(a, rows, nulls)
def test_rref():
    """Reduced row echelon form and pivot reporting."""
    e = ReductionsOnlyMatrix(0, 0, [])
    assert e.rref(pivots=False) == e
    e = ReductionsOnlyMatrix(1, 1, [1])
    a = ReductionsOnlyMatrix(1, 1, [5])
    assert e.rref(pivots=False) == a.rref(pivots=False) == e
    a = ReductionsOnlyMatrix(3, 1, [1, 2, 3])
    assert a.rref(pivots=False) == Matrix([[1], [0], [0]])
    a = ReductionsOnlyMatrix(1, 3, [1, 2, 3])
    assert a.rref(pivots=False) == Matrix([[1, 2, 3]])
    a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert a.rref(pivots=False) == Matrix([
                                     [1, 0, -1],
                                     [0, 1, 2],
                                     [0, 0, 0]])
    # row-equivalent matrices share the same rref
    a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 1, 2, 3, 1, 2, 3])
    b = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 0, 0, 0, 0, 0, 0])
    c = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
    d = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 0, 0, 0, 1, 2, 3])
    assert a.rref(pivots=False) == \
            b.rref(pivots=False) == \
            c.rref(pivots=False) == \
            d.rref(pivots=False) == b
    e = eye_Reductions(3)
    z = zeros_Reductions(3)
    assert e.rref(pivots=False) == e
    assert z.rref(pivots=False) == z
    a = ReductionsOnlyMatrix([
            [ 0, 0, 1, 2, 2, -5, 3],
            [-1, 5, 2, 2, 1, -7, 5],
            [ 0, 0, -2, -3, -3, 8, -5],
            [-1, 5, 0, -1, -2, 1, 0]])
    mat, pivot_offsets = a.rref()
    assert mat == Matrix([
                     [1, -5, 0, 0, 1, 1, -1],
                     [0, 0, 1, 0, 0, -1, 1],
                     [0, 0, 0, 1, 1, -2, 1],
                     [0, 0, 0, 0, 0, 0, 0]])
    assert pivot_offsets == (0, 2, 3)
    a = ReductionsOnlyMatrix([[S(1)/19, S(1)/5, 2, 3],
                    [ 4, 5, 6, 7],
                    [ 8, 9, 10, 11],
                    [ 12, 13, 14, 15]])
    assert a.rref(pivots=False) == Matrix([
                                         [1, 0, 0, -S(76)/157],
                                         [0, 1, 0, -S(5)/157],
                                         [0, 0, 1, S(238)/157],
                                         [0, 0, 0, 0]])
    x = Symbol('x')
    a = ReductionsOnlyMatrix(2, 3, [x, 1, 1, sqrt(x), x, 1])
    # compare entrywise via simplification (symbolic entries)
    for i, j in zip(a.rref(pivots=False),
            [1, 0, sqrt(x)*(-x + 1)/(-x**(S(5)/2) + x),
                0, 1, 1/(sqrt(x) + x + 1)]):
        assert simplify(i - j).is_zero
# SpecialOnlyMatrix tests
def test_eye():
    """``eye`` constructor and the ``cls`` return-type override."""
    assert list(SpecialOnlyMatrix.eye(2,2)) == [1, 0, 0, 1]
    assert list(SpecialOnlyMatrix.eye(2)) == [1, 0, 0, 1]
    assert type(SpecialOnlyMatrix.eye(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.eye(2, cls=Matrix)) == Matrix
def test_ones():
    """``ones`` constructor and the ``cls`` return-type override."""
    assert list(SpecialOnlyMatrix.ones(2,2)) == [1, 1, 1, 1]
    assert list(SpecialOnlyMatrix.ones(2)) == [1, 1, 1, 1]
    assert SpecialOnlyMatrix.ones(2,3) == Matrix([[1, 1, 1], [1, 1, 1]])
    assert type(SpecialOnlyMatrix.ones(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.ones(2, cls=Matrix)) == Matrix
def test_zeros():
    """``zeros`` constructor and the ``cls`` return-type override."""
    assert list(SpecialOnlyMatrix.zeros(2,2)) == [0, 0, 0, 0]
    assert list(SpecialOnlyMatrix.zeros(2)) == [0, 0, 0, 0]
    assert SpecialOnlyMatrix.zeros(2,3) == Matrix([[0, 0, 0], [0, 0, 0]])
    assert type(SpecialOnlyMatrix.zeros(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.zeros(2, cls=Matrix)) == Matrix
def test_diag_make():
    """Block-diagonal construction via ``diag``: blocks, scalars, padding
    (``rows``/``cols``), ``unpack`` and kerning with empty matrices."""
    diag = SpecialOnlyMatrix.diag
    a = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    assert diag(a, b, b) == Matrix([
        [1, 2, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0],
        [0, 0, y, 3, 0, 0],
        [0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, y, 3],
    ])
    assert diag(a, b, c) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0, 0],
        [0, 0, y, 3, 0, 0, 0],
        [0, 0, 0, 0, 3, x, 3],
        [0, 0, 0, 0, y, 3, z],
        [0, 0, 0, 0, x, y, z],
    ])
    assert diag(a, c, b) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 3, 0, 0],
        [0, 0, y, 3, z, 0, 0],
        [0, 0, x, y, z, 0, 0],
        [0, 0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, 0, y, 3],
    ])
    a = Matrix([x, y, z])
    b = Matrix([[1, 2], [3, 4]])
    c = Matrix([[5, 6]])
    # this "wandering diagonal" is what makes this
    # a block diagonal where each block is independent
    # of the others
    assert diag(a, 7, b, c) == Matrix([
        [x, 0, 0, 0, 0, 0],
        [y, 0, 0, 0, 0, 0],
        [z, 0, 0, 0, 0, 0],
        [0, 7, 0, 0, 0, 0],
        [0, 0, 1, 2, 0, 0],
        [0, 0, 3, 4, 0, 0],
        [0, 0, 0, 0, 5, 6]])
    # requested size too small for the blocks
    raises(ValueError, lambda: diag(a, 7, b, c, rows=5))
    assert diag(1) == Matrix([[1]])
    assert diag(1, rows=2) == Matrix([[1, 0], [0, 0]])
    assert diag(1, cols=2) == Matrix([[1, 0], [0, 0]])
    assert diag(1, rows=3, cols=2) == Matrix([[1, 0], [0, 0], [0, 0]])
    assert diag(*[2, 3]) == Matrix([
        [2, 0],
        [0, 3]])
    assert diag(Matrix([2, 3])) == Matrix([
        [2],
        [3]])
    assert diag([1, [2, 3], 4], unpack=False) == \
            diag([[1], [2, 3], [4]], unpack=False) == Matrix([
        [1, 0],
        [2, 3],
        [4, 0]])
    assert type(diag(1)) == SpecialOnlyMatrix
    assert type(diag(1, cls=Matrix)) == Matrix
    assert Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
    assert Matrix.diag([1, 2, 3], unpack=False).shape == (3, 1)
    assert Matrix.diag([[1, 2, 3]]).shape == (3, 1)
    assert Matrix.diag([[1, 2, 3]], unpack=False).shape == (1, 3)
    assert Matrix.diag([[[1, 2, 3]]]).shape == (1, 3)
    # kerning can be used to move the starting point
    assert Matrix.diag(ones(0, 2), 1, 2) == Matrix([
        [0, 0, 1, 0],
        [0, 0, 0, 2]])
    assert Matrix.diag(ones(2, 0), 1, 2) == Matrix([
        [0, 0],
        [0, 0],
        [1, 0],
        [0, 2]])
def test_diagonal():
    """``diagonal(k)`` extracts the k-th diagonal; bad offsets raise."""
    m = Matrix(3, 3, range(9))
    d = m.diagonal()
    assert d == m.diagonal(0)
    assert tuple(d) == (0, 4, 8)
    assert tuple(m.diagonal(1)) == (1, 5)
    assert tuple(m.diagonal(-1)) == (3, 7)
    assert tuple(m.diagonal(2)) == (2,)
    # the result preserves the matrix class
    assert type(m.diagonal()) == type(m)
    s = SparseMatrix(3, 3, {(1, 1): 1})
    assert type(s.diagonal()) == type(s)
    assert type(m) != type(s)
    raises(ValueError, lambda: m.diagonal(3))
    raises(ValueError, lambda: m.diagonal(-3))
    raises(ValueError, lambda: m.diagonal(pi))
def test_jordan_block():
    """``jordan_block``: positional/keyword forms, band selection,
    argument validation and the deprecated rows/cols interface."""
    assert SpecialOnlyMatrix.jordan_block(3, 2) == SpecialOnlyMatrix.jordan_block(3, eigenvalue=2) \
            == SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) \
            == SpecialOnlyMatrix.jordan_block(3, 2, band='upper') \
            == SpecialOnlyMatrix.jordan_block(
                size=3, eigenval=2, eigenvalue=2) \
            == Matrix([
                [2, 1, 0],
                [0, 2, 1],
                [0, 0, 2]])
    assert SpecialOnlyMatrix.jordan_block(3, 2, band='lower') == Matrix([
                    [2, 0, 0],
                    [1, 2, 0],
                    [0, 1, 2]])
    # missing eigenvalue
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(2))
    # non-integral size
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(3.5, 2))
    # size not specified
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2))
    # inconsistent eigenvalue
    raises(ValueError,
    lambda: SpecialOnlyMatrix.jordan_block(
        eigenvalue=2, eigenval=4))
    # Deprecated feature
    raises(SymPyDeprecationWarning,
        lambda: SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2))
    raises(SymPyDeprecationWarning,
        lambda: SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2))
    with warns_deprecated_sympy():
        assert SpecialOnlyMatrix.jordan_block(3, 2) == \
            SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) == \
            SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2)
    with warns_deprecated_sympy():
        assert SpecialOnlyMatrix.jordan_block(
            rows=4, cols=3, eigenvalue=2) == \
        Matrix([
            [2, 1, 0],
            [0, 2, 1],
            [0, 0, 2],
            [0, 0, 0]])
    # Using alias keyword
    assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == \
        SpecialOnlyMatrix.jordan_block(size=3, eigenval=2)
# SubspaceOnlyMatrix tests
def test_columnspace():
    """``columnspace`` returns an independent basis of the column space."""
    m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
                [-2, -5, 1, -1, -8],
                [ 0, -3, 3, 4, 1],
                [ 3, 6, 0, -7, 2]])
    basis = m.columnspace()
    assert basis[0] == Matrix([1, -2, 0, 3])
    assert basis[1] == Matrix([2, -5, -3, 6])
    assert basis[2] == Matrix([2, -1, 4, -7])
    assert len(basis) == 3
    # appending the basis vectors must not enlarge the column space
    assert Matrix.hstack(m, *basis).columnspace() == basis
def test_rowspace():
    """``rowspace`` returns an independent basis of the row space."""
    m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
                [-2, -5, 1, -1, -8],
                [ 0, -3, 3, 4, 1],
                [ 3, 6, 0, -7, 2]])
    basis = m.rowspace()
    assert basis[0] == Matrix([[1, 2, 0, 2, 5]])
    assert basis[1] == Matrix([[0, -1, 1, 3, 2]])
    assert basis[2] == Matrix([[0, 0, 0, 5, 5]])
    assert len(basis) == 3
def test_nullspace():
    """``nullspace`` vectors are annihilated by the matrix."""
    m = SubspaceOnlyMatrix([[ 1, 2, 0, 2, 5],
                [-2, -5, 1, -1, -8],
                [ 0, -3, 3, 4, 1],
                [ 3, 6, 0, -7, 2]])
    basis = m.nullspace()
    assert basis[0] == Matrix([-2, 1, 1, 0, 0])
    assert basis[1] == Matrix([-1, -1, 0, -1, 1])
    # make sure the null space is really gets zeroed
    assert all(e.is_zero for e in m*basis[0])
    assert all(e.is_zero for e in m*basis[1])
def test_orthogonalize():
    """Gram-Schmidt orthogonalization, with and without normalization;
    zero vectors are dropped from the result."""
    m = Matrix([[1, 2], [3, 4]])
    assert m.orthogonalize(Matrix([[2], [1]])) == [Matrix([[2], [1]])]
    assert m.orthogonalize(Matrix([[2], [1]]), normalize=True) == [Matrix([[2*sqrt(5)/5], [sqrt(5)/5]])]
    assert m.orthogonalize(Matrix([[1], [2]]), Matrix([[-1], [4]])) == [Matrix([[1], [2]]), Matrix([[-S(12)/5], [S(6)/5]])]
    assert m.orthogonalize(Matrix([[0], [0]]), Matrix([[-1], [4]])) == [Matrix([[-1], [4]])]
    assert m.orthogonalize(Matrix([[0], [0]])) == []
    n = Matrix([[9, 1, 9], [3, 6, 10], [8, 5, 2]])
    vecs = [Matrix([[-5], [1]]), Matrix([[-5], [2]]), Matrix([[-5], [-2]])]
    assert n.orthogonalize(*vecs) == [Matrix([[-5], [1]]), Matrix([[S(5)/26], [S(25)/26]])]
# EigenOnlyMatrix tests
def test_eigenvals():
    """Eigenvalues with multiplicities; unfactorable charpoly raises."""
    M = EigenOnlyMatrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
    # if we cannot factor the char poly, we raise an error
    m = Matrix([
        [3, 0, 0, 0, -3],
        [0, -3, -3, 0, 3],
        [0, 3, 0, 3, 0],
        [0, 0, 3, 0, 3],
        [3, 0, 0, 3, 0]])
    raises(MatrixError, lambda: m.eigenvals())
def test_eigenvects():
    """Right eigenvectors satisfy M*v == lambda*v."""
    M = EigenOnlyMatrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    vecs = M.eigenvects()
    for val, mult, vec_list in vecs:
        assert len(vec_list) == 1
        assert M*vec_list[0] == val*vec_list[0]
def test_left_eigenvects():
    """Left eigenvectors satisfy v*M == lambda*v."""
    M = EigenOnlyMatrix([[0, 1, 1],
                [1, 0, 0],
                [1, 1, 1]])
    vecs = M.left_eigenvects()
    for val, mult, vec_list in vecs:
        assert len(vec_list) == 1
        assert vec_list[0]*M == val*vec_list[0]
def test_diagonalize():
    """``diagonalize``: complex eigenvalues, reals_only, float preservation."""
    m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
    # eigenvalues are +/-I, so a real-only diagonalization is impossible
    raises(MatrixError, lambda: m.diagonalize(reals_only=True))
    P, D = m.diagonalize()
    assert D.is_diagonal()
    assert D == Matrix([
                 [-I, 0],
                 [ 0, I]])
    # make sure we use floats out if floats are passed in
    m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0])
    P, D = m.diagonalize()
    assert all(isinstance(e, Float) for e in D.values())
    assert all(isinstance(e, Float) for e in P.values())
    _, D2 = m.diagonalize(reals_only=True)
    assert D == D2
def test_is_diagonalizable():
    """Symmetric matrices are diagonalizable; defective ones are not."""
    a, b, c = symbols('a b c')
    m = EigenOnlyMatrix(2, 2, [a, c, c, b])
    assert m.is_symmetric()
    assert m.is_diagonalizable()
    # a nontrivial Jordan block is not diagonalizable
    assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
    m = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
    assert m.is_diagonalizable()
    # only over the complex numbers, though
    assert not m.is_diagonalizable(reals_only=True)
def test_jordan_form():
    """Jordan canonical form: block structure, transform, floats."""
    m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
    raises(NonSquareMatrixError, lambda: m.jordan_form())
    # the next two tests test the cases where the old
    # algorithm failed due to the fact that the block structure can
    # *NOT* be determined  from algebraic and geometric multiplicity alone
    # This can be seen most easily when one lets compute the J.c.f. of a matrix that
    # is in J.c.f already.
    m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
                    0, 2, 1, 0,
                    0, 0, 2, 0,
                    0, 0, 0, 2
    ])
    P, J = m.jordan_form()
    assert m == J
    m = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
                    0, 2, 0, 0,
                    0, 0, 2, 1,
                    0, 0, 0, 2
    ])
    P, J = m.jordan_form()
    assert m == J
    A = Matrix([[ 2, 4, 1, 0],
                [-4, 2, 0, 1],
                [ 0, 0, 2, 4],
                [ 0, 0, -4, 2]])
    P, J = A.jordan_form()
    # P must actually conjugate A into J
    assert simplify(P*J*P.inv()) == A
    assert EigenOnlyMatrix(1,1,[1]).jordan_form() == (Matrix([1]), Matrix([1]))
    assert EigenOnlyMatrix(1,1,[1]).jordan_form(calc_transform=False) == Matrix([1])
    # make sure if we cannot factor the characteristic polynomial, we raise an error
    m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
    raises(MatrixError, lambda: m.jordan_form())
    # make sure that if the input has floats, the output does too
    m = Matrix([
        [ 0.6875, 0.125 + 0.1875*sqrt(3)],
        [0.125 + 0.1875*sqrt(3), 0.3125]])
    P, J = m.jordan_form()
    assert all(isinstance(x, Float) or x == 0 for x in P)
    assert all(isinstance(x, Float) or x == 0 for x in J)
def test_singular_values():
    """Singular values, sorted when possible, for square and tall matrices."""
    x = Symbol('x', real=True)
    A = EigenOnlyMatrix([[0, 1*I], [2, 0]])
    # if singular values can be sorted, they should be in decreasing order
    assert A.singular_values() == [2, 1]
    A = eye(3)
    A[1, 1] = x
    A[2, 2] = 5
    vals = A.singular_values()
    # since Abs(x) cannot be sorted, test set equality
    assert set(vals) == set([5, 1, Abs(x)])
    A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
    vals = [sv.trigsimp() for sv in A.singular_values()]
    assert vals == [S(1), S(1)]
    A = EigenOnlyMatrix([
        [2, 4],
        [1, 3],
        [0, 0],
        [0, 0]
        ])
    assert A.singular_values() == \
        [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
    # the transpose gains zero singular values for the extra dimensions
    assert A.T.singular_values() == \
        [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221)), 0, 0]
# CalculusOnlyMatrix tests
@XFAIL
def test_diff():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
# TODO: currently not working as ``_MinimalMatrix`` cannot be sympified:
assert m.diff(x) == Matrix(2, 1, [1, 0])
def test_integrate():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [x, y])
assert m.integrate(x) == Matrix(2, 1, [x**2/2, y*x])
def test_jacobian2():
rho, phi = symbols("rho,phi")
X = CalculusOnlyMatrix(3, 1, [rho*cos(phi), rho*sin(phi), rho**2])
Y = CalculusOnlyMatrix(2, 1, [rho, phi])
J = Matrix([
[cos(phi), -rho*sin(phi)],
[sin(phi), rho*cos(phi)],
[ 2*rho, 0],
])
assert X.jacobian(Y) == J
m = CalculusOnlyMatrix(2, 2, [1, 2, 3, 4])
m2 = CalculusOnlyMatrix(4, 1, [1, 2, 3, 4])
raises(TypeError, lambda: m.jacobian(Matrix([1,2])))
raises(TypeError, lambda: m2.jacobian(m))
def test_limit():
x, y = symbols('x y')
m = CalculusOnlyMatrix(2, 1, [1/x, y])
assert m.limit(x, 5) == Matrix(2, 1, [S(1)/5, y])
def test_issue_13774():
M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
v = [1,1,1]
raises(TypeError, lambda: M*v)
raises(TypeError, lambda: v*M)
def test___eq__():
assert (EigenOnlyMatrix(
[[0, 1, 1],
[1, 0, 0],
[1, 1, 1]]) == {}) is False
import collections
import random
from sympy.assumptions import Q
from sympy.core.add import Add
from sympy.core.compatibility import range
from sympy.core.function import (Function, diff)
from sympy.core.numbers import (E, Float, I, Integer, oo, pi)
from sympy.core.relational import (Eq, Lt)
from sympy.core.singleton import S
from sympy.core.symbol import (Symbol, symbols)
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt)
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (cos, sin, tan)
from sympy.logic.boolalg import (And, Or)
from sympy.matrices.common import (ShapeError, MatrixError, NonSquareMatrixError,
_MinimalMatrix, MatrixShaping, MatrixProperties, MatrixOperations, MatrixArithmetic,
MatrixSpecial)
from sympy.matrices.matrices import (MatrixDeterminant,
MatrixReductions, MatrixSubspaces, MatrixEigen, MatrixCalculus)
from sympy.matrices import (Matrix, diag, eye,
matrix_multiply_elementwise, ones, zeros, SparseMatrix)
from sympy.polys.polytools import Poly
from sympy.simplify.simplify import simplify
from sympy.simplify.trigsimp import trigsimp
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.iterables import flatten
from sympy.utilities.pytest import (raises, XFAIL, slow, skip,
warns_deprecated_sympy)
from sympy.abc import a, b, c, d, x, y, z
class ShapingOnlyMatrix(_MinimalMatrix, MatrixShaping):
    """Minimal matrix exposing only the ``MatrixShaping`` mixin, so that
    shaping methods can be tested in isolation from the full Matrix class."""
    pass
def eye_Shaping(n):
    """Return the ``n x n`` identity as a ShapingOnlyMatrix."""
    return ShapingOnlyMatrix(n, n, lambda r, c: 1 if r == c else 0)
def zeros_Shaping(n):
    """Return the ``n x n`` zero matrix as a ShapingOnlyMatrix."""
    return ShapingOnlyMatrix(n, n, lambda r, c: 0)
class PropertiesOnlyMatrix(_MinimalMatrix, MatrixProperties):
pass
def eye_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Properties(n):
return PropertiesOnlyMatrix(n, n, lambda i, j: 0)
class OperationsOnlyMatrix(_MinimalMatrix, MatrixOperations):
pass
def eye_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Operations(n):
return OperationsOnlyMatrix(n, n, lambda i, j: 0)
class ArithmeticOnlyMatrix(_MinimalMatrix, MatrixArithmetic):
pass
def eye_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Arithmetic(n):
return ArithmeticOnlyMatrix(n, n, lambda i, j: 0)
class DeterminantOnlyMatrix(_MinimalMatrix, MatrixDeterminant):
pass
def eye_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Determinant(n):
return DeterminantOnlyMatrix(n, n, lambda i, j: 0)
class ReductionsOnlyMatrix(_MinimalMatrix, MatrixReductions):
pass
def eye_Reductions(n):
return ReductionsOnlyMatrix(n, n, lambda i, j: int(i == j))
def zeros_Reductions(n):
return ReductionsOnlyMatrix(n, n, lambda i, j: 0)
class SpecialOnlyMatrix(_MinimalMatrix, MatrixSpecial):
pass
class SubspaceOnlyMatrix(_MinimalMatrix, MatrixSubspaces):
pass
class EigenOnlyMatrix(_MinimalMatrix, MatrixEigen):
pass
class CalculusOnlyMatrix(_MinimalMatrix, MatrixCalculus):
pass
def test__MinimalMatrix():
x = _MinimalMatrix(2, 3, [1, 2, 3, 4, 5, 6])
assert x.rows == 2
assert x.cols == 3
assert x[2] == 3
assert x[1, 1] == 5
assert list(x) == [1, 2, 3, 4, 5, 6]
assert list(x[1, :]) == [4, 5, 6]
assert list(x[:, 1]) == [2, 5]
assert list(x[:, :]) == list(x)
assert x[:, :] == x
assert _MinimalMatrix(x) == x
assert _MinimalMatrix([[1, 2, 3], [4, 5, 6]]) == x
assert _MinimalMatrix(([1, 2, 3], [4, 5, 6])) == x
assert _MinimalMatrix([(1, 2, 3), (4, 5, 6)]) == x
assert _MinimalMatrix(((1, 2, 3), (4, 5, 6))) == x
assert not (_MinimalMatrix([[1, 2], [3, 4], [5, 6]]) == x)
def test_vec():
    """``vec`` stacks the columns of a matrix into one column vector."""
    mat = ShapingOnlyMatrix(2, 2, [1, 3, 2, 4])
    stacked = mat.vec()
    assert stacked.cols == 1
    # column-major order: entries come out as 1, 2, 3, 4
    assert all(stacked[idx] == idx + 1 for idx in range(4))
def test_tolist():
lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]
flat_lst = [S.One, S.Half, x*y, S.Zero, x, y, z, x**2, y, -S.One, z*x, 3]
m = ShapingOnlyMatrix(3, 4, flat_lst)
assert m.tolist() == lst
def test_row_col_del():
e = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
raises(ValueError, lambda: e.row_del(5))
raises(ValueError, lambda: e.row_del(-5))
raises(ValueError, lambda: e.col_del(5))
raises(ValueError, lambda: e.col_del(-5))
assert e.row_del(2) == e.row_del(-1) == Matrix([[1, 2, 3], [4, 5, 6]])
assert e.col_del(2) == e.col_del(-1) == Matrix([[1, 2], [4, 5], [7, 8]])
assert e.row_del(1) == e.row_del(-2) == Matrix([[1, 2, 3], [7, 8, 9]])
assert e.col_del(1) == e.col_del(-2) == Matrix([[1, 3], [4, 6], [7, 9]])
def test_get_diag_blocks1():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
assert a.get_diag_blocks() == [a]
assert b.get_diag_blocks() == [b]
assert c.get_diag_blocks() == [c]
def test_get_diag_blocks2():
a = Matrix([[1, 2], [2, 3]])
b = Matrix([[3, x], [y, 3]])
c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
A, B, C, D = diag(a, b, b), diag(a, b, c), diag(a, c, b), diag(c, c, b)
A = ShapingOnlyMatrix(A.rows, A.cols, A)
B = ShapingOnlyMatrix(B.rows, B.cols, B)
C = ShapingOnlyMatrix(C.rows, C.cols, C)
D = ShapingOnlyMatrix(D.rows, D.cols, D)
assert A.get_diag_blocks() == [a, b, b]
assert B.get_diag_blocks() == [a, b, c]
assert C.get_diag_blocks() == [a, c, b]
assert D.get_diag_blocks() == [c, c, b]
def test_shape():
    """``shape`` reports ``(rows, cols)`` for a non-square matrix."""
    m = ShapingOnlyMatrix(1, 2, [0, 0])
    # fix: the original wrote ``m.shape == (1, 2)`` without ``assert``,
    # so the comparison result was discarded and the test checked nothing.
    assert m.shape == (1, 2)
def test_reshape():
m0 = eye_Shaping(3)
assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))
m1 = ShapingOnlyMatrix(3, 4, lambda i, j: i + j)
assert m1.reshape(
4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))
assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))
def test_row_col():
m = ShapingOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert m.row(0) == Matrix(1, 3, [1, 2, 3])
assert m.col(0) == Matrix(3, 1, [1, 4, 7])
def test_row_join():
assert eye_Shaping(3).row_join(Matrix([7, 7, 7])) == \
Matrix([[1, 0, 0, 7],
[0, 1, 0, 7],
[0, 0, 1, 7]])
def test_col_join():
assert eye_Shaping(3).col_join(Matrix([[7, 7, 7]])) == \
Matrix([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[7, 7, 7]])
def test_row_insert():
r4 = Matrix([[4, 4, 4]])
for i in range(-4, 5):
l = [1, 0, 0]
l.insert(i, 4)
assert flatten(eye_Shaping(3).row_insert(i, r4).col(0).tolist()) == l
def test_col_insert():
c4 = Matrix([4, 4, 4])
for i in range(-4, 5):
l = [0, 0, 0]
l.insert(i, 4)
assert flatten(zeros_Shaping(3).col_insert(i, c4).row(0).tolist()) == l
assert eye_Shaping(6).col_insert(3, Matrix([[2, 2], [2, 2], [2, 2], [2, 2], [2, 2], [2, 2]])) == \
Matrix([[1, 0, 0, 2, 2, 0, 0, 0],
[0, 1, 0, 2, 2, 0, 0, 0],
[0, 0, 1, 2, 2, 0, 0, 0],
[0, 0, 0, 2, 2, 1, 0, 0],
[0, 0, 0, 2, 2, 0, 1, 0],
[0, 0, 0, 2, 2, 0, 0, 1]])
def test_extract():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])
assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])
assert m.extract(range(4), range(3)) == m
raises(IndexError, lambda: m.extract([4], [0]))
raises(IndexError, lambda: m.extract([0], [3]))
def test_hstack():
    """Horizontal stacking: shape checks, empty stack, and the
    degenerate zero-row case (column counts must still accumulate)."""
    m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
    m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
    assert m == m.hstack(m)
    assert m.hstack(m, m, m) == ShapingOnlyMatrix.hstack(m, m, m) == Matrix([
        [0, 1, 2, 0, 1, 2, 0, 1, 2],
        [3, 4, 5, 3, 4, 5, 3, 4, 5],
        [6, 7, 8, 6, 7, 8, 6, 7, 8],
        [9, 10, 11, 9, 10, 11, 9, 10, 11]])
    # incompatible row counts must raise
    raises(ShapeError, lambda: m.hstack(m, m2))
    assert Matrix.hstack() == Matrix()
    # zero-row matrices: hstack keeps the total column count
    # fix: the original read ``1 = Matrix.zeros(0, 0)`` (assignment to a
    # literal — a SyntaxError); the name ``M1`` is used four lines below.
    M1 = Matrix.zeros(0, 0)
    M2 = Matrix.zeros(0, 1)
    M3 = Matrix.zeros(0, 2)
    M4 = Matrix.zeros(0, 3)
    m = ShapingOnlyMatrix.hstack(M1, M2, M3, M4)
    assert m.rows == 0 and m.cols == 6
def test_vstack():
m = ShapingOnlyMatrix(4, 3, lambda i, j: i*3 + j)
m2 = ShapingOnlyMatrix(3, 4, lambda i, j: i*3 + j)
assert m == m.vstack(m)
assert m.vstack(m, m, m) == ShapingOnlyMatrix.vstack(m, m, m) == Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
raises(ShapeError, lambda: m.vstack(m, m2))
assert Matrix.vstack() == Matrix()
def test_atoms():
m = PropertiesOnlyMatrix(2, 2, [1, 2, x, 1 - 1/x])
assert m.atoms() == {S(1),S(2),S(-1), x}
assert m.atoms(Symbol) == {x}
def test_free_symbols():
assert PropertiesOnlyMatrix([[x], [0]]).free_symbols == {x}
def test_has():
A = PropertiesOnlyMatrix(((x, y), (2, 3)))
assert A.has(x)
assert not A.has(z)
assert A.has(Symbol)
A = PropertiesOnlyMatrix(((2, y), (2, 3)))
assert not A.has(x)
def test_is_anti_symmetric():
x = symbols('x')
assert PropertiesOnlyMatrix(2, 1, [1, 2]).is_anti_symmetric() is False
m = PropertiesOnlyMatrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])
assert m.is_anti_symmetric() is True
assert m.is_anti_symmetric(simplify=False) is False
assert m.is_anti_symmetric(simplify=lambda x: x) is False
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in m])
assert m.is_anti_symmetric(simplify=False) is True
m = PropertiesOnlyMatrix(3, 3, [x.expand() for x in [S.One] + list(m)[1:]])
assert m.is_anti_symmetric() is False
def test_diagonal_symmetrical():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert not m.is_diagonal()
assert m.is_symmetric()
assert m.is_symmetric(simplify=False)
m = PropertiesOnlyMatrix(2, 2, [1, 0, 0, 1])
assert m.is_diagonal()
m = PropertiesOnlyMatrix(3, 3, diag(1, 2, 3))
assert m.is_diagonal()
assert m.is_symmetric()
m = PropertiesOnlyMatrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])
assert m == diag(1, 2, 3)
m = PropertiesOnlyMatrix(2, 3, zeros(2, 3))
assert not m.is_symmetric()
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0), (0, 6), (0, 0)))
assert m.is_diagonal()
m = PropertiesOnlyMatrix(((5, 0, 0), (0, 6, 0)))
assert m.is_diagonal()
m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])
assert m.is_symmetric()
assert not m.is_symmetric(simplify=False)
assert m.expand().is_symmetric(simplify=False)
def test_is_hermitian():
a = PropertiesOnlyMatrix([[1, I], [-I, 1]])
assert a.is_hermitian
a = PropertiesOnlyMatrix([[2*I, I], [-I, 1]])
assert a.is_hermitian is False
a = PropertiesOnlyMatrix([[x, I], [-I, 1]])
assert a.is_hermitian is None
a = PropertiesOnlyMatrix([[x, 1], [-I, 1]])
assert a.is_hermitian is False
def test_is_Identity():
assert eye_Properties(3).is_Identity
assert not PropertiesOnlyMatrix(zeros(3)).is_Identity
assert not PropertiesOnlyMatrix(ones(3)).is_Identity
assert not PropertiesOnlyMatrix([[1, 0, 0]]).is_Identity
def test_is_symbolic():
a = PropertiesOnlyMatrix([[x, x], [x, x]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, 7, 8]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1, 2, 3, 4], [5, 6, x, 8]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, x, 3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_symbolic() is False
a = PropertiesOnlyMatrix([[1], [x], [3]])
assert a.is_symbolic() is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_symbolic() is False
def test_is_upper():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_upper is True
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_upper is False
def test_is_lower():
a = PropertiesOnlyMatrix([[1, 2, 3]])
assert a.is_lower is False
a = PropertiesOnlyMatrix([[1], [2], [3]])
assert a.is_lower is True
def test_is_square():
m = PropertiesOnlyMatrix([[1],[1]])
m2 = PropertiesOnlyMatrix([[2,2],[2,2]])
assert not m.is_square
assert m2.is_square
def test_is_symmetric():
m = PropertiesOnlyMatrix(2, 2, [0, 1, 1, 0])
assert m.is_symmetric()
m = PropertiesOnlyMatrix(2, 2, [0, 1, 0, 1])
assert not m.is_symmetric()
def test_is_hessenberg():
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])
assert A.is_upper_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, 0, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg
A = PropertiesOnlyMatrix(3, 3, [3, 2, -1, 4, 4, 1, 1, 5, 2])
assert A.is_lower_hessenberg is False
assert A.is_upper_hessenberg is False
A = PropertiesOnlyMatrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])
assert not A.is_upper_hessenberg
def test_is_zero():
    """``is_zero``: True for an all-zero matrix, False once a provably
    nonzero entry exists, None when symbolic entries are undecidable."""
    assert PropertiesOnlyMatrix(0, 0, []).is_zero
    assert PropertiesOnlyMatrix([[0, 0], [0, 0]]).is_zero
    assert PropertiesOnlyMatrix(zeros(3, 4)).is_zero
    assert not PropertiesOnlyMatrix(eye(3)).is_zero
    # fix: compare against the None/False singletons with ``is``
    # (PEP 8 / E711, E712) instead of ``==``
    assert PropertiesOnlyMatrix([[x, 0], [0, 0]]).is_zero is None
    assert PropertiesOnlyMatrix([[x, 1], [0, 0]]).is_zero is False
    a = Symbol('a', nonzero=True)
    assert PropertiesOnlyMatrix([[a, 0], [0, 0]]).is_zero is False
def test_values():
assert set(PropertiesOnlyMatrix(2,2,[0,1,2,3]).values()) == set([1,2,3])
x = Symbol('x', real=True)
assert set(PropertiesOnlyMatrix(2,2,[x,0,0,1]).values()) == set([x,1])
def test_applyfunc():
m0 = OperationsOnlyMatrix(eye(3))
assert m0.applyfunc(lambda x: 2*x) == eye(3)*2
assert m0.applyfunc(lambda x: 0) == zeros(3)
assert m0.applyfunc(lambda x: 1) == ones(3)
def test_adjoint():
dat = [[0, I], [1, 0]]
ans = OperationsOnlyMatrix([[0, 1], [-I, 0]])
assert ans.adjoint() == Matrix(dat)
def test_as_real_imag():
m1 = OperationsOnlyMatrix(2,2,[1,2,3,4])
m3 = OperationsOnlyMatrix(2,2,[1+S.ImaginaryUnit,2+2*S.ImaginaryUnit,3+3*S.ImaginaryUnit,4+4*S.ImaginaryUnit])
a,b = m3.as_real_imag()
assert a == m1
assert b == m1
def test_conjugate():
M = OperationsOnlyMatrix([[0, I, 5],
[1, 2, 0]])
assert M.T == Matrix([[0, 1],
[I, 2],
[5, 0]])
assert M.C == Matrix([[0, -I, 5],
[1, 2, 0]])
assert M.C == M.conjugate()
assert M.H == M.T.C
assert M.H == Matrix([[ 0, 1],
[-I, 2],
[ 5, 0]])
def test_doit():
a = OperationsOnlyMatrix([[Add(x,x, evaluate=False)]])
assert a[0] != 2*x
assert a.doit() == Matrix([[2*x]])
def test_evalf():
a = OperationsOnlyMatrix(2, 1, [sqrt(5), 6])
assert all(a.evalf()[i] == a[i].evalf() for i in range(2))
assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))
assert all(a.n(2)[i] == a[i].n(2) for i in range(2))
def test_expand():
m0 = OperationsOnlyMatrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])
m1 = m0.expand()
assert m1 == Matrix(
[[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])
a = Symbol('a', real=True)
assert OperationsOnlyMatrix(1, 1, [exp(I*a)]).expand(complex=True) == \
Matrix([cos(a) + I*sin(a)])
def test_refine():
m0 = OperationsOnlyMatrix([[Abs(x)**2, sqrt(x**2)],
[sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])
m1 = m0.refine(Q.real(x) & Q.real(y))
assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])
m1 = m0.refine(Q.positive(x) & Q.positive(y))
assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])
m1 = m0.refine(Q.negative(x) & Q.negative(y))
assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])
def test_replace():
F, G = symbols('F, G', cls=Function)
K = OperationsOnlyMatrix(2, 2, lambda i, j: G(i+j))
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G)
assert N == K
def test_replace_map():
F, G = symbols('F, G', cls=Function)
K = OperationsOnlyMatrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1) \
: G(1)}), (G(2), {F(2): G(2)})])
M = OperationsOnlyMatrix(2, 2, lambda i, j: F(i+j))
N = M.replace(F, G, True)
assert N == K
def test_simplify():
n = Symbol('n')
f = Function('f')
M = OperationsOnlyMatrix([[ 1/x + 1/y, (x + x*y) / x ],
[ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])
assert M.simplify() == Matrix([[ (x + y)/(x * y), 1 + y ],
[ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])
eq = (1 + x)**2
M = OperationsOnlyMatrix([[eq]])
assert M.simplify() == Matrix([[eq]])
assert M.simplify(ratio=oo) == Matrix([[eq.simplify(ratio=oo)]])
def test_subs():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
assert OperationsOnlyMatrix([[x*y]]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \
Matrix([[(x - 1)*(y - 1)]])
def test_trace():
M = OperationsOnlyMatrix([[1, 0, 0],
[0, 5, 0],
[0, 0, 8]])
assert M.trace() == 14
def test_xreplace():
assert OperationsOnlyMatrix([[1, x], [x, 4]]).xreplace({x: 5}) == \
Matrix([[1, 5], [5, 4]])
assert OperationsOnlyMatrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \
Matrix([[-1, 2], [-3, 4]])
def test_permute():
a = OperationsOnlyMatrix(3, 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
raises(IndexError, lambda: a.permute([[0,5]]))
b = a.permute_rows([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]]) == b == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
b = a.permute_cols([[0, 2], [0, 1]])
assert a.permute([[0, 2], [0, 1]], orientation='cols') == b ==\
Matrix([
[ 2, 3, 1, 4],
[ 6, 7, 5, 8],
[10, 11, 9, 12]])
b = a.permute_cols([[0, 2], [0, 1]], direction='backward')
assert a.permute([[0, 2], [0, 1]], orientation='cols', direction='backward') == b ==\
Matrix([
[ 3, 1, 2, 4],
[ 7, 5, 6, 8],
[11, 9, 10, 12]])
assert a.permute([1, 2, 0, 3]) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
from sympy.combinatorics import Permutation
assert a.permute(Permutation([1, 2, 0, 3])) == Matrix([
[5, 6, 7, 8],
[9, 10, 11, 12],
[1, 2, 3, 4]])
def test_abs():
    """``abs()`` applies ``Abs`` to every entry."""
    mat = ArithmeticOnlyMatrix([[1, -2], [x, y]])
    expected = ArithmeticOnlyMatrix([[1, 2], [Abs(x), Abs(y)]])
    assert abs(mat) == expected
def test_add():
    """Entrywise addition doubles every entry; mismatched shapes raise."""
    mat = ArithmeticOnlyMatrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])
    doubled = ArithmeticOnlyMatrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])
    assert mat + mat == doubled
    row = ArithmeticOnlyMatrix(1, 2, [1, 2])
    raises(ShapeError, lambda: mat + row)
def test_multiplication():
    """Matrix products: shape checking, ``*``, ``@``, elementwise
    multiplication and scalar multiplication from both sides."""
    a = ArithmeticOnlyMatrix((
        (1, 2),
        (3, 1),
        (0, 6),
    ))
    b = ArithmeticOnlyMatrix((
        (1, 2),
        (3, 0),
    ))
    # incompatible shapes and non-matrix operands must raise
    raises(ShapeError, lambda: b*a)
    raises(TypeError, lambda: a*{})
    c = a*b
    assert c[0, 0] == 7
    assert c[0, 1] == 2
    assert c[1, 0] == 6
    assert c[1, 1] == 6
    assert c[2, 0] == 18
    assert c[2, 1] == 0
    # fix: ``eval('c = a @ b')`` always raised SyntaxError because ``eval``
    # only accepts expressions, not assignment statements — so the matmul
    # assertions in the ``else`` branch were dead code.  Use ``exec`` with
    # an explicit namespace; on interpreters without the ``@`` operator the
    # SyntaxError is still caught, preserving the original intent.
    ns = {'a': a, 'b': b}
    try:
        exec('c = a @ b', ns)
    except SyntaxError:
        pass
    else:
        c = ns['c']
        assert c[0, 0] == 7
        assert c[0, 1] == 2
        assert c[1, 0] == 6
        assert c[1, 1] == 6
        assert c[2, 0] == 18
        assert c[2, 1] == 0
    h = a.multiply_elementwise(c)
    assert h == matrix_multiply_elementwise(a, c)
    assert h[0, 0] == 7
    assert h[0, 1] == 4
    assert h[1, 0] == 18
    assert h[1, 1] == 6
    assert h[2, 0] == 0
    assert h[2, 1] == 0
    raises(ShapeError, lambda: a.multiply_elementwise(b))
    # scalar multiplication from either side
    c = b * Symbol("x")
    assert isinstance(c, ArithmeticOnlyMatrix)
    assert c[0, 0] == x
    assert c[0, 1] == 2*x
    assert c[1, 0] == 3*x
    assert c[1, 1] == 0
    c2 = x * b
    assert c == c2
    c = 5 * b
    assert isinstance(c, ArithmeticOnlyMatrix)
    assert c[0, 0] == 5
    assert c[0, 1] == 2*5
    assert c[1, 0] == 3*5
    assert c[1, 1] == 0
    # fix: the original ``eval('c = 5 @ b')`` was the same dead-code pattern,
    # and its unreachable assertions contradicted test_matmul, which expects
    # scalar @ matrix to raise TypeError (NotImplemented from __rmatmul__).
    # Mirror test_matmul's catch pattern instead.
    try:
        eval('5 @ b')
    except SyntaxError:
        pass
    except TypeError:
        pass
def test_matmul():
    """``@`` with a scalar operand must be rejected on both sides."""
    a = Matrix([[1, 2], [3, 4]])
    # both dunders decline scalars, signalling Python to raise TypeError
    assert a.__matmul__(2) == NotImplemented
    assert a.__rmatmul__(2) == NotImplemented
    # ``eval`` is used so the file still parses on interpreters where the
    # ``@`` operator itself is a SyntaxError; when ``@`` is supported, the
    # NotImplemented return surfaces as TypeError — both paths are accepted.
    try:
        eval('2 @ a')
    except SyntaxError:
        pass
    except TypeError:
        pass
    try:
        eval('a @ 2')
    except SyntaxError:
        pass
    except TypeError:
        pass
def test_power():
raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)
A = ArithmeticOnlyMatrix([[2, 3], [4, 5]])
assert (A**5)[:] == (6140, 8097, 10796, 14237)
A = ArithmeticOnlyMatrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])
assert (A**3)[:] == (290, 262, 251, 448, 440, 368, 702, 954, 433)
assert A**0 == eye(3)
assert A**1 == A
assert (ArithmeticOnlyMatrix([[2]]) ** 100)[0, 0] == 2**100
assert ArithmeticOnlyMatrix([[1, 2], [3, 4]])**Integer(2) == ArithmeticOnlyMatrix([[7, 10], [15, 22]])
def test_neg():
    """Unary minus negates every entry."""
    vec = ArithmeticOnlyMatrix(1, 2, [1, 2])
    negated = ArithmeticOnlyMatrix(1, 2, [-1, -2])
    assert -vec == negated
def test_sub():
    """Subtracting a matrix from itself yields the zero matrix."""
    vec = ArithmeticOnlyMatrix(1, 2, [1, 2])
    zero = ArithmeticOnlyMatrix(1, 2, [0, 0])
    assert vec - vec == zero
def test_div():
    """Dividing by a scalar divides every entry exactly (Rational, not float)."""
    vec = ArithmeticOnlyMatrix(1, 2, [1, 2])
    halved = ArithmeticOnlyMatrix(1, 2, [S(1)/2, S(2)/2])
    assert vec/2 == halved
def test_det():
    """``det`` on the DeterminantOnlyMatrix mixin: error cases, base
    cases, and explicit algorithm selection via ``method=``."""
    # det() is only defined for square matrices
    a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
    raises(NonSquareMatrixError, lambda: a.det())
    # zero and identity matrices are the canonical base cases
    z = zeros_Determinant(2)
    ey = eye_Determinant(2)
    assert z.det() == 0
    assert ey.det() == 1
    x = Symbol('x')
    a = DeterminantOnlyMatrix(0,0,[])
    b = DeterminantOnlyMatrix(1,1,[5])
    c = DeterminantOnlyMatrix(2,2,[1,2,3,4])
    d = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,8])
    e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
    # NOTE(review): first line(s) of this comment appear truncated upstream;
    # presumably it said small sizes share one code path —
    # so there is no need to test all methods on smaller ones
    assert a.det() == 1
    assert b.det() == 5
    assert c.det() == -2
    assert d.det() == 3
    assert e.det() == 4*x - 24
    # every selectable algorithm must agree on the result
    assert e.det(method='bareiss') == 4*x - 24
    assert e.det(method='berkowitz') == 4*x - 24
    # a non-callable iszerofunc is rejected
    raises(ValueError, lambda: e.det(iszerofunc="test"))
def test_adjugate():
x = Symbol('x')
e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
adj = Matrix([
[ 4, -8, 4, 0],
[ 76, -14*x - 68, 14*x - 8, -4*x + 24],
[-122, 17*x + 142, -21*x + 4, 8*x - 48],
[ 48, -4*x - 72, 8*x, -4*x + 24]])
assert e.adjugate() == adj
assert e.adjugate(method='bareiss') == adj
assert e.adjugate(method='berkowitz') == adj
a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
raises(NonSquareMatrixError, lambda: a.adjugate())
def test_cofactor_and_minors():
x = Symbol('x')
e = DeterminantOnlyMatrix(4,4,[x,1,2,3,4,5,6,7,2,9,10,11,12,13,14,14])
m = Matrix([
[ x, 1, 3],
[ 2, 9, 11],
[12, 13, 14]])
cm = Matrix([
[ 4, 76, -122, 48],
[-8, -14*x - 68, 17*x + 142, -4*x - 72],
[ 4, 14*x - 8, -21*x + 4, 8*x],
[ 0, -4*x + 24, 8*x - 48, -4*x + 24]])
sub = Matrix([
[x, 1, 2],
[4, 5, 6],
[2, 9, 10]])
assert e.minor_submatrix(1,2) == m
assert e.minor_submatrix(-1,-1) == sub
assert e.minor(1,2) == -17*x - 142
assert e.cofactor(1,2) == 17*x + 142
assert e.cofactor_matrix() == cm
assert e.cofactor_matrix(method="bareiss") == cm
assert e.cofactor_matrix(method="berkowitz") == cm
raises(ValueError, lambda: e.cofactor(4,5))
raises(ValueError, lambda: e.minor(4,5))
raises(ValueError, lambda: e.minor_submatrix(4,5))
a = DeterminantOnlyMatrix(2,3,[1,2,3,4,5,6])
assert a.minor_submatrix(0,0) == Matrix([[5, 6]])
raises(ValueError, lambda: DeterminantOnlyMatrix(0,0,[]).minor_submatrix(0,0))
raises(NonSquareMatrixError, lambda: a.cofactor(0,0))
raises(NonSquareMatrixError, lambda: a.minor(0,0))
raises(NonSquareMatrixError, lambda: a.cofactor_matrix())
def test_charpoly():
x, y = Symbol('x'), Symbol('y')
m = DeterminantOnlyMatrix(3,3,[1,2,3,4,5,6,7,8,9])
assert eye_Determinant(3).charpoly(x) == Poly((x - 1)**3, x)
assert eye_Determinant(3).charpoly(y) == Poly((y - 1)**3, y)
assert m.charpoly() == Poly(x**3 - 15*x**2 - 18*x, x)
raises(NonSquareMatrixError, lambda: Matrix([[1], [2]]).charpoly())
# ReductionsOnlyMatrix tests
def test_row_op():
e = eye_Reductions(3)
raises(ValueError, lambda: e.elementary_row_op("abc"))
raises(ValueError, lambda: e.elementary_row_op())
raises(ValueError, lambda: e.elementary_row_op('n->kn', row=5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->kn', row=-5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=5))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=5, row2=1))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=-5, row2=1))
raises(ValueError, lambda: e.elementary_row_op('n<->m', row1=1, row2=-5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=5, row2=1, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=-5, row2=1, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=-5, k=5))
raises(ValueError, lambda: e.elementary_row_op('n->n+km', row1=1, row2=1, k=5))
# test various ways to set arguments
assert e.elementary_row_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", row=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n->kn", row1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", row1=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n<->m", row=0, row2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", 0, 5, 1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", row=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_row_op("n->n+km", row1=0, k=5, row2=1) == Matrix([[1, 5, 0], [0, 1, 0], [0, 0, 1]])
# make sure the matrix doesn't change size
a = ReductionsOnlyMatrix(2, 3, [0]*6)
assert a.elementary_row_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
assert a.elementary_row_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
assert a.elementary_row_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_col_op():
e = eye_Reductions(3)
raises(ValueError, lambda: e.elementary_col_op("abc"))
raises(ValueError, lambda: e.elementary_col_op())
raises(ValueError, lambda: e.elementary_col_op('n->kn', col=5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->kn', col=-5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=5))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=5, col2=1))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=-5, col2=1))
raises(ValueError, lambda: e.elementary_col_op('n<->m', col1=1, col2=-5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=5, col2=1, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=-5, col2=1, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=-5, k=5))
raises(ValueError, lambda: e.elementary_col_op('n->n+km', col1=1, col2=1, k=5))
assert e.elementary_col_op("n->kn", 0, 5) == Matrix([[5, 0, 0], [0, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", 1, 5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", col=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n->kn", col1=1, k=5) == Matrix([[1, 0, 0], [0, 5, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", 0, 1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", col1=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n<->m", col=0, col2=1) == Matrix([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", 0, 5, 1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", col=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
assert e.elementary_col_op("n->n+km", col1=0, k=5, col2=1) == Matrix([[1, 0, 0], [5, 1, 0], [0, 0, 1]])
a = ReductionsOnlyMatrix(2, 3, [0]*6)
assert a.elementary_col_op("n->kn", 1, 5) == Matrix(2, 3, [0]*6)
assert a.elementary_col_op("n<->m", 0, 1) == Matrix(2, 3, [0]*6)
assert a.elementary_col_op("n->n+km", 0, 5, 1) == Matrix(2, 3, [0]*6)
def test_is_echelon():
zro = zeros_Reductions(3)
ident = eye_Reductions(3)
assert zro.is_echelon
assert ident.is_echelon
a = ReductionsOnlyMatrix(0, 0, [])
assert a.is_echelon
a = ReductionsOnlyMatrix(2, 3, [3, 2, 1, 0, 0, 6])
assert a.is_echelon
a = ReductionsOnlyMatrix(2, 3, [0, 0, 6, 3, 2, 1])
assert not a.is_echelon
x = Symbol('x')
a = ReductionsOnlyMatrix(3, 1, [x, 0, 0])
assert a.is_echelon
a = ReductionsOnlyMatrix(3, 1, [x, x, 0])
assert not a.is_echelon
a = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
assert not a.is_echelon
def test_echelon_form():
# echelon form is not unique, but the result
# must be row-equivalent to the original matrix
# and it must be in echelon form.
a = zeros_Reductions(3)
e = eye_Reductions(3)
# we can assume the zero matrix and the identity matrix shouldn't change
assert a.echelon_form() == a
assert e.echelon_form() == e
a = ReductionsOnlyMatrix(0, 0, [])
assert a.echelon_form() == a
a = ReductionsOnlyMatrix(1, 1, [5])
assert a.echelon_form() == a
def verify_row_null_space(mat, rows, nulls):
for v in nulls:
assert all(t.is_zero for t in a_echelon*v)
for v in rows:
if not all(t.is_zero for t in v):
assert not all(t.is_zero for t in a_echelon*v.transpose())
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
nulls = [Matrix([
[ 1],
[-2],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 8])
nulls = []
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 2, 1, 3])
nulls = [Matrix([
[-S(1)/2],
[ 1],
[ 0]]),
Matrix([
[-S(3)/2],
[ 0],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [2, 1, 3, 0, 0, 0, 1, 1, 3])
nulls = [Matrix([
[ 0],
[ -3],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(3, 3, [0, 3, 3, 0, 2, 2, 0, 1, 1])
nulls = [Matrix([
[1],
[0],
[0]]),
Matrix([
[ 0],
[-1],
[ 1]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
a = ReductionsOnlyMatrix(2, 3, [2, 2, 3, 3, 3, 0])
nulls = [Matrix([
[-1],
[1],
[0]])]
rows = [a[i,:] for i in range(a.rows)]
a_echelon = a.echelon_form()
assert a_echelon.is_echelon
verify_row_null_space(a, rows, nulls)
def test_rref():
    """Reduced row echelon form: trivial sizes, ranks, pivots, symbolic entries."""
    # Empty and 1x1 matrices are already their own RREF.
    empty = ReductionsOnlyMatrix(0, 0, [])
    assert empty.rref(pivots=False) == empty
    unit = ReductionsOnlyMatrix(1, 1, [1])
    five = ReductionsOnlyMatrix(1, 1, [5])
    assert unit.rref(pivots=False) == five.rref(pivots=False) == unit
    # Single column and single row.
    col = ReductionsOnlyMatrix(3, 1, [1, 2, 3])
    assert col.rref(pivots=False) == Matrix([[1], [0], [0]])
    row = ReductionsOnlyMatrix(1, 3, [1, 2, 3])
    assert row.rref(pivots=False) == Matrix([[1, 2, 3]])
    # Rank-deficient 3x3: last row reduces to zero.
    m = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])
    assert m.rref(pivots=False) == Matrix([[1, 0, -1], [0, 1, 2], [0, 0, 0]])
    # Rank-1 matrices with the same row space share one RREF.
    full = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 1, 2, 3, 1, 2, 3])
    top = ReductionsOnlyMatrix(3, 3, [1, 2, 3, 0, 0, 0, 0, 0, 0])
    middle = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 1, 2, 3, 0, 0, 0])
    bottom = ReductionsOnlyMatrix(3, 3, [0, 0, 0, 0, 0, 0, 1, 2, 3])
    assert all(mm.rref(pivots=False) == top
               for mm in (full, top, middle, bottom))
    # Identity and zero matrices are fixed points of rref.
    ident = eye_Reductions(3)
    zero = zeros_Reductions(3)
    assert ident.rref(pivots=False) == ident
    assert zero.rref(pivots=False) == zero
    # Pivot column offsets are reported alongside the reduced matrix.
    m = ReductionsOnlyMatrix([
        [ 0, 0,  1,  2,  2, -5,  3],
        [-1, 5,  2,  2,  1, -7,  5],
        [ 0, 0, -2, -3, -3,  8, -5],
        [-1, 5,  0, -1, -2,  1,  0]])
    reduced, pivot_cols = m.rref()
    assert reduced == Matrix([
        [1, -5, 0, 0, 1,  1, -1],
        [0,  0, 1, 0, 0, -1,  1],
        [0,  0, 0, 1, 1, -2,  1],
        [0,  0, 0, 0, 0,  0,  0]])
    assert pivot_cols == (0, 2, 3)
    # Exact rational arithmetic keeps entries as Rationals.
    m = ReductionsOnlyMatrix([[S(1)/19, S(1)/5,  2,  3],
                              [      4,      5,  6,  7],
                              [      8,      9, 10, 11],
                              [     12,     13, 14, 15]])
    assert m.rref(pivots=False) == Matrix([
        [1, 0, 0, -S(76)/157],
        [0, 1, 0,  -S(5)/157],
        [0, 0, 1, S(238)/157],
        [0, 0, 0,          0]])
    # Symbolic entries: compare entrywise up to simplification.
    x = Symbol('x')
    m = ReductionsOnlyMatrix(2, 3, [x, 1, 1, sqrt(x), x, 1])
    expected = [1, 0, sqrt(x)*(-x + 1)/(-x**(S(5)/2) + x),
                0, 1, 1/(sqrt(x) + x + 1)]
    for got, want in zip(m.rref(pivots=False), expected):
        assert simplify(got - want).is_zero
def test_eye():
    """eye: identity construction honors size arguments and the ``cls`` override."""
    flat = [1, 0, 0, 1]
    assert list(SpecialOnlyMatrix.eye(2, 2)) == flat
    assert list(SpecialOnlyMatrix.eye(2)) == flat
    # Default result class is the calling class; ``cls=`` overrides it.
    assert type(SpecialOnlyMatrix.eye(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.eye(2, cls=Matrix)) == Matrix
def test_ones():
    """ones: all-ones construction honors shape arguments and the ``cls`` override."""
    flat = [1, 1, 1, 1]
    assert list(SpecialOnlyMatrix.ones(2, 2)) == flat
    assert list(SpecialOnlyMatrix.ones(2)) == flat
    assert SpecialOnlyMatrix.ones(2, 3) == Matrix([[1, 1, 1], [1, 1, 1]])
    # Default result class is the calling class; ``cls=`` overrides it.
    assert type(SpecialOnlyMatrix.ones(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.ones(2, cls=Matrix)) == Matrix
def test_zeros():
    """zeros: all-zero construction honors shape arguments and the ``cls`` override."""
    flat = [0, 0, 0, 0]
    assert list(SpecialOnlyMatrix.zeros(2, 2)) == flat
    assert list(SpecialOnlyMatrix.zeros(2)) == flat
    assert SpecialOnlyMatrix.zeros(2, 3) == Matrix([[0, 0, 0], [0, 0, 0]])
    # Default result class is the calling class; ``cls=`` overrides it.
    assert type(SpecialOnlyMatrix.zeros(2)) == SpecialOnlyMatrix
    assert type(SpecialOnlyMatrix.zeros(2, cls=Matrix)) == Matrix
def test_diag_make():
    """diag(): block-diagonal assembly from matrices, scalars and lists."""
    diag = SpecialOnlyMatrix.diag
    sym22 = Matrix([[1, 2], [2, 3]])
    b = Matrix([[3, x], [y, 3]])
    c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])
    # Square blocks are placed along the diagonal, zero-padded elsewhere.
    assert diag(sym22, b, b) == Matrix([
        [1, 2, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0],
        [0, 0, y, 3, 0, 0],
        [0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, y, 3]])
    assert diag(sym22, b, c) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 0, 0, 0],
        [0, 0, y, 3, 0, 0, 0],
        [0, 0, 0, 0, 3, x, 3],
        [0, 0, 0, 0, y, 3, z],
        [0, 0, 0, 0, x, y, z]])
    assert diag(sym22, c, b) == Matrix([
        [1, 2, 0, 0, 0, 0, 0],
        [2, 3, 0, 0, 0, 0, 0],
        [0, 0, 3, x, 3, 0, 0],
        [0, 0, y, 3, z, 0, 0],
        [0, 0, x, y, z, 0, 0],
        [0, 0, 0, 0, 0, 3, x],
        [0, 0, 0, 0, 0, y, 3]])
    # Mixed scalars, column vectors and rectangular blocks.
    vec = Matrix([x, y, z])
    blk = Matrix([[1, 2], [3, 4]])
    rowm = Matrix([[5, 6]])
    assert diag(vec, 7, blk, rowm) == Matrix([
        [x, 0, 0, 0, 0, 0],
        [y, 0, 0, 0, 0, 0],
        [z, 0, 0, 0, 0, 0],
        [0, 7, 0, 0, 0, 0],
        [0, 0, 1, 2, 0, 0],
        [0, 0, 3, 4, 0, 0],
        [0, 0, 0, 0, 5, 6]])
    # A rows= hint smaller than the assembled size is rejected.
    raises(ValueError, lambda: diag(vec, 7, blk, rowm, rows=5))
    # Scalars, with explicit zero-padding via rows=/cols=.
    assert diag(1) == Matrix([[1]])
    assert diag(1, rows=2) == Matrix([[1, 0], [0, 0]])
    assert diag(1, cols=2) == Matrix([[1, 0], [0, 0]])
    assert diag(1, rows=3, cols=2) == Matrix([[1, 0], [0, 0], [0, 0]])
    assert diag(*[2, 3]) == Matrix([[2, 0], [0, 3]])
    assert diag(Matrix([2, 3])) == Matrix([[2], [3]])
    # unpack=False treats the list elements as rows.
    assert diag([1, [2, 3], 4], unpack=False) == \
        diag([[1], [2, 3], [4]], unpack=False) == Matrix([
            [1, 0],
            [2, 3],
            [4, 0]])
    assert type(diag(1)) == SpecialOnlyMatrix
    assert type(diag(1, cls=Matrix)) == Matrix
    # Shapes of list / nested-list inputs under (un)packing.
    assert Matrix.diag([1, 2, 3]) == Matrix.diag(1, 2, 3)
    assert Matrix.diag([1, 2, 3], unpack=False).shape == (3, 1)
    assert Matrix.diag([[1, 2, 3]]).shape == (3, 1)
    assert Matrix.diag([[1, 2, 3]], unpack=False).shape == (1, 3)
    assert Matrix.diag([[[1, 2, 3]]]).shape == (1, 3)
    # Zero-dimension blocks still contribute their nonzero dimension.
    assert Matrix.diag(ones(0, 2), 1, 2) == Matrix([
        [0, 0, 1, 0],
        [0, 0, 0, 2]])
    assert Matrix.diag(ones(2, 0), 1, 2) == Matrix([
        [0, 0],
        [0, 0],
        [1, 0],
        [0, 2]])
def test_diagonal():
    """diagonal(): main and off-diagonals, result class, invalid offsets."""
    m = Matrix(3, 3, range(9))
    main = m.diagonal()
    assert main == m.diagonal(0)
    assert tuple(main) == (0, 4, 8)
    # Positive offsets select diagonals above the main one, negative below.
    assert tuple(m.diagonal(1)) == (1, 5)
    assert tuple(m.diagonal(-1)) == (3, 7)
    assert tuple(m.diagonal(2)) == (2,)
    # The result preserves the matrix class (dense vs sparse).
    assert type(m.diagonal()) == type(m)
    s = SparseMatrix(3, 3, {(1, 1): 1})
    assert type(s.diagonal()) == type(s)
    assert type(m) != type(s)
    # Out-of-range or non-integer offsets are rejected.
    raises(ValueError, lambda: m.diagonal(3))
    raises(ValueError, lambda: m.diagonal(-3))
    raises(ValueError, lambda: m.diagonal(pi))
def test_jordan_block():
    """jordan_block(): construction, band selection and deprecated rows=/cols=."""
    upper = Matrix([
        [2, 1, 0],
        [0, 2, 1],
        [0, 0, 2]])
    # All equivalent spellings of a 3x3 upper Jordan block for eigenvalue 2.
    assert SpecialOnlyMatrix.jordan_block(3, 2) == upper
    assert SpecialOnlyMatrix.jordan_block(3, eigenvalue=2) == upper
    assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == upper
    assert SpecialOnlyMatrix.jordan_block(3, 2, band='upper') == upper
    assert SpecialOnlyMatrix.jordan_block(size=3, eigenval=2, eigenvalue=2) == upper
    assert SpecialOnlyMatrix.jordan_block(3, 2, band='lower') == Matrix([
        [2, 0, 0],
        [1, 2, 0],
        [0, 1, 2]])
    # Missing or invalid arguments.
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(2))
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(3.5, 2))
    raises(ValueError, lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2))
    raises(ValueError,
           lambda: SpecialOnlyMatrix.jordan_block(eigenvalue=2, eigenval=4))
    # rows=/cols= alone are deprecated: they raise unless the deprecation
    # warning is expected via warns_deprecated_sympy().
    raises(SymPyDeprecationWarning,
           lambda: SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2))
    raises(SymPyDeprecationWarning,
           lambda: SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2))
    with warns_deprecated_sympy():
        assert SpecialOnlyMatrix.jordan_block(3, 2) == \
            SpecialOnlyMatrix.jordan_block(cols=3, eigenvalue=2) == \
            SpecialOnlyMatrix.jordan_block(rows=3, eigenvalue=2)
    with warns_deprecated_sympy():
        # Rectangular request pads with a zero row below the block.
        assert SpecialOnlyMatrix.jordan_block(rows=4, cols=3, eigenvalue=2) == \
            Matrix([
                [2, 1, 0],
                [0, 2, 1],
                [0, 0, 2],
                [0, 0, 0]])
    # ``eigenval`` is accepted as an alias for ``eigenvalue``.
    assert SpecialOnlyMatrix.jordan_block(size=3, eigenvalue=2) == \
        SpecialOnlyMatrix.jordan_block(size=3, eigenval=2)
def test_columnspace():
    """columnspace(): basis of pivot columns for a rank-3 matrix."""
    m = SubspaceOnlyMatrix([[ 1,  2, 0,  2,  5],
                            [-2, -5, 1, -1, -8],
                            [ 0, -3, 3,  4,  1],
                            [ 3,  6, 0, -7,  2]])
    basis = m.columnspace()
    assert len(basis) == 3
    assert basis[0] == Matrix([1, -2, 0, 3])
    assert basis[1] == Matrix([2, -5, -3, 6])
    assert basis[2] == Matrix([2, -1, 4, -7])
    # Appending the basis columns to m does not enlarge the column space.
    assert Matrix.hstack(m, *basis).columnspace() == basis
def test_rowspace():
    """rowspace(): nonzero echelon-form rows form the row-space basis."""
    m = SubspaceOnlyMatrix([[ 1,  2, 0,  2,  5],
                            [-2, -5, 1, -1, -8],
                            [ 0, -3, 3,  4,  1],
                            [ 3,  6, 0, -7,  2]])
    basis = m.rowspace()
    assert len(basis) == 3
    assert basis[0] == Matrix([[1, 2, 0, 2, 5]])
    assert basis[1] == Matrix([[0, -1, 1, 3, 2]])
    assert basis[2] == Matrix([[0, 0, 0, 5, 5]])
def test_nullspace():
    """nullspace(): basis vectors are annihilated by the matrix."""
    m = SubspaceOnlyMatrix([[ 1,  2, 0,  2,  5],
                            [-2, -5, 1, -1, -8],
                            [ 0, -3, 3,  4,  1],
                            [ 3,  6, 0, -7,  2]])
    basis = m.nullspace()
    assert basis[0] == Matrix([-2, 1, 1, 0, 0])
    assert basis[1] == Matrix([-1, -1, 0, -1, 1])
    # Each basis vector lies in the kernel of m.
    for v in basis[:2]:
        assert all(entry.is_zero for entry in m*v)
def test_orthogonalize():
    """Gram-Schmidt orthogonalization, with and without normalization."""
    m = Matrix([[1, 2], [3, 4]])
    v21 = Matrix([[2], [1]])
    # A single vector is returned unchanged; normalize= rescales it.
    assert m.orthogonalize(v21) == [v21]
    assert m.orthogonalize(v21, normalize=True) == \
        [Matrix([[2*sqrt(5)/5], [sqrt(5)/5]])]
    assert m.orthogonalize(Matrix([[1], [2]]), Matrix([[-1], [4]])) == \
        [Matrix([[1], [2]]), Matrix([[-S(12)/5], [S(6)/5]])]
    # Zero vectors are dropped from the result.
    zero = Matrix([[0], [0]])
    assert m.orthogonalize(zero, Matrix([[-1], [4]])) == [Matrix([[-1], [4]])]
    assert m.orthogonalize(zero) == []
    # Linearly dependent inputs yield only the independent directions.
    n = Matrix([[9, 1, 9], [3, 6, 10], [8, 5, 2]])
    vecs = [Matrix([[-5], [1]]), Matrix([[-5], [2]]), Matrix([[-5], [-2]])]
    assert n.orthogonalize(*vecs) == \
        [Matrix([[-5], [1]]), Matrix([[S(5)/26], [S(25)/26]])]
def test_eigenvals():
    """eigenvals() on a solvable matrix; unsolvable cases raise MatrixError."""
    M = EigenOnlyMatrix([[0, 1, 1],
                         [1, 0, 0],
                         [1, 1, 1]])
    assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}
    # A matrix whose eigenvalues cannot be obtained here raises.
    hard = Matrix([
        [3,  0,  0, 0, -3],
        [0, -3, -3, 0,  3],
        [0,  3,  0, 3,  0],
        [0,  0,  3, 0,  3],
        [3,  0,  0, 3,  0]])
    raises(MatrixError, lambda: hard.eigenvals())
def test_eigenvects():
    """Each right eigenvector satisfies M*v == lambda*v."""
    M = EigenOnlyMatrix([[0, 1, 1],
                         [1, 0, 0],
                         [1, 1, 1]])
    for eigenval, multiplicity, basis in M.eigenvects():
        assert len(basis) == 1
        v = basis[0]
        assert M*v == eigenval*v
def test_left_eigenvects():
    """Each left eigenvector satisfies v*M == lambda*v."""
    M = EigenOnlyMatrix([[0, 1, 1],
                         [1, 0, 0],
                         [1, 1, 1]])
    for eigenval, multiplicity, basis in M.left_eigenvects():
        assert len(basis) == 1
        v = basis[0]
        assert v*M == eigenval*v
def test_diagonalize():
    """diagonalize(): complex eigenvalues, float entries and reals_only."""
    rot = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
    # The rotation matrix has eigenvalues +/-I, so a real diagonalization fails.
    raises(MatrixError, lambda: rot.diagonalize(reals_only=True))
    P, D = rot.diagonalize()
    assert D.is_diagonal()
    assert D == Matrix([
        [-I, 0],
        [ 0, I]])
    # Float entries stay Float in both P and D.
    m = EigenOnlyMatrix(2, 2, [0, .5, .5, 0])
    P, D = m.diagonalize()
    assert all(isinstance(e, Float) for e in D.values())
    assert all(isinstance(e, Float) for e in P.values())
    # reals_only succeeds here and yields the same diagonal.
    _, D2 = m.diagonalize(reals_only=True)
    assert D == D2
def test_is_diagonalizable():
    """Symmetric matrices are diagonalizable; defective ones are not."""
    a, b, c = symbols('a b c')
    sym = EigenOnlyMatrix(2, 2, [a, c, c, b])
    assert sym.is_symmetric()
    assert sym.is_diagonalizable()
    # A nontrivial Jordan block is not diagonalizable.
    assert not EigenOnlyMatrix(2, 2, [1, 1, 0, 1]).is_diagonalizable()
    # The rotation matrix is diagonalizable over C but not over R.
    rot = EigenOnlyMatrix(2, 2, [0, -1, 1, 0])
    assert rot.is_diagonalizable()
    assert not rot.is_diagonalizable(reals_only=True)
def test_jordan_form():
    """jordan_form(): Jordan matrices, similarity transform and error cases."""
    # Non-square input is rejected.
    rect = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])
    raises(NonSquareMatrixError, lambda: rect.jordan_form())
    # Matrices already in Jordan form are their own J.
    block31 = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
                                     0, 2, 1, 0,
                                     0, 0, 2, 0,
                                     0, 0, 0, 2])
    P, J = block31.jordan_form()
    assert block31 == J
    block22 = EigenOnlyMatrix(4, 4, [2, 1, 0, 0,
                                     0, 2, 0, 0,
                                     0, 0, 2, 1,
                                     0, 0, 0, 2])
    P, J = block22.jordan_form()
    assert block22 == J
    # Complex eigenvalues: P*J*P^-1 reconstructs A.
    A = Matrix([[ 2, 4,  1, 0],
                [-4, 2,  0, 1],
                [ 0, 0,  2, 4],
                [ 0, 0, -4, 2]])
    P, J = A.jordan_form()
    assert simplify(P*J*P.inv()) == A
    # 1x1 matrices, with and without computing the transform.
    assert EigenOnlyMatrix(1, 1, [1]).jordan_form() == (Matrix([1]), Matrix([1]))
    assert EigenOnlyMatrix(1, 1, [1]).jordan_form(calc_transform=False) == Matrix([1])
    # Matrices whose eigenvalues cannot be computed here raise MatrixError.
    hard = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0],
                   [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])
    raises(MatrixError, lambda: hard.jordan_form())
    # Float input produces Float (or exactly zero) entries in P and J.
    fm = Matrix([
        [               0.6875, 0.125 + 0.1875*sqrt(3)],
        [0.125 + 0.1875*sqrt(3),                 0.3125]])
    P, J = fm.jordan_form()
    assert all(isinstance(entry, Float) or entry == 0 for entry in P)
    assert all(isinstance(entry, Float) or entry == 0 for entry in J)
def test_singular_values():
    """singular_values(): exact values, symbolic entries and rectangular input."""
    x = Symbol('x', real=True)
    A = EigenOnlyMatrix([[0, 1*I], [2, 0]])
    assert A.singular_values() == [2, 1]
    # Diagonal with a symbol: singular values are |diagonal entries|.
    A = eye(3)
    A[1, 1] = x
    A[2, 2] = 5
    assert set(A.singular_values()) == set([5, 1, Abs(x)])
    # A rotation-like matrix has both singular values equal to 1.
    A = EigenOnlyMatrix([[sin(x), cos(x)], [-cos(x), sin(x)]])
    assert [sv.trigsimp() for sv in A.singular_values()] == [S(1), S(1)]
    # Rectangular: A and A.T share nonzero singular values; A.T pads zeros.
    A = EigenOnlyMatrix([
        [2, 4],
        [1, 3],
        [0, 0],
        [0, 0]])
    nonzero = [sqrt(sqrt(221) + 15), sqrt(15 - sqrt(221))]
    assert A.singular_values() == nonzero
    assert A.T.singular_values() == nonzero + [0, 0]
@XFAIL
def test_diff():
    """Elementwise derivative of a symbolic column vector (known failure)."""
    x, y = symbols('x y')
    vec = CalculusOnlyMatrix(2, 1, [x, y])
    assert vec.diff(x) == Matrix(2, 1, [1, 0])
def test_integrate():
    """Elementwise integration with respect to one symbol."""
    x, y = symbols('x y')
    vec = CalculusOnlyMatrix(2, 1, [x, y])
    expected = Matrix(2, 1, [x**2/2, y*x])
    assert vec.integrate(x) == expected
def test_jacobian2():
    """jacobian(): polar-style map, plus TypeError for non-vector arguments."""
    rho, phi = symbols("rho,phi")
    X = CalculusOnlyMatrix(3, 1, [rho*cos(phi), rho*sin(phi), rho**2])
    Y = CalculusOnlyMatrix(2, 1, [rho, phi])
    expected = Matrix([
        [cos(phi), -rho*sin(phi)],
        [sin(phi),  rho*cos(phi)],
        [   2*rho,             0]])
    assert X.jacobian(Y) == expected
    # Both arguments must be vectors (single row or column).
    square = CalculusOnlyMatrix(2, 2, [1, 2, 3, 4])
    col4 = CalculusOnlyMatrix(4, 1, [1, 2, 3, 4])
    raises(TypeError, lambda: square.jacobian(Matrix([1, 2])))
    raises(TypeError, lambda: col4.jacobian(square))
def test_limit():
    """Elementwise limit as the symbol approaches a point."""
    x, y = symbols('x y')
    vec = CalculusOnlyMatrix(2, 1, [1/x, y])
    expected = Matrix(2, 1, [S(1)/5, y])
    assert vec.limit(x, 5) == expected
def test_issue_13774():
    """Matrix * plain-list (and list * Matrix) must raise TypeError (issue 13774)."""
    M = Matrix(3, 3, list(range(1, 10)))
    v = [1, 1, 1]
    raises(TypeError, lambda: M*v)
    raises(TypeError, lambda: v*M)
def test___eq__():
    """A matrix never compares equal to a non-matrix such as a dict."""
    m = EigenOnlyMatrix([[0, 1, 1],
                         [1, 0, 0],
                         [1, 1, 1]])
    assert (m == {}) is False
| true | true |
f71502012c2112fc320b40aba0ee9fe0ae69053c | 4,289 | py | Python | azure-batch/azure/batch/models/subtask_information.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | azure-batch/azure/batch/models/subtask_information.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | azure-batch/azure/batch/models/subtask_information.py | HydAu/AzureSDKForPython | 5cbe34e9e0b8ea1faacc9f205633ccc0b885c0f3 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubtaskInformation(Model):
    """
    Information about an Azure Batch subtask.

    :param id: The id of the subtask.
    :type id: int
    :param node_info: Information about the compute node on which the subtask
     ran.
    :type node_info: :class:`ComputeNodeInformation
     <azure.batch.models.ComputeNodeInformation>`
    :param start_time: The time at which the subtask started running. If the
     subtask has been restarted or retried, this is the most recent time at
     which the subtask started running.
    :type start_time: datetime
    :param end_time: The time at which the subtask completed. This property
     is set only if the subtask is in the Completed state.
    :type end_time: datetime
    :param exit_code: The exit code of the subtask. This property is set only
     if the subtask is in the Completed state.
    :type exit_code: int
    :param scheduling_error: Details of any error encountered scheduling the
     subtask.
    :type scheduling_error: :class:`TaskSchedulingError
     <azure.batch.models.TaskSchedulingError>`
    :param state: The current state of the subtask. Possible values include:
     'active', 'preparing', 'running', 'completed'
    :type state: str or :class:`TaskState <azure.batch.models.TaskState>`
    :param state_transition_time: The time at which the subtask entered its
     current state.
    :type state_transition_time: datetime
    :param previous_state: The previous state of the subtask. This property
     is not set if the subtask is in its initial Active state. Possible
     values include: 'active', 'preparing', 'running', 'completed'
    :type previous_state: str or :class:`TaskState
     <azure.batch.models.TaskState>`
    :param previous_state_transition_time: The time at which the subtask
     entered its previous state. This property is not set if the subtask is
     in its initial Active state.
    :type previous_state_transition_time: datetime
    """

    # Maps each attribute to its JSON wire key and msrest type; timestamps
    # are (de)serialized as ISO-8601 strings by the msrest machinery.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'exit_code': {'key': 'exitCode', 'type': 'int'},
        'scheduling_error': {'key': 'schedulingError', 'type': 'TaskSchedulingError'},
        'state': {'key': 'state', 'type': 'TaskState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'TaskState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
    }

    def __init__(self, id=None, node_info=None, start_time=None, end_time=None, exit_code=None, scheduling_error=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None):
        # Generated model: plain attribute assignment only; validation and
        # serialization are driven by ``_attribute_map`` via msrest's Model.
        self.id = id
        self.node_info = node_info
        self.start_time = start_time
        self.end_time = end_time
        self.exit_code = exit_code
        self.scheduling_error = scheduling_error
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
| 47.655556 | 217 | 0.683143 |
from msrest.serialization import Model
class SubtaskInformation(Model):
    """Information about an Azure Batch subtask.

    All constructor arguments are optional; ``_attribute_map`` drives
    msrest (de)serialization by mapping each attribute to its JSON wire
    key and type (timestamps as ISO-8601, states as ``TaskState``).
    """

    # Attribute-name -> {wire key, msrest type} mapping used by the
    # msrest ``Model`` base class for JSON (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'int'},
        'node_info': {'key': 'nodeInfo', 'type': 'ComputeNodeInformation'},
        'start_time': {'key': 'startTime', 'type': 'iso-8601'},
        'end_time': {'key': 'endTime', 'type': 'iso-8601'},
        'exit_code': {'key': 'exitCode', 'type': 'int'},
        'scheduling_error': {'key': 'schedulingError', 'type': 'TaskSchedulingError'},
        'state': {'key': 'state', 'type': 'TaskState'},
        'state_transition_time': {'key': 'stateTransitionTime', 'type': 'iso-8601'},
        'previous_state': {'key': 'previousState', 'type': 'TaskState'},
        'previous_state_transition_time': {'key': 'previousStateTransitionTime', 'type': 'iso-8601'},
    }

    def __init__(self, id=None, node_info=None, start_time=None, end_time=None, exit_code=None, scheduling_error=None, state=None, state_transition_time=None, previous_state=None, previous_state_transition_time=None):
        # Plain attribute assignment; serialization is handled by msrest.
        self.id = id
        self.node_info = node_info
        self.start_time = start_time
        self.end_time = end_time
        self.exit_code = exit_code
        self.scheduling_error = scheduling_error
        self.state = state
        self.state_transition_time = state_transition_time
        self.previous_state = previous_state
        self.previous_state_transition_time = previous_state_transition_time
| true | true |
f71502d262586243fcb871571f56d5965f4c4430 | 1,805 | py | Python | misc/logger.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 23 | 2019-02-27T06:20:15.000Z | 2022-03-31T22:54:11.000Z | misc/logger.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 38 | 2019-03-03T17:35:39.000Z | 2021-08-23T20:43:34.000Z | misc/logger.py | abraker95/ultimate_osu_analyzer | bea58c997d13c3f461ccbe682f52799f0f88fdea | [
"MIT"
] | 4 | 2020-03-30T20:43:14.000Z | 2022-03-06T19:40:15.000Z | import logging
import traceback
import config
import pathlib
class Logger(logging.getLoggerClass()):
    """Logger that mirrors records to the console and a per-name log file.

    The console handler logs DEBUG when 'db' appears in
    ``config.runtime_mode`` and INFO otherwise; the file handler always
    records INFO and above to ``config.log_path / '<name>.log'``.
    """
    # Fixes: the final line of this class was corrupted by fused metadata
    # (``self.debug(msg) | 29.112903 | ...``), a syntax error; the dead
    # commented-out ``error``/``critical`` overrides have been removed.

    def __init__(self, name, level=logging.NOTSET):
        # The logger itself accepts everything (DEBUG); the handlers filter.
        super().__init__(name, level=logging.DEBUG)
        formatter = logging.Formatter('%(levelname)s  %(asctime)s   [ %(name)s ] %(message)s')

        self.sh = logging.StreamHandler()
        self.sh.setFormatter(formatter)
        if 'db' in config.runtime_mode:
            self.sh.setLevel(logging.DEBUG)
        else:
            self.sh.setLevel(logging.INFO)
        self.addHandler(self.sh)

        # \TODO: Maybe break up the logging file if it goes over 1MB
        #   get file size
        #   if over 1MB, then rename current logging file to '{start_date}_{end_date}_{logger_name}.log'
        #   cut-paste into logging folder named '{logger_name}'
        self.fh = logging.FileHandler(str(config.log_path / (name + '.log')))
        self.fh.setFormatter(formatter)
        self.fh.setLevel(logging.INFO)
        self.addHandler(self.fh)

    def __del__(self):
        # Detach and close both handlers so the log file handle is released.
        self.sh.close(); self.removeHandler(self.sh)
        self.fh.close(); self.removeHandler(self.fh)

    def exception(self, msg):
        """Log *msg* at ERROR level with the active traceback appended."""
        msg = msg.strip()
        msg += '\n' + traceback.format_exc()
        self.error(msg)

    def testbench(self, msg):
        """Emit a DEBUG message only when running in testbench ('tb') mode."""
        if 'tb' not in config.runtime_mode: return
        self.debug(msg)
import traceback
import config
import pathlib
class Logger(logging.getLoggerClass()):
    """Console + per-name-file logger.

    Console verbosity follows ``config.runtime_mode`` ('db' -> DEBUG,
    otherwise INFO); the file handler writes INFO+ records to
    ``config.log_path / '<name>.log'``.
    """
    # Fixes: the last line was corrupted by fused metadata
    # (``self.debug(msg) | true | true |``), which is a syntax error.

    def __init__(self, name, level=logging.NOTSET):
        # Accept everything at the logger level; handlers do the filtering.
        super().__init__(name, level=logging.DEBUG)
        formatter = logging.Formatter('%(levelname)s  %(asctime)s   [ %(name)s ] %(message)s')

        self.sh = logging.StreamHandler()
        self.sh.setFormatter(formatter)
        if 'db' in config.runtime_mode:
            self.sh.setLevel(logging.DEBUG)
        else:
            self.sh.setLevel(logging.INFO)
        self.addHandler(self.sh)

        self.fh = logging.FileHandler(str(config.log_path / (name + '.log')))
        self.fh.setFormatter(formatter)
        self.fh.setLevel(logging.INFO)
        self.addHandler(self.fh)

    def __del__(self):
        # Close and detach handlers to release the log-file handle.
        self.sh.close(); self.removeHandler(self.sh)
        self.fh.close(); self.removeHandler(self.fh)

    def exception(self, msg):
        """Log *msg* at ERROR level with the current traceback appended."""
        msg = msg.strip()
        msg += '\n' + traceback.format_exc()
        self.error(msg)

    def testbench(self, msg):
        """Emit a DEBUG message only in testbench ('tb') runtime mode."""
        if 'tb' not in config.runtime_mode: return
        self.debug(msg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.