hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf012147e574c984bc9c9887fc3ca72b19da557 | 19,794 | py | Python | mlrun/runtimes/utils.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/utils.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/utils.py | daniels290813/mlrun | c95d90c3b4ce78d9b71456333ccd201f932d60ea | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import re
import typing
from copy import deepcopy
from io import StringIO
from sys import stderr
import pandas as pd
from kubernetes import client
import mlrun
import mlrun.builder
import mlrun.utils.regex
from mlrun.api.utils.clients import nuclio
from mlrun.db import get_run_db
from mlrun.frameworks.parallel_coordinates import gen_pcp_plot
from mlrun.k8s_utils import get_k8s_helper
from mlrun.runtimes.constants import MPIJobCRDVersions
from ..artifacts import TableArtifact
from ..config import config
from ..utils import get_in, helpers, logger, verify_field_regex
from .generators import selector
class RunError(Exception):
    """Raised when executing or monitoring a run fails."""
mlrun_key = "mlrun/"
class _ContextStore:
def __init__(self):
self._context = None
def get(self):
return self._context
def set(self, context):
self._context = context
# Process-wide singleton sharing the active execution context.
global_context = _ContextStore()

# Lazily-populated caches (None until first resolution) so the heavy
# k8s/API lookups below run at most once per process.
cached_mpijob_crd_version = None
cached_nuclio_version = None
# resolve mpijob runtime according to the mpi-operator's supported crd-version
# if specified on mlrun config set it likewise,
# if not specified, try resolving it according to the mpi-operator, otherwise set to default
# since this is a heavy operation (sending requests to k8s/API), and it's unlikely that the crd version
# will change in any context - cache it
def resolve_mpijob_crd_version(api_context=False):
    """Resolve which mpijob CRD version to use, caching the result globally.

    Resolution order: mlrun config override -> label of the running
    mpi-operator pod (when inside a k8s cluster) -> server-populated config
    (client context only) -> framework default.

    :param api_context: when True, skip the client-side DB connect fallback
    :raises ValueError: if the resolved version is not a supported one
    """
    global cached_mpijob_crd_version
    if not cached_mpijob_crd_version:
        # config override everything
        mpijob_crd_version = config.mpijob_crd_version
        if not mpijob_crd_version:
            in_k8s_cluster = get_k8s_helper(
                silent=True
            ).is_running_inside_kubernetes_cluster()
            if in_k8s_cluster:
                k8s_helper = get_k8s_helper()
                namespace = k8s_helper.resolve_namespace()
                # try resolving according to mpi-operator that's running
                res = k8s_helper.list_pods(
                    namespace=namespace, selector="component=mpi-operator"
                )
                if len(res) > 0:
                    mpi_operator_pod = res[0]
                    mpijob_crd_version = mpi_operator_pod.metadata.labels.get(
                        "crd-version"
                    )
            elif not in_k8s_cluster and not api_context:
                # connect will populate the config from the server config
                # TODO: something nicer
                get_run_db()
                mpijob_crd_version = config.mpijob_crd_version
        # If resolution failed simply use default
        if not mpijob_crd_version:
            mpijob_crd_version = MPIJobCRDVersions.default()
        if mpijob_crd_version not in MPIJobCRDVersions.all():
            raise ValueError(
                f"unsupported mpijob crd version: {mpijob_crd_version}. "
                f"supported versions: {MPIJobCRDVersions.all()}"
            )
        cached_mpijob_crd_version = mpijob_crd_version
    return cached_mpijob_crd_version
def resolve_spark_operator_version():
    """Return the spark operator's major version (2 or 3) parsed from config.

    :raises ValueError: if ``config.spark_operator_version`` cannot be parsed
        (missing, malformed, or not matching ``spark-2``/``spark-3``).
    """
    try:
        regex = re.compile("spark-([23])")
        return int(regex.findall(config.spark_operator_version)[0])
    except Exception as exc:
        # Chain the original failure (bad config value, no regex match, ...)
        # instead of discarding it, so the root cause stays in the traceback.
        raise ValueError("Failed to resolve spark operator's version") from exc
# if nuclio version specified on mlrun config set it likewise,
# if not specified, get it from nuclio api client
# since this is a heavy operation (sending requests to API), and it's unlikely that the version
# will change - cache it (this means if we upgrade nuclio, we need to restart mlrun to re-fetch the new version)
def resolve_nuclio_version():
    """Resolve the nuclio version (config override, else dashboard API), cached globally.

    Returns None-ish (the unresolved value) when neither the config nor the
    dashboard provides a version; failures to reach the dashboard are only
    logged, never raised.
    """
    global cached_nuclio_version
    if not cached_nuclio_version:
        # config override everything
        nuclio_version = config.nuclio_version
        if not nuclio_version and config.nuclio_dashboard_url:
            try:
                nuclio_client = nuclio.Client()
                nuclio_version = nuclio_client.get_dashboard_version()
            except Exception as exc:
                logger.warning("Failed to resolve nuclio version", exc=str(exc))
        cached_nuclio_version = nuclio_version
    return cached_nuclio_version
def calc_hash(func, tag=""):
    """Compute a deterministic sha1 hex digest of a function's spec.

    Tag, existing hash, status and update-time are blanked before hashing so
    logically identical functions hash identically; the fields are restored
    afterwards (the hash field is set to the new digest).
    """
    # remove tag, hash, date from calculation
    tag = tag or func.metadata.tag
    status = func.status
    func.metadata.tag = ""
    func.metadata.hash = ""
    func.status = None
    func.metadata.updated = None
    # sort_keys makes the serialization (and therefore the digest) stable
    data = json.dumps(func.to_dict(), sort_keys=True).encode()
    h = hashlib.sha1()
    h.update(data)
    hashkey = h.hexdigest()
    func.metadata.tag = tag
    func.metadata.hash = hashkey
    func.status = status
    return hashkey
def log_std(db, runobj, out, err="", skip=False, show=True, silent=False):
    """Print/store a run's captured stdout and handle its stderr.

    ``out`` is optionally printed and appended to the run's stored log in
    ``db`` (unless ``skip``). A non-empty ``err`` is logged, echoed to
    stderr and, unless ``silent``, re-raised as a RunError.
    """
    if out:
        iteration = runobj.metadata.iteration
        if iteration:
            # separator banner so hyper-param iteration logs are distinguishable
            line = "> " + "-" * 15 + f" Iteration: ({iteration}) " + "-" * 15 + "\n"
            out = line + out
        if show:
            print(out, flush=True)
        if db and not skip:
            uid = runobj.metadata.uid
            project = runobj.metadata.project or ""
            db.store_log(uid, project, out.encode(), append=True)
    if err:
        logger.error(f"exec error - {err}")
        print(err, file=stderr)
        if not silent:
            raise RunError(err)
class AsyncLogWriter:
    """File-like writer streaming run log chunks into the mlrun DB."""

    def __init__(self, db, runobj):
        self.db = db
        self.uid = runobj.metadata.uid
        self.project = runobj.metadata.project or ""
        self.iter = runobj.metadata.iteration

    def write(self, data):
        # Append the chunk to the run's stored log; no-op without a DB.
        if self.db:
            self.db.store_log(self.uid, self.project, data, append=True)

    def flush(self):
        # todo: verify writes are large enough, if not cache and use flush
        pass
def add_code_metadata(path=""):
    """Return ``"<remote-url>#<commit-sha>"`` for the git repo containing ``path``.

    Returns None when ``path`` is a remote URL, when gitpython is not
    installed, or when ``path`` is not inside a usable git repo with remotes.
    """
    if path:
        if "://" in path:
            # remote/URL paths carry no local git metadata
            return None
        if os.path.isfile(path):
            path = os.path.dirname(path)
    path = path or "./"
    try:
        from git import (
            GitCommandNotFound,
            InvalidGitRepositoryError,
            NoSuchPathError,
            Repo,
        )
    except ImportError:
        # gitpython is an optional dependency
        return None
    try:
        repo = Repo(path, search_parent_directories=True)
        remotes = [remote.url for remote in repo.remotes]
        if len(remotes) > 0:
            return f"{remotes[0]}#{repo.head.commit.hexsha}"
    except (GitCommandNotFound, InvalidGitRepositoryError, NoSuchPathError, ValueError):
        pass
    return None
def set_if_none(struct, key, value):
    """Assign ``value`` under ``key`` when the current entry is missing or falsy."""
    current = struct.get(key)
    if not current:
        struct[key] = value
def results_to_iter(results, runspec, execution):
    """Aggregate per-iteration task results of a hyper-param run.

    Builds a summary table from the task dicts, logs the best iteration
    (per the run's selector), stores iteration artifacts and updates the
    execution state. Returns ``(summary, df)`` when no ``runspec`` is given,
    otherwise None.
    """
    if not results:
        logger.error("got an empty results list in to_iter")
        return
    iter = []
    failed = 0
    running = 0
    for task in results:
        if task:
            state = get_in(task, ["status", "state"])
            id = get_in(task, ["metadata", "iteration"])
            struct = {
                "param": get_in(task, ["spec", "parameters"], {}),
                "output": get_in(task, ["status", "results"], {}),
                "state": state,
                "iter": id,
            }
            if state == "error":
                failed += 1
                err = get_in(task, ["status", "error"], "")
                logger.error(f"error in task {execution.uid}:{id} - {err}")
            elif state != "completed":
                running += 1
            iter.append(struct)
    if not iter:
        execution.set_state("completed", commit=True)
        logger.warning("warning!, zero iteration results")
        return
    if hasattr(pd, "json_normalize"):
        df = pd.json_normalize(iter).sort_values("iter")
    else:
        # older pandas versions expose json_normalize under pd.io.json
        df = pd.io.json.json_normalize(iter).sort_values("iter")
    header = df.columns.values.tolist()
    summary = [header] + df.values.tolist()
    if not runspec:
        return summary, df
    criteria = runspec.spec.hyper_param_options.selector
    item, id = selector(results, criteria)
    if runspec.spec.selector and not id:
        logger.warning(
            f"no best result selected, check selector ({criteria}) or results"
        )
    if id:
        logger.info(f"best iteration={id}, used criteria {criteria}")
    task = results[item] if id and results else None
    execution.log_iteration_results(id, summary, task)
    log_iter_artifacts(execution, df, header)
    if failed:
        execution.set_state(
            error=f"{failed} of {len(results)} tasks failed, check logs in db for details",
            commit=False,
        )
    elif running == 0:
        execution.set_state("completed", commit=False)
    execution.commit()
def log_iter_artifacts(execution, df, header):
    """Log the iteration results table (CSV) and a parallel-coordinates plot as artifacts."""
    csv_buffer = StringIO()
    df.to_csv(csv_buffer, index=False, line_terminator="\n", encoding="utf-8")
    try:
        # may fail due to lack of access credentials to the artifacts store
        execution.log_artifact(
            TableArtifact(
                "iteration_results",
                body=csv_buffer.getvalue(),
                header=header,
                viewer="table",
            ),
            local_path="iteration_results.csv",
        )
        # may also fail due to missing plotly
        execution.log_artifact(
            "parallel_coordinates",
            body=gen_pcp_plot(df, index_col="iter"),
            local_path="parallel_coordinates.html",
        )
    except Exception as exc:
        # artifact logging is best-effort; never fail the run over it
        logger.warning(f"failed to log iter artifacts, {exc}")
def resolve_function_image_name(function, image: typing.Optional[str] = None) -> str:
    """Return the target image name for a function build.

    When ``image`` is given it is validated against the registries that
    enforce a name prefix and returned unchanged; otherwise a default name
    is generated from the function's project/name/tag.

    :raises mlrun.errors.MLRunInvalidArgumentError: if ``image`` targets an
        enforcing registry but lacks the required prefix
    """
    project = function.metadata.project or config.default_project
    name = function.metadata.name
    tag = function.metadata.tag or "latest"
    if image:
        image_name_prefix = resolve_function_target_image_name_prefix(project, name)
        registries_to_enforce_prefix = (
            resolve_function_target_image_registries_to_enforce_prefix()
        )
        for registry in registries_to_enforce_prefix:
            if image.startswith(registry):
                prefix_with_registry = f"{registry}{image_name_prefix}"
                if not image.startswith(prefix_with_registry):
                    raise mlrun.errors.MLRunInvalidArgumentError(
                        f"Configured registry enforces image name to start with this prefix: {image_name_prefix}"
                    )
        return image
    return generate_function_image_name(project, name, tag)
def generate_function_image_name(project: str, name: str, tag: str) -> str:
    """Generate the default image name for a function in the configured docker repository."""
    _, repository = helpers.get_parsed_docker_registry()
    repository = helpers.get_docker_repository_or_default(repository)
    return fill_function_image_name_template(
        mlrun.builder.IMAGE_NAME_ENRICH_REGISTRY_PREFIX, repository, project, name, tag
    )
def fill_function_image_name_template(
    registry: str,
    repository: str,
    project: str,
    name: str,
    tag: str,
) -> str:
    """Compose ``<registry><repository>/<prefix>:<tag>`` for a function image."""
    image_name_prefix = resolve_function_target_image_name_prefix(project, name)
    return f"{registry}{repository}/{image_name_prefix}:{tag}"
def resolve_function_target_image_name_prefix(project: str, name: str):
    """Format the configured image-name prefix template with project and function name."""
    return config.httpdb.builder.function_target_image_name_prefix_template.format(
        project=project, name=name
    )
def resolve_function_target_image_registries_to_enforce_prefix():
    """List registry prefixes under which image names must keep the enforced name prefix."""
    registry, repository = helpers.get_parsed_docker_registry()
    repository = helpers.get_docker_repository_or_default(repository)
    return [
        f"{mlrun.builder.IMAGE_NAME_ENRICH_REGISTRY_PREFIX}{repository}/",
        f"{registry}/{repository}/",
    ]
def set_named_item(obj, item):
    """Store ``item`` in mapping ``obj`` keyed by its name.

    ``item`` may be a dict holding a ``"name"`` entry or an object with a
    ``name`` attribute.
    """
    key = item["name"] if isinstance(item, dict) else item.name
    obj[key] = item
def get_item_name(item, attr="name"):
    """Fetch key/attribute ``attr`` from a dict-like or object ``item`` (None if absent)."""
    if isinstance(item, dict):
        return item.get(attr)
    return getattr(item, attr, None)
def apply_kfp(modify, cop, runtime):
    """Apply a Kubeflow-Pipelines modifier to an mlrun ``runtime``.

    Runs ``modify`` on the container-op ``cop`` and then copies the
    resulting pod labels/annotations, env vars, volumes and mounts onto the
    runtime spec, clearing them from ``cop`` once transferred.
    """
    modify(cop)
    # Have to do it here to avoid circular dependencies
    from .pod import AutoMountType

    if AutoMountType.is_auto_modifier(modify):
        runtime.spec.disable_auto_mount = True
    api = client.ApiClient()
    for k, v in cop.pod_labels.items():
        runtime.metadata.labels[k] = v
    for k, v in cop.pod_annotations.items():
        runtime.metadata.annotations[k] = v
    if cop.container.env:
        # runtime env entries may be k8s objects or plain dicts
        env_names = [
            e.name if hasattr(e, "name") else e["name"] for e in runtime.spec.env
        ]
        for e in api.sanitize_for_serialization(cop.container.env):
            name = e["name"]
            if name in env_names:
                # override an existing env var in place
                runtime.spec.env[env_names.index(name)] = e
            else:
                runtime.spec.env.append(e)
                env_names.append(name)
        cop.container.env.clear()
    if cop.volumes and cop.container.volume_mounts:
        vols = api.sanitize_for_serialization(cop.volumes)
        mounts = api.sanitize_for_serialization(cop.container.volume_mounts)
        runtime.spec.update_vols_and_mounts(vols, mounts)
        cop.volumes.clear()
        cop.container.volume_mounts.clear()
    return runtime
def get_resource_labels(function, run=None, scrape_metrics=None):
    """Build the mlrun k8s label dict for a function's pods (optionally run-scoped)."""
    scrape_metrics = (
        scrape_metrics if scrape_metrics is not None else config.scrape_metrics
    )
    run_uid, run_name, run_project, run_owner = None, None, None, None
    if run:
        run_uid = run.metadata.uid
        run_name = run.metadata.name
        run_project = run.metadata.project
        run_owner = run.metadata.labels.get("owner")
    # start from a copy so the function's own labels are never mutated
    labels = deepcopy(function.metadata.labels)
    labels[mlrun_key + "class"] = function.kind
    labels[mlrun_key + "project"] = run_project or function.metadata.project
    labels[mlrun_key + "function"] = str(function.metadata.name)
    labels[mlrun_key + "tag"] = str(function.metadata.tag or "latest")
    labels[mlrun_key + "scrape-metrics"] = str(scrape_metrics)
    if run_uid:
        labels[mlrun_key + "uid"] = run_uid
    if run_name:
        labels[mlrun_key + "name"] = run_name
    if run_owner:
        labels[mlrun_key + "owner"] = run_owner
    return labels
def verify_limits(
    resources_field_name,
    mem=None,
    cpu=None,
    gpus=None,
    gpu_type="nvidia.com/gpu",
):
    """Validate k8s limit quantities and return the resources dict for them.

    :raises: whatever ``verify_field_regex`` raises for invalid quantities
    """
    if mem:
        verify_field_regex(
            f"function.spec.{resources_field_name}.limits.memory",
            mem,
            mlrun.utils.regex.k8s_resource_quantity_regex,
        )
    if cpu:
        verify_field_regex(
            f"function.spec.{resources_field_name}.limits.cpu",
            cpu,
            mlrun.utils.regex.k8s_resource_quantity_regex,
        )
    # https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
    if gpus:
        verify_field_regex(
            f"function.spec.{resources_field_name}.limits.gpus",
            gpus,
            mlrun.utils.regex.k8s_resource_quantity_regex,
        )
    return generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type)
def verify_requests(
    resources_field_name,
    mem=None,
    cpu=None,
):
    """Validate k8s request quantities and return the resources dict for them.

    Note: requests never carry gpus (gpus are limit-only in k8s).
    :raises: whatever ``verify_field_regex`` raises for invalid quantities
    """
    if mem:
        verify_field_regex(
            f"function.spec.{resources_field_name}.requests.memory",
            mem,
            mlrun.utils.regex.k8s_resource_quantity_regex,
        )
    if cpu:
        verify_field_regex(
            f"function.spec.{resources_field_name}.requests.cpu",
            cpu,
            mlrun.utils.regex.k8s_resource_quantity_regex,
        )
    return generate_resources(mem=mem, cpu=cpu)
def generate_resources(mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
    """get pod cpu/memory/gpu resources dict"""
    resources = {}
    # insertion order matches the original: gpu, then memory, then cpu
    for key, amount in ((gpu_type, gpus), ("memory", mem), ("cpu", cpu)):
        if amount:
            resources[key] = amount
    return resources
def get_func_selector(project, name=None, tag=None):
    """Build the k8s label-selector strings identifying a function's resources.

    Always includes the project label; function and tag labels are added
    only when a name is given (tag defaults to "latest").
    """
    selectors = [f"{mlrun_key}project={project}"]
    if name:
        selectors.append(f"{mlrun_key}function={name}")
        selectors.append(f"{mlrun_key}tag={tag or 'latest'}")
    return selectors
def parse_function_selector(selector: typing.List[str]) -> typing.Tuple[str, str, str]:
    """Parse label-selector strings back into ``(project, name, tag)``.

    Inverse of ``get_func_selector``; any component missing from the
    selector list is returned as None.
    """
    project, name, tag = None, None, None
    for criteria in selector:
        # Bug fix: slicing with the prefix *string* (criteria[prefix:]) raises
        # TypeError — slice indices must be integers, so use len(prefix).
        project_prefix = f"{mlrun_key}project="
        if project_prefix in criteria:
            project = criteria[len(project_prefix):]
        function_prefix = f"{mlrun_key}function="
        if function_prefix in criteria:
            name = criteria[len(function_prefix):]
        tag_prefix = f"{mlrun_key}tag="
        if tag_prefix in criteria:
            tag = criteria[len(tag_prefix):]
    return project, name, tag
class k8s_resource:
    """Base interface for runtime-specific kubernetes resource handlers.

    Subclasses override the lifecycle hooks below; every default
    implementation is a no-op returning the documented empty value.
    """

    kind = ""
    per_run = False
    per_function = False
    k8client = None

    def deploy_function(self, function):
        """Deploy the given function (no-op in the base class)."""

    def release_function(self, function):
        """Release resources held by the function (no-op in the base class)."""

    def submit_run(self, function, runobj):
        """Submit a run for the function (no-op in the base class)."""

    def get_object(self, name, namespace=None):
        """Return the underlying k8s object, or None when unavailable."""
        return None

    def get_status(self, name, namespace=None):
        """Return the object's status, or None when unavailable."""
        return None

    def del_object(self, name, namespace=None):
        """Delete the k8s object (no-op in the base class)."""

    def get_pods(self, name, namespace=None, master=False):
        """Return a mapping of pods belonging to the object (empty by default)."""
        return {}
def enrich_function_from_dict(function, function_dict):
    """Override selected spec/metadata attributes of ``function`` from a dict.

    ``function_dict`` is materialized into a temporary function object; every
    non-empty override value is merged into ``function`` with
    attribute-specific semantics (env merged by name, volumes merged,
    volume mounts replaced wholesale, etc.). Returns the enriched function.
    """
    override_function = mlrun.new_function(runtime=function_dict, kind=function.kind)
    for attribute in [
        "volumes",
        "volume_mounts",
        "env",
        "resources",
        "image_pull_policy",
        "replicas",
        "node_name",
        "node_selector",
        "affinity",
        "priority_class_name",
        "credentials",
    ]:
        # credentials lives on metadata, everything else on spec
        if attribute == "credentials":
            override_value = getattr(override_function.metadata, attribute, None)
        else:
            override_value = getattr(override_function.spec, attribute, None)
        if override_value:
            if attribute == "env":
                for env_dict in override_value:
                    if env_dict.get("value") is not None:
                        function.set_env(env_dict["name"], env_dict["value"])
                    else:
                        function.set_env(
                            env_dict["name"],
                            value_from=env_dict["valueFrom"],
                        )
            elif attribute == "volumes":
                function.spec.update_vols_and_mounts(override_value, [])
            elif attribute == "volume_mounts":
                # volume mounts don't have a well defined identifier (like name for volume) so we can't merge,
                # only override
                function.spec.volume_mounts = override_value
            elif attribute == "resources":
                # don't override if there are limits and requests but both are empty
                if override_value.get("limits", {}) or override_value.get(
                    "requests", {}
                ):
                    setattr(function.spec, attribute, override_value)
            elif attribute == "credentials":
                if any(override_value.to_dict().values()):
                    function.metadata.credentials = override_value
            else:
                setattr(function.spec, attribute, override_value)
    return function
| 32.44918 | 113 | 0.632313 |
acf0130c67742bada1ff1ed05d6bb9ce5a1f1def | 3,480 | py | Python | wsu/tools/simx/simx/python/simx/act/time_control.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | 1 | 2020-02-28T20:35:09.000Z | 2020-02-28T20:35:09.000Z | wsu/tools/simx/simx/python/simx/act/time_control.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | wsu/tools/simx/simx/python/simx/act/time_control.py | tinyos-io/tinyos-3.x-contrib | 3aaf036722a2afc0c0aad588459a5c3e00bd3c01 | [
"BSD-3-Clause",
"MIT"
] | null | null | null | from time import sleep
FOREVER = 2 ** 63
class Remote(object):
    """
    Wrapper to control TossymSync and running events.
    """
    # NOTE(review): this module uses ``long`` (see to_sim_time), so it
    # targets Python 2 only.
    __slots__ = ['tossim', 'sync', 'on_start', 'on_stop',
                 'is_running', 'stop_time', 'event',
                 'clock_mul']
    def __init__(self, tossim, tossim_sync, tossim_event):
        self.tossim = tossim
        self.sync = tossim_sync
        self.event = tossim_event
        self.clock_mul = tossim_sync.getClockMul() # 0 means run unbounded
        self.is_running = False
        self.stop_time = 0
        # overridable callbacks fired on run/stop transitions
        self.on_start = lambda: None
        self.on_stop = lambda: None
        self.run_forever()
    def to_sim_time(self, time_str):
        """
        Convert string of hh:mm:ss.fract into tossim time units.
        If the string starts with a +, adds relative to the current
        sim time.
        """
        # PST-- is there a cleaner way?
        if isinstance(time_str, (int, long)):
            return time_str
        tps = self.tossim.ticksPerSecond()
        t = 0
        # relative time?
        if time_str[0] == "+":
            time_str = time_str[1:]
            t = self.tossim.time()
        # can't combine below, imagine when fractional part not
        # supplied
        fract_split = time_str.split(".", 1)
        if len(fract_split) > 1:
            t += int(float("." + fract_split[1]) * tps)
        # 0 to 3 (hh:mm:ss) components supported
        hms_split = fract_split[0].split(":")
        for part, mul in zip(reversed(hms_split),
                             [1*tps, 60*tps, 3600*tps]):
            t += int(part) * mul
        return t
    def set_clock_mul(self, clock_mul):
        """
        Set the clock multipler, as a float.
        If clock_mul is <= 0 the simulation will "run at full speed".
        """
        self.sync.setClockMul(float(clock_mul))
        self.clock_mul = self.sync.getClockMul()
        return self.clock_mul
    def get_clock_mul(self):
        """
        Get the clock multiplier, as a float.
        Returns 0 if the clock multiplier is being ignored.
        """
        return 0 if not self.clock_mul else self.sync.getClockMul()
    def run_until(self, time):
        """
        Run until the specified simulation time.
        The time is parsed as with to_sim_time.
        """
        # every re-start should resync to world-time
        self.sync.synchronize()
        if not time:
            # stop-at 0 appears to mean "no stop time" — TODO confirm
            self.sync.setStopAt(0)
        else:
            sim_time = self.to_sim_time(time)
            self.sync.setStopAt(sim_time)
        self.is_running = True
        self.on_start()
    def run_forever(self):
        """
        Start/continue simulation.
        """
        self.run_until(0)
    def stop(self):
        """
        Stop the simulation.
        """
        # -1 stop-at makes runNextEventInTime report "at stop time"
        self.sync.setStopAt(-1)
        self.is_running = False
        self.on_stop()
    def time(self):
        """
        Current simulation time.
        """
        return self.tossim.time()
    def run_sim(self):
        """
        Run (or don't run) a simulation cycle based on state.
        """
        res = self.sync.runNextEventInTime()
        if res == -1: # "at stop time"
            if self.is_running:
                # transitioned to non-running
                self.stop()
            else:
                # minimize CPU-thrashing while stopped
                sleep(0.0005)
        return res
| 25.035971 | 74 | 0.537644 |
acf01345b7410f5086edeb8c798507878d126bf8 | 1,783 | py | Python | data/p4VQE/R4/benchmark/startCirq247.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startCirq247.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | data/p4VQE/R4/benchmark/startCirq247.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=11
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
    """Build the fixed benchmark circuit on ``input_qubit`` plus a final measurement.

    ``n`` is unused; the gate sequence is hard-coded (note the two trailing
    X gates on qubit 0 cancel each other).
    """
    c = cirq.Circuit() # circuit begin
    c.append(cirq.H.on(input_qubit[0])) # number=1
    c.append(cirq.H.on(input_qubit[1])) # number=2
    c.append(cirq.H.on(input_qubit[2])) # number=3
    c.append(cirq.X.on(input_qubit[2])) # number=6
    c.append(cirq.H.on(input_qubit[3])) # number=4
    c.append(cirq.Y.on(input_qubit[3])) # number=5
    c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
    c.append(cirq.X.on(input_qubit[0])) # number=9
    c.append(cirq.X.on(input_qubit[0])) # number=10
    # circuit end
    c.append(cirq.measure(*input_qubit, key='result'))
    return c
def bitstring(bits):
    """Render an iterable of bit values as a digit string (e.g. [1, 0] -> "10")."""
    digits = [str(int(bit)) for bit in bits]
    return "".join(digits)
if __name__ == '__main__':
    # Build the 4-qubit benchmark circuit, optimize it for the Sycamore
    # sqrt-iswap gate set, sample it, and dump the histogram + circuit.
    qubit_count = 4

    input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
    circuit = make_circuit(qubit_count,input_qubits)
    circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')

    circuit_sample_count =2000
    simulator = cirq.Simulator()
    result = simulator.run(circuit, repetitions=circuit_sample_count)

    frequencies = result.histogram(key='result', fold_func=bitstring)
    writefile = open("../data/startCirq247.csv","w+")
    print(format(frequencies),file=writefile)
    print("results end", file=writefile)

    print(circuit.__len__(), file=writefile)
    print(circuit,file=writefile)
writefile.close() | 27.859375 | 77 | 0.689849 |
acf013c03305c73717eb3a05f03a72777e33693c | 11,772 | py | Python | theseus/core/tests/test_cost_function.py | jeffin07/theseus | 3498bbddf9cca740c2703d0c1aa3a78a7264cb15 | [
"MIT"
] | null | null | null | theseus/core/tests/test_cost_function.py | jeffin07/theseus | 3498bbddf9cca740c2703d0c1aa3a78a7264cb15 | [
"MIT"
] | null | null | null | theseus/core/tests/test_cost_function.py | jeffin07/theseus | 3498bbddf9cca740c2703d0c1aa3a78a7264cb15 | [
"MIT"
] | null | null | null | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import numpy as np
import pytest # noqa: F401
import torch
import theseus as th
from .common import (
MockCostFunction,
MockCostWeight,
MockVar,
check_another_theseus_function_is_copy,
create_mock_cost_functions,
)
def test_copy():
    """copy() and deepcopy must clone both the cost function and its weight (with _copy names)."""
    cost_weight = MockCostWeight(th.Variable(torch.ones(1)))
    data = torch.ones(1, 1)
    cost_functions, *_ = create_mock_cost_functions(data=data, cost_weight=cost_weight)

    for cost_function in cost_functions:
        cost_function.weight = cost_weight

        def _check_new_cost_function(new_cost_function):
            check_another_theseus_function_is_copy(
                cost_function, new_cost_function, new_name=f"{cost_function.name}_copy"
            )
            check_another_theseus_function_is_copy(
                cost_weight,
                new_cost_function.weight,
                new_name=f"{cost_weight.name}_copy",
            )

        _check_new_cost_function(cost_function.copy())
        _check_new_cost_function(copy.deepcopy(cost_function))
def test_default_name_and_ids():
    """Auto-generated names follow "MockCostFunction__<id>" and ids are unique across instances."""
    reps = 100
    seen_ids = set()
    for i in range(reps):
        cost_function = MockCostFunction([], [], MockCostWeight(torch.ones(1)))
        cost_function_name = f"MockCostFunction__{cost_function._id}"
        seen_ids.add(cost_function._id)
        assert cost_function.name == cost_function_name
    # all ids distinct -> the set kept every one of them
    assert len(seen_ids) == reps
def test_autodiff_cost_function_error_and_jacobians_shape():
    """Randomized check of AutoDiffCostFunction error values and jacobian shapes.

    Also verifies that constructing with zero optimization variables raises.
    """
    for i in range(100):
        num_optim_vars = np.random.randint(0, 5)
        num_aux_vars = np.random.randint(0, 5)
        batch_size = np.random.randint(1, 10)
        err_dim = np.random.randint(1, 5)

        optim_vars = []
        aux_vars = []
        # one scalar per variable; variable dims grow as 1, 2, 3, ...
        variable_values = torch.randn(num_optim_vars + num_aux_vars)
        idx = 0
        for i in range(num_optim_vars):
            optim_vars.append(
                MockVar(
                    idx + 1,
                    data=torch.ones(batch_size, idx + 1) * variable_values[idx],
                    name=f"optim_var_{i}",
                )
            )
            idx += 1
        for i in range(num_aux_vars):
            aux_vars.append(
                MockVar(
                    idx + 1,
                    data=torch.ones(batch_size, idx + 1) * variable_values[idx],
                    name=f"aux_var_{i}",
                )
            )
            idx += 1
        cost_weight = MockCostWeight(torch.ones(1, 1))

        # checks that the right number of optimization variables is passed
        # checks that the variable values are correct
        # returns the sum of the first elements of each tensor, which should be the
        # same as the sum of variables_values
        def error_fn(optim_vars, aux_vars):
            assert isinstance(optim_vars, tuple)
            assert len(optim_vars) == num_optim_vars
            assert len(aux_vars) == num_aux_vars
            ret_val = torch.zeros(batch_size, err_dim)
            all_vars = optim_vars + aux_vars
            vals = []
            for i, arg in enumerate(all_vars):
                assert isinstance(arg, th.Variable)
                assert arg.shape == (batch_size, i + 1)
                assert arg.data.allclose(variable_values[i] * torch.ones_like(arg.data))
                vals.append(arg[0, 0])
            return ret_val + torch.Tensor(vals).sum()

        # this checks that 0 optimization variables is not allowed
        if len(optim_vars) < 1:
            with pytest.raises(ValueError):
                th.AutoDiffCostFunction(
                    optim_vars,
                    error_fn,
                    1,
                    cost_weight=cost_weight,
                    aux_vars=aux_vars,
                )
        else:
            # check that the error function returns the correct value
            cost_function = th.AutoDiffCostFunction(
                optim_vars,
                error_fn,
                err_dim,
                cost_weight=cost_weight,
                aux_vars=aux_vars,
            )
            err = cost_function.error()
            assert err.allclose(variable_values.sum() * torch.ones(batch_size, err_dim))

            # Now checking the jacobians
            jacobians, err_jac = cost_function.jacobians()
            assert err_jac.allclose(err)
            assert len(jacobians) == num_optim_vars
            for i in range(num_optim_vars):
                # variable dim is i + 1 (see MockVar creation line)
                assert jacobians[i].shape == (batch_size, err_dim, i + 1)
def test_autodiff_cost_function_cost_weight():
    """AutoDiffCostFunction defaults to ScaleCostWeight(1) and honors a custom cost weight."""
    batch_size = 10
    optim_vars = []
    aux_vars = []
    for i in range(5):
        optim_vars.append(
            MockVar(
                1,
                data=torch.ones(batch_size, 1) * torch.randn(1),
                name=f"optim_var_{i}",
            )
        )
        aux_vars.append(
            MockVar(
                1,
                data=torch.ones(batch_size, 1) * torch.randn(1),
                name=f"aux_var_{i}",
            )
        )

    def error_fn(optim_vars, aux_vars):
        return torch.ones(batch_size, 1)

    # test verifying default CostWeight
    cost_function = th.AutoDiffCostFunction(
        optim_vars,
        error_fn,
        1,
        aux_vars=aux_vars,
    )
    assert type(cost_function.weight).__name__ == "ScaleCostWeight"
    assert torch.allclose(cost_function.weight.scale.data, torch.ones(1, 1))
    weighted_error = cost_function.weighted_error()
    assert torch.allclose(weighted_error, torch.ones(batch_size, 1))
    # test overriding default CostWeight
    for i in range(10):
        cost_weight_value = torch.randn(1, 1)
        cost_weight = MockCostWeight(cost_weight_value)
        cost_function = th.AutoDiffCostFunction(
            optim_vars,
            error_fn,
            1,
            cost_weight=cost_weight,
            aux_vars=aux_vars,
        )
        assert torch.allclose(cost_function.weight.the_data, cost_weight_value)
        weighted_error = cost_function.weighted_error()
        # MockCostWeight multiplies the error by its stored value
        direct_error_computation = cost_weight_value * torch.ones(batch_size, 1)
        assert torch.allclose(weighted_error, direct_error_computation)
def test_autodiff_cost_function_to():
    """.to(dtype=...) must convert the cost function's internal vars after optim vars change dtype."""
    batch_size = 10
    optim_vars = []
    aux_vars = []
    for i in range(5):
        optim_vars.append(
            MockVar(
                1,
                data=torch.ones(batch_size, 1) * torch.randn(1),
                name=f"optim_var_{i}",
            )
        )
        aux_vars.append(
            MockVar(
                1,
                data=torch.ones(batch_size, 1) * torch.randn(1),
                name=f"aux_var_{i}",
            )
        )

    def error_fn(optim_vars, aux_vars):
        res = 0
        for var in optim_vars:
            res += var.data
        return res

    # test verifying default CostWeight
    cost_function = th.AutoDiffCostFunction(
        optim_vars,
        error_fn,
        1,
        aux_vars=aux_vars,
    )
    for var in optim_vars:
        var.to(dtype=torch.double)
    # This fails because internal vars of the cost function have not been converted
    # to double
    with pytest.raises(ValueError):
        cost_function.jacobians()
    cost_function.to(dtype=torch.double)
    # after converting the cost function itself, jacobians succeed
    cost_function.jacobians()
def test_autodiff_cost_function_error_and_jacobians_shape_on_SO3():
    """Jacobians for SO3 optimization variables must have shape (batch, err_dim, 3)."""
    for i in range(100):
        num_vars = np.random.randint(0, 5)
        batch_size = np.random.randint(1, 10)
        err_dim = 3

        optim_vars = []
        aux_vars = []
        idx = 0
        for i in range(num_vars):
            optim_vars.append(th.SO3.rand(batch_size, dtype=torch.float64))
            idx += 1
        for i in range(num_vars):
            aux_vars.append(th.Point3.rand(batch_size, dtype=torch.float64))
            idx += 1
        cost_weight = MockCostWeight(torch.ones(1, 1))

        def error_fn(optim_vars, aux_vars):
            assert isinstance(optim_vars, tuple)
            assert len(optim_vars) == num_vars
            assert len(aux_vars) == num_vars
            ret_val = torch.zeros(batch_size, err_dim)
            # error = sum of each rotation applied to its paired point
            for optim_var, aux_var in zip(optim_vars, aux_vars):
                ret_val += th.SO3(data=optim_var.data).rotate(aux_var).data
            return ret_val

        # this checks that 0 optimization variables is not allowed
        if len(optim_vars) < 1:
            with pytest.raises(ValueError):
                th.AutoDiffCostFunction(
                    optim_vars,
                    error_fn,
                    1,
                    cost_weight=cost_weight,
                    aux_vars=aux_vars,
                )
        else:
            # check that the error function returns the correct value
            cost_function = th.AutoDiffCostFunction(
                optim_vars,
                error_fn,
                err_dim,
                cost_weight=cost_weight,
                aux_vars=aux_vars,
            )
            err = cost_function.error()

            # Now checking the jacobians
            jacobians, err_jac = cost_function.jacobians()
            assert err_jac.allclose(err)
            assert len(jacobians) == num_vars
            for i in range(num_vars):
                # SO3 tangent space is 3-dimensional
                assert jacobians[i].shape == (batch_size, err_dim, 3)
def test_autodiff_cost_function_error_and_jacobians_value_on_SO3():
    """Autodiff jacobians on SO3 must match the analytic jacobians from SO3.rotate."""
    for i in range(100):
        num_vars = np.random.randint(0, 5)
        batch_size = np.random.randint(1, 10)
        err_dim = 3

        optim_vars = []
        aux_vars = []
        idx = 0
        for i in range(num_vars):
            optim_vars.append(th.SO3.rand(batch_size, dtype=torch.float64))
            idx += 1
        for i in range(num_vars):
            aux_vars.append(th.Point3.rand(batch_size, dtype=torch.float64))
            idx += 1
        cost_weight = MockCostWeight(torch.ones(1, 1, dtype=torch.float64))

        def error_fn(optim_vars, aux_vars):
            assert isinstance(optim_vars, tuple)
            assert len(optim_vars) == num_vars
            assert len(aux_vars) == num_vars
            ret_val = torch.zeros(batch_size, err_dim, dtype=torch.float64)
            # error = sum of each rotation applied to its paired point
            for optim_var, aux_var in zip(optim_vars, aux_vars):
                ret_val += th.SO3(data=optim_var.data).rotate(aux_var).data
            return ret_val

        # this checks that 0 optimization variables is not allowed
        if len(optim_vars) < 1:
            with pytest.raises(ValueError):
                th.AutoDiffCostFunction(
                    optim_vars,
                    error_fn,
                    1,
                    cost_weight=cost_weight,
                    aux_vars=aux_vars,
                )
        else:
            # check that the error function returns the correct value
            cost_function = th.AutoDiffCostFunction(
                optim_vars,
                error_fn,
                err_dim,
                cost_weight=cost_weight,
                aux_vars=aux_vars,
            )
            jac_actual, err_actual = cost_function.jacobians()

            err_expected = torch.zeros(batch_size, 3, dtype=torch.float64)
            for n in torch.arange(num_vars):
                jac = []
                # rotate() fills ``jac`` with the analytic jacobian
                err_expected += optim_vars[n].rotate(aux_vars[n], jacobians=jac).data
                assert torch.allclose(jac_actual[n], jac[0])

            assert torch.allclose(err_actual, err_expected)
| 33.730659 | 88 | 0.574839 |
acf013c8cad2f527cbaf7c018d4c08172b506e0a | 781 | py | Python | segmenter/zipsrc.py | trane293/nlp-project | 1db71a5cf2572b4a80245a3a545d43cb0e778a84 | [
"MIT"
] | null | null | null | segmenter/zipsrc.py | trane293/nlp-project | 1db71a5cf2572b4a80245a3a545d43cb0e778a84 | [
"MIT"
] | null | null | null | segmenter/zipsrc.py | trane293/nlp-project | 1db71a5cf2572b4a80245a3a545d43cb0e778a84 | [
"MIT"
] | 1 | 2021-01-27T01:20:00.000Z | 2021-01-27T01:20:00.000Z | """
Run:
python zipsrc.py
This will create a file `source.zip` which you can upload to Coursys (courses.cs.sfu.ca) as your submission.
To customize the files used by default, run:
python zipsrc.py -h
"""
import sys, os, optparse, shutil
if __name__ == '__main__':
optparser = optparse.OptionParser()
optparser.add_option("-a", "--answerdir", dest="answer_dir", default='answer', help="answer directory containing your source files")
optparser.add_option("-z", "--zipfile", dest="zipfile", default='source', help="zip file you should upload to Coursys (courses.cs.sfu.ca)")
(opts, _) = optparser.parse_args()
outputs_zipfile = shutil.make_archive(opts.zipfile, 'zip', opts.answer_dir)
print >>sys.stderr, "{0} created".format(outputs_zipfile)
| 33.956522 | 143 | 0.705506 |
acf014292d446ed8b6bd6fed4b7a74d48ff9ede8 | 4,009 | py | Python | pirates/effects/HitStar.py | ksmit799/POTCO-PS | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 8 | 2017-01-24T04:33:29.000Z | 2020-11-01T08:36:24.000Z | pirates/effects/HitStar.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 1 | 2017-03-02T18:05:17.000Z | 2017-03-14T06:47:10.000Z | pirates/effects/HitStar.py | ksmit799/Pirates-Online-Remake | 520d38935ae8df4b452c733a82c94dddac01e275 | [
"Apache-2.0"
] | 11 | 2017-03-02T18:46:07.000Z | 2020-11-01T08:36:26.000Z | # File: H (Python 2.4)
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from EffectController import EffectController
from PooledEffect import PooledEffect
import random
class HitStar(PooledEffect, EffectController):
cardScale = 64.0
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
model = loader.loadModel('models/effects/particleMaps')
self.card = model.find('**/effectCandle')
self.setDepthWrite(0)
self.setLightOff()
self.setFogOff()
self.setColorScaleOff()
self.setBillboardPointEye(1.0)
self.f = ParticleEffect.ParticleEffect('HitStar')
self.f.reparentTo(self)
self.p0 = Particles.Particles('particles-1')
self.p0.setFactory('ZSpinParticleFactory')
self.p0.setRenderer('SpriteParticleRenderer')
self.p0.setEmitter('SphereSurfaceEmitter')
self.f.addParticles(self.p0)
self.p0.setPoolSize(32)
self.p0.setBirthRate(0.02)
self.p0.setLitterSize(32)
self.p0.setLitterSpread(0)
self.p0.setSystemLifespan(0.0)
self.p0.setLocalVelocityFlag(1)
self.p0.setSystemGrowsOlderFlag(0)
self.p0.factory.setLifespanBase(0.20000000000000001)
self.p0.factory.setLifespanSpread(0.050000000000000003)
self.p0.factory.setMassBase(1.0)
self.p0.factory.setMassSpread(0.0)
self.p0.factory.setTerminalVelocityBase(400.0)
self.p0.factory.setTerminalVelocitySpread(0.0)
self.p0.factory.setInitialAngle(0.0)
self.p0.factory.setInitialAngleSpread(360.0)
self.p0.factory.enableAngularVelocity(1)
self.p0.factory.setAngularVelocity(0.0)
self.p0.factory.setAngularVelocitySpread(0.0)
self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
self.p0.renderer.setUserAlpha(0.5)
self.p0.renderer.setFromNode(self.card)
self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
self.p0.renderer.setXScaleFlag(1)
self.p0.renderer.setYScaleFlag(1)
self.p0.renderer.setAnimAngleFlag(1)
self.p0.renderer.setInitialXScale(0.0001 * self.cardScale)
self.p0.renderer.setFinalXScale(0.01 * self.cardScale)
self.p0.renderer.setInitialYScale(0.00050000000000000001 * self.cardScale)
self.p0.renderer.setFinalYScale(0.059999999999999998 * self.cardScale)
self.p0.renderer.setNonanimatedTheta(0.0)
self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
self.p0.renderer.setAlphaDisable(0)
self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingColor, ColorBlendAttrib.OOneMinusIncomingAlpha)
self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
self.p0.emitter.setAmplitude(0.0)
self.p0.emitter.setAmplitudeSpread(0.0)
self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setExplicitLaunchVector(Vec3(0.0, 0.0, 0.0))
self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
self.p0.emitter.setRadius(0.0001)
def createTrack(self):
self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.02), Func(self.p0.clearToInitial), Func(self.f.start, self, self), Func(self.f.reparentTo, self))
self.endEffect = Sequence(Func(self.p0.setBirthRate, 2.0), Wait(1.5), Func(self.cleanUpEffect))
self.track = Sequence(self.startEffect, Wait(0.20000000000000001), self.endEffect)
def play(self):
if self.p0:
self.createTrack()
self.track.start()
def cleanUpEffect(self):
EffectController.cleanUpEffect(self)
self.checkInEffect(self)
def destroy(self):
EffectController.destroy(self)
PooledEffect.destroy(self)
| 41.329897 | 162 | 0.695685 |
acf0154cc4882d907a75e839cdf86832bde1cc74 | 389 | py | Python | api/tacticalrmm/clients/migrations/0002_auto_20200531_2058.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 903 | 2019-10-22T22:56:42.000Z | 2022-03-18T14:15:54.000Z | api/tacticalrmm/clients/migrations/0002_auto_20200531_2058.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 720 | 2019-12-07T08:11:26.000Z | 2022-03-17T21:47:04.000Z | api/tacticalrmm/clients/migrations/0002_auto_20200531_2058.py | infinite8co/tacticalrmm | bd7ce5417ec672552ec3cba325318795ccde972e | [
"MIT"
] | 231 | 2020-02-11T14:14:21.000Z | 2022-03-16T21:23:10.000Z | # Generated by Django 3.0.6 on 2020-05-31 20:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated by Django 3.0.6 on 2020-05-31.

    Alters ``Client.client`` to a unique CharField capped at 255 chars.
    """
    # Must run after the app's initial migration.
    dependencies = [
        ("clients", "0001_initial"),
    ]
    operations = [
        migrations.AlterField(
            model_name="client",
            name="client",
            field=models.CharField(max_length=255, unique=True),
        ),
    ]
| 20.473684 | 64 | 0.59383 |
acf0157573e39388af5e4198db49af4326aad37c | 3,079 | py | Python | game.py | anykk/reversi | e1bf2bceeff2f19fc21aed967f4330a38965ee6f | [
"MIT"
] | null | null | null | game.py | anykk/reversi | e1bf2bceeff2f19fc21aed967f4330a38965ee6f | [
"MIT"
] | null | null | null | game.py | anykk/reversi | e1bf2bceeff2f19fc21aed967f4330a38965ee6f | [
"MIT"
] | null | null | null | from exceptions import IllegalArgumentError
EXTRA = 'E'
BLACK = 'X'
WHITE = 'O'
EMPTY = '.'
class Field:
"""Class which represents game field."""
_DIRECTIONS = ((0, 1), (1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1))
def __init__(self, size):
"""Initialize game field."""
if size % 2 or size < 4:
raise IllegalArgumentError("Field can't be not even or less than 4.")
self._size = size
self._skeleton = [[EMPTY for _ in range(self._size)] for _ in range(self._size)]
self._black_count = 0
self._white_count = 0
self._extra_count = 0
self.set_up()
def set_up(self):
"""Set disks on start positions."""
self[self._size // 2 - 1, self._size // 2 - 1] = WHITE
self[self._size // 2 - 1, self._size // 2] = BLACK
self[self._size // 2, self._size // 2 - 1] = BLACK
self[self._size // 2, self._size // 2] = WHITE
@property
def size(self):
"""Get size of field."""
return self._size
@property
def skeleton(self):
"""Get skeleton of field."""
return self._skeleton
@property
def directions(self):
"""Get directions."""
return self._DIRECTIONS
@property
def possibility_extra(self):
"""Said that we can or not place extra disk."""
return self._extra_count < self._size // 2
def flip(self, coords):
"""Flip disk. It mean that disk changes its color."""
if self[coords] == WHITE:
self._white_count -= 1
self[coords] = BLACK
elif self[coords] == BLACK:
self._black_count -= 1
self[coords] = WHITE
else:
raise TypeError("Can't flip EMPTY or another type of disk.")
def in_range(self, coords):
"""Check, that coordinates are correct."""
return 0 <= coords[0] < self._size and 0 <= coords[1] < self._size
@property
def white_count(self):
"""Get count of white disks."""
return self._white_count
@property
def black_count(self):
"""Get count of black disks."""
return self._black_count
@property
def extra_count(self):
"""Get count of extra disks."""
return self._extra_count
def __getitem__(self, coords):
"""Get disk from field."""
return self._skeleton[coords[0]][coords[1]]
def __setitem__(self, coords, color):
"""Set disk on the field and inc score."""
self._skeleton[coords[0]][coords[1]] = color
self._white_count += 1 if color == WHITE else 0
self._black_count += 1 if color == BLACK else 0
self._extra_count += 1 if color == EXTRA else 0
def __str__(self):
"""String representation of field."""
repr_ = []
for row in self:
for col in row:
repr_.append(col)
repr_.append('\n')
return ''.join(repr_)
def __iter__(self):
"""Iter trough the field."""
return self._skeleton.__iter__()
| 29.605769 | 88 | 0.559597 |
acf01578afaa03be02803a7a0e8ddc4a09b467f3 | 4,664 | py | Python | Integrations/python/deephaven/csv.py | chrisabidin/deephaven-core | ca6609e75dbc84fa4fa7fa89abf68f1e2bc81793 | [
"MIT"
] | null | null | null | Integrations/python/deephaven/csv.py | chrisabidin/deephaven-core | ca6609e75dbc84fa4fa7fa89abf68f1e2bc81793 | [
"MIT"
] | null | null | null | Integrations/python/deephaven/csv.py | chrisabidin/deephaven-core | ca6609e75dbc84fa4fa7fa89abf68f1e2bc81793 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2016-2021 Deephaven Data Labs and Patent Pending
#
""" The deephaven.csv module supports reading an external CSV file into a Deephaven table and writing a
Deephaven table out as a CSV file.
"""
from enum import Enum
from typing import Dict, Any, List
import jpy
import wrapt
import deephaven.Types as dht
_JCsvHelpers = None
_JTableHeader = None
_JCsvTools = None
_JParsers = None
_JArrays = None
def _defineSymbols():
    """Resolve the Java classes this module wraps, lazily, at runtime.

    Resolution requires a running JVM (started via the :mod:`jpy` module),
    so it happens here rather than at import time; importing the module
    before the JVM exists would otherwise fail.
    """
    if not jpy.has_jvm():
        raise SystemError("No java functionality can be used until the JVM has been initialized through the jpy module")

    global _JCsvHelpers, _JTableHeader, _JCsvTools, _JParsers, _JArrays
    if _JCsvHelpers is not None:
        # Symbols already resolved on a previous call.
        return
    # jpy.get_type raises if the class is missing from the classpath.
    _JCsvHelpers = jpy.get_type("io.deephaven.csv.CsvTools")
    _JTableHeader = jpy.get_type("io.deephaven.qst.table.TableHeader")
    _JCsvTools = jpy.get_type("io.deephaven.csv.CsvTools")
    _JParsers = jpy.get_type("io.deephaven.csv.parsers.Parsers")
    _JArrays = jpy.get_type("java.util.Arrays")
# Every public module method should be decorated with @_passThrough so that
# the Java symbols above are resolved before the method body runs.
@wrapt.decorator
def _passThrough(wrapped, instance, args, kwargs):
    """
    For decoration of module methods: resolves the required Java symbols at
    runtime (via _defineSymbols) before delegating to the wrapped function.
    :param wrapped: the method to be decorated
    :param instance: the object to which the wrapped function was bound when it was called
    :param args: the argument list for `wrapped`
    :param kwargs: the keyword argument dictionary for `wrapped`
    :return: the decorated version of the method
    """
    _defineSymbols()
    return wrapped(*args, **kwargs)
@_passThrough
def read(path: str,
         header: Dict[str, dht.DataType] = None,
         headless: bool = False,
         delimiter: str = ",",
         quote: str = "\"",
         ignore_surrounding_spaces: bool = True,
         trim: bool = False,
         charset: str = "utf-8") -> object:
    """ Read the CSV data specified by the path parameter as a table.

    Args:
        path (str): a file path or a URL string
        header (Dict[str, DataType]): an optional dict to define the table columns with key being the name,
            value being the data type; when omitted, column names/types come from the CSV itself
        headless (bool): indicates if the CSV data is headless, default is False
        delimiter (str): the delimiter used by the CSV, default is the comma
        quote (str): the quote character for the CSV, default is double quote
        ignore_surrounding_spaces (bool): indicates whether surrounding white space should be ignored for unquoted text
            fields, default is True
        trim (bool): indicates whether to trim white space inside a quoted string, default is False
        charset (str): the name of the charset used for the CSV data, default is 'utf-8'

    Returns:
        a table

    Raises:
        Exception
    """
    csv_specs_builder = _JCsvTools.builder()

    if header:
        csv_specs_builder.headers(_JArrays.asList(list(header.keys())))
        # Bug fix: the parser mapping below used to run unconditionally and
        # crashed with AttributeError when header was None (the default).
        # Map each Deephaven column type to the matching Java CSV parser.
        parser_map = {
            dht.bool_: _JParsers.BOOLEAN,
            dht.byte: _JParsers.BYTE,
            dht.char: _JParsers.CHAR,
            dht.short: _JParsers.SHORT,
            dht.int_: _JParsers.INT,
            dht.long_: _JParsers.LONG,
            dht.float_: _JParsers.FLOAT_FAST,
            dht.double: _JParsers.DOUBLE,
            dht.string: _JParsers.STRING,
            dht.datetime: _JParsers.DATETIME,
        }
        for column_name, column_type in header.items():
            csv_specs_builder.putParserForName(column_name, parser_map[column_type])

    # NOTE(review): `charset` is currently unused -- confirm whether it should
    # be wired into the CsvSpecs builder or removed from the signature.
    csv_specs = (csv_specs_builder
                 .hasHeaderRow(not headless)
                 .delimiter(ord(delimiter))
                 .quote(ord(quote))
                 .ignoreSurroundingSpaces(ignore_surrounding_spaces)
                 .trim(trim)
                 .build())

    return _JCsvHelpers.readCsv(path, csv_specs)
@_passThrough
def write(table: object, path: str, cols: List[str] = None) -> None:
    """ Write a table to a standard CSV file.

    Args:
        table (Table): the source table
        path (str): the path of the CSV file
        cols (List[str]): the names of the columns to be written out; all columns when omitted

    Raises:
        Exception
    """
    # Fixed: avoid a mutable default argument; None (or an empty list, the
    # old default) means "write all columns".
    if cols is None:
        cols = []
    _JCsvTools.writeCsv(table, False, path, *cols)
| 35.333333 | 123 | 0.663379 |
acf015799936d727e187e2381a9290cb085ad712 | 7,206 | py | Python | fixture/work_with_db.py | dondemonz/detectors | c5b8cc119aef9306094c50bdecf17f30704a2996 | [
"Apache-2.0"
] | null | null | null | fixture/work_with_db.py | dondemonz/detectors | c5b8cc119aef9306094c50bdecf17f30704a2996 | [
"Apache-2.0"
] | null | null | null | fixture/work_with_db.py | dondemonz/detectors | c5b8cc119aef9306094c50bdecf17f30704a2996 | [
"Apache-2.0"
] | null | null | null | import psycopg2
from psycopg2.extras import DictCursor
from model.input_data import *
import time
class DbHelper:
def __init__(self, host="localhost", dbname=None, user="postgres", password="postgres", records=None, edge_template=None, action=None):
self.host = host
self.dbname = dbname
self.user = user
self.password = password
self.records = records
self.edge_template = edge_template
self.action = action
self.connection = psycopg2.connect(host=host, dbname=dbname, user=user, password=password)
self.connection.autocommit = True
def check_cam_defocus(self, id):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "OBJ_CAM_DEFOCUS" WHERE id =%s', (id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
#print("records", self.records)
cursor.close()
return conn
def check_cam_defocus_from_db(self, db):
db.check_cam_defocus(id=defocus_id)
self.edge_template = db.records[0][12]
#print(self.edge_template)
def check_protocol_event_by_dbid(self, id):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE objid =%s', (id,))
assert self.records != []
self.records = cursor.fetchall()
#print("records", self.records)
cursor.close()
return conn
def check_action_from_db(self, db):
db.check_protocol_event_by_dbid()
self.action = db.records[0][4]
#print(self.edge_template)
def find_focus_time(self, t2):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s AND time <%s', ("FOCUSED", defocus_id, t2,))
#print(t2)
self.records = cursor.fetchall()
assert self.records != None
#почемуто здесь периодически пусто, то есть данные, то нет.
assert self.records != []
#print("records", self.records)
cursor.close()
return conn
def find_defocus_time(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("DEFOCUSED", defocus_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
#print("records", self.records)
cursor.close()
return conn
def find_defocus_time_after_deactivation_zone(self, t3):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s AND time >%s', ("DEFOCUSED", defocus_id, t3))
self.records = cursor.fetchall()
assert self.records != None
#print("records", self.records)
cursor.close()
return conn
def find_light_on(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("LIGHT_ON", ld_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
print("records", self.records)
cursor.close()
return conn
def find_light_off(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("LIGHT_OFF", ld_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
print("records", self.records)
cursor.close()
return conn
def find_shifted(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("SHIFTED", defocus_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
print("records", self.records)
cursor.close()
return conn
def find_blinding(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("BLINDING", cam_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
print("records", self.records)
cursor.close()
return conn
def find_unblinding(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("UNBLINDING", cam_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
print("records", self.records)
cursor.close()
return conn
def find_blinding_time(self):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s', ("BLINDING", cam_id,))
self.records = cursor.fetchall()
assert self.records != None
assert self.records != []
#print("records", self.records)
cursor.close()
return conn
def find_blinding_time_after_deactivation_zone(self, t3):
with self.connection as conn:
with conn.cursor(cursor_factory=DictCursor) as cursor:
cursor.execute('SELECT * FROM "PROTOCOL" WHERE action =%s AND objid =%s AND time >%s', ("BLINDING", cam_id, t3))
self.records = cursor.fetchall()
assert self.records != None
#print("records", self.records)
cursor.close()
return conn
#cursor.execute('SELECT * FROM audit_events WHERE event_action=%s', (event_action,))
#CONVERT_TZ(created_at, '+00:00', '+08:00') between "2018-01-24" and "2018-01-25"
def clean_db(self):
with self.connection as conn:
with conn.cursor() as cursor:
cursor.execute('DELETE FROM "PROTOCOL"')
conn.commit()
cursor.close()
def close_connection(self):
self.connection.close()
print("close")
| 39.593407 | 139 | 0.581182 |
acf016b0036d7513f114174adbcd7082e0314d37 | 259 | py | Python | rob/exceptions.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | 3 | 2022-02-08T20:10:21.000Z | 2022-02-08T20:18:54.000Z | rob/exceptions.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | null | null | null | rob/exceptions.py | dan-osull/rob | 25f2781cc5124570a04a48b56ec7d7f802b0650b | [
"MIT"
] | null | null | null | from click import secho
def echo_red_error(message, *args, **kwargs):
"""Patch for `click.exceptions.echo`"""
if str(message).startswith("Error: "):
return secho(message, *args, **kwargs, fg="red")
return secho(message, *args, **kwargs)
| 28.777778 | 56 | 0.65251 |
acf0170149ba3c4ea2bd58f0ea2fa39f0d3a05d4 | 16,321 | py | Python | src/datashare/azext_datashare/vendored_sdks/datashare/aio/operations_async/_data_set_mapping_operations_async.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/datashare/azext_datashare/vendored_sdks/datashare/aio/operations_async/_data_set_mapping_operations_async.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/datashare/azext_datashare/vendored_sdks/datashare/aio/operations_async/_data_set_mapping_operations_async.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DataSetMappingOperations:
"""DataSetMappingOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~data_share_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Pipeline client used to issue HTTP requests.
        self._client = client
        # msrest serializer/deserializer pair for request and response models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Client configuration; holds subscription_id used in URL templates.
        self._config = config
    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        share_subscription_name: str,
        data_set_mapping_name: str,
        **kwargs
    ) -> "models.DataSetMapping":
        """Get a DataSetMapping in a shareSubscription.
        Get DataSetMapping in a shareSubscription.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param account_name: The name of the share account.
        :type account_name: str
        :param share_subscription_name: The name of the shareSubscription.
        :type share_subscription_name: str
        :param data_set_mapping_name: The name of the dataSetMapping.
        :type data_set_mapping_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DataSetMapping or the result of cls(response)
        :rtype: ~data_share_management_client.models.DataSetMapping
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional custom response hook; 404/409 are pre-mapped to
        # azure-core exception types (callers may override via error_map).
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSetMapping"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
        api_version = "2019-11-01"
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'shareSubscriptionName': self._serialize.url("share_subscription_name", share_subscription_name, 'str'),
            'dataSetMappingName': self._serialize.url("data_set_mapping_name", data_set_mapping_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 is the only success code for this GET; anything else is
        # mapped to an azure-core error with the deserialized error body.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DataShareError, response)
            raise HttpResponseError(response=response, model=error)
        deserialized = self._deserialize('DataSetMapping', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed by the method body above (AutoRest convention).
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/dataSetMappings/{dataSetMappingName}'}
    async def create(
        self,
        resource_group_name: str,
        account_name: str,
        share_subscription_name: str,
        data_set_mapping_name: str,
        data_set_mapping: "models.DataSetMapping",
        **kwargs
    ) -> "models.DataSetMapping":
        """Create a DataSetMapping.
        Maps a source data set in the source share to a sink data set in the share subscription.
        Enables copying the data set from source to destination.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param account_name: The name of the share account.
        :type account_name: str
        :param share_subscription_name: The name of the share subscription which will hold the data set
         sink.
        :type share_subscription_name: str
        :param data_set_mapping_name: The name of the data set mapping to be created.
        :type data_set_mapping_name: str
        :param data_set_mapping: Destination data set configuration details.
        :type data_set_mapping: ~data_share_management_client.models.DataSetMapping
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DataSetMapping or the result of cls(response)
        :rtype: ~data_share_management_client.models.DataSetMapping or ~data_share_management_client.models.DataSetMapping
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional custom response hook; 404/409 are pre-mapped to
        # azure-core exception types (callers may override via error_map).
        cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSetMapping"]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
        api_version = "2019-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self.create.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'shareSubscriptionName': self._serialize.url("share_subscription_name", share_subscription_name, 'str'),
            'dataSetMappingName': self._serialize.url("data_set_mapping_name", data_set_mapping_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        # Serialize the DataSetMapping model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(data_set_mapping, 'DataSetMapping')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DataShareError, response)
            raise HttpResponseError(response=response, model=error)
        # PUT returns 200 (updated) or 201 (created); both carry the
        # resource body, which is deserialized the same way.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('DataSetMapping', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('DataSetMapping', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    # URL template consumed by the method body above (AutoRest convention).
    create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/dataSetMappings/{dataSetMappingName}'}
    async def delete(
        self,
        resource_group_name: str,
        account_name: str,
        share_subscription_name: str,
        data_set_mapping_name: str,
        **kwargs
    ) -> None:
        """Delete a DataSetMapping in a shareSubscription.
        Delete DataSetMapping in a shareSubscription.
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param account_name: The name of the share account.
        :type account_name: str
        :param share_subscription_name: The name of the shareSubscription.
        :type share_subscription_name: str
        :param data_set_mapping_name: The name of the dataSetMapping.
        :type data_set_mapping_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Optional custom response hook; 404/409 are pre-mapped to
        # azure-core exception types (callers may override via error_map).
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
        api_version = "2019-11-01"
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'shareSubscriptionName': self._serialize.url("share_subscription_name", share_subscription_name, 'str'),
            'dataSetMappingName': self._serialize.url("data_set_mapping_name", data_set_mapping_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 and 204 both indicate successful deletion; no body is returned.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(models.DataShareError, response)
            raise HttpResponseError(response=response, model=error)
        if cls:
            return cls(pipeline_response, None, {})
    # URL template consumed by the method body above (AutoRest convention).
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/dataSetMappings/{dataSetMappingName}'}
def list_by_share_subscription(
    self,
    resource_group_name: str,
    account_name: str,
    share_subscription_name: str,
    skip_token: Optional[str] = None,
    **kwargs
) -> "models.DataSetMappingList":
    """List DataSetMappings in a share subscription.

    Returns an async pager; pages are fetched lazily as the caller iterates.

    :param resource_group_name: The resource group name.
    :type resource_group_name: str
    :param account_name: The name of the share account.
    :type account_name: str
    :param share_subscription_name: The name of the share subscription.
    :type share_subscription_name: str
    :param skip_token: Continuation token.
    :type skip_token: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataSetMappingList or the result of cls(response)
    :rtype: ~data_share_management_client.models.DataSetMappingList
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["models.DataSetMappingList"]
    error_map = kwargs.pop('error_map', {404: ResourceNotFoundError, 409: ResourceExistsError})
    api_version = "2019-11-01"

    def prepare_request(next_link=None):
        # First page: build the URL from the method's templated metadata.
        # Subsequent pages: reuse the service-provided next_link verbatim.
        if not next_link:
            # Construct URL
            url = self.list_by_share_subscription.metadata['url']
            path_format_arguments = {
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'accountName': self._serialize.url("account_name", account_name, 'str'),
                'shareSubscriptionName': self._serialize.url("share_subscription_name", share_subscription_name, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
        else:
            url = next_link
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if skip_token is not None:
            query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = 'application/json'
        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation link, items).
        deserialized = self._deserialize('DataSetMappingList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch a single page; non-200 responses raise HttpResponseError
        # (map_error may raise a more specific mapped exception first).
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize(models.DataShareError, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_by_share_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shareSubscriptions/{shareSubscriptionName}/dataSetMappings'}
| 48.865269 | 232 | 0.679983 |
acf017baf0f8d0c3ef28efce2e82878e57b25512 | 5,833 | py | Python | torch/visualize.py | rllab-snu/Trust-Region-CVaR | 6b97655117f49d045b9e811dc444b685c097f2ae | [
"MIT"
] | 2 | 2022-03-11T06:27:36.000Z | 2022-03-31T06:18:39.000Z | torch/visualize.py | rllab-snu/Trust-Region-CVaR | 6b97655117f49d045b9e811dc444b685c097f2ae | [
"MIT"
] | null | null | null | torch/visualize.py | rllab-snu/Trust-Region-CVaR | 6b97655117f49d045b9e811dc444b685c097f2ae | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import rc
from copy import deepcopy
import numpy as np
import pickle
import glob
import sys
import os
def main():
    """Entry point: plot TRC learning curves for the Point environment.

    Configuration (figure size, smoothing window, interpolation interval,
    plotted items and the algorithm log directories) is hard-coded here.
    """
    fig_size = 3
    window_size = 500  # smoothing window length (original assigned this twice)
    interp_steps = 1000  # resample interval (env steps) for interpolation
    item_list = ['metric', 'score', 'cv', 'total_cv']
    env_name = "Point"
    algo_list = [{
        'name': 'TRC',
        'logs': [f'results/TRC-Point_s{i}' for i in [1]],
    }]
    draw(env_name, item_list, algo_list, fig_size, window_size, interp_steps, is_horizon=True)
def draw(env_name, item_list, algo_list, fig_size, window_size, interp_steps, is_horizon=False):
    """Plot smoothed learning curves (mean +/- std band) per item, per algorithm.

    :param env_name: environment name; substrings 'doggo'/'jackal' select
        special axis limits, and the name is used in the output filename
    :param item_list: items to plot ('metric', 'score', 'cv', 'total_cv', ...)
    :param algo_list: list of {'name': str, 'logs': [dir, ...]} dicts
    :param fig_size: base subplot size in inches
    :param window_size: smoothing window passed through to parse()
    :param interp_steps: interpolation interval passed through to parse()
    :param is_horizon: lay subplots out in one row instead of one column
    """
    # One subplot per item, arranged horizontally or vertically.
    if is_horizon:
        fig, ax_list = plt.subplots(nrows=1, ncols=len(item_list), figsize=(fig_size*len(item_list), fig_size))
    else:
        fig, ax_list = plt.subplots(nrows=len(item_list), ncols=1, figsize=(fig_size*1.3, fig_size*len(item_list)))
    if len(item_list) == 1:
        ax_list = [ax_list]  # plt.subplots returns a bare Axes for a single plot
    for item_idx in range(len(item_list)):
        ax = ax_list[item_idx]
        item_name = item_list[item_idx]
        min_value = np.inf
        max_value = -np.inf
        for algo_idx in range(len(algo_list)):
            algo_dict = algo_list[algo_idx]
            algo_name = algo_dict['name']
            algo_logs = algo_dict['logs']
            # Log directories are named after the underlying metric:
            # 'total_cv' reads the 'cv' logs, 'metric' reads the 'score' logs.
            algo_dirs = ['{}/{}_log'.format(dir_item, item_name.replace('total_', '').replace('metric', 'score')) for dir_item in algo_logs]
            linspace, means, stds = parse(algo_dirs, item_name, window_size, interp_steps)
            if item_name == "cv":
                # Rescale CV for display.
                means /= 1000.0
                stds /= 1000.0
            ax.plot(linspace, means, lw=2, label=algo_name)
            ax.fill_between(linspace, means - stds, means + stds, alpha=0.15)
            max_value = max(max_value, np.max(means + stds))
            min_value = min(min_value, np.max(means - stds))
        ax.set_xlabel('Steps')
        prefix, postfix = "", ""
        fontsize = "x-large"
        # Legend only on the first subplot; placement depends on layout.
        if item_idx == 0 and not is_horizon:
            ax.legend(bbox_to_anchor=(0.0, 1.01, 1.0, 0.101), loc='lower left', ncol=3, mode="expand", borderaxespad=0.)
            postfix = "\n\n"
        if item_idx == 0 and is_horizon:
            ax.legend(loc='upper left', ncol=1, borderaxespad=0.)
        # Per-item titles and environment-specific y limits.
        if item_name == "metric":
            ax.set_title(f'{prefix}Score{postfix}', fontsize=fontsize)
            if "doggo" in env_name.lower():
                ax.set_ylim(-3.0, 20.0)
            if 'jackal' in env_name.lower():
                ax.set_ylim(-2.0, 12.0)
        elif item_name == "score":
            ax.set_title(f'{prefix}Reward Sum{postfix}', fontsize=fontsize)
        elif item_name == "cv":
            ax.set_title(f'{prefix}CV{postfix}', fontsize=fontsize)
            ax.set_ylim(0, max_value)
        elif item_name == "total_cv":
            ax.set_title(f'{prefix}Total CV{postfix}', fontsize=fontsize)
            ax.set_ylim(0, max_value)
            if 'jackal' in env_name.lower():
                ax.set_ylim(0., 240000)
        else:
            ax.set_title(item_name)
        # Environment-specific x limits (total training steps).
        if 'doggo' in env_name.lower():
            ax.set_xlim(0, 3e7)
        elif 'jackal' in env_name.lower():
            ax.set_xlim(0, 1e6)
        else:
            ax.set_xlim(0, 1e7)
        ax.grid()
    fig.tight_layout()
    save_dir = "./imgs"
    item_names = '&'.join(item_list)
    env_name = env_name.replace(' ', '')
    if not os.path.isdir(save_dir): os.makedirs(save_dir)
    plt.savefig(f'{save_dir}/{env_name}_{item_names}3.png')
    plt.show()
def parse(algo_dirs, item_name, window_size, interp_steps):
    """Load pickled (step_delta, value) records for several runs and smooth them.

    Each directory's *.pkl files are concatenated in sorted order. Values are
    accumulated differently per item: 'metric' divides score by (cv + 1) per
    record, 'total_*' items are cumulatively summed, others are used as-is.
    All runs are interpolated onto a common step grid and truncated to the
    shortest run before smoothing.

    :param algo_dirs: log directories, one per seed/run
    :param item_name: which quantity the records represent
    :param window_size: smoothing window forwarded to smoothing()
    :param interp_steps: step interval of the interpolation grid
    :return: (linspace of the shortest run, smoothed means, smoothed stds)
    """
    algo_datas = []
    min_linspace = None
    min_len = np.inf
    print(f'[parsing] {algo_dirs}')
    for algo_dir in algo_dirs:
        record_paths = glob.glob('./{}/*.pkl'.format(algo_dir))
        record_paths.sort()
        record = []
        for record_path in record_paths:
            with open(record_path, 'rb') as f:
                record += pickle.load(f)
        if item_name == "metric":
            # 'metric' also needs the parallel cv logs (same layout, 'cv' dir).
            cv_record_paths = glob.glob('./{}/*.pkl'.format(algo_dir.replace('score', 'cv')))
            cv_record_paths.sort()
            cv_record = []
            for record_path in cv_record_paths:
                with open(record_path, 'rb') as f:
                    cv_record += pickle.load(f)
        # Records store per-episode step counts; accumulate to absolute steps.
        steps = [0]
        data = [0.0]
        for step_idx in range(len(record)):
            steps.append(steps[-1] + record[step_idx][0])
            if item_name == 'metric':
                # Score normalized by constraint violations (+1 to avoid /0).
                data.append(record[step_idx][1]/(cv_record[step_idx][1] + 1))
            elif 'total' in item_name:
                data.append(data[-1] + record[step_idx][1])
            else:
                data.append(record[step_idx][1])
        linspace = np.linspace(steps[0], steps[-1], int((steps[-1]-steps[0])/interp_steps + 1))
        # Track the shortest run's grid so all runs can be aligned on it.
        if min_len > len(linspace):
            min_linspace = linspace[:]
            min_len = len(linspace)
        interp_data = np.interp(linspace, steps, data)
        algo_datas.append(interp_data)
    algo_len = min([len(data) for data in algo_datas])
    algo_datas = [data[:algo_len] for data in algo_datas]
    smoothed_means, smoothed_stds = smoothing(algo_datas, window_size)
    return min_linspace, smoothed_means, smoothed_stds
def smoothing(data, window_size):
    """Compute trailing-window mean/std curves across multiple runs.

    For each 1-based position ``end``, statistics are taken over the last
    ``window_size`` samples (clipped at the start) of every run in ``data``,
    concatenated together.

    :param data: list of equally-long 1-D sequences, one per run
    :param window_size: trailing window length in samples
    :return: (means, stds) as 1-D numpy arrays of length len(data[0])
    """
    means, stds = [], []
    curve_len = len(data[0])
    for end in range(1, curve_len + 1):
        start = max(0, end - window_size)
        window = np.concatenate([run[start:end] for run in data])
        means.append(np.mean(window))
        stds.append(np.std(window))
    return np.array(means), np.array(stds)
if __name__ == "__main__":
    # Run the plotting pipeline when executed as a script.
    main()
| 35.567073 | 140 | 0.580662 |
acf0188fb713aca9150b978d89f0db651e45d655 | 1,011 | py | Python | learning_curves/collect_perf_data.py | hzi-bifo/Predicting_PA_AMR_paper | 0fc7e1f8400ba8133fedf169089dfcb4c756530a | [
"Apache-2.0"
] | 4 | 2020-09-24T00:44:21.000Z | 2021-02-18T09:09:44.000Z | learning_curves/collect_perf_data.py | hzi-bifo/Fighting_PA_AMR_paper | 0fc7e1f8400ba8133fedf169089dfcb4c756530a | [
"Apache-2.0"
] | null | null | null | learning_curves/collect_perf_data.py | hzi-bifo/Fighting_PA_AMR_paper | 0fc7e1f8400ba8133fedf169089dfcb4c756530a | [
"Apache-2.0"
] | 1 | 2021-02-18T09:09:19.000Z | 2021-02-18T09:09:19.000Z |
import pandas as pd
def read(performances, out_table):
    """Merge per-run performance tables into one TSV for plotting.

    Each input path is a TSV whose first path component encodes the run:
    ``<something>_<mode...>_<cv>_<mode>`` — the last two underscore-separated
    tokens become ``cv_mode`` and the middle tokens become ``mode``. Row
    indices are truncated at the first underscore and renamed to ``drug``.

    :param performances: iterable of TSV file paths
    :param out_table: path of the combined TSV written out
    """
    frames = []
    for path in performances:
        table = pd.read_csv(path, sep="\t", index_col=0)
        run_tokens = path.split("/")[0].split("_")
        frame = pd.DataFrame(table)
        frame.loc[:, "cv_mode"] = "_".join(run_tokens[-2:])
        frame.loc[:, "mode"] = "_".join(run_tokens[1:-2])
        frames.append(frame)
    merged = pd.concat(frames, axis=0) if frames else pd.DataFrame()
    merged.index = [drug.split("_")[0] for drug in merged.index]
    merged.index.name = "drug"
    merged.to_csv(out_table, sep="\t")
if __name__ == "__main__":
    # CLI entry point: read(--performances ..., out_table).
    import argparse
    parser = argparse.ArgumentParser("prepare performance vs number of features for plotting")
    parser.add_argument("out_table", help='output table to be used for plotting')
    parser.add_argument("--performances", nargs = "*", help='Model-T nested-cv performance overview file')
    args = parser.parse_args()
    # Argument names match read()'s parameter names, so unpack directly.
    read(**vars(args))
| 40.44 | 106 | 0.632047 |
acf018d2ffe2d2b62a9fe5f5abb0688eb379b40a | 485 | py | Python | tests/integration/credentials/utils_test.py | kennylajara/docker-py | a48a5a9647761406d66e8271f19fab7fa0c5f582 | [
"Apache-2.0"
] | 5,611 | 2015-01-02T16:46:16.000Z | 2022-03-31T21:49:58.000Z | tests/integration/credentials/utils_test.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | [
"Apache-2.0"
] | 2,176 | 2015-01-01T00:57:56.000Z | 2022-03-31T13:21:54.000Z | tests/integration/credentials/utils_test.py | sdrees/docker-py | 8595cca8186b5d53c04ef71a1a3db86b7c53b012 | [
"Apache-2.0"
] | 1,774 | 2015-01-05T12:49:03.000Z | 2022-03-29T13:27:47.000Z | import os
from docker.credentials.utils import create_environment_dict
from unittest import mock
@mock.patch.dict(os.environ)
def test_create_environment_dict():
    """create_environment_dict merges overrides into a copy of the current
    environment: overridden keys are replaced, new keys are added, untouched
    keys survive, and ``os.environ`` itself is left unmodified.

    The ``mock.patch.dict`` decorator restores the real environment after
    the test, so reassigning ``os.environ`` here is safe.
    """
    base = {'FOO': 'bar', 'BAZ': 'foobar'}
    os.environ = base
    assert create_environment_dict({'FOO': 'baz'}) == {
        'FOO': 'baz', 'BAZ': 'foobar',
    }
    assert create_environment_dict({'HELLO': 'world'}) == {
        'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world',
    }
    # The merge must not mutate the process environment.
    assert os.environ == base
| 25.526316 | 60 | 0.624742 |
acf0190e03f6f9f2db5e0e55d6a0dc7bb92d65bb | 11,238 | py | Python | lbry/wallet/bip32.py | vertbyqb/lbry-sdk | 8076000c2761d7ca077049f1e9e1de177553d6f0 | [
"MIT"
] | 4,996 | 2019-06-21T04:44:34.000Z | 2022-03-31T14:24:52.000Z | lbry/wallet/bip32.py | vertbyqb/lbry-sdk | 8076000c2761d7ca077049f1e9e1de177553d6f0 | [
"MIT"
] | 1,103 | 2019-06-21T06:28:42.000Z | 2022-03-28T20:50:25.000Z | lbry/wallet/bip32.py | vertbyqb/lbry-sdk | 8076000c2761d7ca077049f1e9e1de177553d6f0 | [
"MIT"
] | 244 | 2019-06-24T08:32:32.000Z | 2022-03-12T17:42:02.000Z | from asn1crypto.keys import PrivateKeyInfo, ECPrivateKey
from coincurve import PublicKey as cPublicKey, PrivateKey as cPrivateKey
from coincurve.utils import (
pem_to_der, lib as libsecp256k1, ffi as libsecp256k1_ffi
)
from coincurve.ecdsa import CDATA_SIG_LENGTH
from lbry.crypto.hash import hmac_sha512, hash160, double_sha256
from lbry.crypto.base58 import Base58
from .util import cachedproperty
class KeyPath:
    """Derivation-chain indices used below the account key."""
    RECEIVE = 0  # externally visible receiving addresses
    CHANGE = 1   # internal change addresses
    CHANNEL = 2  # presumably LBRY channel identity keys — confirm with callers
class DerivationError(Exception):
    """ Raised when an invalid derivation occurs. """
    # NOTE(review): not raised anywhere in this module's visible code;
    # kept as part of the public API.
class _KeyBase:
    """ A BIP32 Key, public or private. """

    def __init__(self, ledger, chain_code, n, depth, parent):
        """
        :param ledger: ledger object supplying network prefixes / address encoding
        :param chain_code: 32 raw bytes of BIP32 chain code
        :param n: child index of this key (0 <= n < 2**32)
        :param depth: derivation depth below the master key (0-255)
        :param parent: parent key of the same concrete type, or None for a master key
        """
        if not isinstance(chain_code, (bytes, bytearray)):
            raise TypeError('chain code must be raw bytes')
        if len(chain_code) != 32:
            raise ValueError('invalid chain code')
        if not 0 <= n < 1 << 32:
            raise ValueError('invalid child number')
        if not 0 <= depth < 256:
            raise ValueError('invalid depth')
        if parent is not None:
            # A public key's parent must be public, a private key's private.
            if not isinstance(parent, type(self)):
                raise TypeError('parent key has bad type')
        self.ledger = ledger
        self.chain_code = chain_code
        self.n = n
        self.depth = depth
        self.parent = parent

    def _hmac_sha512(self, msg):
        """ Use SHA-512 to provide an HMAC, returned as a pair of 32-byte objects. """
        hmac = hmac_sha512(self.chain_code, msg)
        return hmac[:32], hmac[32:]

    def _extended_key(self, ver_bytes, raw_serkey):
        """ Return the 78-byte extended key given prefix version bytes and serialized key bytes. """
        if not isinstance(ver_bytes, (bytes, bytearray)):
            raise TypeError('ver_bytes must be raw bytes')
        if len(ver_bytes) != 4:
            raise ValueError('ver_bytes must have length 4')
        if not isinstance(raw_serkey, (bytes, bytearray)):
            raise TypeError('raw_serkey must be raw bytes')
        if len(raw_serkey) != 33:
            raise ValueError('raw_serkey must have length 33')
        # BIP32 serialization: version | depth | parent fingerprint |
        # child number | chain code | key material.
        return (
            ver_bytes + bytes((self.depth,))
            + self.parent_fingerprint() + self.n.to_bytes(4, 'big')
            + self.chain_code + raw_serkey
        )

    def identifier(self):
        # Subclasses return the 20-byte key identifier.
        raise NotImplementedError

    def extended_key(self):
        # Subclasses return the raw 78-byte extended key.
        raise NotImplementedError

    def fingerprint(self):
        """ Return the key's fingerprint as 4 bytes. """
        return self.identifier()[:4]

    def parent_fingerprint(self):
        """ Return the parent key's fingerprint as 4 bytes. """
        # Master keys (no parent) use a zero fingerprint.
        return self.parent.fingerprint() if self.parent else bytes((0,)*4)

    def extended_key_string(self):
        """ Return an extended key as a base58 string. """
        return Base58.encode_check(self.extended_key())
class PublicKey(_KeyBase):
    """ A BIP32 public key. """

    def __init__(self, ledger, pubkey, chain_code, n, depth, parent=None):
        """
        :param pubkey: a coincurve.PublicKey, or 33 raw bytes of a
            SEC1-compressed public key
        """
        super().__init__(ledger, chain_code, n, depth, parent)
        if isinstance(pubkey, cPublicKey):
            self.verifying_key = pubkey
        else:
            self.verifying_key = self._verifying_key_from_pubkey(pubkey)

    @classmethod
    def _verifying_key_from_pubkey(cls, pubkey):
        """ Converts a 33-byte compressed pubkey into an coincurve.PublicKey object. """
        if not isinstance(pubkey, (bytes, bytearray)):
            raise TypeError('pubkey must be raw bytes')
        if len(pubkey) != 33:
            raise ValueError('pubkey must be 33 bytes')
        if pubkey[0] not in (2, 3):
            # 0x02 / 0x03 are the prefixes of a compressed point.
            raise ValueError('invalid pubkey prefix byte')
        return cPublicKey(pubkey)

    @cachedproperty
    def pubkey_bytes(self):
        """ Return the compressed public key as 33 bytes. """
        return self.verifying_key.format(True)

    @cachedproperty
    def address(self):
        """ The public key as a P2PKH address. """
        return self.ledger.public_key_to_address(self.pubkey_bytes)

    def ec_point(self):
        # The (x, y) curve point of the underlying key.
        return self.verifying_key.point()

    def child(self, n: int) -> 'PublicKey':
        """ Return the derived child extended pubkey at index N. """
        if not 0 <= n < (1 << 31):
            # Public keys can only derive non-hardened children.
            raise ValueError('invalid BIP32 public key child number')
        msg = self.pubkey_bytes + n.to_bytes(4, 'big')
        L_b, R_b = self._hmac_sha512(msg)  # pylint: disable=invalid-name
        # Child pubkey = parent point + L_b * G; R_b is the child chain code.
        derived_key = self.verifying_key.add(L_b)
        return PublicKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)

    def identifier(self):
        """ Return the key's identifier as 20 bytes. """
        return hash160(self.pubkey_bytes)

    def extended_key(self):
        """ Return a raw extended public key. """
        return self._extended_key(
            self.ledger.extended_public_key_prefix,
            self.pubkey_bytes
        )

    def verify(self, signature, data):
        """ Verify a signature over data by double hashing the data and
        checking the signature against the hash. """
        return self.verifying_key.verify(signature, data, hasher=double_sha256)
class PrivateKey(_KeyBase):
    """A BIP32 private key."""

    # Child indices at or above this value use hardened derivation.
    HARDENED = 1 << 31

    def __init__(self, ledger, privkey, chain_code, n, depth, parent=None):
        """
        :param privkey: a coincurve.PrivateKey, or 32 raw bytes of key material
        """
        super().__init__(ledger, chain_code, n, depth, parent)
        if isinstance(privkey, cPrivateKey):
            self.signing_key = privkey
        else:
            self.signing_key = self._signing_key_from_privkey(privkey)

    @classmethod
    def _signing_key_from_privkey(cls, private_key):
        """ Converts a 32-byte private key into an coincurve.PrivateKey object. """
        return cPrivateKey.from_int(PrivateKey._private_key_secret_exponent(private_key))

    @classmethod
    def _private_key_secret_exponent(cls, private_key):
        """ Return the private key as a secret exponent if it is a valid private key. """
        if not isinstance(private_key, (bytes, bytearray)):
            raise TypeError('private key must be raw bytes')
        if len(private_key) != 32:
            raise ValueError('private key must be 32 bytes')
        return int.from_bytes(private_key, 'big')

    @classmethod
    def from_seed(cls, ledger, seed) -> 'PrivateKey':
        """Create a master private key by HMAC-SHA512ing the seed."""
        # This hard-coded message string seems to be coin-independent...
        hmac = hmac_sha512(b'Bitcoin seed', seed)
        privkey, chain_code = hmac[:32], hmac[32:]
        return cls(ledger, privkey, chain_code, 0, 0)

    @classmethod
    def from_pem(cls, ledger, pem) -> 'PrivateKey':
        """Load a (non-derived) private key from a PEM string; the chain
        code is zeroed, so the result cannot reproduce a BIP32 tree."""
        der = pem_to_der(pem.encode())
        try:
            key_int = ECPrivateKey.load(der).native['private_key']
        except ValueError:
            # Fall back to a PKCS#8-wrapped (PrivateKeyInfo) structure.
            key_int = PrivateKeyInfo.load(der).native['private_key']['private_key']
        private_key = cPrivateKey.from_int(key_int)
        return cls(ledger, private_key, bytes((0,)*32), 0, 0)

    @cachedproperty
    def private_key_bytes(self):
        """ Return the serialized private key (no leading zero byte). """
        return self.signing_key.secret

    @cachedproperty
    def public_key(self) -> PublicKey:
        """ Return the corresponding extended public key. """
        verifying_key = self.signing_key.public_key
        parent_pubkey = self.parent.public_key if self.parent else None
        return PublicKey(
            self.ledger, verifying_key, self.chain_code,
            self.n, self.depth, parent_pubkey
        )

    def ec_point(self):
        # Curve point of the corresponding public key.
        return self.public_key.ec_point()

    def secret_exponent(self):
        """ Return the private key as a secret exponent. """
        return self.signing_key.to_int()

    def wif(self):
        """ Return the private key encoded in Wallet Import Format. """
        return self.ledger.private_key_to_wif(self.private_key_bytes)

    @property
    def address(self):
        """ The public key as a P2PKH address. """
        return self.public_key.address

    def child(self, n) -> 'PrivateKey':
        """ Return the derived child extended private key at index N."""
        if not 0 <= n < (1 << 32):
            raise ValueError('invalid BIP32 private key child number')
        if n >= self.HARDENED:
            # Hardened derivation keys the HMAC off the private key itself.
            serkey = b'\0' + self.private_key_bytes
        else:
            serkey = self.public_key.pubkey_bytes
        msg = serkey + n.to_bytes(4, 'big')
        L_b, R_b = self._hmac_sha512(msg)  # pylint: disable=invalid-name
        # Child private key = parent + L_b (mod curve order); R_b is the
        # child chain code.
        derived_key = self.signing_key.add(L_b)
        return PrivateKey(self.ledger, derived_key, R_b, n, self.depth + 1, self)

    def sign(self, data):
        """ Produce a signature for piece of data by double hashing it and signing the hash. """
        return self.signing_key.sign(data, hasher=double_sha256)

    def sign_compact(self, digest):
        """ Produce a compact (64-byte r||s) signature over a precomputed digest. """
        key = self.signing_key
        signature = libsecp256k1_ffi.new('secp256k1_ecdsa_signature *')
        signed = libsecp256k1.secp256k1_ecdsa_sign(
            key.context.ctx, signature, digest, key.secret,
            libsecp256k1_ffi.NULL, libsecp256k1_ffi.NULL
        )
        if not signed:
            raise ValueError('The private key was invalid.')
        serialized = libsecp256k1_ffi.new('unsigned char[%d]' % CDATA_SIG_LENGTH)
        compacted = libsecp256k1.secp256k1_ecdsa_signature_serialize_compact(
            key.context.ctx, serialized, signature
        )
        if compacted != 1:
            raise ValueError('The signature could not be compacted.')
        return bytes(libsecp256k1_ffi.buffer(serialized, CDATA_SIG_LENGTH))

    def identifier(self):
        """Return the key's identifier as 20 bytes."""
        return self.public_key.identifier()

    def extended_key(self):
        """Return a raw extended private key."""
        # Private key material is serialized with a leading zero byte.
        return self._extended_key(
            self.ledger.extended_private_key_prefix,
            b'\0' + self.private_key_bytes
        )

    def to_pem(self):
        # PEM-serialize the raw signing key (chain code is not included).
        return self.signing_key.to_pem()
def _from_extended_key(ledger, ekey):
    """Return a PublicKey or PrivateKey from an extended key raw bytes.

    :param ledger: ledger supplying the expected version-byte prefixes
    :param ekey: 78 raw bytes of a BIP32 extended key
    :raises TypeError: if ekey is not raw bytes
    :raises ValueError: on bad length, bad private-key pad byte, or an
        unrecognised version prefix
    """
    if not isinstance(ekey, (bytes, bytearray)):
        raise TypeError('extended key must be raw bytes')
    if len(ekey) != 78:
        raise ValueError('extended key must have length 78')
    # Layout: 4-byte version | 1-byte depth | 4-byte parent fingerprint |
    # 4-byte child number | 32-byte chain code | 33-byte key material.
    depth = ekey[4]
    n = int.from_bytes(ekey[9:13], 'big')
    chain_code = ekey[13:45]
    if ekey[:4] == ledger.extended_public_key_prefix:
        pubkey = ekey[45:]
        key = PublicKey(ledger, pubkey, chain_code, n, depth)
    elif ekey[:4] == ledger.extended_private_key_prefix:
        if ekey[45] != 0:
            # Private key material must be padded with a zero byte.
            raise ValueError('invalid extended private key prefix byte')
        privkey = ekey[46:]
        key = PrivateKey(ledger, privkey, chain_code, n, depth)
    else:
        raise ValueError('version bytes unrecognised')
    return key
def from_extended_key_string(ledger, ekey_str):
    """Given an extended key string, such as

    xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
    3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL

    return a PublicKey or PrivateKey.
    """
    # decode_check validates the trailing 4-byte checksum before parsing.
    return _from_extended_key(ledger, Base58.decode_check(ekey_str))
| 36.368932 | 100 | 0.64451 |
acf01a3d5fef9da5042cad266a17ce026a8ad045 | 3,502 | py | Python | django_eveonline_connector/views/sso.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 3 | 2020-03-07T13:58:45.000Z | 2021-02-06T20:16:50.000Z | django_eveonline_connector/views/sso.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 66 | 2019-12-17T20:54:22.000Z | 2021-06-10T20:39:04.000Z | django_eveonline_connector/views/sso.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 2 | 2020-01-17T20:04:52.000Z | 2021-07-11T22:11:42.000Z | from django.shortcuts import render, redirect
from django_eveonline_connector.models import EveClient, EveToken, EveCharacter, EveScope, EveCorporation, PrimaryEveCharacterAssociation
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.conf import settings
import logging
logger = logging.getLogger(__name__)
"""
SSO Views
"""
@login_required
def sso_callback(request):
    """EVE SSO OAuth callback: exchange the code for a token and attach it
    to an EveCharacter for the logged-in user.

    Steps: authenticate the authorization code, store the resulting token,
    attach the known scopes, find-or-create the character (replacing any
    previous token it held), and set the user's primary character if none
    is set yet. Always redirects home.
    """
    code = request.GET.get('code', None)
    eve_client = EveClient.get_instance()  # NOTE(review): unused local
    # verify token
    esi_security = EveClient.get_esi_security()
    esi_token = esi_security.auth(code)
    esi_character = esi_security.verify()
    # create new token
    new_token = EveToken.objects.get_or_create(
        access_token=esi_token['access_token'],
        refresh_token=esi_token['refresh_token'],
        expires_in=esi_token['expires_in'],
        user=request.user
    )[0]
    # set scopes M2M
    scopes = EveScope.objects.filter(name__in=esi_character['scp'])
    if scopes.count() != len(esi_character['scp']):
        # ESI granted a scope we have no EveScope row for; log, but keep going.
        logger.error(
            f"Whoa there. Somehow we added a scope we don't know about. Pass this to Krypted Developers: \n ${esi_character['scp']}")
    new_token.scopes.set(scopes)
    # find or create character; the character id is the tail of the JWT 'sub'
    # claim (format "CHARACTER:EVE:<id>").
    if EveCharacter.objects.filter(external_id=esi_character['sub'].split(":")[-1]).exists():
        character = EveCharacter.objects.get(
            external_id=esi_character['sub'].split(":")[-1])
        if character.token:
            # Replace the character's previous token outright.
            old_token = character.token
            old_token.delete()
        character.token = new_token
        character.save()
    else:
        character = EveCharacter.objects.create(
            external_id=esi_character['sub'].split(":")[-1],
            name=esi_character['name'],
            token=new_token,
        )
    # if no primary user, set
    if not PrimaryEveCharacterAssociation.objects.filter(user=request.user).exists():
        PrimaryEveCharacterAssociation.objects.create(
            user=request.user,
            character=character
        )
    return redirect('/')
@login_required
def add_sso_token(request):
    """Redirect the user to the EVE SSO login URL to add a new token.

    Misconfigured EVE settings are handled at this boundary: the failure is
    logged and the user is sent home with a warning message.
    """
    try:
        sso_url = EveClient.get_instance().get_sso_url()
        return redirect(sso_url)
    except Exception:
        logger.exception("Failed to get SSO url from EveClient")
        messages.warning(
            request, "Eve Settings are not configured correctly. Contact your administrator.")
        return redirect('/')
@login_required
def update_sso_token(request, token_id):
    """Redirect to EVE SSO to re-authorize an existing token with its
    previously requested scopes.

    NOTE(review): the token is looked up by pk without checking that it
    belongs to ``request.user`` — confirm whether any authenticated user
    should be able to trigger re-auth for another user's token.
    """
    eve_token = EveToken.objects.get(pk=token_id)
    return redirect(EveClient.get_instance().get_sso_url(
        EveScope.convert_to_list(eve_token.requested_scopes.all())
    ))
@login_required
def remove_sso_token(request, pk):
    """Delete an EVE Online token (and its primary-character association)
    owned by the requesting user.

    Only the token's owner may delete it; other users get an error message.
    Always redirects home.
    """
    eve_token = EveToken.objects.get(pk=pk)
    if request.user == eve_token.user:
        try:
            # Drop the primary-character association first so we don't leave
            # a dangling reference to the character being removed.
            # (filter().delete() is a no-op when no association exists.)
            PrimaryEveCharacterAssociation.objects.filter(
                character=eve_token.evecharacter).delete()
        except Exception:
            logger.exception(
                "Encountered error when deleting token character associations")
        eve_token.delete()
        # Bug fix: the original flashed this success message unconditionally,
        # even when deletion was refused for a non-owner.
        messages.success(
            request, "Successfully deleted EVE Online token and character data")
    else:
        messages.error(request, "You cannot delete someone else's token.")
    return redirect("/")
| 33.037736 | 137 | 0.682182 |
acf01a787f6c09f517fc0118f2b5251eb22d3b59 | 274 | py | Python | qiang09_object_oriented/q01_ooa.py | 13528770807/practice | d2dbb7828f3689f359b6b25c4abb5712a145230a | [
"MIT"
] | null | null | null | qiang09_object_oriented/q01_ooa.py | 13528770807/practice | d2dbb7828f3689f359b6b25c4abb5712a145230a | [
"MIT"
] | null | null | null | qiang09_object_oriented/q01_ooa.py | 13528770807/practice | d2dbb7828f3689f359b6b25c4abb5712a145230a | [
"MIT"
] | null | null | null |
class Zqiang():
    def zqiang(self):  # the first parameter (self) refers to the instance the method is called on
        print(self)
        print(self.__class__)


obj = Zqiang()
obj.zqiang()


# Redefines Zqiang to show that the first method parameter may use any
# name (here 'runoob'); it still receives the instance.
class Zqiang():
    def zqiang(runoob):
        print(runoob)
        print(runoob.__class__)


obj2 = Zqiang()
obj2.zqiang()
| 12.454545 | 46 | 0.60219 |
acf01b32ccc6a18fa68354e7b4a6373eca36268a | 12,952 | py | Python | views.py | varungupta3009/UE17CS342 | cb0923d498ea62b4b4d67f8cd3ce0aa49a53c09d | [
"Unlicense"
] | null | null | null | views.py | varungupta3009/UE17CS342 | cb0923d498ea62b4b4d67f8cd3ce0aa49a53c09d | [
"Unlicense"
] | null | null | null | views.py | varungupta3009/UE17CS342 | cb0923d498ea62b4b4d67f8cd3ce0aa49a53c09d | [
"Unlicense"
] | null | null | null | from flask import *
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
import sqlite3
import random
import re
import os
UPLOAD_FOLDER='static/media'  # destination directory for uploaded blog images
app=Flask(__name__)
# NOTE(review): hard-coded session secret key — should come from
# configuration or the environment in production.
app.secret_key = "abc"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Module-level connection: only used here to open/create the database file
# at import time; each request handler opens its own connection.
conn = sqlite3.connect('KM.db')
conn.execute("")  # no-op statement; forces the KM.db file to be created/opened
@app.route('/signup', methods=["GET", "POST"])
def signup():
    """Lecturer sign-up.

    GET renders the form; POST inserts a new ``lecture`` row from the form
    fields. Users already signed in (``emailId`` in session) are sent home.

    NOTE(review): passwords are stored in plaintext — they should be hashed.
    """
    if request.method == "GET" and 'emailId' not in session:
        return render_template("signup.html")
    elif request.method == "POST" and 'emailId' not in session:
        try:
            data = request.form
            conn = sqlite3.connect('KM.db')
            c = conn.cursor()
            # Parameterized query: the original concatenated raw form input
            # into the SQL string (SQL-injection risk).
            c.execute(
                "insert into lecture(email,fname,lname,password,phone,gender) "
                "values(?,?,?,?,?,?);",
                (data['emailId'], data['name'], data['branch'],
                 data['password'], data['phone'], data['gender']))
            conn.commit()
            return redirect('/signin')
        except Exception as e:
            # A duplicate email (unique constraint) lands here.
            print(e, data)
            return render_template('signup.html', data="Username Already Exists")
    else:
        return redirect('/')
@app.route('/studentPassword', methods=["GET", "POST"])
def studentSet():
    """Let a student set their password exactly once.

    POST expects ``srn``, ``email`` and ``password``; the password may only
    be set while it is still empty and cannot be altered here afterwards.
    """
    if request.method == "GET":
        return render_template("student.html", msg="")
    else:
        data = request.form
        conn = sqlite3.connect('KM.db')
        c = conn.cursor()
        # Parameterized query (original concatenated form input into SQL).
        c.execute("select * from student where srn=? and emailID=?;",
                  (data['srn'], data['email']))
        rows = [list(row) for row in c]
        # Bug fix: the original tested ``a == 0`` (a list compared to an
        # int, always False), so a missing student crashed with IndexError
        # instead of showing this message.
        if len(rows) == 0:
            return render_template("student.html", msg="Student Does not exist")
        # Column 8 holds the password; assumed to be the empty string while
        # unset — TODO confirm against the student table schema.
        if rows[0][8] != "":
            return render_template("student.html", msg="Password Already Set, Can't alter")
        c.execute("update student set password=? where emailId=? and srn=?;",
                  (data['password'], data['email'], data['srn']))
        conn.commit()
        return redirect('/signin')
@app.route('/signin', methods=["GET", "POST"])
def signin():
    """Sign in as a student or lecturer.

    On success the session stores ``emailId`` plus two role flags:
    ``lec`` (1 for lecturers, 0 for students) and ``admin`` (1 for admin
    lecturers). Already-signed-in users are redirected home.
    """
    if request.method == "GET" and 'emailId' not in session:
        return render_template("signin.html")
    elif 'emailId' not in session:
        data = request.form
        conn = sqlite3.connect('KM.db')
        c = conn.cursor()
        d = conn.cursor()
        # Parameterized queries: the original concatenated the submitted
        # credentials straight into SQL (classic login-bypass injection).
        c.execute("select * from student where emailId=? and password=?;",
                  (data['emailId'], data['password']))
        d.execute("select * from lecture where email=? and password=?;",
                  (data['emailId'], data['password']))
        students = list(c)
        lecturers = list(d)
        if len(students) > 0:
            session['lec'] = 0
            session['admin'] = 0
        elif len(lecturers) > 0:
            session['lec'] = 1
            # Column 6 of the lecture row is the admin flag.
            session['admin'] = 1 if lecturers[0][6] == 1 else 0
        else:
            return render_template('signin.html', data="Username and password does not match")
        session['emailId'] = data['emailId']
        return redirect('/')
    else:
        return redirect('/')
@app.route('/signout')
def signout():
    """Clear the session for a signed-in user; abort with 400 otherwise."""
    if 'emailId' not in session:
        abort(400)
    for key in ('emailId', 'lec', 'admin'):
        del session[key]
    return redirect('/')
@app.route('/brainstorm', methods=['GET', 'POST', 'DELETE'])
def brainstorm():
    """Brainstorm board: GET lists all posts, POST adds one for the
    signed-in user. Anonymous users are redirected to sign-in.

    NOTE(review): 'DELETE' is declared in ``methods`` but has no handler
    branch, so a DELETE request falls through and returns None.
    """
    conn = sqlite3.connect('KM.db')
    if 'emailId' in session:
        if request.method == "GET":
            c = conn.cursor()
            c.execute("select * from brainstorm;")
            return render_template('brainstorm.html', data=list(c))
        elif request.method == "POST":
            data = request.form
            c = conn.cursor()
            date = str(datetime.now())
            # Parameterized insert (original concatenated user input into SQL).
            c.execute("insert into brainstorm values(?,?,?);",
                      (session['emailId'], date, data['post']))
            conn.commit()
            return redirect('/brainstorm')
    else:
        return redirect('/signin')
@app.route('/brainRefresh')
def refresh():
    """Render the brainstorm chat box, tagging each post with its author's
    role: 0 = student, 1 = regular lecturer, 2 = admin lecturer.

    NOTE(review): a post whose author email matches neither table gets no
    role entry, shifting the zip alignment — preserved from the original.
    """
    roles = []
    conn = sqlite3.connect('KM.db')
    e = conn.cursor()
    e.execute("select * from brainstorm;")
    posts = list(e)
    for post in posts:
        c = conn.cursor()
        d = conn.cursor()
        # Parameterized lookups (original interpolated the email into SQL).
        c.execute("select * from student where emailId=?;", (post[0],))
        d.execute("select * from lecture where email=?;", (post[0],))
        students = list(c)
        lecturers = list(d)
        if len(students) > 0:
            roles.append(0)
        elif len(lecturers) > 0:
            # Column 6 of the lecture row is the admin flag.
            roles.append(1 if lecturers[0][6] == 0 else 2)
    return render_template('chatbox.html', data=zip(posts, roles))
@app.route('/blog')
def blog():
    """List blog posts, optionally filtered by a ``hashtag`` query argument."""
    conn = sqlite3.connect('KM.db')
    hashtag = request.args.get('hashtag')
    c = conn.cursor()
    if hashtag:
        # Parameterized: hashtag comes straight from the query string
        # (the original concatenated it into the SQL — injection risk).
        c.execute("select post_id from hashtag where hashtag=?;", (hashtag,))
        post_ids = [row[0] for row in c]
        posts = []
        for post_id in post_ids:
            c.execute("select * from blog where post_id=?;", (post_id,))
            posts.append(list(c)[0])
        return render_template('blog1.html', data=posts)
    else:
        c.execute("select * from blog;")
        return render_template('blog1.html', data=list(c))
@app.route('/newpost', methods=['GET', 'POST'])
def newpost():
    """Create a blog post with an optional image upload and hashtags.

    Hashtags are harvested (``#word``) from both the post body and the
    dedicated ``hashtags`` field; duplicates for the same post are skipped.
    """
    conn = sqlite3.connect('KM.db')
    if request.method == "GET":
        if 'emailId' in session:
            return render_template('newpost.html')
        else:
            return redirect('/signin')
    elif request.method == "POST":
        try:
            data = request.form
            file = request.files['img']
            date = str(datetime.now())
            c = conn.cursor()
            # basename() strips any path components from the client-supplied
            # filename so an upload cannot escape the media folder.
            filename = os.path.basename(file.filename)
            # Parameterized insert (original concatenated form input into SQL).
            c.execute(
                "insert into blog(posted_by,heading,datetime,post,img) values(?,?,?,?,?);",
                (session['emailId'], data['heading'], date, data['post'], filename))
            conn.commit()
            c.execute("select post_id from blog where posted_by=? and heading=?;",
                      (session['emailId'], data['heading']))
            post_id = list(c)[0][0]
            hashtags = re.findall(r"#(\w+)", data['post']) + re.findall(r"#(\w+)", data['hashtags'])
            if filename != '':
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            for tag in hashtags:
                try:
                    c.execute("insert into hashtag(post_id,hashtag) values(?,?);",
                              (post_id, tag))
                except Exception as e:
                    # Duplicate (post_id, hashtag) pairs are rejected by the
                    # table and simply skipped.
                    print("hashtag reused", e)
            conn.commit()
            return redirect('/blog')
        except Exception as e:
            print(e)
            return redirect('/newpost')
@app.route('/post/<post_id>')
def post(post_id):
    """Show a single blog post together with its hashtags."""
    conn = sqlite3.connect('KM.db')
    c = conn.cursor()
    # Parameterized: post_id comes straight from the URL (the original
    # concatenated it into the SQL — injection risk).
    c.execute("select * from blog where post_id=?;", (post_id,))
    entry = list(c)[0]
    c.execute("select hashtag from hashtag where post_id=?;", (post_id,))
    tags = [row[0] for row in c]
    return render_template('post.html', data={"post": entry, "hashtag": tags})
@app.route('/student_select', methods=["POST", "GET"])
def student_select():
    """Search/filter students (lecturers only).

    Form-field convention preserved from the original UI:
      * empty value     -> include that column in the SELECT list
      * value "No"      -> ignore the field entirely
      * any other value -> filter on it in the WHERE clause
    GET renders the search form; POST runs the query and renders results.
    All user values are bound through ? placeholders (SQL-injection fix;
    this also repairs the original emailid branch that concatenated the
    value without quotes).
    """
    if 'emailId' not in session:
        return redirect('/signin')
    elif session['lec'] == 0:
        abort(403)
    if request.method == "GET":
        return render_template("student_select.html")

    data = request.form
    conn = sqlite3.connect('KM.db')
    ct = conn.cursor()
    print(data)

    columns = []     # columns the user asked to display (left empty)
    conditions = []  # SQL fragments containing ? placeholders
    params = []      # values bound to the placeholders

    def _handle(field, condition, value):
        """Apply the empty/"No"/value convention for one text field."""
        if data[field] == "":
            columns.append(field)
        elif data[field] != "No":
            conditions.append(condition)
            params.append(value)

    _handle("name", "name like ?", data["name"] + "%")

    # Gender uses fixed choices instead of free text.
    if data["gender"] == "m":
        conditions.append("gender like ?")
        params.append("Male")
    elif data["gender"] == "f":
        conditions.append("gender like ?")
        params.append("Female")
    elif data["gender"] == "any":
        columns.append("gender")

    _handle("srn", "srn like ?", data["srn"])
    _handle("emailid", "emailid like ?", data["emailid"])

    if data["program"] == "":
        columns.append("program")
    elif data["program"] != "No":
        # Map the short program code from the form to the full name
        # stored in the database.
        program_names = {
            "CE": "Civil Engineering",
            "ME": "Mechanical Engineering",
            "Biotech": "Biotechnology",
            "CSE": "Computer Science & Engineering",
            "ECE": "Electronics & Communication Engineering",
            "EEE": "Electrical & Electronics Engineering",
        }
        conditions.append("program like ?")
        params.append("B.Tech in " + program_names.get(data["program"], ""))

    _handle("enrolment_id", "enrolment_id = ?", data["enrolment_id"])
    _handle("phone", "phone like ?", data["phone"])
    _handle("year", "year = ?", data["year"])

    # Column names are joined from the fixed list above (never from user
    # input), so building the SELECT list by concatenation is safe.
    c = " , ".join(columns)
    w = " AND ".join(conditions)
    print(c, '\n', w)
    if c == "" and w == "":
        return "khaali"
    elif c == "":
        c = "*"
    if w == "":
        query = "select " + c + " from student"
    else:
        query = "select " + c + " from student where " + w
    ct.execute(query, params)
    conn.commit()
    print("Ye query hai " + query)
    print("Aur ye hai uska output")
    rows = list(ct)  # output of the query in the list
    print(rows)
    headings = [col.upper() for col in c.split(',')]
    return render_template('result.html', data=rows, heading=headings)
@app.route('/about')
def about():
    # Static page: renders the template with no dynamic data.
    return render_template('about.html')
@app.route('/contact')
def contact():
    # Static page: renders the template with no dynamic data.
    return render_template('contact.html')
@app.route('/courses')
def courses():
    # Static page: renders the template with no dynamic data.
    return render_template('courses.html')
@app.route('/')
def index():
    # Landing page: renders the template with no dynamic data.
    return render_template('index.html')
@app.route('/teacher')
def teacher():
    # Static page: renders the template with no dynamic data.
    return render_template('teacher.html')
@app.route('/pollsList')
def root():
    """Polls index: passes the total number of polls to the template."""
    conn = sqlite3.connect('KM.db')
    cursor = conn.cursor()
    cursor.execute("select count(*) from poll;")
    total = cursor.fetchone()[0]
    return render_template('pollsList.html', count=total)
@app.route('/poll/<poll_id>')
def poll(poll_id):
    """Show one poll, or record a vote when ?field=<option> is present.

    Args:
        poll_id: poll identifier taken from the URL path (untrusted input).
    """
    poll_data = {}
    vote = request.args.get('field')
    conn = sqlite3.connect('KM.db')
    c = conn.cursor()
    # Parameterized queries: poll_id and vote are user-controlled (URL
    # path / query string), so never concatenate them into SQL
    # (SQL-injection fix).
    c.execute('select question from poll where poll_id=?;', (poll_id,))
    poll_data['question'] = list(c)[0][0]
    c.execute('select data from poll_data where poll_id=?;', (poll_id,))
    poll_data['fields'] = [row[0] for row in c]
    if vote:
        print("here")
        c.execute(
            "update poll_data SET count=count+1 where poll_id=? and data=?;",
            (poll_id, vote))
        conn.commit()
        return render_template('thankyou.html', data=poll_data)
    else:
        return render_template('poll.html', data=poll_data, poll_id=poll_id)
@app.route('/resultList')
def res():
    """Results index: passes the total number of polls to the template."""
    conn = sqlite3.connect('KM.db')
    cursor = conn.cursor()
    cursor.execute("select count(*) from poll;")
    total = cursor.fetchone()[0]
    return render_template('resultList.html', count=total)
@app.route('/results/<poll_id>')
def show_results(poll_id):
    """Render the vote tallies for one poll.

    Args:
        poll_id: poll identifier taken from the URL path (untrusted input).
    """
    poll_data = {}
    conn = sqlite3.connect('KM.db')
    c = conn.cursor()
    # Parameterized queries: poll_id comes from the URL (SQL-injection fix).
    c.execute('select question from poll where poll_id=?;', (poll_id,))
    poll_data['question'] = list(c)[0][0]
    c.execute('select data from poll_data where poll_id=?;', (poll_id,))
    poll_data['fields'] = [row[0] for row in c]
    c.execute('select data,count from poll_data where poll_id=?;', (poll_id,))
    # Map option label -> vote count directly from the result rows.
    votes = {label: count for label, count in c}
    print(votes)
    return render_template('results.html', data=poll_data, votes=votes,
                           poll_id=poll_id)
if __name__=="__main__":
    # Development server only; debug=True must be disabled in production.
    app.run(port=5678,debug=True)
| 27.913793 | 220 | 0.60593 |
acf01c17b2c766a54fc3423c5dc12f94d7092dfa | 322 | py | Python | deploy/verify_local_cluster.py | qixiuai/vega | 3e6588ea4aedb03e3594a549a97ffdb86adb88d1 | [
"MIT"
] | 6 | 2020-11-13T15:44:47.000Z | 2021-12-02T08:14:06.000Z | deploy/verify_local_cluster.py | JacobLee121/vega | 19256aca4d047bfad3b461f0a927e1c2abb9eb03 | [
"MIT"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/deploy/verify_local_cluster.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-06-25T09:42:32.000Z | 2021-08-06T18:00:09.000Z | # -*- coding: utf-8 -*-
"""Verify local cluster."""
import sys
from dask.distributed import Client
def verify(master_ip):
    """Verify that the Dask scheduler of the local cluster is reachable.

    Args:
        master_ip: address of the Dask scheduler, e.g. "10.0.0.1:8786".

    Raises:
        ValueError: if a client cannot connect to the scheduler.
    """
    try:
        Client("{}".format(master_ip))
    except Exception as err:
        # Chain the original exception so the underlying connection
        # failure is not hidden (the original message was also garbled:
        # "Client can't running").
        raise ValueError(
            "Client can't connect to {}".format(master_ip)) from err
if __name__ == "__main__":
    # First CLI argument is the scheduler address, e.g. "10.0.0.1:8786".
    verify(sys.argv[1])
| 18.941176 | 48 | 0.627329 |
acf01c671d97f98386811b1db3b5bd47826d1a4d | 2,059 | py | Python | select subset photos for photoframe.py | artjomsR/photo-randomizer | 366593afb86f224a9ac72366c7a6dfd5d43bbb6b | [
"CNRI-Python"
] | null | null | null | select subset photos for photoframe.py | artjomsR/photo-randomizer | 366593afb86f224a9ac72366c7a6dfd5d43bbb6b | [
"CNRI-Python"
] | null | null | null | select subset photos for photoframe.py | artjomsR/photo-randomizer | 366593afb86f224a9ac72366c7a6dfd5d43bbb6b | [
"CNRI-Python"
] | null | null | null | import os, sys, glob, random, distutils, shutil
from pathlib import Path
from typing import DefaultDict
# Target size of the trimmed photo set (capacity of the photo frame).
maximum_no_of_files = 1735
# Folders with this many files or fewer are kept whole (never trimmed).
minimum_number_of_files_to_be_trimmed = 4
# Source library (read-only) and the working copy that gets trimmed.
original_dir = 'E:\JPG Export\Photo Frame\Sorted'
trim_dir = 'E:/PHOTOFRAME_PHOTOS_DELETE_ME'
# Separator between the album prefix and the rest of a file name.
folder_separator = '-- '
def copy_folder_with_overwrite(from_path, to_path):
    """Copy a directory tree, replacing the destination if it exists."""
    destination_exists = os.path.exists(to_path)
    if destination_exists:
        shutil.rmtree(to_path)
    print(f'copying {from_path} to {to_path}...')
    shutil.copytree(from_path, to_path)
def folder_structure_is_flat(path):
    """Return True when *path* holds more entries than the frame capacity."""
    entry_count = len(os.listdir(path))
    return entry_count > maximum_no_of_files
def put_files_in_subfolders(source_dir_path):
    """Group a flat directory of files into per-album subfolders.

    The album name is everything in the file name before the first
    occurrence of ``folder_separator``; when the separator is absent the
    whole base name is used (original behavior preserved).

    Args:
        source_dir_path: directory whose files are moved into subfolders.
    """
    for combined_filename in os.listdir(source_dir_path):
        basename, _extension = os.path.splitext(combined_filename)
        album_name = basename.split(folder_separator)[0]
        # os.path.join is portable and avoids the fragile "\" escapes the
        # original relied on inside f-strings.
        parent_folder_path = os.path.join(source_dir_path, album_name)
        if not os.path.exists(parent_folder_path):
            os.makedirs(parent_folder_path)
        shutil.move(os.path.join(source_dir_path, combined_filename),
                    os.path.join(parent_folder_path, combined_filename))
# Abort early when the source library is missing.
if not (os.path.isdir(original_dir)):
    input(f'{original_dir} does not exist, aborting...')
    sys.exit()
# Work on a disposable copy so the original library is never modified.
copy_folder_with_overwrite(original_dir, trim_dir)
if folder_structure_is_flat(trim_dir):
    put_files_in_subfolders(trim_dir)
# Fraction of photos to keep so the total fits the frame capacity.
total_files = len(list(Path(trim_dir).rglob("*.jpg")))
deletion_coefficient = maximum_no_of_files / total_files
# NOTE(review): if total_files <= maximum_no_of_files the coefficient
# exceeds 1 and no_of_files_to_delete below can go negative, making
# random.sample raise ValueError -- confirm inputs always exceed the cap.
print(f'Total files:{total_files}, deletion_coefficient = {deletion_coefficient}')
for root, subdirs, files in os.walk(trim_dir):
    for subdir in subdirs:
        print(os.path.join(root, subdir))
        os.chdir(os.path.join(root, subdir))
        folder_jpgs = glob.glob("*.jpg")
        # Small folders are kept whole; larger ones are trimmed pro rata.
        if len(folder_jpgs) > minimum_number_of_files_to_be_trimmed:
            no_of_files_to_delete = len(folder_jpgs) - int(len(folder_jpgs) * deletion_coefficient)
            print(f'files:{len(folder_jpgs)}, files to delete:{no_of_files_to_delete}')
            # Delete a random subset so the kept photos stay representative.
            for file in random.sample(folder_jpgs, no_of_files_to_delete):
                os.remove(file)
input("DONE")
acf01d136d3bb65c969be7efcb3344db11aa3ed9 | 9,895 | py | Python | setup/host_setup_runner.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | setup/host_setup_runner.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | setup/host_setup_runner.py | CarbonROM/android_tools_acloud | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2018 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""host setup runner
A setup sub task runner to support setting up the local host for AVD local
instance.
"""
from __future__ import print_function
import getpass
import logging
import os
import shutil
import subprocess
import sys
import tempfile
from acloud.internal import constants
from acloud.internal.lib import utils
from acloud.setup import base_task_runner
from acloud.setup import setup_common
import distro
# Module-level logger for this sub-task runner.
logger = logging.getLogger(__name__)
# Packages "devscripts" and "equivs" are required for "mk-build-deps".
_AVD_REQUIRED_PKGS_MAP = {
    "debian":
        ["devscripts", "equivs", "libvirt-clients", "libvirt-daemon-system"],
    "arch": ["libvirt", "dnsmasq"],
}
# Baseline host tooling per distro family ("AUR:" prefix marks packages
# that must come from the Arch User Repository).
_BASE_REQUIRED_PKGS_MAP = {
    "debian":
        ["ssvnc", "lzop", "python3-tk"],
    "arch": ["AUR:ssvnc-nojava", "python", "lzop", "xterm"],
}
# Arch installs cuttlefish-common from the AUR; Debian builds the .deb.
_CUTTLEFISH_COMMOM_PKG = "cuttlefish-common-git" if distro.like() == "arch" else "cuttlefish-common"
# Name of the temporary checkout folder used while building the package.
_CF_COMMOM_FOLDER = "cf-common"
# CPU vendor string, e.g. "AuthenticAMD"/"GenuineIntel" (read at import time).
_PROC_VENDOR_ID = subprocess.check_output("cat /proc/cpuinfo | grep vendor_id | awk '{print $3}'",
                                          shell=True).strip().decode("utf-8")
if _PROC_VENDOR_ID.startswith("AuthenticAMD"):
    _PROC_KVM_MODULE = "kvm_amd"
elif _PROC_VENDOR_ID.startswith("GenuineIntel"):
    _PROC_KVM_MODULE = "kvm_intel"
# NOTE(review): on any other vendor _PROC_KVM_MODULE is never assigned and
# the next line raises NameError at import time -- confirm supported CPUs.
_LIST_OF_MODULES = [_PROC_KVM_MODULE, "kvm"]
_UPDATE_APT_GET_CMD = "sudo apt-get update"
# Shell command sequences (later joined with newlines and run in one shell)
# to fetch and build/install cuttlefish-common per distro family.
_INSTALL_CUTTLEFISH_COMMOM_CMD_MAP = {
    "debian": [
        "git clone https://github.com/google/android-cuttlefish.git {git_folder}",
        "cd {git_folder}",
        "yes | sudo mk-build-deps -i -r -B",
        "dpkg-buildpackage -uc -us",
        "sudo apt-get install -y -f ../cuttlefish-common_*_amd64.deb"
    ],
    "arch": [
        "git clone https://aur.archlinux.org/cuttlefish-common-git.git {git_folder}",
        "cd {git_folder}",
        "makepkg --noconfirm -s -i -f",
    ]
}
class BasePkgInstaller(base_task_runner.BaseTaskRunner):
    """Subtask base runner class for installing packages."""

    # List of packages for child classes to override.
    PACKAGES = []

    def ShouldRun(self):
        """Check if required packages are all installed.

        Returns:
            Boolean, True if required packages are not installed.
        """
        if not utils.IsSupportedPlatform():
            return False
        # The task must run as soon as any required package is missing
        # or not up-to-date.
        return any(not setup_common.PackageInstalled(pkg_name)
                   for pkg_name in self.PACKAGES)

    def _Run(self):
        """Install specified packages."""
        missing = [pkg for pkg in self.PACKAGES
                   if not setup_common.PackageInstalled(pkg)]
        installable_pkgs = [pkg for pkg in missing
                            if not pkg.startswith("AUR:")]
        aur_pkgs = [pkg for pkg in missing if pkg.startswith("AUR:")]
        cmd = "\n".join(
            [setup_common.PKG_INSTALL_CMD_MAP[distro.like()] % pkg
             for pkg in installable_pkgs])
        if installable_pkgs:
            if not utils.GetUserAnswerYes("\nStart to install package(s):\n%s"
                                          "\nEnter 'y' to continue, otherwise N or "
                                          "enter to exit: " % cmd):
                sys.exit(constants.EXIT_BY_USER)
            if distro.like() == 'debian':
                setup_common.CheckCmdOutput(_UPDATE_APT_GET_CMD, shell=True)
            for pkg in installable_pkgs:
                setup_common.InstallPackage(pkg)
        if aur_pkgs:
            for pkg in aur_pkgs:
                # AUR packages cannot be installed automatically here;
                # tell the user and bail out.
                utils.PrintColorString("AUR package missing: %s" % pkg[len("AUR:"):], utils.TextColors.FAIL)
            sys.exit(constants.EXIT_BY_ERROR)
        logger.info("All package(s) installed now.")
class AvdPkgInstaller(BasePkgInstaller):
    """Subtask runner class for installing packages for local instances."""
    WELCOME_MESSAGE_TITLE = ("Install required packages for host setup for "
                             "local instances")
    WELCOME_MESSAGE = ("This step will walk you through the required packages "
                       "installation for running Android cuttlefish devices "
                       "on your host.")
    # Distro-specific packages needed to run local cuttlefish instances.
    PACKAGES = _AVD_REQUIRED_PKGS_MAP[distro.like()]
class HostBasePkgInstaller(BasePkgInstaller):
    """Subtask runner class for installing base host packages."""
    WELCOME_MESSAGE_TITLE = "Install base packages on the host"
    WELCOME_MESSAGE = ("This step will walk you through the base packages "
                       "installation for your host.")
    # Distro-specific baseline tooling (VNC viewer, compression, etc.).
    PACKAGES = _BASE_REQUIRED_PKGS_MAP[distro.like()]
class CuttlefishCommonPkgInstaller(base_task_runner.BaseTaskRunner):
    """Subtask base runner class for installing cuttlefish-common."""

    WELCOME_MESSAGE_TITLE = "Install cuttlefish-common packages on the host"
    WELCOME_MESSAGE = ("This step will walk you through the cuttlefish-common "
                       "packages installation for your host.")

    def ShouldRun(self):
        """Check if cuttlefish-common package is installed.

        Returns:
            Boolean, True if cuttlefish-common is not installed.
        """
        if not utils.IsSupportedPlatform():
            return False
        # Install only when the package is missing or not up-to-date.
        return not setup_common.PackageInstalled(_CUTTLEFISH_COMMOM_PKG)

    def _Run(self):
        """Build and install the cuttlefish-common package."""
        checkout_dir = os.path.join(tempfile.mkdtemp(), _CF_COMMOM_FOLDER)
        logger.debug("cuttlefish-common path: %s", checkout_dir)
        cmd = "\n".join(step.format(git_folder=checkout_dir)
                        for step in _INSTALL_CUTTLEFISH_COMMOM_CMD_MAP[distro.like()])
        if not utils.GetUserAnswerYes("\nStart to install cuttlefish-common :\n%s"
                                      "\nEnter 'y' to continue, otherwise N or "
                                      "enter to exit: " % cmd):
            sys.exit(constants.EXIT_BY_USER)
        try:
            setup_common.CheckCmdOutput(cmd, shell=True)
        finally:
            # Remove the whole temporary directory, not just the checkout.
            shutil.rmtree(os.path.dirname(checkout_dir))
        logger.info("Cuttlefish-common package installed now.")
class CuttlefishHostSetup(base_task_runner.BaseTaskRunner):
    """Subtask class that setup host for cuttlefish."""

    WELCOME_MESSAGE_TITLE = "Host Enviornment Setup"
    WELCOME_MESSAGE = (
        "This step will help you to setup enviornment for running Android "
        "cuttlefish devices on your host. That includes adding user to kvm "
        "related groups and checking required linux modules."
    )

    def ShouldRun(self):
        """Check host user groups and modules.

        Returns:
            Boolean: False if user is in all required groups and all modules
                     are reloaded.
        """
        if not utils.IsSupportedPlatform():
            return False
        # Setup is needed when either the group membership or the loaded
        # kernel modules are not in the expected state.
        return not (utils.CheckUserInGroups(constants.LIST_CF_USER_GROUPS)
                    and self._CheckLoadedModules(_LIST_OF_MODULES))

    @staticmethod
    def _CheckLoadedModules(module_list):
        """Check if the modules are all in use.

        Args:
            module_list: The list of module names to look for.

        Returns:
            True if all modules are in use.
        """
        logger.info("Checking if modules are loaded: %s", module_list)
        lsmod_output = setup_common.CheckCmdOutput("lsmod", print_cmd=False)
        # The first column of every lsmod row is the module name.
        current_modules = {row.split()[0] for row in lsmod_output.splitlines()}
        all_modules_present = True
        for module in module_list:
            if module not in current_modules:
                logger.info("missing module: %s", module)
                all_modules_present = False
        return all_modules_present

    def _Run(self):
        """Setup host environment for local cuttlefish instance support."""
        # TODO: provide --uid args to let user use prefered username
        username = getpass.getuser()
        # Reload the KVM modules so changed permissions take effect.
        setup_cmds = [
            "sudo rmmod " + _PROC_KVM_MODULE,
            "sudo rmmod kvm",
            "sudo modprobe kvm",
            "sudo modprobe " + _PROC_KVM_MODULE]
        for group in constants.LIST_CF_USER_GROUPS:
            # Fixed format string: the original used "%s % s", which only
            # produced the right output because the space flag is ignored
            # for %s conversions.
            setup_cmds.append("sudo usermod -aG %s %s" % (group, username))
        print("Below commands will be run:")
        for setup_cmd in setup_cmds:
            print(setup_cmd)
        if self._ConfirmContinue():
            for setup_cmd in setup_cmds:
                setup_common.CheckCmdOutput(setup_cmd, shell=True)
            print("Host environment setup has done!")

    @staticmethod
    def _ConfirmContinue():
        """Ask user if they want to continue.

        Returns:
            True if user answer yes.
        """
        answer_client = utils.InteractWithQuestion(
            "\nEnter 'y' to continue, otherwise N or enter to exit: ",
            utils.TextColors.WARNING)
        return answer_client in constants.USER_ANSWER_YES
acf01daa7d2fe8fe6d68fd7bc68459ac1c8268ec | 3,553 | py | Python | lstm_ai_coder_token_many2many.py | chvcks/RNN-AI-Programmer | 1303edbaffd3ae37cc33442ae974e7bd909a6229 | [
"Apache-2.0"
] | null | null | null | lstm_ai_coder_token_many2many.py | chvcks/RNN-AI-Programmer | 1303edbaffd3ae37cc33442ae974e7bd909a6229 | [
"Apache-2.0"
] | null | null | null | lstm_ai_coder_token_many2many.py | chvcks/RNN-AI-Programmer | 1303edbaffd3ae37cc33442ae974e7bd909a6229 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import LSTM
from keras.layers import TimeDistributed
from keras.optimizers import RMSprop
import numpy as np
import random
import sys
# Token corpus: JDK source pre-tokenized to one-token-per-whitespace.
path = "./jdk-tokens.txt"
filetext = open(path).read().lower()
# Keep only 1% of the corpus to make training tractable.
# NOTE(review): the name "slice" shadows the builtin of the same name.
slice = len(filetext)/100
slice = int (slice)
filetext = filetext[:slice]
tokenized = filetext.split()
print('# of tokens:', len(tokenized))
# Vocabulary: sorted unique tokens, plus token<->index lookup tables.
uniqueTokens = sorted(list(set(tokenized)))
print('total # of unique tokens:', len(uniqueTokens))
token_indices = dict((c, i) for i, c in enumerate(uniqueTokens))
indices_token = dict((i, c) for i, c in enumerate(uniqueTokens))
# cut the text in semi-redundant sequences of maxlen characters
NUM_INPUT_TOKENS = 10
step = 3
sequences = []
# Each training sample is NUM_INPUT_TOKENS+1 tokens: the extra token lets
# X (inputs) and y (targets) be built as shifted views of the same window.
for i in range(0, len(tokenized) - NUM_INPUT_TOKENS-1, step):
    sequences.append(tokenized[i: i + NUM_INPUT_TOKENS+1])
print('# of training sequences:', len(sequences))
print('Vectorization...')
# One-hot encode every token of every sequence.
# NOTE(review): np.bool is deprecated (removed in NumPy 1.24); newer NumPy
# needs plain bool here.
X_temp = np.zeros((len(sequences), NUM_INPUT_TOKENS + 1, len(uniqueTokens)), dtype=np.bool)
X = np.zeros((len(sequences), NUM_INPUT_TOKENS, len(uniqueTokens)), dtype=np.bool)
y = np.zeros((len(sequences), NUM_INPUT_TOKENS, len(uniqueTokens)), dtype=np.bool)
for i, sequence in enumerate(sequences):
    for t, char in enumerate(sequence):
        X_temp[i, t, token_indices[char]] = 1
num_sequences = len(X_temp)
# y is the window shifted one token ahead of X (many-to-many targets).
for i, vec in enumerate(X_temp):
    y[i] = vec[1:]
    X[i]= vec[:-1]
# build the model: a single LSTM
print('Build model...')
model = Sequential()
# 1-layer LSTM
model.add(LSTM(128, input_shape=(NUM_INPUT_TOKENS, len(uniqueTokens)), return_sequences=True))
# 2-layer LSTM
#model.add(LSTM(128, return_sequences=True, input_shape=(NUM_INPUT_TOKENS, len(uniqueTokens))))
#model.add(LSTM(128, return_sequences=True))
# TimeDistributed applies the softmax classifier to every timestep.
model.add(TimeDistributed(Dense(len(uniqueTokens))))
model.add(Activation('softmax'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='categorical_crossentropy', optimizer=optimizer)
print(model.summary())
def sample(preds, temperature=1.0):
    """Draw one token index from *preds*, reshaped by *temperature*.

    Lower temperatures sharpen the distribution (more greedy choices);
    higher temperatures flatten it (more random choices).
    """
    probabilities = np.asarray(preds).astype('float64')
    probabilities = np.log(probabilities) / temperature
    exponentiated = np.exp(probabilities)
    normalized = exponentiated / np.sum(exponentiated)
    draw = np.random.multinomial(1, normalized, 1)
    return np.argmax(draw)
# train the model, output generated text after each iteration
for iteration in range(1, 60):
    print()
    print('-' * 50)
    print('Iteration', iteration)
    model.fit(X, y, batch_size=128, epochs=1)
    # Pick a random seed window from the corpus for generation.
    start_index = random.randint(0, len(tokenized) - NUM_INPUT_TOKENS - 1)
    # Generate at several sampling temperatures to compare outputs.
    for diversity in [0.2, 0.5, 1.0, 1.2]:
        print()
        print('----- diversity:', diversity)
        generated = [] #''
        sequence = tokenized[start_index: start_index + NUM_INPUT_TOKENS]
        generated=list(sequence)
        print('----- Generating with seed: "' + ' '.join(sequence) + '"-------')
        sys.stdout.write(' '.join(generated))
        # Generate 100 tokens, feeding each prediction back as input.
        for i in range(100):
            # One-hot encode the current sliding window of tokens.
            x = np.zeros((1, NUM_INPUT_TOKENS, len(uniqueTokens)))
            for t, char in enumerate(sequence):
                x[0, t, token_indices[char]] = 1.
            preds = model.predict(x, verbose=0)[0][-1] # only get the last element
            next_index = sample(preds, diversity)
            next_pred_token = indices_token[next_index]
            # Slide the window: drop the oldest token, append the new one.
            sequence = sequence[1:]
            sequence.append(next_pred_token)
            sys.stdout.write(next_pred_token+" ")
            sys.stdout.flush()
        print()
acf01e081747ac7a6b4fd91fd7c133833275160b | 2,315 | py | Python | npyscreen/eveventhandler.py | TheSciBoy/npyscreen | ee7f4712d3df5d96c4b1fe8ce3ea7229dcc0a60d | [
"BSD-2-Clause"
] | null | null | null | npyscreen/eveventhandler.py | TheSciBoy/npyscreen | ee7f4712d3df5d96c4b1fe8ce3ea7229dcc0a60d | [
"BSD-2-Clause"
] | null | null | null | npyscreen/eveventhandler.py | TheSciBoy/npyscreen | ee7f4712d3df5d96c4b1fe8ce3ea7229dcc0a60d | [
"BSD-2-Clause"
] | null | null | null | import weakref
class Event(object):
# a basic event class
def __init__(self, name, payload=None):
self.name = name
self.payload = payload
class EventHandler(object):
# This partial base class provides the framework to handle events.
def __init__(self):
self.event_handlers = None
def initialize_event_handling(self):
self.event_handlers = {}
def add_event_handler(self, event_name, handler):
if event_name not in self.event_handlers:
self.event_handlers[event_name] = set() # weakref.WeakSet() #Why doesn't the WeakSet work?
self.event_handlers[event_name].add(handler)
parent_app = self.find_parent_app()
if parent_app:
parent_app.register_for_event(self, event_name)
else:
# Probably are the parent App!
# but could be a form outside a proper application environment
try:
self.register_for_event(self, event_name)
except AttributeError:
pass
def remove_event_handler(self, event_name, handler):
if event_name in self.event_handlers:
self.event_handlers[event_name].remove(handler)
if not self.event_handlers[event_name]:
self.event_handlers.pop({})
def handle_event(self, event):
"""
Handle the given event.
:param event: The event to handle.
:return: True if the event was handled. Return False if the application should stop sending this event.
"""
if event.name not in self.event_handlers:
return False
else:
remove_list = []
for handler in self.event_handlers[event.name]:
try:
handler(event)
except weakref.ReferenceError:
remove_list.append(handler)
for dead_handler in remove_list:
self.event_handlers[event.name].remove(dead_handler)
return True
def find_parent_app(self):
if hasattr(self, "parentApp"):
return self.parentApp
elif hasattr(self, "parent") and hasattr(self.parent, "parentApp"):
return self.parent.parentApp
else:
return None
| 34.044118 | 111 | 0.603888 |
acf01edfbee17561eb9cf8729d496272cd0ed5b6 | 1,176 | py | Python | compss/programming_model/bindings/python/src/pycompss/tests/resources/functions.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | [
"Apache-2.0"
] | 31 | 2018-03-06T09:30:03.000Z | 2022-03-23T09:51:05.000Z | compss/programming_model/bindings/python/src/pycompss/tests/resources/functions.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | [
"Apache-2.0"
] | 3 | 2020-08-28T17:16:50.000Z | 2021-11-11T21:58:02.000Z | compss/programming_model/bindings/python/src/pycompss/tests/resources/functions.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | [
"Apache-2.0"
] | 15 | 2018-06-07T10:03:27.000Z | 2022-02-23T14:59:42.000Z | #!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pycompss.functions.data import generator
def check_generator():
    """Check that each distribution type yields distinct generated data."""
    distributions = ("random", "normal", "uniform")
    random_data, normal_data, uniform_data = (
        generator((12, 12), 4, 5, dist, True) for dist in distributions)
    assert (
        random_data != normal_data != uniform_data
    ), "The generator did not produce different data for different distributions"  # noqa: E501
def main():
    # Single entry point: run the generator sanity check.
    check_generator()
# Uncomment for command line check:
# if __name__ == "__main__":
#     main()
| 30.947368 | 95 | 0.708333 |
acf01fa6f564bd94c630cb21974ff04d56929876 | 751 | py | Python | forms/migrations/0001_initial.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
] | null | null | null | forms/migrations/0001_initial.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
] | null | null | null | forms/migrations/0001_initial.py | TheDjangoBoys/Gymkhana-Nominations | 6ce13fb3a21fe91630e0c8fdaf597e61c87f2d06 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-25 13:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); avoid hand-editing once
    # this migration has been applied anywhere.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Questionnaire',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, null=True)),
                ('description', models.TextField(default="I'm a description!", max_length=250)),
                ('status', models.CharField(blank=True, max_length=100, null=True)),
            ],
        ),
    ]
acf02096fffe8b38e68824878fa698ed69d3895c | 15,016 | py | Python | tensorflow/python/profiler/model_analyzer.py | tianyapiaozi/tensorflow | fb3ce0467766a8e91f1da0ad7ada7c24fde7a73a | [
"Apache-2.0"
] | 71 | 2017-05-25T16:02:15.000Z | 2021-06-09T16:08:08.000Z | tensorflow/python/profiler/model_analyzer.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 133 | 2017-04-26T16:49:49.000Z | 2019-10-15T11:39:26.000Z | tensorflow/python/profiler/model_analyzer.py | shrikunjsarda/tensorflow | 7e8927e7af0c51ac20a63bd4eab6ff83df1a39ae | [
"Apache-2.0"
] | 31 | 2018-09-11T02:17:17.000Z | 2021-12-15T10:33:35.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model Analyzer.
Analyze model, including shape, params, time, memory, structure, etc.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from google.protobuf import message
from tensorflow.core.profiler import tfprof_options_pb2
from tensorflow.core.profiler import tfprof_output_pb2
from tensorflow.python import pywrap_tensorflow as print_mdl
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.profiler import option_builder
from tensorflow.python.profiler import tfprof_logger
from tensorflow.python.util.tf_export import tf_export
# Sentinel values meaning "use the default options" for profile()/advise().
_DEFAULT_PROFILE_OPTIONS = 0
_DEFAULT_ADVISE_OPTIONS = 0
# The following options are for 'advise' cmd.
# Show all advice.
ALL_ADVICE = {
    'ExpensiveOperationChecker': {},
    'AcceleratorUtilizationChecker': {},
    'JobChecker': {}, # Only available internally.
    'OperationChecker': {},
}
def _graph_string(graph):
"""Helper to serialize a graph to string."""
if graph:
return graph.as_graph_def(add_shapes=True).SerializeToString()
else:
return b''
def _build_options(options):
  """Build tfprof.OptionsProto.

  Args:
    options: A dictionary of options.
  Returns:
    tfprof.OptionsProto.
  """
  opts = tfprof_options_pb2.OptionsProto()
  # Scalar fields, paired with their defaults.
  scalar_defaults = [
      ('max_depth', 10),
      ('min_bytes', 0),
      ('min_peak_bytes', 0),
      ('min_residual_bytes', 0),
      ('min_output_bytes', 0),
      ('min_micros', 0),
      ('min_accelerator_micros', 0),
      ('min_cpu_micros', 0),
      ('min_params', 0),
      ('min_float_ops', 0),
      ('min_occurrence', 0),
      ('step', -1),
      ('order_by', 'name'),
  ]
  for field_name, default in scalar_defaults:
    setattr(opts, field_name, options.get(field_name, default))
  # Repeated (list) fields: extend with whatever the caller supplied.
  for field_name in ('account_type_regexes', 'start_name_regexes',
                     'trim_name_regexes', 'show_name_regexes',
                     'hide_name_regexes', 'select'):
    getattr(opts, field_name).extend(options.get(field_name, []))
  opts.account_displayed_op_only = options.get('account_displayed_op_only',
                                               False)
  opts.output = options.get('output', 'stdout')
  opts.dump_to_file = options.get('dump_to_file', '')
  return opts
def _build_advisor_options(options):
  """Build tfprof.AdvisorOptionsProto.

  Args:
    options: A dictionary of options. See ALL_ADVICE example.
  Returns:
    tfprof.AdvisorOptionsProto.
  """
  opts = tfprof_options_pb2.AdvisorOptionsProto()
  if options is None:
    return opts
  for checker_name, checker_opts in six.iteritems(options):
    option_pb = tfprof_options_pb2.AdvisorOptionsProto.CheckerOption()
    for key, value in six.iteritems(checker_opts):
      option_pb[key] = value
    opts.checkers[checker_name].MergeFrom(option_pb)
  return opts
@tf_export('profiler.Profiler')
class Profiler(object):
  """TensorFlow multi-step profiler.

  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  ```python
  Typical use case:
    # Currently we are only allowed to create 1 profiler per process.
    profiler = Profiler(sess.graph)

    for i in xrange(total_steps):
      if i % 10000 == 0:
        run_meta = tf.RunMetadata()
        _ = sess.run(...,
                     options=tf.RunOptions(
                         trace_level=tf.RunOptions.FULL_TRACE),
                     run_metadata=run_meta)
        profiler.add_step(i, run_meta)

        # Profile the parameters of your model.
        profiler.profile_name_scope(options=(option_builder.ProfileOptionBuilder
            .trainable_variables_parameter()))

        # Or profile the timing of your model operations.
        opts = option_builder.ProfileOptionBuilder.time_and_memory()
        profiler.profile_operations(options=opts)

        # Or you can generate a timeline:
        opts = (option_builder.ProfileOptionBuilder(
                option_builder.ProfileOptionBuilder.time_and_memory())
                .with_step(i)
                .with_timeline_output(filename).build())
        profiler.profile_graph(options=opts)
      else:
        _ = sess.run(...)
    # Auto detect problems and generate advice.
    profiler.advise()
  ```
  """

  def __init__(self, graph=None, op_log=None):
    """Constructor.

    Args:
      graph: tf.Graph. If None and eager execution is not enabled, use
          default graph.
      op_log: optional. tensorflow::tfprof::OpLogProto proto. Used to define
          extra op types.
    """
    if not graph and not context.executing_eagerly():
      graph = ops.get_default_graph()
    # Fraction of statistics matched to graph nodes; updated by add_step().
    self._coverage = 0.0
    self._graph = graph
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, op_log=op_log)
    # pylint: enable=protected-access
    # Hand the serialized graph and op log to the native profiler
    # (only one native profiler may exist per process).
    print_mdl.NewProfiler(
        _graph_string(self._graph), op_log.SerializeToString())

  def __del__(self):
    # Release the single per-process native profiler.
    print_mdl.DeleteProfiler()

  def add_step(self, step, run_meta):
    """Add statistics of a step.

    Args:
      step: int, An id used to group one or more different `run_meta` together.
          When profiling with the profile_xxx APIs, user can use the `step`
          id in the `options` to profile these `run_meta` together.
      run_meta: RunMetadata proto that contains statistics of a session run.
    """
    # pylint: disable=protected-access
    op_log = tfprof_logger.merge_default_with_oplog(
        self._graph, run_meta=run_meta)
    # pylint: enable=protected-access
    # TODO(xpan): P1: Better to find the current graph.
    # AddStep returns the fraction of run_meta nodes matched to the graph.
    self._coverage = print_mdl.AddStep(step, _graph_string(self._graph),
                                       run_meta.SerializeToString(),
                                       op_log.SerializeToString())

  def profile_python(self, options):
    """Profile the statistics of the Python codes.

    By default, it shows the call stack from root. To avoid
    redundant output, you may use options to filter as below
      options['show_name_regexes'] = ['.*my_code.py.*']

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    opts = _build_options(options)
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
    try:
      tfprof_node.ParseFromString(
          print_mdl.Profile('code'.encode('utf-8'), opts.SerializeToString()))
    except message.DecodeError as e:
      # Native layer returned malformed bytes; report but still return
      # the (possibly empty) proto.
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
    return tfprof_node

  def profile_operations(self, options):
    """Profile the statistics of the Operation types (e.g. MatMul, Conv2D).

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a MultiGraphNodeProto that records the results.
    """
    opts = _build_options(options)
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
    try:
      tfprof_node.ParseFromString(
          print_mdl.Profile('op'.encode('utf-8'), opts.SerializeToString()))
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
    return tfprof_node

  def profile_name_scope(self, options):
    """Profile the statistics of graph nodes, organized by name scope.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    opts = _build_options(options)
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
    try:
      tfprof_node.ParseFromString(
          print_mdl.Profile('scope'.encode('utf-8'), opts.SerializeToString()))
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
    return tfprof_node

  def profile_graph(self, options):
    """Profile the statistics of graph nodes, organized by dataflow graph.

    Args:
      options: A dict of options. See core/profiler/g3doc/options.md.
    Returns:
      a GraphNodeProto that records the results.
    """
    opts = _build_options(options)
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
    try:
      tfprof_node.ParseFromString(
          print_mdl.Profile('graph'.encode('utf-8'), opts.SerializeToString()))
    except message.DecodeError as e:
      sys.stderr.write('Cannot parse returned proto: %s.\n' % e)
    return tfprof_node

  def advise(self, options):
    """Automatically detect problems and generate reports.

    Args:
      options: A dict of options. See ALL_ADVICE example above.
    Returns:
      An Advise proto that contains the reports from all checkers.
    """
    advise_pb = tfprof_output_pb2.AdviceProto()
    opts = _build_advisor_options(options)
    advise_pb.ParseFromString(
        print_mdl.Profile('advise'.encode('utf-8'), opts.SerializeToString()))
    return advise_pb

  def serialize_to_string(self):
    """Serialize the ProfileProto to a binary string.

    Users can write it to file for offline analysis by tfprof commandline
    or graphical interface.

    Returns:
      ProfileProto binary string.
    """
    return print_mdl.SerializeToString()

  def _write_profile(self, filename):
    """Writes the profile to a file."""
    print_mdl.WriteProfile(filename)
@tf_export('profiler.profile')
def profile(graph=None,
            run_meta=None,
            op_log=None,
            cmd='scope',
            options=_DEFAULT_PROFILE_OPTIONS):
  """Profile model.

  Tutorials and examples can be found in:
  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: tf.Graph. If None and eager execution is not enabled, use
        default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
    op_log: tensorflow.tfprof.OpLogProto proto. User can assign "types" to
        graph nodes with op_log. "types" allow user to flexibly group and
        account profiles using options['accounted_type_regexes'].
    cmd: string. Either 'op', 'scope', 'graph' or 'code'.
        'op' view organizes profile using operation type. (e.g. MatMul)
        'scope' view organizes profile using graph node name scope.
        'graph' view organizes profile using graph node inputs/outputs.
        'code' view organizes profile using Python call stack.
    options: A dict of options. See core/profiler/g3doc/options.md.
  Returns:
    If cmd is 'scope' or 'graph', returns GraphNodeProto proto.
    If cmd is 'op' or 'code', returns MultiGraphNodeProto proto.
    Side effect: stdout/file/timeline.json depending on options['output']
  Raises:
    errors.InvalidArgumentError: if `cmd` is not one of the four views.
  """
  if not graph and not context.executing_eagerly():
    graph = ops.get_default_graph()

  if options == _DEFAULT_PROFILE_OPTIONS:
    options = (option_builder.ProfileOptionBuilder
               .trainable_variables_parameter())

  # pylint: disable=protected-access
  op_log = tfprof_logger.merge_default_with_oplog(
      graph, op_log, run_meta, add_trace=cmd == 'code')
  # pylint: enable=protected-access

  opts = _build_options(options)
  run_meta_str = run_meta.SerializeToString() if run_meta else b''
  graph_str = _graph_string(graph)

  # The result proto type depends only on the view; the native call and
  # parsing are identical for all views, so dispatch the type up front
  # instead of duplicating the call in each branch.
  if cmd in ('code', 'op'):
    tfprof_node = tfprof_output_pb2.MultiGraphNodeProto()
  elif cmd in ('graph', 'scope'):
    tfprof_node = tfprof_output_pb2.GraphNodeProto()
  else:
    raise errors.InvalidArgumentError(
        None, None, 'unknown cmd: %s\n' % cmd)

  ret = print_mdl.PrintModelAnalysis(graph_str, run_meta_str,
                                     op_log.SerializeToString(),
                                     cmd.encode('utf-8'),
                                     opts.SerializeToString())
  try:
    tfprof_node.ParseFromString(ret)
  except message.DecodeError as e:
    # Report a malformed native result but still return the proto.
    sys.stderr.write('Cannot parse returned proto: %s.\n' % e)

  return tfprof_node
@tf_export('profiler.advise')
def advise(graph=None, run_meta=None, options=_DEFAULT_ADVISE_OPTIONS):
  """Auto profile and advise.

  Builds profiles and automatically check anomalies of various
  aspects. For more details:
  https://github.com/tensorflow/tensorflow/tree/master/tensorflow/core/profiler/README.md

  Args:
    graph: tf.Graph. If None and eager execution is not enabled, use
        default graph.
    run_meta: optional tensorflow.RunMetadata proto. It is necessary to
        support run time information profiling, such as time and memory.
    options: see ALL_ADVICE example above. Default checks everything.
  Returns:
    Returns AdviceProto proto
  """
  # BUG FIX: previously this read `if not graph and
  # context.in_eager_execution():` — an API name not used anywhere else in
  # this module and, compared with profile() above, an inverted condition:
  # in graph mode `graph` stayed None. Fall back to the default graph
  # exactly as profile() does.
  if not graph and not context.executing_eagerly():
    graph = ops.get_default_graph()

  if options == _DEFAULT_ADVISE_OPTIONS:
    options = ALL_ADVICE.copy()

  # pylint: disable=protected-access
  op_log = tfprof_logger.merge_default_with_oplog(
      graph, None, run_meta, add_trace=True)
  # pylint: enable=protected-access

  run_meta_str = run_meta.SerializeToString() if run_meta else b''

  opts = _build_advisor_options(options)
  ret = tfprof_output_pb2.AdviceProto()
  ret.ParseFromString(
      print_mdl.PrintModelAnalysis(
          _graph_string(graph), run_meta_str, op_log.SerializeToString(),
          'advise'.encode('utf-8'), opts.SerializeToString()))
  return ret
| 35.667458 | 91 | 0.689331 |
acf020b71f04bffea904b7532665b26fab96af0f | 7,601 | py | Python | pynetdicom3/apps/echoscp/echoscp.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | 2 | 2019-02-07T08:02:30.000Z | 2019-03-20T04:00:20.000Z | pynetdicom3/apps/echoscp/echoscp.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | null | null | null | pynetdicom3/apps/echoscp/echoscp.py | mcgregorandrew/pynetdicom3 | 1c798f9b0ad086cf0a8d0619e57f2bc2fbbf13f1 | [
"MIT"
] | 2 | 2020-09-27T06:41:41.000Z | 2021-02-07T06:53:02.000Z | #!/usr/bin/env python
"""
An echoscp application.
Used for verifying basic DICOM connectivity and as such has a focus on
providing useful debugging and logging information.
"""
import argparse
import logging
import os
import socket
import sys
from pydicom.uid import (
ExplicitVRLittleEndian, ImplicitVRLittleEndian, ExplicitVRBigEndian
)
from pynetdicom3 import AE
from pynetdicom3.sop_class import VerificationSOPClass
# Module-level logger: one stream handler with a terse
# "<level-initial>: message" format; errors only unless raised by CLI flags.
# NOTE(review): instantiating logging.Logger directly (instead of
# logging.getLogger('echoscp')) bypasses the logging manager, so this
# logger is invisible to logging.getLogger elsewhere — confirm intended.
LOGGER = logging.Logger('echoscp')
stream_logger = logging.StreamHandler()
formatter = logging.Formatter('%(levelname).1s: %(message)s')
stream_logger.setFormatter(formatter)
LOGGER.addHandler(stream_logger)
LOGGER.setLevel(logging.ERROR)

VERSION = '0.4.1'
def _setup_argparser():
    """Setup the command line arguments"""
    # Description
    parser = argparse.ArgumentParser(
        description="The echoscp application implements a Service Class "
                    "Provider (SCP) for the Verification SOP Class. It listens "
                    "for a DICOM C-ECHO message from a Service Class User "
                    "(SCU) and sends a response. The application can be used "
                    "to verify basic DICOM connectivity.",
        usage="echoscp [options] port")

    # Parameters
    req_opts = parser.add_argument_group('Parameters')
    req_opts.add_argument("port",
                          help="TCP/IP port number to listen on",
                          type=int)

    # General Options
    gen_opts = parser.add_argument_group('General Options')
    gen_opts.add_argument("--version",
                          help="print version information and exit",
                          action="store_true")
    gen_opts.add_argument("-q", "--quiet",
                          help="quiet mode, print no warnings and errors",
                          action="store_true")
    gen_opts.add_argument("-v", "--verbose",
                          help="verbose mode, print processing details",
                          action="store_true")
    gen_opts.add_argument("-d", "--debug",
                          help="debug mode, print debug information",
                          action="store_true")
    # NOTE(review): the 'fatal' and 'trace' choices must be mapped to real
    # logging levels when the parsed value is consumed.
    gen_opts.add_argument("-ll", "--log-level", metavar='[l]',
                          help="use level l for the LOGGER (fatal, error, warn, "
                               "info, debug, trace)",
                          type=str,
                          choices=['fatal', 'error', 'warn',
                                   'info', 'debug', 'trace'])
    gen_opts.add_argument("-lc", "--log-config", metavar='[f]',
                          help="use config file f for the LOGGER",
                          type=str)

    # Network Options
    net_opts = parser.add_argument_group('Network Options')
    net_opts.add_argument("-aet", "--aetitle", metavar='[a]etitle',
                          help="set my AE title (default: ECHOSCP)",
                          type=str,
                          default='ECHOSCP')
    net_opts.add_argument("-to", "--timeout", metavar='[s]econds',
                          help="timeout for connection requests",
                          type=int,
                          default=None)
    net_opts.add_argument("-ta", "--acse-timeout", metavar='[s]econds',
                          help="timeout for ACSE messages",
                          type=int,
                          default=60)
    net_opts.add_argument("-td", "--dimse-timeout", metavar='[s]econds',
                          help="timeout for DIMSE messages",
                          type=int,
                          default=None)
    net_opts.add_argument("-pdu", "--max-pdu", metavar='[n]umber of bytes',
                          help="set max receive pdu to n bytes (4096..131072)",
                          type=int,
                          default=16382)

    # Transfer Syntaxes
    ts_opts = parser.add_argument_group('Preferred Transfer Syntaxes')
    ts_opts.add_argument("-x=", "--prefer-uncompr",
                         help="prefer explicit VR local byte order (default)",
                         action="store_true", default=True)
    ts_opts.add_argument("-xe", "--prefer-little",
                         help="prefer explicit VR little endian TS",
                         action="store_true")
    ts_opts.add_argument("-xb", "--prefer-big",
                         help="prefer explicit VR big endian TS",
                         action="store_true")
    ts_opts.add_argument("-xi", "--implicit",
                         help="accept implicit VR little endian TS only",
                         action="store_true")

    return parser.parse_args()
args = _setup_argparser()

# Logging/Output
if args.quiet:
    # Silence both this script's logger and the pynetdicom3 library logger.
    for h in LOGGER.handlers:
        LOGGER.removeHandler(h)
    LOGGER.addHandler(logging.NullHandler())

    pynetdicom_logger = logging.getLogger('pynetdicom3')
    for h in pynetdicom_logger.handlers:
        pynetdicom_logger.removeHandler(h)
    pynetdicom_logger.addHandler(logging.NullHandler())

if args.verbose:
    LOGGER.setLevel(logging.INFO)
    pynetdicom_logger = logging.getLogger('pynetdicom3')
    pynetdicom_logger.setLevel(logging.INFO)

if args.debug:
    LOGGER.setLevel(logging.DEBUG)
    pynetdicom_logger = logging.getLogger('pynetdicom3')
    pynetdicom_logger.setLevel(logging.DEBUG)

if args.log_level:
    # BUG FIX: the mapping previously keyed 'critical' (not a valid CLI
    # choice) and omitted 'fatal' and 'trace' (which ARE valid choices),
    # so `-ll fatal` / `-ll trace` raised KeyError. Map 'fatal' to
    # CRITICAL and 'trace' to DEBUG (the closest stdlib levels).
    levels = {'fatal': logging.CRITICAL,
              'error': logging.ERROR,
              'warn': logging.WARNING,
              'info': logging.INFO,
              'debug': logging.DEBUG,
              'trace': logging.DEBUG}
    LOGGER.setLevel(levels[args.log_level])
    pynetdicom_logger = logging.getLogger('pynetdicom3')
    pynetdicom_logger.setLevel(levels[args.log_level])

if args.log_config:
    # BUG FIX: fileConfig was called without ever being imported
    # (NameError). Import it locally since it is only needed here.
    from logging.config import fileConfig
    fileConfig(args.log_config)

LOGGER.debug('echoscp.py v{0!s}'.format(VERSION))
LOGGER.debug('')

# Validate port: try binding it first so we can give a clear error message
# before handing it to the AE.
if isinstance(args.port, int):
    test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        # Use socket.gethostname() instead of shelling out via
        # os.popen('hostname') — same value, no subprocess.
        test_socket.bind((socket.gethostname(), args.port))
    except socket.error:
        LOGGER.error("Cannot listen on port {}, insufficient privileges or "
                     "already in use".format(args.port))
        sys.exit()
    finally:
        # Release the probe socket so the AE can bind the port.
        test_socket.close()

# Set Transfer Syntax options
transfer_syntax = [ImplicitVRLittleEndian,
                   ExplicitVRLittleEndian,
                   ExplicitVRBigEndian]

if args.prefer_uncompr:
    transfer_syntax = [ExplicitVRLittleEndian,
                       ExplicitVRBigEndian,
                       ImplicitVRLittleEndian]
if args.implicit:
    transfer_syntax = [ImplicitVRLittleEndian]
if args.prefer_little and ExplicitVRLittleEndian in transfer_syntax:
    # Move explicit VR little endian to the front of the preference list.
    transfer_syntax.remove(ExplicitVRLittleEndian)
    transfer_syntax.insert(0, ExplicitVRLittleEndian)
if args.prefer_big and ExplicitVRBigEndian in transfer_syntax:
    transfer_syntax.remove(ExplicitVRBigEndian)
    transfer_syntax.insert(0, ExplicitVRBigEndian)
def on_c_echo(context, info):
    """AE.on_c_echo callback: always acknowledge a C-ECHO with Success.

    A pydicom Dataset carrying a (0000,0900) Status element could be
    returned instead; a plain integer status code is sufficient here.
    """
    status_success = 0x0000
    return status_success
# Create application entity: a Verification SCP listening on args.port
# with the preferred transfer syntaxes computed above.
ae = AE(ae_title=args.aetitle, port=args.port)
ae.add_supported_context(VerificationSOPClass, transfer_syntax)
ae.maximum_pdu_size = args.max_pdu

# Set timeouts
ae.network_timeout = args.timeout
ae.acse_timeout = args.acse_timeout
ae.dimse_timeout = args.dimse_timeout

# Set callback
ae.on_c_echo = on_c_echo

# Blocks, serving C-ECHO requests until interrupted.
ae.start()
| 36.023697 | 81 | 0.611893 |
acf02182f382e5ee6bb37880309d6c995f339a01 | 3,707 | py | Python | PaperExperiments/XHExp562/parameters.py | stefan-c-kremer/TE_World2 | 8e1fae218af8a1eabae776deecac62192c22e0ca | [
"MIT"
] | null | null | null | PaperExperiments/XHExp562/parameters.py | stefan-c-kremer/TE_World2 | 8e1fae218af8a1eabae776deecac62192c22e0ca | [
"MIT"
] | null | null | null | PaperExperiments/XHExp562/parameters.py | stefan-c-kremer/TE_World2 | 8e1fae218af8a1eabae776deecac62192c22e0ca | [
"MIT"
] | null | null | null |
# parameters.py
"""
Exp 562 - {'Initial_genes': '500', 'Host_mutation_rate': '0.03', 'TE_progeny': '0.15, 0, 0.55, 1, 0.30, 2', 'TE_Insertion_Distribution': 'Triangle( pmax=0, pzero=3.0/3.0 )', 'Carrying_capacity': '30', 'TE_excision_rate': '0.1', 'Junk_BP': '14', 'Gene_Insertion_Distribution': 'Triangle( pzero=1.0/3.0, pmax=1 )', 'mutation_effect': '0.10', 'TE_death_rate': '0.005'}
"""

# NOTE(review): the wildcard import is assumed to provide Triangle,
# ProbabilityTable and (presumably) `random` used by the lambdas below
# — TODO confirm against TEUtil.
from TEUtil import *;

# note that "#" indicates a comment

# set the following to True if you want messages printed to the screen
# while the program runs - search for these keywords in TESim.py to see
# what each one prints out
output = {
    "SPLAT": False,
    "SPLAT FITNESS": False,
    "INITIALIZATION": False,
    "GENERATION": True,
    "HOST EXTINCTION": True,
    "TE EXTINCTION": True,
    "TRIAL NO": True,
    "GENE INIT": False,
    "TE INIT": False,
};

TE_Insertion_Distribution = Triangle( pmax=0, pzero=3.0/3.0 );
Gene_Insertion_Distribution = Triangle( pzero=1.0/3.0, pmax=1 );
# Triangle( pmax, pzero ) generates values between pmax and pzero with
# a triangular probability distribution, where pmax is the point of highest
# probability, and pzero is the point of lowest probability
# - you can change the orientation of the triangle by reversing the values
# of pmax and pzero
# Flat() generates values between 0 and 1 with uniform probability

Gene_length = 1000;     # use 1000?
TE_length = 1000;       # use 1000?
TE_death_rate = 0.005;
TE_excision_rate = 0.1; # set this to zero for retro transposons
# for retro transposons this is the probability of the given number of progeny
# for dna transposons this is the probability of the given number of progeny
# ___PLUS___ the original re-inserting
TE_progeny = ProbabilityTable( 0.15, 0, 0.55, 1, 0.30, 2 );

Initial_genes = 500;
Append_gene = True;     # True: when the intialization routine tries to place
                        # a gene inside another gene, it instead appends it
                        # at the end of the original gene (use this with small
                        # amounts of Junk_BP).
                        # False: when the intialization routine tries to place
                        # a gene inside another gene, try to place it somewhere
                        # else again (don't use theis option with samll amounts
                        # of Junk_BP).
Initial_TEs = 1;
MILLION = 1000000;
Junk_BP = 14 * MILLION;

Host_start_fitness = 1.0;
Host_mutation_rate = 0.03;
# Per-mutation fitness effect distribution: (probability, effect) pairs.
Host_mutation = ProbabilityTable( 0.40, lambda fit: 0.0,
                                  0.30, lambda fit: fit - random.random()*0.10,
                                  0.15, lambda fit: fit,
                                  0.15, lambda fit: fit + random.random()*0.10
                                  );

# what happens when a TA hits a gene
Insertion_effect = ProbabilityTable(0.30, lambda fit: 0.0,
                                    0.20, lambda fit: fit - random.random()*0.10,
                                    0.30, lambda fit: fit,
                                    0.20, lambda fit: fit + random.random()*0.10
                                    );

Carrying_capacity = 30;
Host_reproduction_rate = 1;     # how many offspring each host has
Host_survival_rate = lambda propfit: min( Carrying_capacity * propfit, 0.95 );
# propfit = proportion of fitness owned by this individual

Maximum_generations = 1500;
Terminate_no_TEs = True;        # end simulation if there are no TEs left

# seed = 0;
seed = None;    # if seed = None, the random number generator's initial state is
                # set "randomly"

save_frequency = 50;    # Frequency with with which to save state of experiment
saved = None;   # if saved = None then we start a new simulation from scratch
                # if saves = string, then we open that file and resume a simulation
| 38.614583 | 365 | 0.648773 |
acf022345e3b098b170a3bfcb610c9fb6444dafe | 1,336 | py | Python | panel/template/golden/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | 1 | 2021-07-06T21:07:45.000Z | 2021-07-06T21:07:45.000Z | panel/template/golden/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | 2 | 2022-01-13T03:54:51.000Z | 2022-03-12T01:01:00.000Z | panel/template/golden/__init__.py | Jacob-Barhak/panel | 04cad38ea703e4e69fb76f063a27f4ffe40688e8 | [
"BSD-3-Clause"
] | null | null | null | """
GoldenTemplate based on the golden-layout library.
"""
import pathlib
import param
from ...layout import Card
from ..base import BasicTemplate
from ..theme import DarkTheme, DefaultTheme
class GoldenTemplate(BasicTemplate):
    """
    GoldenTemplate is built on top of golden-layout library.
    """

    # Template-specific stylesheet and HTML skeleton, shipped alongside
    # this module.
    _css = pathlib.Path(__file__).parent / 'golden.css'

    _template = pathlib.Path(__file__).parent / 'golden.html'

    # Per-component overrides applied when components render inside this
    # template.
    _modifiers = {
        Card: {
            'children': {'margin': (10, 10)},
            'button_css_classes': ['golden-card-button']
        },
    }

    # External CSS/JS pulled in by the rendered page. golden-layout
    # depends on jQuery, hence the extra script entry.
    _resources = {
        'css': {
            'goldenlayout': "https://golden-layout.com/files/latest/css/goldenlayout-base.css",
        },
        'js': {
            'jquery': "http://code.jquery.com/jquery-1.11.1.min.js",
            'goldenlayout': "https://golden-layout.com/files/latest/js/goldenlayout.js"
        }
    }

    def _apply_root(self, name, model, tags):
        # Panes placed in the 'main' area get extra margin so they do not
        # touch the layout chrome.
        if 'main' in tags:
            model.margin = (10, 15, 10, 10)
class GoldenDefaultTheme(DefaultTheme):
    """Light theme variant for :class:`GoldenTemplate`."""

    css = param.Filename(default=pathlib.Path(__file__).parent / 'default.css')

    # Associates this theme with the golden template.
    _template = GoldenTemplate
class GoldenDarkTheme(DarkTheme):
    """Dark theme variant for :class:`GoldenTemplate`."""

    css = param.Filename(default=pathlib.Path(__file__).parent / 'dark.css')

    # Associates this theme with the golden template.
    _template = GoldenTemplate
| 24.290909 | 95 | 0.627994 |
acf022bdf1f27a2d338f7c77260a57f764a64986 | 48 | py | Python | Python/2-1/7.py | Xi-Plus/Codecademy | e703eeafd2e36ef558ece0577243070e2802403d | [
"MIT"
] | null | null | null | Python/2-1/7.py | Xi-Plus/Codecademy | e703eeafd2e36ef558ece0577243070e2802403d | [
"MIT"
] | null | null | null | Python/2-1/7.py | Xi-Plus/Codecademy | e703eeafd2e36ef558ece0577243070e2802403d | [
"MIT"
] | null | null | null | parrot = "norwegian blue"
print(parrot.upper()) | 16 | 25 | 0.729167 |
acf02310fa1bddaadfd63ab3acb3eea26bf8ed2b | 712 | py | Python | checkov/serverless/checks/plugin/base_plugin_check.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 4,013 | 2019-12-09T13:16:54.000Z | 2022-03-31T14:31:01.000Z | checkov/serverless/checks/plugin/base_plugin_check.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 1,258 | 2019-12-17T09:55:51.000Z | 2022-03-31T19:17:17.000Z | checkov/serverless/checks/plugin/base_plugin_check.py | pmalkki/checkov | b6cdf386dd976fe27c16fed6d550756a678a5d7b | [
"Apache-2.0"
] | 638 | 2019-12-19T08:57:38.000Z | 2022-03-30T21:38:37.000Z | from abc import abstractmethod
from checkov.common.checks.base_check import BaseCheck
from checkov.serverless.checks.plugin.registry import plugin_registry
class BasePluginCheck(BaseCheck):
    """Base class for checks that inspect a serverless 'plugins' list.

    Subclasses implement :meth:`scan_plugin_list`; instantiating a
    subclass automatically registers it with the serverless plugin
    registry.
    """

    def __init__(self, name, id, categories, supported_entities, guideline=None):
        # `block_type` is fixed to "serverless" for all plugin checks.
        super().__init__(name=name, id=id, categories=categories,
                         supported_entities=supported_entities,
                         block_type="serverless", guideline=guideline)
        # Self-registration: creating the check instance activates it.
        plugin_registry.register(self)

    def scan_entity_conf(self, conf, entity_type):
        # Delegate the generic entity hook to the plugin-list hook.
        return self.scan_plugin_list(conf)

    @abstractmethod
    def scan_plugin_list(self, plugin_list):
        raise NotImplementedError()
| 35.6 | 81 | 0.72191 |
acf0235c75f1dd912b1bdca28cb1f0c5cf818756 | 229 | py | Python | tests/__init__.py | cromulus/check-list | 5e6d7f657ac14688dae181839d1bf38cedf6a232 | [
"MIT"
] | null | null | null | tests/__init__.py | cromulus/check-list | 5e6d7f657ac14688dae181839d1bf38cedf6a232 | [
"MIT"
] | null | null | null | tests/__init__.py | cromulus/check-list | 5e6d7f657ac14688dae181839d1bf38cedf6a232 | [
"MIT"
] | null | null | null |
import os
# Set ENVIRONMENT=TESTING before loading any packages so that they load with the TESTING configuration
# config.py checks environment variable ENVIRONMENT for setting: MONGODB_DB,
os.environ["ENVIRONMENT"] = "TESTING" | 38.166667 | 102 | 0.80786 |
acf02361f8712bdffd2d9810d2dbeecb637d32ac | 2,084 | py | Python | code/validation/test-formatting.py | adrianxdev/covid19-forecast-hub | f6950b88023aa173025f4f742a9f0e9a0845e76c | [
"MIT"
] | null | null | null | code/validation/test-formatting.py | adrianxdev/covid19-forecast-hub | f6950b88023aa173025f4f742a9f0e9a0845e76c | [
"MIT"
] | null | null | null | code/validation/test-formatting.py | adrianxdev/covid19-forecast-hub | f6950b88023aa173025f4f742a9f0e9a0845e76c | [
"MIT"
] | null | null | null | from zoltpy.covid19 import validate_quantile_csv_file
import glob
from pprint import pprint
import sys
import os
import pandas as pd
import datetime
# Check for metadata file
def check_for_metadata(my_path):
    """Print a warning for each model directory missing its metadata file.

    Each team/model directory is expected to contain a file named
    metadata-<team-model>.txt.
    """
    for model_dir in glob.iglob(my_path + "**/**/", recursive=False):
        team_model = os.path.basename(os.path.dirname(model_dir))
        expected_name = "metadata-" + team_model + ".txt"
        present = [
            os.path.basename(txt_path)
            for txt_path in glob.iglob(model_dir + "*.txt", recursive=False)
        ]
        if expected_name not in present:
            print("MISSING ", expected_name)
# Check forecast formatting
def check_formatting(my_path):
    """Validate every not-yet-validated forecast CSV under my_path.

    Previously validated file paths are read from
    code/validation/validated_files.csv; newly passing files are appended
    to it with a validation timestamp. If any file fails validation the
    errors are printed and the build is aborted via sys.exit.
    """
    output_errors = {}
    df = pd.read_csv('code/validation/validated_files.csv')
    previous_checked = list(df['file_path'])

    # Iterate through processed csvs
    for path in glob.iglob(my_path + "**/**/", recursive=False):
        for filepath in glob.iglob(path + "*.csv", recursive=False):
            if filepath not in previous_checked:
                file_error = validate_quantile_csv_file(filepath)
                if file_error != 'no errors':
                    output_errors[filepath] = file_error
                else:
                    # add to previously checked files
                    current_time = datetime.datetime.now()
                    df = df.append({'file_path': filepath,
                                    'validation_date': current_time}, ignore_index=True)

    # update previously checked files
    df.to_csv('code/validation/validated_files.csv', index=False)

    # Output list of Errors
    if len(output_errors) > 0:
        for filename, errors in output_errors.items():
            print("\n* ERROR IN '", filename, "'")
            for error in errors:
                print(error)
        # Non-zero exit so CI marks the build failed.
        sys.exit("\n ERRORS FOUND EXITING BUILD...")
    else:
        print("✓ no errors")
def main():
    """Entry point: check metadata presence, then forecast formatting."""
    my_path = "./data-processed"
    check_for_metadata(my_path)
    check_formatting(my_path)


if __name__ == "__main__":
    main()
| 34.163934 | 88 | 0.622361 |
acf023cb89a43c83f0e9ae2b0d7348b3c13aacf0 | 13,118 | py | Python | jenkinsapi/build.py | ifwe/jenkinsapi | 31a7fbb07efcd48e226f7dcf643fd2a2625416c0 | [
"MIT"
] | 1 | 2015-01-12T14:15:59.000Z | 2015-01-12T14:15:59.000Z | jenkinsapi/build.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | null | null | null | jenkinsapi/build.py | moustuk/jenkinsapi-1 | d18c1e669965c209093763f3295f79c9d3ccdeea | [
"MIT"
] | null | null | null | """
A jenkins build represents a single execution of a Jenkins Job.
Builds can be thought of as the second level of the jenkins heirarchy
beneath Jobs. Builds can have state, such as whether they are running or
not. They can also have outcomes, such as wether they passed or failed.
Build objects can be associated with Results and Artifacts.g
"""
import time
import pytz
import logging
import warnings
import datetime
from time import sleep
from jenkinsapi import config
from jenkinsapi.artifact import Artifact
from jenkinsapi.result_set import ResultSet
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.constants import STATUS_SUCCESS
from jenkinsapi.custom_exceptions import NoResults
log = logging.getLogger(__name__)
class Build(JenkinsBase):
"""
Represents a jenkins build, executed in context of a job.
"""
STR_TOTALCOUNT = "totalCount"
STR_TPL_NOTESTS_ERR = "%s has status %s, and does not have any test results"
def __init__(self, url, buildno, job):
assert type(buildno) == int
self.buildno = buildno
self.job = job
JenkinsBase.__init__(self, url)
def _poll(self):
#For build's we need more information for downstream and upstream builds
#so we override the poll to get at the extra data for build objects
url = self.python_api_url(self.baseurl) + '?depth=2'
return self.get_data(url)
def __str__(self):
return self._data['fullDisplayName']
@property
def name(self):
return str(self)
def get_number(self):
return self._data["number"]
def get_status(self):
return self._data["result"]
def get_revision(self):
vcs = self._data['changeSet']['kind'] or 'git'
return getattr(self, '_get_%s_rev' % vcs, lambda: None)()
def get_revision_branch(self):
vcs = self._data['changeSet']['kind'] or 'git'
return getattr(self, '_get_%s_rev_branch' % vcs, lambda: None)()
def _get_svn_rev(self):
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
maxRevision = 0
for repoPathSet in self._data["changeSet"]["revisions"]:
maxRevision = max(repoPathSet["revision"], maxRevision)
return maxRevision
def _get_git_rev(self):
# Sometimes we have None as part of actions. Filter those actions
# which have lastBuiltRevision in them
_actions = [x for x in self._data['actions']
if x and "lastBuiltRevision" in x]
return _actions[0]["lastBuiltRevision"]["SHA1"]
def _get_hg_rev(self):
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
return [x['mercurialNodeName'] for x in self._data['actions'] if 'mercurialNodeName' in x][0]
def _get_svn_rev_branch(self):
raise NotImplementedError('_get_svn_rev_branch is not yet implemented')
def _get_git_rev_branch(self):
# Sometimes we have None as part of actions. Filter those actions
# which have lastBuiltRevision in them
_actions = [x for x in self._data['actions']
if x and "lastBuiltRevision" in x]
return _actions[0]["lastBuiltRevision"]["branch"]
def _get_hg_rev_branch(self):
raise NotImplementedError('_get_hg_rev_branch is not yet implemented')
def get_duration(self):
return datetime.timedelta(milliseconds=self._data["duration"])
def get_artifacts(self):
for afinfo in self._data["artifacts"]:
url = "%s/artifact/%s" % (self.baseurl, afinfo["relativePath"])
af = Artifact(afinfo["fileName"], url, self)
yield af
def get_artifact_dict(self):
return dict(
(af.filename, af) for af in self.get_artifacts()
)
def get_upstream_job_name(self):
"""
Get the upstream job name if it exist, None otherwise
:return: String or None
"""
try:
return self.get_actions()['causes'][0]['upstreamProject']
except KeyError:
return None
def get_upstream_job(self):
"""
Get the upstream job object if it exist, None otherwise
:return: Job or None
"""
if self.get_upstream_job_name():
return self.get_jenkins_obj().get_job(self.get_upstream_job_name())
else:
return None
def get_upstream_build_number(self):
"""
Get the upstream build number if it exist, None otherwise
:return: int or None
"""
try:
return int(self.get_actions()['causes'][0]['upstreamBuild'])
except KeyError:
return None
def get_upstream_build(self):
"""
Get the upstream build if it exist, None otherwise
:return Build or None
"""
upstream_job = self.get_upstream_job()
if upstream_job:
return upstream_job.get_build(self.get_upstream_build_number())
else:
return None
def get_master_job_name(self):
"""
Get the master job name if it exist, None otherwise
:return: String or None
"""
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
try:
return self.get_actions()['parameters'][0]['value']
except KeyError:
return None
def get_master_job(self):
"""
Get the master job object if it exist, None otherwise
:return: Job or None
"""
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
if self.get_master_job_name():
return self.get_jenkins_obj().get_job(self.get_master_job_name())
else:
return None
def get_master_build_number(self):
"""
Get the master build number if it exist, None otherwise
:return: int or None
"""
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
try:
return int(self.get_actions()['parameters'][1]['value'])
except KeyError:
return None
def get_master_build(self):
"""
Get the master build if it exist, None otherwise
:return Build or None
"""
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
master_job = self.get_master_job()
if master_job:
return master_job.get_build(self.get_master_build_number())
else:
return None
def get_downstream_jobs(self):
"""
Get the downstream jobs for this build
:return List of jobs or None
"""
warnings.warn("This untested function may soon be removed from Jenkinsapi.")
downstream_jobs = []
try:
for job_name in self.get_downstream_job_names():
downstream_jobs.append(self.get_jenkins_obj().get_job(job_name))
return downstream_jobs
except (IndexError, KeyError):
return []
def get_downstream_job_names(self):
"""
Get the downstream job names for this build
:return List of string or None
"""
# <<<<<<< HEAD
# downstream_jobs_names = self.job.get_downstream_job_names()
# fingerprint_data = self.get_data("%s?depth=2&tree=fingerprint[usage[name]]" \
# % self.python_api_url(self.baseurl))
# try:
# fingerprints = fingerprint_data['fingerprint'][0]
# return [
# f['name']
# for f in fingerprints['usage']
# if f['name'] in downstream_jobs_names
# ]
# =======
downstream_job_names = self.job.get_downstream_job_names()
downstream_names = []
try:
fingerprints = self._data["fingerprint"]
for fingerprint in fingerprints:
for job_usage in fingerprint['usage']:
if job_usage['name'] in downstream_job_names:
downstream_names.append(job_usage['name'])
return downstream_names
# >>>>>>> unstable
except (IndexError, KeyError):
return []
def get_downstream_builds(self):
    """
    Get the downstream builds for this build
    :return List of Build or None
    """
    # Removed: commented-out merge-conflict residue (<<<<<<< HEAD /
    # >>>>>>> unstable markers) that shadowed this, the active branch.
    downstream_job_names = self.get_downstream_job_names()
    downstream_builds = []
    try:
        for fingerprint in self._data["fingerprint"]:
            for job_usage in fingerprint['usage']:
                if job_usage['name'] not in downstream_job_names:
                    continue
                job = self.get_jenkins_obj().get_job(job_usage['name'])
                # NOTE(review): 'end' is treated as exclusive here --
                # confirm against the Jenkins fingerprint range API.
                for job_range in job_usage['ranges']['ranges']:
                    for build_id in range(job_range['start'],
                                          job_range['end']):
                        downstream_builds.append(job.get_build(build_id))
        return downstream_builds
    except (IndexError, KeyError):
        return []
def get_matrix_runs(self):
    """
    For a matrix job, get the individual builds for each
    matrix configuration
    :return: Generator of Build
    """
    # Matrix (multi-configuration) builds expose per-axis runs under the
    # "runs" key; ordinary builds lack the key and this yields nothing.
    if "runs" in self._data:
        for rinfo in self._data["runs"]:
            yield Build(rinfo["url"], rinfo["number"], self.job)
def is_running(self):
    """
    Return a bool if running.
    """
    # Re-poll first so the answer reflects the server's current state.
    self.poll()
    state = self._data
    return state["building"]
def block(self):
    # Busy-wait (1 s granularity) until the build finishes; each probe
    # re-polls the server via is_running().
    while self.is_running():
        time.sleep(1)
def is_good(self):
    """
    Return a bool, true if the build was good.
    If the build is still running, return False.
    """
    # A build that is still running can never be "good" yet.
    if self.is_running():
        return False
    return self._data["result"] == STATUS_SUCCESS
def block_until_complete(self, delay=15):
    """
    Poll the build until it is no longer running.

    :param delay: seconds to sleep between polls
    :raises TypeError: if ``delay`` is not an int
    """
    # A plain ``assert`` is stripped when Python runs with -O, so validate
    # the argument explicitly instead.
    if not isinstance(delay, int):
        raise TypeError("delay must be an int, got %r" % (delay,))
    count = 0
    while self.is_running():
        total_wait = delay * count
        log.info(msg="Waited %is for %s #%s to complete" % (total_wait, self.job.name, self.name))
        sleep(delay)
        count += 1
def get_jenkins_obj(self):
    # Delegate to the owning job -- a build never holds its own Jenkins handle.
    return self.job.get_jenkins_obj()
def get_result_url(self):
    """
    Return the URL for the object which provides the job's result summary.
    """
    # testReport/<api suffix> hangs directly off the build's own URL.
    return r"%stestReport/%s" % (self._data["url"], config.JENKINS_API)
def get_resultset(self):
    """
    Obtain detailed results for this build.
    """
    # Raises NoResults when the build published no test report at all, or
    # when a report exists but its total test count is zero/falsy.
    result_url = self.get_result_url()
    if self.STR_TOTALCOUNT not in self.get_actions():
        raise NoResults("%s does not have any published results" % str(self))
    buildstatus = self.get_status()
    if not self.get_actions()[self.STR_TOTALCOUNT]:
        raise NoResults(self.STR_TPL_NOTESTS_ERR % (str(self), buildstatus))
    obj_results = ResultSet(result_url, build=self)
    return obj_results
def has_resultset(self):
    """
    Return a boolean, true if a result set is available. false if not.
    """
    # A published test report is signalled by the total-count action key.
    actions = self.get_actions()
    return self.STR_TOTALCOUNT in actions
def get_actions(self):
    # Merge the per-plugin action dictionaries into one flat mapping,
    # skipping the null entries Jenkins emits for some plugins.
    merged = {}
    for action in self._data["actions"]:
        if action is not None:
            merged.update(action)
    return merged
def get_timestamp(self):
    '''
    Returns build timestamp in UTC
    '''
    # Java timestamps are given in miliseconds since the epoch start!
    # time.gmtime()[:6] keeps only (Y, M, D, h, m, s), so any sub-second
    # part of the Jenkins timestamp is truncated.
    naive_timestamp = datetime.datetime(*time.gmtime(self._data['timestamp'] / 1000.0)[:6])
    return pytz.utc.localize(naive_timestamp)
def get_console(self):
    """
    Return the current state of the text console.
    """
    # NOTE(review): returns the requester response's raw .content --
    # callers may need to decode it; confirm requester semantics.
    url = "%s/consoleText" % self.baseurl
    return self.job.jenkins.requester.get_url(url).content
def stop(self):
    """
    Stops the build execution if it's running
    :return boolean True if succeded False otherwise or the build is not running
    """
    # Nothing to do for a finished build.
    if not self.is_running():
        return False
    stop_url = "%s/stop" % self.baseurl
    self.job.jenkins.requester.post_and_confirm_status(stop_url, data='')
    return True
| 34.612137 | 104 | 0.602912 |
acf02414f7f2a37275a02f0e64ddaa70f640e022 | 3,615 | py | Python | client_cifar.py | sandracl72/flower | bb7f6e2e1f52753820784d262618113b4e7ebc42 | [
"Apache-2.0"
] | null | null | null | client_cifar.py | sandracl72/flower | bb7f6e2e1f52753820784d262618113b4e7ebc42 | [
"Apache-2.0"
] | null | null | null | client_cifar.py | sandracl72/flower | bb7f6e2e1f52753820784d262618113b4e7ebc42 | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
import src.py.flwr as fl
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def load_data():
"""Load CIFAR-10 (training and test set)."""
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
trainset = CIFAR10(".", train=True, download=True, transform=transform)
testset = CIFAR10(".", train=False, download=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=32, shuffle=True)
testloader = DataLoader(testset, batch_size=32)
num_examples = {"trainset" : len(trainset), "testset" : len(testset)}
return trainloader, testloader, num_examples
def train(net, trainloader, epochs):
    """Train the network on the training set."""
    # Plain SGD + cross-entropy; one optimizer step per mini-batch.
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
    for _ in range(epochs):
        for images, labels in trainloader:
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            optimizer.zero_grad()
            loss = criterion(net(images), labels)
            loss.backward()
            optimizer.step()
def test(net, testloader):
    """Validate the network on the entire test set."""
    # Returns (summed cross-entropy loss over batches, top-1 accuracy).
    criterion = torch.nn.CrossEntropyLoss()
    correct, total, loss = 0, 0, 0.0
    with torch.no_grad():
        for data in testloader:
            images, labels = data[0].to(DEVICE), data[1].to(DEVICE)
            outputs = net(images)
            loss += criterion(outputs, labels).item()
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = correct / total
    return loss, accuracy
class Net(nn.Module):
    """LeNet-style CNN for 32x32 RGB CIFAR-10 images (10 output classes)."""

    def __init__(self) -> None:
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Two conv/pool stages shrink 32x32 inputs to 16 feature maps of 5x5.
        out = self.pool(F.relu(self.conv1(x)))
        out = self.pool(F.relu(self.conv2(out)))
        out = out.view(-1, 16 * 5 * 5)
        # Classifier head: two hidden FC layers, then raw class logits.
        out = F.relu(self.fc2(F.relu(self.fc1(out))))
        return self.fc3(out)
# Load model and data
# (module-level side effects: instantiates the model on DEVICE and, on the
# first run, downloads CIFAR-10 into the working directory)
net = Net().to(DEVICE)
trainloader, testloader, num_examples = load_data()
class CifarClient(fl.client.NumPyClient):
    # Flower client bridging the module-level `net` and data loaders to a
    # federated-learning server.

    def get_parameters(self):
        # Serialise model weights as a list of NumPy arrays (Flower's format).
        return [val.cpu().numpy() for _, val in net.state_dict().items()]

    def set_parameters(self, parameters):
        # Rebuild a state_dict from the server-supplied arrays; relies on
        # `parameters` matching net.state_dict() key order.
        params_dict = zip(net.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        net.load_state_dict(state_dict, strict=True)

    def get_properties(self, config):
        return {}

    def fit(self, parameters, config):
        # One local training epoch per federated round.
        self.set_parameters(parameters)
        train(net, trainloader, epochs=1)
        return self.get_parameters(), num_examples["trainset"], {}

    def evaluate(self, parameters, config):
        self.set_parameters(parameters)
        loss, accuracy = test(net, testloader)
        return float(loss), num_examples["testset"], {"accuracy": float(accuracy)}
fl.client.start_numpy_client("0.0.0.0:8080", client=CifarClient()) | 36.15 | 83 | 0.638728 |
acf024290898b847f6e59b897b0288ebedf9f26f | 12,496 | py | Python | qa/rpc-tests/util.py | drcoul/StorOfWealth | 61d7a246fbb1b19d73fba258b3cf59fd82bc256d | [
"MIT"
] | null | null | null | qa/rpc-tests/util.py | drcoul/StorOfWealth | 61d7a246fbb1b19d73fba258b3cf59fd82bc256d | [
"MIT"
] | null | null | null | qa/rpc-tests/util.py | drcoul/StorOfWealth | 61d7a246fbb1b19d73fba258b3cf59fd82bc256d | [
"MIT"
] | null | null | null | # Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The Store of Wealth Coin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
    """P2P listen port for node *n*; PID-offset so concurrent test runs
    on one machine do not collide."""
    pid_offset = os.getpid() % 999
    return 11000 + n + pid_offset
def rpc_port(n):
    """RPC listen port for node *n*; PID-offset so concurrent test runs
    on one machine do not collide."""
    pid_offset = os.getpid() % 999
    return 12000 + n + pid_offset
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    # Round-trip the value through JSON and re-derive satoshis; any
    # precision loss would change the integer result.
    roundtripped = json.loads(json.dumps(float(n)))
    satoshis = int(roundtripped * 1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
    """
    Wait until everybody has the same block count
    """
    while True:
        heights = [conn.getblockcount() for conn in rpc_connections]
        # All nodes agree once every entry equals the first one.
        if heights.count(heights[0]) == len(heights):
            break
        time.sleep(1)
def sync_mempools(rpc_connections):
    """
    Wait until everybody has the same transactions in their memory
    pools
    """
    while True:
        # Compare every node's mempool against the first node's.
        reference = set(rpc_connections[0].getrawmempool())
        matching = 1 + sum(
            1 for conn in rpc_connections[1:]
            if set(conn.getrawmempool()) == reference)
        if matching == len(rpc_connections):
            break
        time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
    """Create node *n*'s data directory (if needed) and write its
    regtest storofwealth.conf; returns the directory path."""
    datadir = os.path.join(dirname, "node" + str(n))
    if not os.path.isdir(datadir):
        os.makedirs(datadir)
    conf_lines = [
        "regtest=1\n",
        "rpcuser=rt\n",
        "rpcpassword=rt\n",
        "port=" + str(p2p_port(n)) + "\n",
        "rpcport=" + str(rpc_port(n)) + "\n",
    ]
    with open(os.path.join(datadir, "storofwealth.conf"), 'w') as f:
        f.writelines(conf_lines)
    return datadir
def initialize_chain(test_dir):
    """
    Create (or copy from cache) a 200-block-long chain and
    4 wallets.
    storofwealthd and storofwealth-cli must be in search path.
    """
    # The chain is generated only once into ./cache; subsequent test runs
    # copy the cached node directories into test_dir.
    if not os.path.isdir(os.path.join("cache", "node0")):
        devnull = open("/dev/null", "w+")
        # Create cache directories, run storofwealthd:
        for i in range(4):
            datadir = initialize_datadir("cache", i)
            args = [os.getenv("BITCOIND", "storofwealthd"), "-keypool=1", "-datadir=" + datadir, "-discover=0"]
            if i > 0:
                # All later nodes connect to node 0.
                args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
            bitcoind_processes[i] = subprocess.Popen(args)
            # -rpcwait blocks until the daemon's RPC server is reachable.
            subprocess.check_call([os.getenv("BITCOINCLI", "storofwealth-cli"), "-datadir=" + datadir,
                                   "-rpcwait", "getblockcount"], stdout=devnull)
        devnull.close()
        rpcs = []
        for i in range(4):
            try:
                url = "http://rt:rt@127.0.0.1:%d" % (rpc_port(i),)
                rpcs.append(AuthServiceProxy(url))
            except:  # NOTE(review): bare except; exits on any connect failure
                sys.stderr.write("Error connecting to " + url + "\n")
                sys.exit(1)
        # Create a 200-block-long chain; each of the 4 nodes
        # gets 25 mature blocks and 25 immature.
        # blocks are created with timestamps 10 minutes apart, starting
        # at 1 Jan 2014
        block_time = 1388534400
        for i in range(2):
            for peer in range(4):
                for j in range(25):
                    set_node_times(rpcs, block_time)
                    rpcs[peer].setgenerate(True, 1)
                    block_time += 10 * 60
                # Must sync before next peer starts generating blocks
                sync_blocks(rpcs)
        # Shut them down, and clean up cache directories:
        stop_nodes(rpcs)
        wait_bitcoinds()
        for i in range(4):
            os.remove(log_filename("cache", i, "debug.log"))
            os.remove(log_filename("cache", i, "db.log"))
            os.remove(log_filename("cache", i, "peers.dat"))
            os.remove(log_filename("cache", i, "fee_estimates.dat"))
    for i in range(4):
        from_dir = os.path.join("cache", "node" + str(i))
        to_dir = os.path.join(test_dir, "node" + str(i))
        shutil.copytree(from_dir, to_dir)
        initialize_datadir(test_dir, i)  # Overwrite port/rpcport in storofwealth.conf
def initialize_chain_clean(test_dir, num_nodes):
    """
    Create an empty blockchain and num_nodes wallets.
    Useful if a test case wants complete control over initialization.
    """
    for node_index in range(num_nodes):
        initialize_datadir(test_dir, node_index)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None):
    """
    Start a storofwealthd and return RPC connection to it
    """
    datadir = os.path.join(dirname, "node" + str(i))
    args = [os.getenv("BITCOIND", "storofwealthd"), "-datadir=" + datadir, "-keypool=1", "-discover=0", "-rest"]
    if extra_args is not None:
        args.extend(extra_args)
    bitcoind_processes[i] = subprocess.Popen(args)
    devnull = open("/dev/null", "w+")
    # storofwealth-cli -rpcwait blocks until the daemon's RPC server is up.
    subprocess.check_call([os.getenv("BITCOINCLI", "storofwealth-cli"), "-datadir=" + datadir] +
                          _rpchost_to_args(rpchost) +
                          ["-rpcwait", "getblockcount"], stdout=devnull)
    devnull.close()
    url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
    proxy = AuthServiceProxy(url)
    proxy.url = url  # store URL on proxy for info
    return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
    """
    Start multiple storofwealthds, return RPC connections to them
    """
    if extra_args is None:
        extra_args = [None] * num_nodes
    return [start_node(i, dirname, extra_args[i], rpchost)
            for i in range(num_nodes)]
def log_filename(dirname, n_node, logname):
    """Path of *logname* inside node *n_node*'s regtest directory."""
    node_dir = "node" + str(n_node)
    return os.path.join(dirname, node_dir, "regtest", logname)
def stop_node(node, i):
    # Ask node i to shut down over RPC, then reap its process and drop it
    # from the module-level bitcoind_processes table.
    node.stop()
    bitcoind_processes[i].wait()
    del bitcoind_processes[i]
def stop_nodes(nodes):
    """Ask every node to shut down and clear the caller's list."""
    for conn in nodes:
        conn.stop()
    # Emptying array closes connections as a side effect
    del nodes[:]
def set_node_times(nodes, t):
    """Apply the same mock time *t* to every node."""
    for conn in nodes:
        conn.setmocktime(t)
def wait_bitcoinds():
    # Wait for all bitcoinds to cleanly exit
    # (then drop the entries from the module-level process table).
    for bitcoind in bitcoind_processes.values():
        bitcoind.wait()
    bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
    """Connect *from_connection* to node *node_num* and wait for handshake."""
    target = "127.0.0.1:%d" % p2p_port(node_num)
    from_connection.addnode(target, "onetry")
    # poll until version handshake complete to avoid race conditions
    # with transaction relaying
    while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
        time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
    """Create connections in both directions between nodes *a* and *b*."""
    for src, dst in ((a, b), (b, a)):
        connect_nodes(nodes[src], dst)
def find_output(node, txid, amount):
    """
    Return index to output of txid with value amount
    Raises exception if there is none.
    """
    txdata = node.getrawtransaction(txid, 1)
    for idx, vout in enumerate(txdata["vout"]):
        if vout["value"] == amount:
            return idx
    raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
    """
    Return a random set of unspent txouts that are enough to pay amount_needed
    """
    assert(confirmations_required >=0)
    utxo = from_node.listunspent(confirmations_required)
    random.shuffle(utxo)  # randomise coin selection
    inputs = []
    total_in = Decimal("0.00000000")
    # Pop shuffled utxos until the target amount is covered.
    while total_in < amount_needed and len(utxo) > 0:
        t = utxo.pop()
        total_in += t["amount"]
        inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
    if total_in < amount_needed:
        raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
    return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
    """
    Create change output(s), return them
    """
    outputs = {}
    spent = amount_out + fee
    change = amount_in - spent
    if change > spent * 2:
        # Create an extra change output to break up big inputs
        extra_address = from_node.getnewaddress()
        # Split change in two, being careful of rounding:
        outputs[extra_address] = Decimal(change / 2).quantize(
            Decimal('0.00000001'), rounding=ROUND_DOWN)
        change = amount_in - spent - outputs[extra_address]
    if change > 0:
        outputs[from_node.getnewaddress()] = change
    return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
    """
    Create&broadcast a zero-priority transaction.
    Returns (txid, hex-encoded-txdata)
    Ensures transaction is zero-priority by first creating a send-to-self,
    then using it's output
    """
    # Create a send-to-self with confirmed inputs:
    # (fee*2 covers the fees of both hops)
    self_address = from_node.getnewaddress()
    (total_in, inputs) = gather_inputs(from_node, amount + fee * 2)
    outputs = make_change(from_node, total_in, amount + fee, fee)
    outputs[self_address] = float(amount + fee)

    self_rawtx = from_node.createrawtransaction(inputs, outputs)
    self_signresult = from_node.signrawtransaction(self_rawtx)
    self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)

    vout = find_output(from_node, self_txid, amount + fee)
    # Now immediately spend the output to create a 1-input, 1-output
    # zero-priority transaction:
    inputs = [{"txid": self_txid, "vout": vout}]
    outputs = {to_node.getnewaddress(): float(amount)}

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random zero-priority transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Random sender/receiver (may be the same node) and a randomised fee.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)
    (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
    return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
    """
    Create a random transaction.
    Returns (txid, hex-encoded-transaction-data, fee)
    """
    # Random sender/receiver (may be the same node) and a randomised fee.
    from_node = random.choice(nodes)
    to_node = random.choice(nodes)
    fee = min_fee + fee_increment * random.randint(0, fee_variants)

    (total_in, inputs) = gather_inputs(from_node, amount + fee)
    outputs = make_change(from_node, total_in, amount, fee)
    outputs[to_node.getnewaddress()] = float(amount)

    rawtx = from_node.createrawtransaction(inputs, outputs)
    signresult = from_node.signrawtransaction(rawtx)
    txid = from_node.sendrawtransaction(signresult["hex"], True)

    return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
    """Raise AssertionError unless the two values compare equal."""
    if thing1 == thing2:
        return
    raise AssertionError("%s != %s" % (str(thing1), str(thing2)))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 is strictly greater than thing2."""
    if thing1 > thing2:
        return
    raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
    """Raise AssertionError unless fun(*args, **kwds) raises *exc*."""
    try:
        fun(*args, **kwds)
    except exc:
        # The expected exception type was raised -- success.
        return
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    raise AssertionError("No exception raised")
| 36.115607 | 112 | 0.649808 |
acf024f461fedf3b4ad7001d691389229e520595 | 288 | py | Python | src/damage.py | j-benson/damage | 51930bc05411b6c53251c16edaa3212292caf2ae | [
"MIT"
] | 1 | 2019-12-23T10:09:05.000Z | 2019-12-23T10:09:05.000Z | src/damage.py | j-benson/damage | 51930bc05411b6c53251c16edaa3212292caf2ae | [
"MIT"
] | 2 | 2019-09-18T10:45:09.000Z | 2020-02-03T21:55:10.000Z | src/damage.py | j-benson/damage | 51930bc05411b6c53251c16edaa3212292caf2ae | [
"MIT"
] | null | null | null | import message
import slack
import slack_api
import util
def main():
    """Post yesterday's total Slack-tracked spend to Slack, if any.

    Errors are printed (for the hosting log) and then re-raised so the
    caller still observes the failure.
    """
    try:
        spend = slack.sum_spend(util.yesterday())
        if spend > 0:
            damage_message = message.create_message(spend)
            slack_api.post_message(damage_message)
    except Exception as e:
        print(e)
        # BUGFIX: bare `raise` preserves the original traceback;
        # `raise e` re-raised from here and truncated it.
        raise
acf025214951629e8aec52c5f0638ccf4b96f65d | 5,050 | py | Python | vim/base/YouCompleteMe/python/ycm/client/tests/completion_request_test.py | petchw/subvim1 | 1eb6ad9db9d5cff24462ac60482a7a2be04d3124 | [
"Vim"
] | null | null | null | vim/base/YouCompleteMe/python/ycm/client/tests/completion_request_test.py | petchw/subvim1 | 1eb6ad9db9d5cff24462ac60482a7a2be04d3124 | [
"Vim"
] | null | null | null | vim/base/YouCompleteMe/python/ycm/client/tests/completion_request_test.py | petchw/subvim1 | 1eb6ad9db9d5cff24462ac60482a7a2be04d3124 | [
"Vim"
] | null | null | null | # Copyright (C) 2015 YouCompleteMe Contributors
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
from nose.tools import eq_
from ycm.test_utils import MockVimModule
vim_mock = MockVimModule()
from .. import completion_request
class ConvertCompletionResponseToVimDatas_test( object ):
  """ This class tests the
  completion_request._ConvertCompletionResponseToVimDatas method """

  def _Check( self, completion_data, expected_vim_data ):
    # Convert one ycmd completion entry and compare against the expected
    # Vim 'complete-items' dict; on mismatch print both values before
    # re-raising so the failing input shows up in the test log.
    vim_data = completion_request.ConvertCompletionDataToVimData(
        completion_data )

    try:
      eq_( expected_vim_data, vim_data )
    except:  # NOTE(review): bare except, but it always re-raises below.
      print( "Expected:\n'{0}'\nwhen parsing:\n'{1}'\nBut found:\n'{2}'".format(
          expected_vim_data,
          completion_data,
          vim_data ) )
      raise

  def All_Fields_test( self ):
    # detailed_info and extra_data.doc_string both present: the Vim 'info'
    # field is their newline-joined concatenation; 'kind' is lower-cased.
    self._Check( {
      'insertion_text': 'INSERTION TEXT',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'detailed_info': 'DETAILED INFO',
      'extra_data': {
        'doc_string': 'DOC STRING',
      },
    }, {
      'word' : 'INSERTION TEXT',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DETAILED INFO\nDOC STRING',
      'dup'  : 1,
      'empty': 1,
    } )

  def Just_Detailed_Info_test( self ):
    # Only detailed_info: it becomes 'info' unchanged.
    self._Check( {
      'insertion_text': 'INSERTION TEXT',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'detailed_info': 'DETAILED INFO',
    }, {
      'word' : 'INSERTION TEXT',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DETAILED INFO',
      'dup'  : 1,
      'empty': 1,
    } )

  def Just_Doc_String_test( self ):
    # Only extra_data.doc_string: it becomes 'info' unchanged.
    self._Check( {
      'insertion_text': 'INSERTION TEXT',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'extra_data': {
        'doc_string': 'DOC STRING',
      },
    }, {
      'word' : 'INSERTION TEXT',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DOC STRING',
      'dup'  : 1,
      'empty': 1,
    } )

  def Extra_Info_No_Doc_String_test( self ):
    # extra_data present but empty: no 'info' key is produced at all.
    self._Check( {
      'insertion_text': 'INSERTION TEXT',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'extra_data': {
      },
    }, {
      'word' : 'INSERTION TEXT',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'dup'  : 1,
      'empty': 1,
    } )

  def Extra_Info_No_Doc_String_With_Detailed_Info_test( self ):
    # Empty extra_data does not suppress detailed_info -> 'info'.
    self._Check( {
      'insertion_text': 'INSERTION TEXT',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'detailed_info': 'DETAILED INFO',
      'extra_data': {
      },
    }, {
      'word' : 'INSERTION TEXT',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DETAILED INFO',
      'dup'  : 1,
      'empty': 1,
    } )

  def Empty_Insertion_Text_test( self ):
    # An explicitly empty insertion_text maps to an empty 'word'.
    self._Check( {
      'insertion_text': '',
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'detailed_info': 'DETAILED INFO',
      'extra_data': {
        'doc_string': 'DOC STRING',
      },
    }, {
      'word' : '',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DETAILED INFO\nDOC STRING',
      'dup'  : 1,
      'empty': 1,
    } )

  def No_Insertion_Text_test( self ):
    # A missing insertion_text also maps to an empty 'word'.
    self._Check( {
      'menu_text': 'MENU TEXT',
      'extra_menu_info': 'EXTRA MENU INFO',
      'kind': 'K',
      'detailed_info': 'DETAILED INFO',
      'extra_data': {
        'doc_string': 'DOC STRING',
      },
    }, {
      'word' : '',
      'abbr' : 'MENU TEXT',
      'menu' : 'EXTRA MENU INFO',
      'kind' : 'k',
      'info' : 'DETAILED INFO\nDOC STRING',
      'dup'  : 1,
      'empty': 1,
    } )
| 27.005348 | 80 | 0.562772 |
acf0254eca5264c94202abe0ea27498ed249a65e | 292 | py | Python | src/autocompletion/helpers/_cwl-ica_list_user_names.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 8 | 2021-12-08T05:33:58.000Z | 2022-03-07T00:40:48.000Z | src/autocompletion/helpers/_cwl-ica_list_user_names.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 34 | 2021-08-11T03:59:33.000Z | 2022-03-10T05:39:26.000Z | src/autocompletion/helpers/_cwl-ica_list_user_names.py | kevin3/cwl-ica | cf706ea42993d563f364c0847ee4b882f8fe067c | [
"MIT"
] | 1 | 2022-01-08T07:34:55.000Z | 2022-01-08T07:34:55.000Z | #!/usr/bin/env python
"""
List all of the category names in the categories.yaml file
"""
from utils.repo import get_user_yaml_path
from utils.miscell import read_yaml
# Walk the users defined in user.yaml and print each username, one per line
# (consumed by the shell autocompletion helper).  The previous comment
# incorrectly said "project name", and stray dataset residue had been
# fused onto the final line.
for user in read_yaml(get_user_yaml_path())["users"]:
    print(user["username"])
acf025a0eb2e8306749debbdc82cca5ef85dbe78 | 37,780 | py | Python | simulator/uqr.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | [
"MIT"
] | 2 | 2021-05-27T13:32:16.000Z | 2022-03-30T01:23:34.000Z | simulator/uqr.py | ondiiik/meteoink | 9bc7af929de12ed5eb2fafd64fcfe447f07b6eeb | [
"MIT"
] | 1 | 2021-05-22T15:33:56.000Z | 2021-05-23T13:33:05.000Z | ports/esp32/boards/TWATCH_2020/modules/uqr.py | ondiiik/micropython-twatch-2020 | fb526a1ed19a741354e9552a4c077c2b832d4c4f | [
"MIT"
] | null | null | null | import re
from framebuf import FrameBuffer, MONO_HLSB
from micropython import const
ERROR_CORRECT_L = const(1)
ERROR_CORRECT_M = const(0)
ERROR_CORRECT_Q = const(3)
ERROR_CORRECT_H = const(2)
rsPoly_LUT = {
10: [1, 216, 194, 159, 111, 199, 94, 95, 113, 157, 193],
13: [1, 137, 73, 227, 17, 177, 17, 52, 13, 46, 43, 83, 132, 120],
15: [1, 29, 196, 111, 163, 112, 74, 10, 105, 105, 139, 132, 151, 32, 134, 26],
16: [1, 59, 13, 104, 189, 68, 209, 30, 8, 163, 65, 41, 229, 98, 50, 36, 59],
17: [1, 119, 66, 83, 120, 119, 22, 197, 83, 249, 41, 143, 134, 85, 53, 125, 99, 79],
18: [1, 239, 251, 183, 113, 149, 175, 199, 215, 240, 220, 73, 82, 173, 75, 32, 67, 217, 146],
20: [1, 152, 185, 240, 5, 111, 99, 6, 220, 112, 150, 69, 36, 187, 22, 228, 198, 121, 121, 165, 174],
22: [1, 89, 179, 131, 176, 182, 244, 19, 189, 69, 40, 28, 137, 29, 123, 67, 253, 86, 218, 230, 26, 145, 245],
24: [1, 122, 118, 169, 70, 178, 237, 216, 102, 115, 150, 229, 73, 130, 72, 61, 43, 206, 1, 237, 247, 127, 217, 144, 117],
26: [1, 246, 51, 183, 4, 136, 98, 199, 152, 77, 56, 206, 24, 145, 40, 209, 117, 233, 42, 135, 68, 70, 144, 146, 77, 43, 94]
}
###
# Base
#
# Formerly in base.py
###
gexp = b'\x01\x02\x04\x08\x10 @\x80\x1d:t\xe8\xcd\x87\x13&L\x98-Z\xb4u\xea\xc9\x8f\x03\x06\x0c\x180`\xc0\x9d\'N\x9c%J\x945j\xd4\xb5w\xee\xc1\x9f#F\x8c\x05\n\x14(P\xa0]\xbai\xd2\xb9o\xde\xa1_\xbea\xc2\x99/^\xbce\xca\x89\x0f\x1e<x\xf0\xfd\xe7\xd3\xbbk\xd6\xb1\x7f\xfe\xe1\xdf\xa3[\xb6q\xe2\xd9\xafC\x86\x11"D\x88\r\x1a4h\xd0\xbdg\xce\x81\x1f>|\xf8\xed\xc7\x93;v\xec\xc5\x973f\xcc\x85\x17.\\\xb8m\xda\xa9O\x9e!B\x84\x15*T\xa8M\x9a)R\xa4U\xaaI\x929r\xe4\xd5\xb7s\xe6\xd1\xbfc\xc6\x91?~\xfc\xe5\xd7\xb3{\xf6\xf1\xff\xe3\xdb\xabK\x961b\xc4\x957n\xdc\xa5W\xaeA\x82\x192d\xc8\x8d\x07\x0e\x1c8p\xe0\xdd\xa7S\xa6Q\xa2Y\xb2y\xf2\xf9\xef\xc3\x9b+V\xacE\x8a\t\x12$H\x90=z\xf4\xf5\xf7\xf3\xfb\xeb\xcb\x8b\x0b\x16,X\xb0}\xfa\xe9\xcf\x83\x1b6l\xd8\xadG\x8e\x01'
glog = b'\x00\x00\x01\x19\x022\x1a\xc6\x03\xdf3\xee\x1bh\xc7K\x04d\xe0\x0e4\x8d\xef\x81\x1c\xc1i\xf8\xc8\x08Lq\x05\x8ae/\xe1$\x0f!5\x93\x8e\xda\xf0\x12\x82E\x1d\xb5\xc2}j\'\xf9\xb9\xc9\x9a\txM\xe4r\xa6\x06\xbf\x8bbf\xdd0\xfd\xe2\x98%\xb3\x10\x91"\x886\xd0\x94\xce\x8f\x96\xdb\xbd\xf1\xd2\x13\\\x838F@\x1eB\xb6\xa3\xc3H~nk:(T\xfa\x85\xba=\xca^\x9b\x9f\n\x15y+N\xd4\xe5\xacs\xf3\xa7W\x07p\xc0\xf7\x8c\x80c\rgJ\xde\xed1\xc5\xfe\x18\xe3\xa5\x99w&\xb8\xb4|\x11D\x92\xd9# \x89.7?\xd1[\x95\xbc\xcf\xcd\x90\x87\x97\xb2\xdc\xfc\xbea\xf2V\xd3\xab\x14*]\x9e\x84<9SGmA\xa2\x1f-C\xd8\xb7{\xa4v\xc4\x17I\xec\x7f\x0co\xf6l\xa1;R)\x9dU\xaa\xfb`\x86\xb1\xbb\xcc>Z\xcbY_\xb0\x9c\xa9\xa0Q\x0b\xf5\x16\xebzu,\xd7O\xae\xd5\xe9\xe6\xe7\xad\xe8t\xd6\xf4\xea\xa8PX\xaf'
RS_BLOCK_OFFSET = {
ERROR_CORRECT_L: 0,
ERROR_CORRECT_M: 1,
ERROR_CORRECT_Q: 2,
ERROR_CORRECT_H: 3,
}
RS_BLOCK_TABLE = (b'\x01\x1a\x13',
b'\x01\x1a\x10',
b'\x01\x1a\r',
b'\x01\x1a\t',
b'\x01,"',
b'\x01,\x1c',
b'\x01,\x16',
b'\x01,\x10',
b'\x01F7',
b'\x01F,',
b'\x02#\x11',
b'\x02#\r',
b'\x01dP',
b'\x022 ',
b'\x022\x18',
b'\x04\x19\t',
b'\x01\x86l',
b'\x02C+',
b'\x02!\x0f\x02"\x10',
b'\x02!\x0b\x02"\x0c',
b'\x02VD',
b'\x04+\x1b',
b'\x04+\x13',
b'\x04+\x0f',
b'\x02bN',
b'\x041\x1f',
b'\x02 \x0e\x04!\x0f',
b"\x04\'\r\x01(\x0e",
b'\x02ya',
b"\x02<&\x02=\'",
b'\x04(\x12\x02,\x13',
b'\x04(\x0e\x02,\x0f',
b'\x02\x92t',
b'\x03:$\x02;%',
b'\x04$\x10\x04%\x11',
b'\x04$\x0c\x04%\r',
b'\x02VD\x02WE',
b'\x04E+\x01F,',
b'\x06+\x13\x02,\x14',
b'\x06+\x0f\x02,\x10',
b'\x04eQ',
b'\x01P2\x04Q3',
b'\x042\x16\x043\x17',
b'\x03$\x0c\x08%\r',
b'\x02t\\\x02u]',
b'\x06:$\x02;%',
b'\x04.\x14\x06/\x15',
b'\x07*\x0e\x04+\x0f',
b'\x04\x85k',
b'\x08;%\x01<&',
b'\x08,\x14\x04-\x15',
b'\x0c!\x0b\x04"\x0c',
b'\x03\x91s\x01\x92t',
b'\x04@(\x05A,',
b'\x0b$\x10\x05%\x11',
b'\x0b$\x0c\x05%\r',
b'\x05mW\x01nX',
b'\x05A,\x05B*',
b'\x056\x18\x077\x19',
b'\x0b$\x0c\x07%\r',
b'\x05zb\x01{c',
b'\x07I-\x03J.',
b'\x0f+\x13\x02,\x14',
b'\x03-\x0f\r.\x10',
b'\x01\x87k\x05\x88l',
b'\nJ.\x01K/',
b'\x012\x16\x0f3\x17',
b'\x02*\x0e\x11+\x0f',
b'\x05\x96x\x01\x97y',
b'\tE+\x04F,',
b'\x112\x16\x013\x17',
b'\x02*\x0e\x13+\x0f',
b'\x03\x8dq\x04\x8er',
b'\x03F,\x0bG-',
b'\x11/\x15\x040\x16',
b"\t\'\r\x10(\x0e",
b'\x03\x87k\x05\x88l',
b'\x03C,\rD*',
b'\x0f6\x18\x057\x19',
b'\x0f+\x0f\n,\x10',
b'\x04\x90t\x04\x91u',
b'\x11D*',
b'\x112\x16\x063\x17',
b'\x13.\x10\x06/\x11',
b'\x02\x8bo\x07\x8cp',
b'\x11J.',
b'\x076\x18\x107\x19',
b'"%\r',
b'\x04\x97y\x05\x98z',
b'\x04K/\x0eL0',
b'\x0b6\x18\x0e7\x19',
b'\x10-\x0f\x0e.\x10',
b'\x06\x93u\x04\x94v',
b'\x06I-\x0eJ.',
b'\x0b6\x18\x107\x19',
b'\x1e.\x10\x02/\x11',
b'\x08\x84j\x04\x85k',
b'\x08K/\rL0',
b'\x076\x18\x167\x19',
b'\x16-\x0f\r.\x10',
b'\n\x8er\x02\x8fs',
b'\x13J.\x04K/',
b'\x1c2\x16\x063\x17',
b'!.\x10\x04/\x11',
b'\x08\x98z\x04\x99{',
b'\x16I-\x03J.',
b'\x085\x17\x1a6\x18',
b'\x0c-\x0f\x1c.\x10',
b'\x03\x93u\n\x94v',
b'\x03I-\x17J.',
b'\x046\x18\x1f7\x19',
b'\x0b-\x0f\x1f.\x10',
b'\x07\x92t\x07\x93u',
b'\x15I-\x07J.',
b'\x015\x17%6\x18',
b'\x13-\x0f\x1a.\x10',
b'\x05\x91s\n\x92t',
b'\x13K/\nL0',
b'\x0f6\x18\x197\x19',
b'\x17-\x0f\x19.\x10',
b'\r\x91s\x03\x92t',
b'\x02J.\x1dK/',
b'*6\x18\x017\x19',
b'\x17-\x0f\x1c.\x10',
b'\x11\x91s',
b'\nJ.\x17K/',
b'\n6\x18#7\x19',
b'\x13-\x0f#.\x10',
b'\x11\x91s\x01\x92t',
b'\x0eJ.\x15K/',
b'\x1d6\x18\x137\x19',
b'\x0b-\x0f..\x10',
b'\r\x91s\x06\x92t',
b'\x0eJ.\x17K/',
b',6\x18\x077\x19',
b';.\x10\x01/\x11',
b'\x0c\x97y\x07\x98z',
b'\x0cK/\x1aL0',
b"\'6\x18\x0e7\x19",
b'\x16-\x0f,.\x10',
b'\x06\x97y\x0e\x98z',
b'\x06K/"L0',
b'.6\x18\n7\x19',
b'\x02-\x0f@.\x10',
b'\x11\x98z\x04\x99{',
b'\x1dJ.\x0eK/',
b'16\x18\n7\x19',
b'\x18-\x0f..\x10',
b'\x04\x98z\x12\x99{',
b'\rJ. K/',
b'06\x18\x0e7\x19',
b'*-\x0f .\x10',
b'\x14\x93u\x04\x94v',
b'(K/\x07L0',
b'+6\x18\x167\x19',
b'\n-\x0fC.\x10',
b'\x13\x94v\x06\x95w',
b'\x12K/\x1fL0',
b'"6\x18"7\x19',
b'\x14-\x0f=.\x10')
class Polynomial:
    """A polynomial over GF(2**8), used for QR-code Reed-Solomon math.

    Coefficients live in ``self.num``, most-significant term first.
    Field multiplication is carried out in the log domain via the
    module-level ``gexp``/``glog`` tables; XOR is field addition.
    """

    def __init__(self, num, shift):
        # ``shift`` multiplies the polynomial by x**shift by appending
        # zero coefficients; leading zero coefficients are stripped first.
        if not num:  # pragma: no cover
            raise Exception("%s/%s" % (len(num), shift))

        for offset in range(len(num)):
            if num[offset] != 0:
                break
        else:
            # All coefficients were zero: drop them all, leaving only the
            # shift padding (possibly an empty coefficient list).
            offset += 1

        self.num = num[offset:] + [0] * shift

    def __getitem__(self, index):
        return self.num[index]

    def __iter__(self):
        return iter(self.num)

    def __len__(self):
        return len(self.num)

    def __mul__(self, other):
        # Schoolbook polynomial multiplication: each coefficient product
        # is looked up through glog/gexp and accumulated with XOR.
        num = [0] * (len(self) + len(other) - 1)

        for i, item in enumerate(self):
            for j, other_item in enumerate(other):
                num[i + j] ^= gexp[(glog[item] + glog[other_item]) % 255]

        return Polynomial(num, 0)

    def __mod__(self, other):
        # Polynomial remainder: repeatedly cancel the leading term of
        # ``this`` with a scaled copy of ``other`` until
        # deg(this) < deg(other).
        this = self
        while True:
            difference = len(this) - len(other)
            if difference < 0:
                break

            # Scale factor (in the log domain) that aligns the leading terms.
            ratio = glog[this[0]] - glog[other[0]]

            num = [
                item ^ gexp[(glog[other_item] + ratio) % 255]
                for item, other_item in zip(this, other)]
            if difference:
                # Tail coefficients of ``this`` not covered by ``other``.
                num.extend(this[-difference:])

            this = Polynomial(num, 0)
        return this
class RSBlock:
    """A single Reed-Solomon block description.

    Attributes:
        total_count: total codewords (data + error correction) in the block.
        data_count: data codewords in the block.
    """
    def __init__(self, total_count, data_count):
        self.total_count, self.data_count = total_count, data_count
def make_rs_blocks(version, error_correction):
    """Return the list of RSBlock objects for *version* / *error_correction*."""
    if error_correction not in RS_BLOCK_OFFSET:  # pragma: no cover
        raise Exception(
            "bad rs block @ version: %s / error_correction: %s" %
            (version, error_correction))
    table_row = RS_BLOCK_TABLE[
        (version - 1) * 4 + RS_BLOCK_OFFSET[error_correction]]
    blocks = []
    # Each (count, total, data) triple expands into ``count`` identical blocks.
    for index in range(0, len(table_row), 3):
        count, total_count, data_count = table_row[index:index + 3]
        blocks.extend(
            RSBlock(total_count, data_count) for _ in range(count))
    return blocks
###
# Utilities
#
# Formerly in utils.py
###
# QR encoding modes.
MODE_NUMBER = 1 << 0
MODE_ALPHA_NUM = 1 << 1
MODE_8BIT_BYTE = 1 << 2
MODE_KANJI = 1 << 3
# Bits used to encode the data length, per mode, for each of the three
# version bands (small: 1-9, medium: 10-26, large: 27-40).
MODE_SIZE_SMALL = {
    MODE_NUMBER: 10,
    MODE_ALPHA_NUM: 9,
    MODE_8BIT_BYTE: 8,
    MODE_KANJI: 8,
}
MODE_SIZE_MEDIUM = {
    MODE_NUMBER: 12,
    MODE_ALPHA_NUM: 11,
    MODE_8BIT_BYTE: 16,
    MODE_KANJI: 10,
}
MODE_SIZE_LARGE = {
    MODE_NUMBER: 14,
    MODE_ALPHA_NUM: 13,
    MODE_8BIT_BYTE: 16,
    MODE_KANJI: 12,
}
# Characters representable in alphanumeric mode, in their QR value order.
ALPHA_NUM = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'
ESCAPED_ALPHA_NUM = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ\\ \\$\\%\\*\\+\\-\\.\\/\\:'
# Bug fix: the trailing literal must be a raw bytes literal -- '\Z' is a
# regex end-anchor, not a string escape, and a plain bytes literal triggers
# an "invalid escape sequence \Z" SyntaxWarning on modern Python.
RE_ALPHA_NUM = re.compile(b'^[' + ESCAPED_ALPHA_NUM + rb']*\Z')
# The number of bits for numeric delimited data lengths.
NUMBER_LENGTH = {3: 10, 2: 7, 1: 4}
# Alignment-pattern centre coordinates, indexed by version - 1
# (ISO/IEC 18004 Annex E).
PATTERN_POSITION_TABLE = [
    [],
    [6, 18],
    [6, 22],
    [6, 26],
    [6, 30],
    [6, 34],
    [6, 22, 38],
    [6, 24, 42],
    [6, 26, 46],
    [6, 28, 50],
    [6, 30, 54],
    [6, 32, 58],
    [6, 34, 62],
    [6, 26, 46, 66],
    [6, 26, 48, 70],
    [6, 26, 50, 74],
    [6, 30, 54, 78],
    [6, 30, 56, 82],
    [6, 30, 58, 86],
    [6, 34, 62, 90],
    [6, 28, 50, 72, 94],
    [6, 26, 50, 74, 98],
    [6, 30, 54, 78, 102],
    [6, 28, 54, 80, 106],
    [6, 32, 58, 84, 110],
    [6, 30, 58, 86, 114],
    [6, 34, 62, 90, 118],
    [6, 26, 50, 74, 98, 122],
    [6, 30, 54, 78, 102, 126],
    [6, 26, 52, 78, 104, 130],
    [6, 30, 56, 82, 108, 134],
    [6, 34, 60, 86, 112, 138],
    [6, 30, 58, 86, 114, 142],
    [6, 34, 62, 90, 118, 146],
    [6, 30, 54, 78, 102, 126, 150],
    [6, 24, 50, 76, 102, 128, 154],
    [6, 28, 54, 80, 106, 132, 158],
    [6, 32, 58, 84, 110, 136, 162],
    [6, 26, 54, 82, 110, 138, 166],
    [6, 30, 58, 86, 114, 142, 170]
]
# BCH generator polynomial for the 15-bit format information.
G15 = (
    (1 << 10) | (1 << 8) | (1 << 5) | (1 << 4) | (1 << 2) | (1 << 1) |
    (1 << 0))
# BCH generator polynomial for the 18-bit version information.
G18 = (
    (1 << 12) | (1 << 11) | (1 << 10) | (1 << 9) | (1 << 8) | (1 << 5) |
    (1 << 2) | (1 << 0))
# XOR mask applied to the encoded format-information bits.
G15_MASK = (1 << 14) | (1 << 12) | (1 << 10) | (1 << 4) | (1 << 1)
# Alternating pad codewords used to fill unused data capacity.
PAD0 = 0xEC
PAD1 = 0x11
# Precompute bit count limits, indexed by error correction level and code size
# (index 0 is a placeholder so BIT_LIMIT_TABLE[ec][version] works directly).
_data_count = lambda block: block.data_count
BIT_LIMIT_TABLE = [
    [0] + [8*sum(map(_data_count, make_rs_blocks(version, error_correction)))
           for version in range(1, 41)]
    for error_correction in range(4)
]
def BCH_type_info(data):
    """Append the 10 BCH error-correction bits to 5-bit format info *data*."""
    remainder = data << 10
    # Polynomial division by the generator G15 over GF(2).
    while BCH_digit(remainder) - BCH_digit(G15) >= 0:
        remainder ^= (G15 << (BCH_digit(remainder) - BCH_digit(G15)))
    return ((data << 10) | remainder) ^ G15_MASK
def BCH_type_number(data):
    """Append the 12 BCH error-correction bits to 6-bit version info *data*."""
    remainder = data << 12
    # Polynomial division by the generator G18 over GF(2).
    while BCH_digit(remainder) - BCH_digit(G18) >= 0:
        remainder ^= (G18 << (BCH_digit(remainder) - BCH_digit(G18)))
    return (data << 12) | remainder
def BCH_digit(data):
    """Return the bit length of *data* (1-based position of its top set bit).

    Replaces the original shift-and-count loop with ``int.bit_length``,
    which is equivalent for the non-negative inputs used here, clearer,
    and (unlike the loop) terminates for negative inputs.
    """
    return data.bit_length()
def pattern_position(version):
    """Return the alignment-pattern centre coordinates for *version* (1-40)."""
    index = version - 1
    return PATTERN_POSITION_TABLE[index]
def make_mask_func(pattern):
    """Return the mask function for the given mask pattern (0-7).

    Each returned function maps a module position ``(i, j)`` (row, column)
    to True when that module's bit should be inverted.
    """
    if pattern == 0:  # 000
        return lambda i, j: (i + j) % 2 == 0
    if pattern == 1:  # 001
        return lambda i, j: i % 2 == 0
    if pattern == 2:  # 010
        return lambda i, j: j % 3 == 0
    if pattern == 3:  # 011
        return lambda i, j: (i + j) % 3 == 0
    if pattern == 4:  # 100
        return lambda i, j: (int(i / 2) + int(j / 3)) % 2 == 0
    if pattern == 5:  # 101
        return lambda i, j: (i * j) % 2 + (i * j) % 3 == 0
    if pattern == 6:  # 110
        return lambda i, j: ((i * j) % 2 + (i * j) % 3) % 2 == 0
    if pattern == 7:  # 111
        return lambda i, j: ((i * j) % 3 + (i + j) % 2) % 2 == 0
    # Bug fix: the original built the message with ``"..." + pattern`` which
    # itself raises TypeError (str + int) and hides the intended message.
    raise TypeError("Bad mask pattern: %s" % pattern)  # pragma: no cover
def mode_sizes_for_version(version):
    """Return the mode -> length-field-size mapping for *version*."""
    if version < 10:
        return MODE_SIZE_SMALL
    return MODE_SIZE_MEDIUM if version < 27 else MODE_SIZE_LARGE
def length_in_bits(mode, version):
    """Return how many bits encode the data length for *mode* at *version*."""
    valid_modes = (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE, MODE_KANJI)
    if mode not in valid_modes:
        raise TypeError("Invalid mode (%s)" % mode)  # pragma: no cover
    if not 1 <= version <= 40:  # pragma: no cover
        raise ValueError(
            "Invalid version (was %s, expected 1 to 40)" % version)
    return mode_sizes_for_version(version)[mode]
def make_lost_point(modules):
    """Score a rendered symbol against the four ISO/IEC 18004 mask penalties.

    Lower is better; ``best_mask_pattern`` picks the mask minimising this.
    (The original initialised ``lost_point = 0`` and immediately overwrote
    it; that dead assignment is removed.)
    """
    modules_count = len(modules)
    return (_lost_point_level1(modules, modules_count)
            + _lost_point_level2(modules, modules_count)
            + _lost_point_level3(modules, modules_count)
            + _lost_point_level4(modules, modules_count))
def _lost_point_level1(modules, modules_count):
lost_point = 0
modules_range = range(modules_count)
container = [0] * (modules_count + 1)
for row in modules_range:
this_row = modules[row]
previous_color = this_row[0]
length = 0
for col in modules_range:
if this_row[col] == previous_color:
length += 1
else:
if length >= 5:
container[length] += 1
length = 1
previous_color = this_row[col]
if length >= 5:
container[length] += 1
for col in modules_range:
previous_color = modules[0][col]
length = 0
for row in modules_range:
if modules[row][col] == previous_color:
length += 1
else:
if length >= 5:
container[length] += 1
length = 1
previous_color = modules[row][col]
if length >= 5:
container[length] += 1
lost_point += sum(container[each_length] * (each_length - 2)
for each_length in range(5, modules_count + 1))
return lost_point
def _lost_point_level2(modules, modules_count):
lost_point = 0
modules_range = range(modules_count - 1)
for row in modules_range:
this_row = modules[row]
next_row = modules[row + 1]
# use iter() and next() to skip next four-block. e.g.
# d a f if top-right a != b botton-right,
# c b e then both abcd and abef won't lost any point.
modules_range_iter = iter(modules_range)
for col in modules_range_iter:
top_right = this_row[col + 1]
if top_right != next_row[col + 1]:
# reduce 33.3% of runtime via next().
# None: raise nothing if there is no next item.
try:
next(modules_range_iter)
except StopIteration:
pass
elif top_right != this_row[col]:
continue
elif top_right != next_row[col]:
continue
else:
lost_point += 3
return lost_point
def _lost_point_level3(modules, modules_count):
    """Penalty 3: finder-like patterns in rows/columns, 40 points each."""
    # 1 : 1 : 3 : 1 : 1 ratio (dark:light:dark:light:dark) pattern in
    # row/column, preceded or followed by light area 4 modules wide. From ISOIEC.
    # pattern1: 10111010000
    # pattern2: 00001011101
    modules_range = range(modules_count)
    modules_range_short = range(modules_count-10)
    lost_point = 0
    # Row scan: slide an 11-module window along each row.
    for row in modules_range:
        this_row = modules[row]
        modules_range_short_iter = iter(modules_range_short)
        col = 0
        for col in modules_range_short_iter:
            # First five tests are common to both patterns; the bracketed
            # alternatives distinguish pattern1 from pattern2.
            if (
                    not this_row[col + 1]
                    and this_row[col + 4]
                    and not this_row[col + 5]
                    and this_row[col + 6]
                    and not this_row[col + 9]
                    and (
                        this_row[col + 0]
                        and this_row[col + 2]
                        and this_row[col + 3]
                        and not this_row[col + 7]
                        and not this_row[col + 8]
                        and not this_row[col + 10]
                        or
                        not this_row[col + 0]
                        and not this_row[col + 2]
                        and not this_row[col + 3]
                        and this_row[col + 7]
                        and this_row[col + 8]
                        and this_row[col + 10]
                    )
            ):
                lost_point += 40
            # horspool algorithm.
            # if this_row[col + 10] == True, pattern1 shift 4, pattern2 shift 2. So min=2.
            # if this_row[col + 10] == False, pattern1 shift 1, pattern2 shift 1. So min=1.
            if this_row[col + 10]:
                try:
                    next(modules_range_short_iter)
                except StopIteration:
                    pass
    # Column scan: the same windows, transposed.
    for col in modules_range:
        modules_range_short_iter = iter(modules_range_short)
        row = 0
        for row in modules_range_short_iter:
            if (
                    not modules[row + 1][col]
                    and modules[row + 4][col]
                    and not modules[row + 5][col]
                    and modules[row + 6][col]
                    and not modules[row + 9][col]
                    and (
                        modules[row + 0][col]
                        and modules[row + 2][col]
                        and modules[row + 3][col]
                        and not modules[row + 7][col]
                        and not modules[row + 8][col]
                        and not modules[row + 10][col]
                        or
                        not modules[row + 0][col]
                        and not modules[row + 2][col]
                        and not modules[row + 3][col]
                        and modules[row + 7][col]
                        and modules[row + 8][col]
                        and modules[row + 10][col]
                    )
            ):
                lost_point += 40
            # Same Horspool-style skip as the row scan.
            if modules[row + 10][col]:
                try:
                    next(modules_range_short_iter)
                except StopIteration:
                    pass
    return lost_point
def _lost_point_level4(modules, modules_count):
dark_count = sum(map(sum, modules))
percent = float(dark_count) / (modules_count**2)
# Every 5% departure from 50%, rating++
rating = int(abs(percent * 100 - 50) / 5)
return rating * 10
def optimal_data_chunks(data, minimum=4):
    """Yield QRData chunks with per-chunk modes optimised for the content.

    :param data: The data to encode (converted to bytes if necessary).
    :param minimum: The minimum number of consecutive bytes of one class
        (numeric / alphanumeric) worth splitting into its own chunk.
    """
    data = to_bytestring(data)
    re_repeat = (
        b'{' + str(minimum).encode('ascii') + b',}')
    # Bug fix: raw bytes literal -- '\d' is a regex class, not a string
    # escape; the plain ``b'\d'`` form raises an "invalid escape sequence"
    # SyntaxWarning on modern Python.
    num_pattern = re.compile(rb'\d' + re_repeat)
    num_bits = _optimal_split(data, num_pattern)
    alpha_pattern = re.compile(
        b'[' + ESCAPED_ALPHA_NUM + b']' + re_repeat)
    for is_num, chunk in num_bits:
        if is_num:
            yield QRData(chunk, mode=MODE_NUMBER, check_data=False)
        else:
            # Non-numeric runs are further split into alphanumeric vs 8-bit.
            for is_alpha, sub_chunk in _optimal_split(chunk, alpha_pattern):
                mode = MODE_ALPHA_NUM if is_alpha else MODE_8BIT_BYTE
                yield QRData(sub_chunk, mode=mode, check_data=False)
def _optimal_split(data, pattern):
while data:
match = pattern.search(data)
if not match:
break
start, end = match.start(), match.end()
if start:
yield False, data[:start]
yield True, data[start:end]
data = data[end:]
if data:
yield False, data
def to_bytestring(data):
    """Return *data* as bytes, UTF-8 encoding anything that is not already."""
    if isinstance(data, bytes):
        return data
    return str(data).encode('utf-8')
def optimal_mode(data):
    """Return the densest QR mode able to represent *data* (bytes)."""
    if data.isdigit():
        return MODE_NUMBER
    return MODE_ALPHA_NUM if RE_ALPHA_NUM.match(data) else MODE_8BIT_BYTE
class QRData:
    """A chunk of data to encode together with its QR encoding mode.

    If *mode* is not given, the densest mode able to represent *data* is
    chosen.  With ``check_data=False`` the caller guarantees *data* is
    already bytes and representable in *mode*.
    """
    def __init__(self, data, mode=None, check_data=True):
        if check_data:
            data = to_bytestring(data)
        if mode is None:
            self.mode = optimal_mode(data)
        else:
            self.mode = mode
            if mode not in (MODE_NUMBER, MODE_ALPHA_NUM, MODE_8BIT_BYTE):
                raise TypeError("Invalid mode (%s)" % mode)  # pragma: no cover
            if check_data and mode < optimal_mode(data):  # pragma: no cover
                raise ValueError(
                    "Provided data can not be represented in mode "
                    "{0}".format(mode))
        self.data = data
    def __len__(self):
        return len(self.data)
    def write(self, buffer):
        """Append this chunk's payload bits to *buffer* (mode-specific)."""
        if self.mode == MODE_NUMBER:
            # Digits are packed three at a time into 10 bits (7 / 4 bits
            # for a trailing pair / single digit via NUMBER_LENGTH).
            for i in range(0, len(self.data), 3):
                chars = self.data[i:i + 3]
                bit_length = NUMBER_LENGTH[len(chars)]
                # int() accepts the ASCII digit bytes directly.
                buffer.put(int(chars), bit_length)
        elif self.mode == MODE_ALPHA_NUM:
            # Character pairs become value*45 + value in 11 bits; a trailing
            # single character takes 6 bits.
            for i in range(0, len(self.data), 2):
                chars = self.data[i:i + 2]
                if len(chars) > 1:
                    buffer.put(
                        ALPHA_NUM.find(chars[0]) * 45 +
                        ALPHA_NUM.find(chars[1]), 11)
                else:
                    buffer.put(ALPHA_NUM.find(chars), 6)
        else:
            # 8-bit byte mode: one byte per 8 bits.
            data = self.data
            for c in data:
                buffer.put(c, 8)
    def __repr__(self):
        return repr(self.data)
class BitBuffer:
    """A growable MSB-first bit stream stored as a list of byte values."""
    def __init__(self):
        self.buffer = []
        self.length = 0
    def __repr__(self):
        return ".".join(str(byte) for byte in self.buffer)
    def get(self, index):
        """Return bit *index* (0 = MSB of the first byte) as a bool."""
        byte_index, bit_offset = divmod(index, 8)
        return bool((self.buffer[byte_index] >> (7 - bit_offset)) & 1)
    def put(self, num, length):
        """Append the low *length* bits of *num*, most significant first."""
        for shift in range(length - 1, -1, -1):
            self.put_bit(bool((num >> shift) & 1))
    def __len__(self):
        return self.length
    def put_bit(self, bit):
        """Append a single bit, growing the byte list as needed."""
        byte_index = self.length // 8
        if len(self.buffer) <= byte_index:
            self.buffer.append(0)
        if bit:
            self.buffer[byte_index] |= (0x80 >> (self.length % 8))
        self.length += 1
def create_bytes(buffer, rs_blocks):
    """Interleave data codewords with their Reed-Solomon EC codewords.

    *buffer* holds the encoded data bits; *rs_blocks* describes the block
    structure for the chosen version/EC level.  Returns the final list of
    codeword byte values in transmission order.
    """
    offset = 0
    maxDcCount = 0
    maxEcCount = 0
    dcdata = [0] * len(rs_blocks)
    ecdata = [0] * len(rs_blocks)
    for r in range(len(rs_blocks)):
        dcCount = rs_blocks[r].data_count
        ecCount = rs_blocks[r].total_count - dcCount
        maxDcCount = max(maxDcCount, dcCount)
        maxEcCount = max(maxEcCount, ecCount)
        # Slice this block's data codewords out of the bit buffer.
        dcdata[r] = [0] * dcCount
        for i in range(len(dcdata[r])):
            dcdata[r][i] = 0xff & buffer.buffer[i + offset]
        offset += dcCount
        # Get error correction polynomial.
        # rsPoly_LUT caches precomputed generator polynomials; otherwise the
        # generator is built up term by term.
        if ecCount in rsPoly_LUT:
            rsPoly = Polynomial(rsPoly_LUT[ecCount], 0)
        else:
            rsPoly = Polynomial([1], 0)
            for i in range(ecCount):
                rsPoly = rsPoly * Polynomial([1, gexp[i % 255]], 0)
        rawPoly = Polynomial(dcdata[r], len(rsPoly) - 1)
        # The remainder of the division is the EC codeword sequence.
        modPoly = rawPoly % rsPoly
        ecdata[r] = [0] * (len(rsPoly) - 1)
        for i in range(len(ecdata[r])):
            modIndex = i + len(modPoly) - len(ecdata[r])
            if (modIndex >= 0):
                ecdata[r][i] = modPoly[modIndex]
            else:
                ecdata[r][i] = 0
    totalCodeCount = 0
    for rs_block in rs_blocks:
        totalCodeCount += rs_block.total_count
    # Interleave: every block's i-th data codeword first, then every block's
    # i-th EC codeword.
    data = [None] * totalCodeCount
    index = 0
    for i in range(maxDcCount):
        for r in range(len(rs_blocks)):
            if i < len(dcdata[r]):
                data[index] = dcdata[r][i]
                index += 1
    for i in range(maxEcCount):
        for r in range(len(rs_blocks)):
            if i < len(ecdata[r]):
                data[index] = ecdata[r][i]
                index += 1
    return data
def create_data(version, error_correction, data_list):
    """Encode *data_list* into the final codeword stream for the symbol.

    Writes mode indicator, length field and payload for every chunk, checks
    capacity, terminates and pads the bit stream, then delegates to
    ``create_bytes`` for EC generation and interleaving.
    """
    buffer = BitBuffer()
    for data in data_list:
        buffer.put(data.mode, 4)
        buffer.put(len(data), length_in_bits(data.mode, version))
        data.write(buffer)
    # Calculate the maximum number of bits for the given version.
    rs_blocks = make_rs_blocks(version, error_correction)
    bit_limit = 0
    for block in rs_blocks:
        bit_limit += block.data_count * 8
    if len(buffer) > bit_limit:
        raise RuntimeError(
            "Code length overflow. Data size (%s) > size available (%s)" %
            (len(buffer), bit_limit))
    # Terminate the bits (add up to four 0s).
    for i in range(min(bit_limit - len(buffer), 4)):
        buffer.put_bit(False)
    # Delimit the string into 8-bit words, padding with 0s if necessary.
    delimit = len(buffer) % 8
    if delimit:
        for i in range(8 - delimit):
            buffer.put_bit(False)
    # Add special alternating padding bitstrings until buffer is full.
    bytes_to_fill = (bit_limit - len(buffer)) // 8
    for i in range(bytes_to_fill):
        if i % 2 == 0:
            buffer.put(PAD0, 8)
        else:
            buffer.put(PAD1, 8)
    return create_bytes(buffer, rs_blocks)
###
# Main
#
# Formerly in app.py
###
def make(data=None, **kwargs):
    """Build a QRCode for *data* and return its rendered matrix.

    Bug fix: the original called ``qr.make_image()``, a method this trimmed
    QRCode class does not define (the image backends were removed); the
    rendering entry point it actually provides is ``get_matrix()``.
    """
    qr = QRCode(**kwargs)
    qr.add_data(data)
    return qr.get_matrix()
def _check_version(version):
if version < 1 or version > 40:
raise ValueError(
"Invalid version (was %s, expected 1 to 40)" % version)
def _check_box_size(size):
if int(size) <= 0:
raise ValueError(
"Invalid box size (was %s, expected larger than 0)" % size)
def _check_mask_pattern(mask_pattern):
if mask_pattern is None:
return
if not isinstance(mask_pattern, int):
raise TypeError(
"Invalid mask pattern (was %s, expected int)" % type(mask_pattern))
if mask_pattern < 0 or mask_pattern > 7:
raise ValueError(
"Mask pattern should be in range(8) (got %s)" % mask_pattern)
class QRCode:
    """Incrementally builds a QR code symbol.

    Typical use: construct, ``add_data(...)``, then ``get_matrix()``.
    After ``make()`` runs, ``modules`` holds the square matrix of booleans
    (True = dark module) and ``modules_count`` its side length.
    """
    def __init__(self, version=None,
                 error_correction=ERROR_CORRECT_M,
                 box_size=10, border=4,
                 mask_pattern=None):
        _check_box_size(box_size)
        # ``version`` stays None to mean "choose the smallest that fits".
        self.version = version and int(version)
        self.error_correction = int(error_correction)
        self.box_size = int(box_size)
        # Spec says border should be at least four boxes wide, but allow for
        # any (e.g. for producing printable QR codes).
        self.border = int(border)
        _check_mask_pattern(mask_pattern)
        self.mask_pattern = mask_pattern
        self.clear()
    def clear(self):
        """Reset the internal data."""
        self.modules = None
        self.modules_count = 0
        self.data_cache = None
        self.data_list = []
    def add_data(self, data):
        """Queue *data* (a QRData, or anything QRData accepts) for encoding."""
        if isinstance(data, QRData):
            self.data_list.append(data)
        else:
            self.data_list.append(QRData(data))
        self.data_cache = None
    def make(self, fit=True):
        """Compile the data into a QR Code array.

        :param fit: If ``True`` (or if a size has not been provided), find the
            best fit for the data to avoid data overflow errors.
        """
        if fit or (self.version is None):
            self.best_fit(start=self.version)
        if self.mask_pattern is None:
            self.makeImpl(False, self.best_mask_pattern())
        else:
            self.makeImpl(False, self.mask_pattern)
    def makeImpl(self, test, mask_pattern):
        """Render the full module matrix for the current version.

        With ``test=True`` the data bits are suppressed (used while scoring
        candidate mask patterns).
        """
        _check_version(self.version)
        self.modules_count = self.version * 4 + 17
        self.modules = [None] * self.modules_count
        for row in range(self.modules_count):
            self.modules[row] = [None] * self.modules_count
            for col in range(self.modules_count):
                self.modules[row][col] = None # (col + row) % 3
        # Function patterns first, then format/version info, then data.
        self.setup_position_probe_pattern(0, 0)
        self.setup_position_probe_pattern(self.modules_count - 7, 0)
        self.setup_position_probe_pattern(0, self.modules_count - 7)
        self.setup_position_adjust_pattern()
        self.setup_timing_pattern()
        self.setup_type_info(test, mask_pattern)
        if self.version >= 7:
            self.setup_type_number(test)
        if self.data_cache is None:
            self.data_cache = create_data(
                self.version, self.error_correction, self.data_list)
        self.map_data(self.data_cache, mask_pattern)
    def setup_position_probe_pattern(self, row, col):
        """Draw one 7x7 finder pattern (with separator) at (row, col)."""
        for r in range(-1, 8):
            if row + r <= -1 or self.modules_count <= row + r:
                continue
            for c in range(-1, 8):
                if col + c <= -1 or self.modules_count <= col + c:
                    continue
                # Dark outer ring and dark 3x3 centre; everything else light.
                if (0 <= r and r <= 6 and (c == 0 or c == 6)
                        or (0 <= c and c <= 6 and (r == 0 or r == 6))
                        or (2 <= r and r <= 4 and 2 <= c and c <= 4)):
                    self.modules[row + r][col + c] = True
                else:
                    self.modules[row + r][col + c] = False
    def best_fit(self, start=None):
        """Find the minimum size required to fit in the data."""
        if start is None:
            start = 1
        _check_version(start)
        # Corresponds to the code in create_data, except we don't yet know
        # version, so optimistically assume start and check later
        mode_sizes = mode_sizes_for_version(start)
        buffer = BitBuffer()
        for data in self.data_list:
            buffer.put(data.mode, 4)
            buffer.put(len(data), mode_sizes[data.mode])
            data.write(buffer)
        needed_bits = len(buffer)
        self.version = start
        end = len(BIT_LIMIT_TABLE[self.error_correction])
        while (self.version < end and
               needed_bits > BIT_LIMIT_TABLE[self.error_correction][self.version]):
            self.version += 1
        if self.version == 41:
            raise RuntimeError('Version overflow')
        # Now check whether we need more bits for the mode sizes, recursing if
        # our guess was too low
        if mode_sizes is not mode_sizes_for_version(self.version):
            self.best_fit(start=self.version)
        return self.version
    def best_mask_pattern(self):
        """Find the most efficient mask pattern."""
        min_lost_point = 0
        pattern = 0
        # Try all eight masks and keep the one with the lowest penalty score.
        for i in range(8):
            self.makeImpl(True, i)
            lost_point = make_lost_point(self.modules)
            if i == 0 or min_lost_point > lost_point:
                min_lost_point = lost_point
                pattern = i
        return pattern
    def setup_timing_pattern(self):
        """Draw the alternating timing lines in row 6 and column 6."""
        for r in range(8, self.modules_count - 8):
            if self.modules[r][6] is not None:
                continue
            self.modules[r][6] = (r % 2 == 0)
        for c in range(8, self.modules_count - 8):
            if self.modules[6][c] is not None:
                continue
            self.modules[6][c] = (c % 2 == 0)
    def setup_position_adjust_pattern(self):
        """Draw the 5x5 alignment patterns for this version."""
        pos = pattern_position(self.version)
        for i in range(len(pos)):
            for j in range(len(pos)):
                row = pos[i]
                col = pos[j]
                if self.modules[row][col] is not None:
                    # Position overlaps a finder pattern; skip it.
                    continue
                for r in range(-2, 3):
                    for c in range(-2, 3):
                        # Dark border and dark centre dot; interior light.
                        if (r == -2 or r == 2 or c == -2 or c == 2 or
                                (r == 0 and c == 0)):
                            self.modules[row + r][col + c] = True
                        else:
                            self.modules[row + r][col + c] = False
    def setup_type_number(self, test):
        """Place the 18-bit version information (versions 7+), twice."""
        bits = BCH_type_number(self.version)
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i // 3][i % 3 + self.modules_count - 8 - 3] = mod
        for i in range(18):
            mod = (not test and ((bits >> i) & 1) == 1)
            self.modules[i % 3 + self.modules_count - 8 - 3][i // 3] = mod
    def setup_type_info(self, test, mask_pattern):
        """Place the 15-bit format information (EC level + mask), twice."""
        data = (self.error_correction << 3) | mask_pattern
        bits = BCH_type_info(data)
        # vertical
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if i < 6:
                self.modules[i][8] = mod
            elif i < 8:
                self.modules[i + 1][8] = mod
            else:
                self.modules[self.modules_count - 15 + i][8] = mod
        # horizontal
        for i in range(15):
            mod = (not test and ((bits >> i) & 1) == 1)
            if i < 8:
                self.modules[8][self.modules_count - i - 1] = mod
            elif i < 9:
                self.modules[8][15 - i - 1 + 1] = mod
            else:
                self.modules[8][15 - i - 1] = mod
        # fixed module
        self.modules[self.modules_count - 8][8] = (not test)
    def map_data(self, data, mask_pattern):
        """Zig-zag the codeword bits into the free modules, applying the mask.

        Columns are walked in pairs from right to left, alternating vertical
        direction; column 6 (the timing column) is skipped.
        """
        inc = -1
        row = self.modules_count - 1
        bitIndex = 7
        byteIndex = 0
        mask_func = make_mask_func(mask_pattern)
        data_len = len(data)
        for col in range(self.modules_count - 1, 0, -2):
            if col <= 6:
                col -= 1
            col_range = (col, col-1)
            while True:
                for c in col_range:
                    if self.modules[row][c] is None:
                        dark = False
                        if byteIndex < data_len:
                            dark = (((data[byteIndex] >> bitIndex) & 1) == 1)
                        if mask_func(row, c):
                            dark = not dark
                        self.modules[row][c] = dark
                        bitIndex -= 1
                        if bitIndex == -1:
                            byteIndex += 1
                            bitIndex = 7
                row += inc
                if row < 0 or self.modules_count <= row:
                    row -= inc
                    inc = -inc
                    break
    def get_matrix(self):
        """Render the symbol into a framebuffer; returns ``(FrameBuffer, width)``.

        NOTE(review): FrameBuffer/MONO_HLSB are presumably MicroPython's
        ``framebuf`` names imported earlier in this file -- confirm.  Also
        note the return type differs when ``border`` is 0 (the raw module
        matrix is returned instead of a framebuffer).
        """
        if self.data_cache is None:
            self.make()
        if not self.border:
            return self.modules
        width = len(self.modules) + self.border * 2
        buf = bytearray(width * (width + 7) // 8)
        fb = FrameBuffer(buf, width, width, MONO_HLSB)
        fb.fill(0)
        y = self.border
        for module in self.modules:
            x = self.border
            for p in module:
                fb.pixel(x, y, p)
                x += 1
            y += 1
        return (fb, width)
| 31.694631 | 745 | 0.495844 |
acf026738f0558ceea0c102389c27873876201b3 | 3,218 | py | Python | backend/project/predictor.py | MagicAP-QA/document-layout-analysis-app | 39467658cc8704d734c8e50055449493f71e9495 | [
"MIT"
] | null | null | null | backend/project/predictor.py | MagicAP-QA/document-layout-analysis-app | 39467658cc8704d734c8e50055449493f71e9495 | [
"MIT"
] | null | null | null | backend/project/predictor.py | MagicAP-QA/document-layout-analysis-app | 39467658cc8704d734c8e50055449493f71e9495 | [
"MIT"
] | null | null | null | import base64
import datetime
import pathlib
import time
import numpy as np
import cv2
import yaml
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog
from detectron2.structures.boxes import Boxes
from project.d2predictor import VisualizationDemo
from project.file_utils import download_file
# MODEL_DOWNLOAD_URL = "https://www.dropbox.com/sh/wgt9skz67usliei/AABPmqM77ERycAd87vubWc4Ua/model_final_trimmed.pth?dl=1"
# MODEL NAME: DLA_mask_rcnn_X_101_32x8d_FPN_3x
MODEL_DOWNLOAD_URL = "https://www.dropbox.com/sh/1098ym6vhad4zi6/AAD8Y-SVN6EbfAWEDYuZHG8xa/model_final_trimmed.pth?dl=1"
# Module-level configuration, loaded once at import time from
# model_config.yaml (expected keys: categories, cfg_file, model_file).
with open(pathlib.Path().parent / "model_config.yaml") as f:
    model_config = yaml.full_load(f)
classes =None
cfg_file=None
model_weights = None
# NOTE(review): these guards always fire on first import (the globals start
# as None), so each branch always runs; presumably intended as re-load
# protection -- confirm.
if not classes:
    print("Loading classes")
    classes = model_config["categories"]
if not cfg_file:
    print("Loading cfg_file")
    cfg_file = model_config["cfg_file"]
if not model_weights:
    print("Loading model_weights")
    model_weights = model_config["model_file"]
def prepare_predictor():
    """Build and return the Detectron2 VisualizationDemo predictor.

    Uses the module-level ``cfg_file`` / ``model_weights`` / ``classes``
    values loaded from model_config.yaml, downloading the weights file
    first if it is not present locally.  Inference is configured for CPU.
    """
    print(f"Loaded config: {cfg_file}")
    print(f"Loaded model: {model_weights}")
    # create config
    cfg = get_cfg()
    cfg.merge_from_file(cfg_file)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.WEIGHTS = model_weights
    cfg.MODEL.DEVICE = "cpu"
    MetadataCatalog.get("dla_val").thing_classes = classes
    # Fetch the weights on first run only.
    if not pathlib.Path(model_weights).exists():
        print(f"Downloading {model_weights}...")
        download_file(MODEL_DOWNLOAD_URL, model_weights)
        print("Download complete!")
    predictor = VisualizationDemo(cfg)
    print("Predictor has been initialized.")
    return predictor
def extract_instances(instances):
    """Unpack a Detectron2 ``Instances`` object into plain arrays.

    Returns ``(boxes, pred_classes, scores, labels)`` where *labels* are
    human-readable "<class> <score>%" strings.
    """
    boxes = instances.pred_boxes
    print(f"instances: {len(boxes)}")
    # pred_boxes may be a detectron2 Boxes wrapper or already array-like.
    if isinstance(boxes, Boxes):
        boxes = boxes.tensor.numpy()
    else:
        boxes = np.asarray(boxes)
    scores = instances.scores
    pred_classes = instances.pred_classes
    labels = [classes[i] for i in pred_classes]
    labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)]
    return boxes, pred_classes, scores, labels
def make_predictions(image, return_json, predictor):
    """Run *predictor* on *image* and optionally package the result as JSON.

    Returns a dict of predictions when *return_json* is truthy, otherwise an
    empty string.  Times are reported in whole seconds.
    """
    start_time = time.time()
    # image = image[:, :, ::-1]
    predictions, _ = predictor.run_on_image(image)
    inference_time = int(time.time() - start_time)
    print(f"inference time: {datetime.timedelta(seconds=inference_time)}")
    boxes, pred_classes, scores, labels = extract_instances(predictions["instances"])
    # img = vis_output.get_image()
    if return_json:
        # retval, buffer = cv2.imencode(".jpg", img)
        # jpg_as_text = base64.b64encode(buffer).decode("utf-8")
        # NOTE(review): total_time is computed but unused -- the base64
        # image branch above is commented out.
        total_time = int(time.time() - start_time)
        json_data = {
            "predictions": {
                "scores": scores.tolist(),
                "pred_classes": pred_classes.tolist(),
                "pred_boxes": boxes.tolist(),
                "classes": classes,
            },
            "instances": len(boxes),
            "img": "",
            "inference_time": f"{inference_time}s",
        }
        return json_data
    else:
        return ""
| 27.042017 | 122 | 0.677129 |
acf02674186552cc6d05a026d11bb2ace471dff7 | 1,840 | py | Python | phase1/consumer-to-SQL_2nd_method.py | nortonlyr/Kafka3-Data | 85c4eb476dd74ec0c4d416a99fef7f0ac0767c1d | [
"MIT"
] | null | null | null | phase1/consumer-to-SQL_2nd_method.py | nortonlyr/Kafka3-Data | 85c4eb476dd74ec0c4d416a99fef7f0ac0767c1d | [
"MIT"
] | null | null | null | phase1/consumer-to-SQL_2nd_method.py | nortonlyr/Kafka3-Data | 85c4eb476dd74ec0c4d416a99fef7f0ac0767c1d | [
"MIT"
] | null | null | null | from kafka import KafkaConsumer, TopicPartition
from json import loads
import os
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy import Integer, String, Column
mysqlkey = os.environ.get('mysql_key')
class XactionConsumer:
    """Kafka consumer that mirrors bank transactions into MySQL and keeps
    running per-customer balances in memory."""
    def __init__(self):
        self.consumer = KafkaConsumer('bank-customer-events',
                                      bootstrap_servers=['localhost:9092'],
                                      # auto_offset_reset='earliest',
                                      value_deserializer=lambda m: loads(m.decode('ascii')))
        # Last message seen per customer id.
        self.ledger = {}
        # Running balance per customer id.
        self.custBalances = {}
        self.mysql_engine = create_engine("mysql+pymysql://root:" + mysqlkey + "@localhost/bank-customer-events")
        self.conn = self.mysql_engine.connect()
    def handleMessages(self):
        """Consume messages forever, inserting each row into MySQL and
        updating the in-memory balance ('dep' adds, anything else subtracts)."""
        for message in self.consumer:
            message = message.value
            print('{} received'.format(message))
            self.ledger[message['custid']] = message
            #Create a table in mysql first with id (auto_increment primary key), custid, type, date and amt
            # NOTE(review): the first bound value is ``int()`` (i.e. 0), not
            # NULL -- relies on MySQL auto-increment behaviour; confirm.
            self.conn.execute("INSERT INTO transaction4 VALUES (%s,%s,%s,%s,%s)",
                              (int(), int(message['custid']), str(message['type']), int(message['date']), int(message['amt'])))
            if message['custid'] not in self.custBalances:
                self.custBalances[message['custid']] = 0
            if message['type'] == 'dep':
                self.custBalances[message['custid']] += message['amt']
            else:
                self.custBalances[message['custid']] -= message['amt']
            print(self.custBalances)
# Run the consumer loop when executed as a script.
if __name__ == "__main__":
    c = XactionConsumer()
c.handleMessages() | 40 | 127 | 0.599457 |
acf026c803f645c1c22d484589efd57d63fa3c31 | 1,497 | py | Python | salt/modules/nginx.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | 1 | 2020-09-06T16:03:14.000Z | 2020-09-06T16:03:14.000Z | salt/modules/nginx.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | salt/modules/nginx.py | jeblair/salt | 24bdca62c1d43df198e07e54cbdd0e6397243f37 | [
"Apache-2.0"
] | null | null | null | '''
Support for nginx
'''
import salt.utils
__outputter__ = {
'signal': 'txt',
}
def __virtual__():
    '''
    Only load the module if nginx is installed
    '''
    if salt.utils.which(__detect_os()):
        return 'nginx'
    return False
def __detect_os():
return 'nginx'
def version():
    '''
    Return server version from nginx -v

    CLI Example::

        salt '*' nginx.version
    '''
    cmd = __detect_os() + ' -v'
    out = __salt__['cmd.run'](cmd).split('\n')
    # NOTE(review): ``ret[2]`` assumes the first output line has three
    # ': '-separated fields (e.g. "nginx: nginx version: nginx/x.y.z");
    # nginx builds that print only "nginx version: nginx/x.y.z" would make
    # this raise IndexError -- confirm against the targeted nginx versions.
    ret = out[0].split(': ')
    return ret[2]
def signal(signal=None):
    '''
    Send a control signal to nginx.

    Valid signals: 'reopen', 'stop', 'quit', 'reload'.  Unknown signals are
    ignored (None is returned).  The original docstring said "httpd"; this
    module controls nginx.

    CLI Example::

        salt '*' nginx.signal reload
    '''
    valid_signals = ('reopen', 'stop', 'quit', 'reload')
    if signal not in valid_signals:
        return
    # Bug fix: the original re-tested ``signal in valid_signals`` here,
    # which is always true after the guard above, so its ``else`` branch
    # (bare-argument form) was unreachable dead code.
    cmd = '{0} -s {1}'.format(__detect_os(), signal)
    out = __salt__['cmd.run_all'](cmd)
    # A non-zero return code means fail
    if out['retcode'] and out['stderr']:
        ret = out['stderr'].strip()
    # 'nginxctl configtest' returns 'Syntax OK' to stderr
    elif out['stderr']:
        ret = out['stderr'].strip()
    elif out['stdout']:
        ret = out['stdout'].strip()
    # No output for something like: nginxctl graceful
    else:
        ret = 'Command: "{0}" completed successfully!'.format(cmd)
    return ret
| 21.695652 | 66 | 0.580494 |
acf028c36018a9b24ebb0526c51638826f80432d | 2,632 | py | Python | main.py | v4rgas/centollabot4All | 702156d242167a06a9e22e1bce75f54f2e1ae761 | [
"MIT"
] | null | null | null | main.py | v4rgas/centollabot4All | 702156d242167a06a9e22e1bce75f54f2e1ae761 | [
"MIT"
] | null | null | null | main.py | v4rgas/centollabot4All | 702156d242167a06a9e22e1bce75f54f2e1ae761 | [
"MIT"
] | null | null | null | # Importando dependencias
import configparser
import discord
import asyncio
from discord.ext import commands
import sys
from os import walk
# Read settings from the config file.
config = configparser.ConfigParser()
config.read('config.ini')
if 'MAIN' not in config:
    raise ValueError('No se encontro la secccion MAIN en el archivo config')
prefix = config['MAIN']['PREFIJO']
# Make the modules in the commands folder importable.
sys.path.append('./commands')
# Empty list that will hold the command names.
f = []
# Walk the commands directory (top level only) collecting command files.
for (dirpath, dirnames, filenames) in walk('./commands'):
    # The file names found in this directory.
    for x in filenames:
        # Make sure every file we take is a Python file.
        if not x[-3:] == '.py':
            break
        # Store the name without the .py extension (skipping autoexec).
        if x[:-3] != 'autoexec':
            f.append(x[:-3])
    break
# Discord.py client.
# NOTE(review): the command prefix here is hard-coded to '-' while the rest
# of the file reads ``prefix`` from config.ini -- confirm they agree.
client = commands.Bot(command_prefix='-')
# Simple ready event announcing that the bot is online.
@client.event
async def on_ready():
    print('Iniciado como ' + client.user.name)
# A list that collects the info object of every command.
utilidad = []
# Similar to the walk above, but this time each command module is imported
# and its ``info`` object is stored (for later use, e.g. a help command).
for (dirpath, dirnames, filenames) in walk('./commands'):
    for x in filenames:
        if not x[-3:] == '.py':
            break
        util = __import__(x[:-3])
        if hasattr(util, 'info'):
            utilidad.append(util.info)
        else:
            print('El comando ' + x + ' no tiene el objeto de información')
# Message event acting as the root of the command handler.
@client.event
async def on_message(message):
    # Ignore messages coming from other bots.
    if message.author.bot is True:
        return
    # Bug fix: guard with ``startswith`` -- the original indexed
    # ``message.content[0]``, which raises IndexError for empty content
    # (e.g. attachment-only messages).
    if message.content.startswith(prefix):
        # Split off the command name (without the prefix).
        command = message.content.split(' ')[0][1:]
        # Collect the arguments.
        args = message.content.split(' ')
        # Drop the first element (the command itself).
        del args[0]
        # Look the command up among the discovered command modules.
        if command in f:
            # Import it and run it, passing message, args and command info.
            comando = __import__(command)
            await comando.run(message, args, utilidad)
# Start the bot with the token from the config file.
client.run(config['MAIN']['TOKEN'])
| 29.909091 | 80 | 0.661094 |
acf028cb9c5690eb3ab29a09b8a8b6426d314770 | 14,036 | py | Python | splinter/driver/lxmldriver.py | maxoja/splinter | 50beb20ae89ebb775c3f2c95866a1bcb66a626e8 | [
"BSD-3-Clause"
] | null | null | null | splinter/driver/lxmldriver.py | maxoja/splinter | 50beb20ae89ebb775c3f2c95866a1bcb66a626e8 | [
"BSD-3-Clause"
] | null | null | null | splinter/driver/lxmldriver.py | maxoja/splinter | 50beb20ae89ebb775c3f2c95866a1bcb66a626e8 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2014 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from __future__ import with_statement
import os.path
import re
import time
import sys
import lxml.etree
import lxml.html
from lxml.cssselect import CSSSelector
from splinter.driver import DriverAPI, ElementAPI
from splinter.driver.element_present import ElementPresentMixIn
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
class LxmlDriver(ElementPresentMixIn, DriverAPI):
    """Abstract splinter driver built on lxml's HTML parsing.

    Subclasses must implement ``_do_method`` (HTTP dispatch), ``html``,
    and ``submit_data``; this base class provides element finding, form
    filling, and navigation history on top of the parsed document tree.
    """

    def __init__(self, user_agent=None, wait_time=2):
        # Default timeout (seconds) for is_text_present/is_text_not_present.
        self.wait_time = wait_time
        self._history = []
        # Visited URLs used by back()/forward() navigation.
        self._last_urls = []
        # Cache of lxml form elements keyed by form name; shared with
        # LxmlControlElement so edits accumulate on the same form object.
        self._forms = {}

    def __enter__(self):
        # Support use as a context manager; nothing to set up.
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # No resources to release.
        pass

    def _do_method(self, action, url, data=None):
        """Perform an HTTP request; must be implemented by subclasses."""
        raise NotImplementedError(
            "%s doesn't support doing http methods." % self.driver_name
        )

    def visit(self, url):
        """Navigate to ``url`` with an HTTP GET."""
        self._do_method("get", url)

    def serialize(self, form):
        """Convert an lxml form element into a dict of submittable values.

        Submit buttons are removed, multi-selects become lists, and file
        inputs are replaced by open binary file handles.
        """
        data = {}
        for key in form.inputs.keys():
            input = form.inputs[key]
            if getattr(input, "type", "") == "submit":
                try:
                    form.remove(input)
                # Issue 595: throws ValueError: Element not child of this node
                except ValueError:
                    pass
        for k, v in form.fields.items():
            if v is None:
                continue
            if isinstance(v, lxml.html.MultipleSelectOptions):
                data[k] = [val for val in v]
            else:
                data[k] = v
        for key in form.inputs.keys():
            input = form.inputs[key]
            if getattr(input, "type", "") == "file" and key in data:
                data[key] = open(data[key], "rb")
        return data

    def submit(self, form):
        """Submit ``form`` using its declared method/action and return the response."""
        method = form.attrib.get("method", "get").lower()
        action = form.attrib.get("action", "")
        # An action of "." means "submit to the current URL".
        if action.strip() != ".":
            url = os.path.join(self._url, action)
        else:
            url = self._url
        self._url = url
        data = self.serialize(form)
        self._do_method(method, url, data=data)
        return self._response

    def submit_data(self, form):
        """Submit ``form`` and return the response body; subclass responsibility."""
        raise NotImplementedError(
            "%s doesn't support submitting then getting the data." % self.driver_name
        )

    def back(self):
        # Remember the current URL so forward() can return to it.
        self._last_urls.insert(0, self.url)
        self.visit(self._last_urls[1])

    def forward(self):
        try:
            self.visit(self._last_urls.pop())
        except IndexError:
            # No forward history; stay on the current page.
            pass

    def reload(self):
        self.visit(self._url)

    def quit(self):
        # Nothing to clean up for a stateless HTTP driver.
        pass

    @property
    def htmltree(self):
        """Lazily-parsed lxml tree of the current page (cached per page)."""
        try:
            return self._html
        except AttributeError:
            self._html = lxml.html.fromstring(self.html)
            return self._html

    @property
    def title(self):
        """Text of the page's <title> element."""
        html = self.htmltree
        return html.xpath("//title")[0].text_content().strip()

    @property
    def html(self):
        """Raw HTML of the current page; subclass responsibility."""
        raise NotImplementedError(
            "%s doesn't support getting the html of the response." % self.driver_name
        )

    @property
    def url(self):
        return self._url

    def find_option_by_value(self, value):
        """Find an <option> by its value attribute, wrapped with its <select>."""
        html = self.htmltree
        element = html.xpath('//option[@value="%s"]' % value)[0]
        control = LxmlControlElement(element.getparent(), self)
        return ElementList(
            [LxmlOptionElement(element, control)], find_by="value", query=value
        )

    def find_option_by_text(self, text):
        """Find an <option> by its visible text, wrapped with its <select>."""
        html = self.htmltree
        element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
        control = LxmlControlElement(element.getparent(), self)
        return ElementList(
            [LxmlOptionElement(element, control)], find_by="text", query=text
        )

    def find_by_css(self, selector):
        # Translate the CSS selector to XPath and delegate.
        xpath = CSSSelector(selector).path
        return self.find_by_xpath(
            xpath, original_find="css", original_selector=selector
        )

    def find_by_xpath(self, xpath, original_find=None, original_selector=None):
        """Find elements by XPath, wrapping each in the appropriate element class."""
        html = self.htmltree
        elements = []
        for xpath_element in html.xpath(xpath):
            if self._element_is_link(xpath_element):
                # Links get dedicated wrappers so click() navigates.
                return self._find_links_by_xpath(xpath)
            elif self._element_is_control(xpath_element):
                elements.append((LxmlControlElement, xpath_element))
            else:
                elements.append((LxmlElement, xpath_element))
        find_by = original_find or "xpath"
        query = original_selector or xpath
        return ElementList(
            [element_class(element, self) for element_class, element in elements],
            find_by=find_by,
            query=query,
        )

    def find_by_tag(self, tag):
        return self.find_by_xpath(
            "//%s" % tag, original_find="tag", original_selector=tag
        )

    def find_by_value(self, value):
        return self.find_by_xpath(
            '//*[@value="%s"]' % value, original_find="value", original_selector=value
        )

    def find_by_text(self, text):
        return self.find_by_xpath(
            '//*[text()="%s"]' % text, original_find="text", original_selector=text
        )

    def find_by_id(self, id_value):
        # [1] restricts the match to the first element with this id.
        return self.find_by_xpath(
            '//*[@id="%s"][1]' % id_value,
            original_find="id",
            original_selector=id_value,
        )

    def find_by_name(self, name):
        """Find form controls by their name attribute."""
        html = self.htmltree
        xpath = '//*[@name="%s"]' % name
        elements = []
        for xpath_element in html.xpath(xpath):
            elements.append(xpath_element)
        find_by = "name"
        query = xpath
        return ElementList(
            [LxmlControlElement(element, self) for element in elements],
            find_by=find_by,
            query=query,
        )

    def find_link_by_text(self, text):
        return self._find_links_by_xpath("//a[text()='%s']" % text)

    def find_link_by_href(self, href):
        return self._find_links_by_xpath("//a[@href='%s']" % href)

    def find_link_by_partial_href(self, partial_href):
        return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)

    def find_link_by_partial_text(self, partial_text):
        return self._find_links_by_xpath(
            "//a[contains(normalize-space(.), '%s')]" % partial_text
        )

    def fill(self, name, value):
        """Fill the first control named ``name`` with ``value``."""
        self.find_by_name(name=name).first.fill(value)

    def fill_form(self, field_values, form_id=None, name=None):
        """Fill multiple controls from a {name: value} mapping.

        When ``form_id`` or ``name`` is given, controls are looked up inside
        that form; otherwise they are looked up document-wide.
        """
        form = None
        if name is not None:
            form = self.find_by_name(name)
        if form_id is not None:
            form = self.find_by_id(form_id)
        for name, value in field_values.items():
            if form:
                element = form.find_by_name(name)
                control = element.first._element
            else:
                element = self.find_by_name(name)
                control = element.first._control
            control_type = control.get("type")
            # Each control type has its own value representation in lxml.
            if control_type == "checkbox":
                if value:
                    control.value = value  # control.options
                else:
                    control.value = []
            elif control_type == "radio":
                control.value = (
                    value
                )  # [option for option in control.options if option == value]
            elif control_type == "select":
                # Multi-selects want a list; normalize scalars.
                if isinstance(value, list):
                    control.value = value
                else:
                    control.value = [value]
            else:
                # text, textarea, password, tel
                control.value = value

    def choose(self, name, value):
        """Select the radio button named ``name`` whose value is ``value``."""
        self.find_by_name(name).first._control.value = value

    def check(self, name):
        """Check the checkbox named ``name``."""
        control = self.find_by_name(name).first._control
        control.value = ["checked"]

    def uncheck(self, name):
        """Uncheck the checkbox named ``name``."""
        control = self.find_by_name(name).first._control
        control.value = []

    def attach_file(self, name, file_path):
        """Set the file input named ``name`` to ``file_path``."""
        control = self.find_by_name(name).first._control
        control.value = file_path

    def _find_links_by_xpath(self, xpath):
        html = self.htmltree
        links = html.xpath(xpath)
        return ElementList(
            [LxmlLinkElement(link, self) for link in links],
            find_by="xpath",
            query=xpath,
        )

    def select(self, name, value):
        """Select the option with ``value`` in the <select> named ``name``."""
        self.find_by_name(name).first._control.value = value

    def is_text_present(self, text, wait_time=None):
        """Poll until ``text`` appears in the page body or ``wait_time`` expires."""
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time
        while time.time() < end_time:
            if self._is_text_present(text):
                return True
        return False

    def _is_text_present(self, text):
        try:
            body = self.find_by_tag("body").first
            return text in body.text
        except ElementDoesNotExist:
            # This exception will be thrown if the body tag isn't present
            # This has occasionally been observed. Assume that the
            # page isn't fully loaded yet
            return False

    def is_text_not_present(self, text, wait_time=None):
        """Poll until ``text`` is absent from the page body or ``wait_time`` expires."""
        wait_time = wait_time or self.wait_time
        end_time = time.time() + wait_time
        while time.time() < end_time:
            if not self._is_text_present(text):
                return True
        return False

    def _element_is_link(self, element):
        return element.tag == "a"

    def _element_is_control(self, element):
        return element.tag in ["button", "input", "textarea"]

    @property
    def cookies(self):
        # _cookie_manager is expected to be set by the concrete subclass.
        return self._cookie_manager
class LxmlElement(ElementAPI):
    """Wrapper around a raw lxml element providing the splinter element API.

    Attribute access is exposed via ``element["attr"]``; the various
    ``find_by_*`` methods return further wrapped elements.
    """

    def __init__(self, element, parent):
        self._element = element
        self.parent = parent

    def __getitem__(self, attr):
        return self._element.attrib[attr]

    def _wrap(self, raw_elements):
        """Wrap raw lxml elements in an ElementList of this element class."""
        return ElementList([self.__class__(raw, self) for raw in raw_elements])

    def find_by_css(self, selector):
        return self._wrap(self._element.cssselect(selector))

    def find_by_xpath(self, selector):
        return self._wrap(self._element.xpath(selector))

    def find_by_name(self, name):
        return self._wrap(self._element.cssselect('[name="%s"]' % name))

    def find_by_tag(self, name):
        return self._wrap(self._element.cssselect(name))

    def find_by_value(self, value):
        return self._wrap(self._element.cssselect('[value="%s"]' % value))

    def find_by_text(self, text):
        return self.find_by_xpath('./*[text()="%s"]' % text)

    def find_by_id(self, id):
        return self._wrap(self._element.cssselect("#%s" % id))

    @property
    def value(self):
        return self._element.text_content()

    @property
    def text(self):
        return self.value

    @property
    def outer_html(self):
        return lxml.html.tostring(self._element, encoding="unicode").strip()

    @property
    def html(self):
        # Strip the element's own open/close tags, keeping the inner markup.
        match = re.match(
            r"^<[^<>]+>(.*)</[^<>]+>$", self.outer_html, re.MULTILINE | re.DOTALL)
        return match.group(1)

    def has_class(self, class_name):
        return len(self._element.find_class(class_name)) > 0
class LxmlLinkElement(LxmlElement):
    """Element wrapper for anchors; clicking navigates the driver to href."""

    def __init__(self, element, parent):
        super(LxmlLinkElement, self).__init__(element, parent)
        # Keep a driver reference so click() can trigger navigation.
        self._browser = parent

    def __getitem__(self, attr):
        # Plain delegation to the base attribute lookup.
        return super(LxmlLinkElement, self).__getitem__(attr)

    def click(self):
        """Follow the link by visiting its href with the driver."""
        return self._browser.visit(self["href"])
class LxmlControlElement(LxmlElement):
    """Element wrapper for form controls (button/input/textarea)."""

    def __init__(self, control, parent):
        # NOTE(review): stores the element as _control (not _element), so
        # inherited find_by_* methods from LxmlElement would not work here.
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        return self._control.attrib[attr]

    @property
    def value(self):
        return self._control.value

    @property
    def checked(self):
        # Checkbox state: lxml exposes an unchecked box as a falsy value.
        return bool(self._control.value)

    def click(self):
        """Submit the control's parent form.

        For submit buttons with a name, the name/value pair is added to the
        form as a hidden input so it is included in the submitted data.
        """
        parent_form = self._get_parent_form()
        if self._control.get("type") == "submit":
            name = self._control.get("name")
            if name:
                value = self._control.get("value", "")
                parent_form.append(
                    lxml.html.Element("input", name=name, value=value, type="hidden")
                )
        return self.parent.submit_data(parent_form)

    def fill(self, value):
        """Set this control's value on its (cached) parent form."""
        parent_form = self._get_parent_form()
        if sys.version_info[0] > 2:
            parent_form.fields[self["name"]] = value
        else:
            # Python 2: form fields must be unicode; decode byte strings.
            if not isinstance(value, unicode):
                value = value.decode("utf-8")
            parent_form.fields[self["name"]] = value

    def select(self, value):
        self._control.value = value

    def _get_parent_form(self):
        """Return the enclosing <form>, cached on the driver by form name.

        Caching ensures successive fills mutate the same form object that
        will eventually be submitted.
        """
        parent_form = next(self._control.iterancestors("form"))
        return self.parent._forms.setdefault(parent_form._name(), parent_form)
class LxmlOptionElement(LxmlElement):
    """Element wrapper for an <option>; ``parent`` is its <select> wrapper."""

    def __init__(self, control, parent):
        self._control = control
        self.parent = parent

    def __getitem__(self, attr):
        return self._control.attrib[attr]

    @property
    def text(self):
        # Visible label of the option.
        return self._control.text

    @property
    def value(self):
        return self._control.attrib["value"]

    @property
    def selected(self):
        # Selected iff the parent <select>'s current value matches ours.
        return self.parent.value == self.value
| 30.380952 | 103 | 0.597321 |
acf02944b046a6353dee7f6456d050b66fe2d2b5 | 109 | py | Python | python/lambda-layer/layer/python/common.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
] | 2,941 | 2019-02-08T15:29:36.000Z | 2022-03-31T23:57:42.000Z | python/lambda-layer/layer/python/common.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
] | 558 | 2019-02-14T23:32:02.000Z | 2022-03-30T00:35:11.000Z | python/lambda-layer/layer/python/common.py | marclyo/aws-cdk-examples | f041f07ebd4c94897e16d37ff813a38eb32645a1 | [
"Apache-2.0"
def layer_function() -> str:
    """Helper exposed by the Lambda layer.

    Returns a fixed greeting string identifying the layer.
    """
    greeting = "Hello From Helper Layer!"
    return greeting
| 18.166667 | 37 | 0.59633 |
acf029f6a125c917e33b682b871988204c2feb5c | 6,978 | py | Python | mojo/public/tools/bindings/pylib/mojom/generate/generator.py | zipated/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | mojo/public/tools/bindings/pylib/mojom/generate/generator.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | mojo/public/tools/bindings/pylib/mojom/generate/generator.py | cangulcan/src | 2b8388091c71e442910a21ada3d97ae8bc1845d3 | [
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Code shared by the various language-specific code generators."""
from functools import partial
import os.path
import re
import module as mojom
import mojom.fileutil as fileutil
import pack
def ExpectedArraySize(kind):
  """Return the declared length of an array kind, or None for non-arrays."""
  return kind.length if mojom.IsArrayKind(kind) else None
def ToCamel(identifier, lower_initial=False, dilimiter='_'):
  """Splits |identifier| using |dilimiter|, makes the first character of each
  word uppercased (but makes the first character of the first word lowercased
  if |lower_initial| is set to True), and joins the words. Please note that for
  each word, all the characters except the first one are untouched.
  """
  words = [word for word in identifier.split(dilimiter) if word]
  camel = ''.join(word[0].upper() + word[1:] for word in words)
  if lower_initial and camel:
    camel = camel[0].lower() + camel[1:]
  return camel
class Stylizer(object):
  """Stylizers specify naming rules to map mojom names to names in generated
  code. For example, if you would like method_name in mojom to be mapped to
  MethodName in the generated code, you need to define a subclass of Stylizer
  and override StylizeMethod to do the conversion.

  Every method defaults to the identity transform.
  """

  def StylizeConstant(self, mojom_name):
    # Default: pass the constant name through unchanged.
    return mojom_name

  def StylizeField(self, mojom_name):
    # Default: pass the field name through unchanged.
    return mojom_name

  def StylizeStruct(self, mojom_name):
    # Default: pass the struct name through unchanged.
    return mojom_name

  def StylizeUnion(self, mojom_name):
    # Default: pass the union name through unchanged.
    return mojom_name

  def StylizeParameter(self, mojom_name):
    # Default: pass the parameter name through unchanged.
    return mojom_name

  def StylizeMethod(self, mojom_name):
    # Default: pass the method name through unchanged.
    return mojom_name

  def StylizeInterface(self, mojom_name):
    # Default: pass the interface name through unchanged.
    return mojom_name

  def StylizeEnumField(self, mojom_name):
    # Default: pass the enum field name through unchanged.
    return mojom_name

  def StylizeEnum(self, mojom_name):
    # Default: pass the enum name through unchanged.
    return mojom_name

  def StylizeModule(self, mojom_namespace):
    # Default: pass the module namespace through unchanged.
    return mojom_namespace
def WriteFile(contents, full_path):
  """Write |contents| (bytes) to |full_path|, creating parent directories.

  The write is skipped entirely when the file already holds |contents|,
  which avoids touching timestamps and triggering rebuilds.
  """
  if os.path.isfile(full_path):
    with open(full_path, 'rb') as existing:
      if existing.read() == contents:
        return

  # Ensure the containing directory exists, then dump the data to disk.
  fileutil.EnsureDirectoryExists(os.path.dirname(full_path))
  with open(full_path, "wb") as output:
    output.write(contents)
def AddComputedData(module):
  """Adds computed data to the given module. The data is computed once and
  used repeatedly in the generation process."""
  def _AddStructComputedData(exported, struct):
    # Attach packing/layout/version info computed by the pack module.
    struct.packed = pack.PackedStruct(struct)
    struct.bytes = pack.GetByteLayout(struct.packed)
    struct.versions = pack.GetVersionInfo(struct.packed)
    struct.exported = exported
  def _AddUnionComputedData(union):
    # Assign sequential ordinals, restarting from any explicit ordinal.
    ordinal = 0
    for field in union.fields:
      if field.ordinal is not None:
        ordinal = field.ordinal
      field.ordinal = ordinal
      ordinal += 1
  def _AddInterfaceComputedData(interface):
    # Assign method ordinals and compute the interface version as the max
    # of all method min_versions and param-struct versions.
    next_ordinal = 0
    interface.version = 0
    for method in interface.methods:
      if method.ordinal is None:
        method.ordinal = next_ordinal
      next_ordinal = method.ordinal + 1
      if method.min_version is not None:
        interface.version = max(interface.version, method.min_version)
      method.param_struct = _GetStructFromMethod(method)
      interface.version = max(interface.version,
                              method.param_struct.versions[-1].version)
      if method.response_parameters is not None:
        method.response_param_struct = _GetResponseStructFromMethod(method)
        interface.version = max(
            interface.version,
            method.response_param_struct.versions[-1].version)
      else:
        method.response_param_struct = None
  def _GetStructFromMethod(method):
    """Converts a method's parameters into the fields of a struct."""
    params_class = "%s_%s_Params" % (method.interface.mojom_name,
                                     method.mojom_name)
    struct = mojom.Struct(params_class, module=method.interface.module)
    for param in method.parameters:
      struct.AddField(param.mojom_name, param.kind, param.ordinal,
                      attributes=param.attributes)
    _AddStructComputedData(False, struct)
    return struct
  def _GetResponseStructFromMethod(method):
    """Converts a method's response_parameters into the fields of a struct."""
    params_class = "%s_%s_ResponseParams" % (method.interface.mojom_name,
                                             method.mojom_name)
    struct = mojom.Struct(params_class, module=method.interface.module)
    for param in method.response_parameters:
      struct.AddField(param.mojom_name, param.kind, param.ordinal,
                      attributes=param.attributes)
    _AddStructComputedData(False, struct)
    return struct
  # Process every top-level entity in the module.
  for struct in module.structs:
    _AddStructComputedData(True, struct)
  for union in module.unions:
    _AddUnionComputedData(union)
  for interface in module.interfaces:
    _AddInterfaceComputedData(interface)
class Generator(object):
  """Base class for language-specific mojom code generators (Python 2).

  Subclasses implement GenerateFiles and may override the Jinja hooks.
  """
  # Pass |output_dir| to emit files to disk. Omit |output_dir| to echo all
  # files to stdout.
  def __init__(self, module, output_dir=None, typemap=None, variant=None,
               bytecode_path=None, for_blink=False, use_once_callback=False,
               js_bindings_mode="new", export_attribute=None,
               export_header=None, generate_non_variant_code=False,
               support_lazy_serialization=False, disallow_native_types=False,
               disallow_interfaces=False, generate_message_ids=False,
               generate_fuzzing=False):
    # Store the generation options verbatim for use by subclasses.
    self.module = module
    self.output_dir = output_dir
    self.typemap = typemap or {}
    self.variant = variant
    self.bytecode_path = bytecode_path
    self.for_blink = for_blink
    self.use_once_callback = use_once_callback
    self.js_bindings_mode = js_bindings_mode
    self.export_attribute = export_attribute
    self.export_header = export_header
    self.generate_non_variant_code = generate_non_variant_code
    self.support_lazy_serialization = support_lazy_serialization
    self.disallow_native_types = disallow_native_types
    self.disallow_interfaces = disallow_interfaces
    self.generate_message_ids = generate_message_ids
    self.generate_fuzzing = generate_fuzzing
  def Write(self, contents, filename):
    """Write |contents| under output_dir, or echo to stdout when unset."""
    if self.output_dir is None:
      print contents
      return
    full_path = os.path.join(self.output_dir, filename)
    WriteFile(contents, full_path)
  def GenerateFiles(self, args):
    raise NotImplementedError("Subclasses must override/implement this method")
  def GetJinjaParameters(self):
    """Returns default constructor parameters for the jinja environment."""
    return {}
  def GetGlobals(self):
    """Returns global mappings for the template generation."""
    return {}
| 34.374384 | 79 | 0.719547 |
acf02a03045d1bdb39fc62df23c67e17af247ac3 | 5,867 | py | Python | test/functional/prioritise_transaction.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | 1 | 2022-03-28T02:13:10.000Z | 2022-03-28T02:13:10.000Z | test/functional/prioritise_transaction.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | null | null | null | test/functional/prioritise_transaction.py | plc-ultima/plcu | d99eb669ac339c4d0dcedb77bc68ccd0dfe29d4f | [
"MIT"
] | 2 | 2022-03-26T23:59:01.000Z | 2022-03-31T13:27:08.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_BASE_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
    """Functional test of the prioritisetransaction mining RPC.

    Verifies that positive fee deltas get low-fee transactions mined,
    negative deltas keep high-fee transactions out of blocks, prioritised
    free transactions are accepted to the mempool, and prioritisation
    invalidates cached block templates.
    """

    def set_test_params(self):
        # Two nodes with priority printing and a small mempool cap (MB).
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-printpriority=1", "-maxmempool=10"], ["-printpriority=1", "-maxmempool=10"]]

    def run_test(self):
        self.txouts = gen_return_txouts()
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []
        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)
        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count
        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined
        self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))
        self.nodes[0].generate(1)
        mempool = self.nodes[0].getrawmempool()
        self.log.info("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x
        # Something high-fee should have been mined!
        assert(high_fee_tx != None)
        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))
        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)
        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)
        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        self.log.info("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)
        # Create a free transaction. Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]
        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        # PLC-specific: part of the amount must be burned to grave addresses.
        (burn1, burn2, amount) = BurnedAndChangeAmount(utxo["amount"])
        outputs[self.nodes[0].getnewaddress()] = amount
        outputs[GRAVE_ADDRESS_1] = burn1
        outputs[GRAVE_ADDRESS_2] = burn2
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]
        # This will raise an exception due to min relay fee not being met
        assert_raises_rpc_error(-26, "66: min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
        assert(tx_id not in self.nodes[0].getrawmempool())
        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))
        self.log.info("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
        assert(tx_id in self.nodes[0].getrawmempool())
        # Test that calling prioritisetransaction is sufficient to trigger
        # getblocktemplate to (eventually) return a new block.
        mock_time = int(time.time())
        self.nodes[0].setmocktime(mock_time)
        template = self.nodes[0].getblocktemplate()
        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
        self.nodes[0].setmocktime(mock_time+10)
        new_template = self.nodes[0].getblocktemplate()
        assert(template != new_template)
if __name__ == '__main__':
PrioritiseTransactionTest().main()
| 44.44697 | 153 | 0.657747 |
acf02ad57c9b39e810093dedcc63181776aed1c1 | 656 | py | Python | Part 2/Chapter 02/Exercises/excercise_16.py | phuycke/Practice-of-computing-using-Python | 9e477bcaecb0e447dfa7184d2071ca338801c86f | [
"MIT"
] | 1 | 2019-08-13T11:12:59.000Z | 2019-08-13T11:12:59.000Z | Part 2/Chapter 02/Exercises/excercise_16.py | phuycke/Practice-of-computing-using-Python | 9e477bcaecb0e447dfa7184d2071ca338801c86f | [
"MIT"
] | null | null | null | Part 2/Chapter 02/Exercises/excercise_16.py | phuycke/Practice-of-computing-using-Python | 9e477bcaecb0e447dfa7184d2071ca338801c86f | [
"MIT"
] | 1 | 2021-05-16T11:42:19.000Z | 2021-05-16T11:42:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Pieter Huycke
email: pieter.huycke@ugent.be
GitHub: phuycke
"""
#%%
top_num_str = input("What is the upper number for the range:")
top_num = int(top_num_str)

# Classify every integer in [2, top_num] by comparing it to the sum of its
# proper divisors: equal -> perfect, smaller -> abundant, larger -> deficient.
# (The original also incremented `number` after the loop body, which was dead
# code because the for-loop rebinds it each iteration.)
for number in range(2, top_num + 1):
    sum_of_divisors = sum(divisor for divisor in range(1, number)
                          if number % divisor == 0)
    if number == sum_of_divisors:
        print(number, "is perfect")
    elif number < sum_of_divisors:
        print(number, "is abundant")
    else:
        print(number, "is deficient")
acf02aea550ebda1b15b396aa9a56669855b5508 | 2,289 | py | Python | core/chat_graph/chat_graph.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | core/chat_graph/chat_graph.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | core/chat_graph/chat_graph.py | functioncall/rescue-habit | 98c9d3f2feff0ed100523f9822865a139f6f7648 | [
"MIT"
] | null | null | null | from core.chat_node.chat_node import ChatNode
from commons.errors import ChatNodeNameTypeError, ChatNodeNotFound
import os
def setup_data(obj):
    """
    1. Extracts text, quick replies, attachments from object
    2. Adds available entities to a dict and returns it
    :param obj: <dict>
    :return: <dict>
    """
    wanted = ("text", "quick_replies", "attachments")
    return {key: obj[key] for key in obj if key in wanted}
class Graph(object):
    """
    A graph which contains:
    1. chat-nodes in a layout; containing { node-name: node } as <dict>
    2. contains the start and end nodes for a chat instance.
    """

    def __init__(self, json):
        self.layout = {}   # node name -> {"node": ChatNode, "triggers": [...]}
        self.json = json   # raw JSON layout description
        self.start = None  # first node declared in the layout
        self.end = None    # last node declared in the layout

    def _validate_node_name(self, node_name):
        """
        Raise unless node_name is a string naming a node in the layout.
        :param node_name: <str>
        """
        # isinstance (rather than `type(...) is str`) also accepts str
        # subclasses, which is the idiomatic and more permissive check.
        if not isinstance(node_name, str):
            raise ChatNodeNameTypeError(node_name)
        if node_name not in self.layout:
            raise ChatNodeNotFound(node_name)

    def get_node(self, node_name):
        """
        :param node_name: <str> should correspond to key in self.layout
        :return: <ChatNode>
        """
        self._validate_node_name(node_name)
        return self.layout[node_name]["node"]

    def node_in_graph(self, node_name):
        """
        Return True when node_name is in the layout; otherwise raise.
        :param node_name: <str>
        :return: <bool>
        """
        self._validate_node_name(node_name)
        return True

    def get_node_triggers(self, node_name):
        """
        Use if regular expression based triggers are provided
        :param node_name: <str>
        :return: <list>
        """
        return self.layout[node_name]["triggers"]

    def draw_graph(self):
        """
        sets up graph instance after reading json file
        """
        for prop in self.json:
            # "next"/"pre" default to None, "triggers" to [] when absent.
            node = ChatNode(prop["name"], setup_data(prop),
                            prop.get("next"), prop.get("pre"))
            self.layout[prop["name"]] = {
                "node": node,
                "triggers": prop.get("triggers", []),
            }
            # Track the first node as the start and the latest as the end.
            if self.start is None:
                self.start = node
            self.end = node
| 30.932432 | 90 | 0.579729 |
acf02b523fc781f2ae0d0fb6054103bb3ab1f75e | 34,924 | py | Python | biosimulators_utils/omex_meta/io.py | biosimulators/Biosimulators_utils | c1363467263120bf1166da2b75e38fc7f56dc94f | [
"MIT"
] | 2 | 2021-06-02T13:26:34.000Z | 2021-12-27T23:12:47.000Z | biosimulators_utils/omex_meta/io.py | biosimulators/Biosimulators_utils | c1363467263120bf1166da2b75e38fc7f56dc94f | [
"MIT"
] | 102 | 2020-12-06T19:47:43.000Z | 2022-03-31T12:56:17.000Z | biosimulators_utils/omex_meta/io.py | biosimulators/Biosimulators_utils | c1363467263120bf1166da2b75e38fc7f56dc94f | [
"MIT"
] | 4 | 2021-01-27T19:56:34.000Z | 2022-02-03T21:08:20.000Z | """ Methods for reading and writing OMEX Metadata files
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2021-06-23
:Copyright: 2021, Center for Reproducible Biomedical Modeling
:License: MIT
"""
from ..combine.data_model import CombineArchive, CombineArchiveContentFormatPattern # noqa: F401
from ..config import get_config, Config # noqa: F401
from .data_model import (Triple, OmexMetadataOutputFormat, OmexMetadataSchema,
BIOSIMULATIONS_ROOT_URI_PATTERN,
BIOSIMULATIONS_PREDICATE_TYPES)
from .utils import get_local_combine_archive_content_uri, get_global_combine_archive_content_uri
from .validation import validate_biosimulations_metadata
import abc
import json
import os
import pyomexmeta
import rdflib
import re
import tempfile
__all__ = [
'read_omex_meta_file',
'write_omex_meta_file',
'read_omex_meta_files_for_archive',
'TriplesOmexMetaReader',
'TriplesOmexMetaWriter',
'BiosimulationsOmexMetaReader',
'BiosimulationsOmexMetaWriter',
]
def read_omex_meta_file(filename_or_filenames, archive=None, working_dir=None, config=None):
    """ Read an OMEX Metadata file

    Args:
        filename_or_filenames (:obj:`str` or :obj:`list` of :obj:`str`): path or paths to OMEX Metadata files
        archive (:obj:`CombineArchive`, optional): parent COMBINE archive
        working_dir (:obj:`str`, optional): working directory (e.g., directory of the parent COMBINE/OMEX archive)
        config (:obj:`Config`, optional): configuration

    Returns:
        :obj:`tuple`:

            * :obj:`object`: representation of the OMEX Metadata file in :obj:`schema`
            * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file
            * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file
    """
    content = None
    errors = []
    warnings = []

    if config is None:
        config = get_config()

    if config.OMEX_METADATA_SCHEMA == OmexMetadataSchema.biosimulations:
        return BiosimulationsOmexMetaReader().run(filename_or_filenames, archive=archive, working_dir=working_dir, config=config)

    elif config.OMEX_METADATA_SCHEMA == OmexMetadataSchema.rdf_triples:
        return TriplesOmexMetaReader().run(filename_or_filenames, archive=archive, working_dir=working_dir, config=config)

    else:
        # Fix: the schema value was never substituted into the message
        # (`.format` was missing), so users saw a literal `{}`. Mirror the
        # message produced by write_omex_meta_file.
        errors.append(['Schema `{}` is not supported. The following schemas are supported:'.format(
            config.OMEX_METADATA_SCHEMA.value if config.OMEX_METADATA_SCHEMA else None),
            [['None']] + sorted([
                [schema.value] for schema in OmexMetadataSchema.__members__.values()
            ])])
        return (content, errors, warnings)
def write_omex_meta_file(content, filename, config=None):
    """ Write an OMEX Metadata file

    Args:
        content (:obj:`object`): representation of the OMEX Metadata file in :obj:`schema`
        filename (:obj:`str`): path to save OMEX Metadata file
        config (:obj:`Config`, optional): configuration
    """
    if config is None:
        config = get_config()

    # Dispatch on the configured schema.
    writer_classes = {
        OmexMetadataSchema.biosimulations: BiosimulationsOmexMetaWriter,
        OmexMetadataSchema.rdf_triples: TriplesOmexMetaWriter,
    }
    writer_cls = writer_classes.get(config.OMEX_METADATA_SCHEMA)

    if writer_cls is None:
        msg = 'Schema `{}` is not supported. The following schemas are supported:\n {}'.format(
            config.OMEX_METADATA_SCHEMA.value if config.OMEX_METADATA_SCHEMA else None,
            '\n '.join(['None'] + sorted(schema.value for schema in OmexMetadataSchema.__members__.values())))
        raise NotImplementedError(msg)

    return writer_cls().run(content, filename, config=config)
def read_omex_meta_files_for_archive(archive, archive_dirname, config=None):
    """ Read all of the OMEX Metadata files in an archive

    Args:
        archive (:obj:`CombineArchive`): COMBINE/OMEX archive
        archive_dirname (:obj:`str`): directory with the content of the archive
        config (:obj:`Config`, optional): configuration

    Returns:
        :obj:`tuple`:

            * :obj:`object`: representation of the OMEX Metadata file in :obj:`schema`
            * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file
            * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file
    """
    if config is None:
        config = get_config()

    # Collect the locations of all OMEX Metadata items in the archive.
    filenames = [
        os.path.join(archive_dirname, item.location)
        for item in archive.contents
        if item.format and re.match(CombineArchiveContentFormatPattern.OMEX_METADATA.value, item.format)
    ]

    if filenames:
        return read_omex_meta_file(filenames, archive=archive, working_dir=archive_dirname, config=config)

    errors = [[(
        'The COMBINE/OMEX does not contain an OMEX Metadata file. '
        'Archives must contain metadata for publication to BioSimulations.'
    )]]
    return ([], errors, [])
class OmexMetaReader(abc.ABC):
    """ Base class for reading OMEX Metadata files """
    @ abc.abstractmethod
    def run(self, filename_or_filenames, archive=None, working_dir=None, config=None):
        """ Read an OMEX Metadata file
        Args:
            filename_or_filenames (:obj:`str` or :obj:`list` of :obj:`str`): path or paths to OMEX Metadata files
            archive (:obj:`CombineArchive`, optional): parent COMBINE archive
            working_dir (:obj:`str`, optional): working directory (e.g., directory of the parent COMBINE/OMEX archive)
            config (:obj:`Config`, optional): configuration
        Returns:
            :obj:`tuple`:
                * :obj:`object`: representation of the OMEX Metadata file
                * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file
                * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file
        """
        pass  # pragma: no cover
    @classmethod
    def read_rdf(cls, filename, config=None):
        """ Read an RDF file
        Args:
            filename (:obj:`str`): path to the RDF file
            config (:obj:`Config`, optional): configuration
        Returns:
            :obj:`tuple`:
                * :obj:`pyomexmeta.RDF`: RDF representation of the file, or :obj:`None` if parsing failed
                * nested :obj:`list` of :obj:`str`: nested list of errors with the RDF file
                * nested :obj:`list` of :obj:`str`: nested list of warnings with the RDF file
        """
        if config is None:
            config = get_config()
        rdf = None
        errors = []
        warnings = []
        if not os.path.isfile(filename):
            errors.append(['`{}` is not a file.'.format(filename)])
            return (rdf, errors, warnings)
        # capture parser diagnostics through the pyomexmeta logger; remember the
        # current level so it can be restored after parsing
        pyomexmeta_log_level = pyomexmeta.Logger.get_level()
        pyomexmeta.Logger.clear()
        pyomexmeta.Logger.set_level(pyomexmeta.eLogLevel.warn)
        with open(filename, 'rb') as file:
            line = file.readline()
        # workaround: if the file declares XML 1.1, write a temporary copy with
        # the declaration downgraded to 1.0 so the underlying parser accepts it
        temp_filename = None
        if line.startswith(b'<?xml ') and b'?>' in line:
            decl, sep, after_decl = line.partition(b'?>')
            if b' version="1.1"' in decl or b" version='1.1'" in decl:
                decl = decl.replace(b' version="1.1"', b' version="1.0"').replace(b" version='1.1'", b" version='1.0'")
                line = decl + sep + after_decl
                with open(filename, 'rb') as file:
                    lines = file.readlines()
                lines[0] = line
                temp_fid, temp_filename = tempfile.mkstemp()
                os.close(temp_fid)
                with open(temp_filename, 'wb') as file:
                    for line in lines:
                        file.write(line)
                filename = temp_filename
        rdf = pyomexmeta.RDF.from_file(filename, config.OMEX_METADATA_INPUT_FORMAT.value)
        # clean up the temporary downgraded copy, if one was made
        if temp_filename:
            os.remove(temp_filename)
        pyomexmeta.Logger.set_level(pyomexmeta_log_level)
        # translate logged parser messages into errors/warnings; any message
        # that is not a warning invalidates the parsed RDF
        logger = pyomexmeta.Logger()
        num_messages = len(logger)
        for i_message in range(num_messages):
            message = logger[i_message]
            type = message.get_level()
            message = message.get_message()
            if type in ['warn', 'warning']:
                warnings.append([message])
            else:
                rdf = None
                errors.append([message])
        return (rdf, errors, warnings)
    @classmethod
    def get_rdf_triples(cls, rdf):
        """ Get the triples of an RDF graph
        Args:
            rdf (:obj:`pyomexmeta.RDF`): RDF representation of the file
        Returns:
            * :obj:`list` of :obj:`Triple`: representation of the OMEX Metadata file as list of triples
        """
        # SPARQL query that enumerates every (subject, predicate, object) statement
        query = "SELECT ?subject ?predicate ?object WHERE { ?subject ?predicate ?object }"
        plain_triples = json.loads(rdf.query_results_as_string(query, 'json'))['results']['bindings']
        triples = []
        for plain_triple in plain_triples:
            subject = cls.make_rdf_node(plain_triple['subject'])
            predicate = cls.make_rdf_node(plain_triple['predicate'])
            object = cls.make_rdf_node(plain_triple['object'])
            triples.append(Triple(
                subject=subject,
                predicate=predicate,
                object=object,
            ))
        return triples
    @classmethod
    def make_rdf_node(cls, node):
        """ Make an RDF node
        Args:
            node (:obj:`dict`): node (SPARQL JSON result binding with keys ``type`` and ``value``)
        Returns:
            :obj:`rdflib.term.BNode`, :obj:`rdflib.term.Literal`, or :obj:`rdflib.term.URIRef`: node
        """
        if node['type'] == 'bnode':
            return rdflib.term.BNode(node['value'])
        elif node['type'] == 'literal':
            return rdflib.term.Literal(node['value'])
        else:
            return rdflib.term.URIRef(node['value'])
class OmexMetaWriter(abc.ABC):
    """ Abstract base class for utilities that export OMEX Metadata files """
    @abc.abstractmethod
    def run(self, content, filename, config=None):
        """ Save a representation of OMEX Metadata to a file
        Args:
            content (:obj:`object`): in-memory representation of the OMEX Metadata
            filename (:obj:`str`): path where the OMEX Metadata file should be written
            config (:obj:`Config`, optional): configuration
        """
        pass  # pragma: no cover
class TriplesOmexMetaReader(OmexMetaReader):
    """ Utility for reading an OMEX Metadata file into a list of triples """
    def run(self, filename_or_filenames, archive=None, working_dir=None, config=None):
        """ Read one or more OMEX Metadata files into a list of triples
        Args:
            filename_or_filenames (:obj:`str` or :obj:`list` of :obj:`str`): path or paths to OMEX Metadata files
            archive (:obj:`CombineArchive`, optional): parent COMBINE archive
            working_dir (:obj:`str`, optional): working directory (e.g., directory of the parent COMBINE/OMEX archive)
            config (:obj:`Config`, optional): configuration
        Returns:
            :obj:`tuple`:
                * :obj:`list` of :obj:`Triple`: representation of the OMEX Metadata file(s) as a list of triples
                * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file
                * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file
        """
        if config is None:
            config = get_config()
        triples = None
        errors = []
        warnings = []
        # normalize the input to a list of file paths
        if isinstance(filename_or_filenames, (tuple, list)):
            filenames = filename_or_filenames
        else:
            filenames = [filename_or_filenames]
        temp_triples = []
        for filename in filenames:
            rdf, temp_errors, temp_warnings = self.read_rdf(filename, config=config)
            # report diagnostics relative to the working directory when available
            if working_dir:
                error_filename = os.path.relpath(filename, working_dir)
            else:
                error_filename = filename
            if temp_errors:
                if isinstance(filename_or_filenames, (tuple, list)):
                    errors.append(['The OMEX Metadata file at location `{}` is invalid.'.format(error_filename), temp_errors])
                else:
                    errors.extend(temp_errors)
            else:
                # bug fix: accumulate the triples of EVERY valid file; previously
                # only the last file's graph was converted to triples
                temp_triples.extend(self.get_rdf_triples(rdf))
            if temp_warnings:
                if isinstance(filename_or_filenames, (tuple, list)):
                    warnings.append(['The OMEX Metadata file at location `{}` may be invalid.'.format(error_filename), temp_warnings])
                else:
                    warnings.extend(temp_warnings)
        if errors:
            return (triples, errors, warnings)
        triples = temp_triples
        return (triples, errors, warnings)
class TriplesOmexMetaWriter(OmexMetaWriter):
    """ Utility for writing a list of triples to an OMEX Metadata file """
    def run(self, triples, filename, namespaces=None, config=None):
        """ Write a list of triples to an OMEX Metadata file
        Args:
            triples (:obj:`list` of :obj:`Triple`): representation of the OMEX Metadata file as list of triples
            filename (:obj:`str`): path to OMEX Metadata file
            namespaces (:obj:`dict`, optional): map from prefix (:obj:`str`) to namespace
                (:obj:`rdflib.Namespace`) to bind in the serialized document
            config (:obj:`Config`, optional): configuration
        Raises:
            :obj:`RuntimeError`: if the metadata could not be converted to the configured output format
        """
        if config is None:
            config = get_config()
        graph = rdflib.Graph()
        # bind caller-supplied prefixes so the serialization uses readable QNames
        for prefix, namespace in (namespaces or {}).items():
            graph.namespace_manager.bind(prefix, namespace)
        # graph.namespace_manager.bind('omexLibrary', rdflib.Namespace('http://omex-library.org/'))
        # graph.namespace_manager.bind('identifiers', rdflib.Namespace('http://identifiers.org/'))
        for triple in triples:
            graph.add((triple.subject, triple.predicate, triple.object))
        if config.OMEX_METADATA_OUTPUT_FORMAT == OmexMetadataOutputFormat.rdfxml:
            graph.serialize(filename, format="xml")
        elif config.OMEX_METADATA_OUTPUT_FORMAT == OmexMetadataOutputFormat.turtle:
            graph.serialize(filename, format="turtle")
        else:
            # other formats: serialize to RDF/XML first, then convert in place
            # with pyomexmeta to the configured output format
            graph.serialize(filename, format="xml", version="1.0")
            rdf = pyomexmeta.RDF.from_file(filename, 'rdfxml')
            if rdf.to_file(filename, config.OMEX_METADATA_OUTPUT_FORMAT.value) != 0:
                raise RuntimeError('Metadata could not be saved to `{}` in `{}` format.'.format(
                    filename, config.OMEX_METADATA_OUTPUT_FORMAT.value))
class BiosimulationsOmexMetaReader(OmexMetaReader):
    """ Utility for reading the metadata about a COMBINE/OMEX archive in an OMEX Metadata
    file into a dictionary with BioSimulations schema """
    def run(self, filename_or_filenames, archive=None, working_dir=None, config=None):
        """ Read the metadata about a COMBINE/OMEX archive in an OMEX Metadata file into a dictionary
        with BioSimulations schema
        Args:
            filename_or_filenames (:obj:`str` or :obj:`list` of :obj:`str`): path or paths to OMEX Metadata files
            archive (:obj:`CombineArchive`, optional): parent COMBINE archive
            working_dir (:obj:`str`, optional): working directory (e.g., directory of the parent COMBINE/OMEX archive)
            config (:obj:`Config`, optional): configuration
        Returns:
            :obj:`tuple`:
                * :obj:`dict`: representation of the metadata about a COMBINE/OMEX
                  archive in an OMEX Metadata file as a dictionary with BioSimulations schema
                * nested :obj:`list` of :obj:`str`: nested list of errors with the OMEX Metadata file
                * nested :obj:`list` of :obj:`str`: nested list of warnings with the OMEX Metadata file
        """
        if config is None:
            config = get_config()
        el_metadatas = None
        errors = []
        warnings = []
        # normalize the input to a list of file paths
        if isinstance(filename_or_filenames, (tuple, list)):
            filenames = filename_or_filenames
        else:
            filenames = [filename_or_filenames]
        # parse each file and pool the triples of all valid files together
        triples = []
        for filename in filenames:
            rdf, temp_errors, temp_warnings = self.read_rdf(filename, config=config)
            # report diagnostics relative to the working directory when available
            if working_dir:
                error_filename = os.path.relpath(filename, working_dir)
            else:
                error_filename = filename
            if temp_errors:
                if isinstance(filename_or_filenames, (tuple, list)):
                    errors.append(['The OMEX Metadata file at location `{}` is invalid.'.format(error_filename), temp_errors])
                else:
                    errors.extend(temp_errors)
            else:
                triples.extend(self.get_rdf_triples(rdf))
            if temp_warnings:
                if isinstance(filename_or_filenames, (tuple, list)):
                    warnings.append(['The OMEX Metadata file at location `{}` may be invalid.'.format(error_filename), temp_warnings])
                else:
                    warnings.extend(temp_warnings)
        if errors:
            return (el_metadatas, errors, warnings)
        # determine the single archive URI the triples describe
        combine_archive_uri, temp_errors, temp_warnings = self.get_combine_archive_uri(triples)
        errors.extend(temp_errors)
        warnings.extend(temp_warnings)
        if errors:
            return (el_metadatas, errors, warnings)
        # convert the pooled triples into BioSimulations' metadata schema
        el_metadatas, temp_errors, temp_warnings = self.parse_triples_to_schema(triples, combine_archive_uri)
        errors.extend(temp_errors)
        warnings.extend(temp_warnings)
        if errors:
            return (el_metadatas, errors, warnings)
        # validate the converted metadata against BioSimulations' requirements
        temp_errors, temp_warnings = validate_biosimulations_metadata(el_metadatas, archive=archive, working_dir=working_dir)
        errors.extend(temp_errors)
        warnings.extend(temp_warnings)
        return (el_metadatas, errors, warnings)
    @classmethod
    def get_combine_archive_uri(cls, triples):
        """ Get the URI used to the describe the COMBINE/OMEX archive in a list of RDF triples
        Args:
            triples (:obj:`list` of :obj:`dict`): representation of the OMEX Metadata file as list of triples
        Returns:
            :obj:`str`: URI used to the describe the COMBINE/OMEX archive in the list of triples
        """
        # collect every distinct archive root URI mentioned by a subject
        archive_uris = set()
        for triple in triples:
            if isinstance(triple.subject, rdflib.term.URIRef):
                archive_uri = re.match(BIOSIMULATIONS_ROOT_URI_PATTERN, str(triple.subject))
                if archive_uri:
                    archive_uris.add(archive_uri.group(1))
        # exactly one archive URI is required
        if len(archive_uris) == 0:
            msg = 'File does not contain metadata about an OMEX archive.'
            return(None, [[msg]], [])
        elif len(archive_uris) > 1:
            msg = 'File contains metadata about multiple OMEX archives. File must contains data about 1 archive.'
            return(None, [[msg]], [])
        else:
            return (list(archive_uris)[0], [], [])
    @classmethod
    def parse_triples_to_schema(cls, triples, combine_archive_uri):
        """ Convert a graph of RDF triples into BioSimulations' metadata schema
        Args:
            triples (:obj:`list` of :obj:`dict`): representation of the OMEX Meta file as list of triples
            combine_archive_uri (:obj:`str`): URI used to the describe the COMBINE/OMEX archive in the list of triples
        Returns:
            :obj:`tuple`:
                * :obj:`list` of :obj:`object`: representation of the triples in BioSimulations' metadata schema
                * nested :obj:`list` of :obj:`str`: nested list of errors with the metadata
                * nested :obj:`list` of :obj:`str`: nested list of warnings with the metadata
        """
        errors = []
        warnings = []
        # first pass: index every subject/object node by its string form, recording
        # its node type and whether it appears as a subject and/or an object
        objects = {}
        for triple in triples:
            for node, is_subject in [(triple.subject, True), (triple.object, False)]:
                object = objects.get(str(node), None)
                if object is None:
                    object = objects[str(node)] = {
                        'type': node.__class__.__name__,
                        'is_subject': is_subject,
                        'is_object': not is_subject,
                    }
                    if isinstance(node, (rdflib.term.BNode, rdflib.term.URIRef)):
                        object['uri'] = str(node)
                    else:
                        object['label'] = str(node)
                object['is_subject'] = object['is_subject'] or is_subject
                object['is_object'] = object['is_object'] or not is_subject
        # second pass: attach each (predicate, object) pair to its subject under
        # the matching BioSimulations attribute (or 'other' for unknown predicates)
        for triple in triples:
            subject = str(triple.subject)
            predicate = str(triple.predicate)
            object = str(triple.object)
            predicate_type = BIOSIMULATIONS_PREDICATE_TYPES.get(predicate, None)
            if predicate_type is None:
                attr = 'other'
                value = objects[object]
            else:
                attr = predicate_type['attribute']
                value = objects[object]
            if attr not in objects[subject]:
                objects[subject][attr] = []
            objects[subject][attr].append({
                'predicate': predicate,
                'value': value,
            })
        # build BioSimulations metadata for each URI-identified subject,
        # skipping helper nodes (local: URIs) and pure objects
        el_metadatas = []
        for uri, raw_metadata in objects.items():
            if (
                raw_metadata['type'] != 'URIRef'
                or raw_metadata['uri'].startswith('local:')
                or not raw_metadata['is_subject']
            ):
                continue
            metadata = {}
            metadata['uri'], metadata['combine_archive_uri'] = get_local_combine_archive_content_uri(uri, combine_archive_uri)
            el_metadatas.append(metadata)
            ignored_statements = []
            for predicate_uri, predicate_type in BIOSIMULATIONS_PREDICATE_TYPES.items():
                metadata[predicate_type['attribute']] = raw_metadata.get(predicate_type['attribute'], [])
                values = []
                for el in metadata[predicate_type['attribute']]:
                    if predicate_type['has_uri'] and predicate_type['has_label']:
                        # value must provide both a dc:identifier URI and an rdfs:label
                        value = {
                            'uri': None,
                            'label': None,
                        }
                        for sub_el in el['value'].get('other', []):
                            if (
                                sub_el['predicate'] == 'http://dublincore.org/specifications/dublin-core/dcmi-terms/identifier'
                                and 'uri' in sub_el['value']
                            ):
                                value['uri'] = sub_el['value']['uri']
                            elif (
                                sub_el['predicate'] == 'http://www.w3.org/2000/01/rdf-schema#label'
                                and 'label' in sub_el['value']
                            ):
                                value['label'] = sub_el['value']['label']
                        if value['label'] is None:
                            if el['value']['type'] != 'BNode':
                                msg = '({}, {}, {}) does not contain an rdf:label.'.format(
                                    uri, predicate_uri, el['value'].get('uri', None)
                                )
                                ignored_statements.append([msg])
                        else:
                            values.append(value)
                    else:
                        if predicate_type['has_uri']:
                            # value is identified by a URI
                            value = el['value'].get('uri', None)
                            if value is None:
                                msg = '({}, {}) does not contain an URI.'.format(
                                    uri, predicate_uri
                                )
                                ignored_statements.append([msg])
                            else:
                                values.append(value)
                            for sub_el in el['value'].get('other', []):
                                if 'uri' in sub_el['value']:
                                    values.append(sub_el['value']['uri'])
                        else:
                            # value is a plain literal label
                            value = el['value'].get('label', None) or None
                            if value is None:
                                if el['value']['type'] != 'BNode':
                                    msg = '({}, {}, {}) does not contain an rdf:label.'.format(
                                        uri, predicate_uri, el['value'].get('uri', None)
                                    )
                                    ignored_statements.append([msg])
                            else:
                                values.append(value)
                            for sub_el in el['value'].get('other', []):
                                if 'label' in sub_el['value']:
                                    values.append(sub_el['value']['label'])
                metadata[predicate_type['attribute']] = values
                # collapse single-valued attributes; extra values are an error
                if not predicate_type['multiple_allowed']:
                    if len(metadata[predicate_type['attribute']]) == 0:
                        metadata[predicate_type['attribute']] = None
                    elif len(metadata[predicate_type['attribute']]) == 1:
                        metadata[predicate_type['attribute']] = metadata[predicate_type['attribute']][0]
                    else:
                        metadata[predicate_type['attribute']] = metadata[predicate_type['attribute']][0]
                        msg = 'The COMBINE archive should only contain one instance of predicate `{}`.'.format(
                            predicate_uri
                        )
                        errors.append([msg])
            # statements with unrecognized predicates are kept under 'other' when
            # they carry both a description of the attribute and a labeled value
            metadata['other'] = []
            for other_md in raw_metadata.get('other', []):
                value = {
                    'attribute': {
                        'uri': other_md['predicate'],
                        'label': None,
                    },
                    'value': {
                        'uri': None,
                        'label': None
                    },
                }
                for el in other_md['value'].get('description', []):
                    if (
                        el['predicate'] == 'http://dublincore.org/specifications/dublin-core/dcmi-terms/description'
                        and 'label' in el['value']
                    ):
                        value['attribute']['label'] = el['value']['label']
                for el in other_md['value'].get('other', []):
                    if (
                        el['predicate'] == 'http://dublincore.org/specifications/dublin-core/dcmi-terms/identifier'
                        and 'uri' in el['value']
                    ):
                        value['value']['uri'] = el['value']['uri']
                    if (
                        el['predicate'] == 'http://www.w3.org/2000/01/rdf-schema#label'
                        and 'label' in el['value']
                    ):
                        value['value']['label'] = el['value']['label']
                if value['attribute']['label'] is None or value['value']['label'] is None:
                    msg = '({}, {}, {}) does not contain an rdf:label.'.format(
                        uri, other_md['predicate'],
                        other_md['value'].get('label', None)
                        if other_md['value']['type'] == 'Literal'
                        else other_md['value'].get('uri', None)
                    )
                    ignored_statements.append([msg])
                else:
                    metadata['other'].append(value)
            # rewrite thumbnail URIs to be relative to the archive root
            for i_thumbnail, thumbnail in enumerate(metadata['thumbnails']):
                if thumbnail.startswith(combine_archive_uri + '/'):
                    metadata['thumbnails'][i_thumbnail] = './' + thumbnail[len(combine_archive_uri)+1:]
                else:
                    msg = 'Thumbnail URIs must begin with the URI of their parent archive ({}), not `{}`'.format(
                        combine_archive_uri, thumbnail)
                    errors.append([msg])
            if ignored_statements:
                warnings.append(['Some statements were ignored:', ignored_statements])
        if errors:
            return (el_metadatas, errors, warnings)
        return (el_metadatas, errors, warnings)
class BiosimulationsOmexMetaWriter(OmexMetaWriter):
    """ Utility for writing the metadata about a COMBINE/OMEX archive to an OMEX Metadata
    file """
    def run(self, el_metadatas, filename, config=None):
        """ Write an OMEX Metadata file
        Args:
            el_metadatas (:obj:`list` of :obj:`dict`): representation of the metadata about the elements in
                a COMBINE/OMEX archive in an OMEX Metadata file
            filename (:obj:`str`): path to save OMEX Metadata file
            config (:obj:`Config`, optional): configuration
        """
        if config is None:
            config = get_config()
        # convert to triples
        triples = []
        # counter used to mint `local:NNNNN` URIs for intermediate nodes
        local_id = 0
        namespaces = {
            'dc': rdflib.Namespace('http://dublincore.org/specifications/dublin-core/dcmi-terms/'),
            'dcterms': rdflib.Namespace('http://purl.org/dc/terms/'),
            'foaf': rdflib.Namespace('http://xmlns.com/foaf/0.1/'),
            'rdfs': rdflib.Namespace('http://www.w3.org/2000/01/rdf-schema#'),
        }
        # register the namespace of every known BioSimulations predicate
        for predicate_type in BIOSIMULATIONS_PREDICATE_TYPES.values():
            namespaces[predicate_type['namespace']['prefix']] = rdflib.Namespace(predicate_type['namespace']['uri'])
        for el_metadata in el_metadatas:
            el_uri = get_global_combine_archive_content_uri(el_metadata['uri'], el_metadata['combine_archive_uri'])
            file_uri_ref = rdflib.term.URIRef(el_uri)
            for predicate_type in BIOSIMULATIONS_PREDICATE_TYPES.values():
                namespace = namespaces[predicate_type['namespace']['prefix']]
                predicate = getattr(namespace, predicate_type['uri'].replace(predicate_type['namespace']['uri'], ''))
                # normalize single-valued attributes to a (possibly empty) list
                if predicate_type['multiple_allowed']:
                    values = el_metadata.get(predicate_type['attribute'], [])
                else:
                    value = el_metadata.get(predicate_type['attribute'], None)
                    if value is None:
                        values = []
                    else:
                        values = [value]
                for value in values:
                    if predicate_type['has_uri'] and predicate_type['has_label']:
                        # URI+label values become an intermediate `local:` node
                        # with dc:identifier and rdfs:label statements
                        local_id += 1
                        object = rdflib.term.URIRef('local:{:05d}'.format(local_id))
                        if value.get('uri', None) is not None:
                            triples.append(Triple(
                                object,
                                namespaces['dc'].identifier,
                                rdflib.term.URIRef(value['uri'])
                            ))
                        if value.get('label', None) is not None:
                            triples.append(Triple(
                                object,
                                namespaces['rdfs'].label,
                                rdflib.term.Literal(value['label'])
                            ))
                        # creators/contributors additionally get FOAF account
                        # name (identifiers.org ORCID URIs rewritten to orcid.org)
                        # and FOAF name statements
                        if predicate_type['uri'] in [
                            'http://dublincore.org/specifications/dublin-core/dcmi-terms/creator',
                            'http://dublincore.org/specifications/dublin-core/dcmi-terms/contributor',
                        ]:
                            if value.get('uri', None) is not None:
                                triples.append(Triple(
                                    object,
                                    namespaces['foaf'].accountName,
                                    rdflib.term.URIRef(value['uri']
                                                       .replace('https://identifiers.org/orcid:',
                                                                'https://orcid.org/'))
                                ))
                            if value.get('label', None) is not None:
                                triples.append(Triple(
                                    object,
                                    namespaces['foaf'].name,
                                    rdflib.term.Literal(value['label'])
                                ))
                    elif predicate_type['has_uri']:
                        # thumbnails are stored relative to the archive; expand
                        # them to global URIs before serialization
                        if predicate_type['uri'] == 'http://www.collex.org/schema#thumbnail':
                            value = get_global_combine_archive_content_uri(value, el_metadata['combine_archive_uri'])
                        object = rdflib.term.URIRef(value)
                    else:
                        object = rdflib.term.Literal(value)
                    # link the archive element to the value node in every case
                    triples.append(Triple(
                        subject=file_uri_ref,
                        predicate=predicate,
                        object=object,
                    ))
            # serialize custom ('other') metadata via intermediate local nodes
            for other in el_metadata.get('other', []):
                # split the attribute URI into a namespace and a local name
                if '#' in other['attribute']['uri']:
                    namespace, _, predicate = other['attribute']['uri'].partition('#')
                    namespace += '#'
                else:
                    namespace, _, predicate = other['attribute']['uri'].rpartition('/')
                    namespace += '/'
                namespace = rdflib.Namespace(namespace)
                predicate = getattr(namespace, predicate)
                local_id += 1
                object = rdflib.term.URIRef('local:{:05d}'.format(local_id))
                triples.append(Triple(
                    subject=file_uri_ref,
                    predicate=predicate,
                    object=object,
                ))
                if other.get('attribute', {}).get('label', None) is not None:
                    triples.append(Triple(
                        object,
                        namespaces['dc'].description,
                        rdflib.term.Literal(other['attribute']['label'])
                    ))
                if other.get('value', {}).get('uri', None) is not None:
                    triples.append(Triple(
                        object,
                        namespaces['dc'].identifier,
                        rdflib.term.URIRef(other['value']['uri'])
                    ))
                if other.get('value', {}).get('label', None) is not None:
                    triples.append(Triple(
                        object,
                        namespaces['rdfs'].label,
                        rdflib.term.Literal(other['value']['label'])
                    ))
        # save triples to file
        TriplesOmexMetaWriter().run(triples, filename, namespaces=namespaces, config=config)
| 41.725209 | 134 | 0.541691 |
acf02bb9c7bd4acb32907f8b5cd1e320c3ae6f12 | 1,202 | py | Python | src/gui/menu.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
] | null | null | null | src/gui/menu.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
] | null | null | null | src/gui/menu.py | larashores/spotify-analyzer | 98022b178ce3ef1b07a8f005aeba2aeb573125ee | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter.filedialog import askdirectory
from typing import Callable, Optional
from type_hints import Parent
class Menu(tk.Menu):
    """Application menu bar with a File > Load command for choosing the
    directory that contains the Spotify data.
    The load action (and its Ctrl+O accelerator) prompts for a directory and
    forwards the chosen path to the ``on_load`` callback.
    """
    def __init__(self, parent: Parent, *, on_load: Optional[Callable[[str], None]] = None):
        """Create the menu.
        Args:
            parent: widget the menu is attached to
            on_load: called with the selected directory path after a successful load
        """
        tk.Menu.__init__(self, parent)
        if parent:
            # remember the top-level window so its title can be updated on load
            self._top_level = parent.winfo_toplevel()
            self._base_title = self._top_level.wm_title()
        else:
            self._top_level = None
            self._base_title = ""
        self._file_menu = tk.Menu(self, tearoff=False)
        self._file_menu.add_command(label="Load", command=self._on_load)
        self.add_cascade(label="File", menu=self._file_menu)
        # bind both cases so the shortcut works regardless of Shift/Caps Lock
        self.bind_all("<Control-o>", lambda event: self._on_load())
        self.bind_all("<Control-O>", lambda event: self._on_load())
        self._on_load_callback = on_load
    def _on_load(self) -> None:
        """Prompt for a data directory and notify the registered callback."""
        path = askdirectory(
            title="Select folder containing Spotify data",
        )
        if path and self._on_load_callback:
            if self._top_level:
                # reflect the loaded path in the window title
                self._top_level.wm_title("{} - {}".format(path, self._base_title))
            self._on_load_callback(path)
| 34.342857 | 91 | 0.637271 |
acf02bd75665c8108c598106779362b56c472246 | 8,002 | py | Python | deepcell_spots/postprocessing_utils.py | vanvalenlab/deepcell-spots | 252c683d23bd18a7c099c3046452ba0057c61f05 | [
"Apache-2.0"
] | 1 | 2021-12-09T09:12:33.000Z | 2021-12-09T09:12:33.000Z | deepcell_spots/postprocessing_utils.py | vanvalenlab/deepcell-spots | 252c683d23bd18a7c099c3046452ba0057c61f05 | [
"Apache-2.0"
] | 2 | 2021-12-08T23:45:48.000Z | 2022-01-29T02:40:03.000Z | deepcell_spots/postprocessing_utils.py | vanvalenlab/deepcell-spots | 252c683d23bd18a7c099c3046452ba0057c61f05 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019-2022 The Van Valen Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-spots/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# vanvalenlab@gmail.com
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions that convert deep learning model output to list of detected spots"""
import numpy as np
from skimage import measure
from skimage.feature import peak_local_max
def y_annotations_to_point_list(y_pred, threshold=0.95):
    """Convert raw model output to a list of detected spot centers.
    Every pixel whose classification score exceeds ``threshold`` is treated as
    containing a spot center. The final spot position is the pixel location
    plus the predicted sub-pixel regression offsets.
    Args:
        y_pred: a dictionary of predictions with keys 'classification' and
            'offset_regression' corresponding to the named outputs of the
            dot_net_2D model; values are batched along the first axis
        threshold: a number in [0, 1]. Pixels with classification
            score > threshold are considered containing a spot center
    Returns:
        numpy.ndarray: per-image spot center coordinates of the format
            [[y0, x0], [y1, x1],...]
    Raises:
        TypeError: if ``y_pred`` is not a dictionary
        NameError: if ``y_pred`` lacks the required keys
    """
    if not isinstance(y_pred, dict):
        raise TypeError('Input predictions must be a dictionary.')
    if 'classification' not in y_pred or 'offset_regression' not in y_pred:
        raise NameError('Input must have keys \'classification\' and \'offset_regression\'')
    dot_centers = []
    for ind in range(np.shape(y_pred['classification'])[0]):
        contains_dot = y_pred['classification'][ind, ..., 1] > threshold
        delta_y = y_pred['offset_regression'][ind, ..., 0]
        delta_x = y_pred['offset_regression'][ind, ..., 1]
        dot_pixel_inds = np.argwhere(contains_dot)
        # add the regressed sub-pixel offsets to the integer pixel coordinates
        dot_centers.append([[y_ind + delta_y[y_ind, x_ind], x_ind +
                             delta_x[y_ind, x_ind]] for y_ind, x_ind in dot_pixel_inds])
    return np.array(dot_centers)
def y_annotations_to_point_list_restrictive(y_pred, threshold=0.95):
    """Convert raw model output to a list of detected spot centers, keeping
    only pixels whose classification score exceeds ``threshold`` AND whose
    regressed center lies inside the pixel itself (both offsets <= 0.5).
    The regression values of the retained pixels produce the final sub-pixel
    spot positions.
    Args:
        y_pred: a dictionary of predictions with keys 'classification' and
            'offset_regression' corresponding to the named outputs of the
            dot_net_2D model; values are batched along the first axis
        threshold: a number in [0, 1]. Pixels with classification
            score > threshold are considered containing a spot center
    Returns:
        numpy.ndarray: per-image spot center coordinates of the format
            [[y0, x0], [y1, x1],...]
    Raises:
        TypeError: if ``y_pred`` is not a dictionary
        NameError: if ``y_pred`` lacks the required keys
    """
    if not isinstance(y_pred, dict):
        raise TypeError('Input predictions must be a dictionary.')
    if 'classification' not in y_pred or 'offset_regression' not in y_pred:
        raise NameError('Input must have keys \'classification\' and \'offset_regression\'')
    dot_centers = []
    for ind in range(np.shape(y_pred['classification'])[0]):
        contains_dot = y_pred['classification'][ind, ..., 1] > threshold
        delta_y = y_pred['offset_regression'][ind, ..., 0]
        delta_x = y_pred['offset_regression'][ind, ..., 1]
        # keep only pixels whose predicted center falls within the pixel itself
        contains_its_regression = (abs(delta_x) <= 0.5) & (abs(delta_y) <= 0.5)
        final_dot_detection = contains_dot & contains_its_regression
        dot_pixel_inds = np.argwhere(final_dot_detection)
        dot_centers.append(np.array(
            [[y_ind + delta_y[y_ind, x_ind],
              x_ind + delta_x[y_ind, x_ind]] for y_ind, x_ind in dot_pixel_inds]))
    return np.array(dot_centers)
def y_annotations_to_point_list_max(y_pred, threshold=0.95, min_distance=2):
    """Convert raw model output to a list of detected spot centers by finding
    local maxima of the classification image (``skimage`` ``peak_local_max``);
    the corresponding regression values produce the final sub-pixel positions.
    Args:
        y_pred: a dictionary of predictions with keys 'classification' and
            'offset_regression' corresponding to the named outputs of the
            dot_net_2D model; values are batched along the first axis
        threshold: a number in [0, 1]. Pixels with classification
            score > threshold are considered as containing a spot center
        min_distance: the minimum distance between detected spots in pixels
    Returns:
        numpy.ndarray: per-image spot center coordinates of the format
            [[y0, x0], [y1, x1],...]
    Raises:
        TypeError: if ``y_pred`` is not a dictionary
        NameError: if ``y_pred`` lacks the required keys
    """
    if not isinstance(y_pred, dict):
        raise TypeError('Input predictions must be a dictionary.')
    if 'classification' not in y_pred or 'offset_regression' not in y_pred:
        raise NameError('Input must have keys \'classification\' and \'offset_regression\'')
    dot_centers = []
    for ind in range(np.shape(y_pred['classification'])[0]):
        # local maxima of the classification map above the detection threshold
        dot_pixel_inds = peak_local_max(y_pred['classification'][ind, ..., 1],
                                        min_distance=min_distance,
                                        threshold_abs=threshold)
        delta_y = y_pred['offset_regression'][ind, ..., 0]
        delta_x = y_pred['offset_regression'][ind, ..., 1]
        dot_centers.append(np.array(
            [[y_ind + delta_y[y_ind, x_ind],
              x_ind + delta_x[y_ind, x_ind]] for y_ind, x_ind in dot_pixel_inds]))
    return np.array(dot_centers)
def y_annotations_to_point_list_cc(y_pred, threshold=0.95):
    """Convert raw model output to a list of detected spot centers by averaging
    the regressed positions over each connected component of pixels whose
    classification score exceeds ``threshold``.
    Args:
        y_pred: a dictionary of predictions with keys 'classification' and
            'offset_regression' corresponding to the named outputs of the
            dot_net_2D model; values are batched along the first axis
        threshold: a number in [0, 1]. Pixels with classification
            score > threshold are considered containing a spot center
    Returns:
        numpy.ndarray: per-image spot center coordinates of the format
            [[y0, x0], [y1, x1],...]
    Raises:
        TypeError: if ``y_pred`` is not a dictionary
        NameError: if ``y_pred`` lacks the required keys
    """
    if not isinstance(y_pred, dict):
        raise TypeError('Input predictions must be a dictionary.')
    if 'classification' not in y_pred or 'offset_regression' not in y_pred:
        raise NameError('Input must have keys \'classification\' and \'offset_regression\'')
    dot_centers = []
    for ind in range(np.shape(y_pred['classification'])[0]):
        delta_y = y_pred['offset_regression'][ind, ..., 0]
        delta_x = y_pred['offset_regression'][ind, ..., 1]
        # label connected components of above-threshold pixels
        blobs = y_pred['classification'][ind, ..., 1] > threshold
        label_image = measure.label(blobs, background=0)
        rp = measure.regionprops(label_image)
        dot_centers_temp = []
        for region in rp:
            region_pixel_inds = region.coords
            reg_pred = [[y_ind + delta_y[y_ind, x_ind], x_ind + delta_x[y_ind, x_ind]]
                        for y_ind, x_ind in region_pixel_inds]
            # one detection per component: the mean of its regressed positions
            dot_centers_temp.append(np.mean(reg_pred, axis=0))
        dot_centers.append(dot_centers_temp)
    return np.array(dot_centers)
| 45.465909 | 92 | 0.677331 |
acf02c25ef781cbaccf94e41006a475c084fed2c | 2,426 | py | Python | scripts/hyperopt.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | 8 | 2020-10-22T10:15:29.000Z | 2021-09-15T08:11:34.000Z | scripts/hyperopt.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | 3 | 2021-07-04T06:14:53.000Z | 2021-11-09T03:07:16.000Z | scripts/hyperopt.py | heytitle/Syllable-based-Neural-Thai-Word-Segmentation | bb8a4f0dbabe31a65f9bfa1fd784000544e3e7f5 | [
"MIT"
] | null | null | null | """Usage: hyperopt --config=<config> [--dry-run] --N=<N> [--max-epoch=<max-epoch>]
Options:
-h --help Show this screen.
--version Show version.
--max-epoch=<max-epoch> Maximum number of epoch [default: 20].
"""
from docopt import docopt
import numpy as np
import os
from sklearn.model_selection import ParameterSampler
from scipy.stats import distributions as dist
import yaml
import time
from datetime import datetime
DATASET = "./data/best-syllable-big"
def merge_arch_params(p):
    """Collapse ``arch_*`` entries of *p* into a single ``arch`` string.

    Every key of the form ``arch_<name>`` is encoded as ``<name>:<value>``;
    the pieces are joined with ``|`` and stored under the ``arch`` key of the
    returned dict.  The input dict itself is left untouched (the original
    deleted the keys in place).

    Args:
        p: sampled hyper-parameter dict, possibly containing ``arch_*`` keys.

    Returns:
        A new dict with all ``arch_*`` keys replaced by one ``arch`` entry.
    """
    prefix = "arch_"
    # Keys must *start* with the prefix; a plain `"arch_" in k` would also
    # match keys that merely contain "arch_" in the middle.  Slicing off the
    # prefix (instead of `k.split("_")[1]`) keeps multi-underscore names such
    # as "arch_cell_size" intact.
    arch_parts = [
        "%s:%s" % (k[len(prefix):], v)
        for k, v in p.items()
        if k.startswith(prefix)
    ]
    remaining = {k: v for k, v in p.items() if not k.startswith(prefix)}
    return dict(**remaining, arch="|".join(arch_parts))
if __name__ == '__main__':
    arguments = docopt(__doc__, version='Hyperopt')
    print(arguments)
    config = arguments["--config"]
    # Build the sampling space: string values name scipy.stats distributions
    # (evaluated against the aliased `dist` module), anything else (lists,
    # scalars) is used as-is.
    # NOTE(review): eval() on config contents — trusted input only.
    param_grid = dict()
    with open(config, "r") as fh:
        for k, v in yaml.full_load(fh).items():
            if isinstance(v, str):
                param_grid[k] = eval("dist." + v)
            else:
                param_grid[k] = v
    _, config_name = os.path.split(config)
    # Tag this sweep with a timestamp so repeated runs do not collide.
    dt = datetime.today().strftime("%Y-%m-%d--%H-%M")
    config_name = f"{config_name}-{dt}"
    print(config_name)
    max_epoch = int(arguments["--max-epoch"])
    n_iters = int(arguments["--N"])
    param_list = list(
        ParameterSampler(param_grid, n_iter=n_iters)
    )
    cmd_template = """
sbatch --job-name {job_name} --output "./logs/{job_name}.out" jobscript.sh ./scripts/train.py --model-name {model_name} \
    --data-dir {dataset} \
    --epoch {max_epoch} \
    --output-dir="{output_dir}" \
    --lr {lr} \
    --batch-size={batch_size} \
    --model-params="{arch}" \
    --weight-decay={weight_decay}
    """
    print("------------------------")
    for i, p in enumerate(param_list):
        job_name = f"{config_name}.{n_iters}.{i}.log"
        output_dir = f"./artifacts/{config_name}.{n_iters}/run-{i}"
        p = merge_arch_params(p)
        cmd = cmd_template.format(
            **p,
            max_epoch=max_epoch,
            output_dir=output_dir,
            job_name=job_name,
            dataset=DATASET
        ).strip()
        if arguments["--dry-run"]:
            print(cmd)
        else:
            os.system(cmd)
        # Throttle submissions: pause after every 10th job.  The original
        # condition `i+1 % 10 == 0` parsed as `i + (1 % 10) == 0`, which is
        # never true for i >= 0, so the sleep never triggered.
        if (i + 1) % 10 == 0:
            time.sleep(5)
acf02ce1e8a0d114caa1a9c8bb21e173f6ef0fbd | 231 | py | Python | solution/problem048.py | jo0t4/project-euler | 75834d7cfc27478cf235af0db69a796672e21e61 | [
"MIT"
] | null | null | null | solution/problem048.py | jo0t4/project-euler | 75834d7cfc27478cf235af0db69a796672e21e61 | [
"MIT"
] | null | null | null | solution/problem048.py | jo0t4/project-euler | 75834d7cfc27478cf235af0db69a796672e21e61 | [
"MIT"
] | null | null | null | # The series, 1^1 + 2^2 + 3^3 + ... + 10^10 = 10405071317.
# Find the last ten digits of the series, 1^1 + 2^2 + 3^3 + ... + 1000^1000.
# Python's arbitrary-precision ints let us compute the full series directly;
# the last ten decimal digits are then a simple string slice.
sumSeries = sum(x ** x for x in range(1, 1001))
print(str(sumSeries)[-10:])
| 21 | 76 | 0.580087 |
acf02cfdc3d090550d32d21d2dcbad4459909cc2 | 3,709 | py | Python | tf_agents/specs/distribution_spec.py | Akshay22121995/agents | 1455410dffed3cfdede793b87c179965cdd27d22 | [
"Apache-2.0"
] | 1 | 2019-10-28T08:39:04.000Z | 2019-10-28T08:39:04.000Z | tf_agents/specs/distribution_spec.py | Akshay22121995/agents | 1455410dffed3cfdede793b87c179965cdd27d22 | [
"Apache-2.0"
] | null | null | null | tf_agents/specs/distribution_spec.py | Akshay22121995/agents | 1455410dffed3cfdede793b87c179965cdd27d22 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spec definition for tensorflow_probability.Distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
class DistributionSpec(object):
  """Specification describing a `tfp.distributions.Distribution`.

  Bundles a factory callable, the tensor specs of the parameters the factory
  consumes, the spec of the samples it produces, and any fixed keyword
  arguments that should be forwarded to the factory.
  """

  __slots__ = [
      "_builder", "_input_params_spec", "_sample_spec",
      "_distribution_parameters"
  ]

  def __init__(self, builder, input_params_spec, sample_spec,
               **distribution_parameters):
    """Initializes a DistributionSpec.

    Args:
      builder: Callable `f(**params)` returning a Distribution that matches
        this spec.
      input_params_spec: Nest of tensor specs describing the tensors needed
        to construct the distribution.
      sample_spec: Spec describing samples drawn from the distribution.
      **distribution_parameters: Fixed keyword arguments forwarded to
        `builder`.
    """
    self._builder = builder
    self._input_params_spec = input_params_spec
    self._sample_spec = sample_spec
    self._distribution_parameters = distribution_parameters

  @property
  def builder(self):
    """The factory callable used to construct distributions."""
    return self._builder

  @property
  def input_params_spec(self):
    """Nest of tensor specs for the builder's tensor parameters."""
    return self._input_params_spec

  @property
  def sample_spec(self):
    """Spec of the samples produced by the described distribution."""
    return self._sample_spec

  @property
  def distribution_parameters(self):
    """Fixed keyword arguments passed to the builder."""
    return self._distribution_parameters

  def build_distribution(self, **distribution_parameters):
    """Constructs a distribution instance from this spec.

    Keyword arguments given here override the spec's stored parameters; the
    stored parameters themselves are not modified.

    Args:
      **distribution_parameters: Overrides for the stored distribution
        parameters.

    Returns:
      The Distribution returned by the builder.
    """
    merged = dict(self._distribution_parameters, **distribution_parameters)
    return self._builder(**merged)

  def __repr__(self):
    template = ("DistributionSpec(builder={}, input_params_spec={}, "
                "sample_spec={})")
    return template.format(self.builder, repr(self.input_params_spec),
                           repr(self.sample_spec))
def deterministic_distribution_from_spec(spec):
  """Wraps a tensor spec in a `tfd.Deterministic` DistributionSpec.

  Args:
    spec: Tensor spec used both as the `loc` parameter spec and as the
      sample spec of the resulting distribution.

  Returns:
    A `DistributionSpec` describing a Deterministic distribution.
  """
  input_params = {"loc": spec}
  return DistributionSpec(tfd.Deterministic, input_params, sample_spec=spec)
def nested_distributions_from_specs(specs, parameters):
  """Builds a nest of distributions from a nest of specs.

  Args:
    specs: A nest of distribution specs.
    parameters: A nest of distribution kwargs.

  Returns:
    Nest of distribution instances with the same structure as the given specs.
  """
  # NOTE(review): relies on the TF 1.x `tf.contrib.framework.nest` API;
  # `tf.contrib` was removed in TF 2.x — confirm the pinned TF version.
  return tf.contrib.framework.nest.map_structure_up_to(
      specs, lambda spec, parameters: spec.build_distribution(**parameters),
      specs, parameters)
| 32.535088 | 80 | 0.723376 |
acf02d4318736448be1236da86b598a208a78f16 | 48,381 | py | Python | tests/async/test_page.py | shadowwolf899/playwright-python | 1003d082057374385c9a608ecae4b50d4715db97 | [
"Apache-2.0"
] | null | null | null | tests/async/test_page.py | shadowwolf899/playwright-python | 1003d082057374385c9a608ecae4b50d4715db97 | [
"Apache-2.0"
] | null | null | null | tests/async/test_page.py | shadowwolf899/playwright-python | 1003d082057374385c9a608ecae4b50d4715db97 | [
"Apache-2.0"
] | 1 | 2022-01-29T10:35:58.000Z | 2022-01-29T10:35:58.000Z | # Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import os
import re
import pytest
from playwright.async_api import Error, Page, Route, TimeoutError
from tests.server import Server
async def test_close_should_reject_all_promises(context):
    # Closing a page must reject any evaluate() call that is still pending.
    page = await context.new_page()
    never_resolves = page.evaluate("() => new Promise(r => {})")
    with pytest.raises(Error) as exc_info:
        await asyncio.gather(never_resolves, page.close())
    assert "Target closed" in exc_info.value.message
async def test_closed_should_not_visible_in_context_pages(context):
page = await context.new_page()
assert page in context.pages
await page.close()
assert page not in context.pages
async def test_close_should_run_beforeunload_if_asked_for(
context, server, is_chromium, is_webkit
):
page = await context.new_page()
await page.goto(server.PREFIX + "/beforeunload.html")
# We have to interact with a page so that 'beforeunload' handlers
# fire.
await page.click("body")
async with page.expect_event("dialog") as dialog_info:
await page.close(run_before_unload=True)
dialog = await dialog_info.value
assert dialog.type == "beforeunload"
assert dialog.default_value == ""
if is_chromium:
assert dialog.message == ""
elif is_webkit:
assert dialog.message == "Leave?"
else:
assert (
"This page is asking you to confirm that you want to leave"
in dialog.message
)
async with page.expect_event("close"):
await dialog.accept()
async def test_close_should_not_run_beforeunload_by_default(context, server):
page = await context.new_page()
await page.goto(server.PREFIX + "/beforeunload.html")
# We have to interact with a page so that 'beforeunload' handlers
# fire.
await page.click("body")
await page.close()
async def test_should_be_able_to_navigate_away_from_page_with_before_unload(
server: Server, page: Page
):
await page.goto(server.PREFIX + "/beforeunload.html")
# We have to interact with a page so that 'beforeunload' handlers
# fire.
await page.click("body")
await page.goto(server.EMPTY_PAGE)
async def test_close_should_set_the_page_close_state(context):
page = await context.new_page()
assert page.is_closed() is False
await page.close()
assert page.is_closed()
async def test_close_should_terminate_network_waiters(context, server):
    """Closing the page must abort pending expect_request/expect_response waiters."""
    page = await context.new_page()

    async def wait_for_request():
        # Waiter that never sees a matching request; closing the page should
        # reject it with a "Page closed" error, not a timeout.
        with pytest.raises(Error) as exc_info:
            async with page.expect_request(server.EMPTY_PAGE):
                pass
        return exc_info.value

    async def wait_for_response():
        # Same as above, for the response waiter.
        with pytest.raises(Error) as exc_info:
            async with page.expect_response(server.EMPTY_PAGE):
                pass
        return exc_info.value

    # Start both waiters, then close the page while they are pending.
    results = await asyncio.gather(
        wait_for_request(), wait_for_response(), page.close()
    )
    for i in range(2):
        error = results[i]
        assert "Page closed" in error.message
        assert "Timeout" not in error.message
async def test_close_should_be_callable_twice(context):
page = await context.new_page()
await asyncio.gather(
page.close(),
page.close(),
)
await page.close()
async def test_load_should_fire_when_expected(page):
async with page.expect_event("load"):
await page.goto("about:blank")
async def test_async_stacks_should_work(page, server):
    """Errors raised from async route handling should carry this file in the stack."""
    # The route handler receives (route, request); the second argument is a
    # Request, so name it accordingly (the original called it `response`).
    await page.route(
        "**/empty.html", lambda route, request: asyncio.create_task(route.abort())
    )
    with pytest.raises(Error) as exc_info:
        await page.goto(server.EMPTY_PAGE)
    assert __file__ in exc_info.value.stack
async def test_opener_should_provide_access_to_the_opener_page(page):
    # A window opened via window.open() must report the page that opened it.
    async with page.expect_popup() as popup_info:
        await page.evaluate("window.open('about:blank')")
    popup = await popup_info.value
    assert await popup.opener() == page
async def test_opener_should_return_null_if_parent_page_has_been_closed(page):
async with page.expect_popup() as popup_info:
await page.evaluate("window.open('about:blank')"),
popup = await popup_info.value
await page.close()
opener = await popup.opener()
assert opener is None
async def test_domcontentloaded_should_fire_when_expected(page, server):
future = asyncio.create_task(page.goto("about:blank"))
async with page.expect_event("domcontentloaded"):
pass
await future
async def test_wait_for_request(page, server):
    """expect_request resolves with the first request whose URL matches exactly."""
    await page.goto(server.EMPTY_PAGE)
    async with page.expect_request(server.PREFIX + "/digits/2.png") as request_info:
        # Fire three fetches; only /digits/2.png satisfies the waiter.
        await page.evaluate(
            """() => {
                fetch('/digits/1.png')
                fetch('/digits/2.png')
                fetch('/digits/3.png')
            }"""
        )
    request = await request_info.value
    assert request.url == server.PREFIX + "/digits/2.png"
async def test_wait_for_request_should_work_with_predicate(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_request(
lambda request: request.url == server.PREFIX + "/digits/2.png"
) as request_info:
await page.evaluate(
"""() => {
fetch('/digits/1.png')
fetch('/digits/2.png')
fetch('/digits/3.png')
}"""
)
request = await request_info.value
assert request.url == server.PREFIX + "/digits/2.png"
async def test_wait_for_request_should_timeout(page, server):
with pytest.raises(Error) as exc_info:
async with page.expect_event("request", timeout=1):
pass
assert exc_info.type is TimeoutError
async def test_wait_for_request_should_respect_default_timeout(page, server):
page.set_default_timeout(1)
with pytest.raises(Error) as exc_info:
async with page.expect_event("request", lambda _: False):
pass
assert exc_info.type is TimeoutError
async def test_wait_for_request_should_work_with_no_timeout(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_request(
server.PREFIX + "/digits/2.png", timeout=0
) as request_info:
await page.evaluate(
"""() => setTimeout(() => {
fetch('/digits/1.png')
fetch('/digits/2.png')
fetch('/digits/3.png')
}, 50)"""
)
request = await request_info.value
assert request.url == server.PREFIX + "/digits/2.png"
async def test_wait_for_request_should_work_with_url_match(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_request(re.compile(r"digits\/\d\.png")) as request_info:
await page.evaluate("fetch('/digits/1.png')")
request = await request_info.value
assert request.url == server.PREFIX + "/digits/1.png"
async def test_wait_for_event_should_fail_with_error_upon_disconnect(page):
with pytest.raises(Error) as exc_info:
async with page.expect_download():
await page.close()
assert "Page closed" in exc_info.value.message
async def test_wait_for_response_should_work(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_response(server.PREFIX + "/digits/2.png") as response_info:
await page.evaluate(
"""() => {
fetch('/digits/1.png')
fetch('/digits/2.png')
fetch('/digits/3.png')
}"""
)
response = await response_info.value
assert response.url == server.PREFIX + "/digits/2.png"
async def test_wait_for_response_should_respect_timeout(page):
with pytest.raises(Error) as exc_info:
async with page.expect_response("**/*", timeout=1):
pass
assert exc_info.type is TimeoutError
async def test_wait_for_response_should_respect_default_timeout(page):
page.set_default_timeout(1)
with pytest.raises(Error) as exc_info:
async with page.expect_response(lambda _: False):
pass
assert exc_info.type is TimeoutError
async def test_wait_for_response_should_work_with_predicate(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_response(
lambda response: response.url == server.PREFIX + "/digits/2.png"
) as response_info:
await page.evaluate(
"""() => {
fetch('/digits/1.png')
fetch('/digits/2.png')
fetch('/digits/3.png')
}"""
)
response = await response_info.value
assert response.url == server.PREFIX + "/digits/2.png"
async def test_wait_for_response_should_work_with_no_timeout(page, server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_response(server.PREFIX + "/digits/2.png") as response_info:
await page.evaluate(
"""() => {
fetch('/digits/1.png')
fetch('/digits/2.png')
fetch('/digits/3.png')
}"""
)
response = await response_info.value
assert response.url == server.PREFIX + "/digits/2.png"
async def test_expose_binding(page):
binding_source = []
def binding(source, a, b):
binding_source.append(source)
return a + b
await page.expose_binding("add", lambda source, a, b: binding(source, a, b))
result = await page.evaluate("add(5, 6)")
assert binding_source[0]["context"] == page.context
assert binding_source[0]["page"] == page
assert binding_source[0]["frame"] == page.main_frame
assert result == 11
async def test_expose_function(page, server):
await page.expose_function("compute", lambda a, b: a * b)
result = await page.evaluate("compute(9, 4)")
assert result == 36
async def test_expose_function_should_throw_exception_in_page_context(page, server):
def throw():
raise Exception("WOOF WOOF")
await page.expose_function("woof", lambda: throw())
result = await page.evaluate(
"""async() => {
try {
await woof()
} catch (e) {
return {message: e.message, stack: e.stack}
}
}"""
)
assert result["message"] == "WOOF WOOF"
assert __file__ in result["stack"]
async def test_expose_function_should_be_callable_from_inside_add_init_script(page):
called = []
await page.expose_function("woof", lambda: called.append(True))
await page.add_init_script("woof()")
await page.reload()
assert called == [True]
async def test_expose_function_should_survive_navigation(page, server):
await page.expose_function("compute", lambda a, b: a * b)
await page.goto(server.EMPTY_PAGE)
result = await page.evaluate("compute(9, 4)")
assert result == 36
async def test_expose_function_should_await_returned_promise(page):
async def mul(a, b):
return a * b
await page.expose_function("compute", mul)
assert await page.evaluate("compute(3, 5)") == 15
async def test_expose_function_should_work_on_frames(page, server):
await page.expose_function("compute", lambda a, b: a * b)
await page.goto(server.PREFIX + "/frames/nested-frames.html")
frame = page.frames[1]
assert await frame.evaluate("compute(3, 5)") == 15
async def test_expose_function_should_work_on_frames_before_navigation(page, server):
await page.goto(server.PREFIX + "/frames/nested-frames.html")
await page.expose_function("compute", lambda a, b: a * b)
frame = page.frames[1]
assert await frame.evaluate("compute(3, 5)") == 15
async def test_expose_function_should_work_after_cross_origin_navigation(page, server):
await page.goto(server.EMPTY_PAGE)
await page.expose_function("compute", lambda a, b: a * b)
await page.goto(server.CROSS_PROCESS_PREFIX + "/empty.html")
assert await page.evaluate("compute(9, 4)") == 36
async def test_expose_function_should_work_with_complex_objects(page, server):
await page.expose_function("complexObject", lambda a, b: dict(x=a["x"] + b["x"]))
result = await page.evaluate("complexObject({x: 5}, {x: 2})")
assert result["x"] == 7
async def test_expose_bindinghandle_should_work(page, server):
targets = []
def logme(t):
targets.append(t)
return 17
await page.expose_binding("logme", lambda source, t: logme(t), handle=True)
result = await page.evaluate("logme({ foo: 42 })")
assert (await targets[0].evaluate("x => x.foo")) == 42
assert result == 17
async def test_page_error_should_fire(page, server, browser_name):
url = server.PREFIX + "/error.html"
async with page.expect_event("pageerror") as error_info:
await page.goto(url)
error = await error_info.value
assert error.name == "Error"
assert error.message == "Fancy error!"
# Note that WebKit reports the stack of the 'throw' statement instead of the Error constructor call.
if browser_name == "chromium":
assert (
error.stack
== """Error: Fancy error!
at c (myscript.js:14:11)
at b (myscript.js:10:5)
at a (myscript.js:6:5)
at myscript.js:3:1"""
)
if browser_name == "firefox":
assert (
error.stack
== """Error: Fancy error!
at c (myscript.js:14:11)
at b (myscript.js:10:5)
at a (myscript.js:6:5)
at (myscript.js:3:1)"""
)
if browser_name == "webkit":
assert (
error.stack
== f"""Error: Fancy error!
at c ({url}:14:36)
at b ({url}:10:6)
at a ({url}:6:6)
at global code ({url}:3:2)"""
)
async def test_page_error_should_handle_odd_values(page):
    # Throwing non-Error primitives should surface their string form as the
    # pageerror message.
    cases = [
        ("null", "null"),
        ("undefined", "undefined"),
        ("0", "0"),
        ('""', ""),
    ]
    for thrown, expected in cases:
        async with page.expect_event("pageerror") as error_info:
            await page.evaluate(f"() => setTimeout(() => {{ throw {thrown}; }}, 0)")
        error = await error_info.value
        assert error.message == expected
async def test_page_error_should_handle_object(page, is_chromium):
async with page.expect_event("pageerror") as error_info:
await page.evaluate("() => setTimeout(() => { throw {}; }, 0)")
error = await error_info.value
assert error.message == "Object" if is_chromium else "[object Object]"
async def test_page_error_should_handle_window(page, is_chromium):
async with page.expect_event("pageerror") as error_info:
await page.evaluate("() => setTimeout(() => { throw window; }, 0)")
error = await error_info.value
assert error.message == "Window" if is_chromium else "[object Window]"
async def test_page_error_should_pass_error_name_property(page):
async with page.expect_event("pageerror") as error_info:
await page.evaluate(
"""() => setTimeout(() => {
const error = new Error("my-message");
error.name = "my-name";
throw error;
}, 0)
"""
)
error = await error_info.value
assert error.message == "my-message"
assert error.name == "my-name"
expected_output = "<html><head></head><body><div>hello</div></body></html>"
async def test_set_content_should_work(page, server):
    # set_content replaces the whole document; content() serializes it back.
    await page.set_content("<div>hello</div>")
    assert await page.content() == expected_output
async def test_set_content_should_work_with_domcontentloaded(page, server):
await page.set_content("<div>hello</div>", wait_until="domcontentloaded")
result = await page.content()
assert result == expected_output
async def test_set_content_should_work_with_doctype(page, server):
doctype = "<!DOCTYPE html>"
await page.set_content(f"{doctype}<div>hello</div>")
result = await page.content()
assert result == f"{doctype}{expected_output}"
async def test_set_content_should_work_with_HTML_4_doctype(page, server):
doctype = '<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">'
await page.set_content(f"{doctype}<div>hello</div>")
result = await page.content()
assert result == f"{doctype}{expected_output}"
async def test_set_content_should_respect_timeout(page, server):
img_path = "/img.png"
# stall for image
server.set_route(img_path, lambda request: None)
with pytest.raises(Error) as exc_info:
await page.set_content(
f'<img src="{server.PREFIX + img_path}"></img>', timeout=1
)
assert exc_info.type is TimeoutError
async def test_set_content_should_respect_default_navigation_timeout(page, server):
page.set_default_navigation_timeout(1)
img_path = "/img.png"
# stall for image
await page.route(img_path, lambda route, request: None)
with pytest.raises(Error) as exc_info:
await page.set_content(f'<img src="{server.PREFIX + img_path}"></img>')
assert "Timeout 1ms exceeded" in exc_info.value.message
assert exc_info.type is TimeoutError
async def test_set_content_should_await_resources_to_load(page, server):
    """set_content must not resolve until subresources (the image) finish loading."""
    img_path = "/img.png"
    # Future resolved with the intercepted Route once the image is requested.
    img_route = asyncio.Future()
    await page.route(img_path, lambda route, request: img_route.set_result(route))
    loaded = []

    async def load():
        await page.set_content(f'<img src="{server.PREFIX + img_path}"></img>')
        loaded.append(True)

    content_promise = asyncio.create_task(load())
    await asyncio.sleep(0)  # execute scheduled tasks, but don't await them
    route = await img_route
    # The image request is stalled, so set_content must still be pending.
    assert loaded == []
    asyncio.create_task(route.continue_())
    # Once the image is allowed through, set_content completes.
    await content_promise
async def test_set_content_should_work_with_tricky_content(page):
await page.set_content("<div>hello world</div>" + "\x7F")
assert await page.eval_on_selector("div", "div => div.textContent") == "hello world"
async def test_set_content_should_work_with_accents(page):
await page.set_content("<div>aberración</div>")
assert await page.eval_on_selector("div", "div => div.textContent") == "aberración"
async def test_set_content_should_work_with_emojis(page):
await page.set_content("<div>🐥</div>")
assert await page.eval_on_selector("div", "div => div.textContent") == "🐥"
async def test_set_content_should_work_with_newline(page):
await page.set_content("<div>\n</div>")
assert await page.eval_on_selector("div", "div => div.textContent") == "\n"
async def test_add_script_tag_should_work_with_a_url(page, server):
await page.goto(server.EMPTY_PAGE)
script_handle = await page.add_script_tag(url="/injectedfile.js")
assert script_handle.as_element()
assert await page.evaluate("__injected") == 42
async def test_add_script_tag_should_work_with_a_url_and_type_module(page, server):
await page.goto(server.EMPTY_PAGE)
await page.add_script_tag(url="/es6/es6import.js", type="module")
assert await page.evaluate("__es6injected") == 42
async def test_add_script_tag_should_work_with_a_path_and_type_module(
page, server, assetdir
):
await page.goto(server.EMPTY_PAGE)
await page.add_script_tag(path=assetdir / "es6" / "es6pathimport.js", type="module")
await page.wait_for_function("window.__es6injected")
assert await page.evaluate("__es6injected") == 42
async def test_add_script_tag_should_work_with_a_content_and_type_module(page, server):
await page.goto(server.EMPTY_PAGE)
await page.add_script_tag(
content="import num from '/es6/es6module.js';window.__es6injected = num;",
type="module",
)
await page.wait_for_function("window.__es6injected")
assert await page.evaluate("__es6injected") == 42
async def test_add_script_tag_should_throw_an_error_if_loading_from_url_fail(
page, server
):
await page.goto(server.EMPTY_PAGE)
with pytest.raises(Error) as exc_info:
await page.add_script_tag(url="/nonexistfile.js")
assert exc_info.value
async def test_add_script_tag_should_work_with_a_path(page, server, assetdir):
await page.goto(server.EMPTY_PAGE)
script_handle = await page.add_script_tag(path=assetdir / "injectedfile.js")
assert script_handle.as_element()
assert await page.evaluate("__injected") == 42
@pytest.mark.skip_browser("webkit")
async def test_add_script_tag_should_include_source_url_when_path_is_provided(
page, server, assetdir
):
# Lacking sourceURL support in WebKit
await page.goto(server.EMPTY_PAGE)
await page.add_script_tag(path=assetdir / "injectedfile.js")
result = await page.evaluate("__injectedError.stack")
assert os.path.join("assets", "injectedfile.js") in result
async def test_add_script_tag_should_work_with_content(page, server):
await page.goto(server.EMPTY_PAGE)
script_handle = await page.add_script_tag(content="window.__injected = 35;")
assert script_handle.as_element()
assert await page.evaluate("__injected") == 35
@pytest.mark.skip_browser("firefox")
async def test_add_script_tag_should_throw_when_added_with_content_to_the_csp_page(
page, server
):
# Firefox fires onload for blocked script before it issues the CSP console error.
await page.goto(server.PREFIX + "/csp.html")
with pytest.raises(Error) as exc_info:
await page.add_script_tag(content="window.__injected = 35;")
assert exc_info.value
async def test_add_script_tag_should_throw_when_added_with_URL_to_the_csp_page(
page, server
):
await page.goto(server.PREFIX + "/csp.html")
with pytest.raises(Error) as exc_info:
await page.add_script_tag(url=server.CROSS_PROCESS_PREFIX + "/injectedfile.js")
assert exc_info.value
async def test_add_script_tag_should_throw_a_nice_error_when_the_request_fails(
page, server
):
await page.goto(server.EMPTY_PAGE)
url = server.PREFIX + "/this_does_not_exist.js"
with pytest.raises(Error) as exc_info:
await page.add_script_tag(url=url)
assert url in exc_info.value.message
async def test_add_style_tag_should_work_with_a_url(page, server):
await page.goto(server.EMPTY_PAGE)
style_handle = await page.add_style_tag(url="/injectedstyle.css")
assert style_handle.as_element()
assert (
await page.evaluate(
"window.getComputedStyle(document.querySelector('body')).getPropertyValue('background-color')"
)
== "rgb(255, 0, 0)"
)
async def test_add_style_tag_should_throw_an_error_if_loading_from_url_fail(
page, server
):
await page.goto(server.EMPTY_PAGE)
with pytest.raises(Error) as exc_info:
await page.add_style_tag(url="/nonexistfile.js")
assert exc_info.value
async def test_add_style_tag_should_work_with_a_path(page, server, assetdir):
await page.goto(server.EMPTY_PAGE)
style_handle = await page.add_style_tag(path=assetdir / "injectedstyle.css")
assert style_handle.as_element()
assert (
await page.evaluate(
"window.getComputedStyle(document.querySelector('body')).getPropertyValue('background-color')"
)
== "rgb(255, 0, 0)"
)
async def test_add_style_tag_should_include_source_url_when_path_is_provided(
page, server, assetdir
):
await page.goto(server.EMPTY_PAGE)
await page.add_style_tag(path=assetdir / "injectedstyle.css")
style_handle = await page.query_selector("style")
style_content = await page.evaluate("style => style.innerHTML", style_handle)
assert os.path.join("assets", "injectedstyle.css") in style_content
async def test_add_style_tag_should_work_with_content(page, server):
await page.goto(server.EMPTY_PAGE)
style_handle = await page.add_style_tag(content="body { background-color: green; }")
assert style_handle.as_element()
assert (
await page.evaluate(
"window.getComputedStyle(document.querySelector('body')).getPropertyValue('background-color')"
)
== "rgb(0, 128, 0)"
)
async def test_add_style_tag_should_throw_when_added_with_content_to_the_CSP_page(
page, server
):
await page.goto(server.PREFIX + "/csp.html")
with pytest.raises(Error) as exc_info:
await page.add_style_tag(content="body { background-color: green; }")
assert exc_info.value
async def test_add_style_tag_should_throw_when_added_with_URL_to_the_CSP_page(
page, server
):
await page.goto(server.PREFIX + "/csp.html")
with pytest.raises(Error) as exc_info:
await page.add_style_tag(url=server.CROSS_PROCESS_PREFIX + "/injectedstyle.css")
assert exc_info.value
async def test_url_should_work(page, server):
assert page.url == "about:blank"
await page.goto(server.EMPTY_PAGE)
assert page.url == server.EMPTY_PAGE
async def test_url_should_include_hashes(page, server):
await page.goto(server.EMPTY_PAGE + "#hash")
assert page.url == server.EMPTY_PAGE + "#hash"
await page.evaluate("window.location.hash = 'dynamic'")
assert page.url == server.EMPTY_PAGE + "#dynamic"
async def test_title_should_return_the_page_title(page, server):
await page.goto(server.PREFIX + "/title.html")
assert await page.title() == "Woof-Woof"
async def test_select_option_should_select_single_option(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", "blue")
assert await page.evaluate("result.onInput") == ["blue"]
assert await page.evaluate("result.onChange") == ["blue"]
async def test_select_option_should_select_single_option_by_value(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", "blue")
assert await page.evaluate("result.onInput") == ["blue"]
assert await page.evaluate("result.onChange") == ["blue"]
async def test_select_option_should_select_single_option_by_label(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", label="Indigo")
assert await page.evaluate("result.onInput") == ["indigo"]
assert await page.evaluate("result.onChange") == ["indigo"]
async def test_select_option_should_select_single_option_by_handle(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option(
"select", element=await page.query_selector("[id=whiteOption]")
)
assert await page.evaluate("result.onInput") == ["white"]
assert await page.evaluate("result.onChange") == ["white"]
async def test_select_option_should_select_single_option_by_index(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", index=2)
assert await page.evaluate("result.onInput") == ["brown"]
assert await page.evaluate("result.onChange") == ["brown"]
async def test_select_option_should_select_only_first_option(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", ["blue", "green", "red"])
assert await page.evaluate("result.onInput") == ["blue"]
assert await page.evaluate("result.onChange") == ["blue"]
async def test_select_option_should_not_throw_when_select_causes_navigation(
page, server
):
await page.goto(server.PREFIX + "/input/select.html")
await page.eval_on_selector(
"select",
"select => select.addEventListener('input', () => window.location = '/empty.html')",
)
async with page.expect_navigation():
await page.select_option("select", "blue")
assert "empty.html" in page.url
async def test_select_option_should_select_multiple_options(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.evaluate("makeMultiple()")
await page.select_option("select", ["blue", "green", "red"])
assert await page.evaluate("result.onInput") == ["blue", "green", "red"]
assert await page.evaluate("result.onChange") == ["blue", "green", "red"]
async def test_select_option_should_select_multiple_options_with_attributes(
    page, server
):
    """Mixing value=, label= and index= selectors should union the matches."""
    await page.goto(f"{server.PREFIX}/input/select.html")
    await page.evaluate("makeMultiple()")
    await page.select_option("select", value="blue", label="Green", index=4)
    expected = ["blue", "gray", "green"]
    assert await page.evaluate("result.onInput") == expected
    assert await page.evaluate("result.onChange") == expected
async def test_select_option_should_respect_event_bubbling(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", "blue")
assert await page.evaluate("result.onBubblingInput") == ["blue"]
assert await page.evaluate("result.onBubblingChange") == ["blue"]
async def test_select_option_should_throw_when_element_is_not_a__select_(page, server):
    """select_option on a non-<select> element must raise a descriptive error."""
    await page.goto(f"{server.PREFIX}/input/select.html")
    raised = None
    try:
        await page.select_option("body", "")
    except Error as error:
        raised = error
    assert raised is not None
    assert "Element is not a <select> element" in raised.message
async def test_select_option_should_return_on_no_matched_values(page, server):
    """With no matching option and a 1s timeout, select_option should time out."""
    await page.goto(f"{server.PREFIX}/input/select.html")
    raised = None
    try:
        await page.select_option("select", ["42", "abc"], timeout=1000)
    except TimeoutError as error:
        raised = error
    assert raised is not None
    assert "Timeout 1000" in raised.message
async def test_select_option_should_return_an_array_of_matched_values(page, server):
await page.goto(server.PREFIX + "/input/select.html")
await page.evaluate("makeMultiple()")
result = await page.select_option("select", ["blue", "black", "magenta"])
assert result == ["black", "blue", "magenta"]
async def test_select_option_should_return_an_array_of_one_element_when_multiple_is_not_set(
page, server
):
await page.goto(server.PREFIX + "/input/select.html")
result = await page.select_option("select", ["42", "blue", "black", "magenta"])
assert len(result) == 1
async def test_select_option_should_return_on_no_values(page, server):
await page.goto(server.PREFIX + "/input/select.html")
result = await page.select_option("select", [])
assert result == []
async def test_select_option_should_not_allow_null_items(page, server):
    """None inside the values list must be rejected with a type error."""
    await page.goto(f"{server.PREFIX}/input/select.html")
    await page.evaluate("makeMultiple()")
    raised = None
    try:
        await page.select_option("select", ["blue", None, "black", "magenta"])
    except Error as error:
        raised = error
    assert raised is not None
    assert "expected string, got object" in raised.message
async def test_select_option_should_unselect_with_null(page, server):
    """Passing None should clear every selected option of a multi-select."""
    await page.goto(f"{server.PREFIX}/input/select.html")
    await page.evaluate("makeMultiple()")
    selected = await page.select_option("select", ["blue", "black", "magenta"])
    assert selected == ["black", "blue", "magenta"]
    await page.select_option("select", None)
    none_selected = await page.eval_on_selector(
        "select",
        "select => Array.from(select.options).every(option => !option.selected)",
    )
    assert none_selected
async def test_select_option_should_deselect_all_options_when_passed_no_values_for_a_multiple_select(
page, server
):
await page.goto(server.PREFIX + "/input/select.html")
await page.evaluate("makeMultiple()")
await page.select_option("select", ["blue", "black", "magenta"])
await page.select_option("select", [])
assert await page.eval_on_selector(
"select",
"select => Array.from(select.options).every(option => !option.selected)",
)
async def test_select_option_should_deselect_all_options_when_passed_no_values_for_a_select_without_multiple(
page, server
):
await page.goto(server.PREFIX + "/input/select.html")
await page.select_option("select", ["blue", "black", "magenta"])
await page.select_option("select", [])
assert await page.eval_on_selector(
"select",
"select => Array.from(select.options).every(option => !option.selected)",
)
async def test_select_option_should_work_when_re_defining_top_level_event_class(
page, server
):
await page.goto(server.PREFIX + "/input/select.html")
await page.evaluate("window.Event = null")
await page.select_option("select", "blue")
assert await page.evaluate("result.onInput") == ["blue"]
assert await page.evaluate("result.onChange") == ["blue"]
async def give_it_a_chance_to_fill(page):
    """Yield to the page for a few frames so a pending fill() could run.

    Waits five times for two consecutive animation frames, which gives a
    queued (but blocked) fill task ample opportunity to make progress.
    """
    for _ in range(5):  # the loop index was unused; use the conventional `_`
        await page.evaluate(
            "() => new Promise(f => requestAnimationFrame(() => requestAnimationFrame(f)))"
        )
async def test_fill_should_fill_textarea(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.fill("textarea", "some value")
assert await page.evaluate("result") == "some value"
async def test_fill_should_fill_input(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.fill("input", "some value")
assert await page.evaluate("result") == "some value"
async def test_fill_should_throw_on_unsupported_inputs(page, server):
    """fill() must reject input types that cannot accept text."""
    await page.goto(server.PREFIX + "/input/textarea.html")
    # `input_type` instead of `type`: don't shadow the `type` builtin.
    for input_type in [
        "button",
        "checkbox",
        "file",
        "image",
        "radio",
        "reset",
        "submit",
    ]:
        await page.eval_on_selector(
            "input", "(input, type) => input.setAttribute('type', type)", input_type
        )
        with pytest.raises(Error) as exc_info:
            await page.fill("input", "")
        assert f'input of type "{input_type}" cannot be filled' in exc_info.value.message
async def test_fill_should_fill_different_input_types(page, server):
    """fill() should work for every text-like input type."""
    await page.goto(server.PREFIX + "/input/textarea.html")
    # `input_type` instead of `type`: don't shadow the `type` builtin.
    for input_type in ["password", "search", "tel", "text", "url"]:
        await page.eval_on_selector(
            "input", "(input, type) => input.setAttribute('type', type)", input_type
        )
        await page.fill("input", "text " + input_type)
        assert await page.evaluate("result") == "text " + input_type
async def test_fill_should_fill_date_input_after_clicking(page, server):
await page.set_content("<input type=date>")
await page.click("input")
await page.fill("input", "2020-03-02")
assert await page.eval_on_selector("input", "input => input.value") == "2020-03-02"
@pytest.mark.skip_browser("webkit")
async def test_fill_should_throw_on_incorrect_date(page, server):
# Disabled as in upstream, we should validate time in the Playwright lib
await page.set_content("<input type=date>")
with pytest.raises(Error) as exc_info:
await page.fill("input", "2020-13-05")
assert "Malformed value" in exc_info.value.message
async def test_fill_should_fill_time_input(page, server):
await page.set_content("<input type=time>")
await page.fill("input", "13:15")
assert await page.eval_on_selector("input", "input => input.value") == "13:15"
@pytest.mark.skip_browser("webkit")
async def test_fill_should_throw_on_incorrect_time(page, server):
# Disabled as in upstream, we should validate time in the Playwright lib
await page.set_content("<input type=time>")
with pytest.raises(Error) as exc_info:
await page.fill("input", "25:05")
assert "Malformed value" in exc_info.value.message
async def test_fill_should_fill_datetime_local_input(page, server):
await page.set_content("<input type=datetime-local>")
await page.fill("input", "2020-03-02T05:15")
assert (
await page.eval_on_selector("input", "input => input.value")
== "2020-03-02T05:15"
)
@pytest.mark.only_browser("chromium")
async def test_fill_should_throw_on_incorrect_datetime_local(page):
await page.set_content("<input type=datetime-local>")
with pytest.raises(Error) as exc_info:
await page.fill("input", "abc")
assert "Malformed value" in exc_info.value.message
async def test_fill_should_fill_contenteditable(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.fill("div[contenteditable]", "some value")
assert (
await page.eval_on_selector("div[contenteditable]", "div => div.textContent")
== "some value"
)
async def test_fill_should_fill_elements_with_existing_value_and_selection(
    page, server
):
    """fill() must replace any pre-existing value or active selection.

    Order matters: each fill is checked against state installed by the
    preceding eval_on_selector call (a plain value, then an input selection
    range, then a DOM Range selection inside a contenteditable).
    """
    await page.goto(server.PREFIX + "/input/textarea.html")
    await page.eval_on_selector("input", "input => input.value = 'value one'")
    await page.fill("input", "another value")
    assert await page.evaluate("result") == "another value"
    # Select characters 1-2 of the input's value before refilling.
    await page.eval_on_selector(
        "input",
        """input => {
        input.selectionStart = 1
        input.selectionEnd = 2
    }""",
    )
    await page.fill("input", "maybe this one")
    assert await page.evaluate("result") == "maybe this one"
    # Select the inner <span> of the contenteditable via a DOM Range.
    await page.eval_on_selector(
        "div[contenteditable]",
        """div => {
        div.innerHTML = 'some text <span>some more text<span> and even more text'
        range = document.createRange()
        range.selectNodeContents(div.querySelector('span'))
        selection = window.getSelection()
        selection.removeAllRanges()
        selection.addRange(range)
    }""",
    )
    await page.fill("div[contenteditable]", "replace with this")
    assert (
        await page.eval_on_selector("div[contenteditable]", "div => div.textContent")
        == "replace with this"
    )
async def test_fill_should_throw_when_element_is_not_an_input_textarea_or_contenteditable(
page, server
):
await page.goto(server.PREFIX + "/input/textarea.html")
with pytest.raises(Error) as exc_info:
await page.fill("body", "")
assert "Element is not an <input>" in exc_info.value.message
async def test_fill_should_throw_if_passed_a_non_string_value(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
with pytest.raises(Error) as exc_info:
await page.fill("textarea", 123)
assert "expected string, got number" in exc_info.value.message
async def test_fill_should_retry_on_disabled_element(page, server):
    """fill() should wait while the input is disabled and finish once enabled."""
    await page.goto(server.PREFIX + "/input/textarea.html")
    await page.eval_on_selector("input", "i => i.disabled = true")
    completed = []

    async def attempt_fill():
        await page.fill("input", "some value")
        completed.append(True)

    task = asyncio.create_task(attempt_fill())
    await give_it_a_chance_to_fill(page)
    assert not completed
    assert await page.evaluate("result") == ""
    await page.eval_on_selector("input", "i => i.disabled = false")
    await task
    assert await page.evaluate("result") == "some value"
async def test_fill_should_retry_on_readonly_element(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.eval_on_selector("textarea", "i => i.readOnly = true")
done = []
async def fill():
await page.fill("textarea", "some value")
done.append(True)
promise = asyncio.create_task(fill())
await give_it_a_chance_to_fill(page)
assert done == []
assert await page.evaluate("result") == ""
await page.eval_on_selector("textarea", "i => i.readOnly = false")
await promise
assert await page.evaluate("result") == "some value"
async def test_fill_should_retry_on_invisible_element(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.eval_on_selector("input", "i => i.style.display = 'none'")
done = []
async def fill():
await page.fill("input", "some value")
done.append(True)
promise = asyncio.create_task(fill())
await give_it_a_chance_to_fill(page)
assert done == []
assert await page.evaluate("result") == ""
await page.eval_on_selector("input", "i => i.style.display = 'inline'")
await promise
assert await page.evaluate("result") == "some value"
async def test_fill_should_be_able_to_fill_the_body(page):
await page.set_content('<body contentEditable="true"></body>')
await page.fill("body", "some value")
assert await page.evaluate("document.body.textContent") == "some value"
async def test_fill_should_fill_fixed_position_input(page):
await page.set_content('<input style="position: fixed;" />')
await page.fill("input", "some value")
assert await page.evaluate("document.querySelector('input').value") == "some value"
async def test_fill_should_be_able_to_fill_when_focus_is_in_the_wrong_frame(page):
await page.set_content(
"""
<div contentEditable="true"></div>
<iframe></iframe>
"""
)
await page.focus("iframe")
await page.fill("div", "some value")
assert await page.eval_on_selector("div", "d => d.textContent") == "some value"
async def test_fill_should_be_able_to_fill_the_input_type_number_(page):
await page.set_content('<input id="input" type="number"></input>')
await page.fill("input", "42")
assert await page.evaluate("input.value") == "42"
async def test_fill_should_be_able_to_fill_exponent_into_the_input_type_number_(page):
await page.set_content('<input id="input" type="number"></input>')
await page.fill("input", "-10e5")
assert await page.evaluate("input.value") == "-10e5"
async def test_fill_should_be_able_to_fill_input_type_number__with_empty_string(page):
await page.set_content('<input id="input" type="number" value="123"></input>')
await page.fill("input", "")
assert await page.evaluate("input.value") == ""
async def test_fill_should_not_be_able_to_fill_text_into_the_input_type_number_(page):
    """Non-numeric text must be rejected by a number input."""
    await page.set_content('<input id="input" type="number"></input>')
    raised = None
    try:
        await page.fill("input", "abc")
    except Error as error:
        raised = error
    assert raised is not None
    assert "Cannot type text into input[type=number]" in raised.message
async def test_fill_should_be_able_to_clear(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.fill("input", "some value")
assert await page.evaluate("result") == "some value"
await page.fill("input", "")
assert await page.evaluate("result") == ""
async def test_close_event_should_work_with_window_close(page, server):
async with page.expect_popup() as popup_info:
await page.evaluate("window['newPage'] = window.open('about:blank')")
popup = await popup_info.value
async with popup.expect_event("close"):
await page.evaluate("window['newPage'].close()")
async def test_close_event_should_work_with_page_close(context, server):
page = await context.new_page()
async with page.expect_event("close"):
await page.close()
async def test_page_context_should_return_the_correct_browser_instance(page, context):
assert page.context == context
async def test_frame_should_respect_name(page, server):
await page.set_content("<iframe name=target></iframe>")
assert page.frame(name="bogus") is None
frame = page.frame(name="target")
assert frame
assert frame == page.main_frame.child_frames[0]
async def test_frame_should_respect_url(page, server):
await page.set_content(f'<iframe src="{server.EMPTY_PAGE}"></iframe>')
assert page.frame(url=re.compile(r"bogus")) is None
assert page.frame(url=re.compile(r"empty")).url == server.EMPTY_PAGE
async def test_press_should_work(page, server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.press("textarea", "a")
assert await page.evaluate("document.querySelector('textarea').value") == "a"
async def test_frame_press_should_work(page, server):
await page.set_content(
f'<iframe name=inner src="{server.PREFIX}/input/textarea.html"></iframe>'
)
frame = page.frame("inner")
await frame.press("textarea", "a")
assert await frame.evaluate("document.querySelector('textarea').value") == "a"
async def test_should_emulate_reduced_motion(page, server):
    """emulate_media(reduced_motion=...) should flip the matching media query."""
    no_pref = "matchMedia('(prefers-reduced-motion: no-preference)').matches"
    reduce_query = "matchMedia('(prefers-reduced-motion: reduce)').matches"
    assert await page.evaluate(no_pref)
    await page.emulate_media(reduced_motion="reduce")
    assert await page.evaluate(reduce_query)
    assert not await page.evaluate(no_pref)
    await page.emulate_media(reduced_motion="no-preference")
    assert not await page.evaluate(reduce_query)
    assert await page.evaluate(no_pref)
async def test_input_value(page: Page, server: Server):
await page.goto(server.PREFIX + "/input/textarea.html")
await page.fill("input", "my-text-content")
assert await page.input_value("input") == "my-text-content"
await page.fill("input", "")
assert await page.input_value("input") == ""
async def test_drag_and_drop_helper_method(page: Page, server: Server):
await page.goto(server.PREFIX + "/drag-n-drop.html")
await page.drag_and_drop("#source", "#target")
assert (
await page.eval_on_selector(
"#target", "target => target.contains(document.querySelector('#source'))"
)
is True
)
async def test_should_check_box_using_set_checked(page: Page):
    """set_checked should toggle a checkbox on and off."""
    # The markup was wrapped in stray JS-template backticks ("`...`"), which
    # injected literal backtick text nodes into the page; drop them.
    await page.set_content("<input id='checkbox' type='checkbox'></input>")
    await page.set_checked("input", True)
    assert await page.evaluate("checkbox.checked") is True
    await page.set_checked("input", False)
    assert await page.evaluate("checkbox.checked") is False
async def test_should_set_bodysize_and_headersize(page: Page, server: Server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_request("*/**") as request_info:
await page.evaluate(
"() => fetch('./get', { method: 'POST', body: '12345'}).then(r => r.text())"
)
request = await request_info.value
sizes = await request.sizes()
assert sizes["requestBodySize"] == 5
assert sizes["requestHeadersSize"] >= 300
async def test_should_set_bodysize_to_0(page: Page, server: Server):
await page.goto(server.EMPTY_PAGE)
async with page.expect_request("*/**") as request_info:
await page.evaluate("() => fetch('./get').then(r => r.text())")
request = await request_info.value
sizes = await request.sizes()
assert sizes["requestBodySize"] == 0
assert sizes["requestHeadersSize"] >= 200
@pytest.mark.skip_browser("webkit") # https://bugs.webkit.org/show_bug.cgi?id=225281
async def test_should_emulate_forced_colors(page):
assert await page.evaluate("matchMedia('(forced-colors: none)').matches")
await page.emulate_media(forced_colors="none")
assert await page.evaluate("matchMedia('(forced-colors: none)').matches")
assert not await page.evaluate("matchMedia('(forced-colors: active)').matches")
await page.emulate_media(forced_colors="active")
assert await page.evaluate("matchMedia('(forced-colors: active)').matches")
assert not await page.evaluate("matchMedia('(forced-colors: none)').matches")
async def test_should_not_throw_when_continuing_while_page_is_closing(
    page: Page, server: Server
):
    """route.continue_() racing page.close() must not raise."""
    pending = None

    def handle_route(route: Route) -> None:
        nonlocal pending
        # Kick off continue and close concurrently without awaiting here.
        pending = asyncio.gather(route.continue_(), page.close())

    await page.route("**/*", handle_route)
    with pytest.raises(Error):
        await page.goto(server.EMPTY_PAGE)
    await pending
async def test_should_not_throw_when_continuing_after_page_is_closed(
page: Page, server: Server
):
done = asyncio.Future()
async def handle_route(route: Route) -> None:
await page.close()
await route.continue_()
nonlocal done
done.set_result(True)
await page.route("**/*", handle_route)
with pytest.raises(Error):
await page.goto(server.EMPTY_PAGE)
await done
| 35.418009 | 109 | 0.689982 |
acf02dcd44496ddc4917d59e69c42758a64b1f06 | 2,149 | py | Python | lingvodoc/schema/gql_tasks.py | SegFaulti4/lingvodoc | 8b296b43453a46b814d3cd381f94382ebcb9c6a6 | [
"Apache-2.0"
] | 5 | 2017-03-30T18:02:11.000Z | 2021-07-20T16:02:34.000Z | lingvodoc/schema/gql_tasks.py | SegFaulti4/lingvodoc | 8b296b43453a46b814d3cd381f94382ebcb9c6a6 | [
"Apache-2.0"
] | 15 | 2016-02-24T13:16:59.000Z | 2021-09-03T11:47:15.000Z | lingvodoc/schema/gql_tasks.py | Winking-maniac/lingvodoc | f037bf0e91ccdf020469037220a43e63849aa24a | [
"Apache-2.0"
] | 22 | 2015-09-25T07:13:40.000Z | 2021-08-04T18:08:26.000Z | import graphene
from lingvodoc.schema.gql_holders import (
LingvodocObjectType,
CompositeIdHolder,
AdditionalMetadata,
CreatedAt,
MarkedForDeletion,
Relationship,
MovedTo,
fetch_object,
client_id_check,
del_object,
acl_check_by_id,
ResponseError,
LingvodocID,
ObjectVal
)
from lingvodoc.cache.caching import TaskStatus
class Task(LingvodocObjectType):
    """GraphQL type describing a background task's status.

    Tasks are not backed by a database table (``dbType`` is None); their
    state is presumably the data tracked per task in the cache (see
    ``TaskStatus``, which ``DeleteTask`` uses for lookup) -- progress/stage
    counters, result links and a human-readable status.
    """
    dbType = None
    id = graphene.String()
    key = graphene.String()
    progress = graphene.Int()
    result_link_list = graphene.List(graphene.String)
    status = graphene.String()
    task_details = graphene.String()
    task_family = graphene.String()
    total_stages = graphene.Int()
    current_stage = graphene.Int()
    user_id = graphene.Int()
    def resolve_user_id(self, info):
        # Stored value may not be an int; coerce for the Int field.
        return int(self.user_id)
class DeleteTask(graphene.Mutation):
    """Mutation deleting a background task's cached status.

    Takes the task id as a string, looks the task up in the cache under the
    "task:<id>" key and deletes it when present. Returns triumph=True
    regardless of whether a matching task existed.

    (The previous docstring documented an unrelated create_user mutation and
    was replaced.)
    """
    class Arguments:
        task_id = graphene.String(required=True)
    triumph = graphene.Boolean()
    @staticmethod
    def mutate(root, info, **args):
        # Task statuses live in the cache under "task:<task_id>" keys.
        task_id = args.get('task_id')
        task_key = "task:" + task_id
        task = TaskStatus.get_from_cache(task_key)
        if task:
            task.delete()
        return DeleteTask(triumph=True)
acf02e04a764ac85f0ca7d72892746be7e296a4e | 583 | py | Python | tests/records/test_glossary.py | geo-bl-ch/pyramid_oereb | 767375a4adda4589e12c4257377fc30258cdfcb3 | [
"BSD-2-Clause"
] | null | null | null | tests/records/test_glossary.py | geo-bl-ch/pyramid_oereb | 767375a4adda4589e12c4257377fc30258cdfcb3 | [
"BSD-2-Clause"
] | 3 | 2019-12-26T17:00:44.000Z | 2022-03-21T22:16:54.000Z | tests/records/test_glossary.py | geo-bl-ch/pyramid_oereb | 767375a4adda4589e12c4257377fc30258cdfcb3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import pytest
from pyramid_oereb.lib.records.glossary import GlossaryRecord
def test_mandatory_fields():
    """Constructing a GlossaryRecord without arguments must fail."""
    raised = False
    try:
        GlossaryRecord()
    except TypeError:
        raised = True
    assert raised
def test_init():
    """A record keeps its multilingual title and content."""
    title = {'fr': u'SGRF'}
    content = {'fr': u'Service de la géomatique et du registre foncier'}
    record = GlossaryRecord(title, content)
    assert record.title.get('fr') == u'SGRF'
    assert record.content is not None
    # On Python 2 text is `unicode`; the conditional keeps the name lazy.
    expected_type = unicode if sys.version_info.major == 2 else str  # noqa
    assert isinstance(record.content.get('fr'), expected_type)
acf031929c745070d67d1d1ef66d667a27f357d1 | 6,347 | py | Python | peru/runtime.py | olson-sean-k/peru | 67b14affa621faae2ac4e0ee680065ab611d56d9 | [
"MIT"
] | null | null | null | peru/runtime.py | olson-sean-k/peru | 67b14affa621faae2ac4e0ee680065ab611d56d9 | [
"MIT"
] | null | null | null | peru/runtime.py | olson-sean-k/peru | 67b14affa621faae2ac4e0ee680065ab611d56d9 | [
"MIT"
] | null | null | null | import asyncio
import collections
import os
from pathlib import Path
import tempfile
from . import cache
from . import compat
from .error import PrintableError
from . import display
from .keyval import KeyVal
from . import parser
from . import plugin
@asyncio.coroutine
def Runtime(args, env):
    """Asynchronously construct a _Runtime (use this, not _Runtime directly)."""
    runtime = _Runtime(args, env)
    yield from runtime._init_cache()
    return runtime
class _Runtime:
    """Per-invocation state: resolved paths, cache, overrides and locks.

    Don't instantiate directly -- use the Runtime() coroutine above, which
    also performs the async cache initialization.
    """
    def __init__(self, args, env):
        "Don't instantiate this class directly. Use the Runtime() constructor."
        self._set_paths(args, env)
        compat.makedirs(self.state_dir)
        self._tmp_root = os.path.join(self.state_dir, 'tmp')
        compat.makedirs(self._tmp_root)
        self.overrides = KeyVal(os.path.join(self.state_dir, 'overrides'),
                                self._tmp_root)
        self.force = args.get('--force', False)
        if args['--quiet'] and args['--verbose']:
            raise PrintableError(
                "Peru can't be quiet and verbose at the same time.")
        self.quiet = args['--quiet']
        self.verbose = args['--verbose']
        self.no_overrides = args.get('--no-overrides', False)
        self.no_cache = args.get('--no-cache', False)
        # Use a semaphore (a lock that allows N holders at once) to limit the
        # number of fetches that can run in parallel.
        num_fetches = _get_parallel_fetch_limit(args)
        self.fetch_semaphore = asyncio.BoundedSemaphore(num_fetches)
        # Use locks to make sure the same cache keys don't get double fetched.
        self.cache_key_locks = collections.defaultdict(asyncio.Lock)
        # Use a different set of locks to make sure that plugin cache dirs are
        # only used by one job at a time.
        self.plugin_cache_locks = collections.defaultdict(asyncio.Lock)
        self.display = get_display(args)
    @asyncio.coroutine
    def _init_cache(self):
        # Async part of construction: Cache() is itself a coroutine.
        self.cache = yield from cache.Cache(self.cache_dir)
    def _set_paths(self, args, env):
        # Resolve peru_file, sync_dir, state_dir and cache_dir from CLI args
        # and the environment; --file and --sync-dir must come as a pair.
        explicit_peru_file = args['--file']
        explicit_sync_dir = args['--sync-dir']
        explicit_basename = args['--file-basename']
        if explicit_peru_file and explicit_basename:
            raise CommandLineError(
                'Cannot use both --file and --file-basename at the same time.')
        if explicit_peru_file and explicit_sync_dir:
            self.peru_file = explicit_peru_file
            self.sync_dir = explicit_sync_dir
        elif explicit_peru_file or explicit_sync_dir:
            raise CommandLineError(
                'If the --file or --sync-dir is set, '
                'the other must also be set.')
        else:
            basename = explicit_basename or parser.DEFAULT_PERU_FILE_NAME
            self.peru_file = find_project_file(os.getcwd(), basename)
            self.sync_dir = os.path.dirname(self.peru_file)
        self.state_dir = (args['--state-dir'] or
                          os.path.join(self.sync_dir, '.peru'))
        self.cache_dir = (args['--cache-dir'] or env.get('PERU_CACHE_DIR') or
                          os.path.join(self.state_dir, 'cache'))
    def tmp_dir(self):
        # Create a temporary directory under this runtime's private tmp root.
        dir = tempfile.TemporaryDirectory(dir=self._tmp_root)
        return dir
    def set_override(self, name, path):
        if not os.path.isabs(path):
            # We can't store relative paths as given, because peru could be
            # running from a different working dir next time. But we don't want
            # to absolutify everything, because the user might want the paths
            # to be relative (for example, so a whole workspace can be moved as
            # a group while preserving all the overrides). So reinterpret all
            # relative paths from the project root.
            path = os.path.relpath(path, start=self.sync_dir)
        self.overrides[name] = path
    def get_override(self, name):
        # Returns None when overrides are disabled or no override is stored.
        if self.no_overrides or name not in self.overrides:
            return None
        path = self.overrides[name]
        if not os.path.isabs(path):
            # Relative paths are stored relative to the project root.
            # Reinterpret them relative to the cwd. See the above comment in
            # set_override.
            path = os.path.relpath(os.path.join(self.sync_dir, path))
        return path
    def get_plugin_context(self):
        return plugin.PluginContext(
            # Plugin cwd is always the directory containing peru.yaml, even if
            # the sync_dir has been explicitly set elsewhere. That's because
            # relative paths in peru.yaml should respect the location of that
            # file.
            cwd=str(Path(self.peru_file).parent),
            plugin_cache_root=self.cache.plugins_root,
            parallelism_semaphore=self.fetch_semaphore,
            plugin_cache_locks=self.plugin_cache_locks,
            tmp_root=self._tmp_root)
def find_project_file(start_dir, basename):
    """Walk upward from *start_dir* until a file named *basename* is found.

    Returns the path of the first match. Raises PrintableError when a
    matching directory entry exists but is not a regular file, or when the
    filesystem root is reached without a match.
    """
    current = os.path.abspath(start_dir)
    while True:
        candidate = os.path.join(current, basename)
        if os.path.isfile(candidate):
            return candidate
        if os.path.exists(candidate):
            raise PrintableError(
                "Found {}, but it's not a file.".format(candidate))
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding the file.
            raise PrintableError("Can't find " + basename)
        current = parent
def _get_parallel_fetch_limit(args):
jobs = args.get('--jobs')
if jobs is None:
return plugin.DEFAULT_PARALLEL_FETCH_LIMIT
try:
parallel = int(jobs)
if parallel <= 0:
raise PrintableError('Argument to --jobs must be 1 or more.')
return parallel
except:
raise PrintableError('Argument to --jobs must be a number.')
def get_display(args):
    """Pick a Display implementation from the verbosity flags and terminal."""
    if args['--quiet']:
        return display.QuietDisplay()
    if args['--verbose']:
        return display.VerboseDisplay()
    if compat.is_fancy_terminal():
        return display.FancyDisplay()
    return display.QuietDisplay()
class CommandLineError(PrintableError):
    """Raised for invalid or inconsistent command-line arguments."""
    pass
| 37.556213 | 79 | 0.635103 |
acf032d33d9b0e135ae39dcafd51eab550f5fa10 | 41,047 | py | Python | openml/datasets/functions.py | a-moadel/openml-python | fba6aabfb1592cf4c375f12703bc25615998a9a2 | [
"BSD-3-Clause"
] | null | null | null | openml/datasets/functions.py | a-moadel/openml-python | fba6aabfb1592cf4c375f12703bc25615998a9a2 | [
"BSD-3-Clause"
] | null | null | null | openml/datasets/functions.py | a-moadel/openml-python | fba6aabfb1592cf4c375f12703bc25615998a9a2 | [
"BSD-3-Clause"
] | null | null | null | # License: BSD 3-Clause
import io
import logging
import os
from typing import List, Dict, Union, Optional
import numpy as np
import arff
import pandas as pd
import xmltodict
from scipy.sparse import coo_matrix
from collections import OrderedDict
import openml.utils
import openml._api_calls
from .dataset import OpenMLDataset
from ..exceptions import (
OpenMLHashException,
OpenMLServerException,
OpenMLPrivateDatasetError,
)
from ..utils import (
_remove_cache_dir_for_id,
_create_cache_directory_for_id,
)
DATASETS_CACHE_DIR_NAME = "datasets"
logger = logging.getLogger(__name__)
############################################################################
# Local getters/accessors to the cache directory
def _get_cache_directory(dataset: OpenMLDataset) -> str:
    """Return the cache directory for the given OpenMLDataset."""
    return _create_cache_directory_for_id(
        DATASETS_CACHE_DIR_NAME, dataset.dataset_id,
    )
def list_qualities() -> List[str]:
    """Return the list of data qualities available on the server.

    Performs an API call to retrieve the entire list of data qualities
    that are computed on the uploaded datasets.

    Returns
    -------
    list
    """
    xml_string = openml._api_calls._perform_api_call("data/qualities/list", "get")
    parsed = xmltodict.parse(xml_string, force_list=("oml:quality"))
    # Minimalistic sanity checks on the returned XML.
    if "oml:data_qualities_list" not in parsed:
        raise ValueError("Error in return XML, does not contain " '"oml:data_qualities_list"')
    quality_list = parsed["oml:data_qualities_list"]["oml:quality"]
    if not isinstance(quality_list, list):
        raise TypeError("Error in return XML, does not contain " '"oml:quality" as a list')
    return quality_list
def list_datasets(
    data_id: Optional[List[int]] = None,
    offset: Optional[int] = None,
    size: Optional[int] = None,
    status: Optional[str] = None,
    tag: Optional[str] = None,
    output_format: str = "dict",
    **kwargs,
) -> Union[Dict, pd.DataFrame]:
    """Return a listing of the datasets on OpenML.

    Supports large amounts of results.

    Parameters
    ----------
    data_id : list, optional
        A list of data ids, to specify which datasets should be listed.
    offset : int, optional
        The number of datasets to skip, starting from the first.
    size : int, optional
        The maximum number of datasets to show.
    status : str, optional
        Should be {active, in_preparation, deactivated}. By default active
        datasets are returned, but datasets with another status can also be
        requested.
    tag : str, optional
    output_format : str, optional (default='dict')
        The parameter decides the format of the output.
        - If 'dict' the output is a dict of dict
        - If 'dataframe' the output is a pandas DataFrame
    kwargs : dict, optional
        Legal filter operators (keys in the dict):
        data_name, data_version, number_instances,
        number_features, number_classes, number_missing_values.

    Returns
    -------
    datasets : dict of dicts, or dataframe
        With output_format='dict', a mapping from dataset ID to a dict that
        contains at least the dataset id, name, format and status (plus any
        computed qualities). With output_format='dataframe', one row per
        dataset with the same information as columns.
    """
    if output_format not in ("dict", "dataframe"):
        raise ValueError(
            "Invalid output format selected. Only 'dict' or 'dataframe' applicable."
        )
    return openml.utils._list_all(
        data_id=data_id,
        output_format=output_format,
        listing_call=_list_datasets,
        offset=offset,
        size=size,
        status=status,
        tag=tag,
        **kwargs,
    )
def _list_datasets(data_id: Optional[List] = None, output_format="dict", **kwargs):
    """Build the listing API call from the filters and delegate to it.

    Parameters
    ----------
    data_id : list, optional
        Dataset ids to restrict the listing to.
    output_format : str, optional (default='dict')
        'dict' for a dict of dicts, 'dataframe' for a pandas DataFrame.
    kwargs : dict, optional
        Legal filter operators (keys in the dict):
        tag, status, limit, offset, data_name, data_version,
        number_instances, number_features, number_classes,
        number_missing_values.

    Returns
    -------
    datasets : dict of dicts, or dataframe
    """
    api_call = "data/list"
    for operator, value in kwargs.items():
        api_call += "/%s/%s" % (operator, value)
    if data_id is not None:
        id_part = ",".join(str(int(i)) for i in data_id)
        api_call += "/data_id/%s" % id_part
    return __list_datasets(api_call=api_call, output_format=output_format)
def __list_datasets(api_call, output_format="dict"):
    """Execute a prepared ``data/list`` API call and parse the response.

    Parameters
    ----------
    api_call : str
        Fully-built REST path, including all filter segments.
    output_format : str, optional (default='dict')
        'dict' returns a mapping from dataset id to description dict;
        'dataframe' returns the same information as a pandas DataFrame
        indexed by dataset id.

    Returns
    -------
    dict of dicts, or dataframe
    """
    xml_string = openml._api_calls._perform_api_call(api_call, "get")
    # force_list guarantees a list even when only a single dataset matches.
    datasets_dict = xmltodict.parse(xml_string, force_list=("oml:dataset",))

    # Minimalistic check if the XML is useful
    # (isinstance is the idiomatic type check; `type(x) == list` would also
    # reject list subclasses for no reason).
    assert isinstance(datasets_dict["oml:data"]["oml:dataset"], list), type(
        datasets_dict["oml:data"]
    )
    assert datasets_dict["oml:data"]["@xmlns:oml"] == "http://openml.org/openml", datasets_dict[
        "oml:data"
    ]["@xmlns:oml"]

    datasets = dict()
    for dataset_ in datasets_dict["oml:data"]["oml:dataset"]:
        # file_id is dropped and qualities get special handling below;
        # everything else is copied with the "oml:" prefix stripped.
        ignore_attribute = ["oml:file_id", "oml:quality"]
        dataset = {
            k.replace("oml:", ""): v for (k, v) in dataset_.items() if k not in ignore_attribute
        }
        dataset["did"] = int(dataset["did"])
        dataset["version"] = int(dataset["version"])

        # The number of qualities can range from 0 to infinity
        for quality in dataset_.get("oml:quality", list()):
            try:
                dataset[quality["@name"]] = int(quality["#text"])
            except ValueError:
                # not an integer quality value -> store as float
                dataset[quality["@name"]] = float(quality["#text"])
        datasets[dataset["did"]] = dataset

    if output_format == "dataframe":
        datasets = pd.DataFrame.from_dict(datasets, orient="index")
    return datasets
def _expand_parameter(parameter: Union[str, List[str]]) -> List[str]:
expanded_parameter = []
if isinstance(parameter, str):
expanded_parameter = [x.strip() for x in parameter.split(",")]
elif isinstance(parameter, list):
expanded_parameter = parameter
return expanded_parameter
def _validated_data_attributes(
attributes: List[str], data_attributes: List[str], parameter_name: str
) -> None:
for attribute_ in attributes:
is_attribute_a_data_attribute = any([attr[0] == attribute_ for attr in data_attributes])
if not is_attribute_a_data_attribute:
raise ValueError(
"all attribute of '{}' should be one of the data attribute. "
" Got '{}' while candidates are {}.".format(
parameter_name, attribute_, [attr[0] for attr in data_attributes]
)
)
def check_datasets_active(
    dataset_ids: List[int], raise_error_if_not_exist: bool = True,
) -> Dict[int, bool]:
    """Report which of the given dataset ids are active on the server.

    Parameters
    ----------
    dataset_ids : List[int]
        A list of integers representing dataset ids.
    raise_error_if_not_exist : bool (default=True)
        If True, raise a ValueError when one of the given ids does not
        exist on the server; if False, missing ids are silently omitted
        from the result.

    Returns
    -------
    dict
        A dictionary with items {did: bool}, True meaning 'active'.
    """
    listing = list_datasets(status="all", data_id=dataset_ids)
    active = {}
    for dataset_id in dataset_ids:
        entry = listing.get(dataset_id)
        if entry is not None:
            active[dataset_id] = entry["status"] == "active"
        elif raise_error_if_not_exist:
            raise ValueError(f"Could not find dataset {dataset_id} in OpenML dataset list.")
    return active
def _name_to_id(
    dataset_name: str, version: Optional[int] = None, error_if_multiple: bool = False
) -> int:
    """Resolve a dataset name to a dataset id.

    If several datasets share the name and ``error_if_multiple`` is False,
    the oldest still-active dataset is chosen. Raises if no dataset with
    the name (and version, when given) exists.

    Parameters
    ----------
    dataset_name : str
        The name of the dataset for which to find its id.
    version : int
        Version to retrieve. If not specified, the oldest active version is returned.
    error_if_multiple : bool (default=False)
        If `False`, if multiple datasets match, return the least recent active dataset.
        If `True`, if multiple datasets match, raise an error.

    Returns
    -------
    int
        The id of the dataset.
    """
    # When a specific version is requested we must not filter on status,
    # otherwise deactivated versions could never be resolved.
    status = "active" if version is None else None
    candidates = list_datasets(data_name=dataset_name, status=status, data_version=version)
    if error_if_multiple and len(candidates) > 1:
        raise ValueError("Multiple active datasets exist with name {}".format(dataset_name))
    if not candidates:
        message = "No active datasets exist with name {}".format(dataset_name)
        if version is not None:
            message += " and version {}".format(version)
        raise RuntimeError(message)
    # Dataset ids are assigned chronologically, so the smallest id is the oldest.
    return min(candidates)
def get_datasets(
    dataset_ids: List[Union[str, int]], download_data: bool = True,
) -> List[OpenMLDataset]:
    """Download multiple datasets by delegating to :meth:`openml.datasets.get_dataset`.

    Parameters
    ----------
    dataset_ids : iterable
        Integers or strings representing dataset ids or dataset names.
        If dataset names are specified, the least recent still active dataset version is returned.
    download_data : bool, optional
        If True, also download each data file (this can be slow for large
        datasets). If False, only the metadata is fetched; the data can be
        loaded later through the `OpenMLDataset.get_data` method.

    Returns
    -------
    datasets : list of datasets
        One dataset object per requested id, in the same order.
    """
    return [get_dataset(dataset_id, download_data) for dataset_id in dataset_ids]
@openml.utils.thread_safe_if_oslo_installed
def get_dataset(
    dataset_id: Union[int, str],
    download_data: bool = True,
    version: int = None,
    error_if_multiple: bool = False,
    cache_format: str = "pickle",
) -> OpenMLDataset:
    """ Download the OpenML dataset representation, optionally also download actual data file.

    This function is thread/multiprocessing safe.
    This function uses caching. A check will be performed to determine if the information has
    previously been downloaded, and if so be loaded from disk instead of retrieved from the server.

    If dataset is retrieved by name, a version may be specified.
    If no version is specified and multiple versions of the dataset exist,
    the earliest version of the dataset that is still active will be returned.
    If no version is specified, multiple versions of the dataset exist and
    ``error_if_multiple`` is set to ``True``, this function will raise an exception.

    Parameters
    ----------
    dataset_id : int or str
        Dataset ID of the dataset to download
    download_data : bool, optional (default=True)
        If True, also download the data file. Beware that some datasets are large and it might
        make the operation noticeably slower. Metadata is also still retrieved.
        If False, create the OpenMLDataset and only populate it with the metadata.
        The data may later be retrieved through the `OpenMLDataset.get_data` method.
    version : int, optional (default=None)
        Specifies the version if `dataset_id` is specified by name.
        If no version is specified, retrieve the least recent still active version.
    error_if_multiple : bool, optional (default=False)
        If ``True`` raise an error if multiple datasets are found with matching criteria.
    cache_format : str, optional (default='pickle')
        Format for caching the dataset - may be feather or pickle
        Note that the default 'pickle' option may load slower than feather when
        no.of.rows is very high.

    Returns
    -------
    dataset : :class:`openml.OpenMLDataset`
        The downloaded dataset.
    """
    if cache_format not in ["feather", "pickle"]:
        # Fixed error message: the closing quote after 'pickle' was missing.
        raise ValueError(
            "cache_format must be one of 'feather' or 'pickle'. "
            "Invalid format specified: {}".format(cache_format)
        )
    # Resolve names to numeric ids; numeric strings are accepted as ids.
    if isinstance(dataset_id, str):
        try:
            dataset_id = int(dataset_id)
        except ValueError:
            dataset_id = _name_to_id(dataset_id, version, error_if_multiple)  # type: ignore
    elif not isinstance(dataset_id, int):
        raise TypeError(
            "`dataset_id` must be one of `str` or `int`, not {}.".format(type(dataset_id))
        )
    did_cache_dir = _create_cache_directory_for_id(DATASETS_CACHE_DIR_NAME, dataset_id,)
    # If anything below fails, the (possibly partial) cache entry is removed
    # in the finally-block so a later call starts from a clean state.
    remove_dataset_cache = True
    try:
        description = _get_dataset_description(did_cache_dir, dataset_id)
        features_file = _get_dataset_features_file(did_cache_dir, dataset_id)
        try:
            qualities_file = _get_dataset_qualities_file(did_cache_dir, dataset_id)
        except OpenMLServerException as e:
            # Code 362: the server has no qualities for this dataset. That is
            # not fatal; the dataset is simply built without qualities.
            if e.code == 362 and str(e) == "No qualities found - None":
                logger.warning("No qualities found for dataset {}".format(dataset_id))
                qualities_file = None
            else:
                raise
        arff_file = _get_dataset_arff(description) if download_data else None
        remove_dataset_cache = False
    except OpenMLServerException as e:
        # Code 112: the dataset exists but the user lacks access rights.
        if e.code == 112:
            raise OpenMLPrivateDatasetError(e.message) from None
        else:
            raise e
    finally:
        if remove_dataset_cache:
            _remove_cache_dir_for_id(DATASETS_CACHE_DIR_NAME, did_cache_dir)
    dataset = _create_dataset_from_description(
        description, features_file, qualities_file, arff_file, cache_format
    )
    return dataset
def attributes_arff_from_df(df):
    """ Describe attributes of the dataframe according to ARFF specification.

    Parameters
    ----------
    df : DataFrame, shape (n_samples, n_features)
        The dataframe containing the data set.

    Returns
    -------
    attributes_arff : list of (str, str or list) tuples
        The data set attributes as required by the ARFF format: one tuple
        per column mapping the column name either to an ARFF type string
        (INTEGER/REAL/STRING) or, for categorical/boolean columns, to the
        list of category labels.
    """
    PD_DTYPES_TO_ARFF_DTYPE = {"integer": "INTEGER", "floating": "REAL", "string": "STRING"}
    attributes_arff = []

    # ARFF requires string attribute names; coerce non-str column names.
    # (Generator expression: no need to materialize a list for all().)
    if not all(isinstance(column_name, str) for column_name in df.columns):
        logger.warning("Converting non-str column names to str.")
        df.columns = [str(column_name) for column_name in df.columns]

    for column_name in df:
        # skipna=True does not infer properly the dtype. The NA values are
        # dropped before the inference instead.
        column_dtype = pd.api.types.infer_dtype(df[column_name].dropna(), skipna=False)

        if column_dtype == "categorical":
            # for categorical feature, arff expects a list of strings. However, a
            # categorical column can contain mixed type and should therefore
            # raise an error asking to convert all entries to string.
            categories = df[column_name].cat.categories
            categories_dtype = pd.api.types.infer_dtype(categories)
            if categories_dtype not in ("string", "unicode"):
                raise ValueError(
                    "The column '{}' of the dataframe is of "
                    "'category' dtype. Therefore, all values in "
                    "this columns should be string. Please "
                    "convert the entries which are not string. "
                    "Got {} dtype in this column.".format(column_name, categories_dtype)
                )
            attributes_arff.append((column_name, categories.tolist()))
        elif column_dtype == "boolean":
            # boolean are encoded as categorical.
            attributes_arff.append((column_name, ["True", "False"]))
        elif column_dtype in PD_DTYPES_TO_ARFF_DTYPE:
            # Membership test on the dict directly; .keys() was redundant.
            attributes_arff.append((column_name, PD_DTYPES_TO_ARFF_DTYPE[column_dtype]))
        else:
            raise ValueError(
                "The dtype '{}' of the column '{}' is not "
                "currently supported by liac-arff. Supported "
                "dtypes are categorical, string, integer, "
                "floating, and boolean.".format(column_dtype, column_name)
            )
    return attributes_arff
def create_dataset(
    name,
    description,
    creator,
    contributor,
    collection_date,
    language,
    licence,
    attributes,
    data,
    default_target_attribute,
    ignore_attribute,
    citation,
    row_id_attribute=None,
    original_data_url=None,
    paper_url=None,
    update_comment=None,
    version_label=None,
):
    """Create a dataset.

    This function creates an OpenMLDataset object.
    The OpenMLDataset object contains information related to the dataset
    and the actual data file.

    Parameters
    ----------
    name : str
        Name of the dataset.
    description : str
        Description of the dataset.
    creator : str
        The person who created the dataset.
    contributor : str
        People who contributed to the current version of the dataset.
    collection_date : str
        The date the data was originally collected, given by the uploader.
    language : str
        Language in which the data is represented.
        Starts with 1 upper case letter, rest lower case, e.g. 'English'.
    licence : str
        License of the data.
    attributes : list, dict, or 'auto'
        A list of tuples. Each tuple consists of the attribute name and type.
        If passing a pandas DataFrame, the attributes can be automatically
        inferred by passing ``'auto'``. Specific attributes can be manually
        specified by a passing a dictionary where the key is the name of the
        attribute and the value is the data type of the attribute.
    data : ndarray, list, dataframe, coo_matrix, shape (n_samples, n_features)
        An array that contains both the attributes and the targets. When
        providing a dataframe, the attribute names and type can be inferred by
        passing ``attributes='auto'``.
        The target feature is indicated as meta-data of the dataset.
    default_target_attribute : str
        The default target attribute, if it exists.
        Can have multiple values, comma separated.
    ignore_attribute : str | list
        Attributes that should be excluded in modelling,
        such as identifiers and indexes.
        Can have multiple values, comma separated.
    citation : str
        Reference(s) that should be cited when building on this data.
    version_label : str, optional
        Version label provided by user.
        Can be a date, hash, or some other type of id.
    row_id_attribute : str, optional
        The attribute that represents the row-id column, if present in the
        dataset. If ``data`` is a dataframe and ``row_id_attribute`` is not
        specified, the index of the dataframe will be used as the
        ``row_id_attribute``. If the name of the index is ``None``, it will
        be discarded.

        .. versionadded: 0.8
            Inference of ``row_id_attribute`` from a dataframe.
    original_data_url : str, optional
        For derived data, the url to the original dataset.
    paper_url : str, optional
        Link to a paper describing the dataset.
    update_comment : str, optional
        An explanation for when the dataset is uploaded.

    Returns
    -------
    class:`openml.OpenMLDataset`
        Dataset description."""
    if isinstance(data, pd.DataFrame):
        # infer the row id from the index of the dataset
        if row_id_attribute is None:
            row_id_attribute = data.index.name
        # When calling data.values, the index will be skipped.
        # We need to reset the index such that it is part of the data.
        if data.index.name is not None:
            data = data.reset_index()

    if attributes == "auto" or isinstance(attributes, dict):
        if not hasattr(data, "columns"):
            raise ValueError(
                "Automatically inferring attributes requires "
                "a pandas DataFrame. A {!r} was given instead.".format(data)
            )
        # infer the type of data for each column of the DataFrame
        attributes_ = attributes_arff_from_df(data)
        if isinstance(attributes, dict):
            # override the inferred attributes with those the user specified
            for attr_idx in range(len(attributes_)):
                attr_name = attributes_[attr_idx][0]
                if attr_name in attributes:
                    attributes_[attr_idx] = (attr_name, attributes[attr_name])
    else:
        attributes_ = attributes

    # Validate that ignore/target attributes actually refer to data columns.
    ignore_attributes = _expand_parameter(ignore_attribute)
    _validated_data_attributes(ignore_attributes, attributes_, "ignore_attribute")

    default_target_attributes = _expand_parameter(default_target_attribute)
    _validated_data_attributes(default_target_attributes, attributes_, "default_target_attribute")

    if row_id_attribute is not None:
        is_row_id_an_attribute = any(attr[0] == row_id_attribute for attr in attributes_)
        if not is_row_id_an_attribute:
            raise ValueError(
                "'row_id_attribute' should be one of the data attribute. "
                " Got '{}' while candidates are {}.".format(
                    row_id_attribute, [attr[0] for attr in attributes_]
                )
            )

    if hasattr(data, "columns"):
        if all(isinstance(dtype, pd.SparseDtype) for dtype in data.dtypes):
            data = data.sparse.to_coo()
            # liac-arff only supports COO matrices with sorted rows
            row_idx_sorted = np.argsort(data.row)
            data.row = data.row[row_idx_sorted]
            data.col = data.col[row_idx_sorted]
            data.data = data.data[row_idx_sorted]
        else:
            data = data.values

    # Determine the serialization format from the shape of the data.
    if isinstance(data, (list, np.ndarray)):
        if isinstance(data[0], (list, np.ndarray)):
            data_format = "arff"
        elif isinstance(data[0], dict):
            data_format = "sparse_arff"
        else:
            raise ValueError(
                "When giving a list or a numpy.ndarray, "
                "they should contain a list/ numpy.ndarray "
                "for dense data or a dictionary for sparse "
                "data. Got {!r} instead.".format(data[0])
            )
    elif isinstance(data, coo_matrix):
        data_format = "sparse_arff"
    else:
        # Fixed: the previous message wrongly repeated the list/ndarray text
        # and formatted ``data[0]``, which can itself raise on non-indexable
        # inputs and mask the real error.
        raise ValueError(
            "`data` must be a list, a numpy.ndarray, a pandas DataFrame, "
            "or a scipy sparse coo_matrix. Got {!r} instead.".format(type(data))
        )

    arff_object = {
        "relation": name,
        "description": description,
        "attributes": attributes_,
        "data": data,
    }

    # serializes the ARFF dataset object and returns a string
    arff_dataset = arff.dumps(arff_object)
    try:
        # check if the serialized ARFF round-trips, i.e. is valid
        decoder = arff.ArffDecoder()
        return_type = arff.COO if data_format == "sparse_arff" else arff.DENSE
        decoder.decode(arff_dataset, encode_nominal=True, return_type=return_type)
    except arff.ArffException:
        raise ValueError(
            "The arguments you have provided \
            do not construct a valid ARFF file"
        )

    return OpenMLDataset(
        name=name,
        description=description,
        data_format=data_format,
        creator=creator,
        contributor=contributor,
        collection_date=collection_date,
        language=language,
        licence=licence,
        default_target_attribute=default_target_attribute,
        row_id_attribute=row_id_attribute,
        ignore_attribute=ignore_attribute,
        citation=citation,
        version_label=version_label,
        original_data_url=original_data_url,
        paper_url=paper_url,
        update_comment=update_comment,
        dataset=arff_dataset,
    )
def status_update(data_id, status):
    """
    Updates the status of a dataset to either 'active' or 'deactivated'.
    Please see the OpenML API documentation for a description of the status
    and all legal status transitions:
    https://docs.openml.org/#dataset-status

    Parameters
    ----------
    data_id : int
        The data id of the dataset
    status : str,
        'active' or 'deactivated'

    Raises
    ------
    ValueError
        If ``status`` is not a legal value, or if the server confirms a
        different dataset id or status than the one requested.
    """
    legal_status = {"active", "deactivated"}
    if status not in legal_status:
        raise ValueError("Illegal status value. " "Legal values: %s" % legal_status)
    data = {"data_id": data_id, "status": status}
    result_xml = openml._api_calls._perform_api_call("data/status/update", "post", data=data)
    result = xmltodict.parse(result_xml)
    server_data_id = result["oml:data_status_update"]["oml:id"]
    server_status = result["oml:data_status_update"]["oml:status"]
    # Sanity check: the server must echo exactly the requested update.
    if status != server_status or int(data_id) != int(server_data_id):
        # This should never happen.
        # (Previous message "Data id/status does not collide" was nonsensical.)
        raise ValueError(
            "Server returned a different data id or status than requested: "
            "got id {}, status {}.".format(server_data_id, server_status)
        )
def edit_dataset(
    data_id,
    description=None,
    creator=None,
    contributor=None,
    collection_date=None,
    language=None,
    default_target_attribute=None,
    ignore_attribute=None,
    citation=None,
    row_id_attribute=None,
    original_data_url=None,
    paper_url=None,
) -> int:
    """ Edits an OpenMLDataset.

    Give the dataset id and a value for at least one of the optional
    metadata arguments. Non-critical fields can be edited by any
    authenticated user; the critical fields (default_target_attribute,
    ignore_attribute, row_id_attribute) can only be edited by the owner,
    and only while no tasks are associated with the dataset. In all other
    cases, use fork_dataset followed by edit_dataset on the fork.

    Parameters
    ----------
    data_id : int
        ID of the dataset.
    description : str
        Description of the dataset.
    creator : str
        The person who created the dataset.
    contributor : str
        People who contributed to the current version of the dataset.
    collection_date : str
        The date the data was originally collected, given by the uploader.
    language : str
        Language in which the data is represented.
        Starts with 1 upper case letter, rest lower case, e.g. 'English'.
    default_target_attribute : str
        The default target attribute, if it exists.
        Can have multiple values, comma separated.
    ignore_attribute : str | list
        Attributes that should be excluded in modelling,
        such as identifiers and indexes.
    citation : str
        Reference(s) that should be cited when building on this data.
    row_id_attribute : str, optional
        The attribute that represents the row-id column, if present in the
        dataset.
    original_data_url : str, optional
        For derived data, the url to the original dataset.
    paper_url : str, optional
        Link to a paper describing the dataset.

    Returns
    -------
    Dataset id
    """
    if not isinstance(data_id, int):
        raise TypeError("`data_id` must be of type `int`, not {}.".format(type(data_id)))

    # Assemble the edit parameters as an XML document; only fields with a
    # truthy value are sent to the server.
    candidate_fields = [
        ("oml:description", description),
        ("oml:creator", creator),
        ("oml:contributor", contributor),
        ("oml:collection_date", collection_date),
        ("oml:language", language),
        ("oml:default_target_attribute", default_target_attribute),
        ("oml:row_id_attribute", row_id_attribute),
        ("oml:ignore_attribute", ignore_attribute),
        ("oml:citation", citation),
        ("oml:original_data_url", original_data_url),
        ("oml:paper_url", paper_url),
    ]
    edit_parameters = OrderedDict()
    edit_parameters["@xmlns:oml"] = "http://openml.org/openml"
    for field_tag, field_value in candidate_fields:
        if field_value:
            edit_parameters[field_tag] = field_value
    xml = OrderedDict([("oml:data_edit_parameters", edit_parameters)])

    file_elements = {"edit_parameters": ("description.xml", xmltodict.unparse(xml))}
    result_xml = openml._api_calls._perform_api_call(
        "data/edit", "post", data={"data_id": data_id}, file_elements=file_elements
    )
    parsed = xmltodict.parse(result_xml)
    return int(parsed["oml:data_edit"]["oml:id"])
def fork_dataset(data_id: int) -> int:
    """
    Creates a new dataset version owned by the authenticated user.

    The fork shares the actual data with the original dataset but can carry
    distinct meta-data. Use this when edit_dataset cannot change the
    critical fields (default_target_attribute, ignore_attribute,
    row_id_attribute) -- i.e. when the caller does not own the dataset, or
    owns it but the dataset already has tasks. The workflow is then:
    fork_dataset first, edit_dataset on the returned id second.

    Parameters
    ----------
    data_id : int
        id of the dataset to be forked

    Returns
    -------
    Dataset id of the forked dataset
    """
    if not isinstance(data_id, int):
        raise TypeError("`data_id` must be of type `int`, not {}.".format(type(data_id)))
    response = openml._api_calls._perform_api_call(
        "data/fork", "post", data={"data_id": data_id}
    )
    parsed = xmltodict.parse(response)
    return int(parsed["oml:data_fork"]["oml:id"])
def _get_dataset_description(did_cache_dir, dataset_id):
    """Get the dataset description as xml dictionary.

    This function is NOT thread/multiprocessing safe.

    Parameters
    ----------
    did_cache_dir : str
        Cache subdirectory for this dataset.
    dataset_id : int
        Dataset ID

    Returns
    -------
    dict
        XML Dataset description parsed to a dict.
    """
    # TODO implement a cache for this that invalidates itself after some time
    # This can be saved on disk, but cannot be cached properly, because
    # it contains the information on whether a dataset is active.
    description_file = os.path.join(did_cache_dir, "description.xml")
    try:
        with io.open(description_file, encoding="utf8") as fh:
            dataset_xml = fh.read()
    except (OSError, IOError):
        # Cache miss: fetch from the server and populate the cache.
        # (Previously a bare ``except Exception``, which could hide real
        # bugs; narrowed to file errors, consistent with
        # _get_dataset_qualities_file.)
        url_extension = "data/{}".format(dataset_id)
        dataset_xml = openml._api_calls._perform_api_call(url_extension, "get")
        with io.open(description_file, "w", encoding="utf8") as fh:
            fh.write(dataset_xml)

    description = xmltodict.parse(dataset_xml)["oml:data_set_description"]
    return description
def _get_dataset_arff(description: Union[Dict, OpenMLDataset], cache_directory: str = None) -> str:
    """ Return the path to the local arff file of the dataset. If is not cached, it is downloaded.

    The download is verified against the dataset's md5 checksum and stored
    under the dataset's cache directory unless ``cache_directory`` is given.

    This function is NOT thread/multiprocessing safe.

    Parameters
    ----------
    description : dictionary or OpenMLDataset
        Either a dataset description as dict or OpenMLDataset.
    cache_directory: str, optional (default=None)
        Folder to store the arff file in.
        If None, use the default cache directory for the dataset.

    Returns
    -------
    output_filename : string
        Location of ARFF file.
    """
    # Pull checksum/url/id out of either supported description form.
    if isinstance(description, dict):
        checksum = description.get("oml:md5_checksum")
        data_url = description["oml:url"]
        did = description.get("oml:id")
    elif isinstance(description, OpenMLDataset):
        checksum = description.md5_checksum
        data_url = description.url
        did = description.dataset_id
    else:
        raise TypeError("`description` should be either OpenMLDataset or Dict.")

    if cache_directory is None:
        cache_directory = _create_cache_directory_for_id(DATASETS_CACHE_DIR_NAME, did)
    output_file_path = os.path.join(cache_directory, "dataset.arff")

    try:
        openml._api_calls._download_text_file(
            source=data_url, output_path=output_file_path, md5_checksum=checksum
        )
    except OpenMLHashException as e:
        # Enrich the exception message with the dataset id for debugging.
        e.args = ("{} Raised when downloading dataset {}.".format(e.args[0], did),)
        raise

    return output_file_path
def _get_dataset_features_file(did_cache_dir: str, dataset_id: int) -> str:
    """API call to load dataset features. Loads from cache or downloads them.

    Features are feature descriptions for each column.
    (name, index, categorical, ...)

    This function is NOT thread/multiprocessing safe.

    Parameters
    ----------
    did_cache_dir : str
        Cache subdirectory for this dataset
    dataset_id : int
        Dataset ID

    Returns
    -------
    str
        Path of the cached dataset feature file
    """
    features_file = os.path.join(did_cache_dir, "features.xml")

    # Feature descriptions never change, so an existing cache file is reused.
    if os.path.isfile(features_file):
        return features_file

    features_xml = openml._api_calls._perform_api_call(
        "data/features/{}".format(dataset_id), "get"
    )
    with io.open(features_file, "w", encoding="utf8") as fh:
        fh.write(features_xml)
    return features_file
def _get_dataset_qualities_file(did_cache_dir, dataset_id):
    """API call to load dataset qualities. Loads from cache or downloads them.

    Qualities are metafeatures (number of features, number of classes, ...).

    This function is NOT thread/multiprocessing safe.

    Parameters
    ----------
    did_cache_dir : str
        Cache subdirectory for this dataset
    dataset_id : int
        Dataset ID

    Returns
    -------
    str
        Path of the cached qualities file
    """
    qualities_file = os.path.join(did_cache_dir, "qualities.xml")
    try:
        # Probe the cache; the content itself is parsed later by the caller.
        with io.open(qualities_file, encoding="utf8") as fh:
            fh.read()
    except (OSError, IOError):
        # Cache miss: download and populate the cache file.
        qualities_xml = openml._api_calls._perform_api_call(
            "data/qualities/{}".format(dataset_id), "get"
        )
        with io.open(qualities_file, "w", encoding="utf8") as fh:
            fh.write(qualities_xml)
    return qualities_file
def _create_dataset_from_description(
    description: Dict[str, str],
    features_file: str,
    qualities_file: str,
    arff_file: str = None,
    cache_format: str = "pickle",
) -> OpenMLDataset:
    """Create a dataset object from a description dict.

    Required description fields (name, format, id, version, url) are read
    with ``[]``; all other fields are optional and default to ``None``.

    Parameters
    ----------
    description : dict
        Description of a dataset in xml dict.
    features_file : str
        Path of the dataset features as xml file.
    qualities_file : str
        Path of the dataset qualities as xml file.
    arff_file : string, optional
        Path of dataset ARFF file.
    cache_format: string, optional
        Caching option for datasets (feather/pickle)

    Returns
    -------
    dataset : dataset object
        Dataset object from dict and ARFF.
    """
    return OpenMLDataset(
        description["oml:name"],
        description.get("oml:description"),
        data_format=description["oml:format"],
        dataset_id=description["oml:id"],
        version=description["oml:version"],
        creator=description.get("oml:creator"),
        contributor=description.get("oml:contributor"),
        collection_date=description.get("oml:collection_date"),
        upload_date=description.get("oml:upload_date"),
        language=description.get("oml:language"),
        licence=description.get("oml:licence"),
        url=description["oml:url"],
        default_target_attribute=description.get("oml:default_target_attribute"),
        row_id_attribute=description.get("oml:row_id_attribute"),
        ignore_attribute=description.get("oml:ignore_attribute"),
        version_label=description.get("oml:version_label"),
        citation=description.get("oml:citation"),
        tag=description.get("oml:tag"),
        visibility=description.get("oml:visibility"),
        original_data_url=description.get("oml:original_data_url"),
        paper_url=description.get("oml:paper_url"),
        update_comment=description.get("oml:update_comment"),
        md5_checksum=description.get("oml:md5_checksum"),
        data_file=arff_file,
        cache_format=cache_format,
        features_file=features_file,
        qualities_file=qualities_file,
    )
def _get_online_dataset_arff(dataset_id):
    """Download the ARFF file for a given dataset id
    from the OpenML website.

    Parameters
    ----------
    dataset_id : int
        A dataset id.

    Returns
    -------
    str
        A string representation of an ARFF file.
    """
    # Fetch the dataset description, then follow its url to the ARFF data.
    description_xml = openml._api_calls._perform_api_call("data/%d" % dataset_id, "get")
    description = xmltodict.parse(description_xml)["oml:data_set_description"]
    return openml._api_calls._download_text_file(description["oml:url"])
def _get_online_dataset_format(dataset_id):
    """Get the dataset format for a given dataset id
    from the OpenML website.

    Parameters
    ----------
    dataset_id : int
        A dataset id.

    Returns
    -------
    str
        Dataset format, lower-cased.
    """
    # Fetch the description and read the format field from it.
    description_xml = openml._api_calls._perform_api_call("data/%d" % dataset_id, "get")
    parsed = xmltodict.parse(description_xml)
    return parsed["oml:data_set_description"]["oml:format"].lower()
| 36.780466 | 99 | 0.660535 |
acf032e74c9ab97e4697d8e213fe235f5b39c696 | 2,005 | py | Python | Bing BGI.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | null | null | null | Bing BGI.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | null | null | null | Bing BGI.py | ds17/reptiles | 99418624ae4b7548bf4dc1ea834e8c75a47a0557 | [
"Apache-2.0"
] | 1 | 2021-02-20T13:17:42.000Z | 2021-02-20T13:17:42.000Z | #D:\Python\Python35\python
# -*- coding:utf-8 -*-
import re,sys,os,time,logging
import urllib.request
log_dir='D:\\WallPaper\\BingWallpaper\\bing reptile.log'
logging.basicConfig(filename=log_dir,level=logging.INFO)
file_dir='D:\\WallPaper\\BingWallpaper'
now_time=time.strftime('%Y%m%d%H%M%S')
# 通过os.walk遍历壁纸文件夹下所有文件的文件名并放到list中
e_name_t=[]
for root,dirs,files in os.walk(file_dir):
if not(len(files)==0):
for pic_name in files:
if '_1920x1080.jpg' in pic_name:
name_right=pic_name.find('_')
e_name=pic_name.replace(pic_name[:name_right+1],'')
e_name_t.append(e_name)
#通过logging永久保存爬取的文件名,在每次运行的时候读取文件名并放入list中。
log=open(log_dir,'r')
print('Reading Log...')
# time.sleep(2)
names=log.read()
e_name_l=re.findall(r'INFO:root:.*:(.*.jpg)',names) #INFO:root:已存在:KazakhstanNasa_ZH-CN9791985430_1920x1080.jpg
e_name_l=set(e_name_l) #将e_name_l从list转换为set,删除log中重复的条目,提高性能
# print('e_name_l SET:',e_name_l)
def get_bing_backpic():
    """Download the latest Bing daily wallpapers that are not yet saved.

    Fetches the HPImageArchive JSON for the last 8 days, extracts the image
    URLs, and downloads each image unless it already exists on disk
    (``e_name_t``) or was crawled before according to the log (``e_name_l``).
    Sleeps briefly after processing the last entry.
    """
    i=8
    url= 'http://cn.bing.com/HPImageArchive.aspx?format=js&idx=0&n='+ str(i)
    html=urllib.request.urlopen(url).read()
    # NOTE(review): urlopen(...).read() returns bytes on Python 3, so this
    # str comparison is always False here -- presumably a leftover from
    # Python 2; the check never fires. TODO confirm intended behavior.
    if html=='null':
        print('获取页面错误')
        sys.exit(-1)
    html=html.decode('utf-8')
    # Extract the relative image URLs from the JSON payload via regex.
    reg=re.compile('"url":"(.*?)","urlbase"',re.S)
    text = re.findall(reg,html)
    # logging.warning([pic_url for pic_url in text])
    text_no=0
    for imgurl in text :
        imgurl='http://cn.bing.com'+imgurl
        # File name is everything after the last '/'.
        right = imgurl.rindex('/')
        name = imgurl.replace(imgurl[:right+1],'')
        if name in e_name_t:
            # Already present on disk -- skip and log.
            logging.info('已存在:' + name)
            print ('已存在:' + name)
        elif name in e_name_l:
            # Crawled before but deleted locally -- do not re-download.
            logging.info('已爬过,被删除:'+name)
            print('已爬过,被删除:'+name)
        else:
            # New wallpaper: save with the current run's timestamp prefix.
            save_name=now_time+'_'+name
            savepath = file_dir+'\\' + save_name
            urllib.request.urlretrieve(imgurl, savepath)
            logging.info('保存成功:' + name)
            print ('保存成功:'+ save_name)
        text_no=text_no+1
        if text_no==len(text):
            # Last entry processed: report summary and pause before exit.
            sleep_time=6
            print('\n'+now_time+':爬取结束。\n'+'壁纸保存路径:'+file_dir+'\n'+str(sleep_time)+'秒后跳出')
            logging.info('爬取时间:'+now_time+'\n\n')
            time.sleep(sleep_time)
# Script entry point: run the crawl once.
get_bing_backpic()
| 28.642857 | 112 | 0.697257 |
acf0333429bb49cbd535037c240b41febd035bc5 | 3,403 | py | Python | api/views.py | GaniAliguzhinov/AviataTask | bfcd059b65036332ed3b645b44eff255b0f30205 | [
"MIT"
] | null | null | null | api/views.py | GaniAliguzhinov/AviataTask | bfcd059b65036332ed3b645b44eff255b0f30205 | [
"MIT"
] | null | null | null | api/views.py | GaniAliguzhinov/AviataTask | bfcd059b65036332ed3b645b44eff255b0f30205 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.decorators.cache import cache_page
from django.utils.decorators import method_decorator
from Route.models import Route
from Route.serializers import RouteSerializer
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from helper import convert_date
from Route.tasks import search_flight
import datetime
@api_view(['GET'])
def search(request):
    """
    API method of creating a Route object.
    This implicitly checks flights for given route.
    Parameters: fly_from, fly_to, date
    Without date, returns flights for all cached
    dates for the given route.
    """
    if request.method == 'GET':
        fly_from = request.GET.get('fly_from', None)
        fly_to = request.GET.get('fly_to', None)
        # Both bounds are read from the single 'date' query parameter, so a
        # search always covers exactly one day (same-day departure window).
        date_from = request.GET.get('date', None)
        date_to = request.GET.get('date', None)
        if all(v is not None for v in [fly_from, fly_to, date_from, date_to]):
            try:
                # Reuse a cached Route for this exact (from, to, date) triple
                # if one exists; otherwise create and persist a new one.
                route = Route.objects.all().filter(fly_from=fly_from,
                                                   fly_to=fly_to,
                                                   date_from=date_from,
                                                   date_to=date_to).first()
                if route is None:
                    route = Route(fly_from=fly_from,
                                  fly_to=fly_to,
                                  date_from=date_from,
                                  date_to=date_to)
                    route.save()
                if not route.response:
                    # No flight data cached yet: mark the route as in-progress
                    # and queue a background search_flight task for it.
                    route.response = {'executing': 1}
                    route.save()
                    search_flight.delay(route.pk)
                serializer = RouteSerializer(route)
                return Response(serializer.data)
            except AssertionError:
                print('No flights found')
                return Response()
        elif all(v is not None for v in [fly_from, fly_to]):
            try:
                # No date supplied: return a {date: price} mapping over every
                # cached Route for this origin/destination pair
                # (-1 when a route has no cached price yet).
                routes = Route.objects.all().filter(fly_from=fly_from,
                                                    fly_to=fly_to)
                serializer = {f'{route.date_from}':
                              (lambda r: r.data['response'].get('price', -1))
                              (RouteSerializer(route)) for route in routes}
                return Response(serializer)
            except AssertionError:
                print('No flights found')
                return Response()
    return Response()
class ApiView(APIView):
    """Read-only view returning cached prices for every known route.

    Response shape: {"FROM-TO": {date: price, ...}, ...} where price is -1
    when a route has no cached price yet. The whole response is cached for
    two hours via cache_page.
    """
    @method_decorator(cache_page(60*60*2))
    def get(self, request, format=None):
        # Collect every (origin, destination) pair currently stored.
        # NOTE(review): pairs are not de-duplicated, so a pair with several
        # dates is re-queried once per stored Route -- TODO confirm intended.
        pairs = [(r.fly_from, r.fly_to) for r in Route.objects.all()]
        try:
            all_routes = [Route.objects.all()
                          .filter(fly_from=p[0],
                                  fly_to=p[1]) for p in pairs]
            # For each pair, map every cached date to its price (or -1).
            serializers = {f'{routes[0].fly_from}-{routes[0].fly_to}':
                           {f'{route.date_from}':
                            (lambda r: r.data['response'].get('price', -1))
                            (RouteSerializer(route)) for route in routes}
                           for routes in all_routes}
            return Response(serializers)
        except AssertionError:
            print('No flights found')
            return Response()
| 40.511905 | 78 | 0.530708 |
acf03343c10fe2b089dfa74bc7a0f172fa407c4b | 740 | py | Python | python/20190311/my_djangos/django_share_app/shares/models.py | Realize0917/career | b5d02ac53cfc3ce3a2ca38d11480c51560283e67 | [
"MIT"
] | 3 | 2019-01-17T05:50:51.000Z | 2019-03-15T10:10:07.000Z | python/20190311/my_djangos/django_share_app/shares/models.py | Realize0917/career | b5d02ac53cfc3ce3a2ca38d11480c51560283e67 | [
"MIT"
] | 10 | 2019-01-17T06:07:03.000Z | 2019-02-19T05:55:25.000Z | python/20190311/my_djangos/django_share_app/shares/models.py | Realize0917/career | b5d02ac53cfc3ce3a2ca38d11480c51560283e67 | [
"MIT"
] | 4 | 2018-12-22T07:32:55.000Z | 2019-03-06T09:13:48.000Z | from django.db import models
from django.utils import timezone
class Upload(models.Model):
    """
    A single uploaded file record (name, size, location and download stats).
    """
    # Original file name as uploaded by the client.
    file_name = models.CharField(max_length=32)
    # Human-readable size string.
    file_size = models.CharField(max_length=10)
    # How many times the file has been downloaded.
    download_count = models.IntegerField(default=0)
    # When the upload happened; defaults to now.
    upload_time = models.DateTimeField(default=timezone.now)
    # Where the file is stored on disk.
    file_path = models.CharField(max_length=32)
    # IP address of the uploader.
    ip_addr = models.CharField(max_length=32)
    def __str__(self):
        return self.file_name
    def to_dict(self):
        """Serialize the record to a plain dict (upload_time as string)."""
        return {
            'file_name': self.file_name,
            'file_size': self.file_size,
            'ip_addr': self.ip_addr,
            'upload_time': str(self.upload_time),
            'file_path': self.file_path,
        }
| 25.517241 | 60 | 0.644595 |
acf03347166cd583ae8f10fee57d3e9f2b638eb5 | 1,624 | py | Python | pyunmarked/nmixture.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | [
"MIT"
] | null | null | null | pyunmarked/nmixture.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | [
"MIT"
] | null | null | null | pyunmarked/nmixture.py | kenkellner/pyunmarked | 485bd96b4ca12a019b478fc19f68f577279ac9b8 | [
"MIT"
] | null | null | null | from . import model
import numpy as np
from scipy import special, stats
class NmixtureModel(model.UnmarkedModel):
    """Binomial N-mixture model for repeated count data.

    Site abundance is modeled on the log scale (np.exp inverse link) and
    per-visit detection probability on the logit scale (expit inverse link).
    """
    def __init__(self, det_formula, abun_formula, data):
        # y is the N-sites x J-visits count matrix.
        self.response = model.Response(data.y)
        abun = model.Submodel("Abundance", "abun", abun_formula, np.exp, data.site_covs)
        det = model.Submodel("Detection", "det", det_formula, special.expit, data.obs_covs)
        self.submodels = model.SubmodelDict(abun=abun, det=det)
    def negloglik(self, x, mod, K):
        """Negative log-likelihood of parameter vector x.

        The latent abundance at each site is marginalized out by summing
        over possible values from the site's minimum (Kmin, the largest
        observed count) up to the upper bound K.
        """
        x = np.array(x)
        beta_abun = x[mod["abun"].index]
        beta_det = x[mod["det"].index]
        y = mod.response.y
        N, J = y.shape
        lam = mod["abun"].predict(beta=beta_abun, interval=False)
        p = mod["det"].predict(beta=beta_det, interval=False).reshape(N, J)
        nll = 0.0
        for i in range(N):
            kvals = range(int(mod.response.Kmin[i]), int(K)+1)
            # Poisson prior over latent abundance k ...
            f = stats.poisson.pmf(kvals, lam[i])
            ymat = np.tile(y[i,], (len(kvals), 1))
            pmat = np.tile(p[i,], (len(kvals), 1))
            kmat = np.tile(kvals, (J, 1)).transpose()
            # ... times binomial likelihood of the J observed counts given k,
            # summed (log-space) across visits.
            g = stats.binom.logpmf(ymat, kmat, pmat).sum(axis=1)
            fg = f * np.exp(g)
            nll -= np.log(fg.sum())
        return nll
    def simulate(self):
        """Simulate a new y matrix from the fitted abundance/detection models."""
        N, J = self.response.y.shape
        lam = self.predict("abun", interval=False)
        p = self.predict("det", interval=False).reshape(N, J)
        # Draw latent abundance per site, then binomial counts per visit.
        z = np.random.poisson(lam, N)
        y = np.empty((N, J))
        for i in range(N):
            y[i,] = np.random.binomial(z[i], p[i,], J)
        return y
| 38.666667 | 91 | 0.556034 |
acf03375c31448a14d368b07e0c00eb5948d7fc6 | 5,760 | py | Python | resources/lib/services/playback/progress_manager.py | moebelwagen/plugin.video.netflix | 06b812159f230efbb23dc9b4e2299b91f16a2ebe | [
"MIT"
] | null | null | null | resources/lib/services/playback/progress_manager.py | moebelwagen/plugin.video.netflix | 06b812159f230efbb23dc9b4e2299b91f16a2ebe | [
"MIT"
] | null | null | null | resources/lib/services/playback/progress_manager.py | moebelwagen/plugin.video.netflix | 06b812159f230efbb23dc9b4e2299b91f16a2ebe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT (original implementation module)
Manages events to send to the netflix service for the progress of the played video
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
from xbmcgui import Window
import resources.lib.common as common
from resources.lib.services.msl.msl_utils import EVENT_START, EVENT_ENGAGE, EVENT_STOP, EVENT_KEEP_ALIVE
from .action_manager import PlaybackActionManager
class ProgressManager(PlaybackActionManager):
    """Detect the progress of the played video and send the data to the netflix service"""
    def __init__(self): # pylint: disable=super-on-old-class
        super(ProgressManager, self).__init__()
        # Netflix event payload template received at initialization.
        self.event_data = {}
        # True once EVENT_START has been emitted for this playback.
        self.is_event_start_sent = False
        # tick_elapsed value at which the last KEEP_ALIVE was sent.
        self.last_tick_count = 0
        # Ticks (~seconds) since the last state reset.
        self.tick_elapsed = 0
        self.last_player_state = {}
        self.is_player_in_pause = False
        # When True, event sending is suspended (e.g. long pause).
        self.lock_events = False
        self.window_cls = Window(10000)  # Kodi home window
    def _initialize(self, data):
        """Enable the manager only when event data is available."""
        if not data['event_data']:
            common.warn('ProgressManager: disabled due to no event data')
            self.enabled = False
            return
        self.event_data = data['event_data']
    def _on_tick(self, player_state):
        """Per-second callback: emits START / KEEP_ALIVE / long-pause STOP events."""
        if self.lock_events:
            return
        if self.is_player_in_pause and (self.tick_elapsed - self.last_tick_count) >= 1800:
            # When the player is paused for more than 30 minutes we interrupt the sending of events (1800secs=30m)
            _send_event(EVENT_ENGAGE, self.event_data, self.last_player_state)
            _send_event(EVENT_STOP, self.event_data, self.last_player_state)
            self.is_event_start_sent = False
            self.lock_events = True
        else:
            if not self.is_event_start_sent:
                # We do not use _on_playback_started() to send EVENT_START, because StreamContinuityManager
                # and ResumeManager may cause inconsistencies with the content of player_state data
                # When the playback starts for the first time, for correctness should send elapsed_seconds value to 0
                if self.tick_elapsed < 5 and self.event_data['resume_position'] is None:
                    player_state['elapsed_seconds'] = 0
                _send_event(EVENT_START, self.event_data, player_state)
                self.is_event_start_sent = True
                self.tick_elapsed = 0
            else:
                # Generate events to send to Netflix service every 1 minute (60secs=1m)
                if (self.tick_elapsed - self.last_tick_count) >= 60:
                    _send_event(EVENT_KEEP_ALIVE, self.event_data, player_state)
                    self.last_tick_count = self.tick_elapsed
                # On Kodi we can save every second instead every minute, but only after the first minute
                if self.last_tick_count:
                    self._save_resume_time(player_state['elapsed_seconds'])
        self.last_player_state = player_state
        self.tick_elapsed += 1  # One tick almost always represents one second
    def on_playback_pause(self, player_state):
        """On pause: snapshot progress with an ENGAGE event."""
        if not self.is_event_start_sent:
            return
        self.tick_elapsed = 0
        self.is_player_in_pause = True
        _send_event(EVENT_ENGAGE, self.event_data, player_state)
    def on_playback_resume(self, player_state):
        """On resume: re-enable event sending after a pause."""
        self.is_player_in_pause = False
        self.lock_events = False
    def on_playback_seek(self, player_state):
        """On seek: report the new position with an ENGAGE event."""
        if not self.is_event_start_sent or self.lock_events:
            # This might happen when ResumeManager skip is performed
            return
        self.tick_elapsed = 0
        _send_event(EVENT_ENGAGE, self.event_data, player_state)
    def _on_playback_stopped(self):
        """On stop: flush final ENGAGE + STOP events from the last known state."""
        if not self.is_event_start_sent or self.lock_events:
            return
        self.tick_elapsed = 0
        _send_event(EVENT_ENGAGE, self.event_data, self.last_player_state)
        _send_event(EVENT_STOP, self.event_data, self.last_player_state)
    def _save_resume_time(self, resume_time):
        """Save resume time in order to modify the frontend cache"""
        # Why this, the video lists are requests to the web service only once and then will be cached in order to
        # quickly get the data and speed up a lot the GUI response.
        # Watched status of a (video) list item is based on resume time, and the resume time is saved in the cache data.
        # To avoid slowing down the GUI by invalidating the cache to get new data from website service, one solution is
        # modify the cache data.
        # Altering here the cache on the fly is not possible because it is currently not shared between service-frontend
        # therefore we save the value in a Kodi property and we will modify the cache from addon frontend.
        # The choice to save the value in a Kodi property is to not continuously lock with mutex the database.
        # The callback _on_playback_stopped can not be used, because the loading of frontend happen before.
        self.window_cls.setProperty('nf_playback_resume_time', str(resume_time))
def _send_event(event_type, event_data, player_state):
    """Queue a playback event for the Netflix service (non-blocking signal)."""
    if player_state:
        payload = {
            'event_type': event_type,
            'event_data': event_data,
            'player_state': player_state
        }
        common.send_signal(common.Signals.QUEUE_VIDEO_EVENT, payload, non_blocking=True)
    else:
        # Without player state the event payload would be incomplete -- drop it.
        common.warn('ProgressManager: the event [{}] cannot be sent, missing player_state data', event_type)
| 48.403361 | 120 | 0.684549 |
acf0345c81ef61e3966234526b3adf1587f96bcc | 18,721 | py | Python | tests.py | fibersel/aiochclient | 0fc82126148eb5e60f2ff497b9d97166be4be9de | [
"MIT"
] | null | null | null | tests.py | fibersel/aiochclient | 0fc82126148eb5e60f2ff497b9d97166be4be9de | [
"MIT"
] | null | null | null | tests.py | fibersel/aiochclient | 0fc82126148eb5e60f2ff497b9d97166be4be9de | [
"MIT"
] | null | null | null | import datetime as dt
from decimal import Decimal
from uuid import uuid4
import aiohttp
import pytest
from aiochclient import ChClient, ChClientError
pytestmark = pytest.mark.asyncio
@pytest.fixture
def uuid():
    """A fresh random UUID shared by the rows fixture and UUID-type tests."""
    return uuid4()
@pytest.fixture
def rows(uuid):
    """Two sample rows matching the all_types table column order.

    Row 1 (uint8=1) is fully populated; row 2 (uint8=2) exercises NULLs
    and empty arrays for the nullable/array columns.
    """
    return [
        (
            1,
            1000,
            10000,
            12_345_678_910,
            -4,
            -453,
            21322,
            -32123,
            23.432,
            -56754.564_542,
            "hello man",
            "hello fixed man".ljust(32, " "),
            dt.date(2018, 9, 21),
            dt.datetime(2018, 9, 21, 10, 32, 23),
            "hello",
            "world",
            [1, 2, 3, 4],
            (4, "hello"),
            0,
            ["hello", "world"],
            "'\b\f\r\n\t\\",
            uuid,
            [uuid, uuid, uuid],
            ["hello", "world", "hello"],
            [dt.date(2018, 9, 21), dt.date(2018, 9, 22)],
            [
                dt.datetime(2018, 9, 21, 10, 32, 23),
                dt.datetime(2018, 9, 21, 10, 32, 24),
            ],
            "hello man",
            "hello man",
            777,
            dt.date(1994, 9, 7),
            dt.datetime(2018, 9, 21, 10, 32, 23),
            Decimal('1234.5678'),
            Decimal('1234.56'),
            Decimal('1234.56'),
            Decimal('123.56'),
        ),
        (
            2,
            1000,
            10000,
            12_345_678_910,
            -4,
            -453,
            21322,
            -32123,
            23.432,
            -56754.564_542,
            "hello man",
            "hello fixed man".ljust(32, " "),
            None,
            None,
            "hello",
            "world",
            [1, 2, 3, 4],
            (4, "hello"),
            None,
            [],
            "'\b\f\r\n\t\\",
            None,
            [],
            [],
            [],
            [],
            "hello man",
            None,
            777,
            dt.date(1994, 9, 7),
            dt.datetime(2018, 9, 21, 10, 32, 23),
            Decimal('1234.5678'),
            Decimal('1234.56'),
            Decimal('1234.56'),
            Decimal('123.56'),
        ),
    ]
# Client fixture parametrized over two configurations: a fully explicit one
# (compression + credentials) and a minimal one relying on defaults, so every
# test runs against both.
@pytest.fixture(
    params=[
        {
            "compress_response": True,
            "user": "default",
            "password": "",
            "database": "default",
            "allow_suspicious_low_cardinality_types": 1,
        },
        {"allow_suspicious_low_cardinality_types": 1},
    ]
)
async def chclient(request):
    async with aiohttp.ClientSession() as s:
        yield ChClient(s, **request.param)
@pytest.fixture
async def all_types_db(chclient, rows):
    """(Re)create the all_types table and insert the two sample rows."""
    await chclient.execute("DROP TABLE IF EXISTS all_types")
    await chclient.execute(
        """
        CREATE TABLE all_types (uint8 UInt8,
                                uint16 UInt16,
                                uint32 UInt32,
                                uint64 UInt64,
                                int8 Int8,
                                int16 Int16,
                                int32 Int32,
                                int64 Int64,
                                float32 Float32,
                                float64 Float64,
                                string String,
                                fixed_string FixedString(32),
                                date Nullable(Date),
                                datetime Nullable(DateTime),
                                enum8 Enum8('hello' = 1, 'world' = 2),
                                enum16 Enum16('hello' = 1000, 'world' = 2000),
                                array_uint8 Array(UInt8),
                                tuple Tuple(UInt8, String),
                                nullable Nullable(Int8),
                                array_string Array(String),
                                escape_string String,
                                uuid Nullable(UUID),
                                array_uuid Array(UUID),
                                array_enum Array(Enum8('hello' = 1, 'world' = 2)),
                                array_date Array(Date),
                                array_datetime Array(DateTime),
                                low_cardinality_str LowCardinality(String),
                                low_cardinality_nullable_str LowCardinality(Nullable(String)),
                                low_cardinality_int LowCardinality(Int32),
                                low_cardinality_date LowCardinality(Date),
                                low_cardinality_datetime LowCardinality(DateTime),
                                decimal32 Decimal32(4),
                                decimal64 Decimal64(2),
                                decimal128 Decimal128(6),
                                decimal Decimal(6, 3)
        ) ENGINE = Memory
        """
    )
    await chclient.execute("INSERT INTO all_types VALUES", *rows)
@pytest.fixture
def class_chclient(chclient, all_types_db, rows, request):
    """Attach the client and sample rows to the test class (class-style tests)."""
    request.cls.ch = chclient
    request.cls.rows = rows
@pytest.mark.client
@pytest.mark.usefixtures("class_chclient")
class TestClient:
    """Basic client sanity checks: liveness probe and malformed queries."""

    async def test_is_alive(self):
        alive = await self.ch.is_alive()
        assert alive is True

    async def test_bad_query(self):
        # Malformed SQL must surface as a ChClientError.
        with pytest.raises(ChClientError):
            await self.ch.execute("SELE")

    async def test_bad_select(self):
        # An incomplete WHERE clause with bound parameters must also raise.
        with pytest.raises(ChClientError):
            await self.ch.execute("SELECT * FROM all_types WHERE", 1, 2, 3, 4)
@pytest.mark.types
@pytest.mark.usefixtures("class_chclient")
class TestTypes:
    """Round-trip checks: every column type must decode to the value inserted
    via the rows fixture (row uint8=1), both by scalar fetch and by record
    index/name access."""
    async def select_field(self, field):
        """Fetch a single column value from the fully-populated sample row."""
        return await self.ch.fetchval(f"SELECT {field} FROM all_types WHERE uint8=1")
    async def select_record(self, field):
        """Fetch a single-column record from the fully-populated sample row."""
        return await self.ch.fetchrow(f"SELECT {field} FROM all_types WHERE uint8=1")
    async def test_uint8(self):
        assert await self.select_field("uint8") == 1
        record = await self.select_record("uint8")
        assert record[0] == 1
        assert record["uint8"] == 1
    async def test_uint16(self):
        result = 1000
        assert await self.select_field("uint16") == result
        record = await self.select_record("uint16")
        assert record[0] == result
        assert record["uint16"] == result
    async def test_uint32(self):
        result = 10000
        assert await self.select_field("uint32") == result
        record = await self.select_record("uint32")
        assert record[0] == result
        assert record["uint32"] == result
    async def test_uint64(self):
        result = 12_345_678_910
        assert await self.select_field("uint64") == result
        record = await self.select_record("uint64")
        assert record[0] == result
        assert record["uint64"] == result
    async def test_int8(self):
        result = -4
        assert await self.select_field("int8") == result
        record = await self.select_record("int8")
        assert record[0] == result
        assert record["int8"] == result
    async def test_int16(self):
        result = -453
        assert await self.select_field("int16") == result
        record = await self.select_record("int16")
        assert record[0] == result
        assert record["int16"] == result
    async def test_int32(self):
        result = 21322
        assert await self.select_field("int32") == result
        record = await self.select_record("int32")
        assert record[0] == result
        assert record["int32"] == result
    async def test_int64(self):
        result = -32123
        assert await self.select_field("int64") == result
        record = await self.select_record("int64")
        assert record[0] == result
        assert record["int64"] == result
    async def test_float32(self):
        result = 23.432
        assert await self.select_field("float32") == result
        record = await self.select_record("float32")
        assert record[0] == result
        assert record["float32"] == result
    async def test_float64(self):
        result = -56754.564_542
        assert await self.select_field("float64") == result
        record = await self.select_record("float64")
        assert record[0] == result
        assert record["float64"] == result
    async def test_string(self):
        result = "hello man"
        assert await self.select_field("string") == result
        record = await self.select_record("string")
        assert record[0] == result
        assert record["string"] == result
    async def test_fixed_string(self):
        result = "hello fixed man".ljust(32, " ")
        assert await self.select_field("fixed_string") == result
        record = await self.select_record("fixed_string")
        assert record[0] == result
        assert record["fixed_string"] == result
    async def test_date(self):
        result = dt.date(2018, 9, 21)
        assert await self.select_field("date") == result
        record = await self.select_record("date")
        assert record[0] == result
        assert record["date"] == result
    async def test_datetime(self):
        result = dt.datetime(2018, 9, 21, 10, 32, 23)
        assert await self.select_field("datetime") == result
        record = await self.select_record("datetime")
        assert record[0] == result
        assert record["datetime"] == result
    async def test_enum8(self):
        result = "hello"
        assert await self.select_field("enum8") == result
        record = await self.select_record("enum8")
        assert record[0] == result
        assert record["enum8"] == result
    async def test_enum16(self):
        result = "world"
        assert await self.select_field("enum16") == result
        record = await self.select_record("enum16")
        assert record[0] == result
        assert record["enum16"] == result
    async def test_array_uint8(self):
        result = [1, 2, 3, 4]
        assert await self.select_field("array_uint8") == result
        record = await self.select_record("array_uint8")
        assert record[0] == result
        assert record["array_uint8"] == result
    async def test_tuple(self):
        result = (4, "hello")
        assert await self.select_field("tuple") == result
        record = await self.select_record("tuple")
        assert record[0] == result
        assert record["tuple"] == result
    async def test_nullable(self):
        result = 0
        assert await self.select_field("nullable") == result
        record = await self.select_record("nullable")
        assert record[0] == result
        assert record["nullable"] == result
    async def test_array_string(self):
        result = ["hello", "world"]
        assert await self.select_field("array_string") == result
        record = await self.select_record("array_string")
        assert record[0] == result
        assert record["array_string"] == result
    async def test_escape_string(self):
        # Control characters and backslash must survive the round trip.
        result = "'\b\f\r\n\t\\"
        assert await self.select_field("escape_string") == result
        record = await self.select_record("escape_string")
        assert record[0] == result
        assert record["escape_string"] == result
    async def test_uuid(self, uuid):
        result = uuid
        assert await self.select_field("uuid") == result
        record = await self.select_record("uuid")
        assert record[0] == result
        assert record["uuid"] == result
    async def test_array_uuid(self, uuid):
        result = [uuid, uuid, uuid]
        assert await self.select_field("array_uuid") == result
        record = await self.select_record("array_uuid")
        assert record[0] == result
        assert record["array_uuid"] == result
    async def test_array_enum(self):
        result = ["hello", "world", "hello"]
        assert await self.select_field("array_enum ") == result
        record = await self.select_record("array_enum ")
        assert record[0] == result
        assert record["array_enum"] == result
    async def test_array_date(self):
        assert await self.select_field("array_date ") == [
            dt.date(2018, 9, 21),
            dt.date(2018, 9, 22),
        ]
    async def test_array_datetime(self):
        assert await self.select_field("array_datetime ") == [
            dt.datetime(2018, 9, 21, 10, 32, 23),
            dt.datetime(2018, 9, 21, 10, 32, 24),
        ]
    async def test_low_cardinality_str(self):
        result = "hello man"
        assert await self.select_field("low_cardinality_str") == result
        record = await self.select_record("low_cardinality_str")
        assert record[0] == result
        assert record["low_cardinality_str"] == result
    async def test_low_cardinality_nullable_str(self):
        result = "hello man"
        assert await self.select_field("low_cardinality_nullable_str") == result
        record = await self.select_record("low_cardinality_nullable_str")
        assert record[0] == result
        assert record["low_cardinality_nullable_str"] == result
    async def test_low_cardinality_int(self):
        result = 777
        assert await self.select_field("low_cardinality_int") == result
        record = await self.select_record("low_cardinality_int")
        assert record[0] == result
        assert record["low_cardinality_int"] == result
    async def test_low_cardinality_date(self):
        result = dt.date(1994, 9, 7)
        assert await self.select_field("low_cardinality_date") == result
        record = await self.select_record("low_cardinality_date")
        assert record[0] == result
        assert record["low_cardinality_date"] == result
    async def test_low_cardinality_datetime(self):
        assert await self.select_field("low_cardinality_datetime") == dt.datetime(
            2018, 9, 21, 10, 32, 23
        )
    async def test_decimal(self):
        assert await self.select_field("decimal") == Decimal('123.56')
    async def test_decimal32(self):
        assert await self.select_field("decimal32") == Decimal('1234.5678')
    async def test_decimal64(self):
        assert await self.select_field("decimal64") == Decimal('1234.56')
    async def test_decimal128(self):
        assert await self.select_field("decimal128") == Decimal('1234.56')
@pytest.mark.fetching
@pytest.mark.usefixtures("class_chclient")
class TestFetching:
    """Checks for the fetch* / cursor / iterate APIs against the two sample rows."""
    async def test_fetchrow_full(self):
        assert (await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=1"))[
            :
        ] == self.rows[0]
    async def test_fetchrow_with_empties(self):
        assert (await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2"))[
            :
        ] == self.rows[1]
    async def test_fetchrow_none_result(self):
        assert (
            await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=42")
        ) is None
    async def test_fetchone_full(self):
        assert (await self.ch.fetchone("SELECT * FROM all_types WHERE uint8=1"))[
            :
        ] == self.rows[0]
    async def test_fetchone_with_empties(self):
        assert (await self.ch.fetchone("SELECT * FROM all_types WHERE uint8=2"))[
            :
        ] == self.rows[1]
    async def test_fetchone_none_result(self):
        assert (
            await self.ch.fetchone("SELECT * FROM all_types WHERE uint8=42")
        ) is None
    async def test_fetchval_none_result(self):
        assert (
            await self.ch.fetchval("SELECT uint8 FROM all_types WHERE uint8=42")
        ) is None
    async def test_fetch(self):
        rows = await self.ch.fetch("SELECT * FROM all_types")
        assert [row[:] for row in rows] == self.rows
    async def test_cursor(self):
        assert [
            row[:] async for row in self.ch.cursor("SELECT * FROM all_types")
        ] == self.rows
    async def test_iterate(self):
        assert [
            row[:] async for row in self.ch.iterate("SELECT * FROM all_types")
        ] == self.rows
    async def test_select_with_execute(self):
        # execute() discards result rows even for a SELECT.
        assert (await self.ch.execute("SELECT * FROM all_types WHERE uint8=1")) is None
@pytest.mark.record
@pytest.mark.usefixtures("class_chclient")
class TestRecord:
    """Behavioral checks for the Record type: shared metadata, lazy decoding,
    mapping protocol, truthiness, length and error handling."""
    async def test_common_objects(self):
        # Converters and names must be shared (same objects) across records
        # of the same result set.
        records = await self.ch.fetch("SELECT * FROM all_types")
        assert id(records[0]._converters) == id(records[1]._converters)
        assert id(records[0]._names) == id(records[1]._names)
    async def test_lazy_decoding(self):
        # The raw row stays bytes until first item access decodes it.
        record = await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2")
        assert type(record._row) == bytes
        record[0]
        assert type(record._row) == tuple
        assert type(record._row[0]) == int
    async def test_mapping(self):
        record = await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2")
        assert list(record.values())[0] == 2
        assert list(record.keys())[0] == "uint8"
        assert list(record.items())[0] == ("uint8", 2)
        assert record.get("uint8") == 2
        assert record.get(0) == 2
    async def test_bool(self):
        # The WITH TOTALS separator row must be falsy.
        records = await self.ch.fetch(
            "SELECT uniq(array_string) FROM all_types GROUP BY array_string WITH TOTALS"
        )
        assert bool(records[-2]) is False
    async def test_len(self):
        record = await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2")
        assert len(record) == len(self.rows[1])
    async def test_index_error(self):
        record = await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2")
        with pytest.raises(IndexError):
            record[42]
        records = await self.ch.fetch(
            "SELECT uniq(array_string) FROM all_types GROUP BY array_string WITH TOTALS"
        )
        with pytest.raises(IndexError):
            records[-2][0]
    async def test_key_error(self):
        record = await self.ch.fetchrow("SELECT * FROM all_types WHERE uint8=2")
        with pytest.raises(KeyError):
            record["no_such_key"]
        records = await self.ch.fetch(
            "SELECT uniq(array_string) FROM all_types GROUP BY array_string WITH TOTALS"
        )
        with pytest.raises(KeyError):
            records[-2]["a"]
@pytest.mark.usefixtures("class_chclient")
class TestJsonInsert:
    """Inserting via JSONEachRow with partial records (missing columns take defaults)."""
    async def test_json_insert(self):
        sql = "INSERT INTO all_types FORMAT JSONEachRow"
        records = [
            {"decimal32": 32},
            {"fixed_string": "simple string", "low_cardinality_str": "meow test"},
        ]
        await self.ch.execute(sql, *records)
        result = await self.ch.fetch("SELECT * FROM all_types WHERE decimal32 = 32")
        assert len(result) == 1
        result = await self.ch.fetch(
            "SELECT * FROM all_types WHERE low_cardinality_str = 'meow test'"
        )
        assert len(result) == 1
| 34.413603 | 90 | 0.569788 |
acf0348cc9f5686842f2e3136971034fafda1149 | 1,706 | py | Python | dfi/metrics.py | nicemanis/deep-focus-interpolation | 1df772fdb9a7c9c0813a3d1d6948a07dedb4384b | [
"MIT"
] | null | null | null | dfi/metrics.py | nicemanis/deep-focus-interpolation | 1df772fdb9a7c9c0813a3d1d6948a07dedb4384b | [
"MIT"
] | null | null | null | dfi/metrics.py | nicemanis/deep-focus-interpolation | 1df772fdb9a7c9c0813a3d1d6948a07dedb4384b | [
"MIT"
] | null | null | null | import tensorflow as tf
import keras.backend as K
class Metrics:
    """Factory for Keras metric functions (PSNR / SSIM).

    Depending on the model type, metrics are computed either directly on the
    predicted image ("target" model) or on images reconstructed from a
    predicted residual added to the two input channels.

    Note: the inner functions are deliberately named ``psnr``/``ssim`` --
    Keras uses the function ``__name__`` as the displayed metric name.
    """

    def __init__(self, hparams):
        self.hparams = hparams

    def get_max_val(self):
        """Dynamic range of the normalized pixel values."""
        data_cfg = self.hparams.data
        return data_cfg.norm_max - data_cfg.norm_min

    def get_y(self, x, r):
        """Reconstruct a clipped prediction from residual `r` and input `x`.

        The residual's two channels are added to the corresponding input
        channels and the two reconstructions are averaged.
        """
        first = r[:, :, :, 0:1] + x[:, :, :, 0:1]
        second = r[:, :, :, 1:2] + x[:, :, :, 1:2]
        combined = (first + second) / 2
        return K.clip(combined, self.hparams.data.norm_min, self.hparams.data.norm_max)

    def get_psnr(self):
        """PSNR metric operating directly on predictions."""
        def psnr(y_true, y_pred):
            # clip values to ensure correct PSNR calculation
            clipped = K.clip(y_pred, self.hparams.data.norm_min, self.hparams.data.norm_max)
            return tf.image.psnr(y_true, clipped, self.get_max_val())
        return psnr

    def get_residual_psnr(self, x):
        """PSNR metric operating on residuals, reconstructed against `x`."""
        def psnr(r_true, r_pred):
            return tf.image.psnr(self.get_y(x, r_true), self.get_y(x, r_pred), self.get_max_val())
        return psnr

    def get_ssim(self):
        """SSIM metric operating directly on predictions."""
        def ssim(y_true, y_pred):
            clipped = K.clip(y_pred, self.hparams.data.norm_min, self.hparams.data.norm_max)
            return tf.image.ssim(y_true, clipped, self.get_max_val())
        return ssim

    def get_residual_ssim(self, x):
        """SSIM metric operating on residuals, reconstructed against `x`."""
        def ssim(r_true, r_pred):
            return tf.image.ssim(self.get_y(x, r_true), self.get_y(x, r_pred), self.get_max_val())
        return ssim

    def get_metrics(self, x):
        """Pick the metric pair matching the configured model type."""
        if self.hparams.model.type != "target":
            return [self.get_residual_psnr(x), self.get_residual_ssim(x)]
        return [self.get_psnr(), self.get_ssim()]
| 31.018182 | 115 | 0.586166 |
acf0354328469d1e5ea291e224e46f025b573923 | 1,807 | py | Python | lib/config.py | sstm2/jnotebook_reader | 300e43502ccc2f5cf5b359c8dd519e36cdd2e563 | [
"Apache-2.0"
] | 101 | 2020-11-23T06:01:25.000Z | 2022-03-19T14:07:59.000Z | lib/config.py | sstm2/jnotebook_reader | 300e43502ccc2f5cf5b359c8dd519e36cdd2e563 | [
"Apache-2.0"
] | 6 | 2020-12-04T08:56:23.000Z | 2021-09-09T02:09:18.000Z | lib/config.py | sstm2/jnotebook_reader | 300e43502ccc2f5cf5b359c8dd519e36cdd2e563 | [
"Apache-2.0"
] | 14 | 2020-11-23T06:09:34.000Z | 2021-12-23T02:56:16.000Z | # Copyright 2020 LINE Corporation
#
# LINE Corporation licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
# Default configuration. The dict is built with a single "default" key and
# immediately indexed with ["default"], so `config` is the inner dict itself.
config = {
    "default": {
        "server": {
            "port": 9088,  # The port judy server listening on
            "root": ""  # Context path, base url
        },
        "storage": {
            "type": "local",  # local or s3
            "directories": ["docs"],  # If type is local effective
            "s3": {  # s3 config, if type is s3 effective
                "endpoint": None,  # s3 endpoint, if type is s3 required, if set with None would access to s3 global url
                "accessKey": "YOUR_ACCESS_KEY",  # optional, default; request header "Access-Key" could replace it
                "secretKey": "YOUR_SECRET_KEY",  # optional, default; request header "Secret-Key" could replace it
                "buckets": ["YOUR_BUCKET_NAME"]  # optional, default; request header "Bucket-Name" could replace it
            }
        },
        "logging": {
            "level": logging.DEBUG,
            "format": "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s",
            "filename": ""  # empty -> log to stderr (logging default)
        }
    }
}["default"]
| 45.175 | 134 | 0.574986 |
acf0358ce6bd4806643fab9444032d06c1fc39e6 | 1,183 | py | Python | nipap/nipap/nipapconfig.py | job/NIPAP | fd2d82af45196b9875431e7c4bf2cc36a8ff50c1 | [
"MIT"
] | 1 | 2017-07-11T18:57:57.000Z | 2017-07-11T18:57:57.000Z | nipap/nipap/nipapconfig.py | job/NIPAP | fd2d82af45196b9875431e7c4bf2cc36a8ff50c1 | [
"MIT"
] | null | null | null | nipap/nipap/nipapconfig.py | job/NIPAP | fd2d82af45196b9875431e7c4bf2cc36a8ff50c1 | [
"MIT"
] | null | null | null | import ConfigParser
class NipapConfig(ConfigParser.SafeConfigParser):
    """ Makes configuration data available.
        Implemented as a class with a shared state (Borg pattern); once an
        instance has been created, new instances with the same state can be
        obtained by calling the constructor again.
    """
    __shared_state = {}
    _config = None
    _cfg_path = None
    def __init__(self, cfg_path=None, default={}):
        """ Takes config file path and command line arguments.

            NOTE(review): the mutable default for `default` is shared across
            calls, and the parent is initialized via
            ConfigParser.ConfigParser.__init__ although the class inherits
            from SafeConfigParser -- works because SafeConfigParser defines
            no __init__ of its own, but looks unintentional. TODO confirm.
        """
        # Borg pattern: all instances share one attribute dict.
        self.__dict__ = self.__shared_state
        if len(self.__shared_state) == 0:
            # First time - create new instance!
            self._cfg_path = cfg_path
            ConfigParser.ConfigParser.__init__(self, default)
            self.read_file()
    def read_file(self):
        """ Read the configuration file
        """
        # don't try to parse config file if we don't have one set
        if not self._cfg_path:
            return
        try:
            cfg_fp = open(self._cfg_path, 'r')
            self.readfp(cfg_fp)
        except IOError as exc:
            raise NipapConfigError(str(exc))
class NipapConfigError(Exception):
    """ Raised by NipapConfig when the configuration file cannot be read. """
    pass
| 23.66 | 77 | 0.611158 |
acf03614481fc192cc4bb8236950bf5d14f134a8 | 18,064 | py | Python | news_site/views.py | Sreejoy/news_site | 512f2c695f68476edffdd8f2ca36adc167c4d3eb | [
"MIT"
] | null | null | null | news_site/views.py | Sreejoy/news_site | 512f2c695f68476edffdd8f2ca36adc167c4d3eb | [
"MIT"
] | null | null | null | news_site/views.py | Sreejoy/news_site | 512f2c695f68476edffdd8f2ca36adc167c4d3eb | [
"MIT"
] | 2 | 2020-04-06T19:53:43.000Z | 2020-09-12T09:39:23.000Z | '''
imports
'''
from django.http.response import HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import timezone
from .models import users,news
from django.db.models import Q
from datetime import datetime,timedelta
import os,binascii
import simplejson
#generates random token for authentication
def generate_token():
    """Return a random hexadecimal token string used for session auth.

    The token is derived from 200 bytes of OS-level randomness.
    """
    raw_bytes = os.urandom(200)
    return str(binascii.b2a_hex(raw_bytes))
#validates token from "users" table
def validate_token(token):
    """Return True if `token` belongs to some user and has not expired.

    Any lookup failure or unexpected error is treated as an invalid token.
    """
    try:
        login_status = True
        try:
            user = users.objects.get(token=token)
        except:
            # No user carries this token (or the ORM lookup failed).
            login_status = False
        if login_status:
            # Token is valid only while the stored expiry lies in the future.
            expire_time = user.token_expire_time
            now=timezone.now()
            if now < expire_time:
                return True
            else:
                return False
        else:
            return False
    except Exception,e:
        print e
        return False
#creates new story, input - title,body,author, writes to "news" table of database, returns code '1' if successful, '0' if otherwise
def create_story(request):
    """Create a news story from POSTed title/body/author.

    Requires a valid `token`; responds with JSON {'code': '1'|'0', 'message': ...}.
    A non-POST request returns an empty JSON object.
    """
    result = {}
    if request.method == "POST":
        try:
            title = str(request.POST['title'].encode('utf-8').strip())
            body = str(request.POST['body'].encode('utf-8').strip())
            author = str(request.POST['author'].encode('utf-8').strip())
            token = str(request.POST['token'].encode('utf-8').strip())
            if not validate_token(token): #authorization by token
                result['code'] = '0'
                result['message'] = "Unauthorized request"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            insert_status = True
            try:
                # Persist the new row in the "news" table.
                news_object = news(
                    title=title,
                    body=body,
                    author=author
                )
                news_object.save()
            except Exception,e:
                print e
                insert_status = False
            if insert_status:
                result['code'] = '1'
                result['message'] = "Successfully created story."
            else:
                result['code'] = '0'
                result['message'] = "Couldn't create story."
        except:
            # Missing POST keys or any other failure ends up here.
            result['code'] ='0'
            result['message']="Couldn't create story."
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
def get_xml_string(key, value):
    """Wrap `value` in an XML element named `key`, e.g. <key>value</key>.

    Both arguments are converted with str(), matching %s formatting.
    """
    tag = str(key)
    return "<" + tag + ">" + str(value) + "</" + tag + ">"
#(used in story search)reads story, input - search_text,method(text/json), returns news in json/text, returns code '1' if successful, '0' if otherwise
def read_story(request):
    """Search stories by free text or numeric id and return a paginated page.

    GET parameters: search_text, method ('json' or anything else for XML),
    token (auth), item_per_page, item_page_current (1-based).
    Matches title, author, body substrings, or an exact news_id when
    search_text parses as an integer; results are ordered by news_id descending.
    """
    result = {}
    if request.method == "GET":
        try:
            search_text = str(request.GET.get(u'search_text', '').encode('utf-8').strip())
            try:
                news_id = int(search_text)
            except:
                # Non-numeric search text: -1 never matches a news_id.
                news_id = -1
            method = str(request.GET.get(u'method', '').encode('utf-8').strip()) # can be text or json
            token = str(request.GET.get(u'token', '').encode('utf-8').strip())
            item_per_page = int(request.GET.get(u'item_per_page', '').encode('utf-8').strip())
            item_page_current = int(request.GET.get(u'item_page_current', '').encode('utf-8').strip())
            # Slice bounds for the requested page (1-based page numbers).
            start_ = (item_per_page) * (item_page_current - 1)
            end_ = start_ + item_per_page
            if not validate_token(token): #authorization by token
                result['code'] = '0'
                result['message'] = "Unauthorized request"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            try:
                stories = news.objects.filter(Q(title__contains=search_text)|Q(author__contains=search_text)|Q(body__contains=search_text)|Q(news_id=news_id)).order_by('news_id').reverse()
            except Exception,e:
                print e
                stories = False
            story_data = []
            for story in stories:
                story_data.append([story.news_id, story.title, story.body, story.author])
            if str(method) == 'json': #for json format
                if stories: # news found by given id
                    Heading = ['News']
                    # 'count' is the total before pagination; 'TableData' the page.
                    result['TableData'] = story_data
                    result['count'] = len(result['TableData'])
                    result['TableData']= result['TableData'][start_:end_]
                    #result['TableData']=story
                    # NOTE(review): 'code' is the integer 1 here, while every
                    # other branch uses the strings '1'/'0' -- confirm clients cope.
                    result['code'] = 1
                    result['Heading']=Heading
                else:
                    result['TableData'] = []
                    result['code'] = '0'
                    result['message'] = "Couldn't find story for search='%s'" % search_text
                json = simplejson.dumps(result)
                return HttpResponse(json, content_type='application/json')
            else: #for text/xml format
                # NOTE(review): the XML branch is not paginated -- all matches
                # are emitted, unlike the JSON branch.
                xml_string="<all_news>"
                if story_data: # news found by given id
                    xml_string += get_xml_string('code', '1')
                    for each in story_data:
                        xml_string += "<news>"
                        xml_string += get_xml_string('news_id', each[0])
                        xml_string += get_xml_string('title', each[1])
                        xml_string += get_xml_string('body', each[2])
                        xml_string += get_xml_string('author', each[3])
                        xml_string += "</news>"
                else:
                    xml_string += get_xml_string('code', '0')
                    xml_string += get_xml_string('message', "Couldn't find story for search_text='%s'" % search_text)
                xml_string += "</all_news>"
                return HttpResponse(xml_string, content_type='application/xml')
        except Exception,e:
            print e
            result['code'] ='0'
            result['message']="Couldn't read story"
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#updates story in json format, input - news_id,title,body,author, updates "news" table of database, returns code '1' if successful, '0' if otherwise
def update_story(request):
    """Update the title/body/author of an existing story by news_id.

    Requires a valid `token`; responds with JSON {'code': '1'|'0', 'message': ...}.
    """
    result = {}
    if request.method == "POST":
        try:
            news_id = str(request.POST['news_id'].encode('utf-8').strip())
            title = str(request.POST['title'].encode('utf-8').strip())
            body = str(request.POST['body'].encode('utf-8').strip())
            author = str(request.POST['author'].encode('utf-8').strip())
            token = str(request.POST['token'].encode('utf-8').strip())
            if not validate_token(token): # authorization by token
                result['code'] = '0'
                result['message'] = "Unauthorized request"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            # Check the story exists before attempting the update.
            exist_status = True
            try:
                news.objects.get(news_id=news_id)
            except Exception,e:
                print e
                exist_status = False
            if exist_status:
                update_dict={
                    'title':title,
                    'body':body,
                    'author':author
                }
                update_status = True
                try:
                    news.objects.filter(news_id=news_id).update(**update_dict)
                except Exception, e:
                    print e
                    update_status = False
                if update_status:
                    result['code'] = '1'
                    result['message'] = "Successfully updated story."
                else:
                    result['code'] = '0'
                    result['message'] = "Couldn't update story."
            else:
                result['code'] = '0'
                result['message'] = "Couldn't find story with id='%s'"%news_id
        except:
            result['code'] ='0'
            result['message']="Couldn't update story."
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#deletes story, input - news_id, deletes story from "news" table of database, returns code '1' if successful, '0' if otherwise
def delete_story(request):
    """Delete a story by news_id (GET parameter).

    Requires a valid `token`; responds with JSON {'code': '1'|'0', 'message': ...}.
    """
    result = {}
    if request.method == "GET":
        try:
            news_id = str(request.GET.get('news_id','').encode('utf-8').strip())
            token = str(request.GET.get('token','').encode('utf-8').strip())
            if not validate_token(token): # authorization by token
                result['code'] = '0'
                result['message'] = "Unauthorized request"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            # Check the story exists before attempting the delete.
            exist_status = True
            try:
                news.objects.get(news_id=news_id)
            except Exception, e:
                print e
                exist_status = False
            if exist_status:
                delete_status = True
                try:
                    news.objects.filter(news_id=news_id).delete()
                except Exception, e:
                    print e
                    delete_status = False
                if delete_status:
                    result['code'] = '1'
                    result['message'] = "Successfully deleted story."
                else:
                    result['code'] = '0'
                    result['message'] = "Couldn't delete story."
            else:
                result['code'] = '0'
                result['message'] = "Couldn't find story with id='%s'"%news_id
        except:
            result['code'] ='0'
            result['message']="Couldn't delete story."
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#creates new user, input - username,password1,password2, returns the token if signup is successful,writes to "users" table of database, returns code '1' if successful, '0' if otherwise
def signup(request):
    """Create a new user from POSTed username/password1/password2.

    On success returns JSON {'code': '1', 'message': ..., 'token': <auth token>};
    the token is valid for 7 days.
    """
    result = {}
    if request.method == "POST":
        try:
            username = str(request.POST['username'].encode('utf-8'))
            password1 = str(request.POST['password1'].encode('utf-8'))
            password2 = str(request.POST['password2'].encode('utf-8'))
            if username == "" or password1 == "" or password2 == "":
                result['code'] = '0'
                result['message'] = "Username or Password can't be empty"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            if password1 != password2:
                result['code'] = '0'
                result['message'] = "Passwords didn't match"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            # login_status doubles as "username already taken" here.
            login_status = True
            try:
                users.objects.get(user_name=username)
            except:
                login_status = False
            if not login_status:
                token = generate_token()
                insert_status = True
                try:
                    # NOTE(review): the password is stored in plain text --
                    # consider hashing before persisting.
                    user = users(
                        user_name=username,
                        password=password1,
                        token=token,
                        token_expire_time = datetime.now()+timedelta(days=7)
                    )
                    user.save()
                except:
                    insert_status = False
                if insert_status:
                    result['code'] = '1'
                    result['message'] = "Successfully created user."
                    result['token'] = token
                else:
                    result['code'] = '0'
                    result['message'] = "Couldn't create user"
            else:
                result['code'] = '0'
                result['message'] = "Username already exists"
        except Exception,e:
            result['code'] ='0'
            result['message']="Couldn't create user."
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#lets user to log in, input - username,password writes to "users" table of database,it updates the token and token expire date and return the token, returns code '1' if successful, '0' if otherwise
def login(request):
    """Authenticate a user and issue a fresh 7-day token.

    POST parameters: username, password. On success returns JSON
    {'code': '1', 'message': 'Login successful', 'token': <new token>}.
    """
    result = {}
    if request.method == "POST":
        try:
            username = str(request.POST['username'].encode('utf-8'))
            password = str(request.POST['password'].encode('utf-8'))
            if username == "" or password == "":
                result['code'] = '0'
                result['message'] = "Username or Password can't be empty"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            # NOTE(review): credentials are matched against a plain-text
            # password column -- consider hashed comparison.
            login_status = True
            try:
                users.objects.get(user_name=username, password=password)
            except:
                login_status = False
            if login_status:
                # Rotate the token and extend its expiry by 7 days.
                token = generate_token()
                update_dict={
                    'token': token,
                    'token_expire_time':datetime.now()+timedelta(days=7)
                }
                update_status = True
                try:
                    users.objects.filter(user_name=username).update(**update_dict)
                except:
                    update_status = False
                if update_status:
                    result['code'] = '1'
                    result['message'] = "Login successful"
                    result['token'] = token
                else:
                    result['code'] = '0'
                    result['message'] = "Incorrect Username/Password"
            else:
                result['code'] = '0'
                result['message'] = "Incorrect Username/Password"
        except:
            result['code'] ='0'
            result['message']="Bad Request"
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#lets user to log out, input - username,token writes to "users" table of database,it deletes the token and return the token, returns code '1' if successful, '0' if otherwise
def logout(request):
    """Log a user out by clearing the stored token.

    GET parameters: username, token. Responds with JSON
    {'code': '1'|'0', 'message': ...}.
    """
    result = {}
    if request.method == "GET":
        try:
            username = str(request.GET.get('username','').encode('utf-8'))
            token = str(request.GET.get('token','').encode('utf-8'))
            if username == "" or token == "":
                result['code'] = '0'
                result['message'] = "Username or Token can't be empty"
                result = simplejson.dumps(result)
                return HttpResponse(result, content_type='application/json')
            # NOTE(review): only the username's existence is checked; the
            # supplied token itself is never validated here -- confirm intended.
            login_status = True
            try:
                users.objects.get(user_name=username)
            except:
                login_status = False
            if login_status:
                # Clearing the token invalidates future token validation.
                update_dict={
                    'token': ''
                }
                update_status = True
                try:
                    users.objects.filter(user_name=username).update(**update_dict)
                except:
                    update_status = False
                if update_status:
                    result['code'] = '1'
                    result['message'] = "Logout successful"
                else:
                    result['code'] = '0'
                    result['message'] = "Couldn't logout"
            else:
                result['code'] = '0'
                result['message'] = "User not logged in"
        except:
            result['code'] ='0'
            result['message']="Bad Request"
    result = simplejson.dumps(result)
    return HttpResponse(result, content_type='application/json')
#UI Related functions
def LoginPage(request):
    """Render the login page template with an empty context."""
    context = RequestContext(request)
    return render_to_response('login.html', {}, context_instance=context)
def all_story(request):
    """Render the story listing page template with an empty context."""
    context = RequestContext(request)
    return render_to_response('story.html', {}, context_instance=context)
#gives user search suggestions
def search_suggestions(request):
    """Return paginated autocomplete suggestions for the search box (AJAX only).

    GET parameters: term (query text), page (1-based). Responds with a
    select2-style JSON payload {'items': [...], 'more': bool}; the first item
    always echoes the raw query as a 'Text' entry, followed by matching
    'News' titles. Non-AJAX or non-GET requests fall through and return None.
    """
    if request.is_ajax():
        if request.method == 'GET':
            items_per_page = 10
            has_next_page = False
            requested_page = int(request.GET.get(u'page', ''))
            value = request.GET.get(u'term', '')
            # Slice bounds for the requested page (1-based page numbers).
            start = items_per_page * (requested_page - 1)
            end = start + items_per_page
            try:
                stories = news.objects.filter(Q(title__contains=value)|Q(author__contains=value)|Q(body__contains=value)).order_by('news_id').reverse()
            except Exception,e:
                print e
                stories = False
            story_data = []
            for story in stories:
                story_data.append([story.news_id, story.title])
            if end < len(story_data):
                has_next_page = True
            story_data = story_data[start:end]
            results = []
            # First suggestion: the raw query itself as a plain-text search.
            result_dict = {}
            result_dict['id'] = value
            result_dict['text'] = value + ' - ' + 'Text'
            result_dict['type'] = 'Text'
            results.append(result_dict)
            # Then one suggestion per matching story title.
            for each_item in story_data:
                result_dict = {}
                result_dict['id'] = each_item[0]
                result_dict['text'] = each_item[1] + ' - News'
                result_dict['type'] = 'News'
                results.append(result_dict)
            Dict = {
                'items': results,
                'more': has_next_page
            }
            result = simplejson.dumps(Dict)
            return HttpResponse(result, content_type='application/json')
acf036aba7a34c7715a05a7afde499a28620ea2c | 1,190 | py | Python | airflow/contrib/operators/opsgenie_alert_operator.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 79 | 2021-10-15T07:32:27.000Z | 2022-03-28T04:10:19.000Z | airflow/contrib/operators/opsgenie_alert_operator.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 153 | 2021-10-15T05:23:46.000Z | 2022-02-23T06:07:10.000Z | airflow/contrib/operators/opsgenie_alert_operator.py | emilioego/airflow | 3457c7847cd24413ff5b622e65c27d8370f94502 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 23 | 2021-10-15T02:36:37.000Z | 2022-03-17T02:59:27.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.opsgenie.operators.opsgenie_alert`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.opsgenie.operators.opsgenie_alert import OpsgenieAlertOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.opsgenie.operators.opsgenie_alert`.",
DeprecationWarning,
stacklevel=2,
)
| 39.666667 | 99 | 0.777311 |
acf0371e418658dfe88f0eab0ea5291c92cb5f4c | 10,721 | py | Python | virtual_env/lib/python3.5/site-packages/google_compute_engine/networking/tests/network_daemon_test.py | straydag/To_Due_Backend | ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6 | [
"MIT"
] | 2 | 2019-06-25T18:25:49.000Z | 2019-06-27T04:48:53.000Z | virtual_env/lib/python3.5/site-packages/google_compute_engine/networking/tests/network_daemon_test.py | straydag/To_Due_Backend | ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6 | [
"MIT"
] | 6 | 2020-09-08T00:13:19.000Z | 2022-02-27T01:04:42.000Z | virtual_env/lib/python3.5/site-packages/google_compute_engine/networking/tests/network_daemon_test.py | straydag/To_Due_Backend | ac91f5ebabe8e4f2b6db7faa5ccbd30ebdb4e3f6 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for network_daemon.py module."""
from google_compute_engine import network_utils
from google_compute_engine.networking import network_daemon
from google_compute_engine.test_compat import mock
from google_compute_engine.test_compat import unittest
class NetworkDaemonTest(unittest.TestCase):
  """Unit tests for network_daemon.NetworkDaemon.

  All collaborators (logger, metadata watcher, IP forwarding, network setup)
  are replaced with mocks; the tests assert the exact mock-call sequences.
  """
  def setUp(self):
    # mock_setup is an autospec'd NetworkDaemon used as a fake `self` when
    # calling unbound methods like HandleNetworkInterfaces below.
    self.mock_logger = mock.Mock()
    self.mock_watcher = mock.Mock()
    self.mock_setup = mock.create_autospec(network_daemon.NetworkDaemon)
    self.mock_setup.logger = self.mock_logger
    self.mock_setup.watcher = self.mock_watcher
    self.mock_ip_forwarding = mock.Mock()
    self.mock_network_setup = mock.Mock()
    self.mock_network_utils = mock.Mock()
    self.mock_setup.ip_forwarding = self.mock_ip_forwarding
    self.mock_setup.network_setup = self.mock_network_setup
    self.mock_setup.network_utils = self.mock_network_utils
  # Happy path: the constructor wires up collaborators, takes the lock file,
  # and starts watching metadata, in exactly this order.
  @mock.patch('google_compute_engine.networking.network_daemon.ip_forwarding')
  @mock.patch('google_compute_engine.networking.network_daemon.network_setup')
  @mock.patch('google_compute_engine.networking.network_daemon.network_utils')
  @mock.patch('google_compute_engine.networking.network_daemon.metadata_watcher')
  @mock.patch('google_compute_engine.networking.network_daemon.logger')
  @mock.patch('google_compute_engine.networking.network_daemon.file_utils')
  def testNetworkDaemon(
      self, mock_lock, mock_logger, mock_watcher, mock_network_utils,
      mock_network_setup, mock_ip_forwarding):
    mock_logger_instance = mock.Mock()
    mock_logger.Logger.return_value = mock_logger_instance
    mocks = mock.Mock()
    mocks.attach_mock(mock_lock, 'lock')
    mocks.attach_mock(mock_logger, 'logger')
    mocks.attach_mock(mock_network_utils, 'network')
    mocks.attach_mock(mock_ip_forwarding, 'forwarding')
    mocks.attach_mock(mock_network_setup, 'network_setup')
    mocks.attach_mock(mock_watcher, 'watcher')
    metadata_key = network_daemon.NetworkDaemon.network_interface_metadata_key
    with mock.patch.object(
        network_daemon.NetworkDaemon, 'HandleNetworkInterfaces'
    ) as mock_handle:
      network_daemon.NetworkDaemon(
          ip_forwarding_enabled=True,
          proto_id='66',
          ip_aliases=None,
          target_instance_ips=None,
          dhclient_script='x',
          dhcp_command='y',
          network_setup_enabled=True,
          debug=True)
    expected_calls = [
        mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
        mock.call.forwarding.IpForwarding(proto_id='66', debug=True),
        mock.call.network_setup.NetworkSetup(
            debug=True, dhclient_script='x', dhcp_command='y'),
        mock.call.network.NetworkUtils(logger=mock_logger_instance),
        mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
        mock.call.lock.LockFile(network_daemon.LOCKFILE),
        mock.call.lock.LockFile().__enter__(),
        mock.call.logger.Logger().info(mock.ANY),
        mock.call.watcher.MetadataWatcher().WatchMetadata(
            mock_handle, metadata_key=metadata_key, recursive=True,
            timeout=mock.ANY),
        mock.call.lock.LockFile().__exit__(None, None, None),
    ]
    self.assertEqual(mocks.mock_calls, expected_calls)
  # Failure path: when taking the lock file raises IOError, the daemon logs a
  # warning and never enters the metadata watch loop.
  @mock.patch('google_compute_engine.networking.network_daemon.ip_forwarding')
  @mock.patch('google_compute_engine.networking.network_daemon.network_setup')
  @mock.patch('google_compute_engine.networking.network_daemon.network_utils')
  @mock.patch('google_compute_engine.networking.network_daemon.metadata_watcher')
  @mock.patch('google_compute_engine.networking.network_daemon.logger')
  @mock.patch('google_compute_engine.networking.network_daemon.file_utils')
  def testNetworkDaemonError(
      self, mock_lock, mock_logger, mock_watcher, mock_network_utils,
      mock_network_setup, mock_ip_forwarding):
    mock_logger_instance = mock.Mock()
    mock_logger.Logger.return_value = mock_logger_instance
    mocks = mock.Mock()
    mocks.attach_mock(mock_lock, 'lock')
    mocks.attach_mock(mock_logger, 'logger')
    mocks.attach_mock(mock_ip_forwarding, 'forwarding')
    mocks.attach_mock(mock_network_setup, 'network_setup')
    mocks.attach_mock(mock_network_utils, 'network')
    mocks.attach_mock(mock_watcher, 'watcher')
    self.mock_setup._ExtractInterfaceMetadata.return_value = []
    mock_lock.LockFile.side_effect = IOError('Test Error')
    with mock.patch.object(
        network_daemon.NetworkDaemon, 'HandleNetworkInterfaces'):
      network_daemon.NetworkDaemon(
          ip_forwarding_enabled=False,
          proto_id='66',
          ip_aliases=None,
          target_instance_ips=None,
          dhclient_script='x',
          dhcp_command='y',
          network_setup_enabled=False,
          debug=True)
    expected_calls = [
        mock.call.logger.Logger(name=mock.ANY, debug=True, facility=mock.ANY),
        mock.call.forwarding.IpForwarding(proto_id='66', debug=True),
        mock.call.network_setup.NetworkSetup(
            debug=True, dhclient_script='x', dhcp_command='y'),
        mock.call.network.NetworkUtils(logger=mock_logger_instance),
        mock.call.watcher.MetadataWatcher(logger=mock_logger_instance),
        mock.call.lock.LockFile(network_daemon.LOCKFILE),
        mock.call.logger.Logger().warning('Test Error'),
    ]
    self.assertEqual(mocks.mock_calls, expected_calls)
  # With both features enabled: network setup runs on non-primary interfaces
  # and IP forwarding runs on every interface.
  def testHandleNetworkInterfaces(self):
    mocks = mock.Mock()
    mocks.attach_mock(self.mock_ip_forwarding, 'forwarding')
    mocks.attach_mock(self.mock_network_setup, 'network_setup')
    mocks.attach_mock(self.mock_setup, 'setup')
    self.mock_setup.ip_aliases = None
    self.mock_setup.target_instance_ips = None
    self.mock_setup.ip_forwarding_enabled = True
    self.mock_setup.network_setup_enabled = True
    self.mock_setup._ExtractInterfaceMetadata.return_value = [
        network_daemon.NetworkDaemon.NetworkInterface('a'),
        network_daemon.NetworkDaemon.NetworkInterface('b'),
    ]
    result = mock.Mock()
    network_daemon.NetworkDaemon.HandleNetworkInterfaces(
        self.mock_setup, result)
    expected_calls = [
        mock.call.setup._ExtractInterfaceMetadata(result),
        mock.call.network_setup.EnableNetworkInterfaces(['b']),
        mock.call.forwarding.HandleForwardedIps('a', None, None),
        mock.call.forwarding.HandleForwardedIps('b', None, None),
    ]
    self.assertEqual(mocks.mock_calls, expected_calls)
  # With both features disabled: only metadata extraction happens.
  def testHandleNetworkInterfacesDisabled(self):
    mocks = mock.Mock()
    mocks.attach_mock(self.mock_ip_forwarding, 'forwarding')
    mocks.attach_mock(self.mock_network_setup, 'network_setup')
    mocks.attach_mock(self.mock_setup, 'setup')
    self.mock_setup.ip_aliases = None
    self.mock_setup.target_instance_ips = None
    self.mock_setup.ip_forwarding_enabled = False
    self.mock_setup.network_setup_enabled = False
    self.mock_setup._ExtractInterfaceMetadata.return_value = [
        network_daemon.NetworkDaemon.NetworkInterface('a'),
        network_daemon.NetworkDaemon.NetworkInterface('b'),
    ]
    result = mock.Mock()
    network_daemon.NetworkDaemon.HandleNetworkInterfaces(
        self.mock_setup, result)
    expected_calls = [
        mock.call.setup._ExtractInterfaceMetadata(result),
    ]
    self.assertEqual(mocks.mock_calls, expected_calls)
  # With ip_aliases and target_instance_ips enabled, those lists are merged
  # into forwarded IPs; entries with unknown or missing MACs are dropped.
  def testExtractInterfaceMetadata(self):
    self.mock_setup.ip_aliases = True
    self.mock_setup.target_instance_ips = True
    self.mock_setup.network_utils = network_utils.NetworkUtils()
    self.mock_setup.network_utils.interfaces = {
        '1': 'eth0', '2': 'eth1', '3': 'eth2',
    }
    metadata = [
        {
            'mac': '1',
            'forwardedIps': ['a'],
        },
        {
            'mac': '2',
            'forwardedIps': ['b'],
            'ipAliases': ['banana'],
            'targetInstanceIps': ['baklava'],
        },
        {
            'mac': '3',
            'ipAliases': ['cherry'],
            'targetInstanceIps': ['cake'],
        },
        {
            'mac': '4',
        },
        {
            'forwardedIps': ['d'],
            'ipAliases': ['date'],
            'targetInstanceIps': ['doughnuts'],
        },
    ]
    expected_interfaces = [
        network_daemon.NetworkDaemon.NetworkInterface('eth0', ['a']),
        network_daemon.NetworkDaemon.NetworkInterface(
            'eth1', ['b', 'banana', 'baklava']),
        network_daemon.NetworkDaemon.NetworkInterface(
            'eth2', ['cherry', 'cake']),
    ]
    actual_interfaces = network_daemon.NetworkDaemon._ExtractInterfaceMetadata(
        self.mock_setup, metadata)
    for actual, expected in zip(actual_interfaces, expected_interfaces):
      self.assertEqual(actual.name, expected.name)
      self.assertEqual(actual.forwarded_ips, expected.forwarded_ips)
  # With ip_aliases and target_instance_ips disabled, only forwardedIps
  # contribute to the forwarded IP list.
  def testExtractInterfaceMetadataWithoutOptions(self):
    self.mock_setup.ip_aliases = None
    self.mock_setup.target_instance_ips = None
    self.mock_setup.network_utils = network_utils.NetworkUtils()
    self.mock_setup.network_utils.interfaces = {
        '1': 'eth0', '2': 'eth1', '3': 'eth2',
    }
    metadata = [
        {
            'mac': '1',
            'forwardedIps': ['a'],
        },
        {
            'mac': '2',
            'forwardedIps': ['b'],
            'ipAliases': ['banana'],
            'targetInstanceIps': ['baklava'],
        },
        {
            'mac': '3',
            'ipAliases': ['cherry'],
            'targetInstanceIps': ['cake'],
        },
    ]
    expected_interfaces = [
        network_daemon.NetworkDaemon.NetworkInterface('eth0', ['a']),
        network_daemon.NetworkDaemon.NetworkInterface('eth1', ['b']),
        network_daemon.NetworkDaemon.NetworkInterface('eth2', []),
    ]
    actual_interfaces = network_daemon.NetworkDaemon._ExtractInterfaceMetadata(
        self.mock_setup, metadata)
    for actual, expected in zip(actual_interfaces, expected_interfaces):
      self.assertEqual(actual.name, expected.name)
      self.assertEqual(actual.forwarded_ips, expected.forwarded_ips)
| 41.234615 | 81 | 0.695458 |
acf03725d32a01edee5a45a7f6c2320c2a8e7816 | 1,212 | py | Python | tests/components/dexcom/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/dexcom/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/dexcom/__init__.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Tests for the Dexcom integration."""
import json
from unittest.mock import patch
from pydexcom import GlucoseReading
from homeassistant.components.dexcom.const import CONF_SERVER, DOMAIN, SERVER_US
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.common import MockConfigEntry, load_fixture
# Config-entry data used by the tests to set up the Dexcom integration.
CONFIG = {
    CONF_USERNAME: "test_username",
    CONF_PASSWORD: "test_password",
    CONF_SERVER: SERVER_US,
}
# Canned glucose reading parsed from the dexcom/data.json fixture.
GLUCOSE_READING = GlucoseReading(json.loads(load_fixture("data.json", "dexcom")))
async def init_integration(hass) -> MockConfigEntry:
    """Set up the Dexcom integration in Home Assistant.

    Creates a mock config entry, patches the Dexcom client so no network
    calls are made, runs the config-entry setup to completion, and returns
    the entry for further assertions.
    """
    entry = MockConfigEntry(
        domain=DOMAIN,
        title="test_username",
        unique_id="test_username",
        data=CONFIG,
        options=None,
    )
    # Patch the pydexcom client: readings come from the fixture and session
    # creation returns a fixed id, so setup succeeds offline.
    with patch(
        "homeassistant.components.dexcom.Dexcom.get_current_glucose_reading",
        return_value=GLUCOSE_READING,
    ), patch(
        "homeassistant.components.dexcom.Dexcom.create_session",
        return_value="test_session_id",
    ):
        entry.add_to_hass(hass)
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    return entry
| 28.186047 | 81 | 0.721122 |
acf037e00e3c3e5e6a3a249c0eae39f7f6b73685 | 18,368 | py | Python | research/lfads/distributions.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | 1 | 2021-05-17T01:42:29.000Z | 2021-05-17T01:42:29.000Z | research/lfads/distributions.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | research/lfads/distributions.py | jdavidagudelo/tensorflow-models | 6f019beec73b01861363bf717706e27f4210b979 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
import numpy as np
import tensorflow as tf
from research.lfads.utils import linear, log_sum_exp
class Poisson(object):
  """Poisson distribution.

  Computes the log probability of integer counts under the model.
  """
  def __init__(self, log_rates):
    """ Create Poisson distributions with log_rates parameters.

    Args:
      log_rates: a tensor-like list of log rates underlying the Poisson dist.
    """
    self.logr = log_rates
  def logp(self, bin_counts):
    """Compute the log probability for the counts in the bin, under the model.

    Args:
      bin_counts: array-like integer counts

    Returns:
      The log-probability under the Poisson models for each element of
      bin_counts.
    """
    k = tf.to_float(bin_counts)
    # log poisson(k, r) = log(r^k * e^(-r) / k!) = k log(r) - r - log k!
    # log poisson(k, r=exp(x)) = k * x - exp(x) - lgamma(k + 1)
    return k * self.logr - tf.exp(self.logr) - tf.lgamma(k + 1)
def diag_gaussian_log_likelihood(z, mu=0.0, logvar=0.0):
  """Log-likelihood under a Gaussian distribution with diagonal covariance.

  Returns the log-likelihood for each dimension.  One should sum the
  results for the log-likelihood under the full multidimensional model.

  Args:
    z: The value to compute the log-likelihood.
    mu: The mean of the Gaussian
    logvar: The log variance of the Gaussian.

  Returns:
    The log-likelihood under the Gaussian model.
  """
  # Standardize z, then evaluate the per-dimension Gaussian log-density:
  # ln N(z; mu, sigma) = -0.5 * (ln sigma^2 + ln 2*pi + ((z - mu)/sigma)^2)
  standardized = (z - mu) / tf.exp(0.5 * logvar)
  neg_twice_logp = logvar + np.log(2 * np.pi) + tf.square(standardized)
  return -0.5 * neg_twice_logp
def gaussian_pos_log_likelihood(unused_mean, logvar, noise):
  """Gaussian log-likelihood function for a posterior in VAE

  Note: This function is specialized for a posterior distribution, that has the
  form of z = mean + sigma * noise.

  Args:
    unused_mean: ignore
    logvar: The log variance of the distribution
    noise: The noise used in the sampling of the posterior.

  Returns:
    The log-likelihood under the Gaussian model.
  """
  # Because z = mean + sigma * noise, the standardized residual is exactly
  # `noise`: ln N(z; mean, sigma) = -ln(sigma) - 0.5 ln 2pi - noise^2 / 2
  neg_twice_logp = logvar + np.log(2 * np.pi) + tf.square(noise)
  return -0.5 * neg_twice_logp
class Gaussian(object):
  """Base class for Gaussian distribution classes.

  Marker base; concrete subclasses provide sampling and logp.
  """
  pass
class DiagonalGaussian(Gaussian):
  """Diagonal Gaussian with different constant mean and variances in each
  dimension.
  """
  def __init__(self, batch_size, z_size, mean, logvar):
    """Create a diagonal gaussian distribution.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      mean: The N-D mean of the distribution.
      logvar: The N-D log variance of the diagonal distribution.
    """
    size__xz = [None, z_size]
    self.mean = mean # bxn already
    self.logvar = logvar # bxn already
    # Reparameterized sample: z = mean + sigma * eps, eps ~ N(0, I).
    self.noise = noise = tf.random_normal(tf.shape(logvar))
    self.sample = mean + tf.exp(0.5 * logvar) * noise
    mean.set_shape(size__xz)
    logvar.set_shape(size__xz)
    self.sample.set_shape(size__xz)
  def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute likelihood for, if None, use sample.

    Returns:
      The likelihood of z under the model.
    """
    if z is None:
      z = self.sample
    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    # NOTE(review): `z == self.sample` appears intended as an identity check
    # on the tensor object (the caller passed our own sample) -- confirm the
    # TF version in use does not overload == elementwise here.
    if z == self.sample:
      return gaussian_pos_log_likelihood(self.mean, self.logvar, self.noise)
    return diag_gaussian_log_likelihood(z, self.mean, self.logvar)
class LearnableDiagonalGaussian(Gaussian):
  """Diagonal Gaussian whose mean and variance are learned parameters."""
  def __init__(self, batch_size, z_size, name, mean_init=0.0,
               var_init=1.0, var_min=0.0, var_max=1000000.0):
    """Create a learnable diagonal gaussian distribution.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      name: prefix name for the mean and log TF variables.
      mean_init (optional): The N-D mean initialization of the distribution.
      var_init (optional): The N-D variance initialization of the diagonal
        distribution.
      var_min (optional): The minimum value the learned variance can take in any
        dimension.
      var_max (optional): The maximum value the learned variance can take in any
        dimension.
    """
    size_1xn = [1, z_size]
    size__xn = [None, z_size]
    size_bx1 = tf.stack([batch_size, 1])
    # Sanity-check the variance bounds before building any TF variables.
    assert var_init > 0.0, "Problems"
    assert var_max >= var_min, "Problems"
    assert var_init >= var_min, "Problems"
    assert var_max >= var_init, "Problems"
    # Learned mean, shared across the batch via tiling.
    z_mean_1xn = tf.get_variable(name=name + "/mean", shape=size_1xn,
                                 initializer=tf.constant_initializer(mean_init))
    self.mean_bxn = mean_bxn = tf.tile(z_mean_1xn, size_bx1)
    mean_bxn.set_shape(size__xn)  # tile loses shape
    log_var_init = np.log(var_init)
    # Only train the variance if the [var_min, var_max] interval is non-empty.
    if var_max > var_min:
      var_is_trainable = True
    else:
      var_is_trainable = False
    z_logvar_1xn = \
        tf.get_variable(name=(name + "/logvar"), shape=size_1xn,
                        initializer=tf.constant_initializer(log_var_init),
                        trainable=var_is_trainable)
    if var_is_trainable:
      # Squash the variance through a sigmoid so it stays in [var_min, var_max].
      # NOTE(review): the stored logvar variable is exponentiated *before* the
      # sigmoid, i.e. it is treated as a logit scale - confirm this matches the
      # intended parameterization.
      z_logit_var_1xn = tf.exp(z_logvar_1xn)
      z_var_1xn = tf.nn.sigmoid(z_logit_var_1xn) * (var_max - var_min) + var_min
      z_logvar_1xn = tf.log(z_var_1xn)
    logvar_bxn = tf.tile(z_logvar_1xn, size_bx1)
    self.logvar_bxn = logvar_bxn
    # Reparameterized sample: mean + sigma * eps, eps ~ N(0, I).
    self.noise_bxn = noise_bxn = tf.random_normal(tf.shape(logvar_bxn))
    self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn
  def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute likelihood for, if None, use sample.

    Returns:
      The likelihood of z under the model.
    """
    if z is None:
      z = self.sample
    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    if z == self.sample_bxn:
      return gaussian_pos_log_likelihood(self.mean_bxn, self.logvar_bxn,
                                         self.noise_bxn)
    return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
  @property
  def mean(self):
    # Batch-tiled learned mean tensor.
    return self.mean_bxn
  @property
  def logvar(self):
    # Batch-tiled (possibly sigmoid-squashed) log variance tensor.
    return self.logvar_bxn
  @property
  def sample(self):
    # Reparameterized sample created at graph-construction time.
    return self.sample_bxn
class DiagonalGaussianFromInput(Gaussian):
  """Diagonal Gaussian whose mean and variance are conditioned on other
  variables.

  Note: the parameters to convert from input to the learned mean and log
  variance are held in this class.
  """
  def __init__(self, x_bxu, z_size, name, var_min=0.0):
    """Create an input dependent diagonal Gaussian distribution.

    Args:
      x_bxu: The input tensor from which the mean and variance are computed,
        via a linear transformation of x.  I.e.
          mu = Wx + b, log(var) = Mx + c
      z_size: The size of the distribution.
      name: The name to prefix to learned variables.
      var_min (optional): Minimal variance allowed.  This is an additional
        way to control the amount of information getting through the stochastic
        layer.
    """
    size_bxn = tf.stack([tf.shape(x_bxu)[0], z_size])
    # Mean and log variance are each an affine projection of the input
    # (`linear` is a helper defined elsewhere in this module).
    self.mean_bxn = mean_bxn = linear(x_bxu, z_size, name=(name + "/mean"))
    logvar_bxn = linear(x_bxu, z_size, name=(name + "/logvar"))
    if var_min > 0.0:
      # Floor the variance at var_min (in variance space, not log space).
      logvar_bxn = tf.log(tf.exp(logvar_bxn) + var_min)
    self.logvar_bxn = logvar_bxn
    # Reparameterized sample: mean + sigma * eps, eps ~ N(0, I).
    self.noise_bxn = noise_bxn = tf.random_normal(size_bxn)
    self.noise_bxn.set_shape([None, z_size])
    self.sample_bxn = mean_bxn + tf.exp(0.5 * logvar_bxn) * noise_bxn
  def logp(self, z=None):
    """Compute the log-likelihood under the distribution.

    Args:
      z (optional): value to compute likelihood for, if None, use sample.

    Returns:
      The likelihood of z under the model.
    """
    if z is None:
      z = self.sample
    # This is needed to make sure that the gradients are simple.
    # The value of the function shouldn't change.
    if z == self.sample_bxn:
      return gaussian_pos_log_likelihood(self.mean_bxn,
                                         self.logvar_bxn, self.noise_bxn)
    return diag_gaussian_log_likelihood(z, self.mean_bxn, self.logvar_bxn)
  @property
  def mean(self):
    # Input-conditioned mean, shape [batch, z_size].
    return self.mean_bxn
  @property
  def logvar(self):
    # Input-conditioned log variance, shape [batch, z_size].
    return self.logvar_bxn
  @property
  def sample(self):
    # Reparameterized sample, shape [batch, z_size].
    return self.sample_bxn
class GaussianProcess:
  """Abstract marker base class for Gaussian-process priors.

  Used only for isinstance checks in the sampled-KL cost class below.
  """
  pass
class LearnableAutoRegressive1Prior(GaussianProcess):
  """AR(1) model where autocorrelation and process variance are learned
  parameters.  Assumed zero mean.
  """
  def __init__(self, batch_size, z_size,
               autocorrelation_taus, noise_variances,
               do_train_prior_ar_atau, do_train_prior_ar_nvar,
               num_steps, name):
    """Create a learnable autoregressive (1) process.

    Args:
      batch_size: The size of the batch, i.e. 0th dim in 2D tensor of samples.
      z_size: The dimension of the distribution, i.e. 1st dim in 2D tensor.
      autocorrelation_taus: The auto correlation time constant of the AR(1)
        process.  A value of 0 is uncorrelated gaussian noise.
      noise_variances: The variance of the additive noise, *not* the process
        variance.
      do_train_prior_ar_atau: Train or leave as constant, the autocorrelation?
      do_train_prior_ar_nvar: Train or leave as constant, the noise variance?
      num_steps: Number of steps to run the process.
      name: The name to prefix to learned TF variables.
    """
    # Note the use of the plural in all of these quantities.  This is intended
    # to mark that even though a sample z_t from the posterior is thought of a
    # single sample of a multidimensional gaussian, the prior is actually
    # thought of as U AR(1) processes, where U is the dimension of the inferred
    # input.
    size_bx1 = tf.stack([batch_size, 1])
    size__xu = [None, z_size]
    # process variance, the variance at time t over all instantiations of AR(1)
    # with these parameters.
    log_evar_inits_1xu = tf.expand_dims(tf.log(noise_variances), 0)
    self.logevars_1xu = logevars_1xu = \
        tf.Variable(log_evar_inits_1xu, name=name + "/logevars", dtype=tf.float32,
                    trainable=do_train_prior_ar_nvar)
    self.logevars_bxu = logevars_bxu = tf.tile(logevars_1xu, size_bx1)
    logevars_bxu.set_shape(size__xu)  # tile loses shape
    # \tau, which is the autocorrelation time constant of the AR(1) process
    log_atau_inits_1xu = tf.expand_dims(tf.log(autocorrelation_taus), 0)
    self.logataus_1xu = logataus_1xu = \
        tf.Variable(log_atau_inits_1xu, name=name + "/logatau", dtype=tf.float32,
                    trainable=do_train_prior_ar_atau)
    # phi in x_t = \mu + phi x_tm1 + \eps
    # phi = exp(-1/tau)
    # phi = exp(-1/exp(logtau))
    # phi = exp(-exp(-logtau))
    phis_1xu = tf.exp(-tf.exp(-logataus_1xu))
    self.phis_bxu = phis_bxu = tf.tile(phis_1xu, size_bx1)
    phis_bxu.set_shape(size__xu)
    # process noise
    # pvar = evar / (1- phi^2)
    # logpvar = log ( exp(logevar) / (1 - phi^2) )
    # logpvar = logevar - log(1-phi^2)
    # logpvar = logevar - (log(1-phi) + log(1+phi))
    self.logpvars_1xu = \
        logevars_1xu - tf.log(1.0 - phis_1xu) - tf.log(1.0 + phis_1xu)
    self.logpvars_bxu = logpvars_bxu = tf.tile(self.logpvars_1xu, size_bx1)
    logpvars_bxu.set_shape(size__xu)
    # process mean (zero but included in for completeness)
    self.pmeans_bxu = pmeans_bxu = tf.zeros_like(phis_bxu)
    # For sampling from the prior during de-novo generation.
    self.means_t = means_t = [None] * num_steps
    self.logvars_t = logvars_t = [None] * num_steps
    self.samples_t = samples_t = [None] * num_steps
    self.gaussians_t = gaussians_t = [None] * num_steps
    sample_bxu = tf.zeros_like(phis_bxu)
    for t in range(num_steps):
      # process variance used here to make process completely stationary
      if t == 0:
        logvar_pt_bxu = self.logpvars_bxu
      else:
        logvar_pt_bxu = self.logevars_bxu
      # AR(1) mean update: conditional mean of z_t given the previous sample.
      z_mean_pt_bxu = pmeans_bxu + phis_bxu * sample_bxu
      gaussians_t[t] = DiagonalGaussian(batch_size, z_size,
                                        mean=z_mean_pt_bxu,
                                        logvar=logvar_pt_bxu)
      sample_bxu = gaussians_t[t].sample
      samples_t[t] = sample_bxu
      logvars_t[t] = logvar_pt_bxu
      means_t[t] = z_mean_pt_bxu
  def logp_t(self, z_t_bxu, z_tm1_bxu=None):
    """Compute the log-likelihood under the distribution for a given time t,
    not the whole sequence.

    Args:
      z_t_bxu: sample to compute likelihood for at time t.
      z_tm1_bxu (optional): sample condition probability of z_t upon.

    Returns:
      The likelihood of p_t under the model at time t. i.e.
        p(z_t|z_tm1) = N(z_tm1 * phis, eps^2)
    """
    if z_tm1_bxu is None:
      # Marginal (stationary) distribution at time t.
      return diag_gaussian_log_likelihood(z_t_bxu, self.pmeans_bxu,
                                          self.logpvars_bxu)
    else:
      # Conditional distribution given the previous time step.
      means_t_bxu = self.pmeans_bxu + self.phis_bxu * z_tm1_bxu
      logp_tgtm1_bxu = diag_gaussian_log_likelihood(z_t_bxu,
                                                    means_t_bxu,
                                                    self.logevars_bxu)
      return logp_tgtm1_bxu
class KLCost_GaussianGaussian(object):
  """log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian prior. See
  eqn 10 and Appendix B in VAE for latter term,
  http://arxiv.org/abs/1312.6114

  The log p(x|z) term is the reconstruction error under the model.
  The KL term represents the penalty for passing information from the encoder
  to the decoder.
  To sample KL(q||p), we simply sample
        ln q - ln p
  by drawing samples from q and averaging.
  """
  def __init__(self, zs, prior_zs):
    """Create a lower bound in three parts, normalized reconstruction
    cost, normalized KL divergence cost, and their sum.

    E_q[ln p(z_i | z_{i+1}) / q(z_i | x)
    \int q(z) ln p(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_p^2) + \
       sigma_q^2 / sigma_p^2 + (mean_p - mean_q)^2 / sigma_p^2)
    \int q(z) ln q(z) dz = - 0.5 ln(2pi) - 0.5 \sum (ln(sigma_q^2) + 1)

    Args:
      zs: posterior z ~ q(z|x)
      prior_zs: prior zs
    """
    # L = -KL + log p(x|z), to maximize bound on likelihood
    # -L = KL - log p(x|z), to minimize bound on NLL
    # so 'KL cost' is positive KL divergence
    kl_b = 0.0
    for z, prior_z in zip(zs, prior_zs):
      assert isinstance(z, Gaussian)
      assert isinstance(prior_z, Gaussian)
      # ln(2pi) terms cancel: closed-form KL between two diagonal Gaussians,
      # summed over the latent dimension (axis 1).
      kl_b += 0.5 * tf.reduce_sum(
          prior_z.logvar - z.logvar
          + tf.exp(z.logvar - prior_z.logvar)
          + tf.square((z.mean - prior_z.mean) / tf.exp(0.5 * prior_z.logvar))
          - 1.0, [1])
    self.kl_cost_b = kl_b  # per-batch-element KL, shape [batch]
    self.kl_cost = tf.reduce_mean(kl_b)  # scalar mean KL over the batch
class KLCost_GaussianGaussianProcessSampled(object):
  """ log p(x|z) + KL(q||p) terms for Gaussian posterior and Gaussian process
  prior via sampling.

  The log p(x|z) term is the reconstruction error under the model.
  The KL term represents the penalty for passing information from the encoder
  to the decoder.
  To sample KL(q||p), we simply sample
        ln q - ln p
  by drawing samples from q and averaging.
  """
  def __init__(self, post_zs, prior_z_process):
    """Create a lower bound in three parts, normalized reconstruction
    cost, normalized KL divergence cost, and their sum.

    Args:
      post_zs: posterior z ~ q(z|x), a list (over time) of Gaussian objects.
      prior_z_process: prior AR(1) GaussianProcess.
    """
    assert len(post_zs) > 1, "GP is for time, need more than 1 time step."
    assert isinstance(prior_z_process, GaussianProcess), "Must use GP."
    # L = -KL + log p(x|z), to maximize bound on likelihood
    # -L = KL - log p(x|z), to minimize bound on NLL
    # so 'KL cost' is positive KL divergence
    z0_bxu = post_zs[0].sample
    logq_bxu = post_zs[0].logp(z0_bxu)
    logp_bxu = prior_z_process.logp_t(z0_bxu)
    z_tm1_bxu = z0_bxu
    for z_t in post_zs[1:]:
      # posterior is independent in time, prior is not
      z_t_bxu = z_t.sample
      logq_bxu += z_t.logp(z_t_bxu)
      logp_bxu += prior_z_process.logp_t(z_t_bxu, z_tm1_bxu)
      # BUG FIX: previously this assigned to a dead local `z_tm1`, leaving
      # z_tm1_bxu stuck at z_0, so every prior term conditioned on the first
      # time step instead of the previous one.
      z_tm1_bxu = z_t_bxu
    kl_bxu = logq_bxu - logp_bxu
    kl_b = tf.reduce_sum(kl_bxu, [1])
    self.kl_cost_b = kl_b  # per-batch-element sampled KL, shape [batch]
    self.kl_cost = tf.reduce_mean(kl_b)  # scalar mean sampled KL
| 37.107071 | 86 | 0.627123 |
acf038b3f78b1e285ab6fae452432f81e0d75adc | 360 | py | Python | dask_geopandas/__init__.py | jorisvandenbossche/dask-geopandas-1 | 80833e2211bc326f51ed586a99a523fc62cf5a95 | [
"BSD-3-Clause"
] | null | null | null | dask_geopandas/__init__.py | jorisvandenbossche/dask-geopandas-1 | 80833e2211bc326f51ed586a99a523fc62cf5a95 | [
"BSD-3-Clause"
] | null | null | null | dask_geopandas/__init__.py | jorisvandenbossche/dask-geopandas-1 | 80833e2211bc326f51ed586a99a523fc62cf5a95 | [
"BSD-3-Clause"
] | 1 | 2022-03-04T12:18:12.000Z | 2022-03-04T12:18:12.000Z | from ._version import get_versions
from . import backends
from .core import (
points_from_xy,
GeoDataFrame,
GeoSeries,
from_geopandas,
from_dask_dataframe,
)
# Expose the package version resolved by versioneer, then drop the helper
# so it does not leak into the public namespace.
__version__ = get_versions()["version"]
del get_versions
# Explicit public API of dask_geopandas.
__all__ = [
    "points_from_xy",
    "GeoDataFrame",
    "GeoSeries",
    "from_geopandas",
    "from_dask_dataframe",
]
acf0391f9ff06a5ba6b6b7e8f6c0026cb8ab3728 | 905 | py | Python | gtf2bed/constants.py | jvfe/gtf2bed | 7ac21759498ca9495030982d2a11c2a63149a75c | [
"BSD-3-Clause"
] | 1 | 2021-04-22T09:27:35.000Z | 2021-04-22T09:27:35.000Z | gtf2bed/constants.py | jvfe/gtf2bed | 7ac21759498ca9495030982d2a11c2a63149a75c | [
"BSD-3-Clause"
] | null | null | null | gtf2bed/constants.py | jvfe/gtf2bed | 7ac21759498ca9495030982d2a11c2a63149a75c | [
"BSD-3-Clause"
] | null | null | null | FEATURES = set(
[
"five_prime_utr",
"three_prime_utr",
"CDS",
"exon",
"intron",
"start_codon",
"stop_codon",
"ncRNA",
]
)
# Ensembl gene/transcript biotypes considered non-coding.
# Use a set literal instead of set([...]): avoids building a throwaway list.
# NOTE(review): three entries carry a trailing space ("known_ncrna ",
# "lincRNA ", "macro_lncRNA ") - preserved byte-for-byte here since lookups
# may depend on it, but this looks like a data typo worth confirming.
NON_CODING_BIOTYPES = {
    "Mt_rRNA",
    "Mt_tRNA",
    "miRNA",
    "misc_RNA",
    "rRNA",
    "scRNA",
    "snRNA",
    "snoRNA",
    "ribozyme",
    "sRNA",
    "scaRNA",
    "lncRNA",
    "ncRNA",
    "Mt_tRNA_pseudogene",
    "tRNA_pseudogene",
    "snoRNA_pseudogene",
    "snRNA_pseudogene",
    "scRNA_pseudogene",
    "rRNA_pseudogene",
    "misc_RNA_pseudogene",
    "miRNA_pseudogene",
    "non_coding",
    "known_ncrna ",
    "lincRNA ",
    "macro_lncRNA ",
    "3prime_overlapping_ncRNA",
    "vault_RNA",
    "vaultRNA",
    "bidirectional_promoter_lncRNA",
}
| 19.255319 | 40 | 0.462983 |
acf0394fd32f16645be9b4bbea768fffd1f04a7c | 21,577 | py | Python | evennia/server/amp.py | ReidLiu/text-world | b016546a0663d05cc581211ecdcb978cb0bd82c4 | [
"BSD-3-Clause"
] | 28 | 2015-04-14T00:15:27.000Z | 2021-11-18T08:31:16.000Z | evennia/server/amp.py | ReidLiu/text-world | b016546a0663d05cc581211ecdcb978cb0bd82c4 | [
"BSD-3-Clause"
] | null | null | null | evennia/server/amp.py | ReidLiu/text-world | b016546a0663d05cc581211ecdcb978cb0bd82c4 | [
"BSD-3-Clause"
] | 18 | 2015-07-17T16:48:11.000Z | 2020-12-05T15:57:49.000Z | """
Contains the protocols, commands, and client factory needed for the Server
and Portal to communicate with each other, letting Portal work as a proxy.
Both sides use this same protocol.
The separation works like this:
Portal - (AMP client) handles protocols. It contains a list of connected
sessions in a dictionary for identifying the respective player
connected. If it looses the AMP connection it will automatically
try to reconnect.
Server - (AMP server) Handles all mud operations. The server holds its own list
of sessions tied to player objects. This is synced against the portal
at startup and when a session connects/disconnects
"""
# imports needed on both server and portal side
import os
from time import time
from collections import defaultdict
try:
import cPickle as pickle
except ImportError:
import pickle
from twisted.protocols import amp
from twisted.internet import protocol, task
from twisted.internet.defer import Deferred
from evennia.utils.utils import to_str, variable_from_module
# communication bits
# communication bits - single-byte opcodes identifying the administrative
# operation carried inside a ServerAdmin/PortalAdmin AMP message.
PCONN = chr(1)  # portal session connect
PDISCONN = chr(2)  # portal session disconnect
PSYNC = chr(3)  # portal session sync
SLOGIN = chr(4)  # server session login
SDISCONN = chr(5)  # server session disconnect
SDISCONNALL = chr(6)  # server session disconnect all
SSHUTD = chr(7)  # server shutdown
SSYNC = chr(8)  # server session sync
SCONN = chr(9)  # server creating new connection (for irc/imc2 bots etc)
PCONNSYNC = chr(10)  # portal post-syncing a session
AMP_MAXLEN = 65535  # max allowed data length in AMP protocol (cannot be changed)
BATCH_RATE = 500  # max commands/sec before switching to batch-sending
BATCH_TIMEOUT = 1.0  # how often to poll to empty batch queue, in seconds
# buffers
_SENDBATCH = defaultdict(list)  # command class -> queued (sessid, kwargs) pairs
_MSGBUFFER = defaultdict(list)  # hashid -> received parts of a multipart batch
def get_restart_mode(restart_file):
    """
    Parse the server/portal restart status.

    Args:
        restart_file (str): path to the flag file written at shutdown.

    Returns:
        bool: True if the file exists and contains exactly "True",
            False otherwise (including when the file is missing).
    """
    if os.path.exists(restart_file):
        # Use a context manager so the file handle is closed deterministically
        # (the original `open(...).read()` leaked the handle to the GC).
        with open(restart_file, 'r') as f:
            flag = f.read()
        return flag == "True"
    return False
class AmpServerFactory(protocol.ServerFactory):
    """
    This factory creates the Server as a new AMPProtocol instance for accepting
    connections from the Portal.
    """
    def __init__(self, server):
        """
        Args:
            server: The Evennia server service instance.

        Attributes:
            protocol: The protocol class the factory creates instances of.
        """
        self.server = server
        self.protocol = AMPProtocol
    def buildProtocol(self, addr):
        """
        Start a new connection, and store the protocol instance on the
        server service object so other server code can reach it.
        """
        self.server.amp_protocol = AMPProtocol()
        self.server.amp_protocol.factory = self
        return self.server.amp_protocol
class AmpClientFactory(protocol.ReconnectingClientFactory):
    """
    This factory creates an instance of the Portal, an AMPProtocol
    instance to use to connect.

    Reconnects automatically with exponential backoff (bounded by
    `maxDelay`) whenever the link to the Server drops.
    """
    # Initial reconnect delay in seconds.
    initialDelay = 1
    factor = 1.5
    maxDelay = 1
    def __init__(self, portal):
        # portal: the Evennia Portal service instance this factory serves.
        self.portal = portal
        self.protocol = AMPProtocol
    def startedConnecting(self, connector):
        """
        Called when starting to try to connect to the MUD server.
        """
        pass
    def buildProtocol(self, addr):
        """
        Creates an AMPProtocol instance when connecting to the server.
        Resets the backoff delay since the connection succeeded.
        """
        self.resetDelay()
        self.portal.amp_protocol = AMPProtocol()
        self.portal.amp_protocol.factory = self
        return self.portal.amp_protocol
    def clientConnectionLost(self, connector, reason):
        """
        Called when the AMP connection to the MUD server is lost.
        Retries fast during a controlled server restart, slower otherwise.
        """
        if hasattr(self, "server_restart_mode"):
            self.maxDelay = 1
        else:
            # Don't translate this; avoid loading django on portal side.
            self.maxDelay = 10
        self.portal.sessions.announce_all(" ... Portal lost connection to Server.")
        protocol.ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
    def clientConnectionFailed(self, connector, reason):
        """
        Called when an AMP connection attempt to the MUD server fails.
        """
        if hasattr(self, "server_restart_mode"):
            self.maxDelay = 1
        else:
            self.maxDelay = 10
        self.portal.sessions.announce_all(" ...")
        protocol.ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
# AMP Communication Command types
class MsgPortal2Server(amp.Command):
    """
    Message portal -> server.

    Carries one (possibly partial) pickled batch; `hashid` tags the batch
    and `ipart`/`nparts` track the multipart split needed to respect AMP's
    65535-byte value limit.
    """
    key = "MsgPortal2Server"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class MsgServer2Portal(amp.Command):
    """
    Message server -> portal. Same multipart batch framing as
    MsgPortal2Server.
    """
    key = "MsgServer2Portal"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class ServerAdmin(amp.Command):
    """
    Portal -> Server.

    Sent when the portal needs to perform admin
    operations on the server, such as when a new
    session connects or resyncs. The opcode (PCONN etc)
    travels inside the pickled payload.
    """
    key = "ServerAdmin"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class PortalAdmin(amp.Command):
    """
    Server -> Portal.

    Sent when the server needs to perform admin
    operations on the portal (opcode SLOGIN etc inside the payload).
    """
    key = "PortalAdmin"
    arguments = [('hashid', amp.String()),
                 ('data', amp.String()),
                 ('ipart', amp.Integer()),
                 ('nparts', amp.Integer())]
    errors = [(Exception, 'EXCEPTION')]
    response = []
class FunctionCall(amp.Command):
    """
    Bidirectional.

    Sent when either process needs to call an
    arbitrary function in the other. This does
    not use the batch-send functionality; args/kwargs are pickled.
    """
    key = "FunctionCall"
    arguments = [('module', amp.String()),
                 ('function', amp.String()),
                 ('args', amp.String()),
                 ('kwargs', amp.String())]
    errors = [(Exception, 'EXCEPTION')]
    response = [('result', amp.String())]
# Helper functions
# Helper functions (defs instead of assigned lambdas, per PEP 8 / E731).
def dumps(data):
    """Pickle `data` to a byte-string (highest protocol), coercing via to_str."""
    return to_str(pickle.dumps(to_str(data), pickle.HIGHEST_PROTOCOL))
def loads(data):
    """Unpickle `data` previously produced by dumps()."""
    return pickle.loads(to_str(data))
#------------------------------------------------------------
# Core AMP protocol for communication Server <-> Portal
#------------------------------------------------------------
class AMPProtocol(amp.AMP):
    """
    This is the protocol that the MUD server and the proxy server
    communicate to each other with. AMP is a bi-directional protocol, so
    both the proxy and the MUD use the same commands and protocol.

    AMP specifies responder methods here and connect them to amp.Command
    subclasses that specify the datatypes of the input/output of these methods.
    """
    # helper methods
    def __init__(self, *args, **kwargs):
        """
        Initialize protocol with some things that need to be
        in place already before connecting both on portal and server.

        Starts a looping task that flushes any queued outgoing batches
        every BATCH_TIMEOUT seconds.
        """
        self.min_batch_step = 1.0 / BATCH_RATE
        self.lastsend = time()
        self.task = task.LoopingCall(self.batch_send, None, None)
        self.task.start(BATCH_TIMEOUT)
    def connectionMade(self):
        """
        This is called when a connection is established
        between server and portal. AMP calls it on both sides,
        so we need to make sure to only trigger resync from the
        portal side.
        """
        self.transport.setTcpNoDelay(True)  # this makes for a factor x10 faster sends!
        if hasattr(self.factory, "portal"):
            # only the portal has the 'portal' property, so we know we are
            # on the portal side and can initialize the connection.
            sessdata = self.factory.portal.sessions.get_all_sync_data()
            self.call_remote_ServerAdmin(0,
                                         PSYNC,
                                         data=sessdata)
            self.factory.portal.sessions.at_server_connection()
            if hasattr(self.factory, "server_restart_mode"):
                del self.factory.server_restart_mode
    # Error handling
    def errback(self, e, info):
        """
        Error handler, to avoid dropping connections on server tracebacks.

        Args:
            e (Failure): twisted Failure; only Exception is trapped.
            info (str): label for the command/operation that failed.
        """
        e.trap(Exception)
        print "AMP Error for %(info)s: %(e)s" % {'info': info,
                                                 'e': e.getErrorMessage()}
def batch_send(self, command, sessid, **kwargs):
"""
This will batch data together to send fewer, large batches.
Kwargs:
force_direct: send direct
"""
#print "batch_send 1:", command, sessid
global _SENDBATCH
if command is None:
# called by the automatic cleanup mechanism
commands = [cmd for cmd in (MsgPortal2Server, MsgServer2Portal, ServerAdmin, PortalAdmin)
if _SENDBATCH.get(cmd, False)]
if not commands:
return
else:
# called to send right away
commands = [command]
_SENDBATCH[command].append((sessid, kwargs))
force_direct = kwargs.pop("force_direct", False)
now = time()
#print "batch_send 2:", now, self.lastsend, self.min_batch_step, now-self.lastsend > self.min_batch_step
if force_direct or now - self.lastsend > self.min_batch_step:
for command in commands:
batch = dumps(_SENDBATCH[command])
_SENDBATCH[command] = []
# split in parts small enough to fit in AMP MAXLEN
to_send = [batch[i:i+AMP_MAXLEN] for i in range(0, len(batch), AMP_MAXLEN)]
nparts = len(to_send)
# tag this batch
hashid = "%s-%s" % (id(batch), now)
if nparts == 1:
deferreds = [self.callRemote(command,
hashid=hashid,
data=batch,
ipart=0,
nparts=1).addErrback(self.errback, command.key)]
else:
deferreds = []
for ipart, part in enumerate(to_send):
deferred = self.callRemote(command,
hashid=hashid,
data=part,
ipart=ipart,
nparts=nparts)
deferred.addErrback(self.errback, "%s part %i/%i" % (command.key, ipart, part))
deferreds.append(deferred)
self.lastsend = time() # don't use now here, keep it as up-to-date as possible
return deferreds
def batch_recv(self, hashid, data, ipart, nparts):
"""
This will receive and unpack data sent as a batch. This both
handles too-long data as well as batch-sending very fast-
arriving commands.
"""
global _MSGBUFFER
if nparts == 1:
# most common case
return loads(data)
else:
if ipart < nparts-1:
# not yet complete
_MSGBUFFER[hashid].append(data)
return []
else:
# all parts in place - deserialize it
return loads(_MSGBUFFER.pop(hashid) + data)
# Message definition + helper methods to call/create each message type
# Portal -> Server Msg
    def amp_msg_portal2server(self, hashid, data, ipart, nparts):
        """
        Relays message to server. This method is executed on the Server.

        Since AMP has a limit of 65355 bytes per message, it's possible the
        data comes in multiple chunks; if so (nparts>1) we buffer the data
        and wait for the remaining parts to arrive before continuing.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            self.factory.server.sessions.data_in(sessid,
                                                 text=kwargs["msg"],
                                                 data=kwargs["data"])
        return {}
    MsgPortal2Server.responder(amp_msg_portal2server)
    def call_remote_MsgPortal2Server(self, sessid, msg, data=""):
        """
        Access method called by the Portal and executed on the Portal.
        Queues (or sends) the message towards the Server.
        """
        return self.batch_send(MsgPortal2Server, sessid,
                               msg=msg if msg is not None else "",
                               data=data)
    # Server -> Portal message
    def amp_msg_server2portal(self, hashid, data, ipart, nparts):
        """
        Relays message to Portal. This method is executed on the Portal.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            self.factory.portal.sessions.data_out(sessid,
                                                  text=kwargs["msg"],
                                                  data=kwargs["data"])
        return {}
    MsgServer2Portal.responder(amp_msg_server2portal)
    def amp_batch_server2portal(self, hashid, data, ipart, nparts):
        """
        Relays batch data to Portal. This method is executed on the Portal.

        NOTE(review): this registers a *second* responder for
        MsgServer2Portal (after amp_msg_server2portal above), and passes the
        payload as **kwargs instead of data= - confirm which responder AMP
        actually dispatches to; only one can win.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        if batch is not None:
            for (sessid, kwargs) in batch:
                self.factory.portal.sessions.data_out(sessid,
                                                      text=kwargs["msg"],
                                                      **kwargs["data"])
        return {}
    MsgServer2Portal.responder(amp_batch_server2portal)
    def call_remote_MsgServer2Portal(self, sessid, msg, data=""):
        """
        Access method called by the Server and executed on the Server.
        Queues (or sends) the message towards the Portal.
        """
        return self.batch_send(MsgServer2Portal, sessid, msg=msg, data=data)
# Server administration from the Portal side
    def amp_server_admin(self, hashid, data, ipart, nparts):
        """
        This allows the portal to perform admin
        operations on the server. This is executed on the Server.

        Each queued entry carries an opcode (PCONN/PCONNSYNC/PDISCONN/PSYNC)
        plus its payload; unknown opcodes raise.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            operation = kwargs["operation"]
            data = kwargs["data"]
            server_sessionhandler = self.factory.server.sessions
            if operation == PCONN:  # portal_session_connect
                # create a new session and sync it
                server_sessionhandler.portal_connect(data)
            elif operation == PCONNSYNC:  # portal_session_sync
                server_sessionhandler.portal_session_sync(data)
            elif operation == PDISCONN:  # portal_session_disconnect
                # session closed from portal side
                self.factory.server.sessions.portal_disconnect(sessid)
            elif operation == PSYNC:  # portal_session_sync
                # force a resync of sessions when portal reconnects to
                # server (e.g. after a server reboot) the data kwarg
                # contains a dict {sessid: {arg1:val1,...}}
                # representing the attributes to sync for each
                # session.
                server_sessionhandler.portal_sessions_sync(data)
            else:
                raise Exception("operation %(op)s not recognized." % {'op': operation})
        return {}
    ServerAdmin.responder(amp_server_admin)
    def call_remote_ServerAdmin(self, sessid, operation="", data=""):
        """
        Access method called by the Portal and executed on the Portal.
        During a server restart, sends immediately instead of batching.
        """
        if hasattr(self.factory, "server_restart_mode"):
            return self.batch_send(ServerAdmin, sessid, force_direct=True, operation=operation, data=data)
        return self.batch_send(ServerAdmin, sessid, operation=operation, data=data)
    # Portal administraton from the Server side
    def amp_portal_admin(self, hashid, data, ipart, nparts):
        """
        This allows the server to perform admin
        operations on the portal. This is executed on the Portal.

        Opcodes handled: SLOGIN, SDISCONN, SDISCONNALL, SSHUTD, SSYNC, SCONN.
        """
        batch = self.batch_recv(hashid, data, ipart, nparts)
        for (sessid, kwargs) in batch:
            operation = kwargs["operation"]
            data = kwargs["data"]
            portal_sessionhandler = self.factory.portal.sessions
            if operation == SLOGIN:  # server_session_login
                # a session has authenticated; sync it.
                portal_sessionhandler.server_logged_in(sessid, data)
            elif operation == SDISCONN:  # server_session_disconnect
                # the server is ordering to disconnect the session
                portal_sessionhandler.server_disconnect(sessid, reason=data)
            elif operation == SDISCONNALL:  # server_session_disconnect_all
                # server orders all sessions to disconnect
                portal_sessionhandler.server_disconnect_all(reason=data)
            elif operation == SSHUTD:  # server_shutdown
                # the server orders the portal to shut down
                self.factory.portal.shutdown(restart=False)
            elif operation == SSYNC:  # server_session_sync
                # server wants to save session data to the portal,
                # maybe because it's about to shut down.
                portal_sessionhandler.server_session_sync(data)
                # set a flag in case we are about to shut down soon
                self.factory.server_restart_mode = True
            elif operation == SCONN:  # server_force_connection (for irc/imc2 etc)
                portal_sessionhandler.server_connect(**data)
            else:
                raise Exception("operation %(op)s not recognized." % {'op': operation})
        return {}
    PortalAdmin.responder(amp_portal_admin)
    def call_remote_PortalAdmin(self, sessid, operation="", data=""):
        """
        Access method called by the server side.
        SSYNC is sent immediately (it precedes shutdown); others batch.
        """
        if operation == SSYNC:
            return self.batch_send(PortalAdmin, sessid, force_direct=True, operation=operation, data=data)
        return self.batch_send(PortalAdmin, sessid, operation=operation, data=data)
# Extra functions
def amp_function_call(self, module, function, args, **kwargs):
"""
This allows Portal- and Server-process to call an arbitrary function
in the other process. It is intended for use by plugin modules.
"""
args = loads(args)
kwargs = loads(kwargs)
# call the function (don't catch tracebacks here)
result = variable_from_module(module, function)(*args, **kwargs)
if isinstance(result, Deferred):
# if result is a deferred, attach handler to properly
# wrap the return value
result.addCallback(lambda r: {"result": dumps(r)})
return result
else:
return {'result': dumps(result)}
FunctionCall.responder(amp_function_call)
    def call_remote_FunctionCall(self, modulepath, functionname, *args, **kwargs):
        """
        Access method called by either process. This will call an arbitrary
        function on the other process (On Portal if calling from Server and
        vice versa).

        Inputs:
            modulepath (str) - python path to module holding function to call
            functionname (str) - name of function in given module
            *args, **kwargs will be used as arguments/keyword args for the
                            remote function call
        Returns:
            A deferred that fires with the return value of the remote
            function call
        """
        # Not batched: each FunctionCall is its own round-trip; the result
        # is unpickled before being handed to the caller's callback chain.
        return self.callRemote(FunctionCall,
                               module=modulepath,
                               function=functionname,
                               args=dumps(args),
                               kwargs=dumps(kwargs)).addCallback(lambda r: loads(r["result"])).addErrback(self.errback, "FunctionCall")
| 38.393238 | 135 | 0.590583 |
acf0395e5e81aacf19a768506b8be870fd2e9508 | 3,652 | py | Python | app/posts/routes.py | orached/itora_tuto | 8de36d834fc7ef2dc8895ec7ac048fb420de76e3 | [
"MIT"
] | null | null | null | app/posts/routes.py | orached/itora_tuto | 8de36d834fc7ef2dc8895ec7ac048fb420de76e3 | [
"MIT"
] | 3 | 2020-03-24T18:03:03.000Z | 2021-02-02T22:23:27.000Z | app/posts/routes.py | orached/itora_tuto | 8de36d834fc7ef2dc8895ec7ac048fb420de76e3 | [
"MIT"
] | null | null | null | from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _
from guess_language import guess_language
from app import db
from app.posts.forms import PostForm, CommentForm
from app.models import Post, Comment, Category
from app.translate import translate
from app.posts import bp
@bp.route('/managepost', methods=['GET', 'POST'])
@login_required
def managepost():
    """List the logged-in user's posts (paginated) and create new ones.

    GET renders the management page; POST validates PostForm, guesses the
    post language and persists a new Post before redirecting back.
    """
    # Build (id, title) choices for the category select field.
    # NOTE(review): the loop variable `g` shadows flask.g imported above;
    # harmless here but worth renaming.
    category_list=[(g.id, g.title) for g in Category.query.all()]
    form = PostForm()
    form.category.choices = category_list
    if form.validate_on_submit():
        language = guess_language(form.post.data)
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        # Body is stored as raw HTML (body_html) - assumed sanitized
        # elsewhere; TODO confirm to rule out stored XSS.
        post = Post(title=form.title.data, body_html=form.post.data, author=current_user,
                    language=language, category_id=form.category.data)
        db.session.add(post)
        db.session.commit()
        flash(_('Votre article est publié !'))
        return redirect(url_for('posts.managepost'))
    page = request.args.get('page', 1, type=int)
    posts = current_user.posts.order_by(Post.timestamp.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('posts.managepost', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('posts.managepost', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('manage_post.html', title=_('Articles'), form=form,
                           posts=posts.items, next_url=next_url,
                           prev_url=prev_url)
@bp.route('/translate', methods=['POST'])
def translate_text():
    """Translate the posted text and return it as JSON: ``{'text': ...}``."""
    data = request.form
    translated = translate(data['text'],
                           data['source_language'],
                           data['dest_language'])
    return jsonify({'text': translated})
@bp.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
    """Display a single post and handle submission of new comments."""
    post = Post.query.get_or_404(id)
    form = CommentForm()
    if not form.validate_on_submit():
        # Plain GET (or invalid form): render the post with its comments,
        # oldest first.
        comments = post.comments.order_by(Comment.timestamp.asc())
        return render_template('post.html', post=post, form=form,
                               comments=comments)
    detected = guess_language(form.comment.data)
    if detected == 'UNKNOWN' or len(detected) > 5:
        detected = ''
    new_comment = Comment(body=form.comment.data, post=post,
                          author=current_user, language=detected)
    db.session.add(new_comment)
    db.session.commit()
    flash('Votre commentaire a été publié.')
    return redirect(url_for('posts.post', id=post.id))
@bp.route('/editpost/<int:id>', methods=['GET', 'POST'])
@login_required
def editpost(id):
    """Edit an existing post; only its author may do so (403 otherwise)."""
    # BUG FIX: ``abort`` is used below but was never imported at module level,
    # so the authorization check raised NameError instead of returning 403.
    from flask import abort
    post = Post.query.get_or_404(id)
    if current_user != post.author:
        abort(403)
    # Renamed the comprehension variable from ``g`` to ``cat`` so it does not
    # shadow ``flask.g`` (consistent with managepost above).
    category_list = [(cat.id, cat.title) for cat in Category.query.all()]
    form = PostForm()
    form.category.choices = category_list
    if form.validate_on_submit():
        post.title = form.title.data
        post.category_id = form.category.data
        post.body_html = form.post.data
        db.session.add(post)
        db.session.commit()
        flash('L\'article a été mis à jour.')
        return redirect(url_for('posts.post', id=post.id))
    # GET: pre-populate the form with the post's current values.
    form.title.data = post.title
    form.post.data = post.body_html
    return render_template('edit_post.html', form=form)
@bp.route('/category/<int:id>')
def category(id):
    """List all posts belonging to one category, ordered by post id."""
    category = Category.query.get_or_404(id)
    posts = category.posts.order_by(Post.id)
    # NOTE(review): the trailing "| 40.13... |" below is dataset metadata
    # fused into the source during extraction, not Python code.
    return render_template('category.html', category=category, posts=posts) | 40.131868 | 89 | 0.654984 |
acf039aacfa38cd014fb2947c51d0e5e2ddcadd9 | 326 | py | Python | Lib/hTools2/dialogs/misc/__init__.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | Lib/hTools2/dialogs/misc/__init__.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | Lib/hTools2/dialogs/misc/__init__.py | miguelsousa/hTools2 | eab400677c1b21bb2519a7354a142e167c2b39ba | [
"BSD-3-Clause"
] | null | null | null | import hTools2
# Reload the package so code changes are picked up in a live host session
# (the bare ``reload`` builtin implies this module targets Python 2).
reload(hTools2)
# debug: in DEBUG mode force-reload the submodules as well, so edits to them
# take effect without restarting the host application.
if hTools2.DEBUG:
    import select_fonts
    reload(select_fonts)
    import checkbox_builder
    reload(checkbox_builder)
# import the public dialog classes into the package namespace
from select_fonts import SelectFonts
from checkbox_builder import checkBoxBuilder
# export
__all__ = [
'SelectFonts',
'checkBoxBuilder'
]
| 13.04 | 44 | 0.748466 |
acf039d84190e8d50a4d599abdc4e29e9cbcb29b | 704 | py | Python | student_lab1/tests/q5.py | ds-modules/DATA-198-SP21 | eedff5e04a5fcb50032ff9f746b164bf8658030f | [
"BSD-3-Clause"
] | null | null | null | student_lab1/tests/q5.py | ds-modules/DATA-198-SP21 | eedff5e04a5fcb50032ff9f746b164bf8658030f | [
"BSD-3-Clause"
] | 1 | 2021-03-29T16:48:19.000Z | 2021-03-29T16:48:19.000Z | student_lab1/tests/q5.py | ds-modules/DATA-198-SP21 | eedff5e04a5fcb50032ff9f746b164bf8658030f | [
"BSD-3-Clause"
] | null | null | null | test = { 'name': 'q5',
'points': 1,
'suites': [ { 'cases': [ {'code': '>>> assert len(cereal.columns) == 17\n', 'hidden': False, 'locked': False},
{'code': ">>> assert 'shashank_ratings' in cereal.columns\n", 'hidden': False, 'locked': False},
{'code': ">>> assert 'rating' in cereal.columns\n", 'hidden': False, 'locked': False},
{'code': '>>> assert type(cereal) == type(pd.DataFrame())\n', 'hidden': False, 'locked': False}],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'}]}
| 64 | 132 | 0.410511 |
acf03a6d20beeac2de5ee35af5f7540ded001fcd | 2,343 | py | Python | start_jupyter_cm/gnome.py | hyperspy/start_ipython_cm | 294c039afc57331ef663ac79e94a7578391967e4 | [
"BSD-3-Clause"
] | 36 | 2017-01-08T18:02:43.000Z | 2022-02-23T20:17:29.000Z | start_jupyter_cm/gnome.py | hyperspy/start_ipython_cm | 294c039afc57331ef663ac79e94a7578391967e4 | [
"BSD-3-Clause"
] | 21 | 2016-02-24T18:23:38.000Z | 2021-08-15T11:46:25.000Z | start_jupyter_cm/gnome.py | hyperspy/start_ipython_cm | 294c039afc57331ef663ac79e94a7578391967e4 | [
"BSD-3-Clause"
] | 9 | 2016-02-20T17:33:29.000Z | 2021-08-02T07:00:54.000Z | import os, sys
import stat
from subprocess import call
import shutil
from .utils import get_environment_label
NPATH = os.path.expanduser("~/.local/share/nautilus")
SPATH = os.path.join(NPATH, "scripts")
PATH = "%s/bin"%sys.exec_prefix
CONDA_ENV_LABEL = get_environment_label()
script = \
"""#!/usr/bin/python
import sys
import os.path
import subprocess
folders = [path for path in sys.argv[1:] if os.path.isdir(path)]
any_file_selected = len(folders) < len(sys.argv[1:])
if any_file_selected:
subprocess.Popen(["%s/jupyter-%s"])
for folder in folders:
os.chdir(folder)
subprocess.Popen(["%s/jupyter-%s"])
os.chdir("..")
"""
def add_jupyter_here():
    """Install 'Jupyter ... here' Nautilus scripts for the current env.

    For each available Jupyter front-end (qtconsole, notebook, lab), a
    launcher script is written under the Nautilus scripts directory, made
    executable, and given a custom icon via ``gio`` when that tool exists.
    """
    if not os.path.exists(NPATH):
        print("Nothing done. Currently only Gnome with Nautilus as file ",
              "manager is supported.")
        return
    if not os.path.exists(SPATH):
        os.makedirs(SPATH)
    icon_dir = os.path.expandvars(os.path.join(
        os.path.dirname(__file__), 'icons'))
    icon_by_app = {
        'qtconsole': os.path.join(icon_dir, 'jupyter-qtconsole.png'),
        'notebook': os.path.join(icon_dir, 'jupyter.png'),
        'lab': os.path.join(icon_dir, 'jupyter.png'),
    }
    for app in ["qtconsole", "notebook", "lab"]:
        target = os.path.join(SPATH, "Jupyter %s here%s" % (
            app, CONDA_ENV_LABEL))
        # Skip launchers that already exist or whose front-end is missing.
        if os.path.exists(target) or not shutil.which("jupyter-%s" % app):
            continue
        with open(target, "w") as handle:
            handle.write(script % (PATH, app, PATH, app))
        st_info = os.stat(target)
        os.chmod(target, st_info.st_mode | stat.S_IEXEC)
        if shutil.which("gio"):
            # Call it only if available in the system
            call(['gio', 'set', '-t', 'string', '%s' % target,
                  'metadata::custom-icon', 'file://%s' % icon_by_app[app]])
        print('Jupyter %s here%s created.' % (app, CONDA_ENV_LABEL))
def remove_jupyter_here():
    """Delete the Nautilus launcher scripts created by add_jupyter_here."""
    for terminal in ["qtconsole", "notebook", "lab"]:
        script_path = os.path.join(SPATH, "Jupyter %s here%s" %(
            terminal, CONDA_ENV_LABEL))
        # Remove only scripts that actually exist; missing ones are ignored.
        if os.path.exists(script_path):
            os.remove(script_path)
            # NOTE(review): trailing "| 33 |..." is fused dataset metadata.
            print("Jupyter %s here%s removed." % (terminal, CONDA_ENV_LABEL)) | 33 | 78 | 0.612463 |
acf03b184fd58cfd04202f43f586fcda6e413838 | 135,754 | py | Python | research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py | TensorFlow230/TF230Updates | fb46d2fe878cde196c99a6b10c1925a5261b57cb | [
"Apache-2.0"
] | 3 | 2022-03-05T10:46:52.000Z | 2022-03-22T06:00:05.000Z | research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py | TensorFlow230/TF230Updates | fb46d2fe878cde196c99a6b10c1925a5261b57cb | [
"Apache-2.0"
] | 1 | 2021-09-02T12:43:42.000Z | 2021-09-02T12:43:42.000Z | research/object_detection/meta_architectures/center_net_meta_arch_tf2_test.py | TensorFlow230/TF230Updates | fb46d2fe878cde196c99a6b10c1925a5261b57cb | [
"Apache-2.0"
] | 1 | 2021-09-22T18:34:10.000Z | 2021-09-22T18:34:10.000Z | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the CenterNet Meta architecture code."""
from __future__ import division
import functools
import unittest
from absl.testing import parameterized
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.builders import post_processing_builder
from object_detection.core import keypoint_ops
from object_detection.core import losses
from object_detection.core import preprocessor
from object_detection.core import standard_fields as fields
from object_detection.core import target_assigner as cn_assigner
from object_detection.meta_architectures import center_net_meta_arch as cnma
from object_detection.models import center_net_resnet_feature_extractor
from object_detection.protos import post_processing_pb2
from object_detection.utils import test_case
from object_detection.utils import tf_version
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchPredictionHeadTest(
    test_case.TestCase, parameterized.TestCase):
  """Test CenterNet meta architecture prediction head."""

  @parameterized.parameters([True, False])
  def test_prediction_head(self, use_depthwise):
    # The head maps an 8-channel feature map to the requested 7 output
    # channels while preserving the batch and spatial dimensions.
    prediction_net = cnma.make_prediction_net(
        num_out_channels=7, use_depthwise=use_depthwise)
    features = np.zeros((4, 128, 128, 8))
    self.assertEqual((4, 128, 128, 7), prediction_net(features).shape)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchHelpersTest(test_case.TestCase, parameterized.TestCase):
"""Test for CenterNet meta architecture related functions."""
  def test_row_col_channel_indices_from_flattened_indices(self):
    """Tests that the computation of row, col, channel indices is correct."""
    # Expected (row, col, channel) triple for every flattened position of a
    # 5x4x3 tensor. NOTE(review): ``np.int``/``np.float`` aliases are
    # deprecated in modern NumPy.
    r_grid, c_grid, ch_grid = (np.zeros((5, 4, 3), dtype=np.int),
                               np.zeros((5, 4, 3), dtype=np.int),
                               np.zeros((5, 4, 3), dtype=np.int))
    # Row index is constant along columns and channels.
    r_grid[..., 0] = r_grid[..., 1] = r_grid[..., 2] = np.array(
        [[0, 0, 0, 0],
         [1, 1, 1, 1],
         [2, 2, 2, 2],
         [3, 3, 3, 3],
         [4, 4, 4, 4]]
    )
    # Column index is constant along rows and channels.
    c_grid[..., 0] = c_grid[..., 1] = c_grid[..., 2] = np.array(
        [[0, 1, 2, 3],
         [0, 1, 2, 3],
         [0, 1, 2, 3],
         [0, 1, 2, 3],
         [0, 1, 2, 3]]
    )
    # Channel index equals the position along the last axis.
    for i in range(3):
      ch_grid[..., i] = i
    # 60 = 5 * 4 * 3 flattened positions, in row-major order.
    indices = np.arange(60)
    ri, ci, chi = cnma.row_col_channel_indices_from_flattened_indices(
        indices, 4, 3)
    np.testing.assert_array_equal(ri, r_grid.flatten())
    np.testing.assert_array_equal(ci, c_grid.flatten())
    np.testing.assert_array_equal(chi, ch_grid.flatten())
  def test_row_col_indices_from_flattened_indices(self):
    """Tests that the computation of row, col indices is correct."""
    # Expected (row, col) for each of the 20 flattened positions of a 5x4
    # grid in row-major order: index i maps to (i // 4, i % 4).
    r_grid = np.array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3],
                       [4, 4, 4, 4]])
    c_grid = np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3],
                       [0, 1, 2, 3]])
    indices = np.arange(20)
    ri, ci, = cnma.row_col_indices_from_flattened_indices(indices, 4)
    np.testing.assert_array_equal(ri, r_grid.flatten())
    np.testing.assert_array_equal(ci, c_grid.flatten())
  def test_flattened_indices_from_row_col_indices(self):
    """Tests the inverse mapping: (row, col) back to flattened indices."""
    # For a 3x4 grid in row-major order, (r, c) maps to r * 4 + c, so the
    # grids below should flatten to 0..11 in order.
    r = np.array(
        [[0, 0, 0, 0],
         [1, 1, 1, 1],
         [2, 2, 2, 2]]
    )
    c = np.array(
        [[0, 1, 2, 3],
         [0, 1, 2, 3],
         [0, 1, 2, 3]]
    )
    idx = cnma.flattened_indices_from_row_col_indices(r, c, 4)
    np.testing.assert_array_equal(np.arange(12), idx.flatten())
  def test_get_valid_anchor_weights_in_flattened_image(self):
    """Tests that the anchor weights are valid upon flattening out."""
    # In a padded 5x5 feature map, only locations inside each image's true
    # shape (3x4 for image 0, 2x2 for image 1) get weight 1.0.
    # NOTE(review): ``np.float`` is a deprecated alias in modern NumPy.
    valid_weights = np.zeros((2, 5, 5), dtype=np.float)
    valid_weights[0, :3, :4] = 1.0
    valid_weights[1, :2, :2] = 1.0
    def graph_fn():
      true_image_shapes = tf.constant([[3, 4], [2, 2]])
      w = cnma.get_valid_anchor_weights_in_flattened_image(
          true_image_shapes, 5, 5)
      return w
    w = self.execute(graph_fn, [])
    # The returned weights are flattened per image: (2, 5 * 5).
    np.testing.assert_allclose(w, valid_weights.reshape(2, -1))
    self.assertEqual((2, 25), w.shape)
def test_convert_strided_predictions_to_normalized_boxes(self):
"""Tests that boxes have correct coordinates in normalized input space."""
def graph_fn():
boxes = np.zeros((2, 3, 4), dtype=np.float32)
boxes[0] = [[10, 20, 30, 40], [20, 30, 50, 100], [50, 60, 100, 180]]
boxes[1] = [[-5, -5, 5, 5], [45, 60, 110, 120], [150, 150, 200, 250]]
true_image_shapes = tf.constant([[100, 90, 3], [150, 150, 3]])
clipped_boxes = (
cnma.convert_strided_predictions_to_normalized_boxes(
boxes, 2, true_image_shapes))
return clipped_boxes
clipped_boxes = self.execute(graph_fn, [])
expected_boxes = np.zeros((2, 3, 4), dtype=np.float32)
expected_boxes[0] = [[0.2, 4./9, 0.6, 8./9], [0.4, 2./3, 1, 1],
[1, 1, 1, 1]]
expected_boxes[1] = [[0., 0, 1./15, 1./15], [3./5, 4./5, 1, 1],
[1, 1, 1, 1]]
np.testing.assert_allclose(expected_boxes, clipped_boxes)
@parameterized.parameters(
{'clip_to_window': True},
{'clip_to_window': False}
)
def test_convert_strided_predictions_to_normalized_keypoints(
self, clip_to_window):
"""Tests that keypoints have correct coordinates in normalized coords."""
keypoint_coords_np = np.array(
[
# Example 0.
[
[[-10., 8.], [60., 22.], [60., 120.]],
[[20., 20.], [0., 0.], [0., 0.]],
],
# Example 1.
[
[[40., 50.], [20., 160.], [200., 150.]],
[[10., 0.], [40., 10.], [0., 0.]],
],
], dtype=np.float32)
keypoint_scores_np = np.array(
[
# Example 0.
[
[1.0, 0.9, 0.2],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.2],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
def graph_fn():
keypoint_coords = tf.constant(keypoint_coords_np, dtype=tf.float32)
keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
true_image_shapes = tf.constant([[320, 400, 3], [640, 640, 3]])
stride = 4
keypoint_coords_out, keypoint_scores_out = (
cnma.convert_strided_predictions_to_normalized_keypoints(
keypoint_coords, keypoint_scores, stride, true_image_shapes,
clip_to_window))
return keypoint_coords_out, keypoint_scores_out
keypoint_coords_out, keypoint_scores_out = self.execute(graph_fn, [])
if clip_to_window:
expected_keypoint_coords_np = np.array(
[
# Example 0.
[
[[0.0, 0.08], [0.75, 0.22], [0.75, 1.0]],
[[0.25, 0.2], [0., 0.], [0.0, 0.0]],
],
# Example 1.
[
[[0.25, 0.3125], [0.125, 1.0], [1.0, 0.9375]],
[[0.0625, 0.], [0.25, 0.0625], [0., 0.]],
],
], dtype=np.float32)
expected_keypoint_scores_np = np.array(
[
# Example 0.
[
[0.0, 0.9, 0.0],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.0],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
else:
expected_keypoint_coords_np = np.array(
[
# Example 0.
[
[[-0.125, 0.08], [0.75, 0.22], [0.75, 1.2]],
[[0.25, 0.2], [0., 0.], [0., 0.]],
],
# Example 1.
[
[[0.25, 0.3125], [0.125, 1.0], [1.25, 0.9375]],
[[0.0625, 0.], [0.25, 0.0625], [0., 0.]],
],
], dtype=np.float32)
expected_keypoint_scores_np = np.array(
[
# Example 0.
[
[1.0, 0.9, 0.2],
[0.7, 0.0, 0.0],
],
# Example 1.
[
[1.0, 1.0, 0.2],
[0.7, 0.6, 0.0],
],
], dtype=np.float32)
np.testing.assert_allclose(expected_keypoint_coords_np, keypoint_coords_out)
np.testing.assert_allclose(expected_keypoint_scores_np, keypoint_scores_out)
def test_convert_strided_predictions_to_instance_masks(self):
def graph_fn():
boxes = tf.constant(
[
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.5, 1.0],
[0.0, 0.0, 0.0, 0.0]],
], tf.float32)
classes = tf.constant(
[
[0, 1, 0],
], tf.int32)
masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32)
masks_np[0, :, 2:, 0] = 1 # Class 0.
masks_np[0, :, :3, 1] = 1 # Class 1.
masks = tf.constant(masks_np)
true_image_shapes = tf.constant([[6, 8, 3]])
instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks(
boxes, classes, masks, stride=2, mask_height=2, mask_width=2,
true_image_shapes=true_image_shapes)
return instance_masks
instance_masks = self.execute_cpu(graph_fn, [])
expected_instance_masks = np.array(
[
[
# Mask 0 (class 0).
[[1, 1],
[1, 1]],
# Mask 1 (class 1).
[[1, 0],
[1, 0]],
# Mask 2 (class 0).
[[0, 0],
[0, 0]],
]
])
np.testing.assert_array_equal(expected_instance_masks, instance_masks)
def test_convert_strided_predictions_raises_error_with_one_tensor(self):
def graph_fn():
boxes = tf.constant(
[
[[0.5, 0.5, 1.0, 1.0],
[0.0, 0.5, 0.5, 1.0],
[0.0, 0.0, 0.0, 0.0]],
], tf.float32)
classes = tf.constant(
[
[0, 1, 0],
], tf.int32)
masks_np = np.zeros((1, 4, 4, 2), dtype=np.float32)
masks_np[0, :, 2:, 0] = 1 # Class 0.
masks_np[0, :, :3, 1] = 1 # Class 1.
masks = tf.constant(masks_np)
true_image_shapes = tf.constant([[6, 8, 3]])
densepose_part_heatmap = tf.random.uniform(
[1, 4, 4, 24])
instance_masks, _ = cnma.convert_strided_predictions_to_instance_masks(
boxes, classes, masks, true_image_shapes,
densepose_part_heatmap=densepose_part_heatmap,
densepose_surface_coords=None)
return instance_masks
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [])
def test_crop_and_threshold_masks(self):
boxes_np = np.array(
[[0., 0., 0.5, 0.5],
[0.25, 0.25, 1.0, 1.0]], dtype=np.float32)
classes_np = np.array([0, 2], dtype=np.int32)
masks_np = np.zeros((4, 4, _NUM_CLASSES), dtype=np.float32)
masks_np[0, 0, 0] = 0.8
masks_np[1, 1, 0] = 0.6
masks_np[3, 3, 2] = 0.7
part_heatmap_np = np.zeros((4, 4, _DENSEPOSE_NUM_PARTS), dtype=np.float32)
part_heatmap_np[0, 0, 4] = 1
part_heatmap_np[0, 0, 2] = 0.6 # Lower scoring.
part_heatmap_np[1, 1, 8] = 0.2
part_heatmap_np[3, 3, 4] = 0.5
surf_coords_np = np.zeros((4, 4, 2 * _DENSEPOSE_NUM_PARTS),
dtype=np.float32)
surf_coords_np[:, :, 8:10] = 0.2, 0.9
surf_coords_np[:, :, 16:18] = 0.3, 0.5
true_height, true_width = 10, 10
input_height, input_width = 10, 10
mask_height = 4
mask_width = 4
def graph_fn():
elems = [
tf.constant(boxes_np),
tf.constant(classes_np),
tf.constant(masks_np),
tf.constant(part_heatmap_np),
tf.constant(surf_coords_np),
tf.constant(true_height, dtype=tf.int32),
tf.constant(true_width, dtype=tf.int32)
]
part_masks, surface_coords = cnma.crop_and_threshold_masks(
elems, input_height, input_width, mask_height=mask_height,
mask_width=mask_width, densepose_class_index=0)
return part_masks, surface_coords
part_masks, surface_coords = self.execute_cpu(graph_fn, [])
expected_part_masks = np.zeros((2, 4, 4), dtype=np.uint8)
expected_part_masks[0, 0, 0] = 5 # Recall classes are 1-indexed in output.
expected_part_masks[0, 2, 2] = 9 # Recall classes are 1-indexed in output.
expected_part_masks[1, 3, 3] = 1 # Standard instance segmentation mask.
expected_surface_coords = np.zeros((2, 4, 4, 2), dtype=np.float32)
expected_surface_coords[0, 0, 0, :] = 0.2, 0.9
expected_surface_coords[0, 2, 2, :] = 0.3, 0.5
np.testing.assert_allclose(expected_part_masks, part_masks)
np.testing.assert_allclose(expected_surface_coords, surface_coords)
  def test_gather_surface_coords_for_parts(self):
    """Tests gathering (u, v) surface coords for the highest-scoring part."""
    # Surface coordinates per spatial location and DensePose part; only two
    # locations carry non-zero (u, v) pairs, on parts 5 and 9 respectively.
    surface_coords_cropped_np = np.zeros((2, 5, 5, _DENSEPOSE_NUM_PARTS, 2),
                                         dtype=np.float32)
    surface_coords_cropped_np[0, 0, 0, 5] = 0.3, 0.4
    surface_coords_cropped_np[0, 1, 0, 9] = 0.5, 0.6
    # Part index selected at each spatial location; matches the entries above.
    highest_scoring_part_np = np.zeros((2, 5, 5), dtype=np.int32)
    highest_scoring_part_np[0, 0, 0] = 5
    highest_scoring_part_np[0, 1, 0] = 9
    def graph_fn():
      surface_coords_cropped = tf.constant(surface_coords_cropped_np,
                                           tf.float32)
      highest_scoring_part = tf.constant(highest_scoring_part_np, tf.int32)
      surface_coords_gathered = cnma.gather_surface_coords_for_parts(
          surface_coords_cropped, highest_scoring_part)
      return surface_coords_gathered
    surface_coords_gathered = self.execute_cpu(graph_fn, [])
    # Each location should yield the (u, v) of its selected part.
    np.testing.assert_allclose([0.3, 0.4], surface_coords_gathered[0, 0, 0])
    np.testing.assert_allclose([0.5, 0.6], surface_coords_gathered[0, 1, 0])
def test_top_k_feature_map_locations(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 1] = 1.0
feature_map_np[0, 2, 1, 1] = 0.9 # Get's filtered due to max pool.
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 0] = 0.5
feature_map_np[0, 2, 2, 1] = -0.3
feature_map_np[1, 2, 1, 1] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.1
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=3))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.7, 0.5], scores[0])
np.testing.assert_array_equal([2, 0, 2], y_inds[0])
np.testing.assert_array_equal([0, 1, 2], x_inds[0])
np.testing.assert_array_equal([1, 0, 0], channel_inds[0])
np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1])
np.testing.assert_array_equal([2, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 2], x_inds[1])
np.testing.assert_array_equal([1, 0, 0], channel_inds[1])
def test_top_k_feature_map_locations_no_pooling(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 1] = 1.0
feature_map_np[0, 2, 1, 1] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 0] = 0.5
feature_map_np[0, 2, 2, 1] = -0.3
feature_map_np[1, 2, 1, 1] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.1
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=1, k=3))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.9, 0.7], scores[0])
np.testing.assert_array_equal([2, 2, 0], y_inds[0])
np.testing.assert_array_equal([0, 1, 1], x_inds[0])
np.testing.assert_array_equal([1, 1, 0], channel_inds[0])
np.testing.assert_allclose([0.7, 0.4, 0.1], scores[1])
np.testing.assert_array_equal([2, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 2], x_inds[1])
np.testing.assert_array_equal([1, 0, 0], channel_inds[1])
def test_top_k_feature_map_locations_per_channel(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
feature_map_np[0, 2, 1, 0] = 0.9 # Get's filtered due to max pool.
feature_map_np[0, 0, 1, 0] = 0.7 # Selected.
feature_map_np[0, 2, 2, 1] = 0.5 # Selected.
feature_map_np[0, 0, 0, 1] = 0.3 # Selected.
feature_map_np[1, 2, 1, 0] = 0.7 # Selected.
feature_map_np[1, 1, 0, 0] = 0.4 # Get's filtered due to max pool.
feature_map_np[1, 1, 2, 0] = 0.3 # Get's filtered due to max pool.
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3 # Selected.
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=2, per_channel=True))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.7, 0.5, 0.3], scores[0])
np.testing.assert_array_equal([2, 0, 2, 0], y_inds[0])
np.testing.assert_array_equal([0, 1, 2, 0], x_inds[0])
np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[0])
np.testing.assert_allclose([0.7, 0.0, 0.8, 0.3], scores[1])
np.testing.assert_array_equal([2, 0, 1, 1], y_inds[1])
np.testing.assert_array_equal([1, 0, 0, 2], x_inds[1])
np.testing.assert_array_equal([0, 0, 1, 1], channel_inds[1])
def test_top_k_feature_map_locations_k1(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
feature_map_np[0, 2, 1, 0] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 1] = 0.5
feature_map_np[0, 0, 0, 1] = 0.3
feature_map_np[1, 2, 1, 0] = 0.7
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.3
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=1, per_channel=False))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0], scores[0])
np.testing.assert_array_equal([2], y_inds[0])
np.testing.assert_array_equal([0], x_inds[0])
np.testing.assert_array_equal([0], channel_inds[0])
np.testing.assert_allclose([0.8], scores[1])
np.testing.assert_array_equal([1], y_inds[1])
np.testing.assert_array_equal([0], x_inds[1])
np.testing.assert_array_equal([1], channel_inds[1])
def test_top_k_feature_map_locations_k1_per_channel(self):
feature_map_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
feature_map_np[0, 2, 0, 0] = 1.0 # Selected.
feature_map_np[0, 2, 1, 0] = 0.9
feature_map_np[0, 0, 1, 0] = 0.7
feature_map_np[0, 2, 2, 1] = 0.5 # Selected.
feature_map_np[0, 0, 0, 1] = 0.3
feature_map_np[1, 2, 1, 0] = 0.7 # Selected.
feature_map_np[1, 1, 0, 0] = 0.4
feature_map_np[1, 1, 2, 0] = 0.3
feature_map_np[1, 1, 0, 1] = 0.8 # Selected.
feature_map_np[1, 1, 2, 1] = 0.3
def graph_fn():
feature_map = tf.constant(feature_map_np)
scores, y_inds, x_inds, channel_inds = (
cnma.top_k_feature_map_locations(
feature_map, max_pool_kernel_size=3, k=1, per_channel=True))
return scores, y_inds, x_inds, channel_inds
scores, y_inds, x_inds, channel_inds = self.execute(graph_fn, [])
np.testing.assert_allclose([1.0, 0.5], scores[0])
np.testing.assert_array_equal([2, 2], y_inds[0])
np.testing.assert_array_equal([0, 2], x_inds[0])
np.testing.assert_array_equal([0, 1], channel_inds[0])
np.testing.assert_allclose([0.7, 0.8], scores[1])
np.testing.assert_array_equal([2, 1], y_inds[1])
np.testing.assert_array_equal([1, 0], x_inds[1])
np.testing.assert_array_equal([0, 1], channel_inds[1])
def test_box_prediction(self):
class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32)
hw_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
# Sample 1, 2 boxes
class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0]
hw_pred[0, 10, 20] = [40, 60]
offset_pred[0, 10, 20] = [1, 2]
class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45]
hw_pred[0, 50, 60] = [50, 50]
offset_pred[0, 50, 60] = [0, 0]
# Sample 2, 2 boxes (at same location)
class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0]
hw_pred[1, 100, 100] = [10, 10]
offset_pred[1, 100, 100] = [1, 3]
# Sample 3, 3 boxes
class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8]
hw_pred[2, 60, 90] = [40, 30]
offset_pred[2, 60, 90] = [0, 0]
class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0]
hw_pred[2, 65, 95] = [20, 20]
offset_pred[2, 65, 95] = [1, 2]
class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0]
hw_pred[2, 75, 85] = [21, 25]
offset_pred[2, 75, 85] = [5, 2]
def graph_fn():
class_pred_tensor = tf.constant(class_pred)
hw_pred_tensor = tf.constant(hw_pred)
offset_pred_tensor = tf.constant(offset_pred)
_, y_indices, x_indices, _ = (
cnma.top_k_feature_map_locations(
class_pred_tensor, max_pool_kernel_size=3, k=2))
boxes = cnma.prediction_tensors_to_boxes(
y_indices, x_indices, hw_pred_tensor, offset_pred_tensor)
return boxes
boxes = self.execute(graph_fn, [])
np.testing.assert_allclose(
[[0, 0, 31, 52], [25, 35, 75, 85]], boxes[0])
np.testing.assert_allclose(
[[96, 98, 106, 108], [96, 98, 106, 108]], boxes[1])
np.testing.assert_allclose(
[[69.5, 74.5, 90.5, 99.5], [40, 75, 80, 105]], boxes[2])
def test_offset_prediction(self):
class_pred = np.zeros((3, 128, 128, 5), dtype=np.float32)
offset_pred = np.zeros((3, 128, 128, 2), dtype=np.float32)
# Sample 1, 2 boxes
class_pred[0, 10, 20] = [0.3, .7, 0.0, 0.0, 0.0]
offset_pred[0, 10, 20] = [1, 2]
class_pred[0, 50, 60] = [0.55, 0.0, 0.0, 0.0, 0.45]
offset_pred[0, 50, 60] = [0, 0]
# Sample 2, 2 boxes (at same location)
class_pred[1, 100, 100] = [0.0, 0.1, 0.9, 0.0, 0.0]
offset_pred[1, 100, 100] = [1, 3]
# Sample 3, 3 boxes
class_pred[2, 60, 90] = [0.0, 0.0, 0.0, 0.2, 0.8]
offset_pred[2, 60, 90] = [0, 0]
class_pred[2, 65, 95] = [0.0, 0.7, 0.3, 0.0, 0.0]
offset_pred[2, 65, 95] = [1, 2]
class_pred[2, 75, 85] = [1.0, 0.0, 0.0, 0.0, 0.0]
offset_pred[2, 75, 85] = [5, 2]
def graph_fn():
class_pred_tensor = tf.constant(class_pred)
offset_pred_tensor = tf.constant(offset_pred)
_, y_indices, x_indices, _ = (
cnma.top_k_feature_map_locations(
class_pred_tensor, max_pool_kernel_size=3, k=2))
offsets = cnma.prediction_tensors_to_temporal_offsets(
y_indices, x_indices, offset_pred_tensor)
return offsets
offsets = self.execute(graph_fn, [])
np.testing.assert_allclose(
[[1, 2], [0, 0]], offsets[0])
np.testing.assert_allclose(
[[1, 3], [1, 3]], offsets[1])
np.testing.assert_allclose(
[[5, 2], [0, 0]], offsets[2])
def test_keypoint_candidate_prediction(self):
keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_np[0, 0, 0, 0] = 1.0
keypoint_heatmap_np[0, 2, 1, 0] = 0.7
keypoint_heatmap_np[0, 1, 1, 0] = 0.6
keypoint_heatmap_np[0, 0, 2, 1] = 0.7
keypoint_heatmap_np[0, 1, 1, 1] = 0.3 # Filtered by low score.
keypoint_heatmap_np[0, 2, 2, 1] = 0.2
keypoint_heatmap_np[1, 1, 0, 0] = 0.6
keypoint_heatmap_np[1, 2, 1, 0] = 0.5
keypoint_heatmap_np[1, 0, 0, 0] = 0.4
keypoint_heatmap_np[1, 0, 0, 1] = 1.0
keypoint_heatmap_np[1, 0, 1, 1] = 0.9
keypoint_heatmap_np[1, 2, 0, 1] = 0.8
keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25]
keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5]
keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0]
keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0]
keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0]
keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5]
keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0]
keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5]
keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5]
keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5]
def graph_fn():
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_heatmap_offsets = tf.constant(
keypoint_heatmap_offsets_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = (
cnma.prediction_tensors_to_keypoint_candidates(
keypoint_heatmap,
keypoint_heatmap_offsets,
keypoint_score_threshold=0.5,
max_pool_kernel_size=1,
max_candidates=2))
return keypoint_cands, keypoint_scores, num_keypoint_candidates
(keypoint_cands, keypoint_scores,
num_keypoint_candidates) = self.execute(graph_fn, [])
expected_keypoint_candidates = [
[ # Example 0.
[[0.5, 0.25], [1.0, 2.0]], # Keypoint 1.
[[1.75, 1.5], [1.0, 1.0]], # Keypoint 2.
],
[ # Example 1.
[[1.25, 0.5], [0.0, -0.5]], # Keypoint 1.
[[2.5, 1.0], [0.5, 0.5]], # Keypoint 2.
],
]
expected_keypoint_scores = [
[ # Example 0.
[1.0, 0.7], # Keypoint 1.
[0.7, 0.3], # Keypoint 2.
],
[ # Example 1.
[0.6, 1.0], # Keypoint 1.
[0.5, 0.9], # Keypoint 2.
],
]
expected_num_keypoint_candidates = [
[2, 1],
[2, 2]
]
np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
np.testing.assert_array_equal(expected_num_keypoint_candidates,
num_keypoint_candidates)
def test_prediction_to_single_instance_keypoints(self):
image_size = (9, 9)
object_heatmap_np = np.zeros((1, image_size[0], image_size[1], 1),
dtype=np.float32)
# This should be picked.
object_heatmap_np[0, 4, 4, 0] = 0.9
# This shouldn't be picked since it's farther away from the center.
object_heatmap_np[0, 2, 2, 0] = 1.0
keypoint_heatmap_np = np.zeros((1, image_size[0], image_size[1], 4),
dtype=np.float32)
# Top-left corner should be picked.
keypoint_heatmap_np[0, 1, 1, 0] = 0.9
keypoint_heatmap_np[0, 4, 4, 0] = 1.0
# Top-right corner should be picked.
keypoint_heatmap_np[0, 1, 7, 1] = 0.9
keypoint_heatmap_np[0, 4, 4, 1] = 1.0
# Bottom-left corner should be picked.
keypoint_heatmap_np[0, 7, 1, 2] = 0.9
keypoint_heatmap_np[0, 4, 4, 2] = 1.0
# Bottom-right corner should be picked.
keypoint_heatmap_np[0, 7, 7, 3] = 0.9
keypoint_heatmap_np[0, 4, 4, 3] = 1.0
keypoint_offset_np = np.zeros((1, image_size[0], image_size[1], 8),
dtype=np.float32)
keypoint_offset_np[0, 1, 1] = [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[0, 1, 7] = [0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
keypoint_offset_np[0, 7, 1] = [0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.0, 0.0]
keypoint_offset_np[0, 7, 7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, -0.5]
keypoint_regression_np = np.zeros((1, image_size[0], image_size[1], 8),
dtype=np.float32)
keypoint_regression_np[0, 4, 4] = [-3, -3, -3, 3, 3, -3, 3, 3]
kp_params = get_fake_kp_params(
candidate_ranking_mode='score_distance_ratio')
def graph_fn():
object_heatmap = tf.constant(object_heatmap_np, dtype=tf.float32)
keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
keypoint_offset = tf.constant(keypoint_offset_np, dtype=tf.float32)
keypoint_regression = tf.constant(
keypoint_regression_np, dtype=tf.float32)
(keypoint_cands, keypoint_scores, _) = (
cnma.prediction_to_single_instance_keypoints(
object_heatmap,
keypoint_heatmap,
keypoint_offset,
keypoint_regression,
kp_params=kp_params))
return keypoint_cands, keypoint_scores
(keypoint_cands, keypoint_scores) = self.execute(graph_fn, [])
expected_keypoint_candidates = [[[
[1.5, 1.5], # top-left
[1.5, 6.5], # top-right
[6.5, 1.5], # bottom-left
[6.5, 6.5], # bottom-right
]]]
expected_keypoint_scores = [[[0.9, 0.9, 0.9, 0.9]]]
np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
  @parameterized.parameters({'provide_keypoint_score': True},
                            {'provide_keypoint_score': False})
  def test_prediction_to_multi_instance_keypoints(self, provide_keypoint_score):
    """Tests decoding per-instance keypoint heatmaps into candidates.

    The heatmap here has shape [batch, height, width, num_instances,
    num_keypoints] (3 instances, 4 keypoints). Each instance's heatmap peak,
    adjusted by the shared offset map, should be returned per keypoint, with
    the peak value as the score. The parameterization exercises both the path
    where keypoint scores are passed in explicitly and the path where they are
    derived inside the function.
    """
    image_size = (9, 9)
    keypoint_heatmap_np = np.zeros((1, image_size[0], image_size[1], 3, 4),
                                   dtype=np.float32)
    # Instance 0: peaks of 0.9 at the four corners of an 8x8 region.
    keypoint_heatmap_np[0, 1, 1, 0, 0] = 0.9
    keypoint_heatmap_np[0, 1, 7, 0, 1] = 0.9
    keypoint_heatmap_np[0, 7, 1, 0, 2] = 0.9
    keypoint_heatmap_np[0, 7, 7, 0, 3] = 0.9
    # Instance 1: peaks of 0.8, shifted by one pixel from instance 0.
    keypoint_heatmap_np[0, 2, 2, 1, 0] = 0.8
    keypoint_heatmap_np[0, 2, 8, 1, 1] = 0.8
    keypoint_heatmap_np[0, 8, 2, 1, 2] = 0.8
    keypoint_heatmap_np[0, 8, 8, 1, 3] = 0.8
    keypoint_offset_np = np.zeros((1, image_size[0], image_size[1], 8),
                                  dtype=np.float32)
    # Offsets for instance 0's peak locations (per-keypoint [dy, dx] pairs).
    keypoint_offset_np[0, 1, 1] = [0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    keypoint_offset_np[0, 1, 7] = [0.0, 0.0, 0.5, -0.5, 0.0, 0.0, 0.0, 0.0]
    keypoint_offset_np[0, 7, 1] = [0.0, 0.0, 0.0, 0.0, -0.5, 0.5, 0.0, 0.0]
    keypoint_offset_np[0, 7, 7] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5, -0.5]
    # Offsets for instance 1's peak locations.
    keypoint_offset_np[0, 2, 2] = [0.3, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    keypoint_offset_np[0, 2, 8] = [0.0, 0.0, 0.3, -0.3, 0.0, 0.0, 0.0, 0.0]
    keypoint_offset_np[0, 8, 2] = [0.0, 0.0, 0.0, 0.0, -0.3, 0.3, 0.0, 0.0]
    keypoint_offset_np[0, 8, 8] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3, -0.3]

    def graph_fn():
      keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
      keypoint_offset = tf.constant(keypoint_offset_np, dtype=tf.float32)
      if provide_keypoint_score:
        (keypoint_cands, keypoint_scores) = (
            cnma.prediction_tensors_to_multi_instance_kpts(
                keypoint_heatmap,
                keypoint_offset,
                tf.reduce_max(keypoint_heatmap, axis=3)))
      else:
        (keypoint_cands, keypoint_scores) = (
            cnma.prediction_tensors_to_multi_instance_kpts(
                keypoint_heatmap,
                keypoint_offset))
      return keypoint_cands, keypoint_scores
    (keypoint_cands, keypoint_scores) = self.execute(graph_fn, [])

    # Expected coordinates are peak location + offset for each instance.
    expected_keypoint_candidates_0 = [
        [1.5, 1.5],  # top-left
        [1.5, 6.5],  # top-right
        [6.5, 1.5],  # bottom-left
        [6.5, 6.5],  # bottom-right
    ]
    expected_keypoint_scores_0 = [0.9, 0.9, 0.9, 0.9]
    expected_keypoint_candidates_1 = [
        [2.3, 2.3],  # top-left
        [2.3, 7.7],  # top-right
        [7.7, 2.3],  # bottom-left
        [7.7, 7.7],  # bottom-right
    ]
    expected_keypoint_scores_1 = [0.8, 0.8, 0.8, 0.8]
    np.testing.assert_allclose(
        expected_keypoint_candidates_0, keypoint_cands[0, 0, :, :])
    np.testing.assert_allclose(
        expected_keypoint_candidates_1, keypoint_cands[0, 1, :, :])
    np.testing.assert_allclose(
        expected_keypoint_scores_0, keypoint_scores[0, 0, :])
    np.testing.assert_allclose(
        expected_keypoint_scores_1, keypoint_scores[0, 1, :])
  def test_keypoint_candidate_prediction_per_keypoints(self):
    """Tests candidate extraction when offsets are predicted per keypoint.

    With per-keypoint offsets the offset map has 2 * num_keypoints channels
    ([dy, dx] per keypoint type) instead of a single shared [dy, dx] pair.
    Verifies the top candidates per keypoint type, their scores, and the
    number of candidates above the score threshold (0.5).
    """
    keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
    keypoint_heatmap_np[0, 0, 0, 0] = 1.0
    keypoint_heatmap_np[0, 2, 1, 0] = 0.7
    keypoint_heatmap_np[0, 1, 1, 0] = 0.6
    keypoint_heatmap_np[0, 0, 2, 1] = 0.7
    keypoint_heatmap_np[0, 1, 1, 1] = 0.3  # Filtered by low score.
    keypoint_heatmap_np[0, 2, 2, 1] = 0.2
    keypoint_heatmap_np[1, 1, 0, 0] = 0.6
    keypoint_heatmap_np[1, 2, 1, 0] = 0.5
    keypoint_heatmap_np[1, 0, 0, 0] = 0.4
    keypoint_heatmap_np[1, 0, 0, 1] = 1.0
    keypoint_heatmap_np[1, 0, 1, 1] = 0.9
    keypoint_heatmap_np[1, 2, 0, 1] = 0.8
    # Note that the keypoint offsets are now per keypoint (as opposed to
    # keypoint agnostic, in the test test_keypoint_candidate_prediction).
    keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 4), dtype=np.float32)
    keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25, 0.0, 0.0]
    keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5, 0.0, 0.0]
    keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0, 0.0, 0.0]
    keypoint_heatmap_offsets_np[0, 0, 2] = [0.0, 0.0, 1.0, 0.0]
    keypoint_heatmap_offsets_np[0, 2, 2] = [0.0, 0.0, 1.0, 1.0]
    keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5, 0.0, 0.0]
    keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0, 0.0, 0.0]
    keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, 0.0, 0.0, -0.5]
    keypoint_heatmap_offsets_np[1, 0, 1] = [0.0, 0.0, 0.5, -0.5]
    keypoint_heatmap_offsets_np[1, 2, 0] = [0.0, 0.0, -1.0, -0.5]

    def graph_fn():
      keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
      keypoint_heatmap_offsets = tf.constant(
          keypoint_heatmap_offsets_np, dtype=tf.float32)
      (keypoint_cands, keypoint_scores, num_keypoint_candidates, _) = (
          cnma.prediction_tensors_to_keypoint_candidates(
              keypoint_heatmap,
              keypoint_heatmap_offsets,
              keypoint_score_threshold=0.5,
              max_pool_kernel_size=1,
              max_candidates=2))
      return keypoint_cands, keypoint_scores, num_keypoint_candidates
    (keypoint_cands, keypoint_scores,
     num_keypoint_candidates) = self.execute(graph_fn, [])

    expected_keypoint_candidates = [
        [  # Example 0.
            [[0.5, 0.25], [1.0, 2.0]],  # Candidate 1 of keypoint 1, 2.
            [[1.75, 1.5], [1.0, 1.0]],  # Candidate 2 of keypoint 1, 2.
        ],
        [  # Example 1.
            [[1.25, 0.5], [0.0, -0.5]],  # Candidate 1 of keypoint 1, 2.
            [[2.5, 1.0], [0.5, 0.5]],  # Candidate 2 of keypoint 1, 2.
        ],
    ]
    expected_keypoint_scores = [
        [  # Example 0.
            [1.0, 0.7],  # Candidate 1 scores of keypoint 1, 2.
            [0.7, 0.3],  # Candidate 2 scores of keypoint 1, 2.
        ],
        [  # Example 1.
            [0.6, 1.0],  # Candidate 1 scores of keypoint 1, 2.
            [0.5, 0.9],  # Candidate 2 scores of keypoint 1, 2.
        ],
    ]
    expected_num_keypoint_candidates = [
        [2, 1],
        [2, 2]
    ]
    np.testing.assert_allclose(expected_keypoint_candidates, keypoint_cands)
    np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
    np.testing.assert_array_equal(expected_num_keypoint_candidates,
                                  num_keypoint_candidates)
  @parameterized.parameters({'per_keypoint_depth': True},
                            {'per_keypoint_depth': False})
  def test_keypoint_candidate_prediction_depth(self, per_keypoint_depth):
    """Tests that candidate extraction also gathers predicted depths.

    Runs with both a per-keypoint depth map (one channel per keypoint type)
    and a shared single-channel depth map; depths at candidate locations must
    be returned alongside the candidate scores, and locations without a depth
    prediction yield 0.
    """
    keypoint_heatmap_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
    keypoint_heatmap_np[0, 0, 0, 0] = 1.0
    keypoint_heatmap_np[0, 2, 1, 0] = 0.7
    keypoint_heatmap_np[0, 1, 1, 0] = 0.6
    keypoint_heatmap_np[0, 0, 2, 1] = 0.7
    keypoint_heatmap_np[0, 1, 1, 1] = 0.3  # Filtered by low score.
    keypoint_heatmap_np[0, 2, 2, 1] = 0.2
    keypoint_heatmap_np[1, 1, 0, 0] = 0.6
    keypoint_heatmap_np[1, 2, 1, 0] = 0.5
    keypoint_heatmap_np[1, 0, 0, 0] = 0.4
    keypoint_heatmap_np[1, 0, 0, 1] = 1.0
    keypoint_heatmap_np[1, 0, 1, 1] = 0.9
    keypoint_heatmap_np[1, 2, 0, 1] = 0.8

    if per_keypoint_depth:
      keypoint_depths_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
      keypoint_depths_np[0, 0, 0, 0] = -1.5
      keypoint_depths_np[0, 2, 1, 0] = -1.0
      keypoint_depths_np[0, 0, 2, 1] = 1.5
    else:
      keypoint_depths_np = np.zeros((2, 3, 3, 1), dtype=np.float32)
      keypoint_depths_np[0, 0, 0, 0] = -1.5
      keypoint_depths_np[0, 2, 1, 0] = -1.0
      keypoint_depths_np[0, 0, 2, 0] = 1.5

    keypoint_heatmap_offsets_np = np.zeros((2, 3, 3, 2), dtype=np.float32)
    keypoint_heatmap_offsets_np[0, 0, 0] = [0.5, 0.25]
    keypoint_heatmap_offsets_np[0, 2, 1] = [-0.25, 0.5]
    keypoint_heatmap_offsets_np[0, 1, 1] = [0.0, 0.0]
    keypoint_heatmap_offsets_np[0, 0, 2] = [1.0, 0.0]
    keypoint_heatmap_offsets_np[0, 2, 2] = [1.0, 1.0]
    keypoint_heatmap_offsets_np[1, 1, 0] = [0.25, 0.5]
    keypoint_heatmap_offsets_np[1, 2, 1] = [0.5, 0.0]
    keypoint_heatmap_offsets_np[1, 0, 0] = [0.0, -0.5]
    keypoint_heatmap_offsets_np[1, 0, 1] = [0.5, -0.5]
    keypoint_heatmap_offsets_np[1, 2, 0] = [-1.0, -0.5]

    def graph_fn():
      keypoint_heatmap = tf.constant(keypoint_heatmap_np, dtype=tf.float32)
      keypoint_heatmap_offsets = tf.constant(
          keypoint_heatmap_offsets_np, dtype=tf.float32)
      keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32)
      (keypoint_cands, keypoint_scores, num_keypoint_candidates,
       keypoint_depths) = (
           cnma.prediction_tensors_to_keypoint_candidates(
               keypoint_heatmap,
               keypoint_heatmap_offsets,
               keypoint_score_threshold=0.5,
               max_pool_kernel_size=1,
               max_candidates=2,
               keypoint_depths=keypoint_depths))
      return (keypoint_cands, keypoint_scores, num_keypoint_candidates,
              keypoint_depths)
    (_, keypoint_scores, _, keypoint_depths) = self.execute(graph_fn, [])

    expected_keypoint_scores = [
        [  # Example 0.
            [1.0, 0.7],  # Keypoint 1.
            [0.7, 0.3],  # Keypoint 2.
        ],
        [  # Example 1.
            [0.6, 1.0],  # Keypoint 1.
            [0.5, 0.9],  # Keypoint 2.
        ],
    ]
    expected_keypoint_depths = [
        [
            [-1.5, 1.5],
            [-1.0, 0.0],
        ],
        [
            [0., 0.],
            [0., 0.],
        ],
    ]
    np.testing.assert_allclose(expected_keypoint_scores, keypoint_scores)
    np.testing.assert_allclose(expected_keypoint_depths, keypoint_depths)
  def test_regressed_keypoints_at_object_centers(self):
    """Tests gathering regressed keypoints at randomly chosen center indices.

    The gathered keypoints should equal the feature-map values at the
    (y, x) indices, with the center coordinates added to convert relative
    offsets to absolute positions, flattened to
    [batch, num_instances, 2 * num_keypoints].
    """
    batch_size = 2
    num_keypoints = 5
    num_instances = 6
    regressed_keypoint_feature_map_np = np.random.randn(
        batch_size, 10, 10, 2 * num_keypoints).astype(np.float32)
    y_indices = np.random.choice(10, (batch_size, num_instances))
    x_indices = np.random.choice(10, (batch_size, num_instances))
    offsets = np.stack([y_indices, x_indices], axis=2).astype(np.float32)

    def graph_fn():
      regressed_keypoint_feature_map = tf.constant(
          regressed_keypoint_feature_map_np, dtype=tf.float32)
      gathered_regressed_keypoints = (
          cnma.regressed_keypoints_at_object_centers(
              regressed_keypoint_feature_map,
              tf.constant(y_indices, dtype=tf.int32),
              tf.constant(x_indices, dtype=tf.int32)))
      return gathered_regressed_keypoints
    gathered_regressed_keypoints = self.execute(graph_fn, [])

    # Compute the expected result with plain numpy fancy indexing.
    expected_gathered_keypoints_0 = regressed_keypoint_feature_map_np[
        0, y_indices[0], x_indices[0], :]
    expected_gathered_keypoints_1 = regressed_keypoint_feature_map_np[
        1, y_indices[1], x_indices[1], :]
    expected_gathered_keypoints = np.stack([
        expected_gathered_keypoints_0,
        expected_gathered_keypoints_1], axis=0)
    expected_gathered_keypoints = np.reshape(
        expected_gathered_keypoints,
        [batch_size, num_instances, num_keypoints, 2])
    # Add the center (y, x) to every keypoint's regressed offset.
    expected_gathered_keypoints += np.expand_dims(offsets, axis=2)
    expected_gathered_keypoints = np.reshape(
        expected_gathered_keypoints,
        [batch_size, num_instances, -1])
    np.testing.assert_allclose(expected_gathered_keypoints,
                               gathered_regressed_keypoints)
  @parameterized.parameters(
      {'candidate_ranking_mode': 'min_distance'},
      {'candidate_ranking_mode': 'score_distance_ratio'},
  )
  def test_refine_keypoints(self, candidate_ranking_mode):
    """Tests snapping regressed keypoints to nearby detected candidates.

    Candidates within the (scaled) search region replace the regressed
    keypoint; unmatched keypoints keep the regressed location and get
    `unmatched_keypoint_score`. The two parameterized ranking modes may pick
    different candidates when several are in range.
    """
    regressed_keypoints_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]],  # Instance 0.
                [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]],  # Instance 1.
            ],
            # Example 1.
            [
                [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]],  # Instance 0.
                [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]],  # Instance 1.
            ],
        ], dtype=np.float32)
    keypoint_candidates_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]],  # Candidate 0.
                [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]],  # Candidate 1.
                [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],  # Candidate 2.
            ],
            # Example 1.
            [
                [[6.0, 1.5], [0.1, 0.4], [0.0, 0.0]],  # Candidate 0.
                [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]],  # Candidate 1.
                [[0.0, 0.0], [0.1, 0.3], [0.0, 0.0]],  # Candidate 2.
            ]
        ], dtype=np.float32)
    keypoint_scores_np = np.array(
        [
            # Example 0.
            [
                [0.8, 0.9, 1.0],  # Candidate 0.
                [0.6, 0.1, 0.9],  # Candidate 1.
                [0.0, 0.0, 0.0],  # Candidate 2.
            ],
            # Example 1.
            [
                [0.7, 0.3, 0.0],  # Candidate 0.
                [0.6, 0.1, 0.0],  # Candidate 1.
                [0.0, 0.28, 0.0],  # Candidate 2.
            ]
        ], dtype=np.float32)
    num_keypoints_candidates_np = np.array(
        [
            # Example 0.
            [2, 2, 2],
            # Example 1.
            [2, 3, 0],
        ], dtype=np.int32)
    unmatched_keypoint_score = 0.1

    def graph_fn():
      regressed_keypoints = tf.constant(
          regressed_keypoints_np, dtype=tf.float32)
      keypoint_candidates = tf.constant(
          keypoint_candidates_np, dtype=tf.float32)
      keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
      num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
                                            dtype=tf.int32)
      # The behavior of bboxes=None is different now. We provide the bboxes
      # explicitly by using the regressed keypoints to create the same
      # behavior.
      regressed_keypoints_flattened = tf.reshape(
          regressed_keypoints, [-1, 3, 2])
      bboxes_flattened = keypoint_ops.keypoints_to_enclosing_bounding_boxes(
          regressed_keypoints_flattened)
      (refined_keypoints, refined_scores, _) = cnma.refine_keypoints(
          regressed_keypoints,
          keypoint_candidates,
          keypoint_scores,
          num_keypoint_candidates,
          bboxes=bboxes_flattened,
          unmatched_keypoint_score=unmatched_keypoint_score,
          box_scale=1.2,
          candidate_search_scale=0.3,
          candidate_ranking_mode=candidate_ranking_mode)
      return refined_keypoints, refined_scores
    refined_keypoints, refined_scores = self.execute(graph_fn, [])

    if candidate_ranking_mode == 'min_distance':
      expected_refined_keypoints = np.array(
          [
              # Example 0.
              [
                  [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]],  # Instance 0.
                  [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]],  # Instance 1.
              ],
              # Example 1.
              [
                  [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]],  # Instance 0.
                  [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]],  # Instance 1.
              ],
          ], dtype=np.float32)
      expected_refined_scores = np.array(
          [
              # Example 0.
              [
                  [0.8, 0.9, unmatched_keypoint_score],  # Instance 0.
                  [unmatched_keypoint_score,  # Instance 1.
                   unmatched_keypoint_score, 1.0],
              ],
              # Example 1.
              [
                  [0.7, 0.1, unmatched_keypoint_score],  # Instance 0.
                  [unmatched_keypoint_score,  # Instance 1.
                   0.1, unmatched_keypoint_score],
              ],
          ], dtype=np.float32)
    else:
      # 'score_distance_ratio' picks a different candidate for example 1,
      # instance 0, keypoint 1 (the 0.28-score candidate at [0.1, 0.3]).
      expected_refined_keypoints = np.array(
          [
              # Example 0.
              [
                  [[2.0, 2.5], [6.0, 10.5], [14.0, 7.0]],  # Instance 0.
                  [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]],  # Instance 1.
              ],
              # Example 1.
              [
                  [[6.0, 1.5], [0.1, 0.3], [0.1, 0.1]],  # Instance 0.
                  [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]],  # Instance 1.
              ],
          ], dtype=np.float32)
      expected_refined_scores = np.array(
          [
              # Example 0.
              [
                  [0.8, 0.9, unmatched_keypoint_score],  # Instance 0.
                  [unmatched_keypoint_score,  # Instance 1.
                   unmatched_keypoint_score, 1.0],
              ],
              # Example 1.
              [
                  [0.7, 0.28, unmatched_keypoint_score],  # Instance 0.
                  [unmatched_keypoint_score,  # Instance 1.
                   0.1, unmatched_keypoint_score],
              ],
          ], dtype=np.float32)

    np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
    np.testing.assert_allclose(expected_refined_scores, refined_scores)
  def test_refine_keypoints_without_bbox(self):
    """Tests keypoint refinement with bboxes=None (no search-region filter).

    Without boxes, the closest candidate is chosen for every keypoint even
    when it lies far from the regressed location, so no keypoint falls back
    to `unmatched_keypoint_score`.
    """
    regressed_keypoints_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]],  # Instance 0.
                [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]],  # Instance 1.
            ],
        ], dtype=np.float32)
    keypoint_candidates_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]],  # Candidate 0.
                [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]],  # Candidate 1.
                [[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]],  # Candidate 2.
            ],
        ], dtype=np.float32)
    keypoint_scores_np = np.array(
        [
            # Example 0.
            [
                [0.8, 0.9, 1.0],  # Candidate 0.
                [0.6, 0.1, 0.9],  # Candidate 1.
                [0.0, 0.0, 0.0],  # Candidate 2.
            ],
        ], dtype=np.float32)
    num_keypoints_candidates_np = np.array(
        [
            # Example 0.
            [2, 2, 2],
        ], dtype=np.int32)
    unmatched_keypoint_score = 0.1

    def graph_fn():
      regressed_keypoints = tf.constant(
          regressed_keypoints_np, dtype=tf.float32)
      keypoint_candidates = tf.constant(
          keypoint_candidates_np, dtype=tf.float32)
      keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
      num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
                                            dtype=tf.int32)
      (refined_keypoints, refined_scores, _) = cnma.refine_keypoints(
          regressed_keypoints,
          keypoint_candidates,
          keypoint_scores,
          num_keypoint_candidates,
          bboxes=None,
          unmatched_keypoint_score=unmatched_keypoint_score,
          box_scale=1.2,
          candidate_search_scale=0.3,
          candidate_ranking_mode='min_distance')
      return refined_keypoints, refined_scores
    refined_keypoints, refined_scores = self.execute(graph_fn, [])

    # The expected refined keypoints pick the ones that are closest to the
    # regressed keypoint locations without filtering out the candidates which
    # are outside of the bounding box.
    expected_refined_keypoints = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]],  # Instance 0.
                [[1.0, 8.0], [0.0, 0.0], [4.0, 7.0]],  # Instance 1.
            ],
        ], dtype=np.float32)
    expected_refined_scores = np.array(
        [
            # Example 0.
            [
                [0.8, 0.9, 1.0],  # Instance 0.
                [0.6, 0.1, 1.0],  # Instance 1.
            ],
        ], dtype=np.float32)
    np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
    np.testing.assert_allclose(expected_refined_scores, refined_scores)
  @parameterized.parameters({'predict_depth': True}, {'predict_depth': False})
  def test_refine_keypoints_with_bboxes(self, predict_depth):
    """Tests refinement with explicit boxes and (optionally) keypoint depths.

    Candidates outside the (box_scale=1.0) instance box are rejected; when
    `predict_depth` is set, the depth of the chosen candidate is propagated
    (0 for unmatched keypoints).
    """
    regressed_keypoints_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.0], [6.0, 10.0], [14.0, 7.0]],  # Instance 0.
                [[0.0, 6.0], [3.0, 3.0], [5.0, 7.0]],  # Instance 1.
            ],
            # Example 1.
            [
                [[6.0, 2.0], [0.0, 0.0], [0.1, 0.1]],  # Instance 0.
                [[6.0, 2.5], [5.0, 5.0], [9.0, 3.0]],  # Instance 1.
            ],
        ], dtype=np.float32)
    keypoint_candidates_np = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.5], [6.0, 10.5], [4.0, 7.0]],  # Candidate 0.
                [[1.0, 8.0], [0.0, 0.0], [2.0, 2.0]],  # Candidate 1.
            ],
            # Example 1.
            [
                [[6.0, 1.5], [5.0, 5.0], [0.0, 0.0]],  # Candidate 0.
                [[1.0, 4.0], [0.0, 0.3], [0.0, 0.0]],  # Candidate 1.
            ]
        ], dtype=np.float32)
    keypoint_scores_np = np.array(
        [
            # Example 0.
            [
                [0.8, 0.9, 1.0],  # Candidate 0.
                [0.6, 0.1, 0.9],  # Candidate 1.
            ],
            # Example 1.
            [
                [0.7, 0.4, 0.0],  # Candidate 0.
                [0.6, 0.1, 0.0],  # Candidate 1.
            ]
        ],
        dtype=np.float32)
    keypoint_depths_np = np.array(
        [
            # Example 0.
            [
                [-0.8, -0.9, -1.0],  # Candidate 0.
                [-0.6, -0.1, -0.9],  # Candidate 1.
            ],
            # Example 1.
            [
                [-0.7, -0.4, -0.0],  # Candidate 0.
                [-0.6, -0.1, -0.0],  # Candidate 1.
            ]
        ],
        dtype=np.float32)
    num_keypoints_candidates_np = np.array(
        [
            # Example 0.
            [2, 2, 2],
            # Example 1.
            [2, 2, 0],
        ], dtype=np.int32)
    bboxes_np = np.array(
        [
            # Example 0.
            [
                [2.0, 2.0, 14.0, 10.0],  # Instance 0.
                [0.0, 3.0, 5.0, 7.0],  # Instance 1.
            ],
            # Example 1.
            [
                [0.0, 0.0, 6.0, 2.0],  # Instance 0.
                [5.0, 1.4, 9.0, 5.0],  # Instance 1.
            ],
        ], dtype=np.float32)
    unmatched_keypoint_score = 0.1

    def graph_fn():
      regressed_keypoints = tf.constant(
          regressed_keypoints_np, dtype=tf.float32)
      keypoint_candidates = tf.constant(
          keypoint_candidates_np, dtype=tf.float32)
      keypoint_scores = tf.constant(keypoint_scores_np, dtype=tf.float32)
      if predict_depth:
        keypoint_depths = tf.constant(keypoint_depths_np, dtype=tf.float32)
      else:
        keypoint_depths = None
      num_keypoint_candidates = tf.constant(num_keypoints_candidates_np,
                                            dtype=tf.int32)
      bboxes = tf.constant(bboxes_np, dtype=tf.float32)
      (refined_keypoints, refined_scores,
       refined_depths) = cnma.refine_keypoints(
           regressed_keypoints,
           keypoint_candidates,
           keypoint_scores,
           num_keypoint_candidates,
           bboxes=bboxes,
           unmatched_keypoint_score=unmatched_keypoint_score,
           box_scale=1.0,
           candidate_search_scale=0.3,
           keypoint_depth_candidates=keypoint_depths)
      if predict_depth:
        return refined_keypoints, refined_scores, refined_depths
      else:
        return refined_keypoints, refined_scores

    expected_refined_keypoints = np.array(
        [
            # Example 0.
            [
                [[2.0, 2.5], [6.0, 10.0], [14.0, 7.0]],  # Instance 0.
                [[0.0, 6.0], [3.0, 3.0], [4.0, 7.0]],  # Instance 1.
            ],
            # Example 1.
            [
                [[6.0, 1.5], [0.0, 0.3], [0.1, 0.1]],  # Instance 0.
                [[6.0, 1.5], [5.0, 5.0], [9.0, 3.0]],  # Instance 1.
            ],
        ], dtype=np.float32)
    expected_refined_scores = np.array(
        [
            # Example 0.
            [
                [0.8, unmatched_keypoint_score,  # Instance 0.
                 unmatched_keypoint_score],
                [unmatched_keypoint_score,  # Instance 1.
                 unmatched_keypoint_score, 1.0],
            ],
            # Example 1.
            [
                [0.7, 0.1, unmatched_keypoint_score],  # Instance 0.
                [0.7, 0.4, unmatched_keypoint_score],  # Instance 1.
            ],
        ], dtype=np.float32)

    if predict_depth:
      refined_keypoints, refined_scores, refined_depths = self.execute(
          graph_fn, [])
      expected_refined_depths = np.array([[[-0.8, 0.0, 0.0], [0.0, 0.0, -1.0]],
                                          [[-0.7, -0.1, 0.0], [-0.7, -0.4,
                                                               0.0]]])
      np.testing.assert_allclose(expected_refined_depths, refined_depths)
    else:
      refined_keypoints, refined_scores = self.execute(graph_fn, [])
    np.testing.assert_allclose(expected_refined_keypoints, refined_keypoints)
    np.testing.assert_allclose(expected_refined_scores, refined_scores)
  def test_sdr_scaled_ranking_score(self):
    """Tests score/distance-ratio ranking scaled by instance box size.

    Checks only relative orderings: (1) with equal heatmap scores, smaller
    distance gives a higher ranking score; (2) larger boxes make the score
    less sensitive to distance.
    """
    keypoint_scores_np = np.array(
        [
            # Example 0.
            [
                [0.9, 0.9, 0.9],  # Candidate 0.
                [0.9, 0.9, 0.9],  # Candidate 1.
            ]
        ],
        dtype=np.float32)
    distances_np = np.expand_dims(
        np.array(
            [
                # Instance 0.
                [
                    [2.0, 1.0, 0.0],  # Candidate 0.
                    [2.0, 1.0, 2.0],  # Candidate 1.
                ],
                # Instance 1.
                [
                    [2.0, 1.0, 0.0],  # Candidate 0.
                    [2.0, 1.0, 2.0],  # Candidate 1.
                ]
            ],
            dtype=np.float32),
        axis=0)
    bboxes_np = np.array(
        [
            # Example 0.
            [
                [2.0, 2.0, 20.0, 20.0],  # Instance 0 large box.
                [3.0, 3.0, 4.0, 4.0],  # Instance 1 small box.
            ],
        ],
        dtype=np.float32)
    # Note: computed eagerly (no graph_fn/self.execute wrapper) since only
    # shape and relative orderings are asserted.
    keypoint_scores = tf.constant(
        keypoint_scores_np, dtype=tf.float32)
    distances = tf.constant(
        distances_np, dtype=tf.float32)
    bboxes = tf.constant(bboxes_np, dtype=tf.float32)
    ranking_scores = cnma.sdr_scaled_ranking_score(
        keypoint_scores=keypoint_scores,
        distances=distances,
        bboxes=bboxes,
        score_distance_multiplier=0.1)
    self.assertAllEqual([1, 2, 2, 3], ranking_scores.shape)

    # When the scores are the same, larger distance results in lower ranking
    # score.
    # instance 0, candidate 0, keypoint type 0 v.s 1 vs. 2
    self.assertGreater(ranking_scores[0, 0, 0, 2], ranking_scores[0, 0, 0, 1])
    self.assertGreater(ranking_scores[0, 0, 0, 1], ranking_scores[0, 0, 0, 0])

    # When the scores are the same, the difference of distances are the same,
    # instance with larger bbox has less ranking score difference, i.e. less
    # sensitive to the distance change.
    # instance 0 vs. 1, candidate 0, keypoint type 0 and 1
    self.assertGreater(
        ranking_scores[0, 1, 1, 1] - ranking_scores[0, 1, 1, 0],
        ranking_scores[0, 0, 1, 1] - ranking_scores[0, 0, 1, 0]
    )
  def test_gaussian_weighted_score(self):
    """Tests Gaussian-weighted candidate ranking.

    Checks: zero distance preserves the raw heatmap score; distances scaled
    in proportion to the per-keypoint standard deviations yield equal
    scores; and larger boxes down-weight distance less.
    """
    keypoint_scores_np = np.array(
        [
            # Example 0.
            [
                [0.9, 0.9, 0.9],  # Candidate 0.
                [1.0, 0.8, 1.0],  # Candidate 1.
            ]
        ],
        dtype=np.float32)
    distances_np = np.expand_dims(
        np.array(
            [
                # Instance 0.
                [
                    [2.0, 1.0, 0.0],  # Candidate 0.
                    [1.0, 0.0, 2.0],  # Candidate 1.
                ],
                # Instance 1.
                [
                    [2.0, 1.0, 0.0],  # Candidate 0.
                    [1.0, 0.0, 2.0],  # Candidate 1.
                ]
            ],
            dtype=np.float32),
        axis=0)
    bboxes_np = np.array(
        [
            # Example 0.
            [
                [2.0, 2.0, 20.0, 20.0],  # Instance 0 large box.
                [3.0, 3.0, 4.0, 4.0],  # Instance 1 small box.
            ],
        ],
        dtype=np.float32)
    # Note: computed eagerly (no graph_fn/self.execute wrapper) since only
    # shape and relative orderings/equalities are asserted.
    keypoint_scores = tf.constant(
        keypoint_scores_np, dtype=tf.float32)
    distances = tf.constant(
        distances_np, dtype=tf.float32)
    bboxes = tf.constant(bboxes_np, dtype=tf.float32)
    ranking_scores = cnma.gaussian_weighted_score(
        keypoint_scores=keypoint_scores,
        distances=distances,
        keypoint_std_dev=[1.0, 0.5, 1.5],
        bboxes=bboxes)
    self.assertAllEqual([1, 2, 2, 3], ranking_scores.shape)

    # When distance is zero, the candidate's score remains the same.
    # instance 0, candidate 0, keypoint type 2
    self.assertAlmostEqual(ranking_scores[0, 0, 0, 2], keypoint_scores[0, 0, 2])
    # instance 0, candidate 1, keypoint type 1
    self.assertAlmostEqual(ranking_scores[0, 0, 1, 1], keypoint_scores[0, 1, 1])

    # When the distances of two candidates are 1:2 and the keypoint standard
    # deviation is 1:2 and the keypoint heatmap scores are the same, the
    # resulting ranking score should be the same.
    # instance 0, candidate 0, keypoint type 0, 1.
    self.assertAlmostEqual(
        ranking_scores[0, 0, 0, 0], ranking_scores[0, 0, 0, 1])

    # When the distances/heatmap scores/keypoint standard deviations are the
    # same, the instance with larger bbox size gets higher score.
    self.assertGreater(ranking_scores[0, 0, 0, 0], ranking_scores[0, 1, 0, 0])
  def test_pad_to_full_keypoint_dim(self):
    """Tests scattering a keypoint subset into the full keypoint dimension.

    Coordinates/scores for `keypoint_inds` must land at those indices in a
    padded [batch, instances, num_total_keypoints, ...] tensor.
    """
    batch_size = 4
    num_instances = 8
    num_keypoints = 2
    keypoint_inds = [1, 3]  # Target slots in the full keypoint axis.
    num_total_keypoints = 5

    kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2)
    kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints)

    def graph_fn():
      kpt_coords = tf.constant(kpt_coords_np)
      kpt_scores = tf.constant(kpt_scores_np)
      kpt_coords_padded, kpt_scores_padded = (
          cnma._pad_to_full_keypoint_dim(
              kpt_coords, kpt_scores, keypoint_inds, num_total_keypoints))
      return kpt_coords_padded, kpt_scores_padded
    kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, [])

    self.assertAllEqual([batch_size, num_instances, num_total_keypoints, 2],
                        kpt_coords_padded.shape)
    self.assertAllEqual([batch_size, num_instances, num_total_keypoints],
                        kpt_scores_padded.shape)

    for i, kpt_ind in enumerate(keypoint_inds):
      np.testing.assert_allclose(kpt_coords_np[:, :, i, :],
                                 kpt_coords_padded[:, :, kpt_ind, :])
      np.testing.assert_allclose(kpt_scores_np[:, :, i],
                                 kpt_scores_padded[:, :, kpt_ind])
  def test_pad_to_full_instance_dim(self):
    """Tests scattering an instance subset into the full instance dimension.

    Coordinates/scores for `instance_inds` must land at those indices in a
    padded [batch, max_instances, ...] tensor.
    """
    batch_size = 4
    max_instances = 8
    num_keypoints = 6
    num_instances = 2
    instance_inds = [1, 3]  # Target slots in the full instance axis.

    kpt_coords_np = np.random.randn(batch_size, num_instances, num_keypoints, 2)
    kpt_scores_np = np.random.randn(batch_size, num_instances, num_keypoints)

    def graph_fn():
      kpt_coords = tf.constant(kpt_coords_np)
      kpt_scores = tf.constant(kpt_scores_np)
      kpt_coords_padded, kpt_scores_padded = (
          cnma._pad_to_full_instance_dim(
              kpt_coords, kpt_scores, instance_inds, max_instances))
      return kpt_coords_padded, kpt_scores_padded
    kpt_coords_padded, kpt_scores_padded = self.execute(graph_fn, [])

    self.assertAllEqual([batch_size, max_instances, num_keypoints, 2],
                        kpt_coords_padded.shape)
    self.assertAllEqual([batch_size, max_instances, num_keypoints],
                        kpt_scores_padded.shape)

    for i, inst_ind in enumerate(instance_inds):
      np.testing.assert_allclose(kpt_coords_np[:, i, :, :],
                                 kpt_coords_padded[:, inst_ind, :, :])
      np.testing.assert_allclose(kpt_scores_np[:, i, :],
                                 kpt_scores_padded[:, inst_ind, :])
  def test_predicted_embeddings_at_object_centers(self):
    """Tests gathering re-ID embeddings at randomly chosen center indices.

    The gathered embeddings should equal the feature-map values at the
    (y, x) indices, shaped [batch, num_instances, embedding_size].
    """
    batch_size = 2
    embedding_size = 5
    num_instances = 6
    predicted_embedding_feature_map_np = np.random.randn(
        batch_size, 10, 10, embedding_size).astype(np.float32)
    y_indices = np.random.choice(10, (batch_size, num_instances))
    x_indices = np.random.choice(10, (batch_size, num_instances))

    def graph_fn():
      predicted_embedding_feature_map = tf.constant(
          predicted_embedding_feature_map_np, dtype=tf.float32)
      gathered_predicted_embeddings = (
          cnma.predicted_embeddings_at_object_centers(
              predicted_embedding_feature_map,
              tf.constant(y_indices, dtype=tf.int32),
              tf.constant(x_indices, dtype=tf.int32)))
      return gathered_predicted_embeddings
    gathered_predicted_embeddings = self.execute(graph_fn, [])

    # Compute the expected result with plain numpy fancy indexing.
    expected_gathered_embeddings_0 = predicted_embedding_feature_map_np[
        0, y_indices[0], x_indices[0], :]
    expected_gathered_embeddings_1 = predicted_embedding_feature_map_np[
        1, y_indices[1], x_indices[1], :]
    expected_gathered_embeddings = np.stack([
        expected_gathered_embeddings_0,
        expected_gathered_embeddings_1], axis=0)
    expected_gathered_embeddings = np.reshape(
        expected_gathered_embeddings,
        [batch_size, num_instances, embedding_size])
    np.testing.assert_allclose(expected_gathered_embeddings,
                               gathered_predicted_embeddings)
# Common parameters for setting up testing examples across tests.
_NUM_CLASSES = 10
_KEYPOINT_INDICES = [0, 1, 2, 3]  # Keypoint types used by the fake pose task.
_NUM_KEYPOINTS = len(_KEYPOINT_INDICES)
_DENSEPOSE_NUM_PARTS = 24  # Number of DensePose body parts.
_TASK_NAME = 'human_pose'  # Key used for the keypoint task dict.
_NUM_TRACK_IDS = 3
_REID_EMBED_SIZE = 2
_NUM_FC_LAYERS = 1
def get_fake_center_params(max_box_predictions=5):
  """Builds an ObjectCenterParams namedtuple with fixed test values.

  Args:
    max_box_predictions: int, the maximum number of boxes to predict.

  Returns:
    A cnma.ObjectCenterParams populated with deterministic test settings.
  """
  center_loss = losses.WeightedSigmoidClassificationLoss()
  return cnma.ObjectCenterParams(
      classification_loss=center_loss,
      object_center_loss_weight=1.0,
      min_box_overlap_iou=1.0,
      max_box_predictions=max_box_predictions,
      use_labeled_classes=False,
      center_head_num_filters=[128],
      center_head_kernel_sizes=[5])
def get_fake_od_params():
  """Builds an ObjectDetectionParams namedtuple with fixed test values."""
  box_loss = losses.L1LocalizationLoss()
  return cnma.ObjectDetectionParams(
      localization_loss=box_loss,
      offset_loss_weight=1.0,
      scale_loss_weight=0.1)
def get_fake_kp_params(num_candidates_per_keypoint=100,
                       per_keypoint_offset=False,
                       predict_depth=False,
                       per_keypoint_depth=False,
                       peak_radius=0,
                       candidate_ranking_mode='min_distance',
                       argmax_postprocessing=False):
  """Builds a KeypointEstimationParams namedtuple with fixed test values.

  Args:
    num_candidates_per_keypoint: int, candidate cap per keypoint type.
    per_keypoint_offset: bool, whether offsets are predicted per keypoint.
    predict_depth: bool, whether keypoint depth is predicted.
    per_keypoint_depth: bool, whether depth is predicted per keypoint.
    peak_radius: int, radius used when gathering offsets around peaks.
    candidate_ranking_mode: str, how candidates are ranked during refinement.
    argmax_postprocessing: bool, whether to use argmax-based postprocessing.

  Returns:
    A cnma.KeypointEstimationParams for the fake 'human_pose' task.
  """
  kp_classification_loss = losses.WeightedSigmoidClassificationLoss()
  kp_localization_loss = losses.L1LocalizationLoss()
  return cnma.KeypointEstimationParams(
      task_name=_TASK_NAME,
      class_id=1,
      keypoint_indices=_KEYPOINT_INDICES,
      keypoint_std_dev=[0.00001] * len(_KEYPOINT_INDICES),
      classification_loss=kp_classification_loss,
      localization_loss=kp_localization_loss,
      unmatched_keypoint_score=0.1,
      keypoint_candidate_score_threshold=0.1,
      num_candidates_per_keypoint=num_candidates_per_keypoint,
      per_keypoint_offset=per_keypoint_offset,
      predict_depth=predict_depth,
      per_keypoint_depth=per_keypoint_depth,
      offset_peak_radius=peak_radius,
      candidate_ranking_mode=candidate_ranking_mode,
      argmax_postprocessing=argmax_postprocessing)
def get_fake_mask_params():
  """Builds a MaskParams namedtuple with fixed test values."""
  mask_loss = losses.WeightedSoftmaxClassificationLoss()
  return cnma.MaskParams(
      classification_loss=mask_loss,
      task_loss_weight=1.0,
      mask_height=4,
      mask_width=4,
      mask_head_num_filters=[96],
      mask_head_kernel_sizes=[3])
def get_fake_densepose_params():
  """Builds a DensePoseParams namedtuple with fixed test values."""
  part_loss = losses.WeightedSoftmaxClassificationLoss()
  surface_coord_loss = losses.L1LocalizationLoss()
  return cnma.DensePoseParams(
      class_id=1,
      classification_loss=part_loss,
      localization_loss=surface_coord_loss,
      part_loss_weight=1.0,
      coordinate_loss_weight=1.0,
      num_parts=_DENSEPOSE_NUM_PARTS,
      task_loss_weight=1.0,
      upsample_to_input_res=True,
      upsample_method='nearest')
def get_fake_track_params():
  """Builds a TrackParams namedtuple with fixed test values."""
  track_loss = losses.WeightedSoftmaxClassificationLoss()
  return cnma.TrackParams(
      num_track_ids=_NUM_TRACK_IDS,
      reid_embed_size=_REID_EMBED_SIZE,
      num_fc_layers=_NUM_FC_LAYERS,
      classification_loss=track_loss,
      task_loss_weight=1.0)
def get_fake_temporal_offset_params():
  """Builds a TemporalOffsetParams namedtuple with fixed test values."""
  offset_loss = losses.WeightedSmoothL1LocalizationLoss()
  return cnma.TemporalOffsetParams(
      localization_loss=offset_loss,
      task_loss_weight=1.0)
def build_center_net_meta_arch(build_resnet=False,
                               num_classes=_NUM_CLASSES,
                               max_box_predictions=5,
                               apply_non_max_suppression=False,
                               detection_only=False,
                               per_keypoint_offset=False,
                               predict_depth=False,
                               per_keypoint_depth=False,
                               peak_radius=0,
                               keypoint_only=False,
                               candidate_ranking_mode='min_distance',
                               argmax_postprocessing=False):
  """Builds the CenterNet meta architecture.

  Depending on the flags, the returned model is configured for:
  keypoint-only estimation (`keypoint_only`), box detection only
  (`detection_only`), single-class detection + keypoints
  (`num_classes == 1`), or the full multi-task setup (masks, DensePose,
  tracking, temporal offsets).

  Args:
    build_resnet: bool, use a real ResNet feature extractor instead of the
      dummy one.
    num_classes: int, number of object classes.
    max_box_predictions: int, maximum number of boxes to predict.
    apply_non_max_suppression: bool, attach a batch NMS function.
    detection_only: bool, configure only the detection task.
    per_keypoint_offset: bool, predict offsets per keypoint type.
    predict_depth: bool, predict keypoint depths.
    per_keypoint_depth: bool, predict depth per keypoint type.
    peak_radius: int, offset peak radius for keypoint candidates.
    keypoint_only: bool, configure only the keypoint task.
    candidate_ranking_mode: str, keypoint candidate ranking mode.
    argmax_postprocessing: bool, use argmax-based keypoint postprocessing.

  Returns:
    A configured cnma.CenterNetMetaArch instance.
  """
  if build_resnet:
    feature_extractor = (
        center_net_resnet_feature_extractor.CenterNetResnetFeatureExtractor(
            'resnet_v2_101'))
  else:
    feature_extractor = DummyFeatureExtractor(
        channel_means=(1.0, 2.0, 3.0),
        channel_stds=(10., 20., 30.),
        bgr_ordering=False,
        num_feature_outputs=2,
        stride=4)
  # Fixed misspelled keyword ('pad_to_max_dimesnion'): resize_to_range's
  # parameter is 'pad_to_max_dimension', so the misspelling would raise
  # TypeError whenever the partial is invoked (and the requested padding was
  # never applied). NOTE(review): confirm against preprocessor.resize_to_range.
  image_resizer_fn = functools.partial(
      preprocessor.resize_to_range,
      min_dimension=128,
      max_dimension=128,
      pad_to_max_dimension=True)

  non_max_suppression_fn = None
  if apply_non_max_suppression:
    post_processing_proto = post_processing_pb2.PostProcessing()
    post_processing_proto.batch_non_max_suppression.iou_threshold = 1.0
    post_processing_proto.batch_non_max_suppression.score_threshold = 0.6
    (post_processing_proto.batch_non_max_suppression.max_total_detections
    ) = max_box_predictions
    (post_processing_proto.batch_non_max_suppression.max_detections_per_class
    ) = max_box_predictions
    (post_processing_proto.batch_non_max_suppression.change_coordinate_frame
    ) = False
    non_max_suppression_fn, _ = post_processing_builder.build(
        post_processing_proto)

  if keypoint_only:
    # With a single box prediction, keep a single candidate per keypoint.
    num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1
    return cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=False,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=image_resizer_fn,
        object_center_params=get_fake_center_params(max_box_predictions),
        keypoint_params_dict={
            _TASK_NAME:
                get_fake_kp_params(num_candidates_per_keypoint,
                                   per_keypoint_offset, predict_depth,
                                   per_keypoint_depth, peak_radius,
                                   candidate_ranking_mode,
                                   argmax_postprocessing)
        },
        non_max_suppression_fn=non_max_suppression_fn)
  elif detection_only:
    return cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=False,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=image_resizer_fn,
        object_center_params=get_fake_center_params(max_box_predictions),
        object_detection_params=get_fake_od_params(),
        non_max_suppression_fn=non_max_suppression_fn)
  elif num_classes == 1:
    num_candidates_per_keypoint = 100 if max_box_predictions > 1 else 1
    return cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=False,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=image_resizer_fn,
        object_center_params=get_fake_center_params(max_box_predictions),
        object_detection_params=get_fake_od_params(),
        keypoint_params_dict={
            _TASK_NAME:
                get_fake_kp_params(num_candidates_per_keypoint,
                                   per_keypoint_offset, predict_depth,
                                   per_keypoint_depth, peak_radius,
                                   candidate_ranking_mode,
                                   argmax_postprocessing)
        },
        non_max_suppression_fn=non_max_suppression_fn)
  else:
    # Full multi-task configuration (masks, DensePose, tracking, temporal
    # offsets). Note the center params intentionally use the default
    # max_box_predictions here, matching the original behavior.
    return cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=False,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=image_resizer_fn,
        object_center_params=get_fake_center_params(),
        object_detection_params=get_fake_od_params(),
        keypoint_params_dict={_TASK_NAME: get_fake_kp_params(
            candidate_ranking_mode=candidate_ranking_mode)},
        mask_params=get_fake_mask_params(),
        densepose_params=get_fake_densepose_params(),
        track_params=get_fake_track_params(),
        temporal_offset_params=get_fake_temporal_offset_params(),
        non_max_suppression_fn=non_max_suppression_fn)
def _logit(p):
return np.log(
(p + np.finfo(np.float32).eps) / (1 - p + np.finfo(np.float32).eps))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchLibTest(test_case.TestCase):
  """Test for CenterNet meta architecture related functions."""

  def test_get_keypoint_name(self):
    # Task name and head name are joined with a '/' separator.
    joined_name = cnma.get_keypoint_name('human_pose', 'keypoint_offset')
    self.assertEqual('human_pose/keypoint_offset', joined_name)

  def test_get_num_instances_from_weights(self):
    all_zeros = tf.constant([0.0, 0.0, 0.0], dtype=tf.float32)
    two_nonzero = tf.constant([0.5, 0.9, 0.0], dtype=tf.float32)
    one_nonzero = tf.constant([0.0, 0.0, 1.0], dtype=tf.float32)

    def count_nonzero():
      # The three tensors carry a total of three non-zero entries.
      return cnma.get_num_instances_from_weights(
          [all_zeros, two_nonzero, one_nonzero])

    self.assertAlmostEqual(3, self.execute(count_nonzero, []))

    def count_none():
      # No non-zero entry anywhere; the helper clamps the count to 1.
      return cnma.get_num_instances_from_weights([all_zeros, all_zeros])

    self.assertAlmostEqual(1, self.execute(count_none, []))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchTest(test_case.TestCase, parameterized.TestCase):
  """Tests for the CenterNet meta architecture."""

  def test_construct_prediction_heads(self):
    """Each configured task contributes a head with the expected depth."""
    model = build_center_net_meta_arch()
    fake_feature_map = np.zeros((4, 128, 128, 8))

    # Check the dictionary contains expected keys and corresponding heads with
    # correct dimensions.
    # "object center" head:
    output = model._prediction_head_dict[cnma.OBJECT_CENTER][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape)

    # "object scale" (height/width) head:
    output = model._prediction_head_dict[cnma.BOX_SCALE][-1](fake_feature_map)
    self.assertEqual((4, 128, 128, 2), output.shape)

    # "object offset" head:
    output = model._prediction_head_dict[cnma.BOX_OFFSET][-1](fake_feature_map)
    self.assertEqual((4, 128, 128, 2), output.shape)

    # "keypoint offset" head:
    output = model._prediction_head_dict[
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET)][-1](
            fake_feature_map)
    self.assertEqual((4, 128, 128, 2), output.shape)

    # "keypoint heatmap" head:
    output = model._prediction_head_dict[cnma.get_keypoint_name(
        _TASK_NAME, cnma.KEYPOINT_HEATMAP)][-1](
            fake_feature_map)
    self.assertEqual((4, 128, 128, _NUM_KEYPOINTS), output.shape)

    # "keypoint regression" head:
    output = model._prediction_head_dict[cnma.get_keypoint_name(
        _TASK_NAME, cnma.KEYPOINT_REGRESSION)][-1](
            fake_feature_map)
    self.assertEqual((4, 128, 128, 2 * _NUM_KEYPOINTS), output.shape)

    # "mask" head:
    output = model._prediction_head_dict[cnma.SEGMENTATION_HEATMAP][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, _NUM_CLASSES), output.shape)

    # "densepose parts" head:
    output = model._prediction_head_dict[cnma.DENSEPOSE_HEATMAP][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, _DENSEPOSE_NUM_PARTS), output.shape)

    # "densepose surface coordinates" head:
    output = model._prediction_head_dict[cnma.DENSEPOSE_REGRESSION][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, 2 * _DENSEPOSE_NUM_PARTS), output.shape)

    # "track embedding" head:
    output = model._prediction_head_dict[cnma.TRACK_REID][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, _REID_EMBED_SIZE), output.shape)

    # "temporal offset" head:
    output = model._prediction_head_dict[cnma.TEMPORAL_OFFSET][-1](
        fake_feature_map)
    self.assertEqual((4, 128, 128, 2), output.shape)

  def test_initialize_target_assigners(self):
    """Each task key maps to the matching target-assigner class."""
    model = build_center_net_meta_arch()
    assigner_dict = model._initialize_target_assigners(
        stride=2,
        min_box_overlap_iou=0.7)

    # Check whether the corresponding target assigner class is initialized.
    # object center target assigner:
    self.assertIsInstance(assigner_dict[cnma.OBJECT_CENTER],
                          cn_assigner.CenterNetCenterHeatmapTargetAssigner)

    # object detection target assigner:
    self.assertIsInstance(assigner_dict[cnma.DETECTION_TASK],
                          cn_assigner.CenterNetBoxTargetAssigner)

    # keypoint estimation target assigner:
    self.assertIsInstance(assigner_dict[_TASK_NAME],
                          cn_assigner.CenterNetKeypointTargetAssigner)

    # mask estimation target assigner:
    self.assertIsInstance(assigner_dict[cnma.SEGMENTATION_TASK],
                          cn_assigner.CenterNetMaskTargetAssigner)

    # DensePose estimation target assigner:
    self.assertIsInstance(assigner_dict[cnma.DENSEPOSE_TASK],
                          cn_assigner.CenterNetDensePoseTargetAssigner)

    # Track estimation target assigner:
    self.assertIsInstance(assigner_dict[cnma.TRACK_TASK],
                          cn_assigner.CenterNetTrackTargetAssigner)

    # Temporal Offset target assigner:
    self.assertIsInstance(assigner_dict[cnma.TEMPORALOFFSET_TASK],
                          cn_assigner.CenterNetTemporalOffsetTargetAssigner)

  def test_predict(self):
    """Test the predict function."""
    model = build_center_net_meta_arch()

    def graph_fn():
      prediction_dict = model.predict(tf.zeros([2, 128, 128, 3]), None)
      return prediction_dict

    prediction_dict = self.execute(graph_fn, [])
    # The dummy feature extractor has stride 4, so 128 -> 32 spatially.
    self.assertEqual(prediction_dict['preprocessed_inputs'].shape,
                     (2, 128, 128, 3))
    self.assertEqual(prediction_dict[cnma.OBJECT_CENTER][0].shape,
                     (2, 32, 32, _NUM_CLASSES))
    self.assertEqual(prediction_dict[cnma.BOX_SCALE][0].shape,
                     (2, 32, 32, 2))
    self.assertEqual(prediction_dict[cnma.BOX_OFFSET][0].shape,
                     (2, 32, 32, 2))
    self.assertEqual(prediction_dict[cnma.SEGMENTATION_HEATMAP][0].shape,
                     (2, 32, 32, _NUM_CLASSES))
    self.assertEqual(prediction_dict[cnma.DENSEPOSE_HEATMAP][0].shape,
                     (2, 32, 32, _DENSEPOSE_NUM_PARTS))
    self.assertEqual(prediction_dict[cnma.DENSEPOSE_REGRESSION][0].shape,
                     (2, 32, 32, 2 * _DENSEPOSE_NUM_PARTS))
    self.assertEqual(prediction_dict[cnma.TRACK_REID][0].shape,
                     (2, 32, 32, _REID_EMBED_SIZE))
    self.assertEqual(prediction_dict[cnma.TEMPORAL_OFFSET][0].shape,
                     (2, 32, 32, 2))

  def test_loss(self):
    """Test the loss function."""
    groundtruth_dict = get_fake_groundtruth_dict(16, 32, 4)
    model = build_center_net_meta_arch()
    model.provide_groundtruth(
        groundtruth_boxes_list=groundtruth_dict[fields.BoxListFields.boxes],
        groundtruth_weights_list=groundtruth_dict[fields.BoxListFields.weights],
        groundtruth_classes_list=groundtruth_dict[fields.BoxListFields.classes],
        groundtruth_keypoints_list=groundtruth_dict[
            fields.BoxListFields.keypoints],
        groundtruth_masks_list=groundtruth_dict[
            fields.BoxListFields.masks],
        groundtruth_dp_num_points_list=groundtruth_dict[
            fields.BoxListFields.densepose_num_points],
        groundtruth_dp_part_ids_list=groundtruth_dict[
            fields.BoxListFields.densepose_part_ids],
        groundtruth_dp_surface_coords_list=groundtruth_dict[
            fields.BoxListFields.densepose_surface_coords],
        groundtruth_track_ids_list=groundtruth_dict[
            fields.BoxListFields.track_ids],
        groundtruth_track_match_flags_list=groundtruth_dict[
            fields.BoxListFields.track_match_flags],
        groundtruth_temporal_offsets_list=groundtruth_dict[
            fields.BoxListFields.temporal_offsets])

    # Replace the track classification net with a deterministic layer whose
    # weights drive the fake embedding to the expected track id.
    kernel_initializer = tf.constant_initializer(
        [[1, 1, 0], [-1000000, -1000000, 1000000]])
    model.track_reid_classification_net = tf.keras.layers.Dense(
        _NUM_TRACK_IDS,
        kernel_initializer=kernel_initializer,
        input_shape=(_REID_EMBED_SIZE,))

    prediction_dict = get_fake_prediction_dict(
        input_height=16, input_width=32, stride=4)

    def graph_fn():
      loss_dict = model.loss(prediction_dict,
                             tf.constant([[16, 24, 3], [16, 24, 3]]))
      return loss_dict

    loss_dict = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)])
    self.assertGreater(
        0.01,
        loss_dict['%s/%s' %
                  (cnma.LOSS_KEY_PREFIX,
                   cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP))])
    self.assertGreater(
        0.01,
        loss_dict['%s/%s' %
                  (cnma.LOSS_KEY_PREFIX,
                   cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET))])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.get_keypoint_name(
                                       _TASK_NAME, cnma.KEYPOINT_REGRESSION))])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.SEGMENTATION_HEATMAP)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.DENSEPOSE_HEATMAP)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.DENSEPOSE_REGRESSION)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.TRACK_REID)])
    self.assertGreater(
        0.01, loss_dict['%s/%s' % (cnma.LOSS_KEY_PREFIX,
                                   cnma.TEMPORAL_OFFSET)])

  @parameterized.parameters(
      {'target_class_id': 1, 'with_true_image_shape': True},
      {'target_class_id': 2, 'with_true_image_shape': True},
      {'target_class_id': 1, 'with_true_image_shape': False},
  )
  def test_postprocess(self, target_class_id, with_true_image_shape):
    """Test the postprocess function."""
    model = build_center_net_meta_arch()
    max_detection = model._center_params.max_box_predictions
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)
    unmatched_keypoint_score = (
        model._kp_params_dict[_TASK_NAME].unmatched_keypoint_score)

    class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
    height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
    offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_heatmaps = np.ones(
        (1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001)
    keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    # Put one confident detection at heatmap location (16, 16).
    class_probs = np.ones(10) * _logit(0.25)
    class_probs[target_class_id] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    height_width[0, 16, 16] = [5, 10]
    offset[0, 16, 16] = [.25, .5]
    keypoint_regression[0, 16, 16] = [
        -1., -1.,
        -1., 1.,
        1., -1.,
        1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    segmentation_heatmap = np.zeros((1, 32, 32, 10), dtype=np.float32)
    segmentation_heatmap[:, 14:18, 14:18, target_class_id] = 1.0
    segmentation_heatmap = _logit(segmentation_heatmap)

    dp_part_ind = 4
    dp_part_heatmap = np.zeros((1, 32, 32, _DENSEPOSE_NUM_PARTS),
                               dtype=np.float32)
    dp_part_heatmap[0, 14:18, 14:18, dp_part_ind] = 1.0
    dp_part_heatmap = _logit(dp_part_heatmap)

    dp_surf_coords = np.random.randn(1, 32, 32, 2 * _DENSEPOSE_NUM_PARTS)

    embedding_size = 100
    track_reid_embedding = np.zeros((1, 32, 32, embedding_size),
                                    dtype=np.float32)
    track_reid_embedding[0, 16, 16, :] = np.ones(embedding_size)

    temporal_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
    temporal_offsets[..., 1] = 1

    class_center = tf.constant(class_center)
    height_width = tf.constant(height_width)
    offset = tf.constant(offset)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
    segmentation_heatmap = tf.constant(segmentation_heatmap, dtype=tf.float32)
    dp_part_heatmap = tf.constant(dp_part_heatmap, dtype=tf.float32)
    dp_surf_coords = tf.constant(dp_surf_coords, dtype=tf.float32)
    track_reid_embedding = tf.constant(track_reid_embedding, dtype=tf.float32)
    temporal_offsets = tf.constant(temporal_offsets, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.BOX_SCALE: [height_width],
        cnma.BOX_OFFSET: [offset],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
            [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
            [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
            [keypoint_regression],
        cnma.SEGMENTATION_HEATMAP: [segmentation_heatmap],
        cnma.DENSEPOSE_HEATMAP: [dp_part_heatmap],
        cnma.DENSEPOSE_REGRESSION: [dp_surf_coords],
        cnma.TRACK_REID: [track_reid_embedding],
        cnma.TEMPORAL_OFFSET: [temporal_offsets],
    }

    def graph_fn():
      if with_true_image_shape:
        detections = model.postprocess(prediction_dict,
                                       tf.constant([[128, 128, 3]]))
      else:
        detections = model.postprocess(prediction_dict, None)
      return detections

    detections = self.execute_cpu(graph_fn, [])
    self.assertAllClose(detections['detection_boxes'][0, 0],
                        np.array([55, 46, 75, 86]) / 128.0)
    self.assertAllClose(detections['detection_scores'][0],
                        [.75, .5, .5, .5, .5])
    expected_multiclass_scores = [.25] * 10
    expected_multiclass_scores[target_class_id] = .75
    self.assertAllClose(expected_multiclass_scores,
                        detections['detection_multiclass_scores'][0][0])

    # The output embedding extracted at the object center will be a 3-D array of
    # shape [batch, num_boxes, embedding_size]. The valid predicted embedding
    # will be the first embedding in the first batch. It is a 1-D array of
    # shape [embedding_size] with values all ones. All the values of the
    # embedding will then be divided by the square root of 'embedding_size'
    # after the L2 normalization.
    self.assertAllClose(detections['detection_embeddings'][0, 0],
                        np.ones(embedding_size) / embedding_size**0.5)
    self.assertEqual(detections['detection_classes'][0, 0], target_class_id)
    self.assertEqual(detections['num_detections'], [5])
    self.assertAllEqual([1, max_detection, num_keypoints, 2],
                        detections['detection_keypoints'].shape)
    self.assertAllEqual([1, max_detection, num_keypoints],
                        detections['detection_keypoint_scores'].shape)
    self.assertAllEqual([1, max_detection, 4, 4],
                        detections['detection_masks'].shape)
    self.assertAllEqual([1, max_detection, embedding_size],
                        detections['detection_embeddings'].shape)
    self.assertAllEqual([1, max_detection, 2],
                        detections['detection_temporal_offsets'].shape)

    # Masks should be empty for everything but the first detection.
    self.assertAllEqual(
        detections['detection_masks'][0, 1:, :, :],
        np.zeros_like(detections['detection_masks'][0, 1:, :, :]))
    self.assertAllEqual(
        detections['detection_surface_coords'][0, 1:, :, :],
        np.zeros_like(detections['detection_surface_coords'][0, 1:, :, :]))

    if target_class_id == 1:
      expected_kpts_for_obj_0 = np.array(
          [[14., 14.], [14., 18.], [18., 14.], [17., 17.]]) / 32.
      expected_kpt_scores_for_obj_0 = np.array(
          [0.9, 0.9, 0.9, unmatched_keypoint_score])
      np.testing.assert_allclose(detections['detection_keypoints'][0][0],
                                 expected_kpts_for_obj_0, rtol=1e-6)
      np.testing.assert_allclose(detections['detection_keypoint_scores'][0][0],
                                 expected_kpt_scores_for_obj_0, rtol=1e-6)
      # First detection has DensePose parts.
      self.assertSameElements(
          np.unique(detections['detection_masks'][0, 0, :, :]),
          set([0, dp_part_ind + 1]))
      self.assertGreater(np.sum(np.abs(detections['detection_surface_coords'])),
                         0.0)
    else:
      # All keypoint outputs should be zeros.
      # NOTE: use the builtin `float` here; the `np.float` alias was removed
      # in NumPy 1.24 and is equivalent to `float` (i.e. float64).
      np.testing.assert_allclose(
          detections['detection_keypoints'][0][0],
          np.zeros([num_keypoints, 2], float),
          rtol=1e-6)
      np.testing.assert_allclose(
          detections['detection_keypoint_scores'][0][0],
          np.zeros([num_keypoints], float),
          rtol=1e-6)
      # Binary segmentation mask.
      self.assertSameElements(
          np.unique(detections['detection_masks'][0, 0, :, :]),
          set([0, 1]))
      # No DensePose surface coordinates.
      np.testing.assert_allclose(
          detections['detection_surface_coords'][0, 0, :, :],
          np.zeros_like(detections['detection_surface_coords'][0, 0, :, :]))

  def test_postprocess_kpts_no_od(self):
    """Test the postprocess function."""
    target_class_id = 1
    model = build_center_net_meta_arch(keypoint_only=True)
    max_detection = model._center_params.max_box_predictions
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)

    class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
    keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
    keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    class_probs = np.ones(10) * _logit(0.25)
    class_probs[target_class_id] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    keypoint_regression[0, 16, 16] = [
        -1., -1.,
        -1., 1.,
        1., -1.,
        1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    class_center = tf.constant(class_center)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
            [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
            [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
            [keypoint_regression],
    }

    # Postprocess is invoked eagerly here (keypoint-only model); the
    # commented-out graph-mode scaffolding that used to surround this call
    # has been removed.
    detections = model.postprocess(prediction_dict,
                                   tf.constant([[128, 128, 3]]))

    self.assertAllClose(detections['detection_scores'][0],
                        [.75, .5, .5, .5, .5])
    expected_multiclass_scores = [.25] * 10
    expected_multiclass_scores[target_class_id] = .75
    self.assertAllClose(expected_multiclass_scores,
                        detections['detection_multiclass_scores'][0][0])
    self.assertEqual(detections['detection_classes'][0, 0], target_class_id)
    self.assertEqual(detections['num_detections'], [5])
    self.assertAllEqual([1, max_detection, num_keypoints, 2],
                        detections['detection_keypoints'].shape)
    self.assertAllEqual([1, max_detection, num_keypoints],
                        detections['detection_keypoint_scores'].shape)

  def test_non_max_suppression(self):
    """Tests application of NMS on CenterNet detections."""
    target_class_id = 1
    model = build_center_net_meta_arch(apply_non_max_suppression=True,
                                       detection_only=True)

    class_center = np.zeros((1, 32, 32, 10), dtype=np.float32)
    height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
    offset = np.zeros((1, 32, 32, 2), dtype=np.float32)

    class_probs = np.ones(10) * _logit(0.25)
    class_probs[target_class_id] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    height_width[0, 16, 16] = [5, 10]
    offset[0, 16, 16] = [.25, .5]

    class_center = tf.constant(class_center)
    height_width = tf.constant(height_width)
    offset = tf.constant(offset)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.BOX_SCALE: [height_width],
        cnma.BOX_OFFSET: [offset],
    }

    def graph_fn():
      detections = model.postprocess(prediction_dict,
                                     tf.constant([[128, 128, 3]]))
      return detections

    detections = self.execute_cpu(graph_fn, [])
    num_detections = int(detections['num_detections'])
    # NMS keeps only the single confident box.
    self.assertEqual(num_detections, 1)

    self.assertAllClose(detections['detection_boxes'][0, 0],
                        np.array([55, 46, 75, 86]) / 128.0)
    self.assertAllClose(detections['detection_scores'][0][:num_detections],
                        [.75])
    expected_multiclass_scores = [.25] * 10
    expected_multiclass_scores[target_class_id] = .75
    self.assertAllClose(expected_multiclass_scores,
                        detections['detection_multiclass_scores'][0][0])

  @parameterized.parameters(
      {
          'candidate_ranking_mode': 'min_distance',
          'argmax_postprocessing': False
      },
      {
          'candidate_ranking_mode': 'gaussian_weighted_const',
          'argmax_postprocessing': True
      })
  def test_postprocess_single_class(self, candidate_ranking_mode,
                                    argmax_postprocessing):
    """Test the postprocess function."""
    model = build_center_net_meta_arch(
        num_classes=1, max_box_predictions=5, per_keypoint_offset=True,
        candidate_ranking_mode=candidate_ranking_mode,
        argmax_postprocessing=argmax_postprocessing)
    max_detection = model._center_params.max_box_predictions
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)

    class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
    height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
    offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_heatmaps = np.ones(
        (1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.01)
    keypoint_offsets = np.zeros(
        (1, 32, 32, num_keypoints * 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    class_probs = np.zeros(1)
    class_probs[0] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    height_width[0, 16, 16] = [5, 10]
    offset[0, 16, 16] = [.25, .5]
    keypoint_regression[0, 16, 16] = [
        -1., -1.,
        -1., 1.,
        1., -1.,
        1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    class_center = tf.constant(class_center)
    height_width = tf.constant(height_width)
    offset = tf.constant(offset)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.BOX_SCALE: [height_width],
        cnma.BOX_OFFSET: [offset],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
            [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
            [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
            [keypoint_regression],
    }

    def graph_fn():
      detections = model.postprocess(prediction_dict,
                                     tf.constant([[128, 128, 3]]))
      return detections

    detections = self.execute_cpu(graph_fn, [])
    self.assertAllClose(detections['detection_boxes'][0, 0],
                        np.array([55, 46, 75, 86]) / 128.0)
    self.assertAllClose(detections['detection_scores'][0],
                        [.75, .5, .5, .5, .5])
    self.assertEqual(detections['detection_classes'][0, 0], 0)
    self.assertEqual(detections['num_detections'], [5])
    self.assertAllEqual([1, max_detection, num_keypoints, 2],
                        detections['detection_keypoints'].shape)
    self.assertAllClose(
        [[0.4375, 0.4375], [0.4375, 0.5625], [0.5625, 0.4375]],
        detections['detection_keypoints'][0, 0, 0:3, :])
    self.assertAllEqual([1, max_detection, num_keypoints],
                        detections['detection_keypoint_scores'].shape)

  def test_postprocess_single_instance(self):
    """Test the postprocess single instance function."""
    model = build_center_net_meta_arch(
        num_classes=1, candidate_ranking_mode='score_distance_ratio')
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)

    class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
    keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
    keypoint_offsets = np.zeros(
        (1, 32, 32, num_keypoints * 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    class_probs = np.zeros(1)
    class_probs[0] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    keypoint_regression[0, 16, 16] = [
        -1., -1.,
        -1., 1.,
        1., -1.,
        1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    class_center = tf.constant(class_center)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
            [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
            [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
            [keypoint_regression],
    }

    def graph_fn():
      detections = model.postprocess_single_instance_keypoints(
          prediction_dict,
          tf.constant([[128, 128, 3]]))
      return detections

    detections = self.execute_cpu(graph_fn, [])
    self.assertAllEqual([1, 1, num_keypoints, 2],
                        detections['detection_keypoints'].shape)
    self.assertAllEqual([1, 1, num_keypoints],
                        detections['detection_keypoint_scores'].shape)

  @parameterized.parameters(
      {'per_keypoint_depth': False},
      {'per_keypoint_depth': True},
  )
  def test_postprocess_single_class_depth(self, per_keypoint_depth):
    """Test the postprocess function."""
    model = build_center_net_meta_arch(
        num_classes=1,
        per_keypoint_offset=per_keypoint_depth,
        predict_depth=True,
        per_keypoint_depth=per_keypoint_depth)
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)

    class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
    height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
    offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_heatmaps = np.ones(
        (1, 32, 32, num_keypoints), dtype=np.float32) * _logit(0.001)
    keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    class_probs = np.zeros(1)
    class_probs[0] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    height_width[0, 16, 16] = [5, 10]
    offset[0, 16, 16] = [.25, .5]
    keypoint_regression[0, 16, 16] = [-1., -1., -1., 1., 1., -1., 1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    # The depth map is per-keypoint (one channel each) or shared (one channel)
    # depending on the parameterized flag.
    if per_keypoint_depth:
      keypoint_depth = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
      keypoint_depth[0, 14, 14, 0] = -1.0
      keypoint_depth[0, 14, 18, 1] = -1.1
      keypoint_depth[0, 18, 14, 2] = -1.2
      keypoint_depth[0, 18, 18, 3] = -1.3
    else:
      keypoint_depth = np.zeros((1, 32, 32, 1), dtype=np.float32)
      keypoint_depth[0, 14, 14, 0] = -1.0
      keypoint_depth[0, 14, 18, 0] = -1.1
      keypoint_depth[0, 18, 14, 0] = -1.2
      keypoint_depth[0, 18, 18, 0] = -1.3

    class_center = tf.constant(class_center)
    height_width = tf.constant(height_width)
    offset = tf.constant(offset)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)
    keypoint_depth = tf.constant(keypoint_depth, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.BOX_SCALE: [height_width],
        cnma.BOX_OFFSET: [offset],
        cnma.get_keypoint_name(_TASK_NAME,
                               cnma.KEYPOINT_HEATMAP): [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME,
                               cnma.KEYPOINT_OFFSET): [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME,
                               cnma.KEYPOINT_REGRESSION): [keypoint_regression],
        cnma.get_keypoint_name(_TASK_NAME,
                               cnma.KEYPOINT_DEPTH): [keypoint_depth]
    }

    def graph_fn():
      detections = model.postprocess(prediction_dict,
                                     tf.constant([[128, 128, 3]]))
      return detections

    detections = self.execute_cpu(graph_fn, [])
    # The fourth keypoint scores below threshold, so its depth is zeroed out.
    self.assertAllClose(detections['detection_keypoint_depths'][0, 0],
                        np.array([-1.0, -1.1, -1.2, 0.0]))
    self.assertAllClose(detections['detection_keypoint_scores'][0, 0],
                        np.array([0.9, 0.9, 0.9, 0.1]))

  def test_mask_object_center_in_postprocess_by_true_image_shape(self):
    """Test the postprocess function is masked by true_image_shape."""
    model = build_center_net_meta_arch(num_classes=1)
    max_detection = model._center_params.max_box_predictions
    num_keypoints = len(model._kp_params_dict[_TASK_NAME].keypoint_indices)

    class_center = np.zeros((1, 32, 32, 1), dtype=np.float32)
    height_width = np.zeros((1, 32, 32, 2), dtype=np.float32)
    offset = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_heatmaps = np.zeros((1, 32, 32, num_keypoints), dtype=np.float32)
    keypoint_offsets = np.zeros((1, 32, 32, 2), dtype=np.float32)
    keypoint_regression = np.random.randn(1, 32, 32, num_keypoints * 2)

    class_probs = np.zeros(1)
    class_probs[0] = _logit(0.75)
    class_center[0, 16, 16] = class_probs
    height_width[0, 16, 16] = [5, 10]
    offset[0, 16, 16] = [.25, .5]
    keypoint_regression[0, 16, 16] = [
        -1., -1.,
        -1., 1.,
        1., -1.,
        1., 1.]
    keypoint_heatmaps[0, 14, 14, 0] = _logit(0.9)
    keypoint_heatmaps[0, 14, 18, 1] = _logit(0.9)
    keypoint_heatmaps[0, 18, 14, 2] = _logit(0.9)
    keypoint_heatmaps[0, 18, 18, 3] = _logit(0.05)  # Note the low score.

    class_center = tf.constant(class_center)
    height_width = tf.constant(height_width)
    offset = tf.constant(offset)
    keypoint_heatmaps = tf.constant(keypoint_heatmaps, dtype=tf.float32)
    keypoint_offsets = tf.constant(keypoint_offsets, dtype=tf.float32)
    keypoint_regression = tf.constant(keypoint_regression, dtype=tf.float32)

    prediction_dict = {
        cnma.OBJECT_CENTER: [class_center],
        cnma.BOX_SCALE: [height_width],
        cnma.BOX_OFFSET: [offset],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP):
            [keypoint_heatmaps],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET):
            [keypoint_offsets],
        cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION):
            [keypoint_regression],
    }

    def graph_fn():
      # The true image shape (1x1) masks out everything but the top-left
      # corner of the heatmaps.
      detections = model.postprocess(prediction_dict,
                                     tf.constant([[1, 1, 3]]))
      return detections

    detections = self.execute_cpu(graph_fn, [])
    self.assertAllClose(detections['detection_boxes'][0, 0],
                        np.array([0, 0, 0, 0]))
    # The class_center logits are initialized as 0's so it's filled with 0.5s.
    # Despite that, we should only find one box.
    self.assertAllClose(detections['detection_scores'][0],
                        [0.5, 0., 0., 0., 0.])
    self.assertEqual(np.sum(detections['detection_classes']), 0)
    self.assertEqual(detections['num_detections'], [1])
    self.assertAllEqual([1, max_detection, num_keypoints, 2],
                        detections['detection_keypoints'].shape)
    self.assertAllEqual([1, max_detection, num_keypoints],
                        detections['detection_keypoint_scores'].shape)

  def test_get_instance_indices(self):
    classes = tf.constant([[0, 1, 2, 0], [2, 1, 2, 2]], dtype=tf.int32)
    num_detections = tf.constant([1, 3], dtype=tf.int32)
    batch_index = 1
    class_id = 2
    model = build_center_net_meta_arch()
    valid_indices = model._get_instance_indices(
        classes, num_detections, batch_index, class_id)
    # Only the first `num_detections[1]` = 3 entries of row 1 are considered;
    # of those, indices 0 and 2 hold class 2.
    self.assertAllEqual(valid_indices.numpy(), [0, 2])

  def test_rescore_instances(self):
    feature_extractor = DummyFeatureExtractor(
        channel_means=(1.0, 2.0, 3.0),
        channel_stds=(10., 20., 30.),
        bgr_ordering=False,
        num_feature_outputs=2,
        stride=4)
    # Fixed keyword typo ('pad_to_max_dimesnion'); resize_to_range takes
    # `pad_to_max_dimension`, and the misspelling would raise a TypeError if
    # the resizer were ever invoked.
    image_resizer_fn = functools.partial(
        preprocessor.resize_to_range,
        min_dimension=128,
        max_dimension=128,
        pad_to_max_dimension=True)

    kp_params_1 = cnma.KeypointEstimationParams(
        task_name='kpt_task_1',
        class_id=0,
        keypoint_indices=[0, 1, 2],
        keypoint_std_dev=[0.00001] * 3,
        classification_loss=losses.WeightedSigmoidClassificationLoss(),
        localization_loss=losses.L1LocalizationLoss(),
        keypoint_candidate_score_threshold=0.1,
        rescore_instances=True)  # Note rescoring for class_id = 0.
    kp_params_2 = cnma.KeypointEstimationParams(
        task_name='kpt_task_2',
        class_id=1,
        keypoint_indices=[3, 4],
        keypoint_std_dev=[0.00001] * 2,
        classification_loss=losses.WeightedSigmoidClassificationLoss(),
        localization_loss=losses.L1LocalizationLoss(),
        keypoint_candidate_score_threshold=0.1,
        rescore_instances=False)

    model = cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=False,
        num_classes=2,
        feature_extractor=feature_extractor,
        image_resizer_fn=image_resizer_fn,
        object_center_params=get_fake_center_params(),
        object_detection_params=get_fake_od_params(),
        keypoint_params_dict={
            'kpt_task_1': kp_params_1,
            'kpt_task_2': kp_params_2,
        })

    def graph_fn():
      classes = tf.constant([[1, 0]], dtype=tf.int32)
      scores = tf.constant([[0.5, 0.75]], dtype=tf.float32)
      keypoint_scores = tf.constant(
          [
              [[0.1, 0.0, 0.3, 0.4, 0.5],
               [0.1, 0.2, 0.3, 0.4, 0.5]],
          ])
      new_scores = model._rescore_instances(classes, scores, keypoint_scores)
      return new_scores

    new_scores = self.execute_cpu(graph_fn, [])
    # Only class 0 (second instance) is rescored: its score is multiplied by
    # the mean of keypoint scores above the 0.1 threshold (0.1 and 0.3).
    expected_scores = np.array(
        [[0.5, 0.75 * (0.1 + 0.3)/2]]
    )
    self.assertAllClose(expected_scores, new_scores)
def get_fake_prediction_dict(input_height,
                             input_width,
                             stride,
                             per_keypoint_depth=False):
  """Prepares the fake prediction dictionary.

  Builds per-head prediction tensors (two copies of each tensor, mimicking
  two prediction stages) containing one strong detection for batch element
  0 at output cell (row=2, col=4).

  Args:
    input_height: int, input image height in pixels.
    input_width: int, input image width in pixels.
    stride: int, output stride of the backbone.
    per_keypoint_depth: bool, whether the depth head has one channel per
      keypoint (True) or a single shared depth channel (False).

  Returns:
    A dict mapping CenterNet head names to lists of two identical tensors.
  """
  output_height = input_height // stride
  output_width = input_width // stride
  object_center = np.zeros((2, output_height, output_width, _NUM_CLASSES),
                           dtype=np.float32)
  # Box center:
  # y: floor((0.54 + 0.56) / 2 * 4) = 2,
  # x: floor((0.54 + 0.56) / 2 * 8) = 4
  object_center[0, 2, 4, 1] = 1.0
  object_center = _logit(object_center)

  # Box size:
  # height: (0.56 - 0.54) * 4 = 0.08
  # width:  (0.56 - 0.54) * 8 = 0.16
  object_scale = np.zeros((2, output_height, output_width, 2), dtype=np.float32)
  object_scale[0, 2, 4] = 0.08, 0.16

  # Box center offset coordinate (0.55, 0.55):
  # y-offset: 0.55 * 4 - 2 = 0.2
  # x-offset: 0.55 * 8 - 4 = 0.4
  object_offset = np.zeros((2, output_height, output_width, 2),
                           dtype=np.float32)
  object_offset[0, 2, 4] = 0.2, 0.4

  # Keypoints 1 and 3 peak at the same cell as the box center.
  keypoint_heatmap = np.zeros((2, output_height, output_width, _NUM_KEYPOINTS),
                              dtype=np.float32)
  keypoint_heatmap[0, 2, 4, 1] = 1.0
  keypoint_heatmap[0, 2, 4, 3] = 1.0
  keypoint_heatmap = _logit(keypoint_heatmap)

  keypoint_offset = np.zeros((2, output_height, output_width, 2),
                             dtype=np.float32)
  keypoint_offset[0, 2, 4] = 0.2, 0.4

  keypoint_depth = np.zeros((2, output_height, output_width,
                             _NUM_KEYPOINTS if per_keypoint_depth else 1),
                            dtype=np.float32)
  keypoint_depth[0, 2, 4] = 3.0

  keypoint_regression = np.zeros(
      (2, output_height, output_width, 2 * _NUM_KEYPOINTS), dtype=np.float32)
  keypoint_regression[0, 2, 4] = 0.0, 0.0, 0.2, 0.4, 0.0, 0.0, 0.2, 0.4

  mask_heatmap = np.zeros((2, output_height, output_width, _NUM_CLASSES),
                          dtype=np.float32)
  mask_heatmap[0, 2, 4, 1] = 1.0
  mask_heatmap = _logit(mask_heatmap)

  densepose_heatmap = np.zeros((2, output_height, output_width,
                                _DENSEPOSE_NUM_PARTS), dtype=np.float32)
  densepose_heatmap[0, 2, 4, 5] = 1.0
  densepose_heatmap = _logit(densepose_heatmap)

  densepose_regression = np.zeros((2, output_height, output_width,
                                   2 * _DENSEPOSE_NUM_PARTS), dtype=np.float32)
  # The surface coordinate indices for part index 5 are:
  # (5 * 2, 5 * 2 + 1), or (10, 11).
  densepose_regression[0, 2, 4, 10:12] = 0.4, 0.7

  track_reid_embedding = np.zeros((2, output_height, output_width,
                                   _REID_EMBED_SIZE), dtype=np.float32)
  track_reid_embedding[0, 2, 4, :] = np.arange(_REID_EMBED_SIZE)

  temporal_offsets = np.zeros((2, output_height, output_width, 2),
                              dtype=np.float32)
  temporal_offsets[0, 2, 4, :] = 5

  prediction_dict = {
      'preprocessed_inputs':
          tf.zeros((2, input_height, input_width, 3)),
      cnma.OBJECT_CENTER: [
          tf.constant(object_center),
          tf.constant(object_center)
      ],
      cnma.BOX_SCALE: [tf.constant(object_scale),
                       tf.constant(object_scale)],
      cnma.BOX_OFFSET: [tf.constant(object_offset),
                        tf.constant(object_offset)],
      cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_HEATMAP): [
          tf.constant(keypoint_heatmap),
          tf.constant(keypoint_heatmap)
      ],
      cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_OFFSET): [
          tf.constant(keypoint_offset),
          tf.constant(keypoint_offset)
      ],
      cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_REGRESSION): [
          tf.constant(keypoint_regression),
          tf.constant(keypoint_regression)
      ],
      cnma.get_keypoint_name(_TASK_NAME, cnma.KEYPOINT_DEPTH): [
          tf.constant(keypoint_depth),
          tf.constant(keypoint_depth)
      ],
      cnma.SEGMENTATION_HEATMAP: [
          tf.constant(mask_heatmap),
          tf.constant(mask_heatmap)
      ],
      cnma.DENSEPOSE_HEATMAP: [
          tf.constant(densepose_heatmap),
          tf.constant(densepose_heatmap),
      ],
      cnma.DENSEPOSE_REGRESSION: [
          tf.constant(densepose_regression),
          tf.constant(densepose_regression),
      ],
      cnma.TRACK_REID: [
          tf.constant(track_reid_embedding),
          tf.constant(track_reid_embedding),
      ],
      cnma.TEMPORAL_OFFSET: [
          tf.constant(temporal_offsets),
          tf.constant(temporal_offsets),
      ],
  }
  return prediction_dict
def get_fake_groundtruth_dict(input_height,
                              input_width,
                              stride,
                              has_depth=False):
  """Prepares the fake groundtruth dictionary.

  The groundtruth is curated to match the detection emitted by
  get_fake_prediction_dict (batch element 0, box centered at (0.55, 0.55));
  batch element 1 carries weight-0 groundtruth.

  Args:
    input_height: int, input image height in pixels.
    input_width: int, input image width in pixels.
    stride: int, output stride of the backbone.
    has_depth: bool, whether to provide non-trivial keypoint depths.

  Returns:
    A dict of per-image groundtruth tensor lists, keyed by BoxListFields /
    InputDataFields names.
  """
  # A small box with center at (0.55, 0.55).
  boxes = [
      tf.constant([[0.54, 0.54, 0.56, 0.56]]),
      tf.constant([[0.0, 0.0, 0.5, 0.5]]),
  ]
  classes = [
      tf.one_hot([1], depth=_NUM_CLASSES),
      tf.one_hot([0], depth=_NUM_CLASSES),
  ]
  # The second image's groundtruth is zero-weighted (ignored by losses).
  weights = [
      tf.constant([1.]),
      tf.constant([0.]),
  ]
  # NaN keypoint coordinates mark unlabeled keypoints.
  keypoints = [
      tf.tile(
          tf.expand_dims(
              tf.constant([[float('nan'), 0.55,
                            float('nan'), 0.55, 0.55, 0.0]]),
              axis=2),
          multiples=[1, 1, 2]),
      tf.tile(
          tf.expand_dims(
              tf.constant([[float('nan'), 0.55,
                            float('nan'), 0.55, 0.55, 0.0]]),
              axis=2),
          multiples=[1, 1, 2]),
  ]
  if has_depth:
    keypoint_depths = [
        tf.constant([[float('nan'), 3.0,
                      float('nan'), 3.0, 0.55, 0.0]]),
        tf.constant([[float('nan'), 0.55,
                      float('nan'), 0.55, 0.55, 0.0]])
    ]
    keypoint_depth_weights = [
        tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]]),
        tf.constant([[1.0, 1.0, 1.0, 1.0, 0.0, 0.0]])
    ]
  else:
    keypoint_depths = [
        tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
        tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
    ]
    keypoint_depth_weights = [
        tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]]),
        tf.constant([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
    ]
  # Multi-hot set of classes that were labeled in each image.
  labeled_classes = [
      tf.one_hot([1], depth=_NUM_CLASSES) + tf.one_hot([2], depth=_NUM_CLASSES),
      tf.one_hot([0], depth=_NUM_CLASSES) + tf.one_hot([1], depth=_NUM_CLASSES),
  ]
  # Instance mask covering exactly the stride-sized cell of the detection.
  mask = np.zeros((1, input_height, input_width), dtype=np.float32)
  mask[0, 8:8+stride, 16:16+stride] = 1
  masks = [
      tf.constant(mask),
      tf.zeros_like(mask),
  ]
  densepose_num_points = [
      tf.constant([1], dtype=tf.int32),
      tf.constant([0], dtype=tf.int32),
  ]
  densepose_part_ids = [
      tf.constant([[5, 0, 0]], dtype=tf.int32),
      tf.constant([[0, 0, 0]], dtype=tf.int32),
  ]
  densepose_surface_coords_np = np.zeros((1, 3, 4), dtype=np.float32)
  densepose_surface_coords_np[0, 0, :] = 0.55, 0.55, 0.4, 0.7
  densepose_surface_coords = [
      tf.constant(densepose_surface_coords_np),
      tf.zeros_like(densepose_surface_coords_np)
  ]
  track_ids = [
      tf.constant([2], dtype=tf.int32),
      tf.constant([1], dtype=tf.int32),
  ]
  temporal_offsets = [
      tf.constant([[5.0, 5.0]], dtype=tf.float32),
      tf.constant([[2.0, 3.0]], dtype=tf.float32),
  ]
  track_match_flags = [
      tf.constant([1.0], dtype=tf.float32),
      tf.constant([1.0], dtype=tf.float32),
  ]
  groundtruth_dict = {
      fields.BoxListFields.boxes: boxes,
      fields.BoxListFields.weights: weights,
      fields.BoxListFields.classes: classes,
      fields.BoxListFields.keypoints: keypoints,
      fields.BoxListFields.keypoint_depths: keypoint_depths,
      fields.BoxListFields.keypoint_depth_weights: keypoint_depth_weights,
      fields.BoxListFields.masks: masks,
      fields.BoxListFields.densepose_num_points: densepose_num_points,
      fields.BoxListFields.densepose_part_ids: densepose_part_ids,
      fields.BoxListFields.densepose_surface_coords: densepose_surface_coords,
      fields.BoxListFields.track_ids: track_ids,
      fields.BoxListFields.temporal_offsets: temporal_offsets,
      fields.BoxListFields.track_match_flags: track_match_flags,
      fields.InputDataFields.groundtruth_labeled_classes: labeled_classes,
  }
  return groundtruth_dict
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaComputeLossTest(test_case.TestCase, parameterized.TestCase):
  """Test for CenterNet loss compuation related functions."""

  def setUp(self):
    # Predictions and groundtruth are curated to align almost perfectly,
    # so every per-head loss tested below should be near zero.
    self.model = build_center_net_meta_arch()
    self.classification_loss_fn = self.model._center_params.classification_loss
    self.localization_loss_fn = self.model._od_params.localization_loss
    self.true_image_shapes = tf.constant([[16, 24, 3], [16, 24, 3]])
    self.input_height = 16
    self.input_width = 32
    self.stride = 4
    self.per_pixel_weights = self.get_per_pixel_weights(self.true_image_shapes,
                                                        self.input_height,
                                                        self.input_width,
                                                        self.stride)
    self.prediction_dict = get_fake_prediction_dict(self.input_height,
                                                    self.input_width,
                                                    self.stride)
    self.model._groundtruth_lists = get_fake_groundtruth_dict(
        self.input_height, self.input_width, self.stride)
    super(CenterNetMetaComputeLossTest, self).setUp()

  def get_per_pixel_weights(self, true_image_shapes, input_height, input_width,
                            stride):
    """Returns flattened per-pixel valid-region weights at output stride."""
    output_height, output_width = (input_height // stride,
                                   input_width // stride)

    # TODO(vighneshb) Explore whether using floor here is safe.
    output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride)
    per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image(
        output_true_image_shapes, output_height, output_width)
    per_pixel_weights = tf.expand_dims(per_pixel_weights, 2)
    return per_pixel_weights

  def test_compute_object_center_loss(self):
    # Exercises both the default and use_labeled_classes=True code paths.
    def graph_fn():
      loss = self.model._compute_object_center_loss(
          object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER],
          input_height=self.input_height,
          input_width=self.input_width,
          per_pixel_weights=self.per_pixel_weights)
      return loss

    loss = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)

    # Temporarily enable use_labeled_classes, then restore the default.
    default_value = self.model._center_params.use_labeled_classes
    self.model._center_params = (
        self.model._center_params._replace(use_labeled_classes=True))
    loss = self.model._compute_object_center_loss(
        object_center_predictions=self.prediction_dict[cnma.OBJECT_CENTER],
        input_height=self.input_height,
        input_width=self.input_width,
        per_pixel_weights=self.per_pixel_weights)
    self.model._center_params = (
        self.model._center_params._replace(use_labeled_classes=default_value))

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)

  def test_compute_box_scale_and_offset_loss(self):
    def graph_fn():
      scale_loss, offset_loss = self.model._compute_box_scale_and_offset_loss(
          scale_predictions=self.prediction_dict[cnma.BOX_SCALE],
          offset_predictions=self.prediction_dict[cnma.BOX_OFFSET],
          input_height=self.input_height,
          input_width=self.input_width)
      return scale_loss, offset_loss

    scale_loss, offset_loss = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, scale_loss)
    self.assertGreater(0.01, offset_loss)

  def test_compute_kp_heatmap_loss(self):
    def graph_fn():
      loss = self.model._compute_kp_heatmap_loss(
          input_height=self.input_height,
          input_width=self.input_width,
          task_name=_TASK_NAME,
          heatmap_predictions=self.prediction_dict[cnma.get_keypoint_name(
              _TASK_NAME, cnma.KEYPOINT_HEATMAP)],
          classification_loss_fn=self.classification_loss_fn,
          per_pixel_weights=self.per_pixel_weights)
      return loss

    loss = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)

  def test_compute_kp_offset_loss(self):
    def graph_fn():
      loss = self.model._compute_kp_offset_loss(
          input_height=self.input_height,
          input_width=self.input_width,
          task_name=_TASK_NAME,
          offset_predictions=self.prediction_dict[cnma.get_keypoint_name(
              _TASK_NAME, cnma.KEYPOINT_OFFSET)],
          localization_loss_fn=self.localization_loss_fn)
      return loss

    loss = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)

  def test_compute_kp_regression_loss(self):
    def graph_fn():
      loss = self.model._compute_kp_regression_loss(
          input_height=self.input_height,
          input_width=self.input_width,
          task_name=_TASK_NAME,
          regression_predictions=self.prediction_dict[cnma.get_keypoint_name(
              _TASK_NAME, cnma.KEYPOINT_REGRESSION,)],
          localization_loss_fn=self.localization_loss_fn)
      return loss

    loss = self.execute(graph_fn, [])

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)

  @parameterized.parameters(
      {'per_keypoint_depth': False},
      {'per_keypoint_depth': True},
  )
  def test_compute_kp_depth_loss(self, per_keypoint_depth):
    prediction_dict = get_fake_prediction_dict(
        self.input_height,
        self.input_width,
        self.stride,
        per_keypoint_depth=per_keypoint_depth)
    model = build_center_net_meta_arch(
        num_classes=1,
        per_keypoint_offset=per_keypoint_depth,
        predict_depth=True,
        per_keypoint_depth=per_keypoint_depth,
        peak_radius=1 if per_keypoint_depth else 0)
    model._groundtruth_lists = get_fake_groundtruth_dict(
        self.input_height, self.input_width, self.stride, has_depth=True)

    def graph_fn():
      loss = model._compute_kp_depth_loss(
          input_height=self.input_height,
          input_width=self.input_width,
          task_name=_TASK_NAME,
          depth_predictions=prediction_dict[cnma.get_keypoint_name(
              _TASK_NAME, cnma.KEYPOINT_DEPTH)],
          localization_loss_fn=self.localization_loss_fn)
      return loss

    loss = self.execute(graph_fn, [])

    if per_keypoint_depth:
      # The loss is computed on a disk with radius 1 but only the center pixel
      # has the accurate prediction. The final loss is (4 * |3-0|) / 5 = 2.4
      self.assertAlmostEqual(2.4, loss, delta=1e-4)
    else:
      # The prediction and groundtruth are curated to produce very low loss.
      self.assertGreater(0.01, loss)

  def test_compute_track_embedding_loss(self):
    default_fc = self.model.track_reid_classification_net

    # Initialize the kernel to extreme values so that the classification score
    # is close to (0, 0, 1) after the softmax layer.
    kernel_initializer = tf.constant_initializer(
        [[1, 1, 0], [-1000000, -1000000, 1000000]])
    self.model.track_reid_classification_net = tf.keras.layers.Dense(
        _NUM_TRACK_IDS,
        kernel_initializer=kernel_initializer,
        input_shape=(_REID_EMBED_SIZE,))

    loss = self.model._compute_track_embedding_loss(
        input_height=self.input_height,
        input_width=self.input_width,
        object_reid_predictions=self.prediction_dict[cnma.TRACK_REID])

    # Restore the original classification head.
    self.model.track_reid_classification_net = default_fc

    # The prediction and groundtruth are curated to produce very low loss.
    self.assertGreater(0.01, loss)
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArchRestoreTest(test_case.TestCase):
  """Checks the restore_from_objects maps for checkpoint restoration."""

  def test_restore_map_resnet(self):
    """Test restore map for a resnet backbone."""

    model = build_center_net_meta_arch(build_resnet=True)
    restore_from_objects_map = model.restore_from_objects('classification')
    self.assertIsInstance(restore_from_objects_map['feature_extractor'],
                          tf.keras.Model)

  def test_retore_map_detection(self):
    """Test that detection checkpoints can be restored."""
    # NOTE(review): method name contains a typo ('retore'); left unchanged
    # so the test id stays stable.
    model = build_center_net_meta_arch(build_resnet=True)
    restore_from_objects_map = model.restore_from_objects('detection')
    self.assertIsInstance(restore_from_objects_map['model']._feature_extractor,
                          tf.keras.Model)
class DummyFeatureExtractor(cnma.CenterNetFeatureExtractor):
  """Test-only feature extractor that emits constant all-ones feature maps."""

  def __init__(self,
               channel_means,
               channel_stds,
               bgr_ordering,
               num_feature_outputs,
               stride):
    # Number of (identical) feature maps returned and the output stride.
    self._num_feature_outputs = num_feature_outputs
    self._stride = stride
    super(DummyFeatureExtractor, self).__init__(
        channel_means=channel_means, channel_stds=channel_stds,
        bgr_ordering=bgr_ordering)

  def predict(self):
    # Unused abstract hook.
    pass

  def loss(self):
    # Unused abstract hook.
    pass

  def postprocess(self):
    # Unused abstract hook.
    pass

  def call(self, inputs):
    # Returns num_feature_outputs identical ones-tensors with 64 channels
    # at the configured output stride.
    batch_size, input_height, input_width, _ = inputs.shape
    fake_output = tf.ones([
        batch_size, input_height // self._stride, input_width // self._stride,
        64
    ], dtype=tf.float32)
    return [fake_output] * self._num_feature_outputs

  @property
  def out_stride(self):
    return self._stride

  @property
  def num_feature_outputs(self):
    return self._num_feature_outputs
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetFeatureExtractorTest(test_case.TestCase):
  """Test the base feature extractor class."""

  def test_preprocess(self):
    # With means (1, 2, 3) and stds (10, 20, 30), input (11, 22, 33)
    # normalizes to all-ones, so the sum equals the element count.
    feature_extractor = DummyFeatureExtractor(
        channel_means=(1.0, 2.0, 3.0),
        channel_stds=(10., 20., 30.), bgr_ordering=False,
        num_feature_outputs=2, stride=4)

    img = np.zeros((2, 32, 32, 3))
    img[:, :, :] = 11, 22, 33

    def graph_fn():
      output = feature_extractor.preprocess(img)
      return output

    output = self.execute(graph_fn, [])
    self.assertAlmostEqual(output.sum(), 2 * 32 * 32 * 3)

  def test_bgr_ordering(self):
    # bgr_ordering=True should reverse the channel order.
    feature_extractor = DummyFeatureExtractor(
        channel_means=(0.0, 0.0, 0.0),
        channel_stds=(1., 1., 1.), bgr_ordering=True,
        num_feature_outputs=2, stride=4)

    img = np.zeros((2, 32, 32, 3), dtype=np.float32)
    img[:, :, :] = 1, 2, 3

    def graph_fn():
      output = feature_extractor.preprocess(img)
      return output

    output = self.execute(graph_fn, [])
    self.assertAllClose(output[..., 2], 1 * np.ones((2, 32, 32)))
    self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32)))
    self.assertAllClose(output[..., 0], 3 * np.ones((2, 32, 32)))

  def test_default_ordering(self):
    # Without bgr_ordering, channels pass through unchanged.
    feature_extractor = DummyFeatureExtractor(
        channel_means=(0.0, 0.0, 0.0),
        channel_stds=(1., 1., 1.), bgr_ordering=False,
        num_feature_outputs=2, stride=4)

    img = np.zeros((2, 32, 32, 3), dtype=np.float32)
    img[:, :, :] = 1, 2, 3

    def graph_fn():
      output = feature_extractor.preprocess(img)
      return output

    output = self.execute(graph_fn, [])
    self.assertAllClose(output[..., 0], 1 * np.ones((2, 32, 32)))
    self.assertAllClose(output[..., 1], 2 * np.ones((2, 32, 32)))
    self.assertAllClose(output[..., 2], 3 * np.ones((2, 32, 32)))
class Dummy1dFeatureExtractor(cnma.CenterNetFeatureExtractor):
  """Returns a static tensor."""

  def __init__(self, tensor, out_stride=1, channel_means=(0., 0., 0.),
               channel_stds=(1., 1., 1.), bgr_ordering=False):
    """Initializes the feature extractor.

    Args:
      tensor: The tensor to return as the processed feature.
      out_stride: The out_stride to return if asked.
      channel_means: Ignored, but provided for API compatability.
      channel_stds: Ignored, but provided for API compatability.
      bgr_ordering: Ignored, but provided for API compatability.
    """
    super().__init__(
        channel_means=channel_means, channel_stds=channel_stds,
        bgr_ordering=bgr_ordering)
    self._tensor = tensor
    self._out_stride = out_stride

  def call(self, inputs):
    # Ignores the input entirely and returns the fixed tensor.
    return [self._tensor]

  @property
  def out_stride(self):
    """The stride in the output image of the network."""
    return self._out_stride

  @property
  def num_feature_outputs(self):
    """Ther number of feature outputs returned by the feature extractor."""
    return 1

  @property
  def supported_sub_model_types(self):
    return ['detection']

  def get_sub_model(self, sub_model_type):
    if sub_model_type == 'detection':
      # NOTE(review): self._network is never assigned in this class —
      # presumably set by the base class; verify before relying on it.
      return self._network
    else:
      # Bug fix: the original constructed the ValueError but never raised
      # it, silently returning None for unsupported sub-model types.
      raise ValueError('Sub model type "{}" not supported.'.format(
          sub_model_type))
@unittest.skipIf(tf_version.is_tf1(), 'Skipping TF2.X only test.')
class CenterNetMetaArch1dTest(test_case.TestCase, parameterized.TestCase):
  """Shape checks for the 1D (height=1) CenterNet code path."""

  @parameterized.parameters([1, 2])
  def test_outputs_with_correct_shape(self, stride):
    # The 1D case reuses code from the 2D cases. These tests only check that
    # the output shapes are correct, and relies on other tests for correctness.
    batch_size = 2
    height = 1
    width = 32
    channels = 16
    unstrided_inputs = np.random.randn(
        batch_size, height, width, channels)
    fixed_output_features = np.random.randn(
        batch_size, height, width // stride, channels)
    max_boxes = 10
    num_classes = 3
    feature_extractor = Dummy1dFeatureExtractor(fixed_output_features, stride)
    arch = cnma.CenterNetMetaArch(
        is_training=True,
        add_summaries=True,
        num_classes=num_classes,
        feature_extractor=feature_extractor,
        image_resizer_fn=None,
        object_center_params=cnma.ObjectCenterParams(
            classification_loss=losses.PenaltyReducedLogisticFocalLoss(),
            object_center_loss_weight=1.0,
            max_box_predictions=max_boxes,
        ),
        object_detection_params=cnma.ObjectDetectionParams(
            localization_loss=losses.L1LocalizationLoss(),
            scale_loss_weight=1.0,
            offset_loss_weight=1.0,
        ),
        keypoint_params_dict=None,
        mask_params=None,
        densepose_params=None,
        track_params=None,
        temporal_offset_params=None,
        use_depthwise=False,
        compute_heatmap_sparse=False,
        non_max_suppression_fn=None,
        unit_height_conv=True)
    arch.provide_groundtruth(
        groundtruth_boxes_list=[
            tf.constant([[0, 0.5, 1.0, 0.75],
                         [0, 0.1, 1.0, 0.25]], tf.float32),
            tf.constant([[0, 0, 1.0, 1.0],
                         [0, 0, 0.0, 0.0]], tf.float32)
        ],
        groundtruth_classes_list=[
            tf.constant([[0, 0, 1],
                         [0, 1, 0]], tf.float32),
            tf.constant([[1, 0, 0],
                         [0, 0, 0]], tf.float32)
        ],
        groundtruth_weights_list=[
            tf.constant([1.0, 1.0]),
            tf.constant([1.0, 0.0])]
    )

    predictions = arch.predict(None, None)  # input is hardcoded above.
    predictions['preprocessed_inputs'] = tf.constant(unstrided_inputs)
    true_shapes = tf.constant([[1, 32, 16], [1, 24, 16]], tf.int32)
    postprocess_output = arch.postprocess(predictions, true_shapes)
    losses_output = arch.loss(predictions, true_shapes)

    # Scalar losses for the three detection heads.
    self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER),
                  losses_output)
    self.assertEqual((), losses_output['%s/%s' % (
        cnma.LOSS_KEY_PREFIX, cnma.OBJECT_CENTER)].shape)
    self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE),
                  losses_output)
    self.assertEqual((), losses_output['%s/%s' % (
        cnma.LOSS_KEY_PREFIX, cnma.BOX_SCALE)].shape)
    self.assertIn('%s/%s' % (cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET),
                  losses_output)
    self.assertEqual((), losses_output['%s/%s' % (
        cnma.LOSS_KEY_PREFIX, cnma.BOX_OFFSET)].shape)

    # Postprocessed detection tensors.
    self.assertIn('detection_scores', postprocess_output)
    self.assertEqual(postprocess_output['detection_scores'].shape,
                     (batch_size, max_boxes))
    self.assertIn('detection_multiclass_scores', postprocess_output)
    self.assertEqual(postprocess_output['detection_multiclass_scores'].shape,
                     (batch_size, max_boxes, num_classes))
    self.assertIn('detection_classes', postprocess_output)
    self.assertEqual(postprocess_output['detection_classes'].shape,
                     (batch_size, max_boxes))
    self.assertIn('num_detections', postprocess_output)
    self.assertEqual(postprocess_output['num_detections'].shape,
                     (batch_size,))
    self.assertIn('detection_boxes', postprocess_output)
    self.assertEqual(postprocess_output['detection_boxes'].shape,
                     (batch_size, max_boxes, 4))
    self.assertIn('detection_boxes_strided', postprocess_output)
    self.assertEqual(postprocess_output['detection_boxes_strided'].shape,
                     (batch_size, max_boxes, 4))

    # Raw prediction head outputs at output stride.
    self.assertIn(cnma.OBJECT_CENTER, predictions)
    self.assertEqual(predictions[cnma.OBJECT_CENTER][0].shape,
                     (batch_size, height, width // stride, num_classes))
    self.assertIn(cnma.BOX_SCALE, predictions)
    self.assertEqual(predictions[cnma.BOX_SCALE][0].shape,
                     (batch_size, height, width // stride, 2))
    self.assertIn(cnma.BOX_OFFSET, predictions)
    self.assertEqual(predictions[cnma.BOX_OFFSET][0].shape,
                     (batch_size, height, width // stride, 2))
    self.assertIn('preprocessed_inputs', predictions)
if __name__ == '__main__':
  # Force TF2 (eager) behavior before running the test suite.
  tf.enable_v2_behavior()
  tf.test.main()
| 39.303416 | 80 | 0.614155 |
acf03b64ee2b2e1b29cae0789233d96cb08b59b1 | 24,515 | py | Python | compiler/dna/parser/parser.py | nate97/libpandadna | df1f80c76317a1594ec98731d34c54f9dd4f1f40 | [
"BSD-3-Clause"
] | 36 | 2015-01-29T19:43:45.000Z | 2022-01-19T11:49:28.000Z | compiler/dna/parser/parser.py | nate97/libpandadna | df1f80c76317a1594ec98731d34c54f9dd4f1f40 | [
"BSD-3-Clause"
] | 44 | 2015-01-16T16:09:30.000Z | 2022-01-25T02:29:15.000Z | compiler/dna/parser/parser.py | loblao/libpandadna | 39b93cb3eb043b4124f13f4901a411a07140e6ac | [
"BSD-3-Clause"
] | 42 | 2015-01-03T08:43:21.000Z | 2022-01-11T04:29:11.000Z | import os
from dna.components.DNAAnimBuilding import DNAAnimBuilding
from dna.components.DNAAnimProp import DNAAnimProp
from dna.components.DNABattleCell import DNABattleCell
from dna.components.DNACornice import DNACornice
from dna.components.DNADoor import DNADoor
from dna.components.DNAFlatBuilding import DNAFlatBuilding
from dna.components.DNAFlatDoor import DNAFlatDoor
from dna.components.DNAGroup import DNAGroup
from dna.components.DNAInteractiveProp import DNAInteractiveProp
from dna.components.DNALandmarkBuilding import DNALandmarkBuilding
from dna.components.DNANode import DNANode
from dna.components.DNAProp import DNAProp
from dna.components.DNASign import DNASign
from dna.components.DNASignBaseline import DNASignBaseline
from dna.components.DNASignGraphic import DNASignGraphic
from dna.components.DNASignText import DNASignText
from dna.components.DNAStreet import DNAStreet
from dna.components.DNASuitPoint import DNASuitPoint
from dna.components.DNAVisGroup import DNAVisGroup
from dna.components.DNAWall import DNAWall
from dna.components.DNAWindows import DNAWindows
def p_dna(p):
    # Top-level PLY rule: a DNA file is a sequence of objects.
    pass


p_dna.__doc__ = '''\
dna : dna object
    | object'''
def p_object(p):
    # An object is any top-level construct; pass the child value up.
    p[0] = p[1]


p_object.__doc__ = '''\
object : suitpoint
       | group
       | model
       | font
       | store_texture'''
def p_number(p):
    # A number is either an int or a float token; pass the value up.
    p[0] = p[1]


p_number.__doc__ = '''\
number : FLOAT
       | INTEGER'''
def p_lpoint3f(p):
    # Fold three number tokens into a single (x, y, z) tuple.
    p[0] = (p[1], p[2], p[3])


p_lpoint3f.__doc__ = '''\
lpoint3f : number number number'''
def p_suitpoint(p):
    # Store a suit path point in the DNA store. The long production carries
    # a trailing landmark-building index; the short one defaults it to -1.
    index = p[3]
    pointTypeStr = p[5]
    pos = p[7]
    landmarkBuildingIndex = p[9] if len(p) == 11 else -1
    point = DNASuitPoint(index, pointTypeStr, pos,
                         landmarkBuildingIndex=landmarkBuildingIndex)
    p.parser.dnaStore.storeSuitPoint(point)


p_suitpoint.__doc__ = '''\
suitpoint : STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "]"
          | STORE_SUIT_POINT "[" number "," suitpointtype "," lpoint3f "," number "]"'''
def p_suitpointtype(p):
    # Map a point-type keyword token to its DNASuitPoint enum value.
    pointTypeStr = p[1]
    p[0] = DNASuitPoint.pointTypeMap[pointTypeStr]


p_suitpointtype.__doc__ = '''\
suitpointtype : STREET_POINT
              | FRONT_DOOR_POINT
              | SIDE_DOOR_POINT
              | COGHQ_IN_POINT
              | COGHQ_OUT_POINT'''
def p_string(p):
    # A string token, quoted or bare; pass the value up.
    p[0] = p[1]


p_string.__doc__ = '''\
string : QUOTED_STRING
       | UNQUOTED_STRING'''
def p_dnagroupdef(p):
    # Create a DNAGroup, attach it under the current parent, and make it
    # the new parent for nested components.
    name = p[2]
    p[0] = DNAGroup(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_dnagroupdef.__doc__ = '''\
dnagroupdef : GROUP string'''
def p_dnanodedef(p):
    # Create a DNANode and push it as the new parent.
    name = p[2]
    p[0] = DNANode(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_dnanodedef.__doc__ = '''\
dnanodedef : NODE string'''
def p_visgroupdef(p):
    # Create a DNAVisGroup and push it as the new parent.
    name = p[2]
    p[0] = DNAVisGroup(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_visgroupdef.__doc__ = '''\
visgroupdef : VISGROUP string'''
def p_dnagroup(p):
    # Close a group body: pop the parent back to the group's parent.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_dnagroup.__doc__ = '''\
dnagroup : dnagroupdef "[" subgroup_list "]"'''
def p_visgroup(p):
    # Close a vis-group body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_visgroup.__doc__ = '''\
visgroup : visgroupdef "[" subvisgroup_list "]"'''
def p_string_opt_list(p):
    # Accumulate an optional list of strings; the empty production starts
    # a new list. NOTE(review): if p[2] were ever None in the 3-arg case,
    # p[0] would stay unset — the grammar's `string` rule never yields
    # None, so this does not occur in practice.
    argCount = len(p)

    if argCount == 2:
        p[0] = []
    elif (argCount == 3) and (p[2] is not None):
        p[0] = p[1]
        p[0].append(p[2])


p_string_opt_list.__doc__ = '''\
string_opt_list : string_opt_list string
                | empty'''
def p_vis(p):
    # Record which vis groups are visible from the current one: the first
    # name plus every name in the optional tail list.
    add_visible = p.parser.parentGroup.addVisible
    add_visible(p[3])
    for visible_name in p[4]:
        add_visible(visible_name)


p_vis.__doc__ = '''\
vis : VIS "[" string string_opt_list "]"'''
def p_empty(p):
    # Empty production; yields None.
    pass


p_empty.__doc__ = '''\
empty : '''
def p_group(p):
    # Any group-like component; pass the child value up.
    p[0] = p[1]


p_group.__doc__ = '''\
group : dnagroup
      | visgroup
      | dnanode
      | windows
      | cornice
      | door'''
def p_dnanode(p):
    # Any node-like component; pass the child value up.
    p[0] = p[1]


p_dnanode.__doc__ = '''\
dnanode : prop
        | sign
        | signbaseline
        | signtext
        | flatbuilding
        | wall
        | landmarkbuilding
        | street
        | signgraphic
        | dnanode_grp'''
def p_dnanode_grp(p):
    # Close a node body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_dnanode_grp.__doc__ = '''\
dnanode_grp : dnanodedef "[" subdnanode_list "]"'''
def p_sign(p):
    # Close a sign body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_sign.__doc__ = '''\
sign : signdef "[" subprop_list "]"'''
def p_signgraphic(p):
    # Close a sign-graphic body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_signgraphic.__doc__ = '''\
signgraphic : signgraphicdef "[" subsigngraphic_list "]"'''
def p_prop(p):
    # Close a prop body (plain, animated, or interactive): pop the parent.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_prop.__doc__ = '''\
prop : propdef "[" subprop_list "]"
     | animpropdef "[" subanimprop_list "]"
     | interactivepropdef "[" subinteractiveprop_list "]"'''
def p_signbaseline(p):
    # Close a sign-baseline body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_signbaseline.__doc__ = '''\
signbaseline : baselinedef "[" subbaseline_list "]"'''
def p_signtest(p):
    # Close a sign-text body: pop the parent back up.
    # NOTE(review): function name says 'signtest' but the rule is
    # 'signtext'; PLY keys on the docstring, so behavior is unaffected.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_signtest.__doc__ = '''\
signtext : signtextdef "[" subtext_list "]"'''
def p_flatbuilding(p):
    # Close a flat-building body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_flatbuilding.__doc__ = '''\
flatbuilding : flatbuildingdef "[" subflatbuilding_list "]"'''
def p_wall(p):
    # Close a wall body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_wall.__doc__ = '''\
wall : walldef "[" subwall_list "]"'''
def p_windows(p):
    # Close a windows body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_windows.__doc__ = '''\
windows : windowsdef "[" subwindows_list "]"'''
def p_cornice(p):
    # Close a cornice body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_cornice.__doc__ = '''\
cornice : cornicedef "[" subcornice_list "]"'''
def p_landmarkbuilding(p):
    # Close a landmark/anim building body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_landmarkbuilding.__doc__ = '''\
landmarkbuilding : landmarkbuildingdef "[" sublandmarkbuilding_list "]"
                 | animbuildingdef "[" subanimbuilding_list "]"'''
def p_street(p):
    # Close a street body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_street.__doc__ = '''\
street : streetdef "[" substreet_list "]"'''
def p_door(p):
    # Close a door (or flat door) body: pop the parent back up.
    p[0] = p[1]
    p.parser.parentGroup = p[0].parent


p_door.__doc__ = '''\
door : doordef "[" subdoor_list "]"
     | flatdoordef "[" subdoor_list "]"'''
def p_propdef(p):
    # Create a DNAProp and push it as the new parent.
    name = p[2]
    p[0] = DNAProp(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_propdef.__doc__ = '''\
propdef : PROP string'''
def p_animpropdef(p):
    # Create a DNAAnimProp and push it as the new parent.
    name = p[2]
    p[0] = DNAAnimProp(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_animpropdef.__doc__ = '''\
animpropdef : ANIM_PROP string'''
def p_interactivepropdef(p):
    # Create a DNAInteractiveProp and push it as the new parent.
    name = p[2]
    p[0] = DNAInteractiveProp(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_interactivepropdef.__doc__ = '''\
interactivepropdef : INTERACTIVE_PROP string'''
def p_flatbuildingdef(p):
    # Create a DNAFlatBuilding and push it as the new parent.
    name = p[2]
    p[0] = DNAFlatBuilding(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_flatbuildingdef.__doc__ = '''\
flatbuildingdef : FLAT_BUILDING string'''
def p_walldef(p):
    # Create an unnamed DNAWall and push it as the new parent.
    p[0] = DNAWall('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_walldef.__doc__ = '''\
walldef : WALL'''
def p_windowsdef(p):
    # Create an unnamed DNAWindows and push it as the new parent.
    p[0] = DNAWindows('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_windowsdef.__doc__ = '''\
windowsdef : WINDOWS'''
def p_cornicedef(p):
    # Create an unnamed DNACornice and push it as the new parent.
    p[0] = DNACornice('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_cornicedef.__doc__ = '''\
cornicedef : CORNICE'''
def p_landmarkbuildingdef(p):
    # Create a DNALandmarkBuilding, push it as the new parent, and register
    # its block number and zone with the DNA store.
    name = p[2]
    p[0] = DNALandmarkBuilding(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]
    blockNumber = int(p.parser.dnaStore.getBlock(name))
    p.parser.dnaStore.storeBlockNumber(blockNumber)
    zoneId = 0
    try:
        # The enclosing vis group name is expected to look like
        # '<zoneId>:<suffix>'.
        zoneId = int(p[0].getVisGroup().name.split(':')[0])
    except Exception:
        # Bug fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Zone falls back to 0 on any
        # failure to derive it from the vis group name.
        pass
    finally:
        p.parser.dnaStore.storeBlockZone(blockNumber, zoneId)


p_landmarkbuildingdef.__doc__ = '''\
landmarkbuildingdef : LANDMARK_BUILDING string'''
def p_animbuildingdef(p):
    # Create a DNAAnimBuilding, push it as the new parent, and register its
    # block number and zone. NOTE(review): unlike p_landmarkbuildingdef,
    # the zone parse here is not guarded by try/except, so a malformed vis
    # group name raises — confirm whether that strictness is intentional.
    name = p[2]
    p[0] = DNAAnimBuilding(name)
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]
    blockNumber = int(p.parser.dnaStore.getBlock(name))
    p.parser.dnaStore.storeBlockNumber(blockNumber)
    zoneId = int(p[0].getVisGroup().name.split(':')[0])
    p.parser.dnaStore.storeBlockZone(blockNumber, zoneId)


p_animbuildingdef.__doc__ = '''\
animbuildingdef : ANIM_BUILDING string'''
def p_doordef(p):
    # Create an unnamed DNADoor and push it as the new parent.
    p[0] = DNADoor('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_doordef.__doc__ = '''\
doordef : DOOR'''
def p_flatdoordef(p):
    # Create an unnamed DNAFlatDoor, mark the grandparent wall's building
    # as having a door, and push the door as the new parent.
    p[0] = DNAFlatDoor('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup.parent.setHasDoor(True)
    p.parser.parentGroup = p[0]


p_flatdoordef.__doc__ = '''\
flatdoordef : FLAT_DOOR'''
def p_streetdef(p):
    # Create a DNAStreet and push it as the new parent.
    p[0] = DNAStreet(p[2])
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_streetdef.__doc__ = '''\
streetdef : STREET string'''
def p_signdef(p):
    # Create a DNASign and push it as the new parent.
    p[0] = DNASign()
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_signdef.__doc__ = '''\
signdef : SIGN'''
def p_signgraphicdef(p):
    # Create an unnamed DNASignGraphic and push it as the new parent.
    p[0] = DNASignGraphic('')
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_signgraphicdef.__doc__ = '''\
signgraphicdef : GRAPHIC'''
def p_baselinedef(p):
    # Create a DNASignBaseline and push it as the new parent.
    p[0] = DNASignBaseline()
    p.parser.parentGroup.add(p[0])
    p[0].setParent(p.parser.parentGroup)
    p.parser.parentGroup = p[0]


p_baselinedef.__doc__ = '''\
baselinedef : BASELINE'''
def p_signtextdef(p):
p[0] = DNASignText()
p.parser.parentGroup.add(p[0])
p[0].setParent(p.parser.parentGroup)
p.parser.parentGroup = p[0]
p_signtextdef.__doc__ = '''\
signtextdef : TEXT'''
def p_suitedge(p):
    # SUIT_EDGE production: register an edge between two suit points with
    # the DNA store, keyed by the enclosing group's (numeric) name as the
    # zone, then attach the stored edge to that group.
    group = p.parser.parentGroup
    zone = int(group.name)
    edge = p.parser.dnaStore.storeSuitEdge(p[3], p[4], zone)
    group.addSuitEdge(edge)
p_suitedge.__doc__ = '''\
suitedge : SUIT_EDGE "[" number number "]"'''
def p_battlecell(p):
    # BATTLE_CELL production: build a battle cell from width, height and
    # position and attach it to the enclosing group.
    cell = DNABattleCell(p[3], p[4], p[5])
    p.parser.parentGroup.addBattleCell(cell)
    p[0] = cell
p_battlecell.__doc__ = '''\
battlecell : BATTLE_CELL "[" number number lpoint3f "]"'''
def p_subgroup_list(p):
    # Accumulate child groups: extend the list inherited from the left
    # production with the newly reduced group, or start a fresh list for
    # the empty production.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subgroup_list.__doc__ = '''\
subgroup_list : subgroup_list group
              | empty'''
def p_subvisgroup_list(p):
    # Same accumulation pattern for the children of a vis group (nested
    # groups, suit edges, battle cells and vis statements).
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subvisgroup_list.__doc__ = '''\
subvisgroup_list : subvisgroup_list group
                 | subvisgroup_list suitedge
                 | subvisgroup_list battlecell
                 | subvisgroup_list vis
                 | empty'''
def p_pos(p):
    # POS production: position the group currently being built.
    target = p.parser.parentGroup
    target.setPos(p[3])
p_pos.__doc__ = '''\
pos : POS "[" lpoint3f "]"'''
def p_hpr(p):
    # HPR/NHPR production: orient the group currently being built.
    target = p.parser.parentGroup
    target.setHpr(p[3])
p_hpr.__doc__ = '''\
hpr : HPR "[" lpoint3f "]"
    | NHPR "[" lpoint3f "]"'''
def p_scale(p):
    # SCALE production: scale the group currently being built.
    target = p.parser.parentGroup
    target.setScale(p[3])
p_scale.__doc__ = '''\
scale : SCALE "[" lpoint3f "]"'''
def p_flags(p):
    # FLAGS production: set the flag string on the current group (listed
    # as a baseline attribute in baseline_sub).
    target = p.parser.parentGroup
    target.setFlags(p[3])
p_flags.__doc__ = '''\
flags : FLAGS "[" string "]"'''
def p_dnanode_sub(p):
    # Pass-through: forward whichever child attribute production was
    # reduced (group/pos/hpr/scale) unchanged.
    p[0] = p[1]
p_dnanode_sub.__doc__ = '''\
dnanode_sub : group
            | pos
            | hpr
            | scale'''
def p_dnaprop_sub(p):
    # Pass-through for prop-specific attributes (code/color).
    p[0] = p[1]
p_dnaprop_sub.__doc__ = '''\
dnaprop_sub : code
            | color'''
def p_dnaanimprop_sub(p):
    # Pass-through for animated-prop attributes (anim).
    p[0] = p[1]
p_dnaanimprop_sub.__doc__ = '''\
dnaanimprop_sub : anim'''
def p_dnainteractiveprop_sub(p):
    # Pass-through for interactive-prop attributes (cell_id).
    p[0] = p[1]
p_dnainteractiveprop_sub.__doc__ = '''\
dnainteractiveprop_sub : cell_id'''
def p_anim(p):
    # ANIM production: attach an animation name to the current group.
    target = p.parser.parentGroup
    target.setAnim(p[3])
p_anim.__doc__ = '''\
anim : ANIM "[" string "]"'''
def p_cell_id(p):
    # CELL_ID production: bind the current group to a battle cell id.
    target = p.parser.parentGroup
    target.setCellId(p[3])
p_cell_id.__doc__ = '''\
cell_id : CELL_ID "[" number "]"'''
def p_baseline_sub(p):
    # Pass-through: any attribute production valid inside a BASELINE block.
    p[0] = p[1]
p_baseline_sub.__doc__ = '''\
baseline_sub : code
             | color
             | width
             | height
             | indent
             | kern
             | stomp
             | stumble
             | wiggle
             | flags'''
def p_text_sub(p):
    # Pass-through: attributes valid inside a TEXT block.
    p[0] = p[1]
p_text_sub.__doc__ = '''\
text_sub : letters'''
def p_signgraphic_sub(p):
    # Pass-through: attributes valid inside a GRAPHIC block.
    p[0] = p[1]
p_signgraphic_sub.__doc__ = '''\
signgraphic_sub : width
                | height
                | code
                | color'''
def p_flatbuilding_sub(p):
    # Pass-through: attributes valid inside a flat-building block.
    p[0] = p[1]
p_flatbuilding_sub.__doc__ = '''\
flatbuilding_sub : width'''
def p_wall_sub(p):
    # Pass-through: attributes valid inside a wall block.
    p[0] = p[1]
p_wall_sub.__doc__ = '''\
wall_sub : height
         | code
         | color'''
def p_windows_sub(p):
    # Pass-through: attributes valid inside a windows block.
    p[0] = p[1]
p_windows_sub.__doc__ = '''\
windows_sub : code
            | color
            | windowcount'''
def p_cornice_sub(p):
    # Pass-through: attributes valid inside a cornice block.
    p[0] = p[1]
p_cornice_sub.__doc__ = '''\
cornice_sub : code
            | color'''
def p_landmarkbuilding_sub(p):
    # Pass-through: attributes valid inside a LANDMARK_BUILDING block.
    p[0] = p[1]
p_landmarkbuilding_sub.__doc__ = '''\
landmarkbuilding_sub : code
                     | title
                     | article
                     | building_type
                     | wall_color'''
def p_animbuilding_sub(p):
    # Pass-through: attributes valid inside an ANIM_BUILDING block.
    p[0] = p[1]
p_animbuilding_sub.__doc__ = '''\
animbuilding_sub : anim'''
def p_door_sub(p):
    # Pass-through: attributes valid inside a DOOR/FLAT_DOOR block.
    p[0] = p[1]
p_door_sub.__doc__ = '''\
door_sub : code
         | color'''
def p_street_sub(p):
    # Pass-through: attributes valid inside a STREET block.
    p[0] = p[1]
p_street_sub.__doc__ = '''\
street_sub : code
           | texture
           | color'''
def p_texture(p):
    # TEXTURE production: texture name for the current group.
    target = p.parser.parentGroup
    target.setTexture(p[3])
p_texture.__doc__ = '''\
texture : TEXTURE "[" string "]"'''
def p_title(p):
    # TITLE production: set the building's title and record it in the DNA
    # store under the building's block number.
    group = p.parser.parentGroup
    store = p.parser.dnaStore
    value = p[3]
    group.setTitle(value)
    block = int(store.getBlock(group.name))
    store.storeBlockTitle(block, value)
p_title.__doc__ = '''\
title : TITLE "[" string "]"'''
def p_article(p):
    # ARTICLE production: set the building's article and record it in the
    # DNA store under the building's block number.
    group = p.parser.parentGroup
    store = p.parser.dnaStore
    value = p[3]
    group.setArticle(value)
    block = int(store.getBlock(group.name))
    store.storeBlockArticle(block, value)
p_article.__doc__ = '''\
article : ARTICLE "[" string "]"'''
def p_building_type(p):
    # BUILDING_TYPE production: set the building's type and record it in
    # the DNA store under the building's block number.
    group = p.parser.parentGroup
    store = p.parser.dnaStore
    value = p[3]
    group.setBuildingType(value)
    block = int(store.getBlock(group.name))
    store.storeBlockBuildingType(block, value)
p_building_type.__doc__ = '''\
building_type : BUILDING_TYPE "[" string "]"'''
def p_wall_color(p):
    # COLOR production (landmark-building context): RGBA wall color.
    rgba = (p[3], p[4], p[5], p[6])
    p.parser.parentGroup.setWallColor(rgba)
p_wall_color.__doc__ = '''\
wall_color : COLOR "[" number number number number "]"'''
def p_count(p):
    # COUNT production: number of windows on the enclosing windows block.
    target = p.parser.parentGroup
    target.setWindowCount(p[3])
p_count.__doc__ = '''\
windowcount : COUNT "[" number "]"'''
def p_letters(p):
    # LETTERS production: the character content of a sign-text node.
    target = p.parser.parentGroup
    target.setLetters(p[3])
p_letters.__doc__ = '''\
letters : LETTERS "[" string "]"'''
def p_width(p):
    # WIDTH production: width attribute of the current group.
    target = p.parser.parentGroup
    target.setWidth(p[3])
p_width.__doc__ = '''\
width : WIDTH "[" number "]"'''
def p_height(p):
    # HEIGHT production: height attribute of the current group.
    target = p.parser.parentGroup
    target.setHeight(p[3])
p_height.__doc__ = '''\
height : HEIGHT "[" number "]"'''
def p_stomp(p):
    # STOMP production: baseline layout attribute.
    target = p.parser.parentGroup
    target.setStomp(p[3])
p_stomp.__doc__ = '''\
stomp : STOMP "[" number "]"'''
def p_indent(p):
    # INDENT production: baseline layout attribute.
    target = p.parser.parentGroup
    target.setIndent(p[3])
p_indent.__doc__ = '''\
indent : INDENT "[" number "]"'''
def p_kern(p):
    # KERN production: baseline layout attribute.
    target = p.parser.parentGroup
    target.setKern(p[3])
p_kern.__doc__ = '''\
kern : KERN "[" number "]"'''
def p_stumble(p):
    # STUMBLE production: baseline layout attribute.
    target = p.parser.parentGroup
    target.setStumble(p[3])
p_stumble.__doc__ = '''\
stumble : STUMBLE "[" number "]"'''
def p_wiggle(p):
    # WIGGLE production: baseline layout attribute.
    target = p.parser.parentGroup
    target.setWiggle(p[3])
p_wiggle.__doc__ = '''\
wiggle : WIGGLE "[" number "]"'''
def p_code(p):
    # CODE production: asset code for the current group.
    target = p.parser.parentGroup
    target.setCode(p[3])
p_code.__doc__ = '''\
code : CODE "[" string "]"'''
def p_color(p):
    # COLOR production: RGBA color for the current group.
    rgba = (p[3], p[4], p[5], p[6])
    p.parser.parentGroup.setColor(rgba)
p_color.__doc__ = '''\
color : COLOR "[" number number number number "]"'''
def p_subprop_list(p):
    # Accumulate children for a PROP: extend the inherited list with the
    # newly reduced item, or start a fresh list for the empty production.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subprop_list.__doc__ = '''\
subprop_list : subprop_list dnanode_sub
             | subprop_list dnaprop_sub
             | empty'''
def p_subanimprop_list(p):
    # Accumulate children for an ANIM_PROP.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subanimprop_list.__doc__ = '''\
subanimprop_list : subanimprop_list dnanode_sub
                 | subanimprop_list dnaprop_sub
                 | subanimprop_list dnaanimprop_sub
                 | empty'''
def p_subinteractiveprop_list(p):
    # Accumulate children for an INTERACTIVE_PROP.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subinteractiveprop_list.__doc__ = '''\
subinteractiveprop_list : subinteractiveprop_list dnanode_sub
                        | subinteractiveprop_list dnaprop_sub
                        | subinteractiveprop_list dnaanimprop_sub
                        | subinteractiveprop_list dnainteractiveprop_sub
                        | empty'''
def p_subbaseline_list(p):
    # Accumulate children for a BASELINE.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subbaseline_list.__doc__ = '''\
subbaseline_list : subbaseline_list dnanode_sub
                 | subbaseline_list baseline_sub
                 | empty'''
def p_subtext_list(p):
    # Accumulate children for a TEXT node.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subtext_list.__doc__ = '''\
subtext_list : subtext_list dnanode_sub
             | subtext_list text_sub
             | empty'''
def p_subdnanode_list(p):
    # Accumulate plain DNA-node children.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subdnanode_list.__doc__ = '''\
subdnanode_list : subdnanode_list dnanode_sub
                | empty'''
def p_subsigngraphic_list(p):
    # Accumulate children for a GRAPHIC node.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subsigngraphic_list.__doc__ = '''\
subsigngraphic_list : subsigngraphic_list dnanode_sub
                    | subsigngraphic_list signgraphic_sub
                    | empty'''
def p_subflatbuilding_list(p):
    # Accumulate children for a flat building.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subflatbuilding_list.__doc__ = '''\
subflatbuilding_list : subflatbuilding_list dnanode_sub
                     | subflatbuilding_list flatbuilding_sub
                     | empty'''
def p_subwall_list(p):
    # Accumulate children for a wall.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subwall_list.__doc__ = '''\
subwall_list : subwall_list dnanode_sub
             | subwall_list wall_sub
             | empty'''
def p_subwindows_list(p):
    # Accumulate children for a windows block.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subwindows_list.__doc__ = '''\
subwindows_list : subwindows_list dnanode_sub
                | subwindows_list windows_sub
                | empty'''
def p_subcornice_list(p):
    # Accumulate children for a cornice.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subcornice_list.__doc__ = '''\
subcornice_list : subcornice_list dnanode_sub
                | subcornice_list cornice_sub
                | empty'''
def p_sublandmarkbuilding_list(p):
    # Accumulate children for a LANDMARK_BUILDING.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_sublandmarkbuilding_list.__doc__ = '''\
sublandmarkbuilding_list : sublandmarkbuilding_list dnanode_sub
                         | sublandmarkbuilding_list landmarkbuilding_sub
                         | empty'''
def p_subanimbuilding_list(p):
    # Accumulate children for an ANIM_BUILDING.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subanimbuilding_list.__doc__ = '''\
subanimbuilding_list : subanimbuilding_list dnanode_sub
                     | subanimbuilding_list landmarkbuilding_sub
                     | subanimbuilding_list animbuilding_sub
                     | empty'''
def p_subdoor_list(p):
    # Accumulate children for a door.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_subdoor_list.__doc__ = '''\
subdoor_list : subdoor_list dnanode_sub
             | subdoor_list door_sub
             | empty'''
def p_substreet_list(p):
    # Accumulate children for a street.
    if len(p) == 3:
        collected = p[1]
        collected.append(p[2])
        p[0] = collected
    else:
        p[0] = []
p_substreet_list.__doc__ = '''\
substreet_list : substreet_list dnanode_sub
               | substreet_list street_sub
               | empty'''
def p_modeldef(p):
    # MODEL/HOODMODEL/PLACEMODEL header: remember which kind of model file
    # the following STORE_NODE entries belong to. A filename without an
    # extension defaults to '.bam'.
    base, extension = os.path.splitext(p[2])
    p.parser.modelType = p[1]
    p.parser.modelName = base + (extension or '.bam')
p_modeldef.__doc__ = '''\
modeldef : MODEL string
         | HOODMODEL string
         | PLACEMODEL string'''
def p_model(p):
    # The model body produces nothing of its own; each STORE_NODE child has
    # already been handled as it was reduced.
    pass
p_model.__doc__ = '''\
model : modeldef "[" modelnode_list "]"'''
def p_modelnode_list(p):
    # Nodes are processed individually; nothing to accumulate here.
    pass
p_modelnode_list.__doc__ = '''\
modelnode_list : modelnode_list node
               | empty'''
def p_node(p):
    # STORE_NODE production: register a model node with the DNA store.
    # The 3-string form is (root, code, search); in the 2-string form the
    # search string defaults to the code itself.
    if len(p) == 6:
        root, code, search = p[3], p[4], p[4]
    else:
        root, code, search = p[3], p[4], p[5]
    store = p.parser.dnaStore
    store.storeCatalogCode(root, code)
    modelName = p.parser.modelName
    modelType = p.parser.modelType
    if modelType == 'hood_model':
        store.storeHoodNode(code, modelName, search)
    elif modelType == 'place_model':
        store.storePlaceNode(code, modelName, search)
    else:
        store.storeNode(code, modelName, search)
p_node.__doc__ = '''\
node : STORE_NODE "[" string string "]"
     | STORE_NODE "[" string string string "]"'''
def p_store_texture(p):
    # STORE_TEXTURE production: register a texture with the DNA store.
    # Only the 3-string form carries a catalog root to record.
    store = p.parser.dnaStore
    if len(p) == 7:
        root, code, filename = p[3], p[4], p[5]
        store.storeCatalogCode(root, code)
    else:
        code, filename = p[3], p[4]
    store.storeTexture(code, filename)
p_store_texture.__doc__ = '''\
store_texture : STORE_TEXTURE "[" string string "]"
              | STORE_TEXTURE "[" string string string "]"'''
def p_font(p):
    # STORE_FONT production: register a font file under a catalog code.
    # A filename without an extension defaults to '.bam'.
    root, code, filename = p[3], p[4], p[5]
    base, extension = os.path.splitext(filename)
    fontFile = base + (extension or '.bam')
    store = p.parser.dnaStore
    store.storeCatalogCode(root, code)
    store.storeFont(fontFile, code)
p_font.__doc__ = '''\
font : STORE_FONT "[" string string string "]"'''
def p_error(p):
    # PLY error hook: p is None at end-of-input, otherwise it is the
    # offending token (with the lexer's current line number attached).
    if p is None:
        raise DNAError('Syntax error unexpected EOF')
    raise DNAError('Syntax error at line %s token=%s'
                   % (str(p.lexer.lineno), str(p)))
| 23.68599 | 88 | 0.614073 |
acf03c1f3491d8439a0c94724717e888a360067d | 608 | py | Python | cart/urls.py | Joao-Maria-Janeiro/SaoPerolasDjango | c0c5f328c516d5364adc791817d25dab36f93204 | [
"MIT"
] | 7 | 2018-09-30T14:41:45.000Z | 2019-05-14T16:06:28.000Z | cart/urls.py | Joao-Maria-Janeiro/SaoPerolasDjango | c0c5f328c516d5364adc791817d25dab36f93204 | [
"MIT"
] | null | null | null | cart/urls.py | Joao-Maria-Janeiro/SaoPerolasDjango | c0c5f328c516d5364adc791817d25dab36f93204 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.urls import path, include
from . import views
urlpatterns = [
path('add/product<int:id>', views.add_to_cart, name="add_to_cart"),
path('remove/product<int:id>', views.remove_from_cart, name="remove_from_cart"),
path('increase/product<int:id>', views.increase_quantity,name="increase_quantity"),
path('display/', views.display_cart, name="display_cart"),
path('handler/', views.shipping_details, name="shipping_details"),
path('details/', views.order_details, name="order_details"),
path('', views.display_cart, name="index-cart"),
]
| 40.533333 | 87 | 0.725329 |
acf03c435844c3520be0d1899039104af9a9da47 | 523 | py | Python | database_logger/migrations/0003_alter_logentry_extra_info_json.py | davidegalletti/django_database_logger | e3efdd35dec7d60ea1635f5652ff6913d2825969 | [
"BSD-3-Clause"
] | null | null | null | database_logger/migrations/0003_alter_logentry_extra_info_json.py | davidegalletti/django_database_logger | e3efdd35dec7d60ea1635f5652ff6913d2825969 | [
"BSD-3-Clause"
] | null | null | null | database_logger/migrations/0003_alter_logentry_extra_info_json.py | davidegalletti/django_database_logger | e3efdd35dec7d60ea1635f5652ff6913d2825969 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 4.0.3 on 2022-04-12 14:49
import django.core.serializers.json
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('database_logger', '0002_alter_logentry_level_name_alter_logentry_name'),
]
operations = [
migrations.AlterField(
model_name='logentry',
name='extra_info_json',
field=models.JSONField(default='{}', encoder=django.core.serializers.json.DjangoJSONEncoder),
),
]
| 26.15 | 105 | 0.67304 |
acf03ca36240e0a36755fcf934ae0b140ad28991 | 1,224 | py | Python | testCases/noiseTest/sixMachineNoise.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | testCases/noiseTest/sixMachineNoise.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | testCases/noiseTest/sixMachineNoise.py | thadhaines/PSLTDSim | 1bc598f3733c1369c164f54249e5f7757e6bf466 | [
"MIT"
] | null | null | null | # Format of required info for batch runs.
debug = 0
AMQPdebug = 0
debugTimer = 0
simNotes = """
An hour of noise 0.3% noise
area 1 BA action 5 sec
area 2 BA action 5 sec
both have step deadband
"""
# Simulation Parameters Dictionary
simParams = {
'timeStep': 1.0,
'endTime': 3600.0,
'slackTol': 1,
'PY3msgGroup' : 3,
'IPYmsgGroup' : 60,
'Hinput' : 0.0, # MW*sec of entire system, if !> 0.0, will be calculated in code
'Dsys' : 0.0, # Untested
'fBase' : 60.0, # System F base in Hertz
'freqEffects' : True, # w in swing equation will not be assumed 1 if true
# Mathematical Options
'integrationMethod' : 'rk45',
# Data Export Parameters
'fileDirectory' : "\\delme\\sixMachineNoise\\", # relative path from cwd
'fileName' : 'SixMachineNoise',
'exportFinalMirror': 1, # Export mirror with all data
'exportMat': 1, # if IPY: requies exportDict == 1 to work
'exportDict' : 0, # when using python 3 no need to export dicts.
'deleteInit' : 0, # Delete initialized mirror
}
savPath = r"C:\LTD\pslf_systems\sixMachine\sixMachine.sav"
dydPath = [r"C:\LTD\pslf_systems\sixMachine\sixMachine2.dyd"]
ltdPath = r".\testCases\noiseTest\sixMachineNoise.ltd.py" | 33.081081 | 84 | 0.669118 |
acf03cff220b2d3b655e5ca7b700f0067e13c174 | 1,003 | py | Python | fig_8/compare_edges.py | hobertlab/cook_et_al_2020_pharynx | 6ad5c46e637e2495caa3bf0e9f983594dfdf9806 | [
"MIT"
] | null | null | null | fig_8/compare_edges.py | hobertlab/cook_et_al_2020_pharynx | 6ad5c46e637e2495caa3bf0e9f983594dfdf9806 | [
"MIT"
] | null | null | null | fig_8/compare_edges.py | hobertlab/cook_et_al_2020_pharynx | 6ad5c46e637e2495caa3bf0e9f983594dfdf9806 | [
"MIT"
] | null | null | null | #compare edges
import sys
import os
import getopt
import aux
import networkx as nx
import operator
def main(argv):
#Get input arguments
try:
opts, args = getopt.getopt(argv[1:], "ha:b:o:d",
["help","","input1=","input2=","output=","directed"])
except getopt.GetoptError:
print "Warning: Unknown flag!"
sys.exit(2)
return 0
A,B,OUTPUT,DIRECTED = None,None,None,False
for opt, arg in opts:
if opt in ("-h", "--help"):
print_help("Help/formatWA.h",argv[0])
return 0
elif opt in ("-a","--input1"): A = arg
elif opt in ("-b","--input2"): B = arg
elif opt in ("-o","--output"): OUTPUT = arg
elif opt in ("-d","--directed"): DIRECTED = True
A = aux.read_adj(A,DIRECTED)
B = aux.read_adj(B,DIRECTED)
C = []
for (e1,e2) in A.edges():
if not B.has_edge(e1,e2):
C.append([e1,e2,A[e1][e2]['weight']])
C.sort(key=operator.itemgetter(2),reverse = True)
#print Gg['AQR']
if OUTPUT:
aux.write_out_list(OUTPUT,C)
if __name__ == '__main__':
main(sys.argv)
| 20.895833 | 57 | 0.629113 |
acf03e75bf289002312c35038c269b6b5b018ca2 | 750 | py | Python | package/zimagi/data/transports.py | zimagi/zima | d87b3f91e2fa669a77145413582d636d783a0c71 | [
"Apache-2.0"
] | null | null | null | package/zimagi/data/transports.py | zimagi/zima | d87b3f91e2fa669a77145413582d636d783a0c71 | [
"Apache-2.0"
] | null | null | null | package/zimagi/data/transports.py | zimagi/zima | d87b3f91e2fa669a77145413582d636d783a0c71 | [
"Apache-2.0"
] | null | null | null | from requests.exceptions import ConnectionError
from .. import exceptions, transports
import logging
logger = logging.getLogger(__name__)
class DataHTTPSTransport(transports.BaseTransport):
def transition(self, link, decoders, params = None):
params = self.get_params(link, params)
url = self.get_url(link.url, params.path)
headers = self.get_headers(url, decoders)
headers.update(self._headers)
if link.action == 'get':
try:
return self.request_page(url, headers, params, decoders)
except ConnectionError as error:
raise exceptions.CommandConnectionError(error)
def _decode_result_error(self, result, response):
return result
| 25.862069 | 72 | 0.676 |
acf03f7df6ccc17a593a217cf810c71c533b6a69 | 982 | py | Python | scripts/utils/display.py | tmralmeida/tensorrt-yolov4 | 320ffaf9cfe8fb3e787b16b03bb5de3a5e1ce5b9 | [
"Apache-2.0"
] | 7 | 2020-08-17T15:22:52.000Z | 2021-07-18T08:40:50.000Z | scripts/utils/display.py | tmralmeida/tensorrt-yolov4 | 320ffaf9cfe8fb3e787b16b03bb5de3a5e1ce5b9 | [
"Apache-2.0"
] | null | null | null | scripts/utils/display.py | tmralmeida/tensorrt-yolov4 | 320ffaf9cfe8fb3e787b16b03bb5de3a5e1ce5b9 | [
"Apache-2.0"
] | null | null | null | """display.py
"""
import cv2
def open_window(window_name, width, height, title):
"""Open the display window."""
cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(window_name, width, height)
cv2.setWindowTitle(window_name, title)
def show_fps(img, fps):
"""Draw fps number at top-left corner of the image."""
font = cv2.FONT_HERSHEY_PLAIN
line = cv2.LINE_AA
fps_text = 'FPS: {:.2f}'.format(fps)
cv2.putText(img, fps_text, (11, 20), font, 1.0, (32, 32, 32), 4, line)
cv2.putText(img, fps_text, (10, 20), font, 1.0, (240, 240, 240), 1, line)
return img
def set_display(window_name, full_scrn):
"""Set disply window to either full screen or normal."""
if full_scrn:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_FULLSCREEN)
else:
cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
cv2.WINDOW_NORMAL) | 32.733333 | 77 | 0.645621 |
acf03f883383cdc492621263c0e1cd9c471f9393 | 953 | py | Python | example/factories_sample/migrations/0001_initial.py | kaoslabsinc/django-building-blocks | b5f74121b4002e2b96b5addd6f2593c1a2cead98 | [
"BSD-3-Clause"
] | 1 | 2021-08-03T12:42:37.000Z | 2021-08-03T12:42:37.000Z | example/factories_sample/migrations/0001_initial.py | kaoslabsinc/django-building-blocks | b5f74121b4002e2b96b5addd6f2593c1a2cead98 | [
"BSD-3-Clause"
] | 4 | 2021-07-27T18:22:35.000Z | 2021-08-06T21:55:39.000Z | example/factories_sample/migrations/0001_initial.py | kaoslabsinc/django-building-blocks | b5f74121b4002e2b96b5addd6f2593c1a2cead98 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-26 22:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='HasNameExample',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='HasOptionalNameExample',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=255)),
],
options={
'abstract': False,
},
),
]
| 27.228571 | 117 | 0.527807 |
acf040b0d1542fa570a79dd4ee2d6a1c702a7267 | 15,847 | py | Python | gs_quant/target/groups.py | daniel-schreier/gs-quant | abc5670a35874f2ce701418c9e1da7987092b4f7 | [
"Apache-2.0"
] | null | null | null | gs_quant/target/groups.py | daniel-schreier/gs-quant | abc5670a35874f2ce701418c9e1da7987092b4f7 | [
"Apache-2.0"
] | null | null | null | gs_quant/target/groups.py | daniel-schreier/gs-quant | abc5670a35874f2ce701418c9e1da7987092b4f7 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from gs_quant.common import *
import datetime
from typing import Mapping, Tuple, Union, Optional
from gs_quant.base import Base, InstrumentBase, camel_case_translate, get_enum_value
class GroupWithMembersCount(Base):
"""Marquee Group with Members Count"""
@camel_case_translate
def __init__(
self,
members_count: int = None,
name: str = None
):
super().__init__()
self.members_count = members_count
self.name = name
@property
def members_count(self) -> int:
"""Total number of members in the group"""
return self.__members_count
@members_count.setter
def members_count(self, value: int):
self._property_changed('members_count')
self.__members_count = value
class UpdateGroupMembershipRequest(Base):
@camel_case_translate
def __init__(
self,
user_ids: Tuple[str, ...],
name: str = None
):
super().__init__()
self.user_ids = user_ids
self.name = name
@property
def user_ids(self) -> Tuple[str, ...]:
"""List of marquee user guids"""
return self.__user_ids
@user_ids.setter
def user_ids(self, value: Tuple[str, ...]):
self._property_changed('user_ids')
self.__user_ids = value
class UserCoverage(Base):
"""Sales coverage for user"""
@camel_case_translate
def __init__(
self,
name: str,
email: str,
app: str = None,
phone: str = None,
guid: str = None
):
super().__init__()
self.app = app
self.phone = phone
self.name = name
self.email = email
self.guid = guid
@property
def app(self) -> str:
"""Marquee application covered by sales person"""
return self.__app
@app.setter
def app(self, value: str):
self._property_changed('app')
self.__app = value
@property
def phone(self) -> str:
"""Coverage phone number"""
return self.__phone
@phone.setter
def phone(self, value: str):
self._property_changed('phone')
self.__phone = value
@property
def name(self) -> str:
"""Coverage name"""
return self.__name
@name.setter
def name(self, value: str):
self._property_changed('name')
self.__name = value
@property
def email(self) -> str:
"""Coverage email"""
return self.__email
@email.setter
def email(self, value: str):
self._property_changed('email')
self.__email = value
@property
def guid(self) -> str:
"""Coverage guid"""
return self.__guid
@guid.setter
def guid(self, value: str):
self._property_changed('guid')
self.__guid = value
class UserTag(Base):
"""Marquee User Tag Attribute"""
@camel_case_translate
def __init__(
self,
name: str,
added_on: datetime.datetime = None,
added_by_id: str = None,
removed: bool = None,
removed_on: datetime.datetime = None,
removed_by_id: str = None,
removal_reason: str = None,
category: str = None
):
super().__init__()
self.added_on = added_on
self.added_by_id = added_by_id
self.removed = removed
self.removed_on = removed_on
self.removed_by_id = removed_by_id
self.removal_reason = removal_reason
self.category = category
self.name = name
@property
def added_on(self) -> datetime.datetime:
"""ISO 8601-formatted timestamp"""
return self.__added_on
@added_on.setter
def added_on(self, value: datetime.datetime):
self._property_changed('added_on')
self.__added_on = value
@property
def added_by_id(self) -> str:
"""Marquee unique identifier"""
return self.__added_by_id
@added_by_id.setter
def added_by_id(self, value: str):
self._property_changed('added_by_id')
self.__added_by_id = value
@property
def removed(self) -> bool:
"""Flag to indicate if tag has been removed"""
return self.__removed
@removed.setter
def removed(self, value: bool):
self._property_changed('removed')
self.__removed = value
@property
def removed_on(self) -> datetime.datetime:
"""ISO 8601-formatted timestamp"""
return self.__removed_on
@removed_on.setter
def removed_on(self, value: datetime.datetime):
self._property_changed('removed_on')
self.__removed_on = value
@property
def removed_by_id(self) -> str:
"""Marquee unique identifier"""
return self.__removed_by_id
@removed_by_id.setter
def removed_by_id(self, value: str):
self._property_changed('removed_by_id')
self.__removed_by_id = value
@property
def removal_reason(self) -> str:
"""Reason tag was removed"""
return self.__removal_reason
@removal_reason.setter
def removal_reason(self, value: str):
self._property_changed('removal_reason')
self.__removal_reason = value
@property
def category(self) -> str:
"""Category of the tag"""
return self.__category
@category.setter
def category(self, value: str):
self._property_changed('category')
self.__category = value
@property
def name(self) -> str:
"""Name of the tag"""
return self.__name
@name.setter
def name(self, value: str):
self._property_changed('name')
self.__name = value
class GroupResponse(Base):
@camel_case_translate
def __init__(
self,
results: Tuple[GroupWithMembersCount, ...],
total_results: int,
scroll_id: Tuple[str, ...] = None,
name: str = None
):
super().__init__()
self.total_results = total_results
self.results = results
self.scroll_id = scroll_id
self.name = name
@property
def total_results(self) -> int:
"""Total number of groups that match the query."""
return self.__total_results
@total_results.setter
def total_results(self, value: int):
self._property_changed('total_results')
self.__total_results = value
@property
def results(self) -> Tuple[GroupWithMembersCount, ...]:
"""Array of group objects"""
return self.__results
@results.setter
def results(self, value: Tuple[GroupWithMembersCount, ...]):
self._property_changed('results')
self.__results = value
@property
def scroll_id(self) -> Tuple[str, ...]:
"""Scroll identifier to be used to retrieve the next batch of results"""
return self.__scroll_id
@scroll_id.setter
def scroll_id(self, value: Tuple[str, ...]):
self._property_changed('scroll_id')
self.__scroll_id = value
class CreateGroupRequest(Base):
"""Marquee Group"""
@camel_case_translate
def __init__(
self,
id_: str,
name: str,
description: str = None,
entitlements: Entitlements = None,
oe_id: str = None,
owner_id: str = None,
tags: Tuple[str, ...] = None
):
super().__init__()
self.__id = id_
self.description = description
self.name = name
self.entitlements = entitlements
self.oe_id = oe_id
self.owner_id = owner_id
self.tags = tags
@property
def id(self) -> str:
"""Marquee unique identifier for a group"""
return self.__id
@id.setter
def id(self, value: str):
self._property_changed('id')
self.__id = value
@property
def description(self) -> str:
"""Group description"""
return self.__description
@description.setter
def description(self, value: str):
self._property_changed('description')
self.__description = value
@property
def name(self) -> str:
"""Name of the group"""
return self.__name
@name.setter
def name(self, value: str):
self._property_changed('name')
self.__name = value
@property
def entitlements(self) -> Entitlements:
"""Entitlements for the given group"""
return self.__entitlements
@entitlements.setter
def entitlements(self, value: Entitlements):
self._property_changed('entitlements')
self.__entitlements = value
@property
def oe_id(self) -> str:
"""Goldman Sachs unique identifier for client's organization"""
return self.__oe_id
@oe_id.setter
def oe_id(self, value: str):
self._property_changed('oe_id')
self.__oe_id = value
@property
def owner_id(self) -> str:
"""Marquee unique identifier of user who owns the group. If not specified, ownerId
is same as createdById"""
return self.__owner_id
@owner_id.setter
def owner_id(self, value: str):
self._property_changed('owner_id')
self.__owner_id = value
@property
def tags(self) -> Tuple[str, ...]:
"""Tags associated with the groups"""
return self.__tags
@tags.setter
def tags(self, value: Tuple[str, ...]):
self._property_changed('tags')
self.__tags = value
class UpdateGroupRequest(Base):
"""Marquee Group"""
@camel_case_translate
def __init__(
self,
name: str = None,
description: str = None,
entitlements: Entitlements = None,
oe_id: str = None,
owner_id: str = None,
tags: Tuple[str, ...] = None
):
super().__init__()
self.name = name
self.description = description
self.entitlements = entitlements
self.oe_id = oe_id
self.owner_id = owner_id
self.tags = tags
@property
def name(self) -> str:
"""Name of the group"""
return self.__name
@name.setter
def name(self, value: str):
self._property_changed('name')
self.__name = value
@property
def description(self) -> str:
"""Group description"""
return self.__description
@description.setter
def description(self, value: str):
self._property_changed('description')
self.__description = value
@property
def entitlements(self) -> Entitlements:
"""Entitlements for the given group"""
return self.__entitlements
@entitlements.setter
def entitlements(self, value: Entitlements):
self._property_changed('entitlements')
self.__entitlements = value
@property
def oe_id(self) -> str:
"""Goldman Sachs unique identifier for client's organization"""
return self.__oe_id
@oe_id.setter
def oe_id(self, value: str):
self._property_changed('oe_id')
self.__oe_id = value
@property
def owner_id(self) -> str:
"""Marquee unique identifier of user who owns the group. If not specified, ownerId
is same as createdById"""
return self.__owner_id
@owner_id.setter
def owner_id(self, value: str):
self._property_changed('owner_id')
self.__owner_id = value
@property
def tags(self) -> Tuple[str, ...]:
"""Tags associated with the groups"""
return self.__tags
@tags.setter
def tags(self, value: Tuple[str, ...]):
self._property_changed('tags')
self.__tags = value
class Group(Base):
    """Marquee Group.

    Generated-style data class: every field is exposed as a property whose
    setter calls the inherited ``_property_changed`` hook before storing the
    value in a name-mangled private attribute.
    """
    @camel_case_translate
    def __init__(
        self,
        id_: str,
        name: str,
        description: str = None,
        created_by_id: str = None,
        last_updated_by_id: str = None,
        entitlements: Entitlements = None,
        owner_id: str = None,
        oe_id: str = None,
        tags: Tuple[str, ...] = None
    ):
        super().__init__()
        self.description = description
        self.name = name
        # ``id`` is assigned directly (bypassing the property setter), so no
        # change notification is recorded for it at construction time.
        self.__id = id_
        self.created_by_id = created_by_id
        self.last_updated_by_id = last_updated_by_id
        self.entitlements = entitlements
        self.owner_id = owner_id
        self.oe_id = oe_id
        self.tags = tags
    @property
    def description(self) -> str:
        """Group description"""
        return self.__description
    @description.setter
    def description(self, value: str):
        self._property_changed('description')
        self.__description = value
    @property
    def name(self) -> str:
        """Name of the group"""
        return self.__name
    @name.setter
    def name(self, value: str):
        self._property_changed('name')
        self.__name = value
    @property
    def id(self) -> str:
        """Marquee unique identifier of the group"""
        return self.__id
    @id.setter
    def id(self, value: str):
        self._property_changed('id')
        self.__id = value
    @property
    def created_by_id(self) -> str:
        """Marquee unique identifier of user who created the group"""
        return self.__created_by_id
    @created_by_id.setter
    def created_by_id(self, value: str):
        self._property_changed('created_by_id')
        self.__created_by_id = value
    @property
    def last_updated_by_id(self) -> str:
        """Marquee unique identifier of user who last updated the group"""
        return self.__last_updated_by_id
    @last_updated_by_id.setter
    def last_updated_by_id(self, value: str):
        self._property_changed('last_updated_by_id')
        self.__last_updated_by_id = value
    @property
    def entitlements(self) -> Entitlements:
        """Entitlements for the given group"""
        return self.__entitlements
    @entitlements.setter
    def entitlements(self, value: Entitlements):
        self._property_changed('entitlements')
        self.__entitlements = value
    @property
    def owner_id(self) -> str:
        """Marquee unique identifier of user who owns the group. If not specified, ownerId
        is same as createdById"""
        return self.__owner_id
    @owner_id.setter
    def owner_id(self, value: str):
        self._property_changed('owner_id')
        self.__owner_id = value
    @property
    def oe_id(self) -> str:
        """Goldman Sachs unique identifier for client's organization"""
        return self.__oe_id
    @oe_id.setter
    def oe_id(self, value: str):
        self._property_changed('oe_id')
        self.__oe_id = value
    @property
    def tags(self) -> Tuple[str, ...]:
        """Tags associated with the groups"""
        return self.__tags
    @tags.setter
    def tags(self, value: Tuple[str, ...]):
        self._property_changed('tags')
        self.__tags = value
| 26.678451 | 90 | 0.59639 |
acf04217888338bc19a2d5cda915348b8efb396d | 228 | py | Python | gorden_crawler/sizeregex/skinstore.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 2 | 2019-02-22T13:51:08.000Z | 2020-08-03T14:01:30.000Z | gorden_crawler/sizeregex/skinstore.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | null | null | null | gorden_crawler/sizeregex/skinstore.py | Enmming/gorden_cralwer | 3c279e4f80eaf90f3f03acd31b75cf991952adee | [
"Apache-2.0"
] | 1 | 2020-08-03T14:01:32.000Z | 2020-08-03T14:01:32.000Z | import re
from gorden_crawler.sizeregex.basestrategy import BaseStrategy
class Skinstore(BaseStrategy):
    """Size-normalisation strategy for the Skinstore site.

    Both hooks are placeholders: no size transformation or regex map is
    defined for this site, so each returns None.
    """

    def transform(self, size, reg, site, key):
        """No-op transform: always returns None."""
        return None

    def getReMap(self):
        """No regex map defined for this site: always returns None."""
        return None
acf0424fc6f93c3e2d7bb4eafefbd5d61537a71e | 8,684 | py | Python | gbdxtools/images/tms_image.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | gbdxtools/images/tms_image.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | gbdxtools/images/tms_image.py | pmakarov/gbdxtools | 07840cd15f64f20c852a90ca9f83749fd376b6bf | [
"MIT"
] | null | null | null | import os
import uuid
import threading
from collections import defaultdict
from itertools import chain
from functools import partial
from tempfile import NamedTemporaryFile
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
try:
from functools import lru_cache # python 3
except ImportError:
from cachetools.func import lru_cache
import numpy as np
from affine import Affine
from scipy.misc import imread
import mercantile
from gbdxtools.images.meta import DaskImage, DaskMeta, GeoImage, PlotMixin
from gbdxtools.ipe.util import AffineTransform
from shapely.geometry import mapping, box
from shapely.geometry.base import BaseGeometry
from shapely import ops
import pyproj
import pycurl
# One pycurl handle per thread: handles are not shareable across threads, so
# each worker lazily gets its own entry keyed by its thread id.
_curl_pool = defaultdict(pycurl.Curl)
# Python 2/3 shim: alias xrange to range when it does not exist (Python 3).
try:
    xrange
except NameError:
    xrange = range
@lru_cache(maxsize=128)
def load_url(url, shape=(8, 256, 256)):
    """Fetch an image tile from `url` in the current thread and return it as a
    (bands, rows, cols) ndarray; on any failure, return zeros of `shape`.

    Results are memoized per URL via lru_cache. One pycurl handle is kept per
    thread in `_curl_pool`; on error the handle is discarded so the next call
    gets a fresh one.

    NOTE(review): the default shape has 8 bands while the TMS reader works
    with 3 -- in practice callers always pass the chunk shape explicitly.
    """
    thread_id = threading.current_thread().ident
    _curl = _curl_pool[thread_id]
    _curl.setopt(_curl.URL, url)
    _curl.setopt(pycurl.NOSIGNAL, 1)
    _, ext = os.path.splitext(urlparse(url).path)
    # BUG FIX: os.path.splitext already returns the extension with its leading
    # dot (e.g. ".png"), so the original suffix="."+ext produced filenames
    # ending in "..png".
    with NamedTemporaryFile(prefix="gbdxtools", suffix=ext, delete=False) as temp:
        _curl.setopt(_curl.WRITEDATA, temp.file)
        _curl.perform()
        code = _curl.getinfo(pycurl.HTTP_CODE)
        try:
            if code != 200:
                raise TypeError("Request for {} returned unexpected error code: {}".format(url, code))
            # NOTE(review): temp is only flushed in `finally`, i.e. after this
            # read -- confirm pycurl's write path makes the data visible here.
            arr = np.rollaxis(imread(temp), 2, 0)
        except Exception as e:
            print(e)
            temp.seek(0)
            print(temp.read())
            arr = np.zeros(shape, dtype=np.uint8)
            # Drop this thread's curl handle; a fresh one is created next call.
            _curl.close()
            del _curl_pool[thread_id]
        finally:
            temp.file.flush()
            temp.close()
            os.remove(temp.name)
    return arr
class EphemeralImage(Exception):
    """Raised when an image with no bounds/AOI set is asked to materialize."""
    pass
def raise_aoi_required():
    """Dask task placeholder that always raises EphemeralImage.

    TmsMeta.dask substitutes this task when no bounds have been set, so any
    attempt to compute the array fails with a clear message.
    """
    raise EphemeralImage("Image subset must be specified before it can be made concrete.")
class TmsMeta(DaskMeta):
    """DaskMeta implementation backed by an XYZ/TMS tile service.

    Builds a lazy dask graph whose chunks are individual 256x256 map tiles
    fetched from ``url`` at a fixed ``zoom`` level. Until ``bounds`` are set
    the image is "ephemeral": materializing it raises ``EphemeralImage`` via
    ``raise_aoi_required``.
    """

    def __init__(self, access_token=os.environ.get("MAPBOX_API_KEY"),
                 url="https://api.mapbox.com/v4/digitalglobe.nal0g75k/{z}/{x}/{y}.png",
                 zoom=22, bounds=None):
        self.zoom_level = zoom
        self._token = access_token
        self._name = "image-{}".format(str(uuid.uuid4()))
        self._url_template = url + "?access_token={token}"
        # Full extent of the tile grid at this zoom, in web-mercator: from the
        # tile at index (0, 0) to the tile containing lng/lat (180, -85.05).
        # BUG FIX: the original called mercantile.Tile(z=..., x=180, y=-85.05),
        # i.e. the raw namedtuple constructor with lng/lat used as *tile
        # indices* (y was even a negative float). mercantile.tile(lng, lat, z)
        # computes the tile actually containing that coordinate, matching the
        # usage in the `shape` property below.
        _first_tile = mercantile.Tile(z=self.zoom_level, x=0, y=0)
        _last_tile = mercantile.tile(180, -85.05, self.zoom_level)
        g = box(*mercantile.xy_bounds(_first_tile)).union(box(*mercantile.xy_bounds(_last_tile)))
        self._full_bounds = g.bounds
        # TODO: populate rest of fields automatically
        self._tile_size = 256
        self._nbands = 3
        self._dtype = "uint8"
        self.bounds = self._expand_bounds(bounds)
        self._chunks = tuple([self._nbands] + [self._tile_size, self._tile_size])

    @property
    def bounds(self):
        """Web-mercator bounds of the current AOI, or the full grid extent."""
        if self._bounds is None:
            return self._full_bounds
        return self._bounds

    @bounds.setter
    def bounds(self, obj):
        # TODO: set bounds via shapely or bbox, validation
        self._bounds = obj
        if obj is not None:
            self._urls, self._shape = self._collect_urls(self.bounds)

    @property
    def name(self):
        """Unique dask graph name for this image."""
        return self._name

    @property
    def dask(self):
        """Dask graph: one load_url task per tile, keyed (name, 0, row, col);
        a single raising task when no bounds are set."""
        if self._bounds is None:
            return {self._name: (raise_aoi_required, )}
        else:
            urls, shape = self._collect_urls(self.bounds)
            return {(self._name, 0, y, x): (load_url, url, self._chunks) for (y, x), url in urls.items()}

    @property
    def dtype(self):
        return self._dtype

    @property
    def shape(self):
        """(bands, rows, cols); falls back to the whole grid when unbounded."""
        if self._bounds is None:
            _tile = mercantile.tile(180, -85.05, self.zoom_level)
            nx = _tile.x * self._tile_size
            ny = _tile.y * self._tile_size
            return tuple([self._nbands] + [ny, nx])
        else:
            return self._shape

    @property
    def chunks(self):
        return self._chunks

    @property
    def __geo_transform__(self):
        # Affine transform mapping pixel (col, row) -> web-mercator (x, y).
        west, south, east, north = self.bounds
        tfm = Affine.translation(west, north) * Affine.scale((east - west) / self.shape[2], (south - north) / self.shape[1])
        return AffineTransform(tfm, "EPSG:3857")

    def _collect_urls(self, bounds):
        """Build {(row, col): url} for every tile intersecting `bounds`.

        NOTE(review): the loops cover (max - min + 1) tiles per axis but the
        returned shape uses (max - min); this looks off by one tile in each
        dimension -- confirm against DaskImage's chunk/shape handling before
        changing, since downstream code may compensate.
        """
        minx, miny, maxx, maxy = self._tile_coords(bounds)
        urls = {(y - miny, x - minx): self._url_template.format(z=self.zoom_level, x=x, y=y, token=self._token)
                for y in xrange(miny, maxy + 1) for x in xrange(minx, maxx + 1)}
        return urls, (3, self._tile_size * (maxy - miny), self._tile_size * (maxx - minx))

    def _expand_bounds(self, bounds):
        """Snap `bounds` outward to tile edges (web-mercator); pass None through."""
        if bounds is None:
            return bounds
        min_tile_x, min_tile_y, max_tile_x, max_tile_y = self._tile_coords(bounds)
        ul = box(*mercantile.xy_bounds(mercantile.Tile(z=self.zoom_level, x=min_tile_x, y=max_tile_y)))
        lr = box(*mercantile.xy_bounds(mercantile.Tile(z=self.zoom_level, x=max_tile_x, y=min_tile_y)))
        return ul.union(lr).bounds

    def _tile_coords(self, bounds):
        """Convert web-mercator `bounds` to min/max tile indices at this zoom.

        (The original docstring had this backwards: the method consumes
        EPSG:3857 bounds and produces tile indices, not lng/lat bounds.)
        """
        # mercantile.tiles() expects lng/lat, so reproject from EPSG:3857.
        tfm = partial(pyproj.transform,
                      pyproj.Proj(init="epsg:3857"),
                      pyproj.Proj(init="epsg:4326"))
        bounds = ops.transform(tfm, box(*bounds)).bounds
        params = list(bounds) + [[self.zoom_level]]
        tile_coords = [(tile.x, tile.y) for tile in mercantile.tiles(*params)]
        xtiles, ytiles = zip(*tile_coords)
        minx = min(xtiles)
        maxx = max(xtiles)
        miny = min(ytiles)
        maxy = max(ytiles)
        return minx, miny, maxx, maxy
class TmsImage(DaskImage, GeoImage, PlotMixin):
    """Image backed by a TMS/XYZ tile service, exposed as a dask/geo image.

    Construction goes through ``__new__`` (not ``__init__``) because the
    instance is produced by ``DaskImage.create`` from a ``TmsMeta``; when an
    AOI is supplied via kwargs the constructor immediately returns the
    subsetted image.
    """
    _default_proj = "EPSG:3857"
    def __new__(cls, access_token=os.environ.get("MAPBOX_API_KEY"),
                url="https://api.mapbox.com/v4/digitalglobe.nal0g75k/{z}/{x}/{y}.png",
                zoom=22, **kwargs):
        _tms_meta = TmsMeta(access_token=access_token, url=url, zoom=zoom, bounds=kwargs.get("bounds"))
        self = super(TmsImage, cls).create(_tms_meta)
        # Remember the constructor args so aoi() can rebuild a bounded copy.
        self._base_args = {"access_token": access_token, "url": url, "zoom": zoom}
        self._tms_meta = _tms_meta
        self.__geo_interface__ = mapping(box(*_tms_meta.bounds))
        self.__geo_transform__ = _tms_meta.__geo_transform__
        g = self._parse_geoms(**kwargs)
        if g is not None:
            return self[g]
        else:
            return self
    @property
    def __daskmeta__(self):
        # The TmsMeta instance drives the dask graph for this image.
        return self._tms_meta
    def rgb(self, **kwargs):
        # Reorder (bands, rows, cols) -> (rows, cols, bands) for display.
        return np.rollaxis(self.read(), 0, 3)
    def aoi(self, **kwargs):
        # Build a new, bounded TmsImage for the requested geometry and index
        # into it so the result carries the geometry's exact footprint.
        g = self._parse_geoms(**kwargs)
        return self.__class__(bounds=list(g.bounds), **self._base_args)[g]
    def __getitem__(self, geometry):
        # Geometry-like index: delegate to GeoImage (or rebuild via aoi() when
        # this image has no bounds yet and so cannot be sliced concretely).
        if isinstance(geometry, BaseGeometry) or getattr(geometry, "__geo_interface__", None) is not None:
            if self._tms_meta._bounds is None:
                return self.aoi(geojson=mapping(geometry), from_proj=self.proj)
            image = GeoImage.__getitem__(self, geometry)
            image._tms_meta = self._tms_meta
            return image
        else:
            # Array-style index: slice the dask array, then re-wrap the result
            # as a TmsImage and recompute its geo metadata when the index was
            # a full set of slices (shape-aligned pixel window).
            result = super(TmsImage, self).__getitem__(geometry)
            image = super(TmsImage, self.__class__).__new__(self.__class__,
                                                            result.dask, result.name, result.chunks,
                                                            result.dtype, result.shape)
            if all([isinstance(e, slice) for e in geometry]) and len(geometry) == len(self.shape):
                # Pixel window in (band, row, col) order; None means full extent.
                xmin, ymin, xmax, ymax = geometry[2].start, geometry[1].start, geometry[2].stop, geometry[1].stop
                xmin = 0 if xmin is None else xmin
                ymin = 0 if ymin is None else ymin
                xmax = self.shape[2] if xmax is None else xmax
                ymax = self.shape[1] if ymax is None else ymax
                g = ops.transform(self.__geo_transform__.fwd, box(xmin, ymin, xmax, ymax))
                image.__geo_interface__ = mapping(g)
                # Shift the transform so pixel (0, 0) of the slice maps correctly.
                image.__geo_transform__ = self.__geo_transform__ + (xmin, ymin)
            else:
                image.__geo_interface__ = self.__geo_interface__
                image.__geo_transform__ = self.__geo_transform__
            image._tms_meta = self._tms_meta
            return image
acf0430fb859ca36a2c159e2af777ea320bd036c | 873 | py | Python | atheppy/heppyresult/__init__.py | alphatwirl/atheppy | e4cbc070c2313f283053383edb629bb3cb80807e | [
"BSD-3-Clause"
] | null | null | null | atheppy/heppyresult/__init__.py | alphatwirl/atheppy | e4cbc070c2313f283053383edb629bb3cb80807e | [
"BSD-3-Clause"
] | null | null | null | atheppy/heppyresult/__init__.py | alphatwirl/atheppy | e4cbc070c2313f283053383edb629bb3cb80807e | [
"BSD-3-Clause"
] | null | null | null | from .Analyzer import Analyzer
from .Component import Component
from .ComponentLoop import ComponentLoop
from .ComponentReaderComposite import ComponentReaderComposite
from .EventBuilderConfig import EventBuilderConfig
from .HeppyResult import HeppyResult
from .ReadComponentConfig import ReadComponentConfig
from .ReadCounter import ReadCounter
from .ReadVersionInfo import ReadVersionInfo
from .TblCounter import TblCounter
from .TblComponentConfig import TblComponentConfig
from .TblCounterLong import TblCounterLong
from .TblBrilCalc import TblBrilCalc
# ROOT is an optional dependency: probe for it once at import time and expose
# the ROOT-backed readers only when it is importable.
hasROOT = False
try:
    import ROOT
    hasROOT = True
except ImportError:
    pass
if hasROOT:
    # These modules require ROOT at import time, so they are loaded only when
    # the probe above succeeded.
    from .EventBuilder import EventBuilder
    from .EventBuilderConfigMaker import EventBuilderConfigMaker
    from .TblBranch import TblBranch
    from .TblTree import TblTree
    from .TblSMSNevt import TblSMSNevt
| 31.178571 | 64 | 0.836197 |
acf0433b11d75b7af25912cd1f141512a10cf8c3 | 617 | py | Python | torcms/model/abc_model.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 243 | 2015-02-11T03:22:19.000Z | 2022-03-02T11:13:27.000Z | torcms/model/abc_model.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 8 | 2015-09-09T10:49:52.000Z | 2020-08-30T08:52:48.000Z | torcms/model/abc_model.py | bukun/TorCMS | 5d7480865fd46e706b84f5f65a5c24cd03bb2142 | [
"MIT"
] | 101 | 2015-02-12T02:17:16.000Z | 2021-11-19T09:20:10.000Z | # -*- coding:utf-8 -*-
'''
The Base of Model
'''
class MHelper():
    '''
    Common used function for most Model. Using Model as the first parameter.
    '''
    @staticmethod
    def get_by_uid(model, uid):
        '''
        Return the record of ``model`` whose ``uid`` matches, or ``None`` if
        no such record exists.
        '''
        # EAFP with a single query. The original counted first and then
        # fetched, which issued two queries for every hit.
        try:
            return model.get(model.uid == uid)
        except model.DoesNotExist:
            return None
    @staticmethod
    def delete(model, uid):
        '''
        Delete the record of ``model`` whose ``uid`` matches.

        Returns ``True`` on success, ``False`` (after printing the error) on
        any failure.
        '''
        entry = model.delete().where(model.uid == uid)
        try:
            entry.execute()
            return True
        except Exception as err:
            # Best-effort: report and signal failure rather than propagate.
            print(repr(err))
            return False
acf0449ddce10ce99be1e342ad2041437ec4f697 | 2,283 | py | Python | contrib/zmq/zmq_test.py | RitoProject/Ritocoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 18 | 2018-11-30T19:07:06.000Z | 2021-05-17T11:06:12.000Z | contrib/zmq/zmq_test.py | RitoProject/Ravencoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 1 | 2018-12-08T19:41:43.000Z | 2018-12-08T19:41:43.000Z | contrib/zmq/zmq_test.py | RitoProject/Ravencoin | 6950104b40ca6bec36ec98ea2046ea2fdcf4e92a | [
"MIT"
] | 17 | 2018-11-30T17:16:21.000Z | 2021-10-30T17:33:14.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The Rito Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
ZMQ example using python3's asyncio
Rito should be started with the command line arguments:
ritod -testnet -daemon \
-zmqpubhashblock=tcp://127.0.0.1:28501 \
-zmqpubrawtx=tcp://127.0.0.1:28501 \
-zmqpubhashtx=tcp://127.0.0.1:28501 \
-zmqpubhashblock=tcp://127.0.0.1:28501
"""
import sys
import zmq
import struct
import binascii
import codecs
# Socket to talk to server
# Subscribe to all four notification topics published by the local ritod node.
context = zmq.Context()
socket = context.socket(zmq.SUB)
print("Getting Ritocoin msgs")
socket.connect("tcp://localhost:28501")
socket.setsockopt_string(zmq.SUBSCRIBE, "hashtx")
socket.setsockopt_string(zmq.SUBSCRIBE, "hashblock")
socket.setsockopt_string(zmq.SUBSCRIBE, "rawblock")
socket.setsockopt_string(zmq.SUBSCRIBE, "rawtx")
while True:
    # Each message is [topic, body, (optional) 4-byte little-endian sequence].
    msg = socket.recv_multipart()
    topic = msg[0]
    body = msg[1]
    sequence = "Unknown"
    if len(msg[-1]) == 4:
        msgSequence = struct.unpack('<I', msg[-1])[-1]
        sequence = str(msgSequence)
    if topic == b"hashblock":
        print('- HASH BLOCK ('+sequence+') -')
        print(binascii.hexlify(body))
    elif topic == b"hashtx":
        print('- HASH TX ('+sequence+') -')
        print(binascii.hexlify(body))
    elif topic == b"rawblock":
        # Only the 80-byte block header is printed, not the full block.
        print('- RAW BLOCK HEADER ('+sequence+') -')
        print(binascii.hexlify(body[:80]))
    elif topic == b"rawtx":
        # Scan the hex-encoded transaction for '72766e' (ASCII "rvn"), the
        # asset-script marker, and decode the asset name that follows it.
        astr = binascii.hexlify(body).decode("utf-8")
        start = 0
        pos = 0
        while(pos != -1):
            pos = astr.find('72766e', start)
            if (pos > -1):
                print("FOUND RITO issuance at " + str(pos))
                # Two hex chars after the marker, then a one-byte name length.
                print("After RITO: " + astr[pos+6:pos+8])
                sizestr = astr[pos+8:pos+10]
                print("sizestr: " + sizestr)
                #print(str(astr[pos+8:pos+10]))
                size = int(sizestr, 16)
                print("Bytes: " + str(size))
                print("Name: " + bytes.fromhex(astr[pos+10:pos+10+size*2]).decode('utf-8'))
            # NOTE(review): this second find() repeats the same search from the
            # same offset, so it reports the identical occurrence again before
            # advancing `start` -- looks like duplicated code; confirm intent.
            pos = astr.find('72766e', start)
            if (pos > -1):
                print("FOUND RITO something at " + str(pos))
                start += pos+8
        print(astr)
acf044d6ebc76dfb6c7651282f761cd73e745383 | 6,591 | py | Python | rsa/transform.py | haoqihua/Crypto-Project | 8a87548ef49c118123fee664811e064589f163e2 | [
"MIT"
] | null | null | null | rsa/transform.py | haoqihua/Crypto-Project | 8a87548ef49c118123fee664811e064589f163e2 | [
"MIT"
] | null | null | null | rsa/transform.py | haoqihua/Crypto-Project | 8a87548ef49c118123fee664811e064589f163e2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data transformation functions.
From bytes to a number, number to bytes, etc.
"""
from __future__ import absolute_import
import binascii
from struct import pack
from rsa import common
from rsa._compat import byte, is_integer, get_word_alignment
def bytes2int(raw_bytes):
    r"""Converts a list of bytes or an 8-bit string to an integer.

    When using unicode strings, encode it to some encoding like UTF8 first.

    An empty byte string converts to 0.

    >>> (((128 * 256) + 64) * 256) + 15
    8405007
    >>> bytes2int(b'\x80@\x0f')
    8405007
    >>> bytes2int(b'')
    0
    """
    # binascii.hexlify(b'') yields b'', and int('', 16) raises ValueError, so
    # handle the empty input explicitly: an empty big-endian number is 0.
    if not raw_bytes:
        return 0
    return int(binascii.hexlify(raw_bytes), 16)
def _int2bytes(number, block_size=None):
    r"""Converts a number to a string of bytes.

    Usage::

        >>> _int2bytes(123456789)
        b'\x07[\xcd\x15'
        >>> bytes2int(_int2bytes(123456789))
        123456789
        >>> _int2bytes(123456789, 6)
        b'\x00\x00\x07[\xcd\x15'
        >>> bytes2int(_int2bytes(123456789, 128))
        123456789
        >>> _int2bytes(123456789, 3)
        Traceback (most recent call last):
        ...
        OverflowError: Needed 4 bytes for number, but block size is 3

    @param number: the number to convert
    @param block_size: the number of bytes to output. If the number encoded to
        bytes is less than this, the block will be zero-padded. When not given,
        the returned block is not padded.
    @throws OverflowError when block_size is given and the number takes up more
        bytes than fit into the block.
    """
    # Type checking
    if not is_integer(number):
        raise TypeError("You must pass an integer for 'number', not %s" %
                        number.__class__)
    if number < 0:
        raise ValueError('Negative numbers cannot be used: %i' % number)
    # Do some bounds checking
    if number == 0:
        # Zero still occupies one byte in the unpadded encoding.
        needed_bytes = 1
        raw_bytes = [b'\x00']
    else:
        needed_bytes = common.byte_size(number)
        raw_bytes = []
    # You cannot compare None > 0 in Python 3x. It will fail with a TypeError.
    if block_size and block_size > 0:
        if needed_bytes > block_size:
            raise OverflowError('Needed %i bytes for number, but block size '
                                'is %i' % (needed_bytes, block_size))
    # Convert the number to bytes, least-significant byte first, prepending
    # each so the result ends up big-endian.
    while number > 0:
        raw_bytes.insert(0, byte(number & 0xFF))
        number >>= 8
    # Pad with zeroes to fill the block
    if block_size and block_size > 0:
        padding = (block_size - needed_bytes) * b'\x00'
    else:
        padding = b''
    return padding + b''.join(raw_bytes)
def bytes_leading(raw_bytes, needle=b'\x00'):
    """
    Counts how many copies of *needle*'s first byte prefix *raw_bytes*.

    Useful when you want to deal with padding.

    :param raw_bytes:
        Raw bytes.
    :param needle:
        The byte to count. Default 0x00.
    :returns:
        The number of leading needle bytes.
    """
    # Indexing keeps compatibility between Python 2.x and Python 3.x
    target = needle[0]
    count = 0
    for current in raw_bytes:
        if current != target:
            break
        count += 1
    return count
def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
    """
    Convert an unsigned integer to bytes (base-256 representation)::

    Does not preserve leading zeros if you don't specify a chunk size or
    fill size.

    .. NOTE:
        You must not specify both fill_size and chunk_size. Only one
        of them is allowed.

    :param number:
        Integer value
    :param fill_size:
        If the optional fill size is given the length of the resulting
        byte string is expected to be the fill size and will be padded
        with prefix zero bytes to satisfy that length.
    :param chunk_size:
        If optional chunk size is given and greater than zero, pad the front of
        the byte string with binary zeros so that the length is a multiple of
        ``chunk_size``.
    :param overflow:
        ``False`` (default). If this is ``True``, no ``OverflowError``
        will be raised when the fill_size is shorter than the length
        of the generated byte sequence. Instead the byte sequence will
        be returned as is.
    :returns:
        Raw bytes (base-256 representation).
    :raises:
        ``OverflowError`` when fill_size is given and the number takes up more
        bytes than fit into the block. This requires the ``overflow``
        argument to this function to be set to ``False`` otherwise, no
        error will be raised.
    """
    if number < 0:
        raise ValueError("Number must be an unsigned integer: %d" % number)
    if fill_size and chunk_size:
        raise ValueError("You can either fill or pad chunks, but not both")
    # Ensure these are integers.
    number & 1
    raw_bytes = b''
    # Pack the integer one machine word at a time into bytes.
    num = number
    word_bits, _, max_uint, pack_type = get_word_alignment(num)
    pack_format = ">%s" % pack_type
    while num > 0:
        raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
        num >>= word_bits
    # Obtain the index of the first non-zero byte.
    zero_leading = bytes_leading(raw_bytes)
    if number == 0:
        raw_bytes = b'\x00'
    # De-padding: word-sized packing above may have produced leading zero
    # bytes; strip them so the minimal big-endian encoding remains.
    raw_bytes = raw_bytes[zero_leading:]
    length = len(raw_bytes)
    if fill_size and fill_size > 0:
        if not overflow and length > fill_size:
            raise OverflowError(
                "Need %d bytes for number, but fill size is %d" %
                (length, fill_size)
            )
        raw_bytes = raw_bytes.rjust(fill_size, b'\x00')
    elif chunk_size and chunk_size > 0:
        # Pad up to the next multiple of chunk_size (only when not aligned).
        remainder = length % chunk_size
        if remainder:
            padding_size = chunk_size - remainder
            raw_bytes = raw_bytes.rjust(length + padding_size, b'\x00')
    return raw_bytes
if __name__ == '__main__':
    # When executed as a script, run the doctests embedded in this module.
    import doctest
    doctest.testmod()
| 30.513889 | 79 | 0.638295 |
acf044e45a1e6a98a32bfa862678eca77eead667 | 2,024 | py | Python | python/hopsworks/client/exceptions.py | robzor92/hopsworks-api | 94a0cfabedc0278e5d5e0eec699317073a65a126 | [
"Apache-2.0"
] | null | null | null | python/hopsworks/client/exceptions.py | robzor92/hopsworks-api | 94a0cfabedc0278e5d5e0eec699317073a65a126 | [
"Apache-2.0"
] | null | null | null | python/hopsworks/client/exceptions.py | robzor92/hopsworks-api | 94a0cfabedc0278e5d5e0eec699317073a65a126 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class RestAPIError(Exception):
    """REST Exception encapsulating the response object and url."""

    def __init__(self, url, response):
        # Pull the structured error fields out of the JSON body up front so
        # the message template below stays readable.
        body = response.json()
        error_code = body.get("errorCode", "")
        error_msg = body.get("errorMsg", "")
        user_msg = body.get("usrMsg", "")
        message = (
            "Metadata operation error: (url: {}). Server response: \n"
            "HTTP code: {}, HTTP reason: {}, error code: {}, error msg: {}, user "
            "msg: {}".format(
                url,
                response.status_code,
                response.reason,
                error_code,
                error_msg,
                user_msg,
            )
        )
        super().__init__(message)
        self.url = url
        self.response = response
class UnknownSecretStorageError(Exception):
    """This exception will be raised if an unused secrets storage is passed as a parameter."""
class GitException(Exception):
    """Generic exception for failures in git repository operations."""
class JobException(Exception):
    """Generic exception for failures in job operations."""
class KafkaException(Exception):
    """Generic exception for failures in Kafka operations."""
class DatasetException(Exception):
    """Generic exception for failures in dataset operations."""
class ProjectException(Exception):
    """Generic exception for failures in project operations."""
class ExternalClientError(TypeError):
    """Raised when external client cannot be initialized due to missing arguments.

    The original override of ``__init__`` only forwarded the message to
    ``TypeError.__init__`` and has been removed; constructing the exception
    with a single message argument behaves exactly as before.
    """
| 29.333333 | 94 | 0.647233 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.